rtnetlink: do not depend on RTNL for IFLA_IFNAME output
[linux-2.6-block.git] / net / core / rtnetlink.c
CommitLineData
2874c5fd 1// SPDX-License-Identifier: GPL-2.0-or-later
1da177e4
LT
2/*
3 * INET An implementation of the TCP/IP protocol suite for the LINUX
4 * operating system. INET is implemented using the BSD Socket
5 * interface as the means of communication with the user level.
6 *
7 * Routing netlink socket interface: protocol independent part.
8 *
9 * Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
10 *
1da177e4 11 * Fixes:
d467d0bc 12 * Vitaly E. Lavrov RTA_OK arithmetic was wrong.
1da177e4
LT
13 */
14
ee5d032f 15#include <linux/bitops.h>
1da177e4
LT
16#include <linux/errno.h>
17#include <linux/module.h>
18#include <linux/types.h>
19#include <linux/socket.h>
20#include <linux/kernel.h>
1da177e4
LT
21#include <linux/timer.h>
22#include <linux/string.h>
23#include <linux/sockios.h>
24#include <linux/net.h>
25#include <linux/fcntl.h>
26#include <linux/mm.h>
27#include <linux/slab.h>
28#include <linux/interrupt.h>
29#include <linux/capability.h>
30#include <linux/skbuff.h>
31#include <linux/init.h>
32#include <linux/security.h>
6756ae4b 33#include <linux/mutex.h>
1823730f 34#include <linux/if_addr.h>
77162022 35#include <linux/if_bridge.h>
f6f6424b 36#include <linux/if_vlan.h>
ebc08a6f 37#include <linux/pci.h>
77162022 38#include <linux/etherdevice.h>
58038695 39#include <linux/bpf.h>
1da177e4 40
7c0f6ba6 41#include <linux/uaccess.h>
1da177e4
LT
42
43#include <linux/inet.h>
44#include <linux/netdevice.h>
45#include <net/ip.h>
46#include <net/protocol.h>
47#include <net/arp.h>
48#include <net/route.h>
49#include <net/udp.h>
ea697639 50#include <net/tcp.h>
1da177e4
LT
51#include <net/sock.h>
52#include <net/pkt_sched.h>
14c0b97d 53#include <net/fib_rules.h>
e2849863 54#include <net/rtnetlink.h>
30ffee84 55#include <net/net_namespace.h>
dca56c30 56#include <net/devlink.h>
cc7f5022
IS
57#if IS_ENABLED(CONFIG_IPV6)
58#include <net/addrconf.h>
59#endif
5f184269 60#include <linux/dpll.h>
1da177e4 61
6264f58c
JK
62#include "dev.h"
63
a428afe8 64#define RTNL_MAX_TYPE 50
29cfb2aa 65#define RTNL_SLAVE_MAX_TYPE 44
ccf8dbcd 66
/* Per-(protocol, msgtype) dispatch entry for rtnetlink requests.
 * Entries are published under RCU in rtnl_msg_handlers and replaced
 * copy-on-write; old copies are reclaimed via kfree_rcu() on @rcu.
 */
struct rtnl_link {
	rtnl_doit_func		doit;	/* handler for request messages */
	rtnl_dumpit_func	dumpit;	/* handler for NLM_F_DUMP requests */
	struct module		*owner;	/* module that registered this entry */
	unsigned int		flags;	/* RTNL_FLAG_* behaviour modifiers */
	struct rcu_head		rcu;	/* deferred free of replaced entries */
};

/* The big rtnetlink mutex ("RTNL"): serializes rtnetlink operations. */
static DEFINE_MUTEX(rtnl_mutex);
1da177e4
LT
76
/* Acquire the global rtnetlink mutex. */
void rtnl_lock(void)
{
	mutex_lock(&rtnl_mutex);
}
EXPORT_SYMBOL(rtnl_lock);
1da177e4 82
79ffdfc6
KT
/* Like rtnl_lock(), but may be interrupted by a fatal signal.
 * Returns 0 when the mutex was acquired, or a negative error otherwise.
 */
int rtnl_lock_killable(void)
{
	return mutex_lock_killable(&rtnl_mutex);
}
EXPORT_SYMBOL(rtnl_lock_killable);
88
1b5c5493
ED
/* skbs queued for freeing once the RTNL is dropped (see __rtnl_unlock()).
 * NOTE(review): accessed without its own lock, so callers are expected to
 * hold the rtnl mutex.
 */
static struct sk_buff *defer_kfree_skb_list;

/* Prepend the singly-linked skb list [head..tail] onto the deferred-free
 * list; it is actually freed by __rtnl_unlock().  A NULL head or tail is
 * a no-op.
 */
void rtnl_kfree_skbs(struct sk_buff *head, struct sk_buff *tail)
{
	if (head && tail) {
		tail->next = defer_kfree_skb_list;
		defer_kfree_skb_list = head;
	}
}
EXPORT_SYMBOL(rtnl_kfree_skbs);
98
/* Drop the rtnl mutex WITHOUT running the netdev todo list, then free any
 * skbs that were deferred via rtnl_kfree_skbs() while the lock was held.
 */
void __rtnl_unlock(void)
{
	/* Grab the deferred list before releasing the mutex; entries were
	 * queued under the lock.
	 */
	struct sk_buff *head = defer_kfree_skb_list;

	defer_kfree_skb_list = NULL;

	/* Ensure that we didn't actually add any TODO item when __rtnl_unlock()
	 * is used. In some places, e.g. in cfg80211, we have code that will do
	 * something like
	 *   rtnl_lock()
	 *   wiphy_lock()
	 *   ...
	 *   rtnl_unlock()
	 *
	 * and because netdev_run_todo() acquires the RTNL for items on the list
	 * we could cause a situation such as this:
	 * Thread 1			Thread 2
	 *				  rtnl_lock()
	 *				  unregister_netdevice()
	 *				  __rtnl_unlock()
	 * rtnl_lock()
	 * wiphy_lock()
	 * rtnl_unlock()
	 *   netdev_run_todo()
	 *     __rtnl_unlock()
	 *
	 *     // list not empty now
	 *     // because of thread 2
	 *				  rtnl_lock()
	 *   while (!list_empty(...))
	 *     rtnl_lock()
	 *				  wiphy_lock()
	 * **** DEADLOCK ****
	 *
	 * However, usage of __rtnl_unlock() is rare, and so we can ensure that
	 * it's not used in cases where something is added to do the list.
	 */
	WARN_ON(!list_empty(&net_todo_list));

	mutex_unlock(&rtnl_mutex);

	/* Free deferred skbs outside the lock; reschedule between frees so a
	 * long list does not hog the CPU.
	 */
	while (head) {
		struct sk_buff *next = head->next;

		kfree_skb(head);
		cond_resched();
		head = next;
	}
}
6756ae4b 148
1da177e4
LT
/* Release the RTNL.  netdev_run_todo() processes pending netdevice work
 * and drops the mutex on our behalf.
 */
void rtnl_unlock(void)
{
	/* This fellow will unlock it for us. */
	netdev_run_todo();
}
EXPORT_SYMBOL(rtnl_unlock);
1da177e4 155
6756ae4b
SH
/* Try to acquire the RTNL without sleeping; nonzero on success. */
int rtnl_trylock(void)
{
	return mutex_trylock(&rtnl_mutex);
}
EXPORT_SYMBOL(rtnl_trylock);
6756ae4b 161
c9c1014b
PM
/* Nonzero when some task currently holds the RTNL. */
int rtnl_is_locked(void)
{
	return mutex_is_locked(&rtnl_mutex);
}
EXPORT_SYMBOL(rtnl_is_locked);
c9c1014b 167
6f99528e
VB
/* Decrement @r; if it drops to zero, return true with the RTNL held. */
bool refcount_dec_and_rtnl_lock(refcount_t *r)
{
	return refcount_dec_and_mutex_lock(r, &rtnl_mutex);
}
EXPORT_SYMBOL(refcount_dec_and_rtnl_lock);
173
#ifdef CONFIG_PROVE_LOCKING
/* Lockdep-backed check that the rtnl mutex is held by the current task. */
bool lockdep_rtnl_is_held(void)
{
	return lockdep_is_held(&rtnl_mutex);
}
EXPORT_SYMBOL(lockdep_rtnl_is_held);
#endif /* #ifdef CONFIG_PROVE_LOCKING */
181
/* Two-level dispatch table: rtnl_msg_handlers[family] is an RCU pointer to
 * a per-family array of RCU rtnl_link pointers indexed by rtm_msgindex().
 */
static struct rtnl_link __rcu *__rcu *rtnl_msg_handlers[RTNL_FAMILY_MAX + 1];

/* Map an RTM_* message type onto an index into a per-family handler table. */
static inline int rtm_msgindex(int msgtype)
{
	int msgindex = msgtype - RTM_BASE;

	/*
	 * msgindex < 0 implies someone tried to register a netlink
	 * control code. msgindex >= RTM_NR_MSGTYPES may indicate that
	 * the message type has not been added to linux/rtnetlink.h
	 */
	BUG_ON(msgindex < 0 || msgindex >= RTM_NR_MSGTYPES);

	return msgindex;
}
197
addf9b90
FW
/* Look up the handler entry for (protocol, msgtype), falling back to the
 * PF_UNSPEC table when the family has no table of its own.  Callers must
 * be in an RCU read-side section or hold the RTNL (rcu_dereference_rtnl).
 */
static struct rtnl_link *rtnl_get_link(int protocol, int msgtype)
{
	struct rtnl_link __rcu **tab;

	if (protocol >= ARRAY_SIZE(rtnl_msg_handlers))
		protocol = PF_UNSPEC;

	tab = rcu_dereference_rtnl(rtnl_msg_handlers[protocol]);
	if (!tab)
		tab = rcu_dereference_rtnl(rtnl_msg_handlers[PF_UNSPEC]);

	return rcu_dereference_rtnl(tab[msgtype]);
}
211
e4202511
FW
/* Install or update the (protocol, msgtype) handler entry, allocating the
 * per-family table on first use.  Replacement is copy-on-write: the old
 * entry (if any) is duplicated, modified, republished with
 * rcu_assign_pointer(), and freed via kfree_rcu().  Returns 0 on success
 * or -ENOBUFS on allocation failure.
 */
static int rtnl_register_internal(struct module *owner,
				  int protocol, int msgtype,
				  rtnl_doit_func doit, rtnl_dumpit_func dumpit,
				  unsigned int flags)
{
	struct rtnl_link *link, *old;
	struct rtnl_link __rcu **tab;
	int msgindex;
	int ret = -ENOBUFS;

	BUG_ON(protocol < 0 || protocol > RTNL_FAMILY_MAX);
	msgindex = rtm_msgindex(msgtype);

	rtnl_lock();
	tab = rtnl_dereference(rtnl_msg_handlers[protocol]);
	if (tab == NULL) {
		tab = kcalloc(RTM_NR_MSGTYPES, sizeof(void *), GFP_KERNEL);
		if (!tab)
			goto unlock;

		/* ensures we see the 0 stores */
		rcu_assign_pointer(rtnl_msg_handlers[protocol], tab);
	}

	old = rtnl_dereference(tab[msgindex]);
	if (old) {
		link = kmemdup(old, sizeof(*old), GFP_KERNEL);
		if (!link)
			goto unlock;
	} else {
		link = kzalloc(sizeof(*link), GFP_KERNEL);
		if (!link)
			goto unlock;
	}

	/* Re-registration must come from the same module and must not
	 * silently change an existing handler.
	 */
	WARN_ON(link->owner && link->owner != owner);
	link->owner = owner;

	WARN_ON(doit && link->doit && link->doit != doit);
	if (doit)
		link->doit = doit;
	WARN_ON(dumpit && link->dumpit && link->dumpit != dumpit);
	if (dumpit)
		link->dumpit = dumpit;

	/* Bulk delete only makes sense for RTM_DEL* message types. */
	WARN_ON(rtnl_msgtype_kind(msgtype) != RTNL_KIND_DEL &&
		(flags & RTNL_FLAG_BULK_DEL_SUPPORTED));
	link->flags |= flags;

	/* publish protocol:msgtype */
	rcu_assign_pointer(tab[msgindex], link);
	ret = 0;
	if (old)
		kfree_rcu(old, rcu);
unlock:
	rtnl_unlock();
	return ret;
}
e4202511
FW
270
/**
 * rtnl_register_module - Register a rtnetlink message type
 *
 * @owner: module registering the hook (THIS_MODULE)
 * @protocol: Protocol family or PF_UNSPEC
 * @msgtype: rtnetlink message type
 * @doit: Function pointer called for each request message
 * @dumpit: Function pointer called for each dump request (NLM_F_DUMP) message
 * @flags: rtnl_link_flags to modify behaviour of doit/dumpit functions
 *
 * Like rtnl_register, but for use by removable modules.
 *
 * Returns 0 on success or a negative error code.
 */
int rtnl_register_module(struct module *owner,
			 int protocol, int msgtype,
			 rtnl_doit_func doit, rtnl_dumpit_func dumpit,
			 unsigned int flags)
{
	return rtnl_register_internal(owner, protocol, msgtype,
				      doit, dumpit, flags);
}
EXPORT_SYMBOL_GPL(rtnl_register_module);
292
/**
 * rtnl_register - Register a rtnetlink message type
 * @protocol: Protocol family or PF_UNSPEC
 * @msgtype: rtnetlink message type
 * @doit: Function pointer called for each request message
 * @dumpit: Function pointer called for each dump request (NLM_F_DUMP) message
 * @flags: rtnl_link_flags to modify behaviour of doit/dumpit functions
 *
 * Registers the specified function pointers (at least one of them has
 * to be non-NULL) to be called whenever a request message for the
 * specified protocol family and message type is received.
 *
 * The special protocol family PF_UNSPEC may be used to define fallback
 * function pointers for the case when no entry for the specific protocol
 * family exists.
 */
void rtnl_register(int protocol, int msgtype,
		   rtnl_doit_func doit, rtnl_dumpit_func dumpit,
		   unsigned int flags)
{
	int err;

	err = rtnl_register_internal(NULL, protocol, msgtype, doit, dumpit,
				     flags);
	/* Built-in callers cannot recover from a failed registration;
	 * log it instead of propagating the error.
	 */
	if (err)
		pr_err("Unable to register rtnetlink message handler, "
		       "protocol = %d, message type = %d\n", protocol, msgtype);
}
e2849863
TG
321
/**
 * rtnl_unregister - Unregister a rtnetlink message type
 * @protocol: Protocol family or PF_UNSPEC
 * @msgtype: rtnetlink message type
 *
 * Returns 0 on success or a negative error code.
 */
int rtnl_unregister(int protocol, int msgtype)
{
	struct rtnl_link __rcu **tab;
	struct rtnl_link *link;
	int msgindex;

	BUG_ON(protocol < 0 || protocol > RTNL_FAMILY_MAX);
	msgindex = rtm_msgindex(msgtype);

	rtnl_lock();
	tab = rtnl_dereference(rtnl_msg_handlers[protocol]);
	if (!tab) {
		rtnl_unlock();
		return -ENOENT;
	}

	/* Swap the entry out under the RTNL, free it after a grace period
	 * so concurrent RCU readers in rtnl_get_link() stay safe.
	 */
	link = rcu_replace_pointer_rtnl(tab[msgindex], NULL);
	rtnl_unlock();

	kfree_rcu(link, rcu);

	return 0;
}
EXPORT_SYMBOL_GPL(rtnl_unregister);
353
/**
 * rtnl_unregister_all - Unregister all rtnetlink message type of a protocol
 * @protocol : Protocol family or PF_UNSPEC
 *
 * Identical to calling rtnl_unregister() for all registered message types
 * of a certain protocol family.
 */
void rtnl_unregister_all(int protocol)
{
	struct rtnl_link __rcu **tab;
	struct rtnl_link *link;
	int msgindex;

	BUG_ON(protocol < 0 || protocol > RTNL_FAMILY_MAX);

	rtnl_lock();
	/* Unpublish the whole per-family table first ... */
	tab = rcu_replace_pointer_rtnl(rtnl_msg_handlers[protocol], NULL);
	if (!tab) {
		rtnl_unlock();
		return;
	}
	/* ... then queue every entry for RCU-deferred freeing. */
	for (msgindex = 0; msgindex < RTM_NR_MSGTYPES; msgindex++) {
		link = rcu_replace_pointer_rtnl(tab[msgindex], NULL);
		kfree_rcu(link, rcu);
	}
	rtnl_unlock();

	/* Wait out concurrent readers before freeing the table itself. */
	synchronize_net();

	kfree(tab);
}
EXPORT_SYMBOL_GPL(rtnl_unregister_all);
1da177e4 386
38f7b870
PM
/* Registered struct rtnl_link_ops; list mutations happen under the RTNL. */
static LIST_HEAD(link_ops);

/* Find link ops by kind string (IFLA_INFO_KIND); NULL if not registered. */
static const struct rtnl_link_ops *rtnl_link_ops_get(const char *kind)
{
	const struct rtnl_link_ops *ops;

	list_for_each_entry(ops, &link_ops, list) {
		if (!strcmp(ops->kind, kind))
			return ops;
	}
	return NULL;
}
399
38f7b870
PM
/**
 * __rtnl_link_register - Register rtnl_link_ops with rtnetlink.
 * @ops: struct rtnl_link_ops * to register
 *
 * The caller must hold the rtnl_mutex. This function should be used
 * by drivers that create devices during module initialization. It
 * must be called before registering the devices.
 *
 * Returns 0 on success or a negative error code.
 */
int __rtnl_link_register(struct rtnl_link_ops *ops)
{
	if (rtnl_link_ops_get(ops->kind))
		return -EEXIST;

	/* The check for alloc/setup is here because if ops
	 * does not have that filled up, it is not possible
	 * to use the ops for creating device. So do not
	 * fill up dellink as well. That disables rtnl_dellink.
	 */
	if ((ops->alloc || ops->setup) && !ops->dellink)
		ops->dellink = unregister_netdevice_queue;

	list_add_tail(&ops->list, &link_ops);
	return 0;
}
EXPORT_SYMBOL_GPL(__rtnl_link_register);
427
/**
 * rtnl_link_register - Register rtnl_link_ops with rtnetlink.
 * @ops: struct rtnl_link_ops * to register
 *
 * Returns 0 on success or a negative error code.
 */
int rtnl_link_register(struct rtnl_link_ops *ops)
{
	int err;

	/* Sanity-check max sizes to avoid stack buffer overflow. */
	if (WARN_ON(ops->maxtype > RTNL_MAX_TYPE ||
		    ops->slave_maxtype > RTNL_SLAVE_MAX_TYPE))
		return -EINVAL;

	rtnl_lock();
	err = __rtnl_link_register(ops);
	rtnl_unlock();
	return err;
}
EXPORT_SYMBOL_GPL(rtnl_link_register);
449
669f87ba
PE
/* Queue every device in @net created through @ops for deletion and
 * unregister them all in a single batch.
 */
static void __rtnl_kill_links(struct net *net, struct rtnl_link_ops *ops)
{
	struct net_device *dev;
	LIST_HEAD(list_kill);

	for_each_netdev(net, dev) {
		if (dev->rtnl_link_ops == ops)
			ops->dellink(dev, &list_kill);
	}
	unregister_netdevice_many(&list_kill);
}
461
38f7b870
PM
/**
 * __rtnl_link_unregister - Unregister rtnl_link_ops from rtnetlink.
 * @ops: struct rtnl_link_ops * to unregister
 *
 * The caller must hold the rtnl_mutex and guarantee net_namespace_list
 * integrity (hold pernet_ops_rwsem for writing to close the race
 * with setup_net() and cleanup_net()).
 */
void __rtnl_link_unregister(struct rtnl_link_ops *ops)
{
	struct net *net;

	/* Tear down all devices of this kind in every namespace before
	 * removing the ops from the registry.
	 */
	for_each_net(net) {
		__rtnl_kill_links(net, ops);
	}
	list_del(&ops->list);
}
EXPORT_SYMBOL_GPL(__rtnl_link_unregister);
480
/* Return with the rtnl_lock held when there are no network
 * devices unregistering in any network namespace.
 */
static void rtnl_lock_unregistering_all(void)
{
	DEFINE_WAIT_FUNC(wait, woken_wake_function);

	add_wait_queue(&netdev_unregistering_wq, &wait);
	for (;;) {
		rtnl_lock();
		/* We held write locked pernet_ops_rwsem, and parallel
		 * setup_net() and cleanup_net() are not possible.
		 */
		if (!atomic_read(&dev_unreg_count))
			break;
		/* Drop the lock (without running the todo list) and sleep
		 * until woken from the unregistering wait queue.
		 */
		__rtnl_unlock();

		wait_woken(&wait, TASK_UNINTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
	}
	remove_wait_queue(&netdev_unregistering_wq, &wait);
}
502
38f7b870
PM
/**
 * rtnl_link_unregister - Unregister rtnl_link_ops from rtnetlink.
 * @ops: struct rtnl_link_ops * to unregister
 */
void rtnl_link_unregister(struct rtnl_link_ops *ops)
{
	/* Close the race with setup_net() and cleanup_net() */
	down_write(&pernet_ops_rwsem);
	/* Acquires the RTNL once no device is mid-unregister. */
	rtnl_lock_unregistering_all();
	__rtnl_link_unregister(ops);
	rtnl_unlock();
	up_write(&pernet_ops_rwsem);
}
EXPORT_SYMBOL_GPL(rtnl_link_unregister);
517
ba7d49b1
JP
/* Size needed to dump IFLA_INFO_SLAVE_DATA for @dev, or 0 when @dev has no
 * master device or the master's ops do not report a slave size.
 */
static size_t rtnl_link_get_slave_info_data_size(const struct net_device *dev)
{
	struct net_device *master_dev;
	const struct rtnl_link_ops *ops;
	size_t size = 0;

	rcu_read_lock();

	master_dev = netdev_master_upper_dev_get_rcu((struct net_device *)dev);
	if (!master_dev)
		goto out;

	ops = master_dev->rtnl_link_ops;
	if (!ops || !ops->get_slave_size)
		goto out;
	/* IFLA_INFO_SLAVE_DATA + nested data */
	size = nla_total_size(sizeof(struct nlattr)) +
	       ops->get_slave_size(master_dev, dev);

out:
	rcu_read_unlock();
	return size;
}
541
38f7b870
PM
/* Worst-case attribute space needed to dump IFLA_LINKINFO for @dev;
 * 0 when the device has no rtnl_link_ops.
 */
static size_t rtnl_link_get_size(const struct net_device *dev)
{
	const struct rtnl_link_ops *ops = dev->rtnl_link_ops;
	size_t size;

	if (!ops)
		return 0;

	size = nla_total_size(sizeof(struct nlattr)) + /* IFLA_LINKINFO */
	       nla_total_size(strlen(ops->kind) + 1);  /* IFLA_INFO_KIND */

	if (ops->get_size)
		/* IFLA_INFO_DATA + nested data */
		size += nla_total_size(sizeof(struct nlattr)) +
			ops->get_size(dev);

	if (ops->get_xstats_size)
		/* IFLA_INFO_XSTATS */
		size += nla_total_size(ops->get_xstats_size(dev));

	size += rtnl_link_get_slave_info_data_size(dev);

	return size;
}
566
f8ff182c
TG
/* Registered per-address-family ops; mutated under the RTNL, walked
 * either under the RTNL or RCU.
 */
static LIST_HEAD(rtnl_af_ops);

/* Find the af_ops for @family; NULL if none registered.  Requires the
 * RTNL (asserted below) because it walks the list without RCU.
 */
static const struct rtnl_af_ops *rtnl_af_lookup(const int family)
{
	const struct rtnl_af_ops *ops;

	ASSERT_RTNL();

	list_for_each_entry(ops, &rtnl_af_ops, list) {
		if (ops->family == family)
			return ops;
	}

	return NULL;
}
582
f8ff182c
TG
/**
 * rtnl_af_register - Register rtnl_af_ops with rtnetlink.
 * @ops: struct rtnl_af_ops * to register
 */
void rtnl_af_register(struct rtnl_af_ops *ops)
{
	rtnl_lock();
	list_add_tail_rcu(&ops->list, &rtnl_af_ops);
	rtnl_unlock();
}
EXPORT_SYMBOL_GPL(rtnl_af_register);
596
f8ff182c
TG
/**
 * rtnl_af_unregister - Unregister rtnl_af_ops from rtnetlink.
 * @ops: struct rtnl_af_ops * to unregister
 */
void rtnl_af_unregister(struct rtnl_af_ops *ops)
{
	rtnl_lock();
	list_del_rcu(&ops->list);
	rtnl_unlock();

	/* Wait for concurrent RCU walkers of rtnl_af_ops to finish before
	 * the caller may free @ops.
	 */
	synchronize_rcu();
}
EXPORT_SYMBOL_GPL(rtnl_af_unregister);
610
b1974ed0
AR
/* Attribute space needed to dump IFLA_AF_SPEC for @dev, summed over all
 * registered address families that report a per-link size.
 */
static size_t rtnl_link_get_af_size(const struct net_device *dev,
				    u32 ext_filter_mask)
{
	struct rtnl_af_ops *af_ops;
	size_t size;

	/* IFLA_AF_SPEC */
	size = nla_total_size(sizeof(struct nlattr));

	rcu_read_lock();
	list_for_each_entry_rcu(af_ops, &rtnl_af_ops, list) {
		if (af_ops->get_link_af_size) {
			/* AF_* + nested data */
			size += nla_total_size(sizeof(struct nlattr)) +
				af_ops->get_link_af_size(dev, ext_filter_mask);
		}
	}
	rcu_read_unlock();

	return size;
}
632
/* True when @dev has a master device that itself has rtnl_link_ops,
 * i.e. slave link info could be dumped for it.
 */
static bool rtnl_have_link_slave_info(const struct net_device *dev)
{
	struct net_device *master_dev;
	bool ret = false;

	rcu_read_lock();

	master_dev = netdev_master_upper_dev_get_rcu((struct net_device *)dev);
	if (master_dev && master_dev->rtnl_link_ops)
		ret = true;
	rcu_read_unlock();
	return ret;
}
646
/* Emit IFLA_INFO_SLAVE_KIND and (optionally) a nested
 * IFLA_INFO_SLAVE_DATA for @dev's master.  Returns 0 when there is
 * nothing to emit, -EMSGSIZE when the skb is full, or the master ops'
 * fill_slave_info() error.
 */
static int rtnl_link_slave_info_fill(struct sk_buff *skb,
				     const struct net_device *dev)
{
	struct net_device *master_dev;
	const struct rtnl_link_ops *ops;
	struct nlattr *slave_data;
	int err;

	master_dev = netdev_master_upper_dev_get((struct net_device *) dev);
	if (!master_dev)
		return 0;
	ops = master_dev->rtnl_link_ops;
	if (!ops)
		return 0;
	if (nla_put_string(skb, IFLA_INFO_SLAVE_KIND, ops->kind) < 0)
		return -EMSGSIZE;
	if (ops->fill_slave_info) {
		slave_data = nla_nest_start_noflag(skb, IFLA_INFO_SLAVE_DATA);
		if (!slave_data)
			return -EMSGSIZE;
		err = ops->fill_slave_info(skb, master_dev, dev);
		if (err < 0)
			goto err_cancel_slave_data;
		nla_nest_end(skb, slave_data);
	}
	return 0;

err_cancel_slave_data:
	/* Roll back the partially written nest on failure. */
	nla_nest_cancel(skb, slave_data);
	return err;
}
678
/* Emit IFLA_INFO_KIND, the ops' xstats, and a nested IFLA_INFO_DATA for
 * @dev's own rtnl_link_ops.  Returns 0 when the device has no ops,
 * -EMSGSIZE when the skb is full, or the ops callback's error.
 */
static int rtnl_link_info_fill(struct sk_buff *skb,
			       const struct net_device *dev)
{
	const struct rtnl_link_ops *ops = dev->rtnl_link_ops;
	struct nlattr *data;
	int err;

	if (!ops)
		return 0;
	if (nla_put_string(skb, IFLA_INFO_KIND, ops->kind) < 0)
		return -EMSGSIZE;
	if (ops->fill_xstats) {
		err = ops->fill_xstats(skb, dev);
		if (err < 0)
			return err;
	}
	if (ops->fill_info) {
		data = nla_nest_start_noflag(skb, IFLA_INFO_DATA);
		if (data == NULL)
			return -EMSGSIZE;
		err = ops->fill_info(skb, dev);
		if (err < 0)
			goto err_cancel_data;
		nla_nest_end(skb, data);
	}
	return 0;

err_cancel_data:
	/* Roll back the partially written nest on failure. */
	nla_nest_cancel(skb, data);
	return err;
}
710
/* Emit the complete IFLA_LINKINFO nest (own link info + slave info) for
 * @dev.  On any failure the whole nest is cancelled.
 */
static int rtnl_link_fill(struct sk_buff *skb, const struct net_device *dev)
{
	struct nlattr *linkinfo;
	int err = -EMSGSIZE;

	linkinfo = nla_nest_start_noflag(skb, IFLA_LINKINFO);
	if (linkinfo == NULL)
		goto out;

	err = rtnl_link_info_fill(skb, dev);
	if (err < 0)
		goto err_cancel_link;

	err = rtnl_link_slave_info_fill(skb, dev);
	if (err < 0)
		goto err_cancel_link;

	nla_nest_end(skb, linkinfo);
	return 0;

err_cancel_link:
	nla_nest_cancel(skb, linkinfo);
out:
	return err;
}
736
/* Multicast @skb to @group on @net's rtnetlink socket; @echo controls
 * whether the sender gets a copy (passed through to nlmsg_notify()).
 */
int rtnetlink_send(struct sk_buff *skb, struct net *net, u32 pid, unsigned int group, int echo)
{
	struct sock *rtnl = net->rtnl;

	return nlmsg_notify(rtnl, skb, pid, group, echo, GFP_KERNEL);
}
743
/* Unicast @skb to the netlink socket identified by @pid in @net. */
int rtnl_unicast(struct sk_buff *skb, struct net *net, u32 pid)
{
	struct sock *rtnl = net->rtnl;

	return nlmsg_unicast(rtnl, skb, pid);
}
EXPORT_SYMBOL(rtnl_unicast);
2942e900 751
/* Notify @group of @skb; echo back to the sender only when the original
 * request @nlh asked for it (nlmsg_report()).
 */
void rtnl_notify(struct sk_buff *skb, struct net *net, u32 pid, u32 group,
		 const struct nlmsghdr *nlh, gfp_t flags)
{
	struct sock *rtnl = net->rtnl;

	nlmsg_notify(rtnl, skb, pid, group, nlmsg_report(nlh), flags);
}
EXPORT_SYMBOL(rtnl_notify);
97676b6b 760
/* Report @error to every rtnetlink socket in @net listening on @group. */
void rtnl_set_sk_err(struct net *net, u32 group, int error)
{
	struct sock *rtnl = net->rtnl;

	netlink_set_err(rtnl, 0, group, error);
}
EXPORT_SYMBOL(rtnl_set_sk_err);
97676b6b 768
1da177e4
LT
/* Dump the non-zero entries of a RTAX_* metrics array as a nested
 * RTA_METRICS attribute.  Attribute type is the metric index + 1 (RTAX_*
 * values are 1-based).  Emits nothing (returns 0) when all metrics are
 * zero or when @metrics is the shared default set; -EMSGSIZE when the
 * skb is full.
 */
int rtnetlink_put_metrics(struct sk_buff *skb, u32 *metrics)
{
	struct nlattr *mx;
	int i, valid = 0;

	/* nothing is dumped for dst_default_metrics, so just skip the loop */
	if (metrics == dst_default_metrics.metrics)
		return 0;

	mx = nla_nest_start_noflag(skb, RTA_METRICS);
	if (mx == NULL)
		return -ENOBUFS;

	for (i = 0; i < RTAX_MAX; i++) {
		if (metrics[i]) {
			if (i == RTAX_CC_ALGO - 1) {
				/* Stored as a key; dump the algorithm name. */
				char tmp[TCP_CA_NAME_MAX], *name;

				name = tcp_ca_get_name_by_key(metrics[i], tmp);
				if (!name)
					continue;
				if (nla_put_string(skb, i + 1, name))
					goto nla_put_failure;
			} else if (i == RTAX_FEATURES - 1) {
				/* Only expose user-visible feature bits. */
				u32 user_features = metrics[i] & RTAX_FEATURE_MASK;

				if (!user_features)
					continue;
				BUILD_BUG_ON(RTAX_FEATURE_MASK & DST_FEATURE_MASK);
				if (nla_put_u32(skb, i + 1, user_features))
					goto nla_put_failure;
			} else {
				if (nla_put_u32(skb, i + 1, metrics[i]))
					goto nla_put_failure;
			}
			valid++;
		}
	}

	if (!valid) {
		/* No metric made it out; drop the empty nest. */
		nla_nest_cancel(skb, mx);
		return 0;
	}

	return nla_nest_end(skb, mx);

nla_put_failure:
	nla_nest_cancel(skb, mx);
	return -EMSGSIZE;
}
EXPORT_SYMBOL(rtnetlink_put_metrics);
1da177e4 820
/* Emit an RTA_CACHEINFO attribute.  @expires is in jiffies relative to
 * now; negative means already expired, and is converted to clock ticks
 * with its sign preserved (clamped to INT_MAX ticks).
 */
int rtnl_put_cacheinfo(struct sk_buff *skb, struct dst_entry *dst, u32 id,
		       long expires, u32 error)
{
	struct rta_cacheinfo ci = {
		.rta_error = error,
		.rta_id = id,
	};

	if (dst) {
		ci.rta_lastuse = jiffies_delta_to_clock_t(jiffies - dst->lastuse);
		ci.rta_used = dst->__use;
		ci.rta_clntref = rcuref_read(&dst->__rcuref);
	}
	if (expires) {
		unsigned long clock;

		clock = jiffies_to_clock_t(abs(expires));
		clock = min_t(unsigned long, clock, INT_MAX);
		ci.rta_expires = (expires > 0) ? clock : -clock;
	}
	return nla_put(skb, RTA_CACHEINFO, sizeof(ci), &ci);
}
EXPORT_SYMBOL_GPL(rtnl_put_cacheinfo);
1da177e4 844
6a2968ee
ED
/* Set dev->operstate with a lockless cmpxchg loop and fire a netdev
 * state-change notification only when the value actually changed.
 */
void netdev_set_operstate(struct net_device *dev, int newstate)
{
	unsigned int old = READ_ONCE(dev->operstate);

	do {
		if (old == newstate)
			return;	/* no transition, no notification */
	} while (!try_cmpxchg(&dev->operstate, &old, newstate));

	netdev_state_change(dev);
}
EXPORT_SYMBOL(netdev_set_operstate);
857
/* Apply a requested RFC 2863 operstate @transition, but only when the
 * device's current state permits it (e.g. IF_OPER_UP is refused while
 * the device is dormant or under test).
 */
static void set_operstate(struct net_device *dev, unsigned char transition)
{
	unsigned char operstate = READ_ONCE(dev->operstate);

	switch (transition) {
	case IF_OPER_UP:
		if ((operstate == IF_OPER_DORMANT ||
		     operstate == IF_OPER_TESTING ||
		     operstate == IF_OPER_UNKNOWN) &&
		    !netif_dormant(dev) && !netif_testing(dev))
			operstate = IF_OPER_UP;
		break;

	case IF_OPER_TESTING:
		if (netif_oper_up(dev))
			operstate = IF_OPER_TESTING;
		break;

	case IF_OPER_DORMANT:
		if (netif_oper_up(dev))
			operstate = IF_OPER_DORMANT;
		break;
	}

	netdev_set_operstate(dev, operstate);
}
884
b1beb681
JB
/* Device flags as seen by userspace: IFF_PROMISC/IFF_ALLMULTI are taken
 * from gflags (the user-requested values), everything else from flags.
 */
static unsigned int rtnl_dev_get_flags(const struct net_device *dev)
{
	return (dev->flags & ~(IFF_PROMISC | IFF_ALLMULTI)) |
	       (dev->gflags & (IFF_PROMISC | IFF_ALLMULTI));
}
890
3729d502
PM
/* Combine the flags requested in @ifm with the device's current flags,
 * changing only the bits named in ifi_change.
 */
static unsigned int rtnl_dev_combine_flags(const struct net_device *dev,
					   const struct ifinfomsg *ifm)
{
	unsigned int flags = ifm->ifi_flags;

	/* bugwards compatibility: ifi_change == 0 is treated as ~0 */
	if (ifm->ifi_change)
		flags = (flags & ifm->ifi_change) |
			(rtnl_dev_get_flags(dev) & ~ifm->ifi_change);

	return flags;
}
903
/* Copy (and narrow) 64-bit link stats into the legacy 32-bit
 * struct rtnl_link_stats, field by field.
 */
static void copy_rtnl_link_stats(struct rtnl_link_stats *a,
				 const struct rtnl_link_stats64 *b)
{
	a->rx_packets = b->rx_packets;
	a->tx_packets = b->tx_packets;
	a->rx_bytes = b->rx_bytes;
	a->tx_bytes = b->tx_bytes;
	a->rx_errors = b->rx_errors;
	a->tx_errors = b->tx_errors;
	a->rx_dropped = b->rx_dropped;
	a->tx_dropped = b->tx_dropped;

	a->multicast = b->multicast;
	a->collisions = b->collisions;

	a->rx_length_errors = b->rx_length_errors;
	a->rx_over_errors = b->rx_over_errors;
	a->rx_crc_errors = b->rx_crc_errors;
	a->rx_frame_errors = b->rx_frame_errors;
	a->rx_fifo_errors = b->rx_fifo_errors;
	a->rx_missed_errors = b->rx_missed_errors;

	a->tx_aborted_errors = b->tx_aborted_errors;
	a->tx_carrier_errors = b->tx_carrier_errors;
	a->tx_fifo_errors = b->tx_fifo_errors;
	a->tx_heartbeat_errors = b->tx_heartbeat_errors;
	a->tx_window_errors = b->tx_window_errors;

	a->rx_compressed = b->rx_compressed;
	a->tx_compressed = b->tx_compressed;

	a->rx_nohandler = b->rx_nohandler;
}
937
/* All VF info */
/* Worst-case attribute space for the IFLA_VFINFO_LIST nest: per-VF fixed
 * attributes, plus per-VF stats unless RTEXT_FILTER_SKIP_STATS is set.
 * Returns 0 when the device has no parent or VF info was not requested.
 */
static inline int rtnl_vfinfo_size(const struct net_device *dev,
				   u32 ext_filter_mask)
{
	if (dev->dev.parent && (ext_filter_mask & RTEXT_FILTER_VF)) {
		int num_vfs = dev_num_vf(dev->dev.parent);
		size_t size = nla_total_size(0);
		size += num_vfs *
			(nla_total_size(0) +
			 nla_total_size(sizeof(struct ifla_vf_mac)) +
			 nla_total_size(sizeof(struct ifla_vf_broadcast)) +
			 nla_total_size(sizeof(struct ifla_vf_vlan)) +
			 nla_total_size(0) + /* nest IFLA_VF_VLAN_LIST */
			 nla_total_size(MAX_VLAN_LIST_LEN *
					sizeof(struct ifla_vf_vlan_info)) +
			 nla_total_size(sizeof(struct ifla_vf_spoofchk)) +
			 nla_total_size(sizeof(struct ifla_vf_tx_rate)) +
			 nla_total_size(sizeof(struct ifla_vf_rate)) +
			 nla_total_size(sizeof(struct ifla_vf_link_state)) +
			 nla_total_size(sizeof(struct ifla_vf_rss_query_en)) +
			 nla_total_size(sizeof(struct ifla_vf_trust)));
		if (~ext_filter_mask & RTEXT_FILTER_SKIP_STATS) {
			size += num_vfs *
				(nla_total_size(0) + /* nest IFLA_VF_STATS */
				 /* IFLA_VF_STATS_RX_PACKETS */
				 nla_total_size_64bit(sizeof(__u64)) +
				 /* IFLA_VF_STATS_TX_PACKETS */
				 nla_total_size_64bit(sizeof(__u64)) +
				 /* IFLA_VF_STATS_RX_BYTES */
				 nla_total_size_64bit(sizeof(__u64)) +
				 /* IFLA_VF_STATS_TX_BYTES */
				 nla_total_size_64bit(sizeof(__u64)) +
				 /* IFLA_VF_STATS_BROADCAST */
				 nla_total_size_64bit(sizeof(__u64)) +
				 /* IFLA_VF_STATS_MULTICAST */
				 nla_total_size_64bit(sizeof(__u64)) +
				 /* IFLA_VF_STATS_RX_DROPPED */
				 nla_total_size_64bit(sizeof(__u64)) +
				 /* IFLA_VF_STATS_TX_DROPPED */
				 nla_total_size_64bit(sizeof(__u64)));
		}
		return size;
	} else
		return 0;
}
983
c53864fd
DG
/* Worst-case attribute space for IFLA_VF_PORTS/IFLA_PORT_SELF.  Returns 0
 * unless the driver implements ndo_get_vf_port, the device has a parent,
 * and the caller requested VF info.
 */
static size_t rtnl_port_size(const struct net_device *dev,
			     u32 ext_filter_mask)
{
	size_t port_size = nla_total_size(4)		/* PORT_VF */
		+ nla_total_size(PORT_PROFILE_MAX)	/* PORT_PROFILE */
		+ nla_total_size(PORT_UUID_MAX)		/* PORT_INSTANCE_UUID */
		+ nla_total_size(PORT_UUID_MAX)		/* PORT_HOST_UUID */
		+ nla_total_size(1)			/* PROT_VDP_REQUEST */
		+ nla_total_size(2);			/* PORT_VDP_RESPONSE */
	size_t vf_ports_size = nla_total_size(sizeof(struct nlattr));
	size_t vf_port_size = nla_total_size(sizeof(struct nlattr))
		+ port_size;
	size_t port_self_size = nla_total_size(sizeof(struct nlattr))
		+ port_size;

	if (!dev->netdev_ops->ndo_get_vf_port || !dev->dev.parent ||
	    !(ext_filter_mask & RTEXT_FILTER_VF))
		return 0;
	if (dev_num_vf(dev->dev.parent))
		return port_self_size + vf_ports_size +
			vf_port_size * dev_num_vf(dev->dev.parent);
	else
		return port_self_size;
}
1008
/* Worst-case attribute space for the IFLA_XDP nest. */
static size_t rtnl_xdp_size(void)
{
	size_t xdp_size = nla_total_size(0) +	/* nest IFLA_XDP */
			  nla_total_size(1) +	/* XDP_ATTACHED */
			  nla_total_size(4) +	/* XDP_PROG_ID (or 1st mode) */
			  nla_total_size(4);	/* XDP_<mode>_PROG_ID */

	return xdp_size;
}
1018
88f4fb0c
JP
1019static size_t rtnl_prop_list_size(const struct net_device *dev)
1020{
1021 struct netdev_name_node *name_node;
9f308313
ED
1022 unsigned int cnt = 0;
1023
1024 rcu_read_lock();
1025 list_for_each_entry_rcu(name_node, &dev->name_node->list, list)
1026 cnt++;
1027 rcu_read_unlock();
88f4fb0c 1028
9f308313 1029 if (!cnt)
88f4fb0c 1030 return 0;
9f308313
ED
1031
1032 return nla_total_size(0) + cnt * nla_total_size(ALTIFNAMSIZ);
88f4fb0c
JP
1033}
1034
829eb208
RP
1035static size_t rtnl_proto_down_size(const struct net_device *dev)
1036{
1037 size_t size = nla_total_size(1);
1038
1039 if (dev->proto_down_reason)
1040 size += nla_total_size(0) + nla_total_size(4);
1041
1042 return size;
1043}
1044
dca56c30
JP
1045static size_t rtnl_devlink_port_size(const struct net_device *dev)
1046{
1047 size_t size = nla_total_size(0); /* nest IFLA_DEVLINK_PORT */
1048
1049 if (dev->devlink_port)
1050 size += devlink_nl_port_handle_size(dev->devlink_port);
1051
1052 return size;
1053}
1054
5f184269
JP
1055static size_t rtnl_dpll_pin_size(const struct net_device *dev)
1056{
1057 size_t size = nla_total_size(0); /* nest IFLA_DPLL_PIN */
1058
289e9225 1059 size += dpll_netdev_pin_handle_size(dev);
5f184269
JP
1060
1061 return size;
1062}
1063
115c9b81
GR
1064static noinline size_t if_nlmsg_size(const struct net_device *dev,
1065 u32 ext_filter_mask)
339bf98f
TG
1066{
1067 return NLMSG_ALIGN(sizeof(struct ifinfomsg))
1068 + nla_total_size(IFNAMSIZ) /* IFLA_IFNAME */
0b815a1a 1069 + nla_total_size(IFALIASZ) /* IFLA_IFALIAS */
339bf98f 1070 + nla_total_size(IFNAMSIZ) /* IFLA_QDISC */
270cb4d0 1071 + nla_total_size_64bit(sizeof(struct rtnl_link_ifmap))
339bf98f 1072 + nla_total_size(sizeof(struct rtnl_link_stats))
35c58459 1073 + nla_total_size_64bit(sizeof(struct rtnl_link_stats64))
339bf98f
TG
1074 + nla_total_size(MAX_ADDR_LEN) /* IFLA_ADDRESS */
1075 + nla_total_size(MAX_ADDR_LEN) /* IFLA_BROADCAST */
1076 + nla_total_size(4) /* IFLA_TXQLEN */
1077 + nla_total_size(4) /* IFLA_WEIGHT */
1078 + nla_total_size(4) /* IFLA_MTU */
1079 + nla_total_size(4) /* IFLA_LINK */
1080 + nla_total_size(4) /* IFLA_MASTER */
9a57247f 1081 + nla_total_size(1) /* IFLA_CARRIER */
edbc0bb3 1082 + nla_total_size(4) /* IFLA_PROMISCUITY */
7e6e1b57 1083 + nla_total_size(4) /* IFLA_ALLMULTI */
76ff5cc9
JP
1084 + nla_total_size(4) /* IFLA_NUM_TX_QUEUES */
1085 + nla_total_size(4) /* IFLA_NUM_RX_QUEUES */
6919756c
TK
1086 + nla_total_size(4) /* IFLA_GSO_MAX_SEGS */
1087 + nla_total_size(4) /* IFLA_GSO_MAX_SIZE */
eac1b93c 1088 + nla_total_size(4) /* IFLA_GRO_MAX_SIZE */
9eefedd5
XL
1089 + nla_total_size(4) /* IFLA_GSO_IPV4_MAX_SIZE */
1090 + nla_total_size(4) /* IFLA_GRO_IPV4_MAX_SIZE */
89527be8
ED
1091 + nla_total_size(4) /* IFLA_TSO_MAX_SIZE */
1092 + nla_total_size(4) /* IFLA_TSO_MAX_SEGS */
339bf98f 1093 + nla_total_size(1) /* IFLA_OPERSTATE */
38f7b870 1094 + nla_total_size(1) /* IFLA_LINKMODE */
2d3b479d 1095 + nla_total_size(4) /* IFLA_CARRIER_CHANGES */
d37512a2 1096 + nla_total_size(4) /* IFLA_LINK_NETNSID */
db833d40 1097 + nla_total_size(4) /* IFLA_GROUP */
115c9b81
GR
1098 + nla_total_size(ext_filter_mask
1099 & RTEXT_FILTER_VF ? 4 : 0) /* IFLA_NUM_VF */
1100 + rtnl_vfinfo_size(dev, ext_filter_mask) /* IFLA_VFINFO_LIST */
c53864fd 1101 + rtnl_port_size(dev, ext_filter_mask) /* IFLA_VF_PORTS + IFLA_PORT_SELF */
f8ff182c 1102 + rtnl_link_get_size(dev) /* IFLA_LINKINFO */
b1974ed0 1103 + rtnl_link_get_af_size(dev, ext_filter_mask) /* IFLA_AF_SPEC */
82f28412 1104 + nla_total_size(MAX_PHYS_ITEM_ID_LEN) /* IFLA_PHYS_PORT_ID */
88d6378b 1105 + nla_total_size(MAX_PHYS_ITEM_ID_LEN) /* IFLA_PHYS_SWITCH_ID */
c57c7a95 1106 + nla_total_size(IFNAMSIZ) /* IFLA_PHYS_PORT_NAME */
b5cdae32 1107 + rtnl_xdp_size() /* IFLA_XDP */
3d3ea5af 1108 + nla_total_size(4) /* IFLA_EVENT */
6621dd29 1109 + nla_total_size(4) /* IFLA_NEW_NETNSID */
38e01b30 1110 + nla_total_size(4) /* IFLA_NEW_IFINDEX */
829eb208 1111 + rtnl_proto_down_size(dev) /* proto down */
7e4a8d5a 1112 + nla_total_size(4) /* IFLA_TARGET_NETNSID */
b2d3bcfa
DD
1113 + nla_total_size(4) /* IFLA_CARRIER_UP_COUNT */
1114 + nla_total_size(4) /* IFLA_CARRIER_DOWN_COUNT */
3e7a50ce
SH
1115 + nla_total_size(4) /* IFLA_MIN_MTU */
1116 + nla_total_size(4) /* IFLA_MAX_MTU */
88f4fb0c 1117 + rtnl_prop_list_size(dev)
f74877a5 1118 + nla_total_size(MAX_ADDR_LEN) /* IFLA_PERM_ADDRESS */
dca56c30 1119 + rtnl_devlink_port_size(dev)
5f184269 1120 + rtnl_dpll_pin_size(dev)
79e1ad14 1121 + 0;
339bf98f
TG
1122}
1123
57b61080
SF
1124static int rtnl_vf_ports_fill(struct sk_buff *skb, struct net_device *dev)
1125{
1126 struct nlattr *vf_ports;
1127 struct nlattr *vf_port;
1128 int vf;
1129 int err;
1130
ae0be8de 1131 vf_ports = nla_nest_start_noflag(skb, IFLA_VF_PORTS);
57b61080
SF
1132 if (!vf_ports)
1133 return -EMSGSIZE;
1134
1135 for (vf = 0; vf < dev_num_vf(dev->dev.parent); vf++) {
ae0be8de 1136 vf_port = nla_nest_start_noflag(skb, IFLA_VF_PORT);
8ca94183
SF
1137 if (!vf_port)
1138 goto nla_put_failure;
a6574349
DM
1139 if (nla_put_u32(skb, IFLA_PORT_VF, vf))
1140 goto nla_put_failure;
57b61080 1141 err = dev->netdev_ops->ndo_get_vf_port(dev, vf, skb);
8ca94183
SF
1142 if (err == -EMSGSIZE)
1143 goto nla_put_failure;
57b61080 1144 if (err) {
57b61080
SF
1145 nla_nest_cancel(skb, vf_port);
1146 continue;
1147 }
1148 nla_nest_end(skb, vf_port);
1149 }
1150
1151 nla_nest_end(skb, vf_ports);
1152
1153 return 0;
8ca94183
SF
1154
1155nla_put_failure:
1156 nla_nest_cancel(skb, vf_ports);
1157 return -EMSGSIZE;
57b61080
SF
1158}
1159
1160static int rtnl_port_self_fill(struct sk_buff *skb, struct net_device *dev)
1161{
1162 struct nlattr *port_self;
1163 int err;
1164
ae0be8de 1165 port_self = nla_nest_start_noflag(skb, IFLA_PORT_SELF);
57b61080
SF
1166 if (!port_self)
1167 return -EMSGSIZE;
1168
1169 err = dev->netdev_ops->ndo_get_vf_port(dev, PORT_SELF_VF, skb);
1170 if (err) {
1171 nla_nest_cancel(skb, port_self);
8ca94183 1172 return (err == -EMSGSIZE) ? err : 0;
57b61080
SF
1173 }
1174
1175 nla_nest_end(skb, port_self);
1176
1177 return 0;
1178}
1179
c53864fd
DG
1180static int rtnl_port_fill(struct sk_buff *skb, struct net_device *dev,
1181 u32 ext_filter_mask)
57b61080
SF
1182{
1183 int err;
1184
c53864fd
DG
1185 if (!dev->netdev_ops->ndo_get_vf_port || !dev->dev.parent ||
1186 !(ext_filter_mask & RTEXT_FILTER_VF))
57b61080
SF
1187 return 0;
1188
1189 err = rtnl_port_self_fill(skb, dev);
1190 if (err)
1191 return err;
1192
1193 if (dev_num_vf(dev->dev.parent)) {
1194 err = rtnl_vf_ports_fill(skb, dev);
1195 if (err)
1196 return err;
1197 }
1198
1199 return 0;
1200}
1201
66cae9ed
JP
1202static int rtnl_phys_port_id_fill(struct sk_buff *skb, struct net_device *dev)
1203{
1204 int err;
02637fce 1205 struct netdev_phys_item_id ppid;
66cae9ed
JP
1206
1207 err = dev_get_phys_port_id(dev, &ppid);
1208 if (err) {
1209 if (err == -EOPNOTSUPP)
1210 return 0;
1211 return err;
1212 }
1213
1214 if (nla_put(skb, IFLA_PHYS_PORT_ID, ppid.id_len, ppid.id))
1215 return -EMSGSIZE;
1216
1217 return 0;
1218}
1219
db24a904
DA
1220static int rtnl_phys_port_name_fill(struct sk_buff *skb, struct net_device *dev)
1221{
1222 char name[IFNAMSIZ];
1223 int err;
1224
1225 err = dev_get_phys_port_name(dev, name, sizeof(name));
1226 if (err) {
1227 if (err == -EOPNOTSUPP)
1228 return 0;
1229 return err;
1230 }
1231
77ef033b 1232 if (nla_put_string(skb, IFLA_PHYS_PORT_NAME, name))
db24a904
DA
1233 return -EMSGSIZE;
1234
1235 return 0;
1236}
1237
82f28412
JP
1238static int rtnl_phys_switch_id_fill(struct sk_buff *skb, struct net_device *dev)
1239{
bccb3025 1240 struct netdev_phys_item_id ppid = { };
82f28412 1241 int err;
82f28412 1242
bccb3025 1243 err = dev_get_port_parent_id(dev, &ppid, false);
82f28412
JP
1244 if (err) {
1245 if (err == -EOPNOTSUPP)
1246 return 0;
1247 return err;
1248 }
1249
bccb3025 1250 if (nla_put(skb, IFLA_PHYS_SWITCH_ID, ppid.id_len, ppid.id))
82f28412
JP
1251 return -EMSGSIZE;
1252
1253 return 0;
1254}
1255
b22b941b
HFS
1256static noinline_for_stack int rtnl_fill_stats(struct sk_buff *skb,
1257 struct net_device *dev)
1258{
550bce59 1259 struct rtnl_link_stats64 *sp;
b22b941b 1260 struct nlattr *attr;
18402843 1261
58414d32
ND
1262 attr = nla_reserve_64bit(skb, IFLA_STATS64,
1263 sizeof(struct rtnl_link_stats64), IFLA_PAD);
b22b941b
HFS
1264 if (!attr)
1265 return -EMSGSIZE;
1266
550bce59
RP
1267 sp = nla_data(attr);
1268 dev_get_stats(dev, sp);
b22b941b 1269
550bce59
RP
1270 attr = nla_reserve(skb, IFLA_STATS,
1271 sizeof(struct rtnl_link_stats));
b22b941b
HFS
1272 if (!attr)
1273 return -EMSGSIZE;
1274
550bce59 1275 copy_rtnl_link_stats(nla_data(attr), sp);
b22b941b
HFS
1276
1277 return 0;
1278}
1279
1280static noinline_for_stack int rtnl_fill_vfinfo(struct sk_buff *skb,
1281 struct net_device *dev,
1282 int vfs_num,
fa0e21fa 1283 u32 ext_filter_mask)
b22b941b
HFS
1284{
1285 struct ifla_vf_rss_query_en vf_rss_query_en;
79aab093 1286 struct nlattr *vf, *vfstats, *vfvlanlist;
b22b941b 1287 struct ifla_vf_link_state vf_linkstate;
79aab093 1288 struct ifla_vf_vlan_info vf_vlan_info;
b22b941b
HFS
1289 struct ifla_vf_spoofchk vf_spoofchk;
1290 struct ifla_vf_tx_rate vf_tx_rate;
1291 struct ifla_vf_stats vf_stats;
1292 struct ifla_vf_trust vf_trust;
1293 struct ifla_vf_vlan vf_vlan;
1294 struct ifla_vf_rate vf_rate;
b22b941b 1295 struct ifla_vf_mac vf_mac;
75345f88 1296 struct ifla_vf_broadcast vf_broadcast;
b22b941b 1297 struct ifla_vf_info ivi;
30aad417
DG
1298 struct ifla_vf_guid node_guid;
1299 struct ifla_vf_guid port_guid;
b22b941b 1300
0eed9cf5
MY
1301 memset(&ivi, 0, sizeof(ivi));
1302
b22b941b
HFS
1303 /* Not all SR-IOV capable drivers support the
1304 * spoofcheck and "RSS query enable" query. Preset to
1305 * -1 so the user space tool can detect that the driver
1306 * didn't report anything.
1307 */
1308 ivi.spoofchk = -1;
1309 ivi.rss_query_en = -1;
1310 ivi.trusted = -1;
b22b941b
HFS
1311 /* The default value for VF link state is "auto"
1312 * IFLA_VF_LINK_STATE_AUTO which equals zero
1313 */
1314 ivi.linkstate = 0;
79aab093
MS
1315 /* VLAN Protocol by default is 802.1Q */
1316 ivi.vlan_proto = htons(ETH_P_8021Q);
b22b941b
HFS
1317 if (dev->netdev_ops->ndo_get_vf_config(dev, vfs_num, &ivi))
1318 return 0;
1319
775f4f05 1320 memset(&vf_vlan_info, 0, sizeof(vf_vlan_info));
9fbf082f
LR
1321 memset(&node_guid, 0, sizeof(node_guid));
1322 memset(&port_guid, 0, sizeof(port_guid));
775f4f05 1323
b22b941b
HFS
1324 vf_mac.vf =
1325 vf_vlan.vf =
79aab093 1326 vf_vlan_info.vf =
b22b941b
HFS
1327 vf_rate.vf =
1328 vf_tx_rate.vf =
1329 vf_spoofchk.vf =
1330 vf_linkstate.vf =
1331 vf_rss_query_en.vf =
9aed6ae0
DG
1332 vf_trust.vf =
1333 node_guid.vf =
1334 port_guid.vf = ivi.vf;
b22b941b
HFS
1335
1336 memcpy(vf_mac.mac, ivi.mac, sizeof(ivi.mac));
75345f88 1337 memcpy(vf_broadcast.broadcast, dev->broadcast, dev->addr_len);
b22b941b
HFS
1338 vf_vlan.vlan = ivi.vlan;
1339 vf_vlan.qos = ivi.qos;
79aab093
MS
1340 vf_vlan_info.vlan = ivi.vlan;
1341 vf_vlan_info.qos = ivi.qos;
1342 vf_vlan_info.vlan_proto = ivi.vlan_proto;
b22b941b
HFS
1343 vf_tx_rate.rate = ivi.max_tx_rate;
1344 vf_rate.min_tx_rate = ivi.min_tx_rate;
1345 vf_rate.max_tx_rate = ivi.max_tx_rate;
1346 vf_spoofchk.setting = ivi.spoofchk;
1347 vf_linkstate.link_state = ivi.linkstate;
1348 vf_rss_query_en.setting = ivi.rss_query_en;
1349 vf_trust.setting = ivi.trusted;
ae0be8de 1350 vf = nla_nest_start_noflag(skb, IFLA_VF_INFO);
79aab093 1351 if (!vf)
4a59cdfd 1352 return -EMSGSIZE;
b22b941b 1353 if (nla_put(skb, IFLA_VF_MAC, sizeof(vf_mac), &vf_mac) ||
75345f88 1354 nla_put(skb, IFLA_VF_BROADCAST, sizeof(vf_broadcast), &vf_broadcast) ||
b22b941b
HFS
1355 nla_put(skb, IFLA_VF_VLAN, sizeof(vf_vlan), &vf_vlan) ||
1356 nla_put(skb, IFLA_VF_RATE, sizeof(vf_rate),
1357 &vf_rate) ||
1358 nla_put(skb, IFLA_VF_TX_RATE, sizeof(vf_tx_rate),
1359 &vf_tx_rate) ||
1360 nla_put(skb, IFLA_VF_SPOOFCHK, sizeof(vf_spoofchk),
1361 &vf_spoofchk) ||
1362 nla_put(skb, IFLA_VF_LINK_STATE, sizeof(vf_linkstate),
1363 &vf_linkstate) ||
1364 nla_put(skb, IFLA_VF_RSS_QUERY_EN,
1365 sizeof(vf_rss_query_en),
1366 &vf_rss_query_en) ||
1367 nla_put(skb, IFLA_VF_TRUST,
1368 sizeof(vf_trust), &vf_trust))
79aab093 1369 goto nla_put_vf_failure;
30aad417 1370
30aad417
DG
1371 if (dev->netdev_ops->ndo_get_vf_guid &&
1372 !dev->netdev_ops->ndo_get_vf_guid(dev, vfs_num, &node_guid,
1373 &port_guid)) {
1374 if (nla_put(skb, IFLA_VF_IB_NODE_GUID, sizeof(node_guid),
1375 &node_guid) ||
1376 nla_put(skb, IFLA_VF_IB_PORT_GUID, sizeof(port_guid),
1377 &port_guid))
1378 goto nla_put_vf_failure;
1379 }
ae0be8de 1380 vfvlanlist = nla_nest_start_noflag(skb, IFLA_VF_VLAN_LIST);
79aab093
MS
1381 if (!vfvlanlist)
1382 goto nla_put_vf_failure;
1383 if (nla_put(skb, IFLA_VF_VLAN_INFO, sizeof(vf_vlan_info),
1384 &vf_vlan_info)) {
1385 nla_nest_cancel(skb, vfvlanlist);
1386 goto nla_put_vf_failure;
1387 }
1388 nla_nest_end(skb, vfvlanlist);
fa0e21fa
EP
1389 if (~ext_filter_mask & RTEXT_FILTER_SKIP_STATS) {
1390 memset(&vf_stats, 0, sizeof(vf_stats));
1391 if (dev->netdev_ops->ndo_get_vf_stats)
1392 dev->netdev_ops->ndo_get_vf_stats(dev, vfs_num,
1393 &vf_stats);
1394 vfstats = nla_nest_start_noflag(skb, IFLA_VF_STATS);
1395 if (!vfstats)
1396 goto nla_put_vf_failure;
1397 if (nla_put_u64_64bit(skb, IFLA_VF_STATS_RX_PACKETS,
1398 vf_stats.rx_packets, IFLA_VF_STATS_PAD) ||
1399 nla_put_u64_64bit(skb, IFLA_VF_STATS_TX_PACKETS,
1400 vf_stats.tx_packets, IFLA_VF_STATS_PAD) ||
1401 nla_put_u64_64bit(skb, IFLA_VF_STATS_RX_BYTES,
1402 vf_stats.rx_bytes, IFLA_VF_STATS_PAD) ||
1403 nla_put_u64_64bit(skb, IFLA_VF_STATS_TX_BYTES,
1404 vf_stats.tx_bytes, IFLA_VF_STATS_PAD) ||
1405 nla_put_u64_64bit(skb, IFLA_VF_STATS_BROADCAST,
1406 vf_stats.broadcast, IFLA_VF_STATS_PAD) ||
1407 nla_put_u64_64bit(skb, IFLA_VF_STATS_MULTICAST,
1408 vf_stats.multicast, IFLA_VF_STATS_PAD) ||
1409 nla_put_u64_64bit(skb, IFLA_VF_STATS_RX_DROPPED,
1410 vf_stats.rx_dropped, IFLA_VF_STATS_PAD) ||
1411 nla_put_u64_64bit(skb, IFLA_VF_STATS_TX_DROPPED,
1412 vf_stats.tx_dropped, IFLA_VF_STATS_PAD)) {
1413 nla_nest_cancel(skb, vfstats);
1414 goto nla_put_vf_failure;
1415 }
1416 nla_nest_end(skb, vfstats);
79aab093 1417 }
b22b941b
HFS
1418 nla_nest_end(skb, vf);
1419 return 0;
79aab093
MS
1420
1421nla_put_vf_failure:
1422 nla_nest_cancel(skb, vf);
79aab093 1423 return -EMSGSIZE;
b22b941b
HFS
1424}
1425
250fc3df
FW
1426static noinline_for_stack int rtnl_fill_vf(struct sk_buff *skb,
1427 struct net_device *dev,
1428 u32 ext_filter_mask)
1429{
1430 struct nlattr *vfinfo;
1431 int i, num_vfs;
1432
1433 if (!dev->dev.parent || ((ext_filter_mask & RTEXT_FILTER_VF) == 0))
1434 return 0;
1435
1436 num_vfs = dev_num_vf(dev->dev.parent);
1437 if (nla_put_u32(skb, IFLA_NUM_VF, num_vfs))
1438 return -EMSGSIZE;
1439
1440 if (!dev->netdev_ops->ndo_get_vf_config)
1441 return 0;
1442
ae0be8de 1443 vfinfo = nla_nest_start_noflag(skb, IFLA_VFINFO_LIST);
250fc3df
FW
1444 if (!vfinfo)
1445 return -EMSGSIZE;
1446
1447 for (i = 0; i < num_vfs; i++) {
4a59cdfd
GP
1448 if (rtnl_fill_vfinfo(skb, dev, i, ext_filter_mask)) {
1449 nla_nest_cancel(skb, vfinfo);
250fc3df 1450 return -EMSGSIZE;
4a59cdfd 1451 }
250fc3df
FW
1452 }
1453
1454 nla_nest_end(skb, vfinfo);
1455 return 0;
1456}
1457
74808e72
ED
1458static int rtnl_fill_link_ifmap(struct sk_buff *skb,
1459 const struct net_device *dev)
b22b941b 1460{
5f8e4474
KL
1461 struct rtnl_link_ifmap map;
1462
1463 memset(&map, 0, sizeof(map));
74808e72
ED
1464 map.mem_start = READ_ONCE(dev->mem_start);
1465 map.mem_end = READ_ONCE(dev->mem_end);
1466 map.base_addr = READ_ONCE(dev->base_addr);
1467 map.irq = READ_ONCE(dev->irq);
1468 map.dma = READ_ONCE(dev->dma);
1469 map.port = READ_ONCE(dev->if_port);
5f8e4474 1470
270cb4d0 1471 if (nla_put_64bit(skb, IFLA_MAP, sizeof(map), &map, IFLA_PAD))
b22b941b
HFS
1472 return -EMSGSIZE;
1473
1474 return 0;
1475}
1476
a25717d2 1477static u32 rtnl_xdp_prog_skb(struct net_device *dev)
d67b9cd2 1478{
58038695 1479 const struct bpf_prog *generic_xdp_prog;
d67b9cd2
DB
1480
1481 ASSERT_RTNL();
1482
58038695 1483 generic_xdp_prog = rtnl_dereference(dev->xdp_prog);
a25717d2
JK
1484 if (!generic_xdp_prog)
1485 return 0;
1486 return generic_xdp_prog->aux->id;
1487}
d67b9cd2 1488
a25717d2
JK
1489static u32 rtnl_xdp_prog_drv(struct net_device *dev)
1490{
7f0a8382 1491 return dev_xdp_prog_id(dev, XDP_MODE_DRV);
a25717d2 1492}
118b4aa2 1493
a25717d2
JK
1494static u32 rtnl_xdp_prog_hw(struct net_device *dev)
1495{
7f0a8382 1496 return dev_xdp_prog_id(dev, XDP_MODE_HW);
a25717d2
JK
1497}
1498
1499static int rtnl_xdp_report_one(struct sk_buff *skb, struct net_device *dev,
1500 u32 *prog_id, u8 *mode, u8 tgt_mode, u32 attr,
1501 u32 (*get_prog_id)(struct net_device *dev))
1502{
1503 u32 curr_id;
1504 int err;
1505
1506 curr_id = get_prog_id(dev);
1507 if (!curr_id)
1508 return 0;
1509
1510 *prog_id = curr_id;
1511 err = nla_put_u32(skb, attr, curr_id);
1512 if (err)
1513 return err;
d67b9cd2 1514
a25717d2
JK
1515 if (*mode != XDP_ATTACHED_NONE)
1516 *mode = XDP_ATTACHED_MULTI;
1517 else
1518 *mode = tgt_mode;
118b4aa2 1519
a25717d2 1520 return 0;
d67b9cd2
DB
1521}
1522
d1fdd913
BB
1523static int rtnl_xdp_fill(struct sk_buff *skb, struct net_device *dev)
1524{
d1fdd913 1525 struct nlattr *xdp;
58038695 1526 u32 prog_id;
d1fdd913 1527 int err;
4f91da26 1528 u8 mode;
d1fdd913 1529
ae0be8de 1530 xdp = nla_nest_start_noflag(skb, IFLA_XDP);
d1fdd913
BB
1531 if (!xdp)
1532 return -EMSGSIZE;
d67b9cd2 1533
a25717d2
JK
1534 prog_id = 0;
1535 mode = XDP_ATTACHED_NONE;
202aabe8
JK
1536 err = rtnl_xdp_report_one(skb, dev, &prog_id, &mode, XDP_ATTACHED_SKB,
1537 IFLA_XDP_SKB_PROG_ID, rtnl_xdp_prog_skb);
1538 if (err)
a25717d2 1539 goto err_cancel;
202aabe8
JK
1540 err = rtnl_xdp_report_one(skb, dev, &prog_id, &mode, XDP_ATTACHED_DRV,
1541 IFLA_XDP_DRV_PROG_ID, rtnl_xdp_prog_drv);
1542 if (err)
a25717d2 1543 goto err_cancel;
202aabe8
JK
1544 err = rtnl_xdp_report_one(skb, dev, &prog_id, &mode, XDP_ATTACHED_HW,
1545 IFLA_XDP_HW_PROG_ID, rtnl_xdp_prog_hw);
1546 if (err)
a25717d2
JK
1547 goto err_cancel;
1548
4f91da26 1549 err = nla_put_u8(skb, IFLA_XDP_ATTACHED, mode);
d1fdd913
BB
1550 if (err)
1551 goto err_cancel;
1552
a25717d2 1553 if (prog_id && mode != XDP_ATTACHED_MULTI) {
58038695
MKL
1554 err = nla_put_u32(skb, IFLA_XDP_PROG_ID, prog_id);
1555 if (err)
1556 goto err_cancel;
1557 }
1558
d1fdd913
BB
1559 nla_nest_end(skb, xdp);
1560 return 0;
1561
1562err_cancel:
1563 nla_nest_cancel(skb, xdp);
1564 return err;
1565}
1566
3d3ea5af
VY
1567static u32 rtnl_get_event(unsigned long event)
1568{
1569 u32 rtnl_event_type = IFLA_EVENT_NONE;
1570
1571 switch (event) {
1572 case NETDEV_REBOOT:
1573 rtnl_event_type = IFLA_EVENT_REBOOT;
1574 break;
1575 case NETDEV_FEAT_CHANGE:
1576 rtnl_event_type = IFLA_EVENT_FEATURES;
1577 break;
1578 case NETDEV_BONDING_FAILOVER:
1579 rtnl_event_type = IFLA_EVENT_BONDING_FAILOVER;
1580 break;
1581 case NETDEV_NOTIFY_PEERS:
1582 rtnl_event_type = IFLA_EVENT_NOTIFY_PEERS;
1583 break;
1584 case NETDEV_RESEND_IGMP:
1585 rtnl_event_type = IFLA_EVENT_IGMP_RESEND;
1586 break;
1587 case NETDEV_CHANGEINFODATA:
1588 rtnl_event_type = IFLA_EVENT_BONDING_OPTIONS;
1589 break;
1590 default:
1591 break;
1592 }
1593
1594 return rtnl_event_type;
1595}
1596
79110a04
FW
1597static int put_master_ifindex(struct sk_buff *skb, struct net_device *dev)
1598{
1599 const struct net_device *upper_dev;
1600 int ret = 0;
1601
1602 rcu_read_lock();
1603
1604 upper_dev = netdev_master_upper_dev_get_rcu(dev);
1605 if (upper_dev)
1606 ret = nla_put_u32(skb, IFLA_MASTER, upper_dev->ifindex);
1607
1608 rcu_read_unlock();
1609 return ret;
1610}
1611
feadc4b6
SD
1612static int nla_put_iflink(struct sk_buff *skb, const struct net_device *dev,
1613 bool force)
79110a04 1614{
e353ea9c 1615 int iflink = dev_get_iflink(dev);
79110a04 1616
e353ea9c
ED
1617 if (force || READ_ONCE(dev->ifindex) != iflink)
1618 return nla_put_u32(skb, IFLA_LINK, iflink);
79110a04 1619
feadc4b6 1620 return 0;
79110a04
FW
1621}
1622
6c557001
FW
1623static noinline_for_stack int nla_put_ifalias(struct sk_buff *skb,
1624 struct net_device *dev)
1625{
1626 char buf[IFALIASZ];
1627 int ret;
1628
1629 ret = dev_get_alias(dev, buf, sizeof(buf));
1630 return ret > 0 ? nla_put_string(skb, IFLA_IFALIAS, buf) : 0;
1631}
1632
b1e66b9a 1633static int rtnl_fill_link_netnsid(struct sk_buff *skb,
79e1ad14 1634 const struct net_device *dev,
d4e4fdf9 1635 struct net *src_net, gfp_t gfp)
b1e66b9a 1636{
feadc4b6
SD
1637 bool put_iflink = false;
1638
b1e66b9a
FW
1639 if (dev->rtnl_link_ops && dev->rtnl_link_ops->get_link_net) {
1640 struct net *link_net = dev->rtnl_link_ops->get_link_net(dev);
1641
1642 if (!net_eq(dev_net(dev), link_net)) {
d4e4fdf9 1643 int id = peernet2id_alloc(src_net, link_net, gfp);
b1e66b9a
FW
1644
1645 if (nla_put_s32(skb, IFLA_LINK_NETNSID, id))
1646 return -EMSGSIZE;
feadc4b6
SD
1647
1648 put_iflink = true;
b1e66b9a
FW
1649 }
1650 }
1651
feadc4b6 1652 return nla_put_iflink(skb, dev, put_iflink);
b1e66b9a
FW
1653}
1654
070cbf5b
FW
1655static int rtnl_fill_link_af(struct sk_buff *skb,
1656 const struct net_device *dev,
1657 u32 ext_filter_mask)
1658{
1659 const struct rtnl_af_ops *af_ops;
1660 struct nlattr *af_spec;
1661
ae0be8de 1662 af_spec = nla_nest_start_noflag(skb, IFLA_AF_SPEC);
070cbf5b
FW
1663 if (!af_spec)
1664 return -EMSGSIZE;
1665
5fa85a09 1666 list_for_each_entry_rcu(af_ops, &rtnl_af_ops, list) {
070cbf5b
FW
1667 struct nlattr *af;
1668 int err;
1669
1670 if (!af_ops->fill_link_af)
1671 continue;
1672
ae0be8de 1673 af = nla_nest_start_noflag(skb, af_ops->family);
070cbf5b
FW
1674 if (!af)
1675 return -EMSGSIZE;
1676
1677 err = af_ops->fill_link_af(skb, dev, ext_filter_mask);
1678 /*
1679 * Caller may return ENODATA to indicate that there
1680 * was no data to be dumped. This is not an error, it
1681 * means we should trim the attribute header and
1682 * continue.
1683 */
1684 if (err == -ENODATA)
1685 nla_nest_cancel(skb, af);
1686 else if (err < 0)
1687 return -EMSGSIZE;
1688
1689 nla_nest_end(skb, af);
1690 }
1691
1692 nla_nest_end(skb, af_spec);
1693 return 0;
1694}
1695
88f4fb0c
JP
1696static int rtnl_fill_alt_ifnames(struct sk_buff *skb,
1697 const struct net_device *dev)
1698{
1699 struct netdev_name_node *name_node;
1700 int count = 0;
1701
0ec4e48c 1702 list_for_each_entry_rcu(name_node, &dev->name_node->list, list) {
88f4fb0c
JP
1703 if (nla_put_string(skb, IFLA_ALT_IFNAME, name_node->name))
1704 return -EMSGSIZE;
1705 count++;
1706 }
1707 return count;
1708}
1709
0ec4e48c 1710/* RCU protected. */
88f4fb0c
JP
1711static int rtnl_fill_prop_list(struct sk_buff *skb,
1712 const struct net_device *dev)
1713{
1714 struct nlattr *prop_list;
1715 int ret;
1716
1717 prop_list = nla_nest_start(skb, IFLA_PROP_LIST);
1718 if (!prop_list)
1719 return -EMSGSIZE;
1720
1721 ret = rtnl_fill_alt_ifnames(skb, dev);
1722 if (ret <= 0)
1723 goto nest_cancel;
1724
1725 nla_nest_end(skb, prop_list);
1726 return 0;
1727
1728nest_cancel:
1729 nla_nest_cancel(skb, prop_list);
1730 return ret;
1731}
1732
829eb208
RP
1733static int rtnl_fill_proto_down(struct sk_buff *skb,
1734 const struct net_device *dev)
1735{
1736 struct nlattr *pr;
1737 u32 preason;
1738
1739 if (nla_put_u8(skb, IFLA_PROTO_DOWN, dev->proto_down))
1740 goto nla_put_failure;
1741
1742 preason = dev->proto_down_reason;
1743 if (!preason)
1744 return 0;
1745
1746 pr = nla_nest_start(skb, IFLA_PROTO_DOWN_REASON);
1747 if (!pr)
1748 return -EMSGSIZE;
1749
1750 if (nla_put_u32(skb, IFLA_PROTO_DOWN_REASON_VALUE, preason)) {
1751 nla_nest_cancel(skb, pr);
1752 goto nla_put_failure;
1753 }
1754
1755 nla_nest_end(skb, pr);
1756 return 0;
1757
1758nla_put_failure:
1759 return -EMSGSIZE;
1760}
1761
dca56c30
JP
1762static int rtnl_fill_devlink_port(struct sk_buff *skb,
1763 const struct net_device *dev)
1764{
1765 struct nlattr *devlink_port_nest;
1766 int ret;
1767
1768 devlink_port_nest = nla_nest_start(skb, IFLA_DEVLINK_PORT);
1769 if (!devlink_port_nest)
1770 return -EMSGSIZE;
1771
1772 if (dev->devlink_port) {
1773 ret = devlink_nl_port_handle_fill(skb, dev->devlink_port);
1774 if (ret < 0)
1775 goto nest_cancel;
1776 }
1777
1778 nla_nest_end(skb, devlink_port_nest);
1779 return 0;
1780
1781nest_cancel:
1782 nla_nest_cancel(skb, devlink_port_nest);
1783 return ret;
1784}
1785
5f184269
JP
1786static int rtnl_fill_dpll_pin(struct sk_buff *skb,
1787 const struct net_device *dev)
1788{
1789 struct nlattr *dpll_pin_nest;
1790 int ret;
1791
1792 dpll_pin_nest = nla_nest_start(skb, IFLA_DPLL_PIN);
1793 if (!dpll_pin_nest)
1794 return -EMSGSIZE;
1795
289e9225 1796 ret = dpll_netdev_add_pin_handle(skb, dev);
5f184269
JP
1797 if (ret < 0)
1798 goto nest_cancel;
1799
1800 nla_nest_end(skb, dpll_pin_nest);
1801 return 0;
1802
1803nest_cancel:
1804 nla_nest_cancel(skb, dpll_pin_nest);
1805 return ret;
1806}
1807
79e1ad14
JB
1808static int rtnl_fill_ifinfo(struct sk_buff *skb,
1809 struct net_device *dev, struct net *src_net,
575c3e2a 1810 int type, u32 pid, u32 seq, u32 change,
3d3ea5af 1811 unsigned int flags, u32 ext_filter_mask,
38e01b30 1812 u32 event, int *new_nsid, int new_ifindex,
d4e4fdf9 1813 int tgt_netnsid, gfp_t gfp)
b60c5115 1814{
8a582681 1815 char devname[IFNAMSIZ];
b60c5115
TG
1816 struct ifinfomsg *ifm;
1817 struct nlmsghdr *nlh;
5891cd5e 1818 struct Qdisc *qdisc;
1da177e4 1819
2907c35f 1820 ASSERT_RTNL();
b60c5115
TG
1821 nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ifm), flags);
1822 if (nlh == NULL)
26932566 1823 return -EMSGSIZE;
1da177e4 1824
b60c5115
TG
1825 ifm = nlmsg_data(nlh);
1826 ifm->ifi_family = AF_UNSPEC;
1827 ifm->__ifi_pad = 0;
1828 ifm->ifi_type = dev->type;
1829 ifm->ifi_index = dev->ifindex;
1830 ifm->ifi_flags = dev_get_flags(dev);
1831 ifm->ifi_change = change;
1832
7e4a8d5a 1833 if (tgt_netnsid >= 0 && nla_put_s32(skb, IFLA_TARGET_NETNSID, tgt_netnsid))
79e1ad14
JB
1834 goto nla_put_failure;
1835
8a582681
ED
1836 netdev_copy_name(dev, devname);
1837 if (nla_put_string(skb, IFLA_IFNAME, devname))
1838 goto nla_put_failure;
1839
1840 if (nla_put_u32(skb, IFLA_TXQLEN, dev->tx_queue_len) ||
a6574349
DM
1841 nla_put_u8(skb, IFLA_OPERSTATE,
1842 netif_running(dev) ? dev->operstate : IF_OPER_DOWN) ||
1843 nla_put_u8(skb, IFLA_LINKMODE, dev->link_mode) ||
1844 nla_put_u32(skb, IFLA_MTU, dev->mtu) ||
3e7a50ce
SH
1845 nla_put_u32(skb, IFLA_MIN_MTU, dev->min_mtu) ||
1846 nla_put_u32(skb, IFLA_MAX_MTU, dev->max_mtu) ||
a6574349 1847 nla_put_u32(skb, IFLA_GROUP, dev->group) ||
edbc0bb3 1848 nla_put_u32(skb, IFLA_PROMISCUITY, dev->promiscuity) ||
7e6e1b57 1849 nla_put_u32(skb, IFLA_ALLMULTI, dev->allmulti) ||
76ff5cc9 1850 nla_put_u32(skb, IFLA_NUM_TX_QUEUES, dev->num_tx_queues) ||
c70ce028
ED
1851 nla_put_u32(skb, IFLA_GSO_MAX_SEGS, dev->gso_max_segs) ||
1852 nla_put_u32(skb, IFLA_GSO_MAX_SIZE, dev->gso_max_size) ||
eac1b93c 1853 nla_put_u32(skb, IFLA_GRO_MAX_SIZE, dev->gro_max_size) ||
9eefedd5
XL
1854 nla_put_u32(skb, IFLA_GSO_IPV4_MAX_SIZE, dev->gso_ipv4_max_size) ||
1855 nla_put_u32(skb, IFLA_GRO_IPV4_MAX_SIZE, dev->gro_ipv4_max_size) ||
89527be8
ED
1856 nla_put_u32(skb, IFLA_TSO_MAX_SIZE, dev->tso_max_size) ||
1857 nla_put_u32(skb, IFLA_TSO_MAX_SEGS, dev->tso_max_segs) ||
1d69c2b3 1858#ifdef CONFIG_RPS
76ff5cc9 1859 nla_put_u32(skb, IFLA_NUM_RX_QUEUES, dev->num_rx_queues) ||
1d69c2b3 1860#endif
79110a04 1861 put_master_ifindex(skb, dev) ||
9a57247f 1862 nla_put_u8(skb, IFLA_CARRIER, netif_carrier_ok(dev)) ||
6c557001 1863 nla_put_ifalias(skb, dev) ||
2d3b479d 1864 nla_put_u32(skb, IFLA_CARRIER_CHANGES,
b2d3bcfa
DD
1865 atomic_read(&dev->carrier_up_count) +
1866 atomic_read(&dev->carrier_down_count)) ||
b2d3bcfa
DD
1867 nla_put_u32(skb, IFLA_CARRIER_UP_COUNT,
1868 atomic_read(&dev->carrier_up_count)) ||
1869 nla_put_u32(skb, IFLA_CARRIER_DOWN_COUNT,
1870 atomic_read(&dev->carrier_down_count)))
a6574349 1871 goto nla_put_failure;
0b815a1a 1872
829eb208
RP
1873 if (rtnl_fill_proto_down(skb, dev))
1874 goto nla_put_failure;
1875
3d3ea5af
VY
1876 if (event != IFLA_EVENT_NONE) {
1877 if (nla_put_u32(skb, IFLA_EVENT, event))
1878 goto nla_put_failure;
1879 }
1880
1da177e4 1881 if (dev->addr_len) {
a6574349
DM
1882 if (nla_put(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr) ||
1883 nla_put(skb, IFLA_BROADCAST, dev->addr_len, dev->broadcast))
1884 goto nla_put_failure;
1da177e4
LT
1885 }
1886
66cae9ed
JP
1887 if (rtnl_phys_port_id_fill(skb, dev))
1888 goto nla_put_failure;
1889
db24a904
DA
1890 if (rtnl_phys_port_name_fill(skb, dev))
1891 goto nla_put_failure;
1892
82f28412
JP
1893 if (rtnl_phys_switch_id_fill(skb, dev))
1894 goto nla_put_failure;
1895
b22b941b 1896 if (rtnl_fill_stats(skb, dev))
10708f37 1897 goto nla_put_failure;
10708f37 1898
250fc3df 1899 if (rtnl_fill_vf(skb, dev, ext_filter_mask))
a6574349 1900 goto nla_put_failure;
57b61080 1901
c53864fd 1902 if (rtnl_port_fill(skb, dev, ext_filter_mask))
57b61080
SF
1903 goto nla_put_failure;
1904
d1fdd913
BB
1905 if (rtnl_xdp_fill(skb, dev))
1906 goto nla_put_failure;
1907
ba7d49b1 1908 if (dev->rtnl_link_ops || rtnl_have_link_slave_info(dev)) {
38f7b870
PM
1909 if (rtnl_link_fill(skb, dev) < 0)
1910 goto nla_put_failure;
1911 }
1912
d4e4fdf9 1913 if (rtnl_fill_link_netnsid(skb, dev, src_net, gfp))
b1e66b9a 1914 goto nla_put_failure;
d37512a2 1915
6621dd29
ND
1916 if (new_nsid &&
1917 nla_put_s32(skb, IFLA_NEW_NETNSID, *new_nsid) < 0)
1918 goto nla_put_failure;
38e01b30
ND
1919 if (new_ifindex &&
1920 nla_put_s32(skb, IFLA_NEW_IFINDEX, new_ifindex) < 0)
1921 goto nla_put_failure;
1922
f74877a5
MK
1923 if (memchr_inv(dev->perm_addr, '\0', dev->addr_len) &&
1924 nla_put(skb, IFLA_PERM_ADDRESS, dev->addr_len, dev->perm_addr))
1925 goto nla_put_failure;
6621dd29 1926
5fa85a09 1927 rcu_read_lock();
698419ff
ED
1928 qdisc = rcu_dereference(dev->qdisc);
1929 if (qdisc && nla_put_string(skb, IFLA_QDISC, qdisc->ops->id))
1930 goto nla_put_failure_rcu;
070cbf5b 1931 if (rtnl_fill_link_af(skb, dev, ext_filter_mask))
5fa85a09 1932 goto nla_put_failure_rcu;
74808e72
ED
1933 if (rtnl_fill_link_ifmap(skb, dev))
1934 goto nla_put_failure_rcu;
88f4fb0c 1935 if (rtnl_fill_prop_list(skb, dev))
0ec4e48c
ED
1936 goto nla_put_failure_rcu;
1937 rcu_read_unlock();
88f4fb0c 1938
00e77ed8
JB
1939 if (dev->dev.parent &&
1940 nla_put_string(skb, IFLA_PARENT_DEV_NAME,
1941 dev_name(dev->dev.parent)))
1942 goto nla_put_failure;
1943
1944 if (dev->dev.parent && dev->dev.parent->bus &&
1945 nla_put_string(skb, IFLA_PARENT_DEV_BUS_NAME,
1946 dev->dev.parent->bus->name))
1947 goto nla_put_failure;
1948
dca56c30
JP
1949 if (rtnl_fill_devlink_port(skb, dev))
1950 goto nla_put_failure;
1951
5f184269
JP
1952 if (rtnl_fill_dpll_pin(skb, dev))
1953 goto nla_put_failure;
1954
053c095a
JB
1955 nlmsg_end(skb, nlh);
1956 return 0;
b60c5115 1957
5fa85a09
FW
1958nla_put_failure_rcu:
1959 rcu_read_unlock();
b60c5115 1960nla_put_failure:
26932566
PM
1961 nlmsg_cancel(skb, nlh);
1962 return -EMSGSIZE;
1da177e4
LT
1963}
1964
f7b12606 1965static const struct nla_policy ifla_policy[IFLA_MAX+1] = {
5176f91e 1966 [IFLA_IFNAME] = { .type = NLA_STRING, .len = IFNAMSIZ-1 },
38f7b870
PM
1967 [IFLA_ADDRESS] = { .type = NLA_BINARY, .len = MAX_ADDR_LEN },
1968 [IFLA_BROADCAST] = { .type = NLA_BINARY, .len = MAX_ADDR_LEN },
5176f91e 1969 [IFLA_MAP] = { .len = sizeof(struct rtnl_link_ifmap) },
da5e0494 1970 [IFLA_MTU] = { .type = NLA_U32 },
76e87306 1971 [IFLA_LINK] = { .type = NLA_U32 },
fbaec0ea 1972 [IFLA_MASTER] = { .type = NLA_U32 },
9a57247f 1973 [IFLA_CARRIER] = { .type = NLA_U8 },
da5e0494
TG
1974 [IFLA_TXQLEN] = { .type = NLA_U32 },
1975 [IFLA_WEIGHT] = { .type = NLA_U32 },
1976 [IFLA_OPERSTATE] = { .type = NLA_U8 },
1977 [IFLA_LINKMODE] = { .type = NLA_U8 },
76e87306 1978 [IFLA_LINKINFO] = { .type = NLA_NESTED },
d8a5ec67 1979 [IFLA_NET_NS_PID] = { .type = NLA_U32 },
f0630529 1980 [IFLA_NET_NS_FD] = { .type = NLA_U32 },
2459b4c6
ND
1981 /* IFLA_IFALIAS is a string, but policy is set to NLA_BINARY to
1982 * allow 0-length string (needed to remove an alias).
1983 */
1984 [IFLA_IFALIAS] = { .type = NLA_BINARY, .len = IFALIASZ - 1 },
c02db8c6 1985 [IFLA_VFINFO_LIST] = {. type = NLA_NESTED },
57b61080
SF
1986 [IFLA_VF_PORTS] = { .type = NLA_NESTED },
1987 [IFLA_PORT_SELF] = { .type = NLA_NESTED },
f8ff182c 1988 [IFLA_AF_SPEC] = { .type = NLA_NESTED },
115c9b81 1989 [IFLA_EXT_MASK] = { .type = NLA_U32 },
edbc0bb3 1990 [IFLA_PROMISCUITY] = { .type = NLA_U32 },
76ff5cc9
JP
1991 [IFLA_NUM_TX_QUEUES] = { .type = NLA_U32 },
1992 [IFLA_NUM_RX_QUEUES] = { .type = NLA_U32 },
46e6b992
SH
1993 [IFLA_GSO_MAX_SEGS] = { .type = NLA_U32 },
1994 [IFLA_GSO_MAX_SIZE] = { .type = NLA_U32 },
02637fce 1995 [IFLA_PHYS_PORT_ID] = { .type = NLA_BINARY, .len = MAX_PHYS_ITEM_ID_LEN },
2d3b479d 1996 [IFLA_CARRIER_CHANGES] = { .type = NLA_U32 }, /* ignored */
82f28412 1997 [IFLA_PHYS_SWITCH_ID] = { .type = NLA_BINARY, .len = MAX_PHYS_ITEM_ID_LEN },
317f4810 1998 [IFLA_LINK_NETNSID] = { .type = NLA_S32 },
88d6378b 1999 [IFLA_PROTO_DOWN] = { .type = NLA_U8 },
d1fdd913 2000 [IFLA_XDP] = { .type = NLA_NESTED },
3d3ea5af 2001 [IFLA_EVENT] = { .type = NLA_U32 },
db833d40 2002 [IFLA_GROUP] = { .type = NLA_U32 },
7e4a8d5a 2003 [IFLA_TARGET_NETNSID] = { .type = NLA_S32 },
b2d3bcfa
DD
2004 [IFLA_CARRIER_UP_COUNT] = { .type = NLA_U32 },
2005 [IFLA_CARRIER_DOWN_COUNT] = { .type = NLA_U32 },
3e7a50ce
SH
2006 [IFLA_MIN_MTU] = { .type = NLA_U32 },
2007 [IFLA_MAX_MTU] = { .type = NLA_U32 },
36fbf1e5
JP
2008 [IFLA_PROP_LIST] = { .type = NLA_NESTED },
2009 [IFLA_ALT_IFNAME] = { .type = NLA_STRING,
2010 .len = ALTIFNAMSIZ - 1 },
f74877a5 2011 [IFLA_PERM_ADDRESS] = { .type = NLA_REJECT },
829eb208 2012 [IFLA_PROTO_DOWN_REASON] = { .type = NLA_NESTED },
7e4a5131 2013 [IFLA_NEW_IFINDEX] = NLA_POLICY_MIN(NLA_S32, 1),
88b71053 2014 [IFLA_PARENT_DEV_NAME] = { .type = NLA_NUL_STRING },
eac1b93c 2015 [IFLA_GRO_MAX_SIZE] = { .type = NLA_U32 },
89527be8
ED
2016 [IFLA_TSO_MAX_SIZE] = { .type = NLA_REJECT },
2017 [IFLA_TSO_MAX_SEGS] = { .type = NLA_REJECT },
7e6e1b57 2018 [IFLA_ALLMULTI] = { .type = NLA_REJECT },
9eefedd5
XL
2019 [IFLA_GSO_IPV4_MAX_SIZE] = { .type = NLA_U32 },
2020 [IFLA_GRO_IPV4_MAX_SIZE] = { .type = NLA_U32 },
da5e0494
TG
2021};
2022
38f7b870
PM
2023static const struct nla_policy ifla_info_policy[IFLA_INFO_MAX+1] = {
2024 [IFLA_INFO_KIND] = { .type = NLA_STRING },
2025 [IFLA_INFO_DATA] = { .type = NLA_NESTED },
ba7d49b1
JP
2026 [IFLA_INFO_SLAVE_KIND] = { .type = NLA_STRING },
2027 [IFLA_INFO_SLAVE_DATA] = { .type = NLA_NESTED },
38f7b870
PM
2028};
2029
c02db8c6 2030static const struct nla_policy ifla_vf_policy[IFLA_VF_MAX+1] = {
364d5716 2031 [IFLA_VF_MAC] = { .len = sizeof(struct ifla_vf_mac) },
75345f88 2032 [IFLA_VF_BROADCAST] = { .type = NLA_REJECT },
364d5716 2033 [IFLA_VF_VLAN] = { .len = sizeof(struct ifla_vf_vlan) },
79aab093 2034 [IFLA_VF_VLAN_LIST] = { .type = NLA_NESTED },
364d5716
DB
2035 [IFLA_VF_TX_RATE] = { .len = sizeof(struct ifla_vf_tx_rate) },
2036 [IFLA_VF_SPOOFCHK] = { .len = sizeof(struct ifla_vf_spoofchk) },
2037 [IFLA_VF_RATE] = { .len = sizeof(struct ifla_vf_rate) },
2038 [IFLA_VF_LINK_STATE] = { .len = sizeof(struct ifla_vf_link_state) },
01a3d796 2039 [IFLA_VF_RSS_QUERY_EN] = { .len = sizeof(struct ifla_vf_rss_query_en) },
3b766cd8 2040 [IFLA_VF_STATS] = { .type = NLA_NESTED },
dd461d6a 2041 [IFLA_VF_TRUST] = { .len = sizeof(struct ifla_vf_trust) },
cc8e27cc
EC
2042 [IFLA_VF_IB_NODE_GUID] = { .len = sizeof(struct ifla_vf_guid) },
2043 [IFLA_VF_IB_PORT_GUID] = { .len = sizeof(struct ifla_vf_guid) },
3b766cd8
EBE
2044};
2045
57b61080
SF
2046static const struct nla_policy ifla_port_policy[IFLA_PORT_MAX+1] = {
2047 [IFLA_PORT_VF] = { .type = NLA_U32 },
2048 [IFLA_PORT_PROFILE] = { .type = NLA_STRING,
2049 .len = PORT_PROFILE_MAX },
57b61080
SF
2050 [IFLA_PORT_INSTANCE_UUID] = { .type = NLA_BINARY,
2051 .len = PORT_UUID_MAX },
2052 [IFLA_PORT_HOST_UUID] = { .type = NLA_STRING,
2053 .len = PORT_UUID_MAX },
2054 [IFLA_PORT_REQUEST] = { .type = NLA_U8, },
2055 [IFLA_PORT_RESPONSE] = { .type = NLA_U16, },
025331df
DB
2056
2057 /* Unused, but we need to keep it here since user space could
2058 * fill it. It's also broken with regard to NLA_BINARY use in
2059 * combination with structs.
2060 */
2061 [IFLA_PORT_VSI_TYPE] = { .type = NLA_BINARY,
2062 .len = sizeof(struct ifla_port_vsi) },
57b61080
SF
2063};
2064
d1fdd913 2065static const struct nla_policy ifla_xdp_policy[IFLA_XDP_MAX + 1] = {
92234c8f 2066 [IFLA_XDP_UNSPEC] = { .strict_start_type = IFLA_XDP_EXPECTED_FD },
d1fdd913 2067 [IFLA_XDP_FD] = { .type = NLA_S32 },
92234c8f 2068 [IFLA_XDP_EXPECTED_FD] = { .type = NLA_S32 },
d1fdd913 2069 [IFLA_XDP_ATTACHED] = { .type = NLA_U8 },
85de8576 2070 [IFLA_XDP_FLAGS] = { .type = NLA_U32 },
58038695 2071 [IFLA_XDP_PROG_ID] = { .type = NLA_U32 },
d1fdd913
BB
2072};
2073
dc599f76
DA
2074static const struct rtnl_link_ops *linkinfo_to_kind_ops(const struct nlattr *nla)
2075{
2076 const struct rtnl_link_ops *ops = NULL;
2077 struct nlattr *linfo[IFLA_INFO_MAX + 1];
2078
8cb08174 2079 if (nla_parse_nested_deprecated(linfo, IFLA_INFO_MAX, nla, ifla_info_policy, NULL) < 0)
dc599f76
DA
2080 return NULL;
2081
2082 if (linfo[IFLA_INFO_KIND]) {
2083 char kind[MODULE_NAME_LEN];
2084
872f6903 2085 nla_strscpy(kind, linfo[IFLA_INFO_KIND], sizeof(kind));
dc599f76
DA
2086 ops = rtnl_link_ops_get(kind);
2087 }
2088
2089 return ops;
2090}
2091
2092static bool link_master_filtered(struct net_device *dev, int master_idx)
2093{
2094 struct net_device *master;
2095
2096 if (!master_idx)
2097 return false;
2098
2099 master = netdev_master_upper_dev_get(dev);
d3432bf1
LS
2100
2101 /* 0 is already used to denote IFLA_MASTER wasn't passed, therefore need
2102 * another invalid value for ifindex to denote "no master".
2103 */
2104 if (master_idx == -1)
2105 return !!master;
2106
dc599f76
DA
2107 if (!master || master->ifindex != master_idx)
2108 return true;
2109
2110 return false;
2111}
2112
2113static bool link_kind_filtered(const struct net_device *dev,
2114 const struct rtnl_link_ops *kind_ops)
2115{
2116 if (kind_ops && dev->rtnl_link_ops != kind_ops)
2117 return true;
2118
2119 return false;
2120}
2121
2122static bool link_dump_filtered(struct net_device *dev,
2123 int master_idx,
2124 const struct rtnl_link_ops *kind_ops)
2125{
2126 if (link_master_filtered(dev, master_idx) ||
2127 link_kind_filtered(dev, kind_ops))
2128 return true;
2129
2130 return false;
2131}
2132
c383edc4
CB
2133/**
2134 * rtnl_get_net_ns_capable - Get netns if sufficiently privileged.
2135 * @sk: netlink socket
2136 * @netnsid: network namespace identifier
2137 *
2138 * Returns the network namespace identified by netnsid on success or an error
2139 * pointer on failure.
2140 */
2141struct net *rtnl_get_net_ns_capable(struct sock *sk, int netnsid)
79e1ad14
JB
2142{
2143 struct net *net;
2144
f428fe4a 2145 net = get_net_ns_by_id(sock_net(sk), netnsid);
79e1ad14
JB
2146 if (!net)
2147 return ERR_PTR(-EINVAL);
2148
2149 /* For now, the caller is required to have CAP_NET_ADMIN in
2150 * the user namespace owning the target net ns.
2151 */
f428fe4a 2152 if (!sk_ns_capable(sk, net->user_ns, CAP_NET_ADMIN)) {
79e1ad14
JB
2153 put_net(net);
2154 return ERR_PTR(-EACCES);
2155 }
2156 return net;
2157}
c383edc4 2158EXPORT_SYMBOL_GPL(rtnl_get_net_ns_capable);
79e1ad14 2159
905cf0ab
DA
2160static int rtnl_valid_dump_ifinfo_req(const struct nlmsghdr *nlh,
2161 bool strict_check, struct nlattr **tb,
2162 struct netlink_ext_ack *extack)
2163{
2164 int hdrlen;
2165
2166 if (strict_check) {
2167 struct ifinfomsg *ifm;
2168
2169 if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ifm))) {
2170 NL_SET_ERR_MSG(extack, "Invalid header for link dump");
2171 return -EINVAL;
2172 }
2173
2174 ifm = nlmsg_data(nlh);
2175 if (ifm->__ifi_pad || ifm->ifi_type || ifm->ifi_flags ||
2176 ifm->ifi_change) {
2177 NL_SET_ERR_MSG(extack, "Invalid values in header for link dump request");
2178 return -EINVAL;
2179 }
2180 if (ifm->ifi_index) {
2181 NL_SET_ERR_MSG(extack, "Filter by device index not supported for link dumps");
2182 return -EINVAL;
2183 }
2184
8cb08174
JB
2185 return nlmsg_parse_deprecated_strict(nlh, sizeof(*ifm), tb,
2186 IFLA_MAX, ifla_policy,
2187 extack);
905cf0ab
DA
2188 }
2189
2190 /* A hack to preserve kernel<->userspace interface.
2191 * The correct header is ifinfomsg. It is consistent with rtnl_getlink.
2192 * However, before Linux v3.9 the code here assumed rtgenmsg and that's
2193 * what iproute2 < v3.9.0 used.
2194 * We can detect the old iproute2. Even including the IFLA_EXT_MASK
2195 * attribute, its netlink message is shorter than struct ifinfomsg.
2196 */
2197 hdrlen = nlmsg_len(nlh) < sizeof(struct ifinfomsg) ?
2198 sizeof(struct rtgenmsg) : sizeof(struct ifinfomsg);
2199
8cb08174
JB
2200 return nlmsg_parse_deprecated(nlh, hdrlen, tb, IFLA_MAX, ifla_policy,
2201 extack);
905cf0ab
DA
2202}
2203
f7b12606
JP
2204static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
2205{
3e41af90 2206 const struct rtnl_link_ops *kind_ops = NULL;
905cf0ab
DA
2207 struct netlink_ext_ack *extack = cb->extack;
2208 const struct nlmsghdr *nlh = cb->nlh;
f7b12606 2209 struct net *net = sock_net(skb->sk);
3e41af90 2210 unsigned int flags = NLM_F_MULTI;
f7b12606 2211 struct nlattr *tb[IFLA_MAX+1];
3e41af90
ED
2212 struct {
2213 unsigned long ifindex;
2214 } *ctx = (void *)cb->ctx;
2215 struct net *tgt_net = net;
f7b12606 2216 u32 ext_filter_mask = 0;
3e41af90 2217 struct net_device *dev;
dc599f76 2218 int master_idx = 0;
79e1ad14 2219 int netnsid = -1;
905cf0ab 2220 int err, i;
f7b12606 2221
905cf0ab
DA
2222 err = rtnl_valid_dump_ifinfo_req(nlh, cb->strict_check, tb, extack);
2223 if (err < 0) {
2224 if (cb->strict_check)
2225 return err;
2226
2227 goto walk_entries;
2228 }
2229
2230 for (i = 0; i <= IFLA_MAX; ++i) {
2231 if (!tb[i])
2232 continue;
e5eca6d4 2233
905cf0ab
DA
2234 /* new attributes should only be added with strict checking */
2235 switch (i) {
2236 case IFLA_TARGET_NETNSID:
2237 netnsid = nla_get_s32(tb[i]);
c383edc4 2238 tgt_net = rtnl_get_net_ns_capable(skb->sk, netnsid);
905cf0ab
DA
2239 if (IS_ERR(tgt_net)) {
2240 NL_SET_ERR_MSG(extack, "Invalid target network namespace id");
893626d6 2241 return PTR_ERR(tgt_net);
905cf0ab
DA
2242 }
2243 break;
2244 case IFLA_EXT_MASK:
2245 ext_filter_mask = nla_get_u32(tb[i]);
2246 break;
2247 case IFLA_MASTER:
2248 master_idx = nla_get_u32(tb[i]);
2249 break;
2250 case IFLA_LINKINFO:
2251 kind_ops = linkinfo_to_kind_ops(tb[i]);
2252 break;
2253 default:
2254 if (cb->strict_check) {
2255 NL_SET_ERR_MSG(extack, "Unsupported attribute in link dump request");
2256 return -EINVAL;
2257 }
79e1ad14 2258 }
f7b12606
JP
2259 }
2260
905cf0ab
DA
2261 if (master_idx || kind_ops)
2262 flags |= NLM_F_DUMP_FILTERED;
2263
2264walk_entries:
3e41af90
ED
2265 err = 0;
2266 for_each_netdev_dump(tgt_net, dev, ctx->ifindex) {
2267 if (link_dump_filtered(dev, master_idx, kind_ops))
2268 continue;
2269 err = rtnl_fill_ifinfo(skb, dev, net, RTM_NEWLINK,
2270 NETLINK_CB(cb->skb).portid,
2271 nlh->nlmsg_seq, 0, flags,
2272 ext_filter_mask, 0, NULL, 0,
2273 netnsid, GFP_KERNEL);
02e24903 2274 if (err < 0)
3e41af90 2275 break;
f7b12606 2276 }
a9ecb0cb 2277 cb->seq = tgt_net->dev_base_seq;
d0225784 2278 nl_dump_check_consistent(cb, nlmsg_hdr(skb));
79e1ad14
JB
2279 if (netnsid >= 0)
2280 put_net(tgt_net);
f7b12606 2281
f6c5775f 2282 return err;
f7b12606
JP
2283}
2284
f534f658
JK
2285int rtnl_nla_parse_ifinfomsg(struct nlattr **tb, const struct nlattr *nla_peer,
2286 struct netlink_ext_ack *exterr)
f7b12606 2287{
f534f658
JK
2288 const struct ifinfomsg *ifmp;
2289 const struct nlattr *attrs;
2290 size_t len;
2291
2292 ifmp = nla_data(nla_peer);
2293 attrs = nla_data(nla_peer) + sizeof(struct ifinfomsg);
2294 len = nla_len(nla_peer) - sizeof(struct ifinfomsg);
2295
2296 if (ifmp->ifi_index < 0) {
2297 NL_SET_ERR_MSG_ATTR(exterr, nla_peer,
2298 "ifindex can't be negative");
2299 return -EINVAL;
2300 }
2301
2302 return nla_parse_deprecated(tb, IFLA_MAX, attrs, len, ifla_policy,
8cb08174 2303 exterr);
f7b12606 2304}
f534f658 2305EXPORT_SYMBOL(rtnl_nla_parse_ifinfomsg);
f7b12606 2306
81adee47
EB
2307struct net *rtnl_link_get_net(struct net *src_net, struct nlattr *tb[])
2308{
2309 struct net *net;
2310 /* Examine the link attributes and figure out which
2311 * network namespace we are talking about.
2312 */
2313 if (tb[IFLA_NET_NS_PID])
2314 net = get_net_ns_by_pid(nla_get_u32(tb[IFLA_NET_NS_PID]));
f0630529
EB
2315 else if (tb[IFLA_NET_NS_FD])
2316 net = get_net_ns_by_fd(nla_get_u32(tb[IFLA_NET_NS_FD]));
81adee47
EB
2317 else
2318 net = get_net(src_net);
2319 return net;
2320}
2321EXPORT_SYMBOL(rtnl_link_get_net);
2322
7c4f63ba
CB
2323/* Figure out which network namespace we are talking about by
2324 * examining the link attributes in the following order:
2325 *
2326 * 1. IFLA_NET_NS_PID
2327 * 2. IFLA_NET_NS_FD
7e4a8d5a 2328 * 3. IFLA_TARGET_NETNSID
7c4f63ba
CB
2329 */
2330static struct net *rtnl_link_get_net_by_nlattr(struct net *src_net,
2331 struct nlattr *tb[])
2332{
2333 struct net *net;
2334
2335 if (tb[IFLA_NET_NS_PID] || tb[IFLA_NET_NS_FD])
2336 return rtnl_link_get_net(src_net, tb);
2337
7e4a8d5a 2338 if (!tb[IFLA_TARGET_NETNSID])
7c4f63ba
CB
2339 return get_net(src_net);
2340
7e4a8d5a 2341 net = get_net_ns_by_id(src_net, nla_get_u32(tb[IFLA_TARGET_NETNSID]));
7c4f63ba
CB
2342 if (!net)
2343 return ERR_PTR(-EINVAL);
2344
2345 return net;
2346}
2347
2348static struct net *rtnl_link_get_net_capable(const struct sk_buff *skb,
2349 struct net *src_net,
2350 struct nlattr *tb[], int cap)
2351{
2352 struct net *net;
2353
2354 net = rtnl_link_get_net_by_nlattr(src_net, tb);
2355 if (IS_ERR(net))
2356 return net;
2357
2358 if (!netlink_ns_capable(skb, net->user_ns, cap)) {
2359 put_net(net);
2360 return ERR_PTR(-EPERM);
2361 }
2362
2363 return net;
2364}
2365
4ff66cae
CB
2366/* Verify that rtnetlink requests do not pass additional properties
2367 * potentially referring to different network namespaces.
2368 */
2369static int rtnl_ensure_unique_netns(struct nlattr *tb[],
2370 struct netlink_ext_ack *extack,
2371 bool netns_id_only)
2372{
2373
2374 if (netns_id_only) {
2375 if (!tb[IFLA_NET_NS_PID] && !tb[IFLA_NET_NS_FD])
2376 return 0;
2377
2378 NL_SET_ERR_MSG(extack, "specified netns attribute not supported");
2379 return -EOPNOTSUPP;
2380 }
2381
7e4a8d5a 2382 if (tb[IFLA_TARGET_NETNSID] && (tb[IFLA_NET_NS_PID] || tb[IFLA_NET_NS_FD]))
4ff66cae
CB
2383 goto invalid_attr;
2384
7e4a8d5a 2385 if (tb[IFLA_NET_NS_PID] && (tb[IFLA_TARGET_NETNSID] || tb[IFLA_NET_NS_FD]))
4ff66cae
CB
2386 goto invalid_attr;
2387
7e4a8d5a 2388 if (tb[IFLA_NET_NS_FD] && (tb[IFLA_TARGET_NETNSID] || tb[IFLA_NET_NS_PID]))
4ff66cae
CB
2389 goto invalid_attr;
2390
2391 return 0;
2392
2393invalid_attr:
2394 NL_SET_ERR_MSG(extack, "multiple netns identifying attributes specified");
2395 return -EINVAL;
2396}
2397
a14857c2
BC
2398static int rtnl_set_vf_rate(struct net_device *dev, int vf, int min_tx_rate,
2399 int max_tx_rate)
2400{
2401 const struct net_device_ops *ops = dev->netdev_ops;
2402
2403 if (!ops->ndo_set_vf_rate)
2404 return -EOPNOTSUPP;
2405 if (max_tx_rate && max_tx_rate < min_tx_rate)
2406 return -EINVAL;
2407
2408 return ops->ndo_set_vf_rate(dev, vf, min_tx_rate, max_tx_rate);
2409}
2410
8679c31e
RY
2411static int validate_linkmsg(struct net_device *dev, struct nlattr *tb[],
2412 struct netlink_ext_ack *extack)
1840bb13 2413{
89da780a
XL
2414 if (tb[IFLA_ADDRESS] &&
2415 nla_len(tb[IFLA_ADDRESS]) < dev->addr_len)
2416 return -EINVAL;
1840bb13 2417
89da780a
XL
2418 if (tb[IFLA_BROADCAST] &&
2419 nla_len(tb[IFLA_BROADCAST]) < dev->addr_len)
2420 return -EINVAL;
fef5b228 2421
89da780a
XL
2422 if (tb[IFLA_GSO_MAX_SIZE] &&
2423 nla_get_u32(tb[IFLA_GSO_MAX_SIZE]) > dev->tso_max_size) {
2424 NL_SET_ERR_MSG(extack, "too big gso_max_size");
2425 return -EINVAL;
2426 }
fef5b228 2427
89da780a
XL
2428 if (tb[IFLA_GSO_MAX_SEGS] &&
2429 (nla_get_u32(tb[IFLA_GSO_MAX_SEGS]) > GSO_MAX_SEGS ||
2430 nla_get_u32(tb[IFLA_GSO_MAX_SEGS]) > dev->tso_max_segs)) {
2431 NL_SET_ERR_MSG(extack, "too big gso_max_segs");
2432 return -EINVAL;
2433 }
fef5b228 2434
89da780a
XL
2435 if (tb[IFLA_GRO_MAX_SIZE] &&
2436 nla_get_u32(tb[IFLA_GRO_MAX_SIZE]) > GRO_MAX_SIZE) {
2437 NL_SET_ERR_MSG(extack, "too big gro_max_size");
2438 return -EINVAL;
2439 }
65d6914e 2440
89da780a
XL
2441 if (tb[IFLA_GSO_IPV4_MAX_SIZE] &&
2442 nla_get_u32(tb[IFLA_GSO_IPV4_MAX_SIZE]) > dev->tso_max_size) {
2443 NL_SET_ERR_MSG(extack, "too big gso_ipv4_max_size");
2444 return -EINVAL;
2445 }
65d6914e 2446
89da780a
XL
2447 if (tb[IFLA_GRO_IPV4_MAX_SIZE] &&
2448 nla_get_u32(tb[IFLA_GRO_IPV4_MAX_SIZE]) > GRO_MAX_SIZE) {
2449 NL_SET_ERR_MSG(extack, "too big gro_ipv4_max_size");
2450 return -EINVAL;
1840bb13
TG
2451 }
2452
cf7afbfe
TG
2453 if (tb[IFLA_AF_SPEC]) {
2454 struct nlattr *af;
2455 int rem, err;
2456
2457 nla_for_each_nested(af, tb[IFLA_AF_SPEC], rem) {
2458 const struct rtnl_af_ops *af_ops;
2459
5fa85a09 2460 af_ops = rtnl_af_lookup(nla_type(af));
a100243d 2461 if (!af_ops)
cf7afbfe
TG
2462 return -EAFNOSUPPORT;
2463
a100243d 2464 if (!af_ops->set_link_af)
cf7afbfe
TG
2465 return -EOPNOTSUPP;
2466
2467 if (af_ops->validate_link_af) {
8679c31e 2468 err = af_ops->validate_link_af(dev, af, extack);
a100243d 2469 if (err < 0)
cf7afbfe
TG
2470 return err;
2471 }
2472 }
2473 }
2474
1840bb13
TG
2475 return 0;
2476}
2477
cc8e27cc
EC
2478static int handle_infiniband_guid(struct net_device *dev, struct ifla_vf_guid *ivt,
2479 int guid_type)
2480{
2481 const struct net_device_ops *ops = dev->netdev_ops;
2482
2483 return ops->ndo_set_vf_guid(dev, ivt->vf, ivt->guid, guid_type);
2484}
2485
2486static int handle_vf_guid(struct net_device *dev, struct ifla_vf_guid *ivt, int guid_type)
2487{
2488 if (dev->type != ARPHRD_INFINIBAND)
2489 return -EOPNOTSUPP;
2490
2491 return handle_infiniband_guid(dev, ivt, guid_type);
2492}
2493
4f7d2cdf 2494static int do_setvfinfo(struct net_device *dev, struct nlattr **tb)
c02db8c6 2495{
c02db8c6 2496 const struct net_device_ops *ops = dev->netdev_ops;
4f7d2cdf 2497 int err = -EINVAL;
c02db8c6 2498
4f7d2cdf
DB
2499 if (tb[IFLA_VF_MAC]) {
2500 struct ifla_vf_mac *ivm = nla_data(tb[IFLA_VF_MAC]);
01a3d796 2501
ff08ddba
DC
2502 if (ivm->vf >= INT_MAX)
2503 return -EINVAL;
4f7d2cdf
DB
2504 err = -EOPNOTSUPP;
2505 if (ops->ndo_set_vf_mac)
2506 err = ops->ndo_set_vf_mac(dev, ivm->vf,
2507 ivm->mac);
2508 if (err < 0)
2509 return err;
2510 }
2511
2512 if (tb[IFLA_VF_VLAN]) {
2513 struct ifla_vf_vlan *ivv = nla_data(tb[IFLA_VF_VLAN]);
2514
ff08ddba
DC
2515 if (ivv->vf >= INT_MAX)
2516 return -EINVAL;
4f7d2cdf
DB
2517 err = -EOPNOTSUPP;
2518 if (ops->ndo_set_vf_vlan)
2519 err = ops->ndo_set_vf_vlan(dev, ivv->vf, ivv->vlan,
79aab093
MS
2520 ivv->qos,
2521 htons(ETH_P_8021Q));
2522 if (err < 0)
2523 return err;
2524 }
2525
2526 if (tb[IFLA_VF_VLAN_LIST]) {
2527 struct ifla_vf_vlan_info *ivvl[MAX_VLAN_LIST_LEN];
2528 struct nlattr *attr;
2529 int rem, len = 0;
2530
2531 err = -EOPNOTSUPP;
2532 if (!ops->ndo_set_vf_vlan)
2533 return err;
2534
2535 nla_for_each_nested(attr, tb[IFLA_VF_VLAN_LIST], rem) {
2536 if (nla_type(attr) != IFLA_VF_VLAN_INFO ||
2537 nla_len(attr) < NLA_HDRLEN) {
2538 return -EINVAL;
2539 }
2540 if (len >= MAX_VLAN_LIST_LEN)
2541 return -EOPNOTSUPP;
2542 ivvl[len] = nla_data(attr);
2543
2544 len++;
2545 }
fa34cd94
AB
2546 if (len == 0)
2547 return -EINVAL;
2548
ff08ddba
DC
2549 if (ivvl[0]->vf >= INT_MAX)
2550 return -EINVAL;
79aab093
MS
2551 err = ops->ndo_set_vf_vlan(dev, ivvl[0]->vf, ivvl[0]->vlan,
2552 ivvl[0]->qos, ivvl[0]->vlan_proto);
4f7d2cdf
DB
2553 if (err < 0)
2554 return err;
c02db8c6 2555 }
4f7d2cdf
DB
2556
2557 if (tb[IFLA_VF_TX_RATE]) {
2558 struct ifla_vf_tx_rate *ivt = nla_data(tb[IFLA_VF_TX_RATE]);
2559 struct ifla_vf_info ivf;
2560
ff08ddba
DC
2561 if (ivt->vf >= INT_MAX)
2562 return -EINVAL;
4f7d2cdf
DB
2563 err = -EOPNOTSUPP;
2564 if (ops->ndo_get_vf_config)
2565 err = ops->ndo_get_vf_config(dev, ivt->vf, &ivf);
2566 if (err < 0)
2567 return err;
2568
a14857c2
BC
2569 err = rtnl_set_vf_rate(dev, ivt->vf,
2570 ivf.min_tx_rate, ivt->rate);
4f7d2cdf
DB
2571 if (err < 0)
2572 return err;
2573 }
2574
2575 if (tb[IFLA_VF_RATE]) {
2576 struct ifla_vf_rate *ivt = nla_data(tb[IFLA_VF_RATE]);
2577
ff08ddba
DC
2578 if (ivt->vf >= INT_MAX)
2579 return -EINVAL;
a14857c2
BC
2580
2581 err = rtnl_set_vf_rate(dev, ivt->vf,
2582 ivt->min_tx_rate, ivt->max_tx_rate);
4f7d2cdf
DB
2583 if (err < 0)
2584 return err;
2585 }
2586
2587 if (tb[IFLA_VF_SPOOFCHK]) {
2588 struct ifla_vf_spoofchk *ivs = nla_data(tb[IFLA_VF_SPOOFCHK]);
2589
ff08ddba
DC
2590 if (ivs->vf >= INT_MAX)
2591 return -EINVAL;
4f7d2cdf
DB
2592 err = -EOPNOTSUPP;
2593 if (ops->ndo_set_vf_spoofchk)
2594 err = ops->ndo_set_vf_spoofchk(dev, ivs->vf,
2595 ivs->setting);
2596 if (err < 0)
2597 return err;
2598 }
2599
2600 if (tb[IFLA_VF_LINK_STATE]) {
2601 struct ifla_vf_link_state *ivl = nla_data(tb[IFLA_VF_LINK_STATE]);
2602
ff08ddba
DC
2603 if (ivl->vf >= INT_MAX)
2604 return -EINVAL;
4f7d2cdf
DB
2605 err = -EOPNOTSUPP;
2606 if (ops->ndo_set_vf_link_state)
2607 err = ops->ndo_set_vf_link_state(dev, ivl->vf,
2608 ivl->link_state);
2609 if (err < 0)
2610 return err;
2611 }
2612
2613 if (tb[IFLA_VF_RSS_QUERY_EN]) {
2614 struct ifla_vf_rss_query_en *ivrssq_en;
2615
2616 err = -EOPNOTSUPP;
2617 ivrssq_en = nla_data(tb[IFLA_VF_RSS_QUERY_EN]);
ff08ddba
DC
2618 if (ivrssq_en->vf >= INT_MAX)
2619 return -EINVAL;
4f7d2cdf
DB
2620 if (ops->ndo_set_vf_rss_query_en)
2621 err = ops->ndo_set_vf_rss_query_en(dev, ivrssq_en->vf,
2622 ivrssq_en->setting);
2623 if (err < 0)
2624 return err;
2625 }
2626
dd461d6a
HS
2627 if (tb[IFLA_VF_TRUST]) {
2628 struct ifla_vf_trust *ivt = nla_data(tb[IFLA_VF_TRUST]);
2629
ff08ddba
DC
2630 if (ivt->vf >= INT_MAX)
2631 return -EINVAL;
dd461d6a
HS
2632 err = -EOPNOTSUPP;
2633 if (ops->ndo_set_vf_trust)
2634 err = ops->ndo_set_vf_trust(dev, ivt->vf, ivt->setting);
2635 if (err < 0)
2636 return err;
2637 }
2638
cc8e27cc
EC
2639 if (tb[IFLA_VF_IB_NODE_GUID]) {
2640 struct ifla_vf_guid *ivt = nla_data(tb[IFLA_VF_IB_NODE_GUID]);
2641
ff08ddba
DC
2642 if (ivt->vf >= INT_MAX)
2643 return -EINVAL;
cc8e27cc
EC
2644 if (!ops->ndo_set_vf_guid)
2645 return -EOPNOTSUPP;
cc8e27cc
EC
2646 return handle_vf_guid(dev, ivt, IFLA_VF_IB_NODE_GUID);
2647 }
2648
2649 if (tb[IFLA_VF_IB_PORT_GUID]) {
2650 struct ifla_vf_guid *ivt = nla_data(tb[IFLA_VF_IB_PORT_GUID]);
2651
ff08ddba
DC
2652 if (ivt->vf >= INT_MAX)
2653 return -EINVAL;
cc8e27cc
EC
2654 if (!ops->ndo_set_vf_guid)
2655 return -EOPNOTSUPP;
2656
2657 return handle_vf_guid(dev, ivt, IFLA_VF_IB_PORT_GUID);
2658 }
2659
c02db8c6
CW
2660 return err;
2661}
2662
33eaf2a6
DA
2663static int do_set_master(struct net_device *dev, int ifindex,
2664 struct netlink_ext_ack *extack)
fbaec0ea 2665{
898e5061 2666 struct net_device *upper_dev = netdev_master_upper_dev_get(dev);
fbaec0ea
JP
2667 const struct net_device_ops *ops;
2668 int err;
2669
898e5061
JP
2670 if (upper_dev) {
2671 if (upper_dev->ifindex == ifindex)
fbaec0ea 2672 return 0;
898e5061 2673 ops = upper_dev->netdev_ops;
fbaec0ea 2674 if (ops->ndo_del_slave) {
898e5061 2675 err = ops->ndo_del_slave(upper_dev, dev);
fbaec0ea
JP
2676 if (err)
2677 return err;
2678 } else {
2679 return -EOPNOTSUPP;
2680 }
2681 }
2682
2683 if (ifindex) {
898e5061
JP
2684 upper_dev = __dev_get_by_index(dev_net(dev), ifindex);
2685 if (!upper_dev)
fbaec0ea 2686 return -EINVAL;
898e5061 2687 ops = upper_dev->netdev_ops;
fbaec0ea 2688 if (ops->ndo_add_slave) {
33eaf2a6 2689 err = ops->ndo_add_slave(upper_dev, dev, extack);
fbaec0ea
JP
2690 if (err)
2691 return err;
2692 } else {
2693 return -EOPNOTSUPP;
2694 }
2695 }
2696 return 0;
2697}
2698
829eb208
RP
2699static const struct nla_policy ifla_proto_down_reason_policy[IFLA_PROTO_DOWN_REASON_VALUE + 1] = {
2700 [IFLA_PROTO_DOWN_REASON_MASK] = { .type = NLA_U32 },
2701 [IFLA_PROTO_DOWN_REASON_VALUE] = { .type = NLA_U32 },
2702};
2703
2704static int do_set_proto_down(struct net_device *dev,
2705 struct nlattr *nl_proto_down,
2706 struct nlattr *nl_proto_down_reason,
2707 struct netlink_ext_ack *extack)
2708{
2709 struct nlattr *pdreason[IFLA_PROTO_DOWN_REASON_MAX + 1];
829eb208
RP
2710 unsigned long mask = 0;
2711 u32 value;
2712 bool proto_down;
2713 int err;
2714
2106efda 2715 if (!(dev->priv_flags & IFF_CHANGE_PROTO_DOWN)) {
829eb208
RP
2716 NL_SET_ERR_MSG(extack, "Protodown not supported by device");
2717 return -EOPNOTSUPP;
2718 }
2719
2720 if (nl_proto_down_reason) {
2721 err = nla_parse_nested_deprecated(pdreason,
2722 IFLA_PROTO_DOWN_REASON_MAX,
2723 nl_proto_down_reason,
2724 ifla_proto_down_reason_policy,
2725 NULL);
2726 if (err < 0)
2727 return err;
2728
2729 if (!pdreason[IFLA_PROTO_DOWN_REASON_VALUE]) {
2730 NL_SET_ERR_MSG(extack, "Invalid protodown reason value");
2731 return -EINVAL;
2732 }
2733
2734 value = nla_get_u32(pdreason[IFLA_PROTO_DOWN_REASON_VALUE]);
2735
2736 if (pdreason[IFLA_PROTO_DOWN_REASON_MASK])
2737 mask = nla_get_u32(pdreason[IFLA_PROTO_DOWN_REASON_MASK]);
2738
2739 dev_change_proto_down_reason(dev, mask, value);
2740 }
2741
2742 if (nl_proto_down) {
2743 proto_down = nla_get_u8(nl_proto_down);
2744
d467d0bc 2745 /* Don't turn off protodown if there are active reasons */
829eb208
RP
2746 if (!proto_down && dev->proto_down_reason) {
2747 NL_SET_ERR_MSG(extack, "Cannot clear protodown, active reasons");
2748 return -EBUSY;
2749 }
2750 err = dev_change_proto_down(dev,
2751 proto_down);
2752 if (err)
2753 return err;
2754 }
2755
2756 return 0;
2757}
2758
/* do_setlink() status flags: MODIFIED means some device state changed;
 * NOTIFY additionally requests a netlink notification (it includes the
 * MODIFIED bit).
 */
#define DO_SETLINK_MODIFIED	0x01
#define DO_SETLINK_NOTIFY	0x03
90f62cf3
EB
2762static int do_setlink(const struct sk_buff *skb,
2763 struct net_device *dev, struct ifinfomsg *ifm,
ddf9f970 2764 struct netlink_ext_ack *extack,
5ea08b52 2765 struct nlattr **tb, int status)
1da177e4 2766{
d314774c 2767 const struct net_device_ops *ops = dev->netdev_ops;
5ea08b52 2768 char ifname[IFNAMSIZ];
0157f60c 2769 int err;
1da177e4 2770
5ea08b52
FF
2771 if (tb[IFLA_IFNAME])
2772 nla_strscpy(ifname, tb[IFLA_IFNAME], IFNAMSIZ);
2773 else
2774 ifname[0] = '\0';
2775
7e4a8d5a 2776 if (tb[IFLA_NET_NS_PID] || tb[IFLA_NET_NS_FD] || tb[IFLA_TARGET_NETNSID]) {
5ea08b52 2777 const char *pat = ifname[0] ? ifname : NULL;
eeb85a14
AV
2778 struct net *net;
2779 int new_ifindex;
2780
2781 net = rtnl_link_get_net_capable(skb, dev_net(dev),
2782 tb, CAP_NET_ADMIN);
d8a5ec67
EB
2783 if (IS_ERR(net)) {
2784 err = PTR_ERR(net);
2785 goto errout;
2786 }
7c4f63ba 2787
eeb85a14
AV
2788 if (tb[IFLA_NEW_IFINDEX])
2789 new_ifindex = nla_get_s32(tb[IFLA_NEW_IFINDEX]);
2790 else
2791 new_ifindex = 0;
2792
96a6b93b 2793 err = __dev_change_net_namespace(dev, net, pat, new_ifindex);
d8a5ec67
EB
2794 put_net(net);
2795 if (err)
2796 goto errout;
90c325e3 2797 status |= DO_SETLINK_MODIFIED;
d8a5ec67
EB
2798 }
2799
da5e0494 2800 if (tb[IFLA_MAP]) {
1da177e4
LT
2801 struct rtnl_link_ifmap *u_map;
2802 struct ifmap k_map;
2803
d314774c 2804 if (!ops->ndo_set_config) {
1da177e4 2805 err = -EOPNOTSUPP;
0157f60c 2806 goto errout;
1da177e4
LT
2807 }
2808
2809 if (!netif_device_present(dev)) {
2810 err = -ENODEV;
0157f60c 2811 goto errout;
1da177e4 2812 }
1da177e4 2813
da5e0494 2814 u_map = nla_data(tb[IFLA_MAP]);
1da177e4
LT
2815 k_map.mem_start = (unsigned long) u_map->mem_start;
2816 k_map.mem_end = (unsigned long) u_map->mem_end;
2817 k_map.base_addr = (unsigned short) u_map->base_addr;
2818 k_map.irq = (unsigned char) u_map->irq;
2819 k_map.dma = (unsigned char) u_map->dma;
2820 k_map.port = (unsigned char) u_map->port;
2821
d314774c 2822 err = ops->ndo_set_config(dev, &k_map);
da5e0494 2823 if (err < 0)
0157f60c 2824 goto errout;
1da177e4 2825
ba998906 2826 status |= DO_SETLINK_NOTIFY;
1da177e4
LT
2827 }
2828
da5e0494 2829 if (tb[IFLA_ADDRESS]) {
70f8e78e
DM
2830 struct sockaddr *sa;
2831 int len;
2832
153711f9
WC
2833 len = sizeof(sa_family_t) + max_t(size_t, dev->addr_len,
2834 sizeof(*sa));
70f8e78e
DM
2835 sa = kmalloc(len, GFP_KERNEL);
2836 if (!sa) {
2837 err = -ENOMEM;
0157f60c 2838 goto errout;
70f8e78e
DM
2839 }
2840 sa->sa_family = dev->type;
da5e0494 2841 memcpy(sa->sa_data, nla_data(tb[IFLA_ADDRESS]),
70f8e78e 2842 dev->addr_len);
3b23a32a 2843 err = dev_set_mac_address_user(dev, sa, extack);
70f8e78e 2844 kfree(sa);
1da177e4 2845 if (err)
0157f60c 2846 goto errout;
90c325e3 2847 status |= DO_SETLINK_MODIFIED;
1da177e4
LT
2848 }
2849
da5e0494 2850 if (tb[IFLA_MTU]) {
7a4c53be 2851 err = dev_set_mtu_ext(dev, nla_get_u32(tb[IFLA_MTU]), extack);
da5e0494 2852 if (err < 0)
0157f60c 2853 goto errout;
90c325e3 2854 status |= DO_SETLINK_MODIFIED;
1da177e4
LT
2855 }
2856
cbda10fa
VD
2857 if (tb[IFLA_GROUP]) {
2858 dev_set_group(dev, nla_get_u32(tb[IFLA_GROUP]));
ba998906 2859 status |= DO_SETLINK_NOTIFY;
cbda10fa
VD
2860 }
2861
da5e0494
TG
2862 /*
2863 * Interface selected by interface index but interface
2864 * name provided implies that a name change has been
2865 * requested.
2866 */
51055be8 2867 if (ifm->ifi_index > 0 && ifname[0]) {
da5e0494
TG
2868 err = dev_change_name(dev, ifname);
2869 if (err < 0)
0157f60c 2870 goto errout;
90c325e3 2871 status |= DO_SETLINK_MODIFIED;
1da177e4
LT
2872 }
2873
0b815a1a
SH
2874 if (tb[IFLA_IFALIAS]) {
2875 err = dev_set_alias(dev, nla_data(tb[IFLA_IFALIAS]),
2876 nla_len(tb[IFLA_IFALIAS]));
2877 if (err < 0)
2878 goto errout;
ba998906 2879 status |= DO_SETLINK_NOTIFY;
0b815a1a
SH
2880 }
2881
da5e0494
TG
2882 if (tb[IFLA_BROADCAST]) {
2883 nla_memcpy(dev->broadcast, tb[IFLA_BROADCAST], dev->addr_len);
e7c3273e 2884 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
1da177e4
LT
2885 }
2886
a4abfa62
PS
2887 if (ifm->ifi_flags || ifm->ifi_change) {
2888 err = dev_change_flags(dev, rtnl_dev_combine_flags(dev, ifm),
2889 extack);
2890 if (err < 0)
2891 goto errout;
2892 }
2893
ec4ffd10
ND
2894 if (tb[IFLA_MASTER]) {
2895 err = do_set_master(dev, nla_get_u32(tb[IFLA_MASTER]), extack);
2896 if (err)
2897 goto errout;
2898 status |= DO_SETLINK_MODIFIED;
2899 }
2900
9a57247f
JP
2901 if (tb[IFLA_CARRIER]) {
2902 err = dev_change_carrier(dev, nla_get_u8(tb[IFLA_CARRIER]));
2903 if (err)
2904 goto errout;
90c325e3 2905 status |= DO_SETLINK_MODIFIED;
9a57247f
JP
2906 }
2907
5d1180fc 2908 if (tb[IFLA_TXQLEN]) {
0cd29503 2909 unsigned int value = nla_get_u32(tb[IFLA_TXQLEN]);
6a643ddb
CW
2910
2911 err = dev_change_tx_queue_len(dev, value);
2912 if (err)
2913 goto errout;
2914 status |= DO_SETLINK_MODIFIED;
5d1180fc 2915 }
b00055aa 2916
46e6b992
SH
2917 if (tb[IFLA_GSO_MAX_SIZE]) {
2918 u32 max_size = nla_get_u32(tb[IFLA_GSO_MAX_SIZE]);
2919
46e6b992
SH
2920 if (dev->gso_max_size ^ max_size) {
2921 netif_set_gso_max_size(dev, max_size);
2922 status |= DO_SETLINK_MODIFIED;
2923 }
2924 }
2925
2926 if (tb[IFLA_GSO_MAX_SEGS]) {
2927 u32 max_segs = nla_get_u32(tb[IFLA_GSO_MAX_SEGS]);
2928
46e6b992 2929 if (dev->gso_max_segs ^ max_segs) {
6d872df3 2930 netif_set_gso_max_segs(dev, max_segs);
46e6b992
SH
2931 status |= DO_SETLINK_MODIFIED;
2932 }
2933 }
2934
eac1b93c
CL
2935 if (tb[IFLA_GRO_MAX_SIZE]) {
2936 u32 gro_max_size = nla_get_u32(tb[IFLA_GRO_MAX_SIZE]);
2937
2938 if (dev->gro_max_size ^ gro_max_size) {
2939 netif_set_gro_max_size(dev, gro_max_size);
2940 status |= DO_SETLINK_MODIFIED;
2941 }
2942 }
2943
9eefedd5
XL
2944 if (tb[IFLA_GSO_IPV4_MAX_SIZE]) {
2945 u32 max_size = nla_get_u32(tb[IFLA_GSO_IPV4_MAX_SIZE]);
2946
9eefedd5
XL
2947 if (dev->gso_ipv4_max_size ^ max_size) {
2948 netif_set_gso_ipv4_max_size(dev, max_size);
2949 status |= DO_SETLINK_MODIFIED;
2950 }
2951 }
2952
2953 if (tb[IFLA_GRO_IPV4_MAX_SIZE]) {
2954 u32 gro_max_size = nla_get_u32(tb[IFLA_GRO_IPV4_MAX_SIZE]);
2955
2956 if (dev->gro_ipv4_max_size ^ gro_max_size) {
2957 netif_set_gro_ipv4_max_size(dev, gro_max_size);
2958 status |= DO_SETLINK_MODIFIED;
2959 }
2960 }
2961
da5e0494 2962 if (tb[IFLA_OPERSTATE])
93b2d4a2 2963 set_operstate(dev, nla_get_u8(tb[IFLA_OPERSTATE]));
b00055aa 2964
da5e0494 2965 if (tb[IFLA_LINKMODE]) {
1889b0e7
ND
2966 unsigned char value = nla_get_u8(tb[IFLA_LINKMODE]);
2967
1889b0e7 2968 if (dev->link_mode ^ value)
ba998906 2969 status |= DO_SETLINK_NOTIFY;
a6473fe9 2970 WRITE_ONCE(dev->link_mode, value);
b00055aa
SR
2971 }
2972
c02db8c6 2973 if (tb[IFLA_VFINFO_LIST]) {
4f7d2cdf 2974 struct nlattr *vfinfo[IFLA_VF_MAX + 1];
c02db8c6
CW
2975 struct nlattr *attr;
2976 int rem;
4f7d2cdf 2977
c02db8c6 2978 nla_for_each_nested(attr, tb[IFLA_VFINFO_LIST], rem) {
4f7d2cdf
DB
2979 if (nla_type(attr) != IFLA_VF_INFO ||
2980 nla_len(attr) < NLA_HDRLEN) {
253683bb 2981 err = -EINVAL;
c02db8c6 2982 goto errout;
253683bb 2983 }
8cb08174
JB
2984 err = nla_parse_nested_deprecated(vfinfo, IFLA_VF_MAX,
2985 attr,
2986 ifla_vf_policy,
2987 NULL);
4f7d2cdf
DB
2988 if (err < 0)
2989 goto errout;
2990 err = do_setvfinfo(dev, vfinfo);
c02db8c6
CW
2991 if (err < 0)
2992 goto errout;
ba998906 2993 status |= DO_SETLINK_NOTIFY;
c02db8c6 2994 }
ebc08a6f 2995 }
1da177e4
LT
2996 err = 0;
2997
57b61080
SF
2998 if (tb[IFLA_VF_PORTS]) {
2999 struct nlattr *port[IFLA_PORT_MAX+1];
3000 struct nlattr *attr;
3001 int vf;
3002 int rem;
3003
3004 err = -EOPNOTSUPP;
3005 if (!ops->ndo_set_vf_port)
3006 goto errout;
3007
3008 nla_for_each_nested(attr, tb[IFLA_VF_PORTS], rem) {
035d210f
DB
3009 if (nla_type(attr) != IFLA_VF_PORT ||
3010 nla_len(attr) < NLA_HDRLEN) {
3011 err = -EINVAL;
3012 goto errout;
3013 }
8cb08174
JB
3014 err = nla_parse_nested_deprecated(port, IFLA_PORT_MAX,
3015 attr,
3016 ifla_port_policy,
3017 NULL);
57b61080
SF
3018 if (err < 0)
3019 goto errout;
3020 if (!port[IFLA_PORT_VF]) {
3021 err = -EOPNOTSUPP;
3022 goto errout;
3023 }
3024 vf = nla_get_u32(port[IFLA_PORT_VF]);
3025 err = ops->ndo_set_vf_port(dev, vf, port);
3026 if (err < 0)
3027 goto errout;
ba998906 3028 status |= DO_SETLINK_NOTIFY;
57b61080
SF
3029 }
3030 }
3031 err = 0;
3032
3033 if (tb[IFLA_PORT_SELF]) {
3034 struct nlattr *port[IFLA_PORT_MAX+1];
3035
8cb08174
JB
3036 err = nla_parse_nested_deprecated(port, IFLA_PORT_MAX,
3037 tb[IFLA_PORT_SELF],
3038 ifla_port_policy, NULL);
57b61080
SF
3039 if (err < 0)
3040 goto errout;
3041
3042 err = -EOPNOTSUPP;
3043 if (ops->ndo_set_vf_port)
3044 err = ops->ndo_set_vf_port(dev, PORT_SELF_VF, port);
3045 if (err < 0)
3046 goto errout;
ba998906 3047 status |= DO_SETLINK_NOTIFY;
57b61080 3048 }
f8ff182c
TG
3049
3050 if (tb[IFLA_AF_SPEC]) {
3051 struct nlattr *af;
3052 int rem;
3053
3054 nla_for_each_nested(af, tb[IFLA_AF_SPEC], rem) {
3055 const struct rtnl_af_ops *af_ops;
3056
058c8d59 3057 BUG_ON(!(af_ops = rtnl_af_lookup(nla_type(af))));
f8ff182c 3058
3583a4e8 3059 err = af_ops->set_link_af(dev, af, extack);
a100243d 3060 if (err < 0)
f8ff182c
TG
3061 goto errout;
3062
ba998906 3063 status |= DO_SETLINK_NOTIFY;
f8ff182c
TG
3064 }
3065 }
57b61080
SF
3066 err = 0;
3067
829eb208
RP
3068 if (tb[IFLA_PROTO_DOWN] || tb[IFLA_PROTO_DOWN_REASON]) {
3069 err = do_set_proto_down(dev, tb[IFLA_PROTO_DOWN],
3070 tb[IFLA_PROTO_DOWN_REASON], extack);
88d6378b
AK
3071 if (err)
3072 goto errout;
3073 status |= DO_SETLINK_NOTIFY;
3074 }
3075
d1fdd913
BB
3076 if (tb[IFLA_XDP]) {
3077 struct nlattr *xdp[IFLA_XDP_MAX + 1];
85de8576 3078 u32 xdp_flags = 0;
d1fdd913 3079
8cb08174
JB
3080 err = nla_parse_nested_deprecated(xdp, IFLA_XDP_MAX,
3081 tb[IFLA_XDP],
3082 ifla_xdp_policy, NULL);
d1fdd913
BB
3083 if (err < 0)
3084 goto errout;
3085
58038695 3086 if (xdp[IFLA_XDP_ATTACHED] || xdp[IFLA_XDP_PROG_ID]) {
262d8625
BB
3087 err = -EINVAL;
3088 goto errout;
3089 }
85de8576
DB
3090
3091 if (xdp[IFLA_XDP_FLAGS]) {
3092 xdp_flags = nla_get_u32(xdp[IFLA_XDP_FLAGS]);
3093 if (xdp_flags & ~XDP_FLAGS_MASK) {
3094 err = -EINVAL;
3095 goto errout;
3096 }
ee5d032f 3097 if (hweight32(xdp_flags & XDP_FLAGS_MODES) > 1) {
0489df9a
DB
3098 err = -EINVAL;
3099 goto errout;
3100 }
85de8576
DB
3101 }
3102
d1fdd913 3103 if (xdp[IFLA_XDP_FD]) {
92234c8f
THJ
3104 int expected_fd = -1;
3105
3106 if (xdp_flags & XDP_FLAGS_REPLACE) {
3107 if (!xdp[IFLA_XDP_EXPECTED_FD]) {
3108 err = -EINVAL;
3109 goto errout;
3110 }
3111 expected_fd =
3112 nla_get_s32(xdp[IFLA_XDP_EXPECTED_FD]);
3113 }
3114
ddf9f970 3115 err = dev_change_xdp_fd(dev, extack,
85de8576 3116 nla_get_s32(xdp[IFLA_XDP_FD]),
92234c8f 3117 expected_fd,
85de8576 3118 xdp_flags);
d1fdd913
BB
3119 if (err)
3120 goto errout;
3121 status |= DO_SETLINK_NOTIFY;
3122 }
3123 }
3124
0157f60c 3125errout:
ba998906 3126 if (status & DO_SETLINK_MODIFIED) {
64ff90cc 3127 if ((status & DO_SETLINK_NOTIFY) == DO_SETLINK_NOTIFY)
ba998906
ND
3128 netdev_state_change(dev);
3129
3130 if (err < 0)
3131 net_warn_ratelimited("A link change request failed with some changes committed already. Interface %s may have been left with an inconsistent configuration, please check.\n",
3132 dev->name);
3133 }
da5e0494 3134
0157f60c
PM
3135 return err;
3136}
1da177e4 3137
cc6090e9 3138static struct net_device *rtnl_dev_get(struct net *net,
5ea08b52
FF
3139 struct nlattr *tb[])
3140{
3141 char ifname[ALTIFNAMSIZ];
3142
3143 if (tb[IFLA_IFNAME])
3144 nla_strscpy(ifname, tb[IFLA_IFNAME], IFNAMSIZ);
3145 else if (tb[IFLA_ALT_IFNAME])
3146 nla_strscpy(ifname, tb[IFLA_ALT_IFNAME], ALTIFNAMSIZ);
3147 else
3148 return NULL;
cc6090e9
JP
3149
3150 return __dev_get_by_name(net, ifname);
3151}
3152
/* RTM_SETLINK handler: modify attributes of an existing device.
 * The target may be selected by ifindex, IFLA_IFNAME or IFLA_ALT_IFNAME.
 * Runs with RTNL held (doit path).
 */
static int rtnl_setlink(struct sk_buff *skb, struct nlmsghdr *nlh,
			struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct ifinfomsg *ifm;
	struct net_device *dev;
	int err;
	struct nlattr *tb[IFLA_MAX+1];

	err = nlmsg_parse_deprecated(nlh, sizeof(*ifm), tb, IFLA_MAX,
				     ifla_policy, extack);
	if (err < 0)
		goto errout;

	/* Reject conflicting netns selectors (PID vs FD vs NETNSID). */
	err = rtnl_ensure_unique_netns(tb, extack, false);
	if (err < 0)
		goto errout;

	err = -EINVAL;
	ifm = nlmsg_data(nlh);
	if (ifm->ifi_index > 0)
		dev = __dev_get_by_index(net, ifm->ifi_index);
	else if (tb[IFLA_IFNAME] || tb[IFLA_ALT_IFNAME])
		dev = rtnl_dev_get(net, tb);
	else
		goto errout;

	if (dev == NULL) {
		err = -ENODEV;
		goto errout;
	}

	/* Validate attributes against the device before applying any. */
	err = validate_linkmsg(dev, tb, extack);
	if (err < 0)
		goto errout;

	err = do_setlink(skb, dev, ifm, extack, tb, 0);
errout:
	return err;
}
3193
/* Delete every device belonging to @group.
 *
 * Two passes: first verify that ALL members support ->dellink (so we
 * never delete a partial set), then collect them onto one kill list and
 * unregister them in a single batch.  Returns -ENODEV if the group is
 * empty, -EPERM for group 0 (the default group).
 */
static int rtnl_group_dellink(const struct net *net, int group)
{
	struct net_device *dev, *aux;
	LIST_HEAD(list_kill);
	bool found = false;

	if (!group)
		return -EPERM;

	/* Pass 1: check-only; bail out before touching anything. */
	for_each_netdev(net, dev) {
		if (dev->group == group) {
			const struct rtnl_link_ops *ops;

			found = true;
			ops = dev->rtnl_link_ops;
			if (!ops || !ops->dellink)
				return -EOPNOTSUPP;
		}
	}

	if (!found)
		return -ENODEV;

	/* Pass 2: queue all members, then batch-unregister. */
	for_each_netdev_safe(net, dev, aux) {
		if (dev->group == group) {
			const struct rtnl_link_ops *ops;

			ops = dev->rtnl_link_ops;
			ops->dellink(dev, &list_kill);
		}
	}
	unregister_netdevice_many(&list_kill);

	return 0;
}
3229
/* Delete a single device via its rtnl_link_ops, notifying with the
 * requester's @portid and original @nlh so the echo/notification carries
 * the right correlation data.  -EOPNOTSUPP if the device has no
 * ->dellink (i.e. was not created through rtnl_link_ops).
 */
int rtnl_delete_link(struct net_device *dev, u32 portid, const struct nlmsghdr *nlh)
{
	const struct rtnl_link_ops *ops;
	LIST_HEAD(list_kill);

	ops = dev->rtnl_link_ops;
	if (!ops || !ops->dellink)
		return -EOPNOTSUPP;

	ops->dellink(dev, &list_kill);
	unregister_netdevice_many_notify(&list_kill, portid, nlh);

	return 0;
}
EXPORT_SYMBOL_GPL(rtnl_delete_link);
3245
/* RTM_DELLINK handler: delete a device selected by ifindex, name,
 * alt-name, or group.  IFLA_TARGET_NETNSID lets the caller address a
 * device in another namespace (capability-checked).
 */
static int rtnl_dellink(struct sk_buff *skb, struct nlmsghdr *nlh,
			struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	u32 portid = NETLINK_CB(skb).portid;
	struct net *tgt_net = net;
	struct net_device *dev = NULL;
	struct ifinfomsg *ifm;
	struct nlattr *tb[IFLA_MAX+1];
	int err;
	int netnsid = -1;

	err = nlmsg_parse_deprecated(nlh, sizeof(*ifm), tb, IFLA_MAX,
				     ifla_policy, extack);
	if (err < 0)
		return err;

	err = rtnl_ensure_unique_netns(tb, extack, true);
	if (err < 0)
		return err;

	if (tb[IFLA_TARGET_NETNSID]) {
		netnsid = nla_get_s32(tb[IFLA_TARGET_NETNSID]);
		tgt_net = rtnl_get_net_ns_capable(NETLINK_CB(skb).sk, netnsid);
		if (IS_ERR(tgt_net))
			return PTR_ERR(tgt_net);
	}

	err = -EINVAL;
	ifm = nlmsg_data(nlh);
	if (ifm->ifi_index > 0)
		dev = __dev_get_by_index(tgt_net, ifm->ifi_index);
	else if (tb[IFLA_IFNAME] || tb[IFLA_ALT_IFNAME])
		/* NOTE(review): name lookup uses the caller's netns (net)
		 * while index lookup uses tgt_net — confirm this asymmetry
		 * is intentional for the TARGET_NETNSID + name combination.
		 */
		dev = rtnl_dev_get(net, tb);
	else if (tb[IFLA_GROUP])
		err = rtnl_group_dellink(tgt_net, nla_get_u32(tb[IFLA_GROUP]));
	else
		goto out;

	if (!dev) {
		/* Only report ENODEV when a specific device was requested;
		 * the group path has already set err above.
		 */
		if (tb[IFLA_IFNAME] || tb[IFLA_ALT_IFNAME] || ifm->ifi_index > 0)
			err = -ENODEV;

		goto out;
	}

	err = rtnl_delete_link(dev, portid, nlh);

out:
	if (netnsid >= 0)
		put_net(tgt_net);

	return err;
}
3300
/* Apply the flags requested in @ifm (if any) and emit the appropriate
 * RTM_NEWLINK notification.  For a link that has not yet been announced
 * (RTNL_LINK_INITIALIZING) every flag is reported as changed (~0U) so
 * userspace sees the full initial state; otherwise only the actual
 * flag delta is reported.
 */
int rtnl_configure_link(struct net_device *dev, const struct ifinfomsg *ifm,
			u32 portid, const struct nlmsghdr *nlh)
{
	unsigned int old_flags;
	int err;

	old_flags = dev->flags;
	if (ifm && (ifm->ifi_flags || ifm->ifi_change)) {
		err = __dev_change_flags(dev, rtnl_dev_combine_flags(dev, ifm),
					 NULL);
		if (err < 0)
			return err;
	}

	if (dev->rtnl_link_state == RTNL_LINK_INITIALIZED) {
		__dev_notify_flags(dev, old_flags, (old_flags ^ dev->flags), portid, nlh);
	} else {
		/* First announcement: mark initialized and report all flags. */
		dev->rtnl_link_state = RTNL_LINK_INITIALIZED;
		__dev_notify_flags(dev, old_flags, ~0U, portid, nlh);
	}
	return 0;
}
EXPORT_SYMBOL(rtnl_configure_link);
3324
/* Allocate and pre-configure a new net_device from netlink attributes.
 *
 * Queue counts come from IFLA_NUM_TX/RX_QUEUES, the ops' getters, or
 * default to 1, and are bounded to [1, 4096].  Allocation goes through
 * ops->alloc when provided, else alloc_netdev_mqs() with ops->setup.
 * On success the device is owned by the caller (not yet registered);
 * on failure an ERR_PTR is returned and nothing is leaked.
 */
struct net_device *rtnl_create_link(struct net *net, const char *ifname,
				    unsigned char name_assign_type,
				    const struct rtnl_link_ops *ops,
				    struct nlattr *tb[],
				    struct netlink_ext_ack *extack)
{
	struct net_device *dev;
	unsigned int num_tx_queues = 1;
	unsigned int num_rx_queues = 1;
	int err;

	if (tb[IFLA_NUM_TX_QUEUES])
		num_tx_queues = nla_get_u32(tb[IFLA_NUM_TX_QUEUES]);
	else if (ops->get_num_tx_queues)
		num_tx_queues = ops->get_num_tx_queues();

	if (tb[IFLA_NUM_RX_QUEUES])
		num_rx_queues = nla_get_u32(tb[IFLA_NUM_RX_QUEUES]);
	else if (ops->get_num_rx_queues)
		num_rx_queues = ops->get_num_rx_queues();

	if (num_tx_queues < 1 || num_tx_queues > 4096) {
		NL_SET_ERR_MSG(extack, "Invalid number of transmit queues");
		return ERR_PTR(-EINVAL);
	}

	if (num_rx_queues < 1 || num_rx_queues > 4096) {
		NL_SET_ERR_MSG(extack, "Invalid number of receive queues");
		return ERR_PTR(-EINVAL);
	}

	if (ops->alloc) {
		dev = ops->alloc(tb, ifname, name_assign_type,
				 num_tx_queues, num_rx_queues);
		if (IS_ERR(dev))
			return dev;
	} else {
		dev = alloc_netdev_mqs(ops->priv_size, ifname,
				       name_assign_type, ops->setup,
				       num_tx_queues, num_rx_queues);
	}

	if (!dev)
		return ERR_PTR(-ENOMEM);

	err = validate_linkmsg(dev, tb, extack);
	if (err < 0) {
		free_netdev(dev);
		return ERR_PTR(err);
	}

	dev_net_set(dev, net);
	dev->rtnl_link_ops = ops;
	/* Not announced to userspace yet; rtnl_configure_link() flips this. */
	dev->rtnl_link_state = RTNL_LINK_INITIALIZING;

	if (tb[IFLA_MTU]) {
		u32 mtu = nla_get_u32(tb[IFLA_MTU]);

		err = dev_validate_mtu(dev, mtu, extack);
		if (err) {
			free_netdev(dev);
			return ERR_PTR(err);
		}
		dev->mtu = mtu;
	}
	if (tb[IFLA_ADDRESS]) {
		__dev_addr_set(dev, nla_data(tb[IFLA_ADDRESS]),
			       nla_len(tb[IFLA_ADDRESS]));
		dev->addr_assign_type = NET_ADDR_SET;
	}
	if (tb[IFLA_BROADCAST])
		memcpy(dev->broadcast, nla_data(tb[IFLA_BROADCAST]),
		       nla_len(tb[IFLA_BROADCAST]));
	if (tb[IFLA_TXQLEN])
		dev->tx_queue_len = nla_get_u32(tb[IFLA_TXQLEN]);
	if (tb[IFLA_OPERSTATE])
		set_operstate(dev, nla_get_u8(tb[IFLA_OPERSTATE]));
	if (tb[IFLA_LINKMODE])
		dev->link_mode = nla_get_u8(tb[IFLA_LINKMODE]);
	if (tb[IFLA_GROUP])
		dev_set_group(dev, nla_get_u32(tb[IFLA_GROUP]));
	if (tb[IFLA_GSO_MAX_SIZE])
		netif_set_gso_max_size(dev, nla_get_u32(tb[IFLA_GSO_MAX_SIZE]));
	if (tb[IFLA_GSO_MAX_SEGS])
		netif_set_gso_max_segs(dev, nla_get_u32(tb[IFLA_GSO_MAX_SEGS]));
	if (tb[IFLA_GRO_MAX_SIZE])
		netif_set_gro_max_size(dev, nla_get_u32(tb[IFLA_GRO_MAX_SIZE]));
	if (tb[IFLA_GSO_IPV4_MAX_SIZE])
		netif_set_gso_ipv4_max_size(dev, nla_get_u32(tb[IFLA_GSO_IPV4_MAX_SIZE]));
	if (tb[IFLA_GRO_IPV4_MAX_SIZE])
		netif_set_gro_ipv4_max_size(dev, nla_get_u32(tb[IFLA_GRO_IPV4_MAX_SIZE]));

	return dev;
}
EXPORT_SYMBOL(rtnl_create_link);
e7199288 3420
/* Apply the same RTM_NEWLINK change to every device in @group.
 * Stops at the first failure; earlier members keep the change
 * (no rollback).  Uses the _safe iterator because do_setlink() may
 * move a device to another netns, unlinking it from this list.
 */
static int rtnl_group_changelink(const struct sk_buff *skb,
				 struct net *net, int group,
				 struct ifinfomsg *ifm,
				 struct netlink_ext_ack *extack,
				 struct nlattr **tb)
{
	struct net_device *dev, *aux;
	int err;

	for_each_netdev_safe(net, dev, aux) {
		if (dev->group == group) {
			err = validate_linkmsg(dev, tb, extack);
			if (err < 0)
				return err;
			err = do_setlink(skb, dev, ifm, extack, tb, 0);
			if (err < 0)
				return err;
		}
	}

	return 0;
}
3443
/* Create a brand-new link for RTM_NEWLINK (no existing device matched).
 *
 * IFLA_LINK_NETNSID support: the device is created in link_net (where
 * its lower layer lives) and then moved to dest_net.  Both namespace
 * references are dropped on every exit path via the "out" label; the
 * "out_unregister" path additionally tears down a device that was
 * registered but failed later configuration.
 */
static int rtnl_newlink_create(struct sk_buff *skb, struct ifinfomsg *ifm,
			       const struct rtnl_link_ops *ops,
			       const struct nlmsghdr *nlh,
			       struct nlattr **tb, struct nlattr **data,
			       struct netlink_ext_ack *extack)
{
	unsigned char name_assign_type = NET_NAME_USER;
	struct net *net = sock_net(skb->sk);
	u32 portid = NETLINK_CB(skb).portid;
	struct net *dest_net, *link_net;
	struct net_device *dev;
	char ifname[IFNAMSIZ];
	int err;

	if (!ops->alloc && !ops->setup)
		return -EOPNOTSUPP;

	if (tb[IFLA_IFNAME]) {
		nla_strscpy(ifname, tb[IFLA_IFNAME], IFNAMSIZ);
	} else {
		/* No name given: use "<kind>%d" template, kernel enumerates. */
		snprintf(ifname, IFNAMSIZ, "%s%%d", ops->kind);
		name_assign_type = NET_NAME_ENUM;
	}

	dest_net = rtnl_link_get_net_capable(skb, net, tb, CAP_NET_ADMIN);
	if (IS_ERR(dest_net))
		return PTR_ERR(dest_net);

	if (tb[IFLA_LINK_NETNSID]) {
		int id = nla_get_s32(tb[IFLA_LINK_NETNSID]);

		link_net = get_net_ns_by_id(dest_net, id);
		if (!link_net) {
			NL_SET_ERR_MSG(extack, "Unknown network namespace id");
			err = -EINVAL;
			goto out;
		}
		err = -EPERM;
		if (!netlink_ns_capable(skb, link_net->user_ns, CAP_NET_ADMIN))
			goto out;
	} else {
		link_net = NULL;
	}

	dev = rtnl_create_link(link_net ? : dest_net, ifname,
			       name_assign_type, ops, tb, extack);
	if (IS_ERR(dev)) {
		err = PTR_ERR(dev);
		goto out;
	}

	dev->ifindex = ifm->ifi_index;

	if (ops->newlink)
		err = ops->newlink(link_net ? : net, dev, tb, data, extack);
	else
		err = register_netdevice(dev);
	if (err < 0) {
		/* Registration never happened; plain free is correct. */
		free_netdev(dev);
		goto out;
	}

	err = rtnl_configure_link(dev, ifm, portid, nlh);
	if (err < 0)
		goto out_unregister;
	if (link_net) {
		err = dev_change_net_namespace(dev, dest_net, ifname);
		if (err < 0)
			goto out_unregister;
	}
	if (tb[IFLA_MASTER]) {
		err = do_set_master(dev, nla_get_u32(tb[IFLA_MASTER]), extack);
		if (err)
			goto out_unregister;
	}
out:
	if (link_net)
		put_net(link_net);
	put_net(dest_net);
	return err;
out_unregister:
	/* Device is registered: use the ops' teardown if it has one. */
	if (ops->newlink) {
		LIST_HEAD(list_kill);

		ops->dellink(dev, &list_kill);
		unregister_netdevice_many(&list_kill);
	} else {
		unregister_netdevice(dev);
	}
	goto out;
}
63105e83 3535
/* Scratch attribute tables for RTM_NEWLINK processing.  Heap-allocated
 * in rtnl_newlink() because the three arrays together are too large for
 * the kernel stack.
 */
struct rtnl_newlink_tbs {
	struct nlattr *tb[IFLA_MAX + 1];		/* top-level IFLA_* attrs */
	struct nlattr *attr[RTNL_MAX_TYPE + 1];		/* IFLA_INFO_DATA, per-kind */
	struct nlattr *slave_attr[RTNL_SLAVE_MAX_TYPE + 1]; /* IFLA_INFO_SLAVE_DATA */
};
3541
a2939745 3542static int __rtnl_newlink(struct sk_buff *skb, struct nlmsghdr *nlh,
c92bf26c
JK
3543 struct rtnl_newlink_tbs *tbs,
3544 struct netlink_ext_ack *extack)
38f7b870 3545{
420d0318 3546 struct nlattr *linkinfo[IFLA_INFO_MAX + 1];
c92bf26c 3547 struct nlattr ** const tb = tbs->tb;
c6f6f244
ED
3548 const struct rtnl_link_ops *m_ops;
3549 struct net_device *master_dev;
3b1e0a65 3550 struct net *net = sock_net(skb->sk);
38f7b870 3551 const struct rtnl_link_ops *ops;
420d0318
JK
3552 struct nlattr **slave_data;
3553 char kind[MODULE_NAME_LEN];
38f7b870
PM
3554 struct net_device *dev;
3555 struct ifinfomsg *ifm;
420d0318 3556 struct nlattr **data;
ef2a7c90 3557 bool link_specified;
38f7b870
PM
3558 int err;
3559
95a5afca 3560#ifdef CONFIG_MODULES
38f7b870 3561replay:
8072f085 3562#endif
8cb08174
JB
3563 err = nlmsg_parse_deprecated(nlh, sizeof(*ifm), tb, IFLA_MAX,
3564 ifla_policy, extack);
38f7b870
PM
3565 if (err < 0)
3566 return err;
3567
4ff66cae
CB
3568 err = rtnl_ensure_unique_netns(tb, extack, false);
3569 if (err < 0)
3570 return err;
3571
38f7b870 3572 ifm = nlmsg_data(nlh);
ef2a7c90
FF
3573 if (ifm->ifi_index > 0) {
3574 link_specified = true;
881d966b 3575 dev = __dev_get_by_index(net, ifm->ifi_index);
30188bd7
IS
3576 } else if (ifm->ifi_index < 0) {
3577 NL_SET_ERR_MSG(extack, "ifindex can't be negative");
3578 return -EINVAL;
ef2a7c90
FF
3579 } else if (tb[IFLA_IFNAME] || tb[IFLA_ALT_IFNAME]) {
3580 link_specified = true;
5ea08b52 3581 dev = rtnl_dev_get(net, tb);
ef2a7c90
FF
3582 } else {
3583 link_specified = false;
7af12cba 3584 dev = NULL;
ef2a7c90 3585 }
38f7b870 3586
c6f6f244
ED
3587 master_dev = NULL;
3588 m_ops = NULL;
ba7d49b1
JP
3589 if (dev) {
3590 master_dev = netdev_master_upper_dev_get(dev);
3591 if (master_dev)
3592 m_ops = master_dev->rtnl_link_ops;
3593 }
3594
38f7b870 3595 if (tb[IFLA_LINKINFO]) {
8cb08174
JB
3596 err = nla_parse_nested_deprecated(linkinfo, IFLA_INFO_MAX,
3597 tb[IFLA_LINKINFO],
3598 ifla_info_policy, NULL);
38f7b870
PM
3599 if (err < 0)
3600 return err;
3601 } else
3602 memset(linkinfo, 0, sizeof(linkinfo));
3603
3604 if (linkinfo[IFLA_INFO_KIND]) {
872f6903 3605 nla_strscpy(kind, linkinfo[IFLA_INFO_KIND], sizeof(kind));
38f7b870
PM
3606 ops = rtnl_link_ops_get(kind);
3607 } else {
3608 kind[0] = '\0';
3609 ops = NULL;
3610 }
3611
420d0318
JK
3612 data = NULL;
3613 if (ops) {
3614 if (ops->maxtype > RTNL_MAX_TYPE)
3615 return -EINVAL;
ccf8dbcd 3616
420d0318 3617 if (ops->maxtype && linkinfo[IFLA_INFO_DATA]) {
c92bf26c 3618 err = nla_parse_nested_deprecated(tbs->attr, ops->maxtype,
8cb08174
JB
3619 linkinfo[IFLA_INFO_DATA],
3620 ops->policy, extack);
420d0318
JK
3621 if (err < 0)
3622 return err;
c92bf26c 3623 data = tbs->attr;
38f7b870 3624 }
420d0318
JK
3625 if (ops->validate) {
3626 err = ops->validate(tb, data, extack);
3627 if (err < 0)
3628 return err;
3629 }
3630 }
38f7b870 3631
420d0318
JK
3632 slave_data = NULL;
3633 if (m_ops) {
3634 if (m_ops->slave_maxtype > RTNL_SLAVE_MAX_TYPE)
3635 return -EINVAL;
ccf8dbcd 3636
420d0318
JK
3637 if (m_ops->slave_maxtype &&
3638 linkinfo[IFLA_INFO_SLAVE_DATA]) {
c92bf26c 3639 err = nla_parse_nested_deprecated(tbs->slave_attr,
8cb08174
JB
3640 m_ops->slave_maxtype,
3641 linkinfo[IFLA_INFO_SLAVE_DATA],
3642 m_ops->slave_policy,
3643 extack);
420d0318
JK
3644 if (err < 0)
3645 return err;
c92bf26c 3646 slave_data = tbs->slave_attr;
ba7d49b1 3647 }
420d0318 3648 }
ba7d49b1 3649
420d0318
JK
3650 if (dev) {
3651 int status = 0;
38f7b870 3652
420d0318
JK
3653 if (nlh->nlmsg_flags & NLM_F_EXCL)
3654 return -EEXIST;
3655 if (nlh->nlmsg_flags & NLM_F_REPLACE)
3656 return -EOPNOTSUPP;
38f7b870 3657
89da780a
XL
3658 err = validate_linkmsg(dev, tb, extack);
3659 if (err < 0)
3660 return err;
3661
420d0318
JK
3662 if (linkinfo[IFLA_INFO_DATA]) {
3663 if (!ops || ops != dev->rtnl_link_ops ||
3664 !ops->changelink)
3665 return -EOPNOTSUPP;
38f7b870 3666
420d0318
JK
3667 err = ops->changelink(dev, tb, data, extack);
3668 if (err < 0)
3669 return err;
3670 status |= DO_SETLINK_NOTIFY;
3671 }
ba7d49b1 3672
420d0318
JK
3673 if (linkinfo[IFLA_INFO_SLAVE_DATA]) {
3674 if (!m_ops || !m_ops->slave_changelink)
3675 return -EOPNOTSUPP;
ba7d49b1 3676
420d0318
JK
3677 err = m_ops->slave_changelink(master_dev, dev, tb,
3678 slave_data, extack);
3679 if (err < 0)
3680 return err;
3681 status |= DO_SETLINK_NOTIFY;
38f7b870
PM
3682 }
3683
5ea08b52 3684 return do_setlink(skb, dev, ifm, extack, tb, status);
420d0318
JK
3685 }
3686
3687 if (!(nlh->nlmsg_flags & NLM_F_CREATE)) {
ef2a7c90
FF
3688 /* No dev found and NLM_F_CREATE not set. Requested dev does not exist,
3689 * or it's for a group
3690 */
3691 if (link_specified)
3692 return -ENODEV;
3693 if (tb[IFLA_GROUP])
420d0318 3694 return rtnl_group_changelink(skb, net,
ffa934f1 3695 nla_get_u32(tb[IFLA_GROUP]),
ddf9f970 3696 ifm, extack, tb);
6f37c9f9 3697 return -ENODEV;
420d0318 3698 }
38f7b870 3699
420d0318
JK
3700 if (tb[IFLA_MAP] || tb[IFLA_PROTINFO])
3701 return -EOPNOTSUPP;
38f7b870 3702
420d0318 3703 if (!ops) {
95a5afca 3704#ifdef CONFIG_MODULES
420d0318
JK
3705 if (kind[0]) {
3706 __rtnl_unlock();
3707 request_module("rtnl-link-%s", kind);
3708 rtnl_lock();
3709 ops = rtnl_link_ops_get(kind);
3710 if (ops)
3711 goto replay;
38f7b870 3712 }
420d0318
JK
3713#endif
3714 NL_SET_ERR_MSG(extack, "Unknown device type");
3715 return -EOPNOTSUPP;
3716 }
38f7b870 3717
d88e136c 3718 return rtnl_newlink_create(skb, ifm, ops, nlh, tb, data, extack);
63105e83
JK
3719}
3720
a2939745
JK
3721static int rtnl_newlink(struct sk_buff *skb, struct nlmsghdr *nlh,
3722 struct netlink_ext_ack *extack)
3723{
c92bf26c 3724 struct rtnl_newlink_tbs *tbs;
a2939745
JK
3725 int ret;
3726
c92bf26c
JK
3727 tbs = kmalloc(sizeof(*tbs), GFP_KERNEL);
3728 if (!tbs)
a2939745
JK
3729 return -ENOMEM;
3730
c92bf26c
JK
3731 ret = __rtnl_newlink(skb, nlh, tbs, extack);
3732 kfree(tbs);
a2939745
JK
3733 return ret;
3734}
3735
/* Validate an RTM_GETLINK request header and attributes.
 *
 * Non-strict sockets get a plain lenient parse for backward
 * compatibility.  Strict sockets additionally require zeroed header
 * padding/flags and accept only the attributes that select a device or
 * filter the dump (IFNAME, ALT_IFNAME, EXT_MASK, TARGET_NETNSID).
 */
static int rtnl_valid_getlink_req(struct sk_buff *skb,
				  const struct nlmsghdr *nlh,
				  struct nlattr **tb,
				  struct netlink_ext_ack *extack)
{
	struct ifinfomsg *ifm;
	int i, err;

	if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ifm))) {
		NL_SET_ERR_MSG(extack, "Invalid header for get link");
		return -EINVAL;
	}

	if (!netlink_strict_get_check(skb))
		return nlmsg_parse_deprecated(nlh, sizeof(*ifm), tb, IFLA_MAX,
					      ifla_policy, extack);

	ifm = nlmsg_data(nlh);
	if (ifm->__ifi_pad || ifm->ifi_type || ifm->ifi_flags ||
	    ifm->ifi_change) {
		NL_SET_ERR_MSG(extack, "Invalid values in header for get link request");
		return -EINVAL;
	}

	err = nlmsg_parse_deprecated_strict(nlh, sizeof(*ifm), tb, IFLA_MAX,
					    ifla_policy, extack);
	if (err)
		return err;

	/* Whitelist of attributes that make sense on a GET. */
	for (i = 0; i <= IFLA_MAX; i++) {
		if (!tb[i])
			continue;

		switch (i) {
		case IFLA_IFNAME:
		case IFLA_ALT_IFNAME:
		case IFLA_EXT_MASK:
		case IFLA_TARGET_NETNSID:
			break;
		default:
			NL_SET_ERR_MSG(extack, "Unsupported attribute in get link request");
			return -EINVAL;
		}
	}

	return 0;
}
3783
/* RTM_GETLINK handler: build and unicast a single-link RTM_NEWLINK
 * reply for the device selected by ifindex, name, or alt-name,
 * optionally in another netns via IFLA_TARGET_NETNSID.
 */
static int rtnl_getlink(struct sk_buff *skb, struct nlmsghdr *nlh,
			struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct net *tgt_net = net;
	struct ifinfomsg *ifm;
	struct nlattr *tb[IFLA_MAX+1];
	struct net_device *dev = NULL;
	struct sk_buff *nskb;
	int netnsid = -1;
	int err;
	u32 ext_filter_mask = 0;

	err = rtnl_valid_getlink_req(skb, nlh, tb, extack);
	if (err < 0)
		return err;

	err = rtnl_ensure_unique_netns(tb, extack, true);
	if (err < 0)
		return err;

	if (tb[IFLA_TARGET_NETNSID]) {
		netnsid = nla_get_s32(tb[IFLA_TARGET_NETNSID]);
		tgt_net = rtnl_get_net_ns_capable(NETLINK_CB(skb).sk, netnsid);
		if (IS_ERR(tgt_net))
			return PTR_ERR(tgt_net);
	}

	if (tb[IFLA_EXT_MASK])
		ext_filter_mask = nla_get_u32(tb[IFLA_EXT_MASK]);

	err = -EINVAL;
	ifm = nlmsg_data(nlh);
	if (ifm->ifi_index > 0)
		dev = __dev_get_by_index(tgt_net, ifm->ifi_index);
	else if (tb[IFLA_IFNAME] || tb[IFLA_ALT_IFNAME])
		dev = rtnl_dev_get(tgt_net, tb);
	else
		goto out;

	err = -ENODEV;
	if (dev == NULL)
		goto out;

	err = -ENOBUFS;
	nskb = nlmsg_new_large(if_nlmsg_size(dev, ext_filter_mask));
	if (nskb == NULL)
		goto out;

	/* Synchronize the carrier state so we don't report a state
	 * that we're not actually going to honour immediately; if
	 * the driver just did a carrier off->on transition, we can
	 * only TX if link watch work has run, but without this we'd
	 * already report carrier on, even if it doesn't work yet.
	 */
	linkwatch_sync_dev(dev);

	err = rtnl_fill_ifinfo(nskb, dev, net,
			       RTM_NEWLINK, NETLINK_CB(skb).portid,
			       nlh->nlmsg_seq, 0, 0, ext_filter_mask,
			       0, NULL, 0, netnsid, GFP_KERNEL);
	if (err < 0) {
		/* -EMSGSIZE implies BUG in if_nlmsg_size */
		WARN_ON(err == -EMSGSIZE);
		kfree_skb(nskb);
	} else
		err = rtnl_unicast(nskb, net, NETLINK_CB(skb).portid);
out:
	if (netnsid >= 0)
		put_net(tgt_net);

	return err;
}
711e2c33 3857
/* Add (RTM_NEWLINKPROP) or remove (RTM_DELLINKPROP) one alternative
 * interface name carried in @attr.  Sets *changed on success so the
 * caller can emit a single state-change notification.
 *
 * On a successful create, ownership of the strdup'd name transfers to
 * the name node — alt_ifname is NULLed so the trailing kfree() is a
 * no-op in that case.
 */
static int rtnl_alt_ifname(int cmd, struct net_device *dev, struct nlattr *attr,
			   bool *changed, struct netlink_ext_ack *extack)
{
	char *alt_ifname;
	size_t size;
	int err;

	err = nla_validate(attr, attr->nla_len, IFLA_MAX, ifla_policy, extack);
	if (err)
		return err;

	if (cmd == RTM_NEWLINKPROP) {
		/* The whole property list must still fit in one netlink
		 * attribute (u16 length) after the addition.
		 */
		size = rtnl_prop_list_size(dev);
		size += nla_total_size(ALTIFNAMSIZ);
		if (size >= U16_MAX) {
			NL_SET_ERR_MSG(extack,
				       "effective property list too long");
			return -EINVAL;
		}
	}

	alt_ifname = nla_strdup(attr, GFP_KERNEL_ACCOUNT);
	if (!alt_ifname)
		return -ENOMEM;

	if (cmd == RTM_NEWLINKPROP) {
		err = netdev_name_node_alt_create(dev, alt_ifname);
		if (!err)
			alt_ifname = NULL;	/* ownership transferred */
	} else if (cmd == RTM_DELLINKPROP) {
		err = netdev_name_node_alt_destroy(dev, alt_ifname);
	} else {
		WARN_ON_ONCE(1);
		err = -EINVAL;
	}

	kfree(alt_ifname);
	if (!err)
		*changed = true;
	return err;
}
3899
/* Common handler for RTM_NEWLINKPROP / RTM_DELLINKPROP: walk the
 * IFLA_PROP_LIST and apply each IFLA_ALT_IFNAME entry.  One
 * netdev_state_change() is issued at the end if anything changed.
 */
static int rtnl_linkprop(int cmd, struct sk_buff *skb, struct nlmsghdr *nlh,
			 struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct nlattr *tb[IFLA_MAX + 1];
	struct net_device *dev;
	struct ifinfomsg *ifm;
	bool changed = false;
	struct nlattr *attr;
	int err, rem;

	err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFLA_MAX, ifla_policy, extack);
	if (err)
		return err;

	err = rtnl_ensure_unique_netns(tb, extack, true);
	if (err)
		return err;

	ifm = nlmsg_data(nlh);
	if (ifm->ifi_index > 0)
		dev = __dev_get_by_index(net, ifm->ifi_index);
	else if (tb[IFLA_IFNAME] || tb[IFLA_ALT_IFNAME])
		dev = rtnl_dev_get(net, tb);
	else
		return -EINVAL;

	if (!dev)
		return -ENODEV;

	if (!tb[IFLA_PROP_LIST])
		return 0;

	nla_for_each_nested(attr, tb[IFLA_PROP_LIST], rem) {
		switch (nla_type(attr)) {
		case IFLA_ALT_IFNAME:
			err = rtnl_alt_ifname(cmd, dev, attr, &changed, extack);
			if (err)
				return err;
			break;
		}
	}

	if (changed)
		netdev_state_change(dev);
	return 0;
}
3947
/* RTM_NEWLINKPROP: add link properties (alternative names). */
static int rtnl_newlinkprop(struct sk_buff *skb, struct nlmsghdr *nlh,
			    struct netlink_ext_ack *extack)
{
	return rtnl_linkprop(RTM_NEWLINKPROP, skb, nlh, extack);
}
3953
/* RTM_DELLINKPROP: remove link properties (alternative names). */
static int rtnl_dellinkprop(struct sk_buff *skb, struct nlmsghdr *nlh,
			    struct netlink_ext_ack *extack)
{
	return rtnl_linkprop(RTM_DELLINKPROP, skb, nlh, extack);
}
3959
/* Compute the per-message buffer size a link dump should use.  Without
 * an IFLA_EXT_MASK filter the default NLMSG_GOODSIZE suffices; with one,
 * scan all devices under RCU and size for the largest resulting message.
 */
static u32 rtnl_calcit(struct sk_buff *skb, struct nlmsghdr *nlh)
{
	struct net *net = sock_net(skb->sk);
	size_t min_ifinfo_dump_size = 0;
	struct nlattr *tb[IFLA_MAX+1];
	u32 ext_filter_mask = 0;
	struct net_device *dev;
	int hdrlen;

	/* Same kernel<->userspace interface hack as in rtnl_dump_ifinfo. */
	hdrlen = nlmsg_len(nlh) < sizeof(struct ifinfomsg) ?
		 sizeof(struct rtgenmsg) : sizeof(struct ifinfomsg);

	if (nlmsg_parse_deprecated(nlh, hdrlen, tb, IFLA_MAX, ifla_policy, NULL) >= 0) {
		if (tb[IFLA_EXT_MASK])
			ext_filter_mask = nla_get_u32(tb[IFLA_EXT_MASK]);
	}

	if (!ext_filter_mask)
		return NLMSG_GOODSIZE;
	/*
	 * traverse the list of net devices and compute the minimum
	 * buffer size based upon the filter mask.
	 */
	rcu_read_lock();
	for_each_netdev_rcu(net, dev) {
		min_ifinfo_dump_size = max(min_ifinfo_dump_size,
					   if_nlmsg_size(dev, ext_filter_mask));
	}
	rcu_read_unlock();

	return nlmsg_total_size(min_ifinfo_dump_size);
}
3993
/* PF_UNSPEC dump fallback: iterate every registered protocol family and
 * invoke its dumpit for this message type, resuming from cb->family
 * across dump continuations.  Handler table entries are RCU-protected.
 */
static int rtnl_dump_all(struct sk_buff *skb, struct netlink_callback *cb)
{
	int idx;
	int s_idx = cb->family;		/* resume point from previous call */
	int type = cb->nlh->nlmsg_type - RTM_BASE;
	int ret = 0;

	if (s_idx == 0)
		s_idx = 1;

	for (idx = 1; idx <= RTNL_FAMILY_MAX; idx++) {
		struct rtnl_link __rcu **tab;
		struct rtnl_link *link;
		rtnl_dumpit_func dumpit;

		/* PF_PACKET dumps are handled elsewhere; skip it here. */
		if (idx < s_idx || idx == PF_PACKET)
			continue;

		if (type < 0 || type >= RTM_NR_MSGTYPES)
			continue;

		tab = rcu_dereference_rtnl(rtnl_msg_handlers[idx]);
		if (!tab)
			continue;

		link = rcu_dereference_rtnl(tab[type]);
		if (!link)
			continue;

		dumpit = link->dumpit;
		if (!dumpit)
			continue;

		/* Fresh family: reset per-dump state before its dumpit. */
		if (idx > s_idx) {
			memset(&cb->args[0], 0, sizeof(cb->args));
			cb->prev_seq = 0;
			cb->seq = 0;
		}
		ret = dumpit(skb, cb);
		if (ret)
			break;
	}
	cb->family = idx;

	/* Non-empty skb means "more to come"; otherwise propagate ret. */
	return skb->len ? : ret;
}
4040
395eea6c 4041struct sk_buff *rtmsg_ifinfo_build_skb(int type, struct net_device *dev,
3d3ea5af 4042 unsigned int change,
38e01b30 4043 u32 event, gfp_t flags, int *new_nsid,
59d3efd2
MW
4044 int new_ifindex, u32 portid,
4045 const struct nlmsghdr *nlh)
1da177e4 4046{
c346dca1 4047 struct net *net = dev_net(dev);
1da177e4 4048 struct sk_buff *skb;
0ec6d3f4 4049 int err = -ENOBUFS;
59d3efd2 4050 u32 seq = 0;
1da177e4 4051
50af5969 4052 skb = nlmsg_new(if_nlmsg_size(dev, 0), flags);
0ec6d3f4
TG
4053 if (skb == NULL)
4054 goto errout;
1da177e4 4055
59d3efd2
MW
4056 if (nlmsg_report(nlh))
4057 seq = nlmsg_seq(nlh);
4058 else
4059 portid = 0;
4060
79e1ad14 4061 err = rtnl_fill_ifinfo(skb, dev, dev_net(dev),
1d997f10 4062 type, portid, seq, change, 0, 0, event,
d4e4fdf9 4063 new_nsid, new_ifindex, -1, flags);
26932566
PM
4064 if (err < 0) {
4065 /* -EMSGSIZE implies BUG in if_nlmsg_size() */
4066 WARN_ON(err == -EMSGSIZE);
4067 kfree_skb(skb);
4068 goto errout;
4069 }
395eea6c 4070 return skb;
0ec6d3f4
TG
4071errout:
4072 if (err < 0)
4b3da706 4073 rtnl_set_sk_err(net, RTNLGRP_LINK, err);
395eea6c
MB
4074 return NULL;
4075}
4076
1d997f10
HL
4077void rtmsg_ifinfo_send(struct sk_buff *skb, struct net_device *dev, gfp_t flags,
4078 u32 portid, const struct nlmsghdr *nlh)
395eea6c
MB
4079{
4080 struct net *net = dev_net(dev);
4081
1d997f10 4082 rtnl_notify(skb, net, portid, RTNLGRP_LINK, nlh, flags);
395eea6c
MB
4083}
4084
3d3ea5af
VY
4085static void rtmsg_ifinfo_event(int type, struct net_device *dev,
4086 unsigned int change, u32 event,
1d997f10
HL
4087 gfp_t flags, int *new_nsid, int new_ifindex,
4088 u32 portid, const struct nlmsghdr *nlh)
395eea6c
MB
4089{
4090 struct sk_buff *skb;
4091
ed2a80ab
ND
4092 if (dev->reg_state != NETREG_REGISTERED)
4093 return;
4094
38e01b30 4095 skb = rtmsg_ifinfo_build_skb(type, dev, change, event, flags, new_nsid,
59d3efd2 4096 new_ifindex, portid, nlh);
395eea6c 4097 if (skb)
1d997f10 4098 rtmsg_ifinfo_send(skb, dev, flags, portid, nlh);
1da177e4 4099}
3d3ea5af
VY
4100
4101void rtmsg_ifinfo(int type, struct net_device *dev, unsigned int change,
1d997f10 4102 gfp_t flags, u32 portid, const struct nlmsghdr *nlh)
3d3ea5af 4103{
38e01b30 4104 rtmsg_ifinfo_event(type, dev, change, rtnl_get_event(0), flags,
1d997f10 4105 NULL, 0, portid, nlh);
3d3ea5af 4106}
1da177e4 4107
6621dd29 4108void rtmsg_ifinfo_newnet(int type, struct net_device *dev, unsigned int change,
38e01b30 4109 gfp_t flags, int *new_nsid, int new_ifindex)
6621dd29
ND
4110{
4111 rtmsg_ifinfo_event(type, dev, change, rtnl_get_event(0), flags,
1d997f10 4112 new_nsid, new_ifindex, 0, NULL);
6621dd29
ND
4113}
4114
d83b0603
JF
4115static int nlmsg_populate_fdb_fill(struct sk_buff *skb,
4116 struct net_device *dev,
1e53d5bb 4117 u8 *addr, u16 vid, u32 pid, u32 seq,
1c104a6b 4118 int type, unsigned int flags,
b3379041 4119 int nlflags, u16 ndm_state)
d83b0603
JF
4120{
4121 struct nlmsghdr *nlh;
4122 struct ndmsg *ndm;
4123
1c104a6b 4124 nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndm), nlflags);
d83b0603
JF
4125 if (!nlh)
4126 return -EMSGSIZE;
4127
4128 ndm = nlmsg_data(nlh);
4129 ndm->ndm_family = AF_BRIDGE;
4130 ndm->ndm_pad1 = 0;
4131 ndm->ndm_pad2 = 0;
4132 ndm->ndm_flags = flags;
4133 ndm->ndm_type = 0;
4134 ndm->ndm_ifindex = dev->ifindex;
b3379041 4135 ndm->ndm_state = ndm_state;
d83b0603 4136
aa540695 4137 if (nla_put(skb, NDA_LLADDR, dev->addr_len, addr))
d83b0603 4138 goto nla_put_failure;
1e53d5bb
HS
4139 if (vid)
4140 if (nla_put(skb, NDA_VLAN, sizeof(u16), &vid))
4141 goto nla_put_failure;
d83b0603 4142
053c095a
JB
4143 nlmsg_end(skb, nlh);
4144 return 0;
d83b0603
JF
4145
4146nla_put_failure:
4147 nlmsg_cancel(skb, nlh);
4148 return -EMSGSIZE;
4149}
4150
aa540695 4151static inline size_t rtnl_fdb_nlmsg_size(const struct net_device *dev)
3ff661c3 4152{
f82ef3e1 4153 return NLMSG_ALIGN(sizeof(struct ndmsg)) +
aa540695 4154 nla_total_size(dev->addr_len) + /* NDA_LLADDR */
f82ef3e1
SD
4155 nla_total_size(sizeof(u16)) + /* NDA_VLAN */
4156 0;
3ff661c3
JF
4157}
4158
b3379041
HS
4159static void rtnl_fdb_notify(struct net_device *dev, u8 *addr, u16 vid, int type,
4160 u16 ndm_state)
3ff661c3
JF
4161{
4162 struct net *net = dev_net(dev);
4163 struct sk_buff *skb;
4164 int err = -ENOBUFS;
4165
aa540695 4166 skb = nlmsg_new(rtnl_fdb_nlmsg_size(dev), GFP_ATOMIC);
3ff661c3
JF
4167 if (!skb)
4168 goto errout;
4169
1e53d5bb 4170 err = nlmsg_populate_fdb_fill(skb, dev, addr, vid,
b3379041 4171 0, 0, type, NTF_SELF, 0, ndm_state);
3ff661c3
JF
4172 if (err < 0) {
4173 kfree_skb(skb);
4174 goto errout;
4175 }
4176
4177 rtnl_notify(skb, net, 0, RTNLGRP_NEIGH, NULL, GFP_ATOMIC);
4178 return;
4179errout:
4180 rtnl_set_sk_err(net, RTNLGRP_NEIGH, err);
4181}
4182
a986967e 4183/*
090096bf
VY
4184 * ndo_dflt_fdb_add - default netdevice operation to add an FDB entry
4185 */
4186int ndo_dflt_fdb_add(struct ndmsg *ndm,
4187 struct nlattr *tb[],
4188 struct net_device *dev,
f6f6424b 4189 const unsigned char *addr, u16 vid,
090096bf
VY
4190 u16 flags)
4191{
4192 int err = -EINVAL;
4193
4194 /* If aging addresses are supported device will need to
4195 * implement its own handler for this.
4196 */
4197 if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) {
78ecc890 4198 netdev_info(dev, "default FDB implementation only supports local addresses\n");
090096bf
VY
4199 return err;
4200 }
4201
a35ec8e3
HS
4202 if (tb[NDA_FLAGS_EXT]) {
4203 netdev_info(dev, "invalid flags given to default FDB implementation\n");
4204 return err;
4205 }
4206
65891fea 4207 if (vid) {
23ac0b42 4208 netdev_info(dev, "vlans aren't supported yet for dev_uc|mc_add()\n");
65891fea
OG
4209 return err;
4210 }
4211
090096bf
VY
4212 if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr))
4213 err = dev_uc_add_excl(dev, addr);
4214 else if (is_multicast_ether_addr(addr))
4215 err = dev_mc_add_excl(dev, addr);
4216
4217 /* Only return duplicate errors if NLM_F_EXCL is set */
4218 if (err == -EEXIST && !(flags & NLM_F_EXCL))
4219 err = 0;
4220
4221 return err;
4222}
4223EXPORT_SYMBOL(ndo_dflt_fdb_add);
4224
b88d12e4
FW
4225static int fdb_vid_parse(struct nlattr *vlan_attr, u16 *p_vid,
4226 struct netlink_ext_ack *extack)
f6f6424b
JP
4227{
4228 u16 vid = 0;
4229
4230 if (vlan_attr) {
4231 if (nla_len(vlan_attr) != sizeof(u16)) {
b88d12e4 4232 NL_SET_ERR_MSG(extack, "invalid vlan attribute size");
f6f6424b
JP
4233 return -EINVAL;
4234 }
4235
4236 vid = nla_get_u16(vlan_attr);
4237
4238 if (!vid || vid >= VLAN_VID_MASK) {
b88d12e4 4239 NL_SET_ERR_MSG(extack, "invalid vlan id");
f6f6424b
JP
4240 return -EINVAL;
4241 }
4242 }
4243 *p_vid = vid;
4244 return 0;
4245}
4246
c21ef3e3
DA
4247static int rtnl_fdb_add(struct sk_buff *skb, struct nlmsghdr *nlh,
4248 struct netlink_ext_ack *extack)
77162022
JF
4249{
4250 struct net *net = sock_net(skb->sk);
77162022
JF
4251 struct ndmsg *ndm;
4252 struct nlattr *tb[NDA_MAX+1];
4253 struct net_device *dev;
4254 u8 *addr;
f6f6424b 4255 u16 vid;
77162022
JF
4256 int err;
4257
8cb08174
JB
4258 err = nlmsg_parse_deprecated(nlh, sizeof(*ndm), tb, NDA_MAX, NULL,
4259 extack);
77162022
JF
4260 if (err < 0)
4261 return err;
4262
4263 ndm = nlmsg_data(nlh);
4264 if (ndm->ndm_ifindex == 0) {
b88d12e4 4265 NL_SET_ERR_MSG(extack, "invalid ifindex");
77162022
JF
4266 return -EINVAL;
4267 }
4268
4269 dev = __dev_get_by_index(net, ndm->ndm_ifindex);
4270 if (dev == NULL) {
b88d12e4 4271 NL_SET_ERR_MSG(extack, "unknown ifindex");
77162022
JF
4272 return -ENODEV;
4273 }
4274
4275 if (!tb[NDA_LLADDR] || nla_len(tb[NDA_LLADDR]) != ETH_ALEN) {
b88d12e4 4276 NL_SET_ERR_MSG(extack, "invalid address");
77162022
JF
4277 return -EINVAL;
4278 }
4279
da715775
IS
4280 if (dev->type != ARPHRD_ETHER) {
4281 NL_SET_ERR_MSG(extack, "FDB add only supported for Ethernet devices");
4282 return -EINVAL;
4283 }
4284
77162022 4285 addr = nla_data(tb[NDA_LLADDR]);
77162022 4286
b88d12e4 4287 err = fdb_vid_parse(tb[NDA_VLAN], &vid, extack);
f6f6424b
JP
4288 if (err)
4289 return err;
4290
77162022
JF
4291 err = -EOPNOTSUPP;
4292
4293 /* Support fdb on master device the net/bridge default case */
4294 if ((!ndm->ndm_flags || ndm->ndm_flags & NTF_MASTER) &&
2e92a2d0 4295 netif_is_bridge_port(dev)) {
898e5061
JP
4296 struct net_device *br_dev = netdev_master_upper_dev_get(dev);
4297 const struct net_device_ops *ops = br_dev->netdev_ops;
4298
f6f6424b 4299 err = ops->ndo_fdb_add(ndm, tb, dev, addr, vid,
87b0984e 4300 nlh->nlmsg_flags, extack);
77162022
JF
4301 if (err)
4302 goto out;
4303 else
4304 ndm->ndm_flags &= ~NTF_MASTER;
4305 }
4306
4307 /* Embedded bridge, macvlan, and any other device support */
090096bf
VY
4308 if ((ndm->ndm_flags & NTF_SELF)) {
4309 if (dev->netdev_ops->ndo_fdb_add)
4310 err = dev->netdev_ops->ndo_fdb_add(ndm, tb, dev, addr,
f6f6424b 4311 vid,
87b0984e
PM
4312 nlh->nlmsg_flags,
4313 extack);
090096bf 4314 else
f6f6424b 4315 err = ndo_dflt_fdb_add(ndm, tb, dev, addr, vid,
090096bf 4316 nlh->nlmsg_flags);
77162022 4317
3ff661c3 4318 if (!err) {
b3379041
HS
4319 rtnl_fdb_notify(dev, addr, vid, RTM_NEWNEIGH,
4320 ndm->ndm_state);
77162022 4321 ndm->ndm_flags &= ~NTF_SELF;
3ff661c3 4322 }
77162022
JF
4323 }
4324out:
4325 return err;
4326}
4327
a986967e 4328/*
090096bf
VY
4329 * ndo_dflt_fdb_del - default netdevice operation to delete an FDB entry
4330 */
4331int ndo_dflt_fdb_del(struct ndmsg *ndm,
4332 struct nlattr *tb[],
4333 struct net_device *dev,
f6f6424b 4334 const unsigned char *addr, u16 vid)
090096bf 4335{
c8a89c4a 4336 int err = -EINVAL;
090096bf
VY
4337
4338 /* If aging addresses are supported device will need to
4339 * implement its own handler for this.
4340 */
64535993 4341 if (!(ndm->ndm_state & NUD_PERMANENT)) {
78ecc890 4342 netdev_info(dev, "default FDB implementation only supports local addresses\n");
c8a89c4a 4343 return err;
090096bf
VY
4344 }
4345
4346 if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr))
4347 err = dev_uc_del(dev, addr);
4348 else if (is_multicast_ether_addr(addr))
4349 err = dev_mc_del(dev, addr);
090096bf
VY
4350
4351 return err;
4352}
4353EXPORT_SYMBOL(ndo_dflt_fdb_del);
4354
c21ef3e3
DA
4355static int rtnl_fdb_del(struct sk_buff *skb, struct nlmsghdr *nlh,
4356 struct netlink_ext_ack *extack)
77162022 4357{
9e834259 4358 bool del_bulk = !!(nlh->nlmsg_flags & NLM_F_BULK);
77162022 4359 struct net *net = sock_net(skb->sk);
9e834259 4360 const struct net_device_ops *ops;
77162022 4361 struct ndmsg *ndm;
1690be63 4362 struct nlattr *tb[NDA_MAX+1];
77162022 4363 struct net_device *dev;
9e834259 4364 __u8 *addr = NULL;
7d311801 4365 int err;
f6f6424b 4366 u16 vid;
77162022 4367
90f62cf3 4368 if (!netlink_capable(skb, CAP_NET_ADMIN))
1690be63
VY
4369 return -EPERM;
4370
9e834259
NA
4371 if (!del_bulk) {
4372 err = nlmsg_parse_deprecated(nlh, sizeof(*ndm), tb, NDA_MAX,
4373 NULL, extack);
4374 } else {
38985e8c
AC
4375 /* For bulk delete, the drivers will parse the message with
4376 * policy.
4377 */
4378 err = nlmsg_parse(nlh, sizeof(*ndm), tb, NDA_MAX, NULL, extack);
9e834259 4379 }
1690be63
VY
4380 if (err < 0)
4381 return err;
77162022
JF
4382
4383 ndm = nlmsg_data(nlh);
4384 if (ndm->ndm_ifindex == 0) {
b88d12e4 4385 NL_SET_ERR_MSG(extack, "invalid ifindex");
77162022
JF
4386 return -EINVAL;
4387 }
4388
4389 dev = __dev_get_by_index(net, ndm->ndm_ifindex);
4390 if (dev == NULL) {
b88d12e4 4391 NL_SET_ERR_MSG(extack, "unknown ifindex");
77162022
JF
4392 return -ENODEV;
4393 }
4394
9e834259
NA
4395 if (!del_bulk) {
4396 if (!tb[NDA_LLADDR] || nla_len(tb[NDA_LLADDR]) != ETH_ALEN) {
4397 NL_SET_ERR_MSG(extack, "invalid address");
4398 return -EINVAL;
4399 }
4400 addr = nla_data(tb[NDA_LLADDR]);
38985e8c
AC
4401
4402 err = fdb_vid_parse(tb[NDA_VLAN], &vid, extack);
4403 if (err)
4404 return err;
1690be63
VY
4405 }
4406
da715775
IS
4407 if (dev->type != ARPHRD_ETHER) {
4408 NL_SET_ERR_MSG(extack, "FDB delete only supported for Ethernet devices");
4409 return -EINVAL;
4410 }
4411
77162022
JF
4412 err = -EOPNOTSUPP;
4413
4414 /* Support fdb on master device the net/bridge default case */
4415 if ((!ndm->ndm_flags || ndm->ndm_flags & NTF_MASTER) &&
2e92a2d0 4416 netif_is_bridge_port(dev)) {
898e5061 4417 struct net_device *br_dev = netdev_master_upper_dev_get(dev);
77162022 4418
9e834259
NA
4419 ops = br_dev->netdev_ops;
4420 if (!del_bulk) {
4421 if (ops->ndo_fdb_del)
ca4567f1 4422 err = ops->ndo_fdb_del(ndm, tb, dev, addr, vid, extack);
9e834259
NA
4423 } else {
4424 if (ops->ndo_fdb_del_bulk)
38985e8c 4425 err = ops->ndo_fdb_del_bulk(nlh, dev, extack);
9e834259 4426 }
77162022
JF
4427
4428 if (err)
4429 goto out;
4430 else
4431 ndm->ndm_flags &= ~NTF_MASTER;
4432 }
4433
4434 /* Embedded bridge, macvlan, and any other device support */
090096bf 4435 if (ndm->ndm_flags & NTF_SELF) {
9e834259
NA
4436 ops = dev->netdev_ops;
4437 if (!del_bulk) {
4438 if (ops->ndo_fdb_del)
ca4567f1 4439 err = ops->ndo_fdb_del(ndm, tb, dev, addr, vid, extack);
9e834259
NA
4440 else
4441 err = ndo_dflt_fdb_del(ndm, tb, dev, addr, vid);
4442 } else {
4443 /* in case err was cleared by NTF_MASTER call */
4444 err = -EOPNOTSUPP;
4445 if (ops->ndo_fdb_del_bulk)
38985e8c 4446 err = ops->ndo_fdb_del_bulk(nlh, dev, extack);
9e834259 4447 }
77162022 4448
3ff661c3 4449 if (!err) {
9e834259
NA
4450 if (!del_bulk)
4451 rtnl_fdb_notify(dev, addr, vid, RTM_DELNEIGH,
4452 ndm->ndm_state);
77162022 4453 ndm->ndm_flags &= ~NTF_SELF;
3ff661c3 4454 }
77162022
JF
4455 }
4456out:
4457 return err;
4458}
4459
d83b0603
JF
4460static int nlmsg_populate_fdb(struct sk_buff *skb,
4461 struct netlink_callback *cb,
4462 struct net_device *dev,
4463 int *idx,
4464 struct netdev_hw_addr_list *list)
4465{
4466 struct netdev_hw_addr *ha;
4467 int err;
15e47304 4468 u32 portid, seq;
d83b0603 4469
15e47304 4470 portid = NETLINK_CB(cb->skb).portid;
d83b0603
JF
4471 seq = cb->nlh->nlmsg_seq;
4472
4473 list_for_each_entry(ha, &list->list, list) {
d297653d 4474 if (*idx < cb->args[2])
d83b0603
JF
4475 goto skip;
4476
1e53d5bb 4477 err = nlmsg_populate_fdb_fill(skb, dev, ha->addr, 0,
a7a558fe 4478 portid, seq,
1c104a6b 4479 RTM_NEWNEIGH, NTF_SELF,
b3379041 4480 NLM_F_MULTI, NUD_PERMANENT);
d83b0603
JF
4481 if (err < 0)
4482 return err;
4483skip:
4484 *idx += 1;
4485 }
4486 return 0;
4487}
4488
4489/**
2c53040f 4490 * ndo_dflt_fdb_dump - default netdevice operation to dump an FDB table.
a986967e
BVA
4491 * @skb: socket buffer to store message in
4492 * @cb: netlink callback
d83b0603 4493 * @dev: netdevice
a986967e
BVA
4494 * @filter_dev: ignored
4495 * @idx: the number of FDB table entries dumped is added to *@idx
d83b0603
JF
4496 *
4497 * Default netdevice operation to dump the existing unicast address list.
91f3e7b1 4498 * Returns number of addresses from list put in skb.
d83b0603
JF
4499 */
4500int ndo_dflt_fdb_dump(struct sk_buff *skb,
4501 struct netlink_callback *cb,
4502 struct net_device *dev,
5d5eacb3 4503 struct net_device *filter_dev,
d297653d 4504 int *idx)
d83b0603
JF
4505{
4506 int err;
4507
68883893
ED
4508 if (dev->type != ARPHRD_ETHER)
4509 return -EINVAL;
4510
d83b0603 4511 netif_addr_lock_bh(dev);
d297653d 4512 err = nlmsg_populate_fdb(skb, cb, dev, idx, &dev->uc);
d83b0603
JF
4513 if (err)
4514 goto out;
2934c9db 4515 err = nlmsg_populate_fdb(skb, cb, dev, idx, &dev->mc);
d83b0603
JF
4516out:
4517 netif_addr_unlock_bh(dev);
d297653d 4518 return err;
d83b0603
JF
4519}
4520EXPORT_SYMBOL(ndo_dflt_fdb_dump);
4521
8c6e137f
DA
4522static int valid_fdb_dump_strict(const struct nlmsghdr *nlh,
4523 int *br_idx, int *brport_idx,
4524 struct netlink_ext_ack *extack)
4525{
4526 struct nlattr *tb[NDA_MAX + 1];
4527 struct ndmsg *ndm;
4528 int err, i;
4529
4530 if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ndm))) {
4531 NL_SET_ERR_MSG(extack, "Invalid header for fdb dump request");
4532 return -EINVAL;
4533 }
4534
4535 ndm = nlmsg_data(nlh);
4536 if (ndm->ndm_pad1 || ndm->ndm_pad2 || ndm->ndm_state ||
4537 ndm->ndm_flags || ndm->ndm_type) {
8b73018f 4538 NL_SET_ERR_MSG(extack, "Invalid values in header for fdb dump request");
8c6e137f
DA
4539 return -EINVAL;
4540 }
4541
8cb08174
JB
4542 err = nlmsg_parse_deprecated_strict(nlh, sizeof(struct ndmsg), tb,
4543 NDA_MAX, NULL, extack);
8c6e137f
DA
4544 if (err < 0)
4545 return err;
4546
4547 *brport_idx = ndm->ndm_ifindex;
4548 for (i = 0; i <= NDA_MAX; ++i) {
4549 if (!tb[i])
4550 continue;
4551
4552 switch (i) {
4553 case NDA_IFINDEX:
4554 if (nla_len(tb[i]) != sizeof(u32)) {
4555 NL_SET_ERR_MSG(extack, "Invalid IFINDEX attribute in fdb dump request");
4556 return -EINVAL;
4557 }
4558 *brport_idx = nla_get_u32(tb[NDA_IFINDEX]);
4559 break;
4560 case NDA_MASTER:
4561 if (nla_len(tb[i]) != sizeof(u32)) {
4562 NL_SET_ERR_MSG(extack, "Invalid MASTER attribute in fdb dump request");
4563 return -EINVAL;
4564 }
4565 *br_idx = nla_get_u32(tb[NDA_MASTER]);
4566 break;
4567 default:
4568 NL_SET_ERR_MSG(extack, "Unsupported attribute in fdb dump request");
4569 return -EINVAL;
4570 }
4571 }
4572
4573 return 0;
4574}
4575
8dfbda19
DA
4576static int valid_fdb_dump_legacy(const struct nlmsghdr *nlh,
4577 int *br_idx, int *brport_idx,
4578 struct netlink_ext_ack *extack)
77162022 4579{
5e6d2435 4580 struct nlattr *tb[IFLA_MAX+1];
8dfbda19 4581 int err;
5e6d2435 4582
bd961c9b
MFO
4583 /* A hack to preserve kernel<->userspace interface.
4584 * Before Linux v4.12 this code accepted ndmsg since iproute2 v3.3.0.
4585 * However, ndmsg is shorter than ifinfomsg thus nlmsg_parse() bails.
4586 * So, check for ndmsg with an optional u32 attribute (not used here).
4587 * Fortunately these sizes don't conflict with the size of ifinfomsg
4588 * with an optional attribute.
4589 */
8dfbda19
DA
4590 if (nlmsg_len(nlh) != sizeof(struct ndmsg) &&
4591 (nlmsg_len(nlh) != sizeof(struct ndmsg) +
bd961c9b 4592 nla_attr_size(sizeof(u32)))) {
4565d7e5
DA
4593 struct ifinfomsg *ifm;
4594
8cb08174
JB
4595 err = nlmsg_parse_deprecated(nlh, sizeof(struct ifinfomsg),
4596 tb, IFLA_MAX, ifla_policy,
4597 extack);
bd961c9b
MFO
4598 if (err < 0) {
4599 return -EINVAL;
4600 } else if (err == 0) {
4601 if (tb[IFLA_MASTER])
8dfbda19 4602 *br_idx = nla_get_u32(tb[IFLA_MASTER]);
bd961c9b 4603 }
5e6d2435 4604
4565d7e5 4605 ifm = nlmsg_data(nlh);
8dfbda19 4606 *brport_idx = ifm->ifi_index;
bd961c9b 4607 }
8dfbda19
DA
4608 return 0;
4609}
4610
4611static int rtnl_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb)
4612{
4613 struct net_device *dev;
4614 struct net_device *br_dev = NULL;
4615 const struct net_device_ops *ops = NULL;
4616 const struct net_device_ops *cops = NULL;
4617 struct net *net = sock_net(skb->sk);
4618 struct hlist_head *head;
4619 int brport_idx = 0;
4620 int br_idx = 0;
4621 int h, s_h;
4622 int idx = 0, s_idx;
4623 int err = 0;
4624 int fidx = 0;
4625
8c6e137f
DA
4626 if (cb->strict_check)
4627 err = valid_fdb_dump_strict(cb->nlh, &br_idx, &brport_idx,
4628 cb->extack);
4629 else
4630 err = valid_fdb_dump_legacy(cb->nlh, &br_idx, &brport_idx,
4631 cb->extack);
8dfbda19
DA
4632 if (err < 0)
4633 return err;
5e6d2435
JHS
4634
4635 if (br_idx) {
4636 br_dev = __dev_get_by_index(net, br_idx);
4637 if (!br_dev)
4638 return -ENODEV;
4639
4640 ops = br_dev->netdev_ops;
5e6d2435
JHS
4641 }
4642
d297653d
RP
4643 s_h = cb->args[0];
4644 s_idx = cb->args[1];
5e6d2435 4645
d297653d
RP
4646 for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
4647 idx = 0;
4648 head = &net->dev_index_head[h];
4649 hlist_for_each_entry(dev, head, index_hlist) {
5e6d2435 4650
d297653d 4651 if (brport_idx && (dev->ifindex != brport_idx))
5e6d2435
JHS
4652 continue;
4653
d297653d 4654 if (!br_idx) { /* user did not specify a specific bridge */
2e92a2d0 4655 if (netif_is_bridge_port(dev)) {
d297653d
RP
4656 br_dev = netdev_master_upper_dev_get(dev);
4657 cops = br_dev->netdev_ops;
4658 }
4659 } else {
4660 if (dev != br_dev &&
2e92a2d0 4661 !netif_is_bridge_port(dev))
d297653d 4662 continue;
5e6d2435 4663
d297653d 4664 if (br_dev != netdev_master_upper_dev_get(dev) &&
254ec036 4665 !netif_is_bridge_master(dev))
d297653d
RP
4666 continue;
4667 cops = ops;
4668 }
77162022 4669
d297653d
RP
4670 if (idx < s_idx)
4671 goto cont;
77162022 4672
2e92a2d0 4673 if (netif_is_bridge_port(dev)) {
d297653d
RP
4674 if (cops && cops->ndo_fdb_dump) {
4675 err = cops->ndo_fdb_dump(skb, cb,
4676 br_dev, dev,
4677 &fidx);
4678 if (err == -EMSGSIZE)
4679 goto out;
4680 }
4681 }
5e6d2435 4682
d297653d
RP
4683 if (dev->netdev_ops->ndo_fdb_dump)
4684 err = dev->netdev_ops->ndo_fdb_dump(skb, cb,
4685 dev, NULL,
4686 &fidx);
4687 else
4688 err = ndo_dflt_fdb_dump(skb, cb, dev, NULL,
4689 &fidx);
4690 if (err == -EMSGSIZE)
4691 goto out;
4692
4693 cops = NULL;
4694
4695 /* reset fdb offset to 0 for rest of the interfaces */
4696 cb->args[2] = 0;
4697 fidx = 0;
4698cont:
4699 idx++;
4700 }
77162022 4701 }
77162022 4702
d297653d
RP
4703out:
4704 cb->args[0] = h;
4705 cb->args[1] = idx;
4706 cb->args[2] = fidx;
4707
77162022
JF
4708 return skb->len;
4709}
4710
5b2f94b2
RP
4711static int valid_fdb_get_strict(const struct nlmsghdr *nlh,
4712 struct nlattr **tb, u8 *ndm_flags,
4713 int *br_idx, int *brport_idx, u8 **addr,
4714 u16 *vid, struct netlink_ext_ack *extack)
4715{
4716 struct ndmsg *ndm;
4717 int err, i;
4718
4719 if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ndm))) {
4720 NL_SET_ERR_MSG(extack, "Invalid header for fdb get request");
4721 return -EINVAL;
4722 }
4723
4724 ndm = nlmsg_data(nlh);
4725 if (ndm->ndm_pad1 || ndm->ndm_pad2 || ndm->ndm_state ||
4726 ndm->ndm_type) {
4727 NL_SET_ERR_MSG(extack, "Invalid values in header for fdb get request");
4728 return -EINVAL;
4729 }
4730
4731 if (ndm->ndm_flags & ~(NTF_MASTER | NTF_SELF)) {
4732 NL_SET_ERR_MSG(extack, "Invalid flags in header for fdb get request");
4733 return -EINVAL;
4734 }
4735
8cb08174
JB
4736 err = nlmsg_parse_deprecated_strict(nlh, sizeof(struct ndmsg), tb,
4737 NDA_MAX, nda_policy, extack);
5b2f94b2
RP
4738 if (err < 0)
4739 return err;
4740
4741 *ndm_flags = ndm->ndm_flags;
4742 *brport_idx = ndm->ndm_ifindex;
4743 for (i = 0; i <= NDA_MAX; ++i) {
4744 if (!tb[i])
4745 continue;
4746
4747 switch (i) {
4748 case NDA_MASTER:
4749 *br_idx = nla_get_u32(tb[i]);
4750 break;
4751 case NDA_LLADDR:
4752 if (nla_len(tb[i]) != ETH_ALEN) {
4753 NL_SET_ERR_MSG(extack, "Invalid address in fdb get request");
4754 return -EINVAL;
4755 }
4756 *addr = nla_data(tb[i]);
4757 break;
4758 case NDA_VLAN:
4759 err = fdb_vid_parse(tb[i], vid, extack);
4760 if (err)
4761 return err;
4762 break;
4763 case NDA_VNI:
4764 break;
4765 default:
4766 NL_SET_ERR_MSG(extack, "Unsupported attribute in fdb get request");
4767 return -EINVAL;
4768 }
4769 }
4770
4771 return 0;
4772}
4773
4774static int rtnl_fdb_get(struct sk_buff *in_skb, struct nlmsghdr *nlh,
4775 struct netlink_ext_ack *extack)
4776{
4777 struct net_device *dev = NULL, *br_dev = NULL;
4778 const struct net_device_ops *ops = NULL;
4779 struct net *net = sock_net(in_skb->sk);
4780 struct nlattr *tb[NDA_MAX + 1];
4781 struct sk_buff *skb;
4782 int brport_idx = 0;
4783 u8 ndm_flags = 0;
4784 int br_idx = 0;
4785 u8 *addr = NULL;
4786 u16 vid = 0;
4787 int err;
4788
4789 err = valid_fdb_get_strict(nlh, tb, &ndm_flags, &br_idx,
4790 &brport_idx, &addr, &vid, extack);
4791 if (err < 0)
4792 return err;
4793
f989d03e
NA
4794 if (!addr) {
4795 NL_SET_ERR_MSG(extack, "Missing lookup address for fdb get request");
4796 return -EINVAL;
4797 }
4798
5b2f94b2
RP
4799 if (brport_idx) {
4800 dev = __dev_get_by_index(net, brport_idx);
4801 if (!dev) {
4802 NL_SET_ERR_MSG(extack, "Unknown device ifindex");
4803 return -ENODEV;
4804 }
4805 }
4806
4807 if (br_idx) {
4808 if (dev) {
4809 NL_SET_ERR_MSG(extack, "Master and device are mutually exclusive");
4810 return -EINVAL;
4811 }
4812
4813 br_dev = __dev_get_by_index(net, br_idx);
4814 if (!br_dev) {
4815 NL_SET_ERR_MSG(extack, "Invalid master ifindex");
4816 return -EINVAL;
4817 }
4818 ops = br_dev->netdev_ops;
4819 }
4820
4821 if (dev) {
4822 if (!ndm_flags || (ndm_flags & NTF_MASTER)) {
2e92a2d0 4823 if (!netif_is_bridge_port(dev)) {
5b2f94b2
RP
4824 NL_SET_ERR_MSG(extack, "Device is not a bridge port");
4825 return -EINVAL;
4826 }
4827 br_dev = netdev_master_upper_dev_get(dev);
4828 if (!br_dev) {
4829 NL_SET_ERR_MSG(extack, "Master of device not found");
4830 return -EINVAL;
4831 }
4832 ops = br_dev->netdev_ops;
4833 } else {
4834 if (!(ndm_flags & NTF_SELF)) {
4835 NL_SET_ERR_MSG(extack, "Missing NTF_SELF");
4836 return -EINVAL;
4837 }
4838 ops = dev->netdev_ops;
4839 }
4840 }
4841
4842 if (!br_dev && !dev) {
4843 NL_SET_ERR_MSG(extack, "No device specified");
4844 return -ENODEV;
4845 }
4846
4847 if (!ops || !ops->ndo_fdb_get) {
4848 NL_SET_ERR_MSG(extack, "Fdb get operation not supported by device");
4849 return -EOPNOTSUPP;
4850 }
4851
4852 skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
4853 if (!skb)
4854 return -ENOBUFS;
4855
4856 if (br_dev)
4857 dev = br_dev;
4858 err = ops->ndo_fdb_get(skb, tb, dev, addr, vid,
4859 NETLINK_CB(in_skb).portid,
4860 nlh->nlmsg_seq, extack);
4861 if (err)
4862 goto out;
4863
4864 return rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid);
4865out:
4866 kfree_skb(skb);
4867 return err;
4868}
4869
2c3c031c
SF
4870static int brport_nla_put_flag(struct sk_buff *skb, u32 flags, u32 mask,
4871 unsigned int attrnum, unsigned int flag)
4872{
4873 if (mask & flag)
4874 return nla_put_u8(skb, attrnum, !!(flags & flag));
4875 return 0;
4876}
4877
815cccbf 4878int ndo_dflt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
2c3c031c 4879 struct net_device *dev, u16 mode,
7d4f8d87
SF
4880 u32 flags, u32 mask, int nlflags,
4881 u32 filter_mask,
4882 int (*vlan_fill)(struct sk_buff *skb,
4883 struct net_device *dev,
4884 u32 filter_mask))
815cccbf
JF
4885{
4886 struct nlmsghdr *nlh;
4887 struct ifinfomsg *ifm;
4888 struct nlattr *br_afspec;
2c3c031c 4889 struct nlattr *protinfo;
815cccbf 4890 u8 operstate = netif_running(dev) ? dev->operstate : IF_OPER_DOWN;
898e5061 4891 struct net_device *br_dev = netdev_master_upper_dev_get(dev);
7d4f8d87 4892 int err = 0;
815cccbf 4893
46c264da 4894 nlh = nlmsg_put(skb, pid, seq, RTM_NEWLINK, sizeof(*ifm), nlflags);
815cccbf
JF
4895 if (nlh == NULL)
4896 return -EMSGSIZE;
4897
4898 ifm = nlmsg_data(nlh);
4899 ifm->ifi_family = AF_BRIDGE;
4900 ifm->__ifi_pad = 0;
4901 ifm->ifi_type = dev->type;
4902 ifm->ifi_index = dev->ifindex;
4903 ifm->ifi_flags = dev_get_flags(dev);
4904 ifm->ifi_change = 0;
4905
4906
4907 if (nla_put_string(skb, IFLA_IFNAME, dev->name) ||
4908 nla_put_u32(skb, IFLA_MTU, dev->mtu) ||
4909 nla_put_u8(skb, IFLA_OPERSTATE, operstate) ||
898e5061
JP
4910 (br_dev &&
4911 nla_put_u32(skb, IFLA_MASTER, br_dev->ifindex)) ||
815cccbf
JF
4912 (dev->addr_len &&
4913 nla_put(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr)) ||
a54acb3a
ND
4914 (dev->ifindex != dev_get_iflink(dev) &&
4915 nla_put_u32(skb, IFLA_LINK, dev_get_iflink(dev))))
815cccbf
JF
4916 goto nla_put_failure;
4917
ae0be8de 4918 br_afspec = nla_nest_start_noflag(skb, IFLA_AF_SPEC);
815cccbf
JF
4919 if (!br_afspec)
4920 goto nla_put_failure;
4921
1d460b98 4922 if (nla_put_u16(skb, IFLA_BRIDGE_FLAGS, BRIDGE_FLAGS_SELF)) {
815cccbf
JF
4923 nla_nest_cancel(skb, br_afspec);
4924 goto nla_put_failure;
4925 }
1d460b98
RP
4926
4927 if (mode != BRIDGE_MODE_UNDEF) {
4928 if (nla_put_u16(skb, IFLA_BRIDGE_MODE, mode)) {
4929 nla_nest_cancel(skb, br_afspec);
4930 goto nla_put_failure;
4931 }
4932 }
7d4f8d87
SF
4933 if (vlan_fill) {
4934 err = vlan_fill(skb, dev, filter_mask);
4935 if (err) {
4936 nla_nest_cancel(skb, br_afspec);
4937 goto nla_put_failure;
4938 }
4939 }
815cccbf
JF
4940 nla_nest_end(skb, br_afspec);
4941
ae0be8de 4942 protinfo = nla_nest_start(skb, IFLA_PROTINFO);
2c3c031c
SF
4943 if (!protinfo)
4944 goto nla_put_failure;
4945
4946 if (brport_nla_put_flag(skb, flags, mask,
4947 IFLA_BRPORT_MODE, BR_HAIRPIN_MODE) ||
4948 brport_nla_put_flag(skb, flags, mask,
4949 IFLA_BRPORT_GUARD, BR_BPDU_GUARD) ||
4950 brport_nla_put_flag(skb, flags, mask,
4951 IFLA_BRPORT_FAST_LEAVE,
4952 BR_MULTICAST_FAST_LEAVE) ||
4953 brport_nla_put_flag(skb, flags, mask,
4954 IFLA_BRPORT_PROTECT, BR_ROOT_BLOCK) ||
4955 brport_nla_put_flag(skb, flags, mask,
4956 IFLA_BRPORT_LEARNING, BR_LEARNING) ||
4957 brport_nla_put_flag(skb, flags, mask,
4958 IFLA_BRPORT_LEARNING_SYNC, BR_LEARNING_SYNC) ||
4959 brport_nla_put_flag(skb, flags, mask,
4960 IFLA_BRPORT_UNICAST_FLOOD, BR_FLOOD) ||
4961 brport_nla_put_flag(skb, flags, mask,
583cb0b4
JW
4962 IFLA_BRPORT_PROXYARP, BR_PROXYARP) ||
4963 brport_nla_put_flag(skb, flags, mask,
4964 IFLA_BRPORT_MCAST_FLOOD, BR_MCAST_FLOOD) ||
4965 brport_nla_put_flag(skb, flags, mask,
4966 IFLA_BRPORT_BCAST_FLOOD, BR_BCAST_FLOOD)) {
2c3c031c
SF
4967 nla_nest_cancel(skb, protinfo);
4968 goto nla_put_failure;
4969 }
4970
4971 nla_nest_end(skb, protinfo);
4972
053c095a
JB
4973 nlmsg_end(skb, nlh);
4974 return 0;
815cccbf
JF
4975nla_put_failure:
4976 nlmsg_cancel(skb, nlh);
7d4f8d87 4977 return err ? err : -EMSGSIZE;
815cccbf 4978}
7d4f8d87 4979EXPORT_SYMBOL_GPL(ndo_dflt_bridge_getlink);
815cccbf 4980
2d011be8
DA
4981static int valid_bridge_getlink_req(const struct nlmsghdr *nlh,
4982 bool strict_check, u32 *filter_mask,
4983 struct netlink_ext_ack *extack)
4984{
4985 struct nlattr *tb[IFLA_MAX+1];
4986 int err, i;
4987
4988 if (strict_check) {
4989 struct ifinfomsg *ifm;
4990
4991 if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ifm))) {
4992 NL_SET_ERR_MSG(extack, "Invalid header for bridge link dump");
4993 return -EINVAL;
4994 }
4995
4996 ifm = nlmsg_data(nlh);
4997 if (ifm->__ifi_pad || ifm->ifi_type || ifm->ifi_flags ||
4998 ifm->ifi_change || ifm->ifi_index) {
4999 NL_SET_ERR_MSG(extack, "Invalid values in header for bridge link dump request");
5000 return -EINVAL;
5001 }
5002
8cb08174
JB
5003 err = nlmsg_parse_deprecated_strict(nlh,
5004 sizeof(struct ifinfomsg),
5005 tb, IFLA_MAX, ifla_policy,
5006 extack);
2d011be8 5007 } else {
8cb08174
JB
5008 err = nlmsg_parse_deprecated(nlh, sizeof(struct ifinfomsg),
5009 tb, IFLA_MAX, ifla_policy,
5010 extack);
2d011be8
DA
5011 }
5012 if (err < 0)
5013 return err;
5014
5015 /* new attributes should only be added with strict checking */
5016 for (i = 0; i <= IFLA_MAX; ++i) {
5017 if (!tb[i])
5018 continue;
5019
5020 switch (i) {
5021 case IFLA_EXT_MASK:
5022 *filter_mask = nla_get_u32(tb[i]);
5023 break;
5024 default:
5025 if (strict_check) {
5026 NL_SET_ERR_MSG(extack, "Unsupported attribute in bridge link dump request");
5027 return -EINVAL;
5028 }
5029 }
5030 }
5031
5032 return 0;
5033}
5034
e5a55a89
JF
5035static int rtnl_bridge_getlink(struct sk_buff *skb, struct netlink_callback *cb)
5036{
2d011be8 5037 const struct nlmsghdr *nlh = cb->nlh;
e5a55a89
JF
5038 struct net *net = sock_net(skb->sk);
5039 struct net_device *dev;
5040 int idx = 0;
5041 u32 portid = NETLINK_CB(cb->skb).portid;
2d011be8 5042 u32 seq = nlh->nlmsg_seq;
6cbdceeb 5043 u32 filter_mask = 0;
d64f69b0 5044 int err;
6cbdceeb 5045
2d011be8
DA
5046 err = valid_bridge_getlink_req(nlh, cb->strict_check, &filter_mask,
5047 cb->extack);
5048 if (err < 0 && cb->strict_check)
5049 return err;
e5a55a89
JF
5050
5051 rcu_read_lock();
5052 for_each_netdev_rcu(net, dev) {
5053 const struct net_device_ops *ops = dev->netdev_ops;
898e5061 5054 struct net_device *br_dev = netdev_master_upper_dev_get(dev);
e5a55a89 5055
898e5061 5056 if (br_dev && br_dev->netdev_ops->ndo_bridge_getlink) {
d64f69b0
RP
5057 if (idx >= cb->args[0]) {
5058 err = br_dev->netdev_ops->ndo_bridge_getlink(
5059 skb, portid, seq, dev,
5060 filter_mask, NLM_F_MULTI);
f6c5775f
DA
5061 if (err < 0 && err != -EOPNOTSUPP) {
5062 if (likely(skb->len))
5063 break;
5064
5065 goto out_err;
5066 }
d64f69b0 5067 }
25b1e679 5068 idx++;
e5a55a89
JF
5069 }
5070
5071 if (ops->ndo_bridge_getlink) {
d64f69b0
RP
5072 if (idx >= cb->args[0]) {
5073 err = ops->ndo_bridge_getlink(skb, portid,
5074 seq, dev,
5075 filter_mask,
5076 NLM_F_MULTI);
f6c5775f
DA
5077 if (err < 0 && err != -EOPNOTSUPP) {
5078 if (likely(skb->len))
5079 break;
5080
5081 goto out_err;
5082 }
d64f69b0 5083 }
25b1e679 5084 idx++;
e5a55a89
JF
5085 }
5086 }
f6c5775f
DA
5087 err = skb->len;
5088out_err:
e5a55a89
JF
5089 rcu_read_unlock();
5090 cb->args[0] = idx;
5091
f6c5775f 5092 return err;
e5a55a89
JF
5093}
5094
2469ffd7
JF
5095static inline size_t bridge_nlmsg_size(void)
5096{
5097 return NLMSG_ALIGN(sizeof(struct ifinfomsg))
5098 + nla_total_size(IFNAMSIZ) /* IFLA_IFNAME */
5099 + nla_total_size(MAX_ADDR_LEN) /* IFLA_ADDRESS */
5100 + nla_total_size(sizeof(u32)) /* IFLA_MASTER */
5101 + nla_total_size(sizeof(u32)) /* IFLA_MTU */
5102 + nla_total_size(sizeof(u32)) /* IFLA_LINK */
5103 + nla_total_size(sizeof(u32)) /* IFLA_OPERSTATE */
5104 + nla_total_size(sizeof(u8)) /* IFLA_PROTINFO */
5105 + nla_total_size(sizeof(struct nlattr)) /* IFLA_AF_SPEC */
5106 + nla_total_size(sizeof(u16)) /* IFLA_BRIDGE_FLAGS */
5107 + nla_total_size(sizeof(u16)); /* IFLA_BRIDGE_MODE */
5108}
5109
02dba438 5110static int rtnl_bridge_notify(struct net_device *dev)
2469ffd7
JF
5111{
5112 struct net *net = dev_net(dev);
2469ffd7
JF
5113 struct sk_buff *skb;
5114 int err = -EOPNOTSUPP;
5115
02dba438
RP
5116 if (!dev->netdev_ops->ndo_bridge_getlink)
5117 return 0;
5118
2469ffd7
JF
5119 skb = nlmsg_new(bridge_nlmsg_size(), GFP_ATOMIC);
5120 if (!skb) {
5121 err = -ENOMEM;
5122 goto errout;
5123 }
5124
46c264da 5125 err = dev->netdev_ops->ndo_bridge_getlink(skb, 0, 0, dev, 0, 0);
02dba438
RP
5126 if (err < 0)
5127 goto errout;
2469ffd7 5128
d2e381c4
IS
5129 /* Notification info is only filled for bridge ports, not the bridge
5130 * device itself. Therefore, a zero notification length is valid and
5131 * should not result in an error.
5132 */
5133 if (!skb->len)
59ccaaaa
RP
5134 goto errout;
5135
2469ffd7
JF
5136 rtnl_notify(skb, net, 0, RTNLGRP_LINK, NULL, GFP_ATOMIC);
5137 return 0;
5138errout:
5139 WARN_ON(err == -EMSGSIZE);
5140 kfree_skb(skb);
59ccaaaa
RP
5141 if (err)
5142 rtnl_set_sk_err(net, RTNLGRP_LINK, err);
2469ffd7
JF
5143 return err;
5144}
5145
c21ef3e3
DA
5146static int rtnl_bridge_setlink(struct sk_buff *skb, struct nlmsghdr *nlh,
5147 struct netlink_ext_ack *extack)
e5a55a89
JF
5148{
5149 struct net *net = sock_net(skb->sk);
5150 struct ifinfomsg *ifm;
5151 struct net_device *dev;
743ad091 5152 struct nlattr *br_spec, *attr, *br_flags_attr = NULL;
2469ffd7 5153 int rem, err = -EOPNOTSUPP;
4de8b413 5154 u16 flags = 0;
e5a55a89
JF
5155
5156 if (nlmsg_len(nlh) < sizeof(*ifm))
5157 return -EINVAL;
5158
5159 ifm = nlmsg_data(nlh);
5160 if (ifm->ifi_family != AF_BRIDGE)
5161 return -EPFNOSUPPORT;
5162
5163 dev = __dev_get_by_index(net, ifm->ifi_index);
5164 if (!dev) {
b88d12e4 5165 NL_SET_ERR_MSG(extack, "unknown ifindex");
e5a55a89
JF
5166 return -ENODEV;
5167 }
5168
2469ffd7
JF
5169 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
5170 if (br_spec) {
5171 nla_for_each_nested(attr, br_spec, rem) {
743ad091 5172 if (nla_type(attr) == IFLA_BRIDGE_FLAGS && !br_flags_attr) {
6e8d1c55
TG
5173 if (nla_len(attr) < sizeof(flags))
5174 return -EINVAL;
5175
743ad091 5176 br_flags_attr = attr;
2469ffd7 5177 flags = nla_get_u16(attr);
d73ef2d6
LM
5178 }
5179
5180 if (nla_type(attr) == IFLA_BRIDGE_MODE) {
5181 if (nla_len(attr) < sizeof(u16))
5182 return -EINVAL;
2469ffd7
JF
5183 }
5184 }
5185 }
5186
5187 if (!flags || (flags & BRIDGE_FLAGS_MASTER)) {
898e5061
JP
5188 struct net_device *br_dev = netdev_master_upper_dev_get(dev);
5189
5190 if (!br_dev || !br_dev->netdev_ops->ndo_bridge_setlink) {
2469ffd7
JF
5191 err = -EOPNOTSUPP;
5192 goto out;
5193 }
5194
2fd527b7
PM
5195 err = br_dev->netdev_ops->ndo_bridge_setlink(dev, nlh, flags,
5196 extack);
e5a55a89
JF
5197 if (err)
5198 goto out;
2469ffd7
JF
5199
5200 flags &= ~BRIDGE_FLAGS_MASTER;
e5a55a89
JF
5201 }
5202
2469ffd7
JF
5203 if ((flags & BRIDGE_FLAGS_SELF)) {
5204 if (!dev->netdev_ops->ndo_bridge_setlink)
5205 err = -EOPNOTSUPP;
5206 else
add511b3 5207 err = dev->netdev_ops->ndo_bridge_setlink(dev, nlh,
2fd527b7
PM
5208 flags,
5209 extack);
02dba438 5210 if (!err) {
2469ffd7 5211 flags &= ~BRIDGE_FLAGS_SELF;
02dba438
RP
5212
5213 /* Generate event to notify upper layer of bridge
5214 * change
5215 */
5216 err = rtnl_bridge_notify(dev);
5217 }
2469ffd7 5218 }
e5a55a89 5219
743ad091
LM
5220 if (br_flags_attr)
5221 memcpy(nla_data(br_flags_attr), &flags, sizeof(flags));
e5a55a89
JF
5222out:
5223 return err;
5224}
5225
c21ef3e3
DA
5226static int rtnl_bridge_dellink(struct sk_buff *skb, struct nlmsghdr *nlh,
5227 struct netlink_ext_ack *extack)
407af329
VY
5228{
5229 struct net *net = sock_net(skb->sk);
5230 struct ifinfomsg *ifm;
5231 struct net_device *dev;
5232 struct nlattr *br_spec, *attr = NULL;
5233 int rem, err = -EOPNOTSUPP;
4de8b413 5234 u16 flags = 0;
407af329
VY
5235 bool have_flags = false;
5236
5237 if (nlmsg_len(nlh) < sizeof(*ifm))
5238 return -EINVAL;
5239
5240 ifm = nlmsg_data(nlh);
5241 if (ifm->ifi_family != AF_BRIDGE)
5242 return -EPFNOSUPPORT;
5243
5244 dev = __dev_get_by_index(net, ifm->ifi_index);
5245 if (!dev) {
b88d12e4 5246 NL_SET_ERR_MSG(extack, "unknown ifindex");
407af329
VY
5247 return -ENODEV;
5248 }
5249
5250 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
5251 if (br_spec) {
e8058a49
JB
5252 nla_for_each_nested_type(attr, IFLA_BRIDGE_FLAGS, br_spec,
5253 rem) {
5254 if (nla_len(attr) < sizeof(flags))
5255 return -EINVAL;
6e8d1c55 5256
e8058a49
JB
5257 have_flags = true;
5258 flags = nla_get_u16(attr);
5259 break;
407af329
VY
5260 }
5261 }
5262
407af329
VY
5263 if (!flags || (flags & BRIDGE_FLAGS_MASTER)) {
5264 struct net_device *br_dev = netdev_master_upper_dev_get(dev);
5265
5266 if (!br_dev || !br_dev->netdev_ops->ndo_bridge_dellink) {
5267 err = -EOPNOTSUPP;
5268 goto out;
5269 }
5270
add511b3 5271 err = br_dev->netdev_ops->ndo_bridge_dellink(dev, nlh, flags);
407af329
VY
5272 if (err)
5273 goto out;
5274
5275 flags &= ~BRIDGE_FLAGS_MASTER;
5276 }
5277
5278 if ((flags & BRIDGE_FLAGS_SELF)) {
5279 if (!dev->netdev_ops->ndo_bridge_dellink)
5280 err = -EOPNOTSUPP;
5281 else
add511b3
RP
5282 err = dev->netdev_ops->ndo_bridge_dellink(dev, nlh,
5283 flags);
407af329 5284
02dba438 5285 if (!err) {
407af329 5286 flags &= ~BRIDGE_FLAGS_SELF;
02dba438
RP
5287
5288 /* Generate event to notify upper layer of bridge
5289 * change
5290 */
5291 err = rtnl_bridge_notify(dev);
5292 }
407af329
VY
5293 }
5294
5295 if (have_flags)
5296 memcpy(nla_data(attr), &flags, sizeof(flags));
407af329
VY
5297out:
5298 return err;
5299}
5300
e8872a25
NA
5301static bool stats_attr_valid(unsigned int mask, int attrid, int idxattr)
5302{
5303 return (mask & IFLA_STATS_FILTER_BIT(attrid)) &&
5304 (!idxattr || idxattr == attrid);
5305}
5306
f6e0fb81
PM
5307static bool
5308rtnl_offload_xstats_have_ndo(const struct net_device *dev, int attr_id)
69ae6ad2 5309{
f6e0fb81
PM
5310 return dev->netdev_ops &&
5311 dev->netdev_ops->ndo_has_offload_stats &&
5312 dev->netdev_ops->ndo_get_offload_stats &&
5313 dev->netdev_ops->ndo_has_offload_stats(dev, attr_id);
5314}
69ae6ad2 5315
f6e0fb81
PM
5316static unsigned int
5317rtnl_offload_xstats_get_size_ndo(const struct net_device *dev, int attr_id)
5318{
5319 return rtnl_offload_xstats_have_ndo(dev, attr_id) ?
5320 sizeof(struct rtnl_link_stats64) : 0;
69ae6ad2
NF
5321}
5322
f6e0fb81
PM
5323static int
5324rtnl_offload_xstats_fill_ndo(struct net_device *dev, int attr_id,
5325 struct sk_buff *skb)
69ae6ad2 5326{
f6e0fb81 5327 unsigned int size = rtnl_offload_xstats_get_size_ndo(dev, attr_id);
69ae6ad2 5328 struct nlattr *attr = NULL;
69ae6ad2
NF
5329 void *attr_data;
5330 int err;
5331
f6e0fb81 5332 if (!size)
69ae6ad2
NF
5333 return -ENODATA;
5334
f6e0fb81
PM
5335 attr = nla_reserve_64bit(skb, attr_id, size,
5336 IFLA_OFFLOAD_XSTATS_UNSPEC);
5337 if (!attr)
5338 return -EMSGSIZE;
69ae6ad2 5339
f6e0fb81
PM
5340 attr_data = nla_data(attr);
5341 memset(attr_data, 0, size);
69ae6ad2 5342
f6e0fb81
PM
5343 err = dev->netdev_ops->ndo_get_offload_stats(attr_id, dev, attr_data);
5344 if (err)
5345 return err;
69ae6ad2 5346
f6e0fb81
PM
5347 return 0;
5348}
69ae6ad2 5349
0e7788fd
PM
5350static unsigned int
5351rtnl_offload_xstats_get_size_stats(const struct net_device *dev,
5352 enum netdev_offload_xstats_type type)
5353{
5354 bool enabled = netdev_offload_xstats_enabled(dev, type);
5355
5356 return enabled ? sizeof(struct rtnl_hw_stats64) : 0;
5357}
5358
5359struct rtnl_offload_xstats_request_used {
5360 bool request;
5361 bool used;
5362};
5363
5364static int
5365rtnl_offload_xstats_get_stats(struct net_device *dev,
5366 enum netdev_offload_xstats_type type,
5367 struct rtnl_offload_xstats_request_used *ru,
5368 struct rtnl_hw_stats64 *stats,
5369 struct netlink_ext_ack *extack)
5370{
5371 bool request;
5372 bool used;
5373 int err;
5374
5375 request = netdev_offload_xstats_enabled(dev, type);
5376 if (!request) {
5377 used = false;
5378 goto out;
5379 }
5380
5381 err = netdev_offload_xstats_get(dev, type, stats, &used, extack);
5382 if (err)
5383 return err;
5384
5385out:
5386 if (ru) {
5387 ru->request = request;
5388 ru->used = used;
5389 }
5390 return 0;
5391}
5392
5393static int
5394rtnl_offload_xstats_fill_hw_s_info_one(struct sk_buff *skb, int attr_id,
5395 struct rtnl_offload_xstats_request_used *ru)
5396{
5397 struct nlattr *nest;
5398
5399 nest = nla_nest_start(skb, attr_id);
5400 if (!nest)
5401 return -EMSGSIZE;
5402
5403 if (nla_put_u8(skb, IFLA_OFFLOAD_XSTATS_HW_S_INFO_REQUEST, ru->request))
5404 goto nla_put_failure;
5405
5406 if (nla_put_u8(skb, IFLA_OFFLOAD_XSTATS_HW_S_INFO_USED, ru->used))
5407 goto nla_put_failure;
5408
5409 nla_nest_end(skb, nest);
5410 return 0;
5411
5412nla_put_failure:
5413 nla_nest_cancel(skb, nest);
5414 return -EMSGSIZE;
5415}
5416
5417static int
5418rtnl_offload_xstats_fill_hw_s_info(struct sk_buff *skb, struct net_device *dev,
5419 struct netlink_ext_ack *extack)
5420{
5421 enum netdev_offload_xstats_type t_l3 = NETDEV_OFFLOAD_XSTATS_TYPE_L3;
5422 struct rtnl_offload_xstats_request_used ru_l3;
5423 struct nlattr *nest;
5424 int err;
5425
5426 err = rtnl_offload_xstats_get_stats(dev, t_l3, &ru_l3, NULL, extack);
5427 if (err)
5428 return err;
5429
5430 nest = nla_nest_start(skb, IFLA_OFFLOAD_XSTATS_HW_S_INFO);
5431 if (!nest)
5432 return -EMSGSIZE;
5433
5434 if (rtnl_offload_xstats_fill_hw_s_info_one(skb,
5435 IFLA_OFFLOAD_XSTATS_L3_STATS,
5436 &ru_l3))
5437 goto nla_put_failure;
5438
5439 nla_nest_end(skb, nest);
5440 return 0;
5441
5442nla_put_failure:
5443 nla_nest_cancel(skb, nest);
5444 return -EMSGSIZE;
5445}
5446
f6e0fb81 5447static int rtnl_offload_xstats_fill(struct sk_buff *skb, struct net_device *dev,
05415bcc
PM
5448 int *prividx, u32 off_filter_mask,
5449 struct netlink_ext_ack *extack)
f6e0fb81 5450{
0e7788fd
PM
5451 enum netdev_offload_xstats_type t_l3 = NETDEV_OFFLOAD_XSTATS_TYPE_L3;
5452 int attr_id_hw_s_info = IFLA_OFFLOAD_XSTATS_HW_S_INFO;
5453 int attr_id_l3_stats = IFLA_OFFLOAD_XSTATS_L3_STATS;
f6e0fb81
PM
5454 int attr_id_cpu_hit = IFLA_OFFLOAD_XSTATS_CPU_HIT;
5455 bool have_data = false;
5456 int err;
5457
46efc97b
PM
5458 if (*prividx <= attr_id_cpu_hit &&
5459 (off_filter_mask &
5460 IFLA_STATS_FILTER_BIT(attr_id_cpu_hit))) {
f6e0fb81
PM
5461 err = rtnl_offload_xstats_fill_ndo(dev, attr_id_cpu_hit, skb);
5462 if (!err) {
5463 have_data = true;
5464 } else if (err != -ENODATA) {
5465 *prividx = attr_id_cpu_hit;
5466 return err;
5467 }
69ae6ad2
NF
5468 }
5469
0e7788fd
PM
5470 if (*prividx <= attr_id_hw_s_info &&
5471 (off_filter_mask & IFLA_STATS_FILTER_BIT(attr_id_hw_s_info))) {
5472 *prividx = attr_id_hw_s_info;
5473
5474 err = rtnl_offload_xstats_fill_hw_s_info(skb, dev, extack);
5475 if (err)
5476 return err;
5477
5478 have_data = true;
5479 *prividx = 0;
5480 }
5481
5482 if (*prividx <= attr_id_l3_stats &&
5483 (off_filter_mask & IFLA_STATS_FILTER_BIT(attr_id_l3_stats))) {
5484 unsigned int size_l3;
5485 struct nlattr *attr;
5486
5487 *prividx = attr_id_l3_stats;
5488
5489 size_l3 = rtnl_offload_xstats_get_size_stats(dev, t_l3);
23cfe941
PM
5490 if (!size_l3)
5491 goto skip_l3_stats;
0e7788fd
PM
5492 attr = nla_reserve_64bit(skb, attr_id_l3_stats, size_l3,
5493 IFLA_OFFLOAD_XSTATS_UNSPEC);
5494 if (!attr)
5495 return -EMSGSIZE;
5496
5497 err = rtnl_offload_xstats_get_stats(dev, t_l3, NULL,
5498 nla_data(attr), extack);
5499 if (err)
5500 return err;
5501
5502 have_data = true;
23cfe941 5503skip_l3_stats:
0e7788fd
PM
5504 *prividx = 0;
5505 }
5506
f6e0fb81 5507 if (!have_data)
69ae6ad2
NF
5508 return -ENODATA;
5509
5510 *prividx = 0;
5511 return 0;
69ae6ad2
NF
5512}
5513
0e7788fd
PM
5514static unsigned int
5515rtnl_offload_xstats_get_size_hw_s_info_one(const struct net_device *dev,
5516 enum netdev_offload_xstats_type type)
5517{
0e7788fd
PM
5518 return nla_total_size(0) +
5519 /* IFLA_OFFLOAD_XSTATS_HW_S_INFO_REQUEST */
5520 nla_total_size(sizeof(u8)) +
5521 /* IFLA_OFFLOAD_XSTATS_HW_S_INFO_USED */
503930f8 5522 nla_total_size(sizeof(u8)) +
0e7788fd
PM
5523 0;
5524}
5525
5526static unsigned int
5527rtnl_offload_xstats_get_size_hw_s_info(const struct net_device *dev)
5528{
5529 enum netdev_offload_xstats_type t_l3 = NETDEV_OFFLOAD_XSTATS_TYPE_L3;
5530
5531 return nla_total_size(0) +
5532 /* IFLA_OFFLOAD_XSTATS_L3_STATS */
5533 rtnl_offload_xstats_get_size_hw_s_info_one(dev, t_l3) +
5534 0;
5535}
5536
46efc97b
PM
5537static int rtnl_offload_xstats_get_size(const struct net_device *dev,
5538 u32 off_filter_mask)
69ae6ad2 5539{
0e7788fd 5540 enum netdev_offload_xstats_type t_l3 = NETDEV_OFFLOAD_XSTATS_TYPE_L3;
f6e0fb81 5541 int attr_id_cpu_hit = IFLA_OFFLOAD_XSTATS_CPU_HIT;
69ae6ad2 5542 int nla_size = 0;
69ae6ad2
NF
5543 int size;
5544
46efc97b
PM
5545 if (off_filter_mask &
5546 IFLA_STATS_FILTER_BIT(attr_id_cpu_hit)) {
5547 size = rtnl_offload_xstats_get_size_ndo(dev, attr_id_cpu_hit);
5548 nla_size += nla_total_size_64bit(size);
5549 }
69ae6ad2 5550
0e7788fd
PM
5551 if (off_filter_mask &
5552 IFLA_STATS_FILTER_BIT(IFLA_OFFLOAD_XSTATS_HW_S_INFO))
5553 nla_size += rtnl_offload_xstats_get_size_hw_s_info(dev);
5554
5555 if (off_filter_mask &
5556 IFLA_STATS_FILTER_BIT(IFLA_OFFLOAD_XSTATS_L3_STATS)) {
5557 size = rtnl_offload_xstats_get_size_stats(dev, t_l3);
5558 nla_size += nla_total_size_64bit(size);
5559 }
5560
69ae6ad2
NF
5561 if (nla_size != 0)
5562 nla_size += nla_total_size(0);
5563
5564 return nla_size;
5565}
5566
46efc97b
PM
5567struct rtnl_stats_dump_filters {
5568 /* mask[0] filters outer attributes. Then individual nests have their
5569 * filtering mask at the index of the nested attribute.
5570 */
5571 u32 mask[IFLA_STATS_MAX + 1];
5572};
5573
10c9ead9
RP
5574static int rtnl_fill_statsinfo(struct sk_buff *skb, struct net_device *dev,
5575 int type, u32 pid, u32 seq, u32 change,
46efc97b
PM
5576 unsigned int flags,
5577 const struct rtnl_stats_dump_filters *filters,
05415bcc
PM
5578 int *idxattr, int *prividx,
5579 struct netlink_ext_ack *extack)
10c9ead9 5580{
46efc97b 5581 unsigned int filter_mask = filters->mask[0];
10c9ead9
RP
5582 struct if_stats_msg *ifsm;
5583 struct nlmsghdr *nlh;
5584 struct nlattr *attr;
e8872a25 5585 int s_prividx = *prividx;
69ae6ad2 5586 int err;
10c9ead9
RP
5587
5588 ASSERT_RTNL();
5589
5590 nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ifsm), flags);
5591 if (!nlh)
5592 return -EMSGSIZE;
5593
5594 ifsm = nlmsg_data(nlh);
ce024f42
NA
5595 ifsm->family = PF_UNSPEC;
5596 ifsm->pad1 = 0;
5597 ifsm->pad2 = 0;
10c9ead9
RP
5598 ifsm->ifindex = dev->ifindex;
5599 ifsm->filter_mask = filter_mask;
5600
e8872a25 5601 if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_64, *idxattr)) {
10c9ead9 5602 struct rtnl_link_stats64 *sp;
10c9ead9 5603
58414d32
ND
5604 attr = nla_reserve_64bit(skb, IFLA_STATS_LINK_64,
5605 sizeof(struct rtnl_link_stats64),
5606 IFLA_STATS_UNSPEC);
216e6906
PM
5607 if (!attr) {
5608 err = -EMSGSIZE;
10c9ead9 5609 goto nla_put_failure;
216e6906 5610 }
10c9ead9
RP
5611
5612 sp = nla_data(attr);
5613 dev_get_stats(dev, sp);
5614 }
5615
97a47fac
NA
5616 if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_XSTATS, *idxattr)) {
5617 const struct rtnl_link_ops *ops = dev->rtnl_link_ops;
5618
5619 if (ops && ops->fill_linkxstats) {
97a47fac 5620 *idxattr = IFLA_STATS_LINK_XSTATS;
ae0be8de
MK
5621 attr = nla_nest_start_noflag(skb,
5622 IFLA_STATS_LINK_XSTATS);
216e6906
PM
5623 if (!attr) {
5624 err = -EMSGSIZE;
97a47fac 5625 goto nla_put_failure;
216e6906 5626 }
97a47fac 5627
80e73cc5
NA
5628 err = ops->fill_linkxstats(skb, dev, prividx, *idxattr);
5629 nla_nest_end(skb, attr);
5630 if (err)
5631 goto nla_put_failure;
5632 *idxattr = 0;
5633 }
5634 }
5635
5636 if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_XSTATS_SLAVE,
5637 *idxattr)) {
5638 const struct rtnl_link_ops *ops = NULL;
5639 const struct net_device *master;
5640
5641 master = netdev_master_upper_dev_get(dev);
5642 if (master)
5643 ops = master->rtnl_link_ops;
5644 if (ops && ops->fill_linkxstats) {
80e73cc5 5645 *idxattr = IFLA_STATS_LINK_XSTATS_SLAVE;
ae0be8de
MK
5646 attr = nla_nest_start_noflag(skb,
5647 IFLA_STATS_LINK_XSTATS_SLAVE);
216e6906
PM
5648 if (!attr) {
5649 err = -EMSGSIZE;
80e73cc5 5650 goto nla_put_failure;
216e6906 5651 }
80e73cc5
NA
5652
5653 err = ops->fill_linkxstats(skb, dev, prividx, *idxattr);
97a47fac
NA
5654 nla_nest_end(skb, attr);
5655 if (err)
5656 goto nla_put_failure;
5657 *idxattr = 0;
5658 }
5659 }
5660
69ae6ad2
NF
5661 if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_OFFLOAD_XSTATS,
5662 *idxattr)) {
46efc97b
PM
5663 u32 off_filter_mask;
5664
5665 off_filter_mask = filters->mask[IFLA_STATS_LINK_OFFLOAD_XSTATS];
69ae6ad2 5666 *idxattr = IFLA_STATS_LINK_OFFLOAD_XSTATS;
ae0be8de
MK
5667 attr = nla_nest_start_noflag(skb,
5668 IFLA_STATS_LINK_OFFLOAD_XSTATS);
216e6906
PM
5669 if (!attr) {
5670 err = -EMSGSIZE;
69ae6ad2 5671 goto nla_put_failure;
216e6906 5672 }
69ae6ad2 5673
46efc97b 5674 err = rtnl_offload_xstats_fill(skb, dev, prividx,
05415bcc 5675 off_filter_mask, extack);
69ae6ad2
NF
5676 if (err == -ENODATA)
5677 nla_nest_cancel(skb, attr);
5678 else
5679 nla_nest_end(skb, attr);
5680
5681 if (err && err != -ENODATA)
5682 goto nla_put_failure;
5683 *idxattr = 0;
5684 }
5685
aefb4d4a
RS
5686 if (stats_attr_valid(filter_mask, IFLA_STATS_AF_SPEC, *idxattr)) {
5687 struct rtnl_af_ops *af_ops;
5688
5689 *idxattr = IFLA_STATS_AF_SPEC;
ae0be8de 5690 attr = nla_nest_start_noflag(skb, IFLA_STATS_AF_SPEC);
216e6906
PM
5691 if (!attr) {
5692 err = -EMSGSIZE;
aefb4d4a 5693 goto nla_put_failure;
216e6906 5694 }
aefb4d4a 5695
5fa85a09
FW
5696 rcu_read_lock();
5697 list_for_each_entry_rcu(af_ops, &rtnl_af_ops, list) {
aefb4d4a
RS
5698 if (af_ops->fill_stats_af) {
5699 struct nlattr *af;
aefb4d4a 5700
ae0be8de
MK
5701 af = nla_nest_start_noflag(skb,
5702 af_ops->family);
5fa85a09
FW
5703 if (!af) {
5704 rcu_read_unlock();
57d29a29 5705 err = -EMSGSIZE;
aefb4d4a 5706 goto nla_put_failure;
5fa85a09 5707 }
aefb4d4a
RS
5708 err = af_ops->fill_stats_af(skb, dev);
5709
5fa85a09 5710 if (err == -ENODATA) {
aefb4d4a 5711 nla_nest_cancel(skb, af);
5fa85a09
FW
5712 } else if (err < 0) {
5713 rcu_read_unlock();
aefb4d4a 5714 goto nla_put_failure;
5fa85a09 5715 }
aefb4d4a
RS
5716
5717 nla_nest_end(skb, af);
5718 }
5719 }
5fa85a09 5720 rcu_read_unlock();
aefb4d4a
RS
5721
5722 nla_nest_end(skb, attr);
5723
5724 *idxattr = 0;
5725 }
5726
10c9ead9
RP
5727 nlmsg_end(skb, nlh);
5728
5729 return 0;
5730
5731nla_put_failure:
e8872a25
NA
5732 /* not a multi message or no progress mean a real error */
5733 if (!(flags & NLM_F_MULTI) || s_prividx == *prividx)
5734 nlmsg_cancel(skb, nlh);
5735 else
5736 nlmsg_end(skb, nlh);
10c9ead9 5737
216e6906 5738 return err;
10c9ead9
RP
5739}
5740
10c9ead9 5741static size_t if_nlmsg_stats_size(const struct net_device *dev,
46efc97b 5742 const struct rtnl_stats_dump_filters *filters)
10c9ead9 5743{
d3436799 5744 size_t size = NLMSG_ALIGN(sizeof(struct if_stats_msg));
46efc97b 5745 unsigned int filter_mask = filters->mask[0];
10c9ead9 5746
e8872a25 5747 if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_64, 0))
10c9ead9
RP
5748 size += nla_total_size_64bit(sizeof(struct rtnl_link_stats64));
5749
97a47fac
NA
5750 if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_XSTATS, 0)) {
5751 const struct rtnl_link_ops *ops = dev->rtnl_link_ops;
80e73cc5 5752 int attr = IFLA_STATS_LINK_XSTATS;
97a47fac
NA
5753
5754 if (ops && ops->get_linkxstats_size) {
80e73cc5
NA
5755 size += nla_total_size(ops->get_linkxstats_size(dev,
5756 attr));
97a47fac
NA
5757 /* for IFLA_STATS_LINK_XSTATS */
5758 size += nla_total_size(0);
5759 }
5760 }
5761
80e73cc5
NA
5762 if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_XSTATS_SLAVE, 0)) {
5763 struct net_device *_dev = (struct net_device *)dev;
5764 const struct rtnl_link_ops *ops = NULL;
5765 const struct net_device *master;
5766
5767 /* netdev_master_upper_dev_get can't take const */
5768 master = netdev_master_upper_dev_get(_dev);
5769 if (master)
5770 ops = master->rtnl_link_ops;
5771 if (ops && ops->get_linkxstats_size) {
5772 int attr = IFLA_STATS_LINK_XSTATS_SLAVE;
5773
5774 size += nla_total_size(ops->get_linkxstats_size(dev,
5775 attr));
5776 /* for IFLA_STATS_LINK_XSTATS_SLAVE */
5777 size += nla_total_size(0);
5778 }
5779 }
5780
46efc97b
PM
5781 if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_OFFLOAD_XSTATS, 0)) {
5782 u32 off_filter_mask;
5783
5784 off_filter_mask = filters->mask[IFLA_STATS_LINK_OFFLOAD_XSTATS];
5785 size += rtnl_offload_xstats_get_size(dev, off_filter_mask);
5786 }
69ae6ad2 5787
aefb4d4a
RS
5788 if (stats_attr_valid(filter_mask, IFLA_STATS_AF_SPEC, 0)) {
5789 struct rtnl_af_ops *af_ops;
5790
5791 /* for IFLA_STATS_AF_SPEC */
5792 size += nla_total_size(0);
5793
5fa85a09
FW
5794 rcu_read_lock();
5795 list_for_each_entry_rcu(af_ops, &rtnl_af_ops, list) {
aefb4d4a
RS
5796 if (af_ops->get_stats_af_size) {
5797 size += nla_total_size(
5798 af_ops->get_stats_af_size(dev));
5799
5800 /* for AF_* */
5801 size += nla_total_size(0);
5802 }
5803 }
5fa85a09 5804 rcu_read_unlock();
aefb4d4a
RS
5805 }
5806
10c9ead9
RP
5807 return size;
5808}
5809
46efc97b
PM
5810#define RTNL_STATS_OFFLOAD_XSTATS_VALID ((1 << __IFLA_OFFLOAD_XSTATS_MAX) - 1)
5811
5812static const struct nla_policy
5813rtnl_stats_get_policy_filters[IFLA_STATS_MAX + 1] = {
5814 [IFLA_STATS_LINK_OFFLOAD_XSTATS] =
5815 NLA_POLICY_MASK(NLA_U32, RTNL_STATS_OFFLOAD_XSTATS_VALID),
5816};
5817
5818static const struct nla_policy
5819rtnl_stats_get_policy[IFLA_STATS_GETSET_MAX + 1] = {
5820 [IFLA_STATS_GET_FILTERS] =
5821 NLA_POLICY_NESTED(rtnl_stats_get_policy_filters),
5822};
5823
03ba3566
PM
5824static const struct nla_policy
5825ifla_stats_set_policy[IFLA_STATS_GETSET_MAX + 1] = {
5fd0b838 5826 [IFLA_STATS_SET_OFFLOAD_XSTATS_L3_STATS] = NLA_POLICY_MAX(NLA_U8, 1),
03ba3566
PM
5827};
5828
46efc97b
PM
5829static int rtnl_stats_get_parse_filters(struct nlattr *ifla_filters,
5830 struct rtnl_stats_dump_filters *filters,
5831 struct netlink_ext_ack *extack)
5832{
5833 struct nlattr *tb[IFLA_STATS_MAX + 1];
5834 int err;
5835 int at;
5836
5837 err = nla_parse_nested(tb, IFLA_STATS_MAX, ifla_filters,
5838 rtnl_stats_get_policy_filters, extack);
5839 if (err < 0)
5840 return err;
5841
5842 for (at = 1; at <= IFLA_STATS_MAX; at++) {
5843 if (tb[at]) {
5844 if (!(filters->mask[0] & IFLA_STATS_FILTER_BIT(at))) {
5845 NL_SET_ERR_MSG(extack, "Filtered attribute not enabled in filter_mask");
5846 return -EINVAL;
5847 }
5848 filters->mask[at] = nla_get_u32(tb[at]);
5849 }
5850 }
5851
5852 return 0;
5853}
5854
5855static int rtnl_stats_get_parse(const struct nlmsghdr *nlh,
5856 u32 filter_mask,
5857 struct rtnl_stats_dump_filters *filters,
5858 struct netlink_ext_ack *extack)
5859{
5860 struct nlattr *tb[IFLA_STATS_GETSET_MAX + 1];
5861 int err;
5862 int i;
5863
5864 filters->mask[0] = filter_mask;
5865 for (i = 1; i < ARRAY_SIZE(filters->mask); i++)
5866 filters->mask[i] = -1U;
5867
5868 err = nlmsg_parse(nlh, sizeof(struct if_stats_msg), tb,
5869 IFLA_STATS_GETSET_MAX, rtnl_stats_get_policy, extack);
5870 if (err < 0)
5871 return err;
5872
5873 if (tb[IFLA_STATS_GET_FILTERS]) {
5874 err = rtnl_stats_get_parse_filters(tb[IFLA_STATS_GET_FILTERS],
5875 filters, extack);
5876 if (err)
5877 return err;
5878 }
5879
5880 return 0;
5881}
5882
51bc860d
JK
5883static int rtnl_valid_stats_req(const struct nlmsghdr *nlh, bool strict_check,
5884 bool is_dump, struct netlink_ext_ack *extack)
5885{
5886 struct if_stats_msg *ifsm;
5887
69f23a09 5888 if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ifsm))) {
51bc860d
JK
5889 NL_SET_ERR_MSG(extack, "Invalid header for stats dump");
5890 return -EINVAL;
5891 }
5892
5893 if (!strict_check)
5894 return 0;
5895
5896 ifsm = nlmsg_data(nlh);
5897
5898 /* only requests using strict checks can pass data to influence
5899 * the dump. The legacy exception is filter_mask.
5900 */
5901 if (ifsm->pad1 || ifsm->pad2 || (is_dump && ifsm->ifindex)) {
5902 NL_SET_ERR_MSG(extack, "Invalid values in header for stats dump request");
5903 return -EINVAL;
5904 }
6300acb2
JK
5905 if (ifsm->filter_mask >= IFLA_STATS_FILTER_BIT(IFLA_STATS_MAX + 1)) {
5906 NL_SET_ERR_MSG(extack, "Invalid stats requested through filter mask");
5907 return -EINVAL;
5908 }
51bc860d
JK
5909
5910 return 0;
5911}
5912
c21ef3e3
DA
5913static int rtnl_stats_get(struct sk_buff *skb, struct nlmsghdr *nlh,
5914 struct netlink_ext_ack *extack)
10c9ead9 5915{
46efc97b 5916 struct rtnl_stats_dump_filters filters;
10c9ead9 5917 struct net *net = sock_net(skb->sk);
10c9ead9 5918 struct net_device *dev = NULL;
e8872a25
NA
5919 int idxattr = 0, prividx = 0;
5920 struct if_stats_msg *ifsm;
10c9ead9 5921 struct sk_buff *nskb;
10c9ead9
RP
5922 int err;
5923
51bc860d
JK
5924 err = rtnl_valid_stats_req(nlh, netlink_strict_get_check(skb),
5925 false, extack);
5926 if (err)
5927 return err;
4775cc1f 5928
10c9ead9
RP
5929 ifsm = nlmsg_data(nlh);
5930 if (ifsm->ifindex > 0)
5931 dev = __dev_get_by_index(net, ifsm->ifindex);
5932 else
5933 return -EINVAL;
5934
5935 if (!dev)
5936 return -ENODEV;
5937
46efc97b 5938 if (!ifsm->filter_mask) {
22b67d17 5939 NL_SET_ERR_MSG(extack, "Filter mask must be set for stats get");
10c9ead9 5940 return -EINVAL;
22b67d17 5941 }
10c9ead9 5942
46efc97b
PM
5943 err = rtnl_stats_get_parse(nlh, ifsm->filter_mask, &filters, extack);
5944 if (err)
5945 return err;
5946
5947 nskb = nlmsg_new(if_nlmsg_stats_size(dev, &filters), GFP_KERNEL);
10c9ead9
RP
5948 if (!nskb)
5949 return -ENOBUFS;
5950
5951 err = rtnl_fill_statsinfo(nskb, dev, RTM_NEWSTATS,
5952 NETLINK_CB(skb).portid, nlh->nlmsg_seq, 0,
05415bcc 5953 0, &filters, &idxattr, &prividx, extack);
10c9ead9
RP
5954 if (err < 0) {
5955 /* -EMSGSIZE implies BUG in if_nlmsg_stats_size */
5956 WARN_ON(err == -EMSGSIZE);
5957 kfree_skb(nskb);
5958 } else {
5959 err = rtnl_unicast(nskb, net, NETLINK_CB(skb).portid);
5960 }
5961
5962 return err;
5963}
5964
5965static int rtnl_stats_dump(struct sk_buff *skb, struct netlink_callback *cb)
5966{
841891ec 5967 struct netlink_ext_ack *extack = cb->extack;
46efc97b 5968 struct rtnl_stats_dump_filters filters;
10c9ead9 5969 struct net *net = sock_net(skb->sk);
e8872a25 5970 unsigned int flags = NLM_F_MULTI;
10c9ead9 5971 struct if_stats_msg *ifsm;
0feb396f
ED
5972 struct {
5973 unsigned long ifindex;
5974 int idxattr;
5975 int prividx;
5976 } *ctx = (void *)cb->ctx;
e8872a25 5977 struct net_device *dev;
0feb396f 5978 int err;
10c9ead9
RP
5979
5980 cb->seq = net->dev_base_seq;
5981
51bc860d
JK
5982 err = rtnl_valid_stats_req(cb->nlh, cb->strict_check, true, extack);
5983 if (err)
5984 return err;
4775cc1f 5985
10c9ead9 5986 ifsm = nlmsg_data(cb->nlh);
46efc97b 5987 if (!ifsm->filter_mask) {
841891ec 5988 NL_SET_ERR_MSG(extack, "Filter mask must be set for stats dump");
10c9ead9 5989 return -EINVAL;
841891ec 5990 }
10c9ead9 5991
46efc97b
PM
5992 err = rtnl_stats_get_parse(cb->nlh, ifsm->filter_mask, &filters,
5993 extack);
5994 if (err)
5995 return err;
5996
0feb396f
ED
5997 for_each_netdev_dump(net, dev, ctx->ifindex) {
5998 err = rtnl_fill_statsinfo(skb, dev, RTM_NEWSTATS,
5999 NETLINK_CB(cb->skb).portid,
6000 cb->nlh->nlmsg_seq, 0,
6001 flags, &filters,
6002 &ctx->idxattr, &ctx->prividx,
6003 extack);
6004 /* If we ran out of room on the first message,
6005 * we're in trouble.
6006 */
6007 WARN_ON((err == -EMSGSIZE) && (skb->len == 0));
10c9ead9 6008
0feb396f
ED
6009 if (err < 0)
6010 break;
6011 ctx->prividx = 0;
6012 ctx->idxattr = 0;
6013 nl_dump_check_consistent(cb, nlmsg_hdr(skb));
10c9ead9 6014 }
10c9ead9 6015
136c2a9a 6016 return err;
10c9ead9
RP
6017}
6018
5fd0b838
PM
6019void rtnl_offload_xstats_notify(struct net_device *dev)
6020{
6021 struct rtnl_stats_dump_filters response_filters = {};
6022 struct net *net = dev_net(dev);
6023 int idxattr = 0, prividx = 0;
6024 struct sk_buff *skb;
6025 int err = -ENOBUFS;
6026
6027 ASSERT_RTNL();
6028
6029 response_filters.mask[0] |=
6030 IFLA_STATS_FILTER_BIT(IFLA_STATS_LINK_OFFLOAD_XSTATS);
6031 response_filters.mask[IFLA_STATS_LINK_OFFLOAD_XSTATS] |=
6032 IFLA_STATS_FILTER_BIT(IFLA_OFFLOAD_XSTATS_HW_S_INFO);
6033
6034 skb = nlmsg_new(if_nlmsg_stats_size(dev, &response_filters),
6035 GFP_KERNEL);
6036 if (!skb)
6037 goto errout;
6038
6039 err = rtnl_fill_statsinfo(skb, dev, RTM_NEWSTATS, 0, 0, 0, 0,
6040 &response_filters, &idxattr, &prividx, NULL);
6041 if (err < 0) {
6042 kfree_skb(skb);
6043 goto errout;
6044 }
6045
6046 rtnl_notify(skb, net, 0, RTNLGRP_STATS, NULL, GFP_KERNEL);
6047 return;
6048
6049errout:
6050 rtnl_set_sk_err(net, RTNLGRP_STATS, err);
6051}
6052EXPORT_SYMBOL(rtnl_offload_xstats_notify);
6053
03ba3566
PM
6054static int rtnl_stats_set(struct sk_buff *skb, struct nlmsghdr *nlh,
6055 struct netlink_ext_ack *extack)
6056{
5fd0b838 6057 enum netdev_offload_xstats_type t_l3 = NETDEV_OFFLOAD_XSTATS_TYPE_L3;
03ba3566
PM
6058 struct rtnl_stats_dump_filters response_filters = {};
6059 struct nlattr *tb[IFLA_STATS_GETSET_MAX + 1];
6060 struct net *net = sock_net(skb->sk);
6061 struct net_device *dev = NULL;
03ba3566 6062 struct if_stats_msg *ifsm;
5fd0b838 6063 bool notify = false;
03ba3566
PM
6064 int err;
6065
6066 err = rtnl_valid_stats_req(nlh, netlink_strict_get_check(skb),
6067 false, extack);
6068 if (err)
6069 return err;
6070
6071 ifsm = nlmsg_data(nlh);
6072 if (ifsm->family != AF_UNSPEC) {
6073 NL_SET_ERR_MSG(extack, "Address family should be AF_UNSPEC");
6074 return -EINVAL;
6075 }
6076
6077 if (ifsm->ifindex > 0)
6078 dev = __dev_get_by_index(net, ifsm->ifindex);
6079 else
6080 return -EINVAL;
6081
6082 if (!dev)
6083 return -ENODEV;
6084
6085 if (ifsm->filter_mask) {
6086 NL_SET_ERR_MSG(extack, "Filter mask must be 0 for stats set");
6087 return -EINVAL;
6088 }
6089
6090 err = nlmsg_parse(nlh, sizeof(*ifsm), tb, IFLA_STATS_GETSET_MAX,
6091 ifla_stats_set_policy, extack);
6092 if (err < 0)
6093 return err;
6094
5fd0b838
PM
6095 if (tb[IFLA_STATS_SET_OFFLOAD_XSTATS_L3_STATS]) {
6096 u8 req = nla_get_u8(tb[IFLA_STATS_SET_OFFLOAD_XSTATS_L3_STATS]);
03ba3566 6097
5fd0b838
PM
6098 if (req)
6099 err = netdev_offload_xstats_enable(dev, t_l3, extack);
6100 else
6101 err = netdev_offload_xstats_disable(dev, t_l3);
6102
6103 if (!err)
6104 notify = true;
6105 else if (err != -EALREADY)
6106 return err;
6107
6108 response_filters.mask[0] |=
6109 IFLA_STATS_FILTER_BIT(IFLA_STATS_LINK_OFFLOAD_XSTATS);
6110 response_filters.mask[IFLA_STATS_LINK_OFFLOAD_XSTATS] |=
6111 IFLA_STATS_FILTER_BIT(IFLA_OFFLOAD_XSTATS_HW_S_INFO);
03ba3566
PM
6112 }
6113
5fd0b838
PM
6114 if (notify)
6115 rtnl_offload_xstats_notify(dev);
6116
6117 return 0;
03ba3566
PM
6118}
6119
cc7f5022
IS
6120static int rtnl_mdb_valid_dump_req(const struct nlmsghdr *nlh,
6121 struct netlink_ext_ack *extack)
6122{
6123 struct br_port_msg *bpm;
6124
6125 if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*bpm))) {
6126 NL_SET_ERR_MSG(extack, "Invalid header for mdb dump request");
6127 return -EINVAL;
6128 }
6129
6130 bpm = nlmsg_data(nlh);
6131 if (bpm->ifindex) {
6132 NL_SET_ERR_MSG(extack, "Filtering by device index is not supported for mdb dump request");
6133 return -EINVAL;
6134 }
6135 if (nlmsg_attrlen(nlh, sizeof(*bpm))) {
6136 NL_SET_ERR_MSG(extack, "Invalid data after header in mdb dump request");
6137 return -EINVAL;
6138 }
6139
6140 return 0;
6141}
6142
/* Per-dump state kept in netlink_callback::ctx by rtnl_mdb_dump():
 * @idx is the position in the namespace's device list at which the
 * next dump instalment should resume.
 */
struct rtnl_mdb_dump_ctx {
	long idx;
};
6146
/* RTM_GETMDB dumpit handler: walk every device in the namespace and
 * let each one that implements ->ndo_mdb_dump() append its MDB entries
 * to @skb.  The index of the next device to visit is stored in
 * cb->ctx so a multi-part dump resumes where the previous part ended.
 */
static int rtnl_mdb_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct rtnl_mdb_dump_ctx *ctx = (void *)cb->ctx;
	struct net *net = sock_net(skb->sk);
	struct net_device *dev;
	int idx, s_idx;
	int err;

	/* Compile-time guarantee that our context fits in cb->ctx. */
	NL_ASSERT_DUMP_CTX_FITS(struct rtnl_mdb_dump_ctx);

	if (cb->strict_check) {
		err = rtnl_mdb_valid_dump_req(cb->nlh, cb->extack);
		if (err)
			return err;
	}

	s_idx = ctx->idx;
	idx = 0;

	for_each_netdev(net, dev) {
		/* Fast-forward past devices finished in earlier parts. */
		if (idx < s_idx)
			goto skip;
		if (!dev->netdev_ops->ndo_mdb_dump)
			goto skip;

		err = dev->netdev_ops->ndo_mdb_dump(dev, skb, cb);
		/* Out of room in @skb: stop here and resume from this
		 * device (idx not yet incremented) on the next call.
		 */
		if (err == -EMSGSIZE)
			goto out;
		/* Moving on to next device, reset markers and sequence
		 * counters since they are all maintained per-device.
		 */
		memset(cb->ctx, 0, sizeof(cb->ctx));
		cb->prev_seq = 0;
		cb->seq = 0;
skip:
		idx++;
	}

out:
	ctx->idx = idx;
	return skb->len;
}
6189
ddd17a54
IS
6190static int rtnl_validate_mdb_entry_get(const struct nlattr *attr,
6191 struct netlink_ext_ack *extack)
6192{
6193 struct br_mdb_entry *entry = nla_data(attr);
6194
6195 if (nla_len(attr) != sizeof(struct br_mdb_entry)) {
6196 NL_SET_ERR_MSG_ATTR(extack, attr, "Invalid attribute length");
6197 return -EINVAL;
6198 }
6199
6200 if (entry->ifindex) {
6201 NL_SET_ERR_MSG(extack, "Entry ifindex cannot be specified");
6202 return -EINVAL;
6203 }
6204
6205 if (entry->state) {
6206 NL_SET_ERR_MSG(extack, "Entry state cannot be specified");
6207 return -EINVAL;
6208 }
6209
6210 if (entry->flags) {
6211 NL_SET_ERR_MSG(extack, "Entry flags cannot be specified");
6212 return -EINVAL;
6213 }
6214
6215 if (entry->vid >= VLAN_VID_MASK) {
6216 NL_SET_ERR_MSG(extack, "Invalid entry VLAN id");
6217 return -EINVAL;
6218 }
6219
6220 if (entry->addr.proto != htons(ETH_P_IP) &&
6221 entry->addr.proto != htons(ETH_P_IPV6) &&
6222 entry->addr.proto != 0) {
6223 NL_SET_ERR_MSG(extack, "Unknown entry protocol");
6224 return -EINVAL;
6225 }
6226
6227 return 0;
6228}
6229
/* Attribute policy for RTM_GETMDB requests: the entry is a fixed-size
 * binary blob checked by rtnl_validate_mdb_entry_get().
 */
static const struct nla_policy mdba_get_policy[MDBA_GET_ENTRY_MAX + 1] = {
	[MDBA_GET_ENTRY] = NLA_POLICY_VALIDATE_FN(NLA_BINARY,
						  rtnl_validate_mdb_entry_get,
						  sizeof(struct br_mdb_entry)),
	[MDBA_GET_ENTRY_ATTRS] = { .type = NLA_NESTED },
};
6236
6237static int rtnl_mdb_get(struct sk_buff *in_skb, struct nlmsghdr *nlh,
6238 struct netlink_ext_ack *extack)
6239{
6240 struct nlattr *tb[MDBA_GET_ENTRY_MAX + 1];
6241 struct net *net = sock_net(in_skb->sk);
6242 struct br_port_msg *bpm;
6243 struct net_device *dev;
6244 int err;
6245
6246 err = nlmsg_parse(nlh, sizeof(struct br_port_msg), tb,
6247 MDBA_GET_ENTRY_MAX, mdba_get_policy, extack);
6248 if (err)
6249 return err;
6250
6251 bpm = nlmsg_data(nlh);
6252 if (!bpm->ifindex) {
6253 NL_SET_ERR_MSG(extack, "Invalid ifindex");
6254 return -EINVAL;
6255 }
6256
6257 dev = __dev_get_by_index(net, bpm->ifindex);
6258 if (!dev) {
6259 NL_SET_ERR_MSG(extack, "Device doesn't exist");
6260 return -ENODEV;
6261 }
6262
6263 if (NL_REQ_ATTR_CHECK(extack, NULL, tb, MDBA_GET_ENTRY)) {
6264 NL_SET_ERR_MSG(extack, "Missing MDBA_GET_ENTRY attribute");
6265 return -EINVAL;
6266 }
6267
6268 if (!dev->netdev_ops->ndo_mdb_get) {
6269 NL_SET_ERR_MSG(extack, "Device does not support MDB operations");
6270 return -EOPNOTSUPP;
6271 }
6272
6273 return dev->netdev_ops->ndo_mdb_get(dev, tb, NETLINK_CB(in_skb).portid,
6274 nlh->nlmsg_seq, extack);
6275}
6276
cc7f5022
IS
/* Policy callback validating the MDBA_SET_ENTRY attribute of
 * RTM_NEWMDB/RTM_DELMDB requests.  An entry must name a port device
 * and carry either an IPv4/IPv6 multicast group address or, when
 * addr.proto is zero, an L2 multicast MAC address.
 */
static int rtnl_validate_mdb_entry(const struct nlattr *attr,
				   struct netlink_ext_ack *extack)
{
	struct br_mdb_entry *entry = nla_data(attr);

	if (nla_len(attr) != sizeof(struct br_mdb_entry)) {
		NL_SET_ERR_MSG_ATTR(extack, attr, "Invalid attribute length");
		return -EINVAL;
	}

	if (entry->ifindex == 0) {
		NL_SET_ERR_MSG(extack, "Zero entry ifindex is not allowed");
		return -EINVAL;
	}

	if (entry->addr.proto == htons(ETH_P_IP)) {
		/* 0.0.0.0 is tolerated alongside real multicast groups;
		 * link-local (224.0.0.x) groups are rejected.
		 */
		if (!ipv4_is_multicast(entry->addr.u.ip4) &&
		    !ipv4_is_zeronet(entry->addr.u.ip4)) {
			NL_SET_ERR_MSG(extack, "IPv4 entry group address is not multicast or 0.0.0.0");
			return -EINVAL;
		}
		if (ipv4_is_local_multicast(entry->addr.u.ip4)) {
			NL_SET_ERR_MSG(extack, "IPv4 entry group address is local multicast");
			return -EINVAL;
		}
#if IS_ENABLED(CONFIG_IPV6)
	} else if (entry->addr.proto == htons(ETH_P_IPV6)) {
		/* Without CONFIG_IPV6 this branch is absent, so IPv6
		 * entries fall through to the "Unknown entry protocol"
		 * rejection below.
		 */
		if (ipv6_addr_is_ll_all_nodes(&entry->addr.u.ip6)) {
			NL_SET_ERR_MSG(extack, "IPv6 entry group address is link-local all nodes");
			return -EINVAL;
		}
#endif
	} else if (entry->addr.proto == 0) {
		/* L2 mdb */
		if (!is_multicast_ether_addr(entry->addr.u.mac_addr)) {
			NL_SET_ERR_MSG(extack, "L2 entry group is not multicast");
			return -EINVAL;
		}
	} else {
		NL_SET_ERR_MSG(extack, "Unknown entry protocol");
		return -EINVAL;
	}

	if (entry->state != MDB_PERMANENT && entry->state != MDB_TEMPORARY) {
		NL_SET_ERR_MSG(extack, "Unknown entry state");
		return -EINVAL;
	}
	if (entry->vid >= VLAN_VID_MASK) {
		NL_SET_ERR_MSG(extack, "Invalid entry VLAN id");
		return -EINVAL;
	}

	return 0;
}
6331
/* Attribute policy for RTM_NEWMDB and single-entry RTM_DELMDB requests.
 * strict_start_type makes any attribute above MDBA_SET_ENTRY_ATTRS
 * subject to strict validation even under the deprecated parser.
 */
static const struct nla_policy mdba_policy[MDBA_SET_ENTRY_MAX + 1] = {
	[MDBA_SET_ENTRY_UNSPEC] = { .strict_start_type = MDBA_SET_ENTRY_ATTRS + 1 },
	[MDBA_SET_ENTRY] = NLA_POLICY_VALIDATE_FN(NLA_BINARY,
						  rtnl_validate_mdb_entry,
						  sizeof(struct br_mdb_entry)),
	[MDBA_SET_ENTRY_ATTRS] = { .type = NLA_NESTED },
};
6339
6340static int rtnl_mdb_add(struct sk_buff *skb, struct nlmsghdr *nlh,
6341 struct netlink_ext_ack *extack)
6342{
6343 struct nlattr *tb[MDBA_SET_ENTRY_MAX + 1];
6344 struct net *net = sock_net(skb->sk);
6345 struct br_port_msg *bpm;
6346 struct net_device *dev;
6347 int err;
6348
6349 err = nlmsg_parse_deprecated(nlh, sizeof(*bpm), tb,
6350 MDBA_SET_ENTRY_MAX, mdba_policy, extack);
6351 if (err)
6352 return err;
6353
6354 bpm = nlmsg_data(nlh);
6355 if (!bpm->ifindex) {
6356 NL_SET_ERR_MSG(extack, "Invalid ifindex");
6357 return -EINVAL;
6358 }
6359
6360 dev = __dev_get_by_index(net, bpm->ifindex);
6361 if (!dev) {
6362 NL_SET_ERR_MSG(extack, "Device doesn't exist");
6363 return -ENODEV;
6364 }
6365
6366 if (NL_REQ_ATTR_CHECK(extack, NULL, tb, MDBA_SET_ENTRY)) {
6367 NL_SET_ERR_MSG(extack, "Missing MDBA_SET_ENTRY attribute");
6368 return -EINVAL;
6369 }
6370
6371 if (!dev->netdev_ops->ndo_mdb_add) {
6372 NL_SET_ERR_MSG(extack, "Device does not support MDB operations");
6373 return -EOPNOTSUPP;
6374 }
6375
6376 return dev->netdev_ops->ndo_mdb_add(dev, tb, nlh->nlmsg_flags, extack);
6377}
6378
e0cd06f7
IS
6379static int rtnl_validate_mdb_entry_del_bulk(const struct nlattr *attr,
6380 struct netlink_ext_ack *extack)
6381{
6382 struct br_mdb_entry *entry = nla_data(attr);
6383 struct br_mdb_entry zero_entry = {};
6384
6385 if (nla_len(attr) != sizeof(struct br_mdb_entry)) {
6386 NL_SET_ERR_MSG_ATTR(extack, attr, "Invalid attribute length");
6387 return -EINVAL;
6388 }
6389
6390 if (entry->state != MDB_PERMANENT && entry->state != MDB_TEMPORARY) {
6391 NL_SET_ERR_MSG(extack, "Unknown entry state");
6392 return -EINVAL;
6393 }
6394
6395 if (entry->flags) {
6396 NL_SET_ERR_MSG(extack, "Entry flags cannot be set");
6397 return -EINVAL;
6398 }
6399
6400 if (entry->vid >= VLAN_N_VID - 1) {
6401 NL_SET_ERR_MSG(extack, "Invalid entry VLAN id");
6402 return -EINVAL;
6403 }
6404
6405 if (memcmp(&entry->addr, &zero_entry.addr, sizeof(entry->addr))) {
6406 NL_SET_ERR_MSG(extack, "Entry address cannot be set");
6407 return -EINVAL;
6408 }
6409
6410 return 0;
6411}
6412
/* Attribute policy for bulk RTM_DELMDB (NLM_F_BULK) requests; the
 * entry is validated by rtnl_validate_mdb_entry_del_bulk() instead of
 * the single-entry validator.
 */
static const struct nla_policy mdba_del_bulk_policy[MDBA_SET_ENTRY_MAX + 1] = {
	[MDBA_SET_ENTRY] = NLA_POLICY_VALIDATE_FN(NLA_BINARY,
						  rtnl_validate_mdb_entry_del_bulk,
						  sizeof(struct br_mdb_entry)),
	[MDBA_SET_ENTRY_ATTRS] = { .type = NLA_NESTED },
};
6419
cc7f5022
IS
/* RTM_DELMDB doit handler.  Dispatches to ->ndo_mdb_del() for a
 * single-entry deletion or, when NLM_F_BULK is set, to
 * ->ndo_mdb_del_bulk() with the bulk-specific attribute policy.
 */
static int rtnl_mdb_del(struct sk_buff *skb, struct nlmsghdr *nlh,
			struct netlink_ext_ack *extack)
{
	bool del_bulk = !!(nlh->nlmsg_flags & NLM_F_BULK);
	struct nlattr *tb[MDBA_SET_ENTRY_MAX + 1];
	struct net *net = sock_net(skb->sk);
	struct br_port_msg *bpm;
	struct net_device *dev;
	int err;

	/* Bulk requests are parsed strictly with their own policy
	 * (mdba_del_bulk_policy); single deletions keep the legacy
	 * deprecated-parser behaviour with mdba_policy.
	 */
	if (!del_bulk)
		err = nlmsg_parse_deprecated(nlh, sizeof(*bpm), tb,
					     MDBA_SET_ENTRY_MAX, mdba_policy,
					     extack);
	else
		err = nlmsg_parse(nlh, sizeof(*bpm), tb, MDBA_SET_ENTRY_MAX,
				  mdba_del_bulk_policy, extack);
	if (err)
		return err;

	bpm = nlmsg_data(nlh);
	if (!bpm->ifindex) {
		NL_SET_ERR_MSG(extack, "Invalid ifindex");
		return -EINVAL;
	}

	dev = __dev_get_by_index(net, bpm->ifindex);
	if (!dev) {
		NL_SET_ERR_MSG(extack, "Device doesn't exist");
		return -ENODEV;
	}

	if (NL_REQ_ATTR_CHECK(extack, NULL, tb, MDBA_SET_ENTRY)) {
		NL_SET_ERR_MSG(extack, "Missing MDBA_SET_ENTRY attribute");
		return -EINVAL;
	}

	if (del_bulk) {
		if (!dev->netdev_ops->ndo_mdb_del_bulk) {
			NL_SET_ERR_MSG(extack, "Device does not support MDB bulk deletion");
			return -EOPNOTSUPP;
		}
		return dev->netdev_ops->ndo_mdb_del_bulk(dev, tb, extack);
	}

	if (!dev->netdev_ops->ndo_mdb_del) {
		NL_SET_ERR_MSG(extack, "Device does not support MDB operations");
		return -EOPNOTSUPP;
	}

	return dev->netdev_ops->ndo_mdb_del(dev, tb, extack);
}
6472
1da177e4
LT
6473/* Process one rtnetlink message. */
6474
2d4bc933
JB
/* Dispatch a single rtnetlink request to the handler registered for
 * its (family, message type) pair, falling back to PF_UNSPEC.  GET
 * requests with NLM_F_DUMP start a netlink dump; everything else goes
 * through the registered doit handler, either unlocked
 * (RTNL_FLAG_DOIT_UNLOCKED) or under the RTNL mutex.
 */
static int rtnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh,
			     struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct rtnl_link *link;
	enum rtnl_kinds kind;
	struct module *owner;
	int err = -EOPNOTSUPP;
	rtnl_doit_func doit;
	unsigned int flags;
	int family;
	int type;

	type = nlh->nlmsg_type;
	if (type > RTM_MAX)
		return -EOPNOTSUPP;

	type -= RTM_BASE;

	/* All the messages must have at least 1 byte length */
	if (nlmsg_len(nlh) < sizeof(struct rtgenmsg))
		return 0;

	family = ((struct rtgenmsg *)nlmsg_data(nlh))->rtgen_family;
	kind = rtnl_msgtype_kind(type);

	/* Only GET requests are allowed without CAP_NET_ADMIN. */
	if (kind != RTNL_KIND_GET && !netlink_net_capable(skb, CAP_NET_ADMIN))
		return -EPERM;

	/* The handler table is RCU-protected; hold the read lock while
	 * looking up and copying out the link's fields.
	 */
	rcu_read_lock();
	if (kind == RTNL_KIND_GET && (nlh->nlmsg_flags & NLM_F_DUMP)) {
		struct sock *rtnl;
		rtnl_dumpit_func dumpit;
		u32 min_dump_alloc = 0;

		link = rtnl_get_link(family, type);
		if (!link || !link->dumpit) {
			/* Fall back to the family-independent handler. */
			family = PF_UNSPEC;
			link = rtnl_get_link(family, type);
			if (!link || !link->dumpit)
				goto err_unlock;
		}
		owner = link->owner;
		dumpit = link->dumpit;
		flags = link->flags;

		if (type == RTM_GETLINK - RTM_BASE)
			min_dump_alloc = rtnl_calcit(skb, nlh);

		err = 0;
		/* need to do this before rcu_read_unlock() */
		if (!try_module_get(owner))
			err = -EPROTONOSUPPORT;

		rcu_read_unlock();

		rtnl = net->rtnl;
		if (err == 0) {
			struct netlink_dump_control c = {
				.dump = dumpit,
				.min_dump_alloc = min_dump_alloc,
				.module = owner,
				.flags = flags,
			};
			err = netlink_dump_start(rtnl, skb, nlh, &c);
			/* netlink_dump_start() will keep a reference on
			 * module if dump is still in progress.
			 */
			module_put(owner);
		}
		return err;
	}

	link = rtnl_get_link(family, type);
	if (!link || !link->doit) {
		family = PF_UNSPEC;
		link = rtnl_get_link(PF_UNSPEC, type);
		if (!link || !link->doit)
			goto out_unlock;
	}

	/* Pin the owning module so the handler cannot be unloaded while
	 * we run it.
	 */
	owner = link->owner;
	if (!try_module_get(owner)) {
		err = -EPROTONOSUPPORT;
		goto out_unlock;
	}

	flags = link->flags;
	if (kind == RTNL_KIND_DEL && (nlh->nlmsg_flags & NLM_F_BULK) &&
	    !(flags & RTNL_FLAG_BULK_DEL_SUPPORTED)) {
		NL_SET_ERR_MSG(extack, "Bulk delete is not supported");
		module_put(owner);
		goto err_unlock;
	}

	if (flags & RTNL_FLAG_DOIT_UNLOCKED) {
		doit = link->doit;
		rcu_read_unlock();
		if (doit)
			err = doit(skb, nlh, extack);
		module_put(owner);
		return err;
	}
	rcu_read_unlock();

	rtnl_lock();
	/* Re-look up the handler: the table may have changed between
	 * dropping the RCU read lock and acquiring the RTNL mutex.
	 */
	link = rtnl_get_link(family, type);
	if (link && link->doit)
		err = link->doit(skb, nlh, extack);
	rtnl_unlock();

	module_put(owner);

	return err;

out_unlock:
	rcu_read_unlock();
	return err;

err_unlock:
	rcu_read_unlock();
	return -EOPNOTSUPP;
}
6598
cd40b7d3 6599static void rtnetlink_rcv(struct sk_buff *skb)
1da177e4 6600{
cd40b7d3 6601 netlink_rcv_skb(skb, &rtnetlink_rcv_msg);
1da177e4
LT
6602}
6603
5f729eaa
JG
6604static int rtnetlink_bind(struct net *net, int group)
6605{
6606 switch (group) {
6607 case RTNLGRP_IPV4_MROUTE_R:
6608 case RTNLGRP_IPV6_MROUTE_R:
6609 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
6610 return -EPERM;
6611 break;
6612 }
6613 return 0;
6614}
6615
1da177e4
LT
/* Netdevice notifier: translate the device events listed below into an
 * RTM_NEWLINK notification to userspace; all other events are ignored.
 */
static int rtnetlink_event(struct notifier_block *this, unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);

	switch (event) {
	case NETDEV_REBOOT:
	case NETDEV_CHANGEMTU:
	case NETDEV_CHANGEADDR:
	case NETDEV_CHANGENAME:
	case NETDEV_FEAT_CHANGE:
	case NETDEV_BONDING_FAILOVER:
	case NETDEV_POST_TYPE_CHANGE:
	case NETDEV_NOTIFY_PEERS:
	case NETDEV_CHANGEUPPER:
	case NETDEV_RESEND_IGMP:
	case NETDEV_CHANGEINFODATA:
	case NETDEV_CHANGELOWERSTATE:
	case NETDEV_CHANGE_TX_QUEUE_LEN:
		/* rtnl_get_event() maps the notifier event to the
		 * IFLA_EVENT value carried in the message.
		 */
		rtmsg_ifinfo_event(RTM_NEWLINK, dev, 0, rtnl_get_event(event),
				   GFP_KERNEL, NULL, 0, 0, NULL);
		break;
	default:
		break;
	}
	return NOTIFY_DONE;
}
6642
/* Registered in rtnetlink_init() to forward netdevice events through
 * rtnetlink_event().
 */
static struct notifier_block rtnetlink_dev_notifier = {
	.notifier_call = rtnetlink_event,
};
6646
97c53cac 6647
/* Per-namespace init: create the NETLINK_ROUTE kernel socket for @net
 * and store it in net->rtnl.  NL_CFG_F_NONROOT_RECV lets unprivileged
 * sockets subscribe to notification groups; per-group restrictions are
 * enforced by rtnetlink_bind().
 */
static int __net_init rtnetlink_net_init(struct net *net)
{
	struct sock *sk;
	struct netlink_kernel_cfg cfg = {
		.groups		= RTNLGRP_MAX,
		.input		= rtnetlink_rcv,
		.cb_mutex	= &rtnl_mutex,
		.flags		= NL_CFG_F_NONROOT_RECV,
		.bind		= rtnetlink_bind,
	};

	sk = netlink_kernel_create(net, NETLINK_ROUTE, &cfg);
	if (!sk)
		return -ENOMEM;
	net->rtnl = sk;
	return 0;
}
6665
/* Per-namespace teardown: release the NETLINK_ROUTE kernel socket and
 * clear the pointer so it cannot be used after the namespace is gone.
 */
static void __net_exit rtnetlink_net_exit(struct net *net)
{
	netlink_kernel_release(net->rtnl);
	net->rtnl = NULL;
}
6671
/* Pernet operations giving each network namespace its own rtnetlink
 * kernel socket.
 */
static struct pernet_operations rtnetlink_net_ops = {
	.init = rtnetlink_net_init,
	.exit = rtnetlink_net_exit,
};
6676
1da177e4
LT
/* Boot-time initialization of the rtnetlink subsystem: set up the
 * per-namespace socket, hook device events and register the core
 * message handlers.  rtnetlink is fundamental, so failure to set up
 * the pernet subsystem is fatal.
 */
void __init rtnetlink_init(void)
{
	if (register_pernet_subsys(&rtnetlink_net_ops))
		panic("rtnetlink_init: cannot initialize rtnetlink\n");

	register_netdevice_notifier(&rtnetlink_dev_notifier);

	/* Link (interface) management. */
	rtnl_register(PF_UNSPEC, RTM_GETLINK, rtnl_getlink,
		      rtnl_dump_ifinfo, 0);
	rtnl_register(PF_UNSPEC, RTM_SETLINK, rtnl_setlink, NULL, 0);
	rtnl_register(PF_UNSPEC, RTM_NEWLINK, rtnl_newlink, NULL, 0);
	rtnl_register(PF_UNSPEC, RTM_DELLINK, rtnl_dellink, NULL, 0);

	/* Family-independent dump fallbacks. */
	rtnl_register(PF_UNSPEC, RTM_GETADDR, NULL, rtnl_dump_all, 0);
	rtnl_register(PF_UNSPEC, RTM_GETROUTE, NULL, rtnl_dump_all, 0);
	rtnl_register(PF_UNSPEC, RTM_GETNETCONF, NULL, rtnl_dump_all, 0);

	/* Alternative interface name (link property) management. */
	rtnl_register(PF_UNSPEC, RTM_NEWLINKPROP, rtnl_newlinkprop, NULL, 0);
	rtnl_register(PF_UNSPEC, RTM_DELLINKPROP, rtnl_dellinkprop, NULL, 0);

	/* Bridge FDB handling; FDB deletion supports NLM_F_BULK. */
	rtnl_register(PF_BRIDGE, RTM_NEWNEIGH, rtnl_fdb_add, NULL, 0);
	rtnl_register(PF_BRIDGE, RTM_DELNEIGH, rtnl_fdb_del, NULL,
		      RTNL_FLAG_BULK_DEL_SUPPORTED);
	rtnl_register(PF_BRIDGE, RTM_GETNEIGH, rtnl_fdb_get, rtnl_fdb_dump, 0);

	/* Bridge port configuration. */
	rtnl_register(PF_BRIDGE, RTM_GETLINK, NULL, rtnl_bridge_getlink, 0);
	rtnl_register(PF_BRIDGE, RTM_DELLINK, rtnl_bridge_dellink, NULL, 0);
	rtnl_register(PF_BRIDGE, RTM_SETLINK, rtnl_bridge_setlink, NULL, 0);

	/* Extended link statistics. */
	rtnl_register(PF_UNSPEC, RTM_GETSTATS, rtnl_stats_get, rtnl_stats_dump,
		      0);
	rtnl_register(PF_UNSPEC, RTM_SETSTATS, rtnl_stats_set, NULL, 0);

	/* Multicast database (MDB); deletion supports NLM_F_BULK. */
	rtnl_register(PF_BRIDGE, RTM_GETMDB, rtnl_mdb_get, rtnl_mdb_dump, 0);
	rtnl_register(PF_BRIDGE, RTM_NEWMDB, rtnl_mdb_add, NULL, 0);
	rtnl_register(PF_BRIDGE, RTM_DELMDB, rtnl_mdb_del, NULL,
		      RTNL_FLAG_BULK_DEL_SUPPORTED);
}