rtnetlink: do not depend on RTNL in rtnl_xdp_prog_skb()
net/core/rtnetlink.c (linux-2.6-block.git)
2874c5fd 1// SPDX-License-Identifier: GPL-2.0-or-later
1da177e4
LT
2/*
3 * INET An implementation of the TCP/IP protocol suite for the LINUX
4 * operating system. INET is implemented using the BSD Socket
5 * interface as the means of communication with the user level.
6 *
7 * Routing netlink socket interface: protocol independent part.
8 *
9 * Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
10 *
1da177e4 11 * Fixes:
d467d0bc 12 * Vitaly E. Lavrov RTA_OK arithmetic was wrong.
1da177e4
LT
13 */
14
ee5d032f 15#include <linux/bitops.h>
1da177e4
LT
16#include <linux/errno.h>
17#include <linux/module.h>
18#include <linux/types.h>
19#include <linux/socket.h>
20#include <linux/kernel.h>
1da177e4
LT
21#include <linux/timer.h>
22#include <linux/string.h>
23#include <linux/sockios.h>
24#include <linux/net.h>
25#include <linux/fcntl.h>
26#include <linux/mm.h>
27#include <linux/slab.h>
28#include <linux/interrupt.h>
29#include <linux/capability.h>
30#include <linux/skbuff.h>
31#include <linux/init.h>
32#include <linux/security.h>
6756ae4b 33#include <linux/mutex.h>
1823730f 34#include <linux/if_addr.h>
77162022 35#include <linux/if_bridge.h>
f6f6424b 36#include <linux/if_vlan.h>
ebc08a6f 37#include <linux/pci.h>
77162022 38#include <linux/etherdevice.h>
58038695 39#include <linux/bpf.h>
1da177e4 40
7c0f6ba6 41#include <linux/uaccess.h>
1da177e4
LT
42
43#include <linux/inet.h>
44#include <linux/netdevice.h>
45#include <net/ip.h>
46#include <net/protocol.h>
47#include <net/arp.h>
48#include <net/route.h>
49#include <net/udp.h>
ea697639 50#include <net/tcp.h>
1da177e4
LT
51#include <net/sock.h>
52#include <net/pkt_sched.h>
14c0b97d 53#include <net/fib_rules.h>
e2849863 54#include <net/rtnetlink.h>
30ffee84 55#include <net/net_namespace.h>
dca56c30 56#include <net/devlink.h>
cc7f5022
IS
57#if IS_ENABLED(CONFIG_IPV6)
58#include <net/addrconf.h>
59#endif
5f184269 60#include <linux/dpll.h>
1da177e4 61
6264f58c
JK
62#include "dev.h"
63
a428afe8 64#define RTNL_MAX_TYPE 50
29cfb2aa 65#define RTNL_SLAVE_MAX_TYPE 44
ccf8dbcd 66
e0d087af 67struct rtnl_link {
e2849863
TG
68 rtnl_doit_func doit;
69 rtnl_dumpit_func dumpit;
e4202511 70 struct module *owner;
62256f98 71 unsigned int flags;
addf9b90 72 struct rcu_head rcu;
e2849863
TG
73};
74
6756ae4b 75static DEFINE_MUTEX(rtnl_mutex);
1da177e4
LT
76
77void rtnl_lock(void)
78{
6756ae4b 79 mutex_lock(&rtnl_mutex);
1da177e4 80}
e0d087af 81EXPORT_SYMBOL(rtnl_lock);
1da177e4 82
79ffdfc6
KT
83int rtnl_lock_killable(void)
84{
85 return mutex_lock_killable(&rtnl_mutex);
86}
87EXPORT_SYMBOL(rtnl_lock_killable);
88
1b5c5493
ED
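/* Deferred skb freeing: skbs queued via rtnl_kfree_skbs() while the RTNL is
 * held are collected on defer_kfree_skb_list and only freed (with
 * cond_resched() between them) in __rtnl_unlock(), after the mutex is dropped.
 */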
89static struct sk_buff *defer_kfree_skb_list;
90void rtnl_kfree_skbs(struct sk_buff *head, struct sk_buff *tail)
91{
92 if (head && tail) {
93 tail->next = defer_kfree_skb_list;
94 defer_kfree_skb_list = head;
95 }
96}
97EXPORT_SYMBOL(rtnl_kfree_skbs);
98
6756ae4b 99void __rtnl_unlock(void)
1da177e4 100{
1b5c5493
ED
101 struct sk_buff *head = defer_kfree_skb_list;
102
103 defer_kfree_skb_list = NULL;
104
0b5c21bb
JB
105 /* Ensure that we didn't actually add any TODO item when __rtnl_unlock()
106 * is used. In some places, e.g. in cfg80211, we have code that will do
107 * something like
108 * rtnl_lock()
109 * wiphy_lock()
110 * ...
111 * rtnl_unlock()
112 *
113 * and because netdev_run_todo() acquires the RTNL for items on the list
114 * we could cause a situation such as this:
115 * Thread 1 Thread 2
116 * rtnl_lock()
117 * unregister_netdevice()
118 * __rtnl_unlock()
119 * rtnl_lock()
120 * wiphy_lock()
121 * rtnl_unlock()
122 * netdev_run_todo()
123 * __rtnl_unlock()
124 *
125 * // list not empty now
126 * // because of thread 2
127 * rtnl_lock()
128 * while (!list_empty(...))
129 * rtnl_lock()
130 * wiphy_lock()
131 * **** DEADLOCK ****
132 *
133 * However, usage of __rtnl_unlock() is rare, and so we can ensure that
 134 * it's not used in cases where something is added to the todo list.
135 */
136 WARN_ON(!list_empty(&net_todo_list));
137
6756ae4b 138 mutex_unlock(&rtnl_mutex);
1b5c5493
ED
139
140 while (head) {
141 struct sk_buff *next = head->next;
142
143 kfree_skb(head);
144 cond_resched();
145 head = next;
146 }
1da177e4 147}
6756ae4b 148
1da177e4
LT
149void rtnl_unlock(void)
150{
58ec3b4d 151 /* This fellow will unlock it for us. */
1da177e4
LT
152 netdev_run_todo();
153}
e0d087af 154EXPORT_SYMBOL(rtnl_unlock);
1da177e4 155
6756ae4b
SH
156int rtnl_trylock(void)
157{
158 return mutex_trylock(&rtnl_mutex);
159}
e0d087af 160EXPORT_SYMBOL(rtnl_trylock);
6756ae4b 161
c9c1014b
PM
162int rtnl_is_locked(void)
163{
164 return mutex_is_locked(&rtnl_mutex);
165}
e0d087af 166EXPORT_SYMBOL(rtnl_is_locked);
c9c1014b 167
6f99528e
VB
168bool refcount_dec_and_rtnl_lock(refcount_t *r)
169{
170 return refcount_dec_and_mutex_lock(r, &rtnl_mutex);
171}
172EXPORT_SYMBOL(refcount_dec_and_rtnl_lock);
173
a898def2 174#ifdef CONFIG_PROVE_LOCKING
0cbf3343 175bool lockdep_rtnl_is_held(void)
a898def2
PM
176{
177 return lockdep_is_held(&rtnl_mutex);
178}
179EXPORT_SYMBOL(lockdep_rtnl_is_held);
180#endif /* #ifdef CONFIG_PROVE_LOCKING */
181
51e13685 182static struct rtnl_link __rcu *__rcu *rtnl_msg_handlers[RTNL_FAMILY_MAX + 1];
e2849863
TG
183
184static inline int rtm_msgindex(int msgtype)
185{
186 int msgindex = msgtype - RTM_BASE;
187
188 /*
189 * msgindex < 0 implies someone tried to register a netlink
190 * control code. msgindex >= RTM_NR_MSGTYPES may indicate that
191 * the message type has not been added to linux/rtnetlink.h
192 */
193 BUG_ON(msgindex < 0 || msgindex >= RTM_NR_MSGTYPES);
194
195 return msgindex;
196}
197
addf9b90
FW
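/* Look up the handler for protocol:msgtype, falling back to the PF_UNSPEC
 * table when the protocol has no table of its own. Runs under rcu_read_lock()
 * or with the RTNL held, hence rcu_dereference_rtnl().
 */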
198static struct rtnl_link *rtnl_get_link(int protocol, int msgtype)
199{
51e13685 200 struct rtnl_link __rcu **tab;
addf9b90
FW
201
202 if (protocol >= ARRAY_SIZE(rtnl_msg_handlers))
203 protocol = PF_UNSPEC;
204
205 tab = rcu_dereference_rtnl(rtnl_msg_handlers[protocol]);
206 if (!tab)
207 tab = rcu_dereference_rtnl(rtnl_msg_handlers[PF_UNSPEC]);
208
51e13685 209 return rcu_dereference_rtnl(tab[msgtype]);
addf9b90
FW
210}
211
e4202511
FW
212static int rtnl_register_internal(struct module *owner,
213 int protocol, int msgtype,
214 rtnl_doit_func doit, rtnl_dumpit_func dumpit,
215 unsigned int flags)
e2849863 216{
b0e9fe1b
FW
217 struct rtnl_link *link, *old;
218 struct rtnl_link __rcu **tab;
e2849863 219 int msgindex;
addf9b90 220 int ret = -ENOBUFS;
e2849863 221
25239cee 222 BUG_ON(protocol < 0 || protocol > RTNL_FAMILY_MAX);
e2849863
TG
223 msgindex = rtm_msgindex(msgtype);
224
addf9b90 225 rtnl_lock();
51e13685 226 tab = rtnl_dereference(rtnl_msg_handlers[protocol]);
e2849863 227 if (tab == NULL) {
addf9b90
FW
228 tab = kcalloc(RTM_NR_MSGTYPES, sizeof(void *), GFP_KERNEL);
229 if (!tab)
230 goto unlock;
e2849863 231
addf9b90 232 /* ensures we see the 0 stores */
6853dd48 233 rcu_assign_pointer(rtnl_msg_handlers[protocol], tab);
e2849863
TG
234 }
235
addf9b90
FW
236 old = rtnl_dereference(tab[msgindex]);
237 if (old) {
238 link = kmemdup(old, sizeof(*old), GFP_KERNEL);
239 if (!link)
240 goto unlock;
241 } else {
242 link = kzalloc(sizeof(*link), GFP_KERNEL);
243 if (!link)
244 goto unlock;
245 }
246
e4202511
FW
247 WARN_ON(link->owner && link->owner != owner);
248 link->owner = owner;
249
addf9b90 250 WARN_ON(doit && link->doit && link->doit != doit);
e2849863 251 if (doit)
addf9b90
FW
252 link->doit = doit;
253 WARN_ON(dumpit && link->dumpit && link->dumpit != dumpit);
e2849863 254 if (dumpit)
addf9b90 255 link->dumpit = dumpit;
e2849863 256
a6cec0bc
NA
257 WARN_ON(rtnl_msgtype_kind(msgtype) != RTNL_KIND_DEL &&
258 (flags & RTNL_FLAG_BULK_DEL_SUPPORTED));
addf9b90
FW
259 link->flags |= flags;
260
261 /* publish protocol:msgtype */
262 rcu_assign_pointer(tab[msgindex], link);
263 ret = 0;
264 if (old)
265 kfree_rcu(old, rcu);
266unlock:
267 rtnl_unlock();
268 return ret;
e2849863 269}
e4202511
FW
270
271/**
272 * rtnl_register_module - Register a rtnetlink message type
273 *
274 * @owner: module registering the hook (THIS_MODULE)
275 * @protocol: Protocol family or PF_UNSPEC
276 * @msgtype: rtnetlink message type
277 * @doit: Function pointer called for each request message
278 * @dumpit: Function pointer called for each dump request (NLM_F_DUMP) message
d467d0bc 279 * @flags: rtnl_link_flags to modify behaviour of doit/dumpit functions
e4202511
FW
280 *
281 * Like rtnl_register, but for use by removable modules.
282 */
283int rtnl_register_module(struct module *owner,
284 int protocol, int msgtype,
285 rtnl_doit_func doit, rtnl_dumpit_func dumpit,
286 unsigned int flags)
287{
288 return rtnl_register_internal(owner, protocol, msgtype,
289 doit, dumpit, flags);
290}
291EXPORT_SYMBOL_GPL(rtnl_register_module);
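/* Illustrative sketch of a rtnl_register_module() user: a removable module
 * registering a doit handler at init time and tearing it down again on exit.
 * The handler name and the PF_BRIDGE/RTM_GETLINK pairing are only examples;
 * module_init()/module_exit() wiring is omitted.
 */
static int example_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
			struct netlink_ext_ack *extack)
{
	/* Parse the request carried in nlh and reply, or return an error. */
	return 0;
}

static int __init example_init(void)
{
	/* Pass NULL for dumpit when dumps are not supported, 0 for flags. */
	return rtnl_register_module(THIS_MODULE, PF_BRIDGE, RTM_GETLINK,
				    example_doit, NULL, 0);
}

static void __exit example_exit(void)
{
	rtnl_unregister(PF_BRIDGE, RTM_GETLINK);
}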
292
293/**
16feebcf 294 * rtnl_register - Register a rtnetlink message type
e4202511
FW
295 * @protocol: Protocol family or PF_UNSPEC
296 * @msgtype: rtnetlink message type
297 * @doit: Function pointer called for each request message
298 * @dumpit: Function pointer called for each dump request (NLM_F_DUMP) message
d467d0bc 299 * @flags: rtnl_link_flags to modify behaviour of doit/dumpit functions
e4202511
FW
300 *
301 * Registers the specified function pointers (at least one of them has
302 * to be non-NULL) to be called whenever a request message for the
303 * specified protocol family and message type is received.
304 *
305 * The special protocol family PF_UNSPEC may be used to define fallback
306 * function pointers for the case when no entry for the specific protocol
307 * family exists.
e2849863
TG
308 */
309void rtnl_register(int protocol, int msgtype,
c7ac8679 310 rtnl_doit_func doit, rtnl_dumpit_func dumpit,
b97bac64 311 unsigned int flags)
e2849863 312{
16feebcf
FW
313 int err;
314
315 err = rtnl_register_internal(NULL, protocol, msgtype, doit, dumpit,
316 flags);
317 if (err)
318 pr_err("Unable to register rtnetlink message handler, "
319 "protocol = %d, message type = %d\n", protocol, msgtype);
e2849863 320}
e2849863
TG
321
322/**
323 * rtnl_unregister - Unregister a rtnetlink message type
324 * @protocol: Protocol family or PF_UNSPEC
325 * @msgtype: rtnetlink message type
326 *
327 * Returns 0 on success or a negative error code.
328 */
329int rtnl_unregister(int protocol, int msgtype)
330{
51e13685
JK
331 struct rtnl_link __rcu **tab;
332 struct rtnl_link *link;
e2849863
TG
333 int msgindex;
334
25239cee 335 BUG_ON(protocol < 0 || protocol > RTNL_FAMILY_MAX);
e2849863
TG
336 msgindex = rtm_msgindex(msgtype);
337
6853dd48 338 rtnl_lock();
addf9b90
FW
339 tab = rtnl_dereference(rtnl_msg_handlers[protocol]);
340 if (!tab) {
6853dd48 341 rtnl_unlock();
e2849863 342 return -ENOENT;
6853dd48 343 }
e2849863 344
17452347 345 link = rcu_replace_pointer_rtnl(tab[msgindex], NULL);
6853dd48 346 rtnl_unlock();
e2849863 347
addf9b90
FW
348 kfree_rcu(link, rcu);
349
e2849863
TG
350 return 0;
351}
e2849863
TG
352EXPORT_SYMBOL_GPL(rtnl_unregister);
353
354/**
 355 * rtnl_unregister_all - Unregister all rtnetlink message types of a protocol
 356 * @protocol: Protocol family or PF_UNSPEC
 357 *
 358 * Identical to calling rtnl_unregister() for all registered message types
359 * of a certain protocol family.
360 */
361void rtnl_unregister_all(int protocol)
362{
51e13685
JK
363 struct rtnl_link __rcu **tab;
364 struct rtnl_link *link;
addf9b90 365 int msgindex;
019a3169 366
25239cee 367 BUG_ON(protocol < 0 || protocol > RTNL_FAMILY_MAX);
e2849863 368
019a3169 369 rtnl_lock();
17452347 370 tab = rcu_replace_pointer_rtnl(rtnl_msg_handlers[protocol], NULL);
f707ef61
SD
371 if (!tab) {
372 rtnl_unlock();
373 return;
374 }
addf9b90 375 for (msgindex = 0; msgindex < RTM_NR_MSGTYPES; msgindex++) {
17452347 376 link = rcu_replace_pointer_rtnl(tab[msgindex], NULL);
addf9b90
FW
377 kfree_rcu(link, rcu);
378 }
019a3169
FW
379 rtnl_unlock();
380
6853dd48
FW
381 synchronize_net();
382
addf9b90 383 kfree(tab);
e2849863 384}
e2849863 385EXPORT_SYMBOL_GPL(rtnl_unregister_all);
1da177e4 386
38f7b870
PM
387static LIST_HEAD(link_ops);
388
c63044f0
ED
389static const struct rtnl_link_ops *rtnl_link_ops_get(const char *kind)
390{
391 const struct rtnl_link_ops *ops;
392
393 list_for_each_entry(ops, &link_ops, list) {
394 if (!strcmp(ops->kind, kind))
395 return ops;
396 }
397 return NULL;
398}
399
38f7b870
PM
400/**
401 * __rtnl_link_register - Register rtnl_link_ops with rtnetlink.
402 * @ops: struct rtnl_link_ops * to register
403 *
404 * The caller must hold the rtnl_mutex. This function should be used
405 * by drivers that create devices during module initialization. It
406 * must be called before registering the devices.
407 *
408 * Returns 0 on success or a negative error code.
409 */
410int __rtnl_link_register(struct rtnl_link_ops *ops)
411{
c63044f0
ED
412 if (rtnl_link_ops_get(ops->kind))
413 return -EEXIST;
414
8c713dc9 415 /* The check for alloc/setup is here because if ops
b0ab2fab
JP
 416 * does not have those filled in, it is not possible
 417 * to use the ops to create a device, so do not
 418 * fill in dellink either. That disables rtnl_dellink.
419 */
8c713dc9 420 if ((ops->alloc || ops->setup) && !ops->dellink)
23289a37 421 ops->dellink = unregister_netdevice_queue;
2d85cba2 422
38f7b870
PM
423 list_add_tail(&ops->list, &link_ops);
424 return 0;
425}
38f7b870
PM
426EXPORT_SYMBOL_GPL(__rtnl_link_register);
427
428/**
429 * rtnl_link_register - Register rtnl_link_ops with rtnetlink.
430 * @ops: struct rtnl_link_ops * to register
431 *
432 * Returns 0 on success or a negative error code.
433 */
434int rtnl_link_register(struct rtnl_link_ops *ops)
435{
436 int err;
437
ccf8dbcd
KC
438 /* Sanity-check max sizes to avoid stack buffer overflow. */
439 if (WARN_ON(ops->maxtype > RTNL_MAX_TYPE ||
440 ops->slave_maxtype > RTNL_SLAVE_MAX_TYPE))
441 return -EINVAL;
442
38f7b870
PM
443 rtnl_lock();
444 err = __rtnl_link_register(ops);
445 rtnl_unlock();
446 return err;
447}
38f7b870
PM
448EXPORT_SYMBOL_GPL(rtnl_link_register);
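/* Illustrative sketch of a rtnl_link_ops user, loosely modelled on small
 * virtual-device drivers. The names below are hypothetical; a real driver
 * also fills in newlink/changelink, a netlink policy, priv_size, etc. With
 * only ->setup set, __rtnl_link_register() defaults ->dellink to
 * unregister_netdevice_queue() as done above.
 */
static void example_setup(struct net_device *dev)
{
	/* Initialise dev->netdev_ops, features, MTU limits, ... */
}

static struct rtnl_link_ops example_link_ops __read_mostly = {
	.kind	= "example",
	.setup	= example_setup,
};

static int __init example_link_init(void)
{
	/* Takes the RTNL internally; afterwards the "example" kind can be
	 * created with "ip link add ... type example".
	 */
	return rtnl_link_register(&example_link_ops);
}

static void __exit example_link_exit(void)
{
	rtnl_link_unregister(&example_link_ops);
}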
449
669f87ba
PE
450static void __rtnl_kill_links(struct net *net, struct rtnl_link_ops *ops)
451{
452 struct net_device *dev;
23289a37
ED
453 LIST_HEAD(list_kill);
454
669f87ba 455 for_each_netdev(net, dev) {
23289a37
ED
456 if (dev->rtnl_link_ops == ops)
457 ops->dellink(dev, &list_kill);
669f87ba 458 }
23289a37 459 unregister_netdevice_many(&list_kill);
669f87ba
PE
460}
461
38f7b870
PM
462/**
463 * __rtnl_link_unregister - Unregister rtnl_link_ops from rtnetlink.
464 * @ops: struct rtnl_link_ops * to unregister
465 *
554873e5
KT
466 * The caller must hold the rtnl_mutex and guarantee net_namespace_list
467 * integrity (hold pernet_ops_rwsem for writing to close the race
468 * with setup_net() and cleanup_net()).
38f7b870
PM
469 */
470void __rtnl_link_unregister(struct rtnl_link_ops *ops)
471{
881d966b 472 struct net *net;
2d85cba2 473
881d966b 474 for_each_net(net) {
669f87ba 475 __rtnl_kill_links(net, ops);
2d85cba2 476 }
38f7b870
PM
477 list_del(&ops->list);
478}
38f7b870
PM
479EXPORT_SYMBOL_GPL(__rtnl_link_unregister);
480
200b916f
CW
481/* Return with the rtnl_lock held when there are no network
482 * devices unregistering in any network namespace.
483 */
484static void rtnl_lock_unregistering_all(void)
485{
ff960a73 486 DEFINE_WAIT_FUNC(wait, woken_wake_function);
200b916f 487
ff960a73 488 add_wait_queue(&netdev_unregistering_wq, &wait);
200b916f 489 for (;;) {
200b916f 490 rtnl_lock();
f0b07bb1
KT
491 /* We held write locked pernet_ops_rwsem, and parallel
492 * setup_net() and cleanup_net() are not possible.
493 */
ffabe98c 494 if (!atomic_read(&dev_unreg_count))
200b916f
CW
495 break;
496 __rtnl_unlock();
ff960a73
PZ
497
498 wait_woken(&wait, TASK_UNINTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
200b916f 499 }
ff960a73 500 remove_wait_queue(&netdev_unregistering_wq, &wait);
200b916f
CW
501}
502
38f7b870
PM
503/**
504 * rtnl_link_unregister - Unregister rtnl_link_ops from rtnetlink.
505 * @ops: struct rtnl_link_ops * to unregister
506 */
507void rtnl_link_unregister(struct rtnl_link_ops *ops)
508{
8518e9bb 509 /* Close the race with setup_net() and cleanup_net() */
4420bf21 510 down_write(&pernet_ops_rwsem);
200b916f 511 rtnl_lock_unregistering_all();
38f7b870
PM
512 __rtnl_link_unregister(ops);
513 rtnl_unlock();
4420bf21 514 up_write(&pernet_ops_rwsem);
38f7b870 515}
38f7b870
PM
516EXPORT_SYMBOL_GPL(rtnl_link_unregister);
517
ba7d49b1
JP
518static size_t rtnl_link_get_slave_info_data_size(const struct net_device *dev)
519{
520 struct net_device *master_dev;
521 const struct rtnl_link_ops *ops;
8515ae38 522 size_t size = 0;
ba7d49b1 523
8515ae38
FW
524 rcu_read_lock();
525
526 master_dev = netdev_master_upper_dev_get_rcu((struct net_device *)dev);
ba7d49b1 527 if (!master_dev)
8515ae38
FW
528 goto out;
529
ba7d49b1 530 ops = master_dev->rtnl_link_ops;
6049f253 531 if (!ops || !ops->get_slave_size)
8515ae38 532 goto out;
ba7d49b1 533 /* IFLA_INFO_SLAVE_DATA + nested data */
8515ae38 534 size = nla_total_size(sizeof(struct nlattr)) +
ba7d49b1 535 ops->get_slave_size(master_dev, dev);
8515ae38
FW
536
537out:
538 rcu_read_unlock();
539 return size;
ba7d49b1
JP
540}
541
38f7b870
PM
542static size_t rtnl_link_get_size(const struct net_device *dev)
543{
544 const struct rtnl_link_ops *ops = dev->rtnl_link_ops;
545 size_t size;
546
547 if (!ops)
548 return 0;
549
369cf77a
TG
550 size = nla_total_size(sizeof(struct nlattr)) + /* IFLA_LINKINFO */
551 nla_total_size(strlen(ops->kind) + 1); /* IFLA_INFO_KIND */
38f7b870
PM
552
553 if (ops->get_size)
554 /* IFLA_INFO_DATA + nested data */
369cf77a 555 size += nla_total_size(sizeof(struct nlattr)) +
38f7b870
PM
556 ops->get_size(dev);
557
558 if (ops->get_xstats_size)
369cf77a
TG
559 /* IFLA_INFO_XSTATS */
560 size += nla_total_size(ops->get_xstats_size(dev));
38f7b870 561
ba7d49b1
JP
562 size += rtnl_link_get_slave_info_data_size(dev);
563
38f7b870
PM
564 return size;
565}
566
f8ff182c
TG
567static LIST_HEAD(rtnl_af_ops);
568
569static const struct rtnl_af_ops *rtnl_af_lookup(const int family)
570{
571 const struct rtnl_af_ops *ops;
572
a100243d
CW
573 ASSERT_RTNL();
574
575 list_for_each_entry(ops, &rtnl_af_ops, list) {
f8ff182c
TG
576 if (ops->family == family)
577 return ops;
578 }
579
580 return NULL;
581}
582
f8ff182c
TG
583/**
584 * rtnl_af_register - Register rtnl_af_ops with rtnetlink.
585 * @ops: struct rtnl_af_ops * to register
586 *
587 * Returns 0 on success or a negative error code.
588 */
3678a9d8 589void rtnl_af_register(struct rtnl_af_ops *ops)
f8ff182c 590{
f8ff182c 591 rtnl_lock();
5fa85a09 592 list_add_tail_rcu(&ops->list, &rtnl_af_ops);
f8ff182c 593 rtnl_unlock();
f8ff182c
TG
594}
595EXPORT_SYMBOL_GPL(rtnl_af_register);
596
f8ff182c
TG
597/**
598 * rtnl_af_unregister - Unregister rtnl_af_ops from rtnetlink.
599 * @ops: struct rtnl_af_ops * to unregister
600 */
601void rtnl_af_unregister(struct rtnl_af_ops *ops)
602{
603 rtnl_lock();
5fa85a09 604 list_del_rcu(&ops->list);
f8ff182c 605 rtnl_unlock();
5fa85a09
FW
606
607 synchronize_rcu();
f8ff182c
TG
608}
609EXPORT_SYMBOL_GPL(rtnl_af_unregister);
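/* Illustrative sketch of a rtnl_af_ops user (the real ones live in the IPv4
 * and IPv6 code). Handler names are hypothetical; the signatures mirror the
 * calls made from rtnl_link_get_af_size() and rtnl_fill_link_af() in this
 * file.
 */
static size_t example_get_link_af_size(const struct net_device *dev,
				       u32 ext_filter_mask)
{
	/* Worst-case size of this family's per-device IFLA_AF_SPEC payload. */
	return nla_total_size(4);
}

static int example_fill_link_af(struct sk_buff *skb,
				const struct net_device *dev,
				u32 ext_filter_mask)
{
	/* Emit the per-device attributes; -ENODATA means "nothing to dump"
	 * and makes the core cancel the nest instead of failing.
	 */
	return -ENODATA;
}

static struct rtnl_af_ops example_af_ops __read_mostly = {
	.family		  = AF_INET,
	.fill_link_af	  = example_fill_link_af,
	.get_link_af_size = example_get_link_af_size,
};

/* Registered once at boot/module init: rtnl_af_register(&example_af_ops); */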
610
b1974ed0
AR
611static size_t rtnl_link_get_af_size(const struct net_device *dev,
612 u32 ext_filter_mask)
f8ff182c
TG
613{
614 struct rtnl_af_ops *af_ops;
615 size_t size;
616
617 /* IFLA_AF_SPEC */
618 size = nla_total_size(sizeof(struct nlattr));
619
5fa85a09
FW
620 rcu_read_lock();
621 list_for_each_entry_rcu(af_ops, &rtnl_af_ops, list) {
f8ff182c
TG
622 if (af_ops->get_link_af_size) {
623 /* AF_* + nested data */
624 size += nla_total_size(sizeof(struct nlattr)) +
b1974ed0 625 af_ops->get_link_af_size(dev, ext_filter_mask);
f8ff182c
TG
626 }
627 }
5fa85a09 628 rcu_read_unlock();
f8ff182c
TG
629
630 return size;
631}
632
ba7d49b1 633static bool rtnl_have_link_slave_info(const struct net_device *dev)
38f7b870 634{
ba7d49b1 635 struct net_device *master_dev;
4c82a95e 636 bool ret = false;
38f7b870 637
4c82a95e
FW
638 rcu_read_lock();
639
640 master_dev = netdev_master_upper_dev_get_rcu((struct net_device *)dev);
813f020c 641 if (master_dev && master_dev->rtnl_link_ops)
4c82a95e
FW
642 ret = true;
643 rcu_read_unlock();
644 return ret;
ba7d49b1
JP
645}
646
647static int rtnl_link_slave_info_fill(struct sk_buff *skb,
648 const struct net_device *dev)
649{
650 struct net_device *master_dev;
651 const struct rtnl_link_ops *ops;
652 struct nlattr *slave_data;
653 int err;
38f7b870 654
ba7d49b1
JP
655 master_dev = netdev_master_upper_dev_get((struct net_device *) dev);
656 if (!master_dev)
657 return 0;
658 ops = master_dev->rtnl_link_ops;
659 if (!ops)
660 return 0;
661 if (nla_put_string(skb, IFLA_INFO_SLAVE_KIND, ops->kind) < 0)
662 return -EMSGSIZE;
663 if (ops->fill_slave_info) {
ae0be8de 664 slave_data = nla_nest_start_noflag(skb, IFLA_INFO_SLAVE_DATA);
ba7d49b1
JP
665 if (!slave_data)
666 return -EMSGSIZE;
667 err = ops->fill_slave_info(skb, master_dev, dev);
668 if (err < 0)
669 goto err_cancel_slave_data;
670 nla_nest_end(skb, slave_data);
671 }
672 return 0;
673
674err_cancel_slave_data:
675 nla_nest_cancel(skb, slave_data);
676 return err;
677}
678
679static int rtnl_link_info_fill(struct sk_buff *skb,
680 const struct net_device *dev)
681{
682 const struct rtnl_link_ops *ops = dev->rtnl_link_ops;
683 struct nlattr *data;
684 int err;
685
686 if (!ops)
687 return 0;
38f7b870 688 if (nla_put_string(skb, IFLA_INFO_KIND, ops->kind) < 0)
ba7d49b1 689 return -EMSGSIZE;
38f7b870
PM
690 if (ops->fill_xstats) {
691 err = ops->fill_xstats(skb, dev);
692 if (err < 0)
ba7d49b1 693 return err;
38f7b870
PM
694 }
695 if (ops->fill_info) {
ae0be8de 696 data = nla_nest_start_noflag(skb, IFLA_INFO_DATA);
ba7d49b1
JP
697 if (data == NULL)
698 return -EMSGSIZE;
38f7b870
PM
699 err = ops->fill_info(skb, dev);
700 if (err < 0)
701 goto err_cancel_data;
702 nla_nest_end(skb, data);
703 }
38f7b870
PM
704 return 0;
705
706err_cancel_data:
707 nla_nest_cancel(skb, data);
ba7d49b1
JP
708 return err;
709}
710
711static int rtnl_link_fill(struct sk_buff *skb, const struct net_device *dev)
712{
713 struct nlattr *linkinfo;
714 int err = -EMSGSIZE;
715
ae0be8de 716 linkinfo = nla_nest_start_noflag(skb, IFLA_LINKINFO);
ba7d49b1
JP
717 if (linkinfo == NULL)
718 goto out;
719
720 err = rtnl_link_info_fill(skb, dev);
721 if (err < 0)
722 goto err_cancel_link;
723
724 err = rtnl_link_slave_info_fill(skb, dev);
725 if (err < 0)
726 goto err_cancel_link;
727
728 nla_nest_end(skb, linkinfo);
729 return 0;
730
38f7b870
PM
731err_cancel_link:
732 nla_nest_cancel(skb, linkinfo);
733out:
734 return err;
735}
736
95c96174 737int rtnetlink_send(struct sk_buff *skb, struct net *net, u32 pid, unsigned int group, int echo)
1da177e4 738{
97c53cac 739 struct sock *rtnl = net->rtnl;
cfdf0d9a
YD
740
741 return nlmsg_notify(rtnl, skb, pid, group, echo, GFP_KERNEL);
1da177e4
LT
742}
743
97c53cac 744int rtnl_unicast(struct sk_buff *skb, struct net *net, u32 pid)
2942e900 745{
97c53cac
DL
746 struct sock *rtnl = net->rtnl;
747
2942e900
TG
748 return nlmsg_unicast(rtnl, skb, pid);
749}
e0d087af 750EXPORT_SYMBOL(rtnl_unicast);
2942e900 751
1ce85fe4 752void rtnl_notify(struct sk_buff *skb, struct net *net, u32 pid, u32 group,
1d997f10 753 const struct nlmsghdr *nlh, gfp_t flags)
97676b6b 754{
97c53cac 755 struct sock *rtnl = net->rtnl;
97676b6b 756
f9b282b3 757 nlmsg_notify(rtnl, skb, pid, group, nlmsg_report(nlh), flags);
97676b6b 758}
e0d087af 759EXPORT_SYMBOL(rtnl_notify);
97676b6b 760
97c53cac 761void rtnl_set_sk_err(struct net *net, u32 group, int error)
97676b6b 762{
97c53cac
DL
763 struct sock *rtnl = net->rtnl;
764
97676b6b
TG
765 netlink_set_err(rtnl, 0, group, error);
766}
e0d087af 767EXPORT_SYMBOL(rtnl_set_sk_err);
97676b6b 768
1da177e4
LT
769int rtnetlink_put_metrics(struct sk_buff *skb, u32 *metrics)
770{
2d7202bf
TG
771 struct nlattr *mx;
772 int i, valid = 0;
773
c22a133a
DA
774 /* nothing is dumped for dst_default_metrics, so just skip the loop */
775 if (metrics == dst_default_metrics.metrics)
776 return 0;
777
ae0be8de 778 mx = nla_nest_start_noflag(skb, RTA_METRICS);
2d7202bf
TG
779 if (mx == NULL)
780 return -ENOBUFS;
781
782 for (i = 0; i < RTAX_MAX; i++) {
783 if (metrics[i]) {
ea697639
DB
784 if (i == RTAX_CC_ALGO - 1) {
785 char tmp[TCP_CA_NAME_MAX], *name;
786
787 name = tcp_ca_get_name_by_key(metrics[i], tmp);
788 if (!name)
789 continue;
790 if (nla_put_string(skb, i + 1, name))
791 goto nla_put_failure;
c3a8d947
DB
792 } else if (i == RTAX_FEATURES - 1) {
793 u32 user_features = metrics[i] & RTAX_FEATURE_MASK;
794
f8edcd12
PS
795 if (!user_features)
796 continue;
c3a8d947
DB
797 BUILD_BUG_ON(RTAX_FEATURE_MASK & DST_FEATURE_MASK);
798 if (nla_put_u32(skb, i + 1, user_features))
799 goto nla_put_failure;
ea697639
DB
800 } else {
801 if (nla_put_u32(skb, i + 1, metrics[i]))
802 goto nla_put_failure;
803 }
2d7202bf 804 valid++;
2d7202bf 805 }
1da177e4 806 }
1da177e4 807
a57d27fc
DM
808 if (!valid) {
809 nla_nest_cancel(skb, mx);
810 return 0;
811 }
2d7202bf
TG
812
813 return nla_nest_end(skb, mx);
814
815nla_put_failure:
bc3ed28c
TG
816 nla_nest_cancel(skb, mx);
817 return -EMSGSIZE;
1da177e4 818}
e0d087af 819EXPORT_SYMBOL(rtnetlink_put_metrics);
1da177e4 820
e3703b3d 821int rtnl_put_cacheinfo(struct sk_buff *skb, struct dst_entry *dst, u32 id,
87a50699 822 long expires, u32 error)
e3703b3d
TG
823{
824 struct rta_cacheinfo ci = {
e3703b3d
TG
825 .rta_error = error,
826 .rta_id = id,
e3703b3d
TG
827 };
828
3940746d
DA
829 if (dst) {
830 ci.rta_lastuse = jiffies_delta_to_clock_t(jiffies - dst->lastuse);
831 ci.rta_used = dst->__use;
bc9d3a9f 832 ci.rta_clntref = rcuref_read(&dst->__rcuref);
3940746d 833 }
8253947e
LW
834 if (expires) {
835 unsigned long clock;
e3703b3d 836
8253947e
LW
837 clock = jiffies_to_clock_t(abs(expires));
838 clock = min_t(unsigned long, clock, INT_MAX);
839 ci.rta_expires = (expires > 0) ? clock : -clock;
840 }
e3703b3d
TG
841 return nla_put(skb, RTA_CACHEINFO, sizeof(ci), &ci);
842}
e3703b3d 843EXPORT_SYMBOL_GPL(rtnl_put_cacheinfo);
1da177e4 844
6a2968ee
ED
845void netdev_set_operstate(struct net_device *dev, int newstate)
846{
847 unsigned int old = READ_ONCE(dev->operstate);
848
849 do {
850 if (old == newstate)
851 return;
852 } while (!try_cmpxchg(&dev->operstate, &old, newstate));
853
854 netdev_state_change(dev);
855}
856EXPORT_SYMBOL(netdev_set_operstate);
857
93b2d4a2 858static void set_operstate(struct net_device *dev, unsigned char transition)
b00055aa 859{
6a2968ee 860 unsigned char operstate = READ_ONCE(dev->operstate);
b00055aa 861
e0d087af 862 switch (transition) {
b00055aa
SR
863 case IF_OPER_UP:
864 if ((operstate == IF_OPER_DORMANT ||
eec517cd 865 operstate == IF_OPER_TESTING ||
b00055aa 866 operstate == IF_OPER_UNKNOWN) &&
eec517cd 867 !netif_dormant(dev) && !netif_testing(dev))
b00055aa
SR
868 operstate = IF_OPER_UP;
869 break;
870
eec517cd 871 case IF_OPER_TESTING:
abbc7928 872 if (netif_oper_up(dev))
eec517cd
AL
873 operstate = IF_OPER_TESTING;
874 break;
875
b00055aa 876 case IF_OPER_DORMANT:
abbc7928 877 if (netif_oper_up(dev))
b00055aa
SR
878 operstate = IF_OPER_DORMANT;
879 break;
3ff50b79 880 }
b00055aa 881
6a2968ee 882 netdev_set_operstate(dev, operstate);
b00055aa
SR
883}
884
b1beb681
JB
885static unsigned int rtnl_dev_get_flags(const struct net_device *dev)
886{
887 return (dev->flags & ~(IFF_PROMISC | IFF_ALLMULTI)) |
888 (dev->gflags & (IFF_PROMISC | IFF_ALLMULTI));
889}
890
3729d502
PM
891static unsigned int rtnl_dev_combine_flags(const struct net_device *dev,
892 const struct ifinfomsg *ifm)
893{
894 unsigned int flags = ifm->ifi_flags;
895
896 /* bugwards compatibility: ifi_change == 0 is treated as ~0 */
897 if (ifm->ifi_change)
898 flags = (flags & ifm->ifi_change) |
b1beb681 899 (rtnl_dev_get_flags(dev) & ~ifm->ifi_change);
3729d502
PM
900
901 return flags;
902}
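/* Worked example of the rule above: a request carrying ifi_flags == 0 and
 * ifi_change == IFF_UP takes only the IFF_UP bit from the request (bringing
 * the device down) while every other bit keeps the device's current value;
 * a request with ifi_change == 0 replaces all flags, as if ifi_change were ~0.
 */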
903
b60c5115 904static void copy_rtnl_link_stats(struct rtnl_link_stats *a,
be1f3c2c 905 const struct rtnl_link_stats64 *b)
1da177e4 906{
b60c5115
TG
907 a->rx_packets = b->rx_packets;
908 a->tx_packets = b->tx_packets;
909 a->rx_bytes = b->rx_bytes;
910 a->tx_bytes = b->tx_bytes;
911 a->rx_errors = b->rx_errors;
912 a->tx_errors = b->tx_errors;
913 a->rx_dropped = b->rx_dropped;
914 a->tx_dropped = b->tx_dropped;
915
916 a->multicast = b->multicast;
917 a->collisions = b->collisions;
918
919 a->rx_length_errors = b->rx_length_errors;
920 a->rx_over_errors = b->rx_over_errors;
921 a->rx_crc_errors = b->rx_crc_errors;
922 a->rx_frame_errors = b->rx_frame_errors;
923 a->rx_fifo_errors = b->rx_fifo_errors;
924 a->rx_missed_errors = b->rx_missed_errors;
925
926 a->tx_aborted_errors = b->tx_aborted_errors;
927 a->tx_carrier_errors = b->tx_carrier_errors;
928 a->tx_fifo_errors = b->tx_fifo_errors;
929 a->tx_heartbeat_errors = b->tx_heartbeat_errors;
930 a->tx_window_errors = b->tx_window_errors;
931
932 a->rx_compressed = b->rx_compressed;
933 a->tx_compressed = b->tx_compressed;
6e7333d3
JW
934
935 a->rx_nohandler = b->rx_nohandler;
10708f37
JE
936}
937
c02db8c6 938/* All VF info */
115c9b81
GR
939static inline int rtnl_vfinfo_size(const struct net_device *dev,
940 u32 ext_filter_mask)
ebc08a6f 941{
9af15c38 942 if (dev->dev.parent && (ext_filter_mask & RTEXT_FILTER_VF)) {
c02db8c6 943 int num_vfs = dev_num_vf(dev->dev.parent);
7e75f74a 944 size_t size = nla_total_size(0);
045de01a 945 size += num_vfs *
7e75f74a
SD
946 (nla_total_size(0) +
947 nla_total_size(sizeof(struct ifla_vf_mac)) +
75345f88 948 nla_total_size(sizeof(struct ifla_vf_broadcast)) +
7e75f74a
SD
949 nla_total_size(sizeof(struct ifla_vf_vlan)) +
950 nla_total_size(0) + /* nest IFLA_VF_VLAN_LIST */
79aab093
MS
951 nla_total_size(MAX_VLAN_LIST_LEN *
952 sizeof(struct ifla_vf_vlan_info)) +
ed616689 953 nla_total_size(sizeof(struct ifla_vf_spoofchk)) +
7e75f74a 954 nla_total_size(sizeof(struct ifla_vf_tx_rate)) +
945a3676 955 nla_total_size(sizeof(struct ifla_vf_rate)) +
01a3d796 956 nla_total_size(sizeof(struct ifla_vf_link_state)) +
3b766cd8 957 nla_total_size(sizeof(struct ifla_vf_rss_query_en)) +
dd461d6a 958 nla_total_size(sizeof(struct ifla_vf_trust)));
fa0e21fa
EP
959 if (~ext_filter_mask & RTEXT_FILTER_SKIP_STATS) {
960 size += num_vfs *
961 (nla_total_size(0) + /* nest IFLA_VF_STATS */
962 /* IFLA_VF_STATS_RX_PACKETS */
963 nla_total_size_64bit(sizeof(__u64)) +
964 /* IFLA_VF_STATS_TX_PACKETS */
965 nla_total_size_64bit(sizeof(__u64)) +
966 /* IFLA_VF_STATS_RX_BYTES */
967 nla_total_size_64bit(sizeof(__u64)) +
968 /* IFLA_VF_STATS_TX_BYTES */
969 nla_total_size_64bit(sizeof(__u64)) +
970 /* IFLA_VF_STATS_BROADCAST */
971 nla_total_size_64bit(sizeof(__u64)) +
972 /* IFLA_VF_STATS_MULTICAST */
973 nla_total_size_64bit(sizeof(__u64)) +
974 /* IFLA_VF_STATS_RX_DROPPED */
975 nla_total_size_64bit(sizeof(__u64)) +
976 /* IFLA_VF_STATS_TX_DROPPED */
977 nla_total_size_64bit(sizeof(__u64)));
978 }
c02db8c6
CW
979 return size;
980 } else
ebc08a6f
WM
981 return 0;
982}
983
c53864fd
DG
984static size_t rtnl_port_size(const struct net_device *dev,
985 u32 ext_filter_mask)
57b61080
SF
986{
987 size_t port_size = nla_total_size(4) /* PORT_VF */
988 + nla_total_size(PORT_PROFILE_MAX) /* PORT_PROFILE */
57b61080
SF
989 + nla_total_size(PORT_UUID_MAX) /* PORT_INSTANCE_UUID */
990 + nla_total_size(PORT_UUID_MAX) /* PORT_HOST_UUID */
991 + nla_total_size(1) /* PROT_VDP_REQUEST */
992 + nla_total_size(2); /* PORT_VDP_RESPONSE */
993 size_t vf_ports_size = nla_total_size(sizeof(struct nlattr));
994 size_t vf_port_size = nla_total_size(sizeof(struct nlattr))
995 + port_size;
996 size_t port_self_size = nla_total_size(sizeof(struct nlattr))
997 + port_size;
998
c53864fd
DG
999 if (!dev->netdev_ops->ndo_get_vf_port || !dev->dev.parent ||
1000 !(ext_filter_mask & RTEXT_FILTER_VF))
57b61080
SF
1001 return 0;
1002 if (dev_num_vf(dev->dev.parent))
1003 return port_self_size + vf_ports_size +
1004 vf_port_size * dev_num_vf(dev->dev.parent);
1005 else
1006 return port_self_size;
1007}
1008
b5cdae32 1009static size_t rtnl_xdp_size(void)
d1fdd913 1010{
b3cfaa31 1011 size_t xdp_size = nla_total_size(0) + /* nest IFLA_XDP */
58038695 1012 nla_total_size(1) + /* XDP_ATTACHED */
a25717d2 1013 nla_total_size(4) + /* XDP_PROG_ID (or 1st mode) */
4f91da26 1014 nla_total_size(4); /* XDP_<mode>_PROG_ID */
d1fdd913 1015
b5cdae32 1016 return xdp_size;
d1fdd913
BB
1017}
1018
88f4fb0c
JP
1019static size_t rtnl_prop_list_size(const struct net_device *dev)
1020{
1021 struct netdev_name_node *name_node;
9f308313
ED
1022 unsigned int cnt = 0;
1023
1024 rcu_read_lock();
1025 list_for_each_entry_rcu(name_node, &dev->name_node->list, list)
1026 cnt++;
1027 rcu_read_unlock();
88f4fb0c 1028
9f308313 1029 if (!cnt)
88f4fb0c 1030 return 0;
9f308313
ED
1031
1032 return nla_total_size(0) + cnt * nla_total_size(ALTIFNAMSIZ);
88f4fb0c
JP
1033}
1034
829eb208
RP
1035static size_t rtnl_proto_down_size(const struct net_device *dev)
1036{
1037 size_t size = nla_total_size(1);
1038
6890ab31
ED
1039 /* Assume dev->proto_down_reason is not zero. */
1040 size += nla_total_size(0) + nla_total_size(4);
829eb208
RP
1041
1042 return size;
1043}
1044
dca56c30
JP
1045static size_t rtnl_devlink_port_size(const struct net_device *dev)
1046{
1047 size_t size = nla_total_size(0); /* nest IFLA_DEVLINK_PORT */
1048
1049 if (dev->devlink_port)
1050 size += devlink_nl_port_handle_size(dev->devlink_port);
1051
1052 return size;
1053}
1054
5f184269
JP
1055static size_t rtnl_dpll_pin_size(const struct net_device *dev)
1056{
1057 size_t size = nla_total_size(0); /* nest IFLA_DPLL_PIN */
1058
289e9225 1059 size += dpll_netdev_pin_handle_size(dev);
5f184269
JP
1060
1061 return size;
1062}
1063
115c9b81
GR
1064static noinline size_t if_nlmsg_size(const struct net_device *dev,
1065 u32 ext_filter_mask)
339bf98f
TG
1066{
1067 return NLMSG_ALIGN(sizeof(struct ifinfomsg))
1068 + nla_total_size(IFNAMSIZ) /* IFLA_IFNAME */
0b815a1a 1069 + nla_total_size(IFALIASZ) /* IFLA_IFALIAS */
339bf98f 1070 + nla_total_size(IFNAMSIZ) /* IFLA_QDISC */
270cb4d0 1071 + nla_total_size_64bit(sizeof(struct rtnl_link_ifmap))
339bf98f 1072 + nla_total_size(sizeof(struct rtnl_link_stats))
35c58459 1073 + nla_total_size_64bit(sizeof(struct rtnl_link_stats64))
339bf98f
TG
1074 + nla_total_size(MAX_ADDR_LEN) /* IFLA_ADDRESS */
1075 + nla_total_size(MAX_ADDR_LEN) /* IFLA_BROADCAST */
1076 + nla_total_size(4) /* IFLA_TXQLEN */
1077 + nla_total_size(4) /* IFLA_WEIGHT */
1078 + nla_total_size(4) /* IFLA_MTU */
1079 + nla_total_size(4) /* IFLA_LINK */
1080 + nla_total_size(4) /* IFLA_MASTER */
9a57247f 1081 + nla_total_size(1) /* IFLA_CARRIER */
edbc0bb3 1082 + nla_total_size(4) /* IFLA_PROMISCUITY */
7e6e1b57 1083 + nla_total_size(4) /* IFLA_ALLMULTI */
76ff5cc9
JP
1084 + nla_total_size(4) /* IFLA_NUM_TX_QUEUES */
1085 + nla_total_size(4) /* IFLA_NUM_RX_QUEUES */
6919756c
TK
1086 + nla_total_size(4) /* IFLA_GSO_MAX_SEGS */
1087 + nla_total_size(4) /* IFLA_GSO_MAX_SIZE */
eac1b93c 1088 + nla_total_size(4) /* IFLA_GRO_MAX_SIZE */
9eefedd5
XL
1089 + nla_total_size(4) /* IFLA_GSO_IPV4_MAX_SIZE */
1090 + nla_total_size(4) /* IFLA_GRO_IPV4_MAX_SIZE */
89527be8
ED
1091 + nla_total_size(4) /* IFLA_TSO_MAX_SIZE */
1092 + nla_total_size(4) /* IFLA_TSO_MAX_SEGS */
339bf98f 1093 + nla_total_size(1) /* IFLA_OPERSTATE */
38f7b870 1094 + nla_total_size(1) /* IFLA_LINKMODE */
2d3b479d 1095 + nla_total_size(4) /* IFLA_CARRIER_CHANGES */
d37512a2 1096 + nla_total_size(4) /* IFLA_LINK_NETNSID */
db833d40 1097 + nla_total_size(4) /* IFLA_GROUP */
115c9b81
GR
1098 + nla_total_size(ext_filter_mask
1099 & RTEXT_FILTER_VF ? 4 : 0) /* IFLA_NUM_VF */
1100 + rtnl_vfinfo_size(dev, ext_filter_mask) /* IFLA_VFINFO_LIST */
c53864fd 1101 + rtnl_port_size(dev, ext_filter_mask) /* IFLA_VF_PORTS + IFLA_PORT_SELF */
f8ff182c 1102 + rtnl_link_get_size(dev) /* IFLA_LINKINFO */
b1974ed0 1103 + rtnl_link_get_af_size(dev, ext_filter_mask) /* IFLA_AF_SPEC */
82f28412 1104 + nla_total_size(MAX_PHYS_ITEM_ID_LEN) /* IFLA_PHYS_PORT_ID */
88d6378b 1105 + nla_total_size(MAX_PHYS_ITEM_ID_LEN) /* IFLA_PHYS_SWITCH_ID */
c57c7a95 1106 + nla_total_size(IFNAMSIZ) /* IFLA_PHYS_PORT_NAME */
b5cdae32 1107 + rtnl_xdp_size() /* IFLA_XDP */
3d3ea5af 1108 + nla_total_size(4) /* IFLA_EVENT */
6621dd29 1109 + nla_total_size(4) /* IFLA_NEW_NETNSID */
38e01b30 1110 + nla_total_size(4) /* IFLA_NEW_IFINDEX */
829eb208 1111 + rtnl_proto_down_size(dev) /* proto down */
7e4a8d5a 1112 + nla_total_size(4) /* IFLA_TARGET_NETNSID */
b2d3bcfa
DD
1113 + nla_total_size(4) /* IFLA_CARRIER_UP_COUNT */
1114 + nla_total_size(4) /* IFLA_CARRIER_DOWN_COUNT */
3e7a50ce
SH
1115 + nla_total_size(4) /* IFLA_MIN_MTU */
1116 + nla_total_size(4) /* IFLA_MAX_MTU */
88f4fb0c 1117 + rtnl_prop_list_size(dev)
f74877a5 1118 + nla_total_size(MAX_ADDR_LEN) /* IFLA_PERM_ADDRESS */
dca56c30 1119 + rtnl_devlink_port_size(dev)
5f184269 1120 + rtnl_dpll_pin_size(dev)
79e1ad14 1121 + 0;
339bf98f
TG
1122}
1123
57b61080
SF
1124static int rtnl_vf_ports_fill(struct sk_buff *skb, struct net_device *dev)
1125{
1126 struct nlattr *vf_ports;
1127 struct nlattr *vf_port;
1128 int vf;
1129 int err;
1130
ae0be8de 1131 vf_ports = nla_nest_start_noflag(skb, IFLA_VF_PORTS);
57b61080
SF
1132 if (!vf_ports)
1133 return -EMSGSIZE;
1134
1135 for (vf = 0; vf < dev_num_vf(dev->dev.parent); vf++) {
ae0be8de 1136 vf_port = nla_nest_start_noflag(skb, IFLA_VF_PORT);
8ca94183
SF
1137 if (!vf_port)
1138 goto nla_put_failure;
a6574349
DM
1139 if (nla_put_u32(skb, IFLA_PORT_VF, vf))
1140 goto nla_put_failure;
57b61080 1141 err = dev->netdev_ops->ndo_get_vf_port(dev, vf, skb);
8ca94183
SF
1142 if (err == -EMSGSIZE)
1143 goto nla_put_failure;
57b61080 1144 if (err) {
57b61080
SF
1145 nla_nest_cancel(skb, vf_port);
1146 continue;
1147 }
1148 nla_nest_end(skb, vf_port);
1149 }
1150
1151 nla_nest_end(skb, vf_ports);
1152
1153 return 0;
8ca94183
SF
1154
1155nla_put_failure:
1156 nla_nest_cancel(skb, vf_ports);
1157 return -EMSGSIZE;
57b61080
SF
1158}
1159
1160static int rtnl_port_self_fill(struct sk_buff *skb, struct net_device *dev)
1161{
1162 struct nlattr *port_self;
1163 int err;
1164
ae0be8de 1165 port_self = nla_nest_start_noflag(skb, IFLA_PORT_SELF);
57b61080
SF
1166 if (!port_self)
1167 return -EMSGSIZE;
1168
1169 err = dev->netdev_ops->ndo_get_vf_port(dev, PORT_SELF_VF, skb);
1170 if (err) {
1171 nla_nest_cancel(skb, port_self);
8ca94183 1172 return (err == -EMSGSIZE) ? err : 0;
57b61080
SF
1173 }
1174
1175 nla_nest_end(skb, port_self);
1176
1177 return 0;
1178}
1179
c53864fd
DG
1180static int rtnl_port_fill(struct sk_buff *skb, struct net_device *dev,
1181 u32 ext_filter_mask)
57b61080
SF
1182{
1183 int err;
1184
c53864fd
DG
1185 if (!dev->netdev_ops->ndo_get_vf_port || !dev->dev.parent ||
1186 !(ext_filter_mask & RTEXT_FILTER_VF))
57b61080
SF
1187 return 0;
1188
1189 err = rtnl_port_self_fill(skb, dev);
1190 if (err)
1191 return err;
1192
1193 if (dev_num_vf(dev->dev.parent)) {
1194 err = rtnl_vf_ports_fill(skb, dev);
1195 if (err)
1196 return err;
1197 }
1198
1199 return 0;
1200}
1201
66cae9ed
JP
1202static int rtnl_phys_port_id_fill(struct sk_buff *skb, struct net_device *dev)
1203{
1204 int err;
02637fce 1205 struct netdev_phys_item_id ppid;
66cae9ed
JP
1206
1207 err = dev_get_phys_port_id(dev, &ppid);
1208 if (err) {
1209 if (err == -EOPNOTSUPP)
1210 return 0;
1211 return err;
1212 }
1213
1214 if (nla_put(skb, IFLA_PHYS_PORT_ID, ppid.id_len, ppid.id))
1215 return -EMSGSIZE;
1216
1217 return 0;
1218}
1219
db24a904
DA
1220static int rtnl_phys_port_name_fill(struct sk_buff *skb, struct net_device *dev)
1221{
1222 char name[IFNAMSIZ];
1223 int err;
1224
1225 err = dev_get_phys_port_name(dev, name, sizeof(name));
1226 if (err) {
1227 if (err == -EOPNOTSUPP)
1228 return 0;
1229 return err;
1230 }
1231
77ef033b 1232 if (nla_put_string(skb, IFLA_PHYS_PORT_NAME, name))
db24a904
DA
1233 return -EMSGSIZE;
1234
1235 return 0;
1236}
1237
82f28412
JP
1238static int rtnl_phys_switch_id_fill(struct sk_buff *skb, struct net_device *dev)
1239{
bccb3025 1240 struct netdev_phys_item_id ppid = { };
82f28412 1241 int err;
82f28412 1242
bccb3025 1243 err = dev_get_port_parent_id(dev, &ppid, false);
82f28412
JP
1244 if (err) {
1245 if (err == -EOPNOTSUPP)
1246 return 0;
1247 return err;
1248 }
1249
bccb3025 1250 if (nla_put(skb, IFLA_PHYS_SWITCH_ID, ppid.id_len, ppid.id))
82f28412
JP
1251 return -EMSGSIZE;
1252
1253 return 0;
1254}
1255
b22b941b
HFS
1256static noinline_for_stack int rtnl_fill_stats(struct sk_buff *skb,
1257 struct net_device *dev)
1258{
550bce59 1259 struct rtnl_link_stats64 *sp;
b22b941b 1260 struct nlattr *attr;
18402843 1261
58414d32
ND
1262 attr = nla_reserve_64bit(skb, IFLA_STATS64,
1263 sizeof(struct rtnl_link_stats64), IFLA_PAD);
b22b941b
HFS
1264 if (!attr)
1265 return -EMSGSIZE;
1266
550bce59
RP
1267 sp = nla_data(attr);
1268 dev_get_stats(dev, sp);
b22b941b 1269
550bce59
RP
1270 attr = nla_reserve(skb, IFLA_STATS,
1271 sizeof(struct rtnl_link_stats));
b22b941b
HFS
1272 if (!attr)
1273 return -EMSGSIZE;
1274
550bce59 1275 copy_rtnl_link_stats(nla_data(attr), sp);
b22b941b
HFS
1276
1277 return 0;
1278}
1279
1280static noinline_for_stack int rtnl_fill_vfinfo(struct sk_buff *skb,
1281 struct net_device *dev,
1282 int vfs_num,
fa0e21fa 1283 u32 ext_filter_mask)
b22b941b
HFS
1284{
1285 struct ifla_vf_rss_query_en vf_rss_query_en;
79aab093 1286 struct nlattr *vf, *vfstats, *vfvlanlist;
b22b941b 1287 struct ifla_vf_link_state vf_linkstate;
79aab093 1288 struct ifla_vf_vlan_info vf_vlan_info;
b22b941b
HFS
1289 struct ifla_vf_spoofchk vf_spoofchk;
1290 struct ifla_vf_tx_rate vf_tx_rate;
1291 struct ifla_vf_stats vf_stats;
1292 struct ifla_vf_trust vf_trust;
1293 struct ifla_vf_vlan vf_vlan;
1294 struct ifla_vf_rate vf_rate;
b22b941b 1295 struct ifla_vf_mac vf_mac;
75345f88 1296 struct ifla_vf_broadcast vf_broadcast;
b22b941b 1297 struct ifla_vf_info ivi;
30aad417
DG
1298 struct ifla_vf_guid node_guid;
1299 struct ifla_vf_guid port_guid;
b22b941b 1300
0eed9cf5
MY
1301 memset(&ivi, 0, sizeof(ivi));
1302
b22b941b
HFS
1303 /* Not all SR-IOV capable drivers support the
1304 * spoofcheck and "RSS query enable" query. Preset to
1305 * -1 so the user space tool can detect that the driver
1306 * didn't report anything.
1307 */
1308 ivi.spoofchk = -1;
1309 ivi.rss_query_en = -1;
1310 ivi.trusted = -1;
b22b941b
HFS
1311 /* The default value for VF link state is "auto"
1312 * IFLA_VF_LINK_STATE_AUTO which equals zero
1313 */
1314 ivi.linkstate = 0;
79aab093
MS
1315 /* VLAN Protocol by default is 802.1Q */
1316 ivi.vlan_proto = htons(ETH_P_8021Q);
b22b941b
HFS
1317 if (dev->netdev_ops->ndo_get_vf_config(dev, vfs_num, &ivi))
1318 return 0;
1319
775f4f05 1320 memset(&vf_vlan_info, 0, sizeof(vf_vlan_info));
9fbf082f
LR
1321 memset(&node_guid, 0, sizeof(node_guid));
1322 memset(&port_guid, 0, sizeof(port_guid));
775f4f05 1323
b22b941b
HFS
1324 vf_mac.vf =
1325 vf_vlan.vf =
79aab093 1326 vf_vlan_info.vf =
b22b941b
HFS
1327 vf_rate.vf =
1328 vf_tx_rate.vf =
1329 vf_spoofchk.vf =
1330 vf_linkstate.vf =
1331 vf_rss_query_en.vf =
9aed6ae0
DG
1332 vf_trust.vf =
1333 node_guid.vf =
1334 port_guid.vf = ivi.vf;
b22b941b
HFS
1335
1336 memcpy(vf_mac.mac, ivi.mac, sizeof(ivi.mac));
75345f88 1337 memcpy(vf_broadcast.broadcast, dev->broadcast, dev->addr_len);
b22b941b
HFS
1338 vf_vlan.vlan = ivi.vlan;
1339 vf_vlan.qos = ivi.qos;
79aab093
MS
1340 vf_vlan_info.vlan = ivi.vlan;
1341 vf_vlan_info.qos = ivi.qos;
1342 vf_vlan_info.vlan_proto = ivi.vlan_proto;
b22b941b
HFS
1343 vf_tx_rate.rate = ivi.max_tx_rate;
1344 vf_rate.min_tx_rate = ivi.min_tx_rate;
1345 vf_rate.max_tx_rate = ivi.max_tx_rate;
1346 vf_spoofchk.setting = ivi.spoofchk;
1347 vf_linkstate.link_state = ivi.linkstate;
1348 vf_rss_query_en.setting = ivi.rss_query_en;
1349 vf_trust.setting = ivi.trusted;
ae0be8de 1350 vf = nla_nest_start_noflag(skb, IFLA_VF_INFO);
79aab093 1351 if (!vf)
4a59cdfd 1352 return -EMSGSIZE;
b22b941b 1353 if (nla_put(skb, IFLA_VF_MAC, sizeof(vf_mac), &vf_mac) ||
75345f88 1354 nla_put(skb, IFLA_VF_BROADCAST, sizeof(vf_broadcast), &vf_broadcast) ||
b22b941b
HFS
1355 nla_put(skb, IFLA_VF_VLAN, sizeof(vf_vlan), &vf_vlan) ||
1356 nla_put(skb, IFLA_VF_RATE, sizeof(vf_rate),
1357 &vf_rate) ||
1358 nla_put(skb, IFLA_VF_TX_RATE, sizeof(vf_tx_rate),
1359 &vf_tx_rate) ||
1360 nla_put(skb, IFLA_VF_SPOOFCHK, sizeof(vf_spoofchk),
1361 &vf_spoofchk) ||
1362 nla_put(skb, IFLA_VF_LINK_STATE, sizeof(vf_linkstate),
1363 &vf_linkstate) ||
1364 nla_put(skb, IFLA_VF_RSS_QUERY_EN,
1365 sizeof(vf_rss_query_en),
1366 &vf_rss_query_en) ||
1367 nla_put(skb, IFLA_VF_TRUST,
1368 sizeof(vf_trust), &vf_trust))
79aab093 1369 goto nla_put_vf_failure;
30aad417 1370
30aad417
DG
1371 if (dev->netdev_ops->ndo_get_vf_guid &&
1372 !dev->netdev_ops->ndo_get_vf_guid(dev, vfs_num, &node_guid,
1373 &port_guid)) {
1374 if (nla_put(skb, IFLA_VF_IB_NODE_GUID, sizeof(node_guid),
1375 &node_guid) ||
1376 nla_put(skb, IFLA_VF_IB_PORT_GUID, sizeof(port_guid),
1377 &port_guid))
1378 goto nla_put_vf_failure;
1379 }
ae0be8de 1380 vfvlanlist = nla_nest_start_noflag(skb, IFLA_VF_VLAN_LIST);
79aab093
MS
1381 if (!vfvlanlist)
1382 goto nla_put_vf_failure;
1383 if (nla_put(skb, IFLA_VF_VLAN_INFO, sizeof(vf_vlan_info),
1384 &vf_vlan_info)) {
1385 nla_nest_cancel(skb, vfvlanlist);
1386 goto nla_put_vf_failure;
1387 }
1388 nla_nest_end(skb, vfvlanlist);
fa0e21fa
EP
1389 if (~ext_filter_mask & RTEXT_FILTER_SKIP_STATS) {
1390 memset(&vf_stats, 0, sizeof(vf_stats));
1391 if (dev->netdev_ops->ndo_get_vf_stats)
1392 dev->netdev_ops->ndo_get_vf_stats(dev, vfs_num,
1393 &vf_stats);
1394 vfstats = nla_nest_start_noflag(skb, IFLA_VF_STATS);
1395 if (!vfstats)
1396 goto nla_put_vf_failure;
1397 if (nla_put_u64_64bit(skb, IFLA_VF_STATS_RX_PACKETS,
1398 vf_stats.rx_packets, IFLA_VF_STATS_PAD) ||
1399 nla_put_u64_64bit(skb, IFLA_VF_STATS_TX_PACKETS,
1400 vf_stats.tx_packets, IFLA_VF_STATS_PAD) ||
1401 nla_put_u64_64bit(skb, IFLA_VF_STATS_RX_BYTES,
1402 vf_stats.rx_bytes, IFLA_VF_STATS_PAD) ||
1403 nla_put_u64_64bit(skb, IFLA_VF_STATS_TX_BYTES,
1404 vf_stats.tx_bytes, IFLA_VF_STATS_PAD) ||
1405 nla_put_u64_64bit(skb, IFLA_VF_STATS_BROADCAST,
1406 vf_stats.broadcast, IFLA_VF_STATS_PAD) ||
1407 nla_put_u64_64bit(skb, IFLA_VF_STATS_MULTICAST,
1408 vf_stats.multicast, IFLA_VF_STATS_PAD) ||
1409 nla_put_u64_64bit(skb, IFLA_VF_STATS_RX_DROPPED,
1410 vf_stats.rx_dropped, IFLA_VF_STATS_PAD) ||
1411 nla_put_u64_64bit(skb, IFLA_VF_STATS_TX_DROPPED,
1412 vf_stats.tx_dropped, IFLA_VF_STATS_PAD)) {
1413 nla_nest_cancel(skb, vfstats);
1414 goto nla_put_vf_failure;
1415 }
1416 nla_nest_end(skb, vfstats);
79aab093 1417 }
b22b941b
HFS
1418 nla_nest_end(skb, vf);
1419 return 0;
79aab093
MS
1420
1421nla_put_vf_failure:
1422 nla_nest_cancel(skb, vf);
79aab093 1423 return -EMSGSIZE;
b22b941b
HFS
1424}
1425
250fc3df
FW
1426static noinline_for_stack int rtnl_fill_vf(struct sk_buff *skb,
1427 struct net_device *dev,
1428 u32 ext_filter_mask)
1429{
1430 struct nlattr *vfinfo;
1431 int i, num_vfs;
1432
1433 if (!dev->dev.parent || ((ext_filter_mask & RTEXT_FILTER_VF) == 0))
1434 return 0;
1435
1436 num_vfs = dev_num_vf(dev->dev.parent);
1437 if (nla_put_u32(skb, IFLA_NUM_VF, num_vfs))
1438 return -EMSGSIZE;
1439
1440 if (!dev->netdev_ops->ndo_get_vf_config)
1441 return 0;
1442
ae0be8de 1443 vfinfo = nla_nest_start_noflag(skb, IFLA_VFINFO_LIST);
250fc3df
FW
1444 if (!vfinfo)
1445 return -EMSGSIZE;
1446
1447 for (i = 0; i < num_vfs; i++) {
4a59cdfd
GP
1448 if (rtnl_fill_vfinfo(skb, dev, i, ext_filter_mask)) {
1449 nla_nest_cancel(skb, vfinfo);
250fc3df 1450 return -EMSGSIZE;
4a59cdfd 1451 }
250fc3df
FW
1452 }
1453
1454 nla_nest_end(skb, vfinfo);
1455 return 0;
1456}
1457
74808e72
ED
1458static int rtnl_fill_link_ifmap(struct sk_buff *skb,
1459 const struct net_device *dev)
b22b941b 1460{
5f8e4474
KL
1461 struct rtnl_link_ifmap map;
1462
1463 memset(&map, 0, sizeof(map));
74808e72
ED
1464 map.mem_start = READ_ONCE(dev->mem_start);
1465 map.mem_end = READ_ONCE(dev->mem_end);
1466 map.base_addr = READ_ONCE(dev->base_addr);
1467 map.irq = READ_ONCE(dev->irq);
1468 map.dma = READ_ONCE(dev->dma);
1469 map.port = READ_ONCE(dev->if_port);
5f8e4474 1470
270cb4d0 1471 if (nla_put_64bit(skb, IFLA_MAP, sizeof(map), &map, IFLA_PAD))
b22b941b
HFS
1472 return -EMSGSIZE;
1473
1474 return 0;
1475}
1476
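/* Per the commit annotated here ("do not depend on RTNL in
 * rtnl_xdp_prog_skb()"), the generic-XDP program id is read under
 * rcu_read_lock()/rcu_dereference() rather than rtnl_dereference(), so this
 * helper does not require the caller to hold the RTNL.
 */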
a25717d2 1477static u32 rtnl_xdp_prog_skb(struct net_device *dev)
d67b9cd2 1478{
58038695 1479 const struct bpf_prog *generic_xdp_prog;
979aad40 1480 u32 res = 0;
d67b9cd2 1481
979aad40
ED
1482 rcu_read_lock();
1483 generic_xdp_prog = rcu_dereference(dev->xdp_prog);
1484 if (generic_xdp_prog)
1485 res = generic_xdp_prog->aux->id;
1486 rcu_read_unlock();
d67b9cd2 1487
979aad40 1488 return res;
a25717d2 1489}
d67b9cd2 1490
a25717d2
JK
1491static u32 rtnl_xdp_prog_drv(struct net_device *dev)
1492{
7f0a8382 1493 return dev_xdp_prog_id(dev, XDP_MODE_DRV);
a25717d2 1494}
118b4aa2 1495
a25717d2
JK
1496static u32 rtnl_xdp_prog_hw(struct net_device *dev)
1497{
7f0a8382 1498 return dev_xdp_prog_id(dev, XDP_MODE_HW);
a25717d2
JK
1499}
1500
1501static int rtnl_xdp_report_one(struct sk_buff *skb, struct net_device *dev,
1502 u32 *prog_id, u8 *mode, u8 tgt_mode, u32 attr,
1503 u32 (*get_prog_id)(struct net_device *dev))
1504{
1505 u32 curr_id;
1506 int err;
1507
1508 curr_id = get_prog_id(dev);
1509 if (!curr_id)
1510 return 0;
1511
1512 *prog_id = curr_id;
1513 err = nla_put_u32(skb, attr, curr_id);
1514 if (err)
1515 return err;
d67b9cd2 1516
a25717d2
JK
1517 if (*mode != XDP_ATTACHED_NONE)
1518 *mode = XDP_ATTACHED_MULTI;
1519 else
1520 *mode = tgt_mode;
118b4aa2 1521
a25717d2 1522 return 0;
d67b9cd2
DB
1523}
1524
d1fdd913
BB
1525static int rtnl_xdp_fill(struct sk_buff *skb, struct net_device *dev)
1526{
d1fdd913 1527 struct nlattr *xdp;
58038695 1528 u32 prog_id;
d1fdd913 1529 int err;
4f91da26 1530 u8 mode;
d1fdd913 1531
ae0be8de 1532 xdp = nla_nest_start_noflag(skb, IFLA_XDP);
d1fdd913
BB
1533 if (!xdp)
1534 return -EMSGSIZE;
d67b9cd2 1535
a25717d2
JK
1536 prog_id = 0;
1537 mode = XDP_ATTACHED_NONE;
202aabe8
JK
1538 err = rtnl_xdp_report_one(skb, dev, &prog_id, &mode, XDP_ATTACHED_SKB,
1539 IFLA_XDP_SKB_PROG_ID, rtnl_xdp_prog_skb);
1540 if (err)
a25717d2 1541 goto err_cancel;
202aabe8
JK
1542 err = rtnl_xdp_report_one(skb, dev, &prog_id, &mode, XDP_ATTACHED_DRV,
1543 IFLA_XDP_DRV_PROG_ID, rtnl_xdp_prog_drv);
1544 if (err)
a25717d2 1545 goto err_cancel;
202aabe8
JK
1546 err = rtnl_xdp_report_one(skb, dev, &prog_id, &mode, XDP_ATTACHED_HW,
1547 IFLA_XDP_HW_PROG_ID, rtnl_xdp_prog_hw);
1548 if (err)
a25717d2
JK
1549 goto err_cancel;
1550
4f91da26 1551 err = nla_put_u8(skb, IFLA_XDP_ATTACHED, mode);
d1fdd913
BB
1552 if (err)
1553 goto err_cancel;
1554
a25717d2 1555 if (prog_id && mode != XDP_ATTACHED_MULTI) {
58038695
MKL
1556 err = nla_put_u32(skb, IFLA_XDP_PROG_ID, prog_id);
1557 if (err)
1558 goto err_cancel;
1559 }
1560
d1fdd913
BB
1561 nla_nest_end(skb, xdp);
1562 return 0;
1563
1564err_cancel:
1565 nla_nest_cancel(skb, xdp);
1566 return err;
1567}
1568
3d3ea5af
VY
1569static u32 rtnl_get_event(unsigned long event)
1570{
1571 u32 rtnl_event_type = IFLA_EVENT_NONE;
1572
1573 switch (event) {
1574 case NETDEV_REBOOT:
1575 rtnl_event_type = IFLA_EVENT_REBOOT;
1576 break;
1577 case NETDEV_FEAT_CHANGE:
1578 rtnl_event_type = IFLA_EVENT_FEATURES;
1579 break;
1580 case NETDEV_BONDING_FAILOVER:
1581 rtnl_event_type = IFLA_EVENT_BONDING_FAILOVER;
1582 break;
1583 case NETDEV_NOTIFY_PEERS:
1584 rtnl_event_type = IFLA_EVENT_NOTIFY_PEERS;
1585 break;
1586 case NETDEV_RESEND_IGMP:
1587 rtnl_event_type = IFLA_EVENT_IGMP_RESEND;
1588 break;
1589 case NETDEV_CHANGEINFODATA:
1590 rtnl_event_type = IFLA_EVENT_BONDING_OPTIONS;
1591 break;
1592 default:
1593 break;
1594 }
1595
1596 return rtnl_event_type;
1597}
1598
79110a04
FW
1599static int put_master_ifindex(struct sk_buff *skb, struct net_device *dev)
1600{
1601 const struct net_device *upper_dev;
1602 int ret = 0;
1603
1604 rcu_read_lock();
1605
1606 upper_dev = netdev_master_upper_dev_get_rcu(dev);
1607 if (upper_dev)
6747a5d4
ED
1608 ret = nla_put_u32(skb, IFLA_MASTER,
1609 READ_ONCE(upper_dev->ifindex));
79110a04
FW
1610
1611 rcu_read_unlock();
1612 return ret;
1613}
1614
feadc4b6
SD
1615static int nla_put_iflink(struct sk_buff *skb, const struct net_device *dev,
1616 bool force)
79110a04 1617{
e353ea9c 1618 int iflink = dev_get_iflink(dev);
79110a04 1619
e353ea9c
ED
1620 if (force || READ_ONCE(dev->ifindex) != iflink)
1621 return nla_put_u32(skb, IFLA_LINK, iflink);
79110a04 1622
feadc4b6 1623 return 0;
79110a04
FW
1624}
1625
6c557001
FW
1626static noinline_for_stack int nla_put_ifalias(struct sk_buff *skb,
1627 struct net_device *dev)
1628{
1629 char buf[IFALIASZ];
1630 int ret;
1631
1632 ret = dev_get_alias(dev, buf, sizeof(buf));
1633 return ret > 0 ? nla_put_string(skb, IFLA_IFALIAS, buf) : 0;
1634}
1635
b1e66b9a 1636static int rtnl_fill_link_netnsid(struct sk_buff *skb,
79e1ad14 1637 const struct net_device *dev,
d4e4fdf9 1638 struct net *src_net, gfp_t gfp)
b1e66b9a 1639{
feadc4b6
SD
1640 bool put_iflink = false;
1641
b1e66b9a
FW
1642 if (dev->rtnl_link_ops && dev->rtnl_link_ops->get_link_net) {
1643 struct net *link_net = dev->rtnl_link_ops->get_link_net(dev);
1644
1645 if (!net_eq(dev_net(dev), link_net)) {
d4e4fdf9 1646 int id = peernet2id_alloc(src_net, link_net, gfp);
b1e66b9a
FW
1647
1648 if (nla_put_s32(skb, IFLA_LINK_NETNSID, id))
1649 return -EMSGSIZE;
feadc4b6
SD
1650
1651 put_iflink = true;
b1e66b9a
FW
1652 }
1653 }
1654
feadc4b6 1655 return nla_put_iflink(skb, dev, put_iflink);
b1e66b9a
FW
1656}
1657
070cbf5b
FW
1658static int rtnl_fill_link_af(struct sk_buff *skb,
1659 const struct net_device *dev,
1660 u32 ext_filter_mask)
1661{
1662 const struct rtnl_af_ops *af_ops;
1663 struct nlattr *af_spec;
1664
ae0be8de 1665 af_spec = nla_nest_start_noflag(skb, IFLA_AF_SPEC);
070cbf5b
FW
1666 if (!af_spec)
1667 return -EMSGSIZE;
1668
5fa85a09 1669 list_for_each_entry_rcu(af_ops, &rtnl_af_ops, list) {
070cbf5b
FW
1670 struct nlattr *af;
1671 int err;
1672
1673 if (!af_ops->fill_link_af)
1674 continue;
1675
ae0be8de 1676 af = nla_nest_start_noflag(skb, af_ops->family);
070cbf5b
FW
1677 if (!af)
1678 return -EMSGSIZE;
1679
1680 err = af_ops->fill_link_af(skb, dev, ext_filter_mask);
1681 /*
1682 * Caller may return ENODATA to indicate that there
1683 * was no data to be dumped. This is not an error, it
1684 * means we should trim the attribute header and
1685 * continue.
1686 */
1687 if (err == -ENODATA)
1688 nla_nest_cancel(skb, af);
1689 else if (err < 0)
1690 return -EMSGSIZE;
1691
1692 nla_nest_end(skb, af);
1693 }
1694
1695 nla_nest_end(skb, af_spec);
1696 return 0;
1697}
1698
88f4fb0c
JP
1699static int rtnl_fill_alt_ifnames(struct sk_buff *skb,
1700 const struct net_device *dev)
1701{
1702 struct netdev_name_node *name_node;
1703 int count = 0;
1704
0ec4e48c 1705 list_for_each_entry_rcu(name_node, &dev->name_node->list, list) {
88f4fb0c
JP
1706 if (nla_put_string(skb, IFLA_ALT_IFNAME, name_node->name))
1707 return -EMSGSIZE;
1708 count++;
1709 }
1710 return count;
1711}
1712
0ec4e48c 1713/* RCU protected. */
88f4fb0c
JP
1714static int rtnl_fill_prop_list(struct sk_buff *skb,
1715 const struct net_device *dev)
1716{
1717 struct nlattr *prop_list;
1718 int ret;
1719
1720 prop_list = nla_nest_start(skb, IFLA_PROP_LIST);
1721 if (!prop_list)
1722 return -EMSGSIZE;
1723
1724 ret = rtnl_fill_alt_ifnames(skb, dev);
1725 if (ret <= 0)
1726 goto nest_cancel;
1727
1728 nla_nest_end(skb, prop_list);
1729 return 0;
1730
1731nest_cancel:
1732 nla_nest_cancel(skb, prop_list);
1733 return ret;
1734}
1735
829eb208
RP
1736static int rtnl_fill_proto_down(struct sk_buff *skb,
1737 const struct net_device *dev)
1738{
1739 struct nlattr *pr;
1740 u32 preason;
1741
6890ab31 1742 if (nla_put_u8(skb, IFLA_PROTO_DOWN, READ_ONCE(dev->proto_down)))
829eb208
RP
1743 goto nla_put_failure;
1744
6890ab31 1745 preason = READ_ONCE(dev->proto_down_reason);
829eb208
RP
1746 if (!preason)
1747 return 0;
1748
1749 pr = nla_nest_start(skb, IFLA_PROTO_DOWN_REASON);
1750 if (!pr)
1751 return -EMSGSIZE;
1752
1753 if (nla_put_u32(skb, IFLA_PROTO_DOWN_REASON_VALUE, preason)) {
1754 nla_nest_cancel(skb, pr);
1755 goto nla_put_failure;
1756 }
1757
1758 nla_nest_end(skb, pr);
1759 return 0;
1760
1761nla_put_failure:
1762 return -EMSGSIZE;
1763}
1764
dca56c30
JP
1765static int rtnl_fill_devlink_port(struct sk_buff *skb,
1766 const struct net_device *dev)
1767{
1768 struct nlattr *devlink_port_nest;
1769 int ret;
1770
1771 devlink_port_nest = nla_nest_start(skb, IFLA_DEVLINK_PORT);
1772 if (!devlink_port_nest)
1773 return -EMSGSIZE;
1774
1775 if (dev->devlink_port) {
1776 ret = devlink_nl_port_handle_fill(skb, dev->devlink_port);
1777 if (ret < 0)
1778 goto nest_cancel;
1779 }
1780
1781 nla_nest_end(skb, devlink_port_nest);
1782 return 0;
1783
1784nest_cancel:
1785 nla_nest_cancel(skb, devlink_port_nest);
1786 return ret;
1787}
1788
5f184269
JP
1789static int rtnl_fill_dpll_pin(struct sk_buff *skb,
1790 const struct net_device *dev)
1791{
1792 struct nlattr *dpll_pin_nest;
1793 int ret;
1794
1795 dpll_pin_nest = nla_nest_start(skb, IFLA_DPLL_PIN);
1796 if (!dpll_pin_nest)
1797 return -EMSGSIZE;
1798
289e9225 1799 ret = dpll_netdev_add_pin_handle(skb, dev);
5f184269
JP
1800 if (ret < 0)
1801 goto nest_cancel;
1802
1803 nla_nest_end(skb, dpll_pin_nest);
1804 return 0;
1805
1806nest_cancel:
1807 nla_nest_cancel(skb, dpll_pin_nest);
1808 return ret;
1809}
1810
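/* Build one RTM_NEWLINK message describing @dev. Returns 0 on success or
 * -EMSGSIZE when @skb runs out of room, in which case the partially built
 * message is cancelled.
 */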
79e1ad14
JB
1811static int rtnl_fill_ifinfo(struct sk_buff *skb,
1812 struct net_device *dev, struct net *src_net,
575c3e2a 1813 int type, u32 pid, u32 seq, u32 change,
3d3ea5af 1814 unsigned int flags, u32 ext_filter_mask,
38e01b30 1815 u32 event, int *new_nsid, int new_ifindex,
d4e4fdf9 1816 int tgt_netnsid, gfp_t gfp)
b60c5115 1817{
8a582681 1818 char devname[IFNAMSIZ];
b60c5115
TG
1819 struct ifinfomsg *ifm;
1820 struct nlmsghdr *nlh;
5891cd5e 1821 struct Qdisc *qdisc;
1da177e4 1822
2907c35f 1823 ASSERT_RTNL();
b60c5115
TG
1824 nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ifm), flags);
1825 if (nlh == NULL)
26932566 1826 return -EMSGSIZE;
1da177e4 1827
b60c5115
TG
1828 ifm = nlmsg_data(nlh);
1829 ifm->ifi_family = AF_UNSPEC;
1830 ifm->__ifi_pad = 0;
6747a5d4
ED
1831 ifm->ifi_type = READ_ONCE(dev->type);
1832 ifm->ifi_index = READ_ONCE(dev->ifindex);
b60c5115
TG
1833 ifm->ifi_flags = dev_get_flags(dev);
1834 ifm->ifi_change = change;
1835
7e4a8d5a 1836 if (tgt_netnsid >= 0 && nla_put_s32(skb, IFLA_TARGET_NETNSID, tgt_netnsid))
79e1ad14
JB
1837 goto nla_put_failure;
1838
8a582681
ED
1839 netdev_copy_name(dev, devname);
1840 if (nla_put_string(skb, IFLA_IFNAME, devname))
1841 goto nla_put_failure;
1842
ad13b5b0 1843 if (nla_put_u32(skb, IFLA_TXQLEN, READ_ONCE(dev->tx_queue_len)) ||
a6574349 1844 nla_put_u8(skb, IFLA_OPERSTATE,
6747a5d4
ED
1845 netif_running(dev) ? READ_ONCE(dev->operstate) :
1846 IF_OPER_DOWN) ||
1847 nla_put_u8(skb, IFLA_LINKMODE, READ_ONCE(dev->link_mode)) ||
1848 nla_put_u32(skb, IFLA_MTU, READ_ONCE(dev->mtu)) ||
1849 nla_put_u32(skb, IFLA_MIN_MTU, READ_ONCE(dev->min_mtu)) ||
1850 nla_put_u32(skb, IFLA_MAX_MTU, READ_ONCE(dev->max_mtu)) ||
1851 nla_put_u32(skb, IFLA_GROUP, READ_ONCE(dev->group)) ||
1852 nla_put_u32(skb, IFLA_PROMISCUITY, READ_ONCE(dev->promiscuity)) ||
1853 nla_put_u32(skb, IFLA_ALLMULTI, READ_ONCE(dev->allmulti)) ||
1854 nla_put_u32(skb, IFLA_NUM_TX_QUEUES,
1855 READ_ONCE(dev->num_tx_queues)) ||
1856 nla_put_u32(skb, IFLA_GSO_MAX_SEGS,
1857 READ_ONCE(dev->gso_max_segs)) ||
1858 nla_put_u32(skb, IFLA_GSO_MAX_SIZE,
1859 READ_ONCE(dev->gso_max_size)) ||
1860 nla_put_u32(skb, IFLA_GRO_MAX_SIZE,
1861 READ_ONCE(dev->gro_max_size)) ||
1862 nla_put_u32(skb, IFLA_GSO_IPV4_MAX_SIZE,
1863 READ_ONCE(dev->gso_ipv4_max_size)) ||
1864 nla_put_u32(skb, IFLA_GRO_IPV4_MAX_SIZE,
1865 READ_ONCE(dev->gro_ipv4_max_size)) ||
1866 nla_put_u32(skb, IFLA_TSO_MAX_SIZE,
1867 READ_ONCE(dev->tso_max_size)) ||
1868 nla_put_u32(skb, IFLA_TSO_MAX_SEGS,
1869 READ_ONCE(dev->tso_max_segs)) ||
1d69c2b3 1870#ifdef CONFIG_RPS
6747a5d4
ED
1871 nla_put_u32(skb, IFLA_NUM_RX_QUEUES,
1872 READ_ONCE(dev->num_rx_queues)) ||
1d69c2b3 1873#endif
79110a04 1874 put_master_ifindex(skb, dev) ||
9a57247f 1875 nla_put_u8(skb, IFLA_CARRIER, netif_carrier_ok(dev)) ||
6c557001 1876 nla_put_ifalias(skb, dev) ||
2d3b479d 1877 nla_put_u32(skb, IFLA_CARRIER_CHANGES,
b2d3bcfa
DD
1878 atomic_read(&dev->carrier_up_count) +
1879 atomic_read(&dev->carrier_down_count)) ||
b2d3bcfa
DD
1880 nla_put_u32(skb, IFLA_CARRIER_UP_COUNT,
1881 atomic_read(&dev->carrier_up_count)) ||
1882 nla_put_u32(skb, IFLA_CARRIER_DOWN_COUNT,
1883 atomic_read(&dev->carrier_down_count)))
a6574349 1884 goto nla_put_failure;
0b815a1a 1885
829eb208
RP
1886 if (rtnl_fill_proto_down(skb, dev))
1887 goto nla_put_failure;
1888
3d3ea5af
VY
1889 if (event != IFLA_EVENT_NONE) {
1890 if (nla_put_u32(skb, IFLA_EVENT, event))
1891 goto nla_put_failure;
1892 }
1893
1da177e4 1894 if (dev->addr_len) {
a6574349
DM
1895 if (nla_put(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr) ||
1896 nla_put(skb, IFLA_BROADCAST, dev->addr_len, dev->broadcast))
1897 goto nla_put_failure;
1da177e4
LT
1898 }
1899
66cae9ed
JP
1900 if (rtnl_phys_port_id_fill(skb, dev))
1901 goto nla_put_failure;
1902
db24a904
DA
1903 if (rtnl_phys_port_name_fill(skb, dev))
1904 goto nla_put_failure;
1905
82f28412
JP
1906 if (rtnl_phys_switch_id_fill(skb, dev))
1907 goto nla_put_failure;
1908
b22b941b 1909 if (rtnl_fill_stats(skb, dev))
10708f37 1910 goto nla_put_failure;
10708f37 1911
250fc3df 1912 if (rtnl_fill_vf(skb, dev, ext_filter_mask))
a6574349 1913 goto nla_put_failure;
57b61080 1914
c53864fd 1915 if (rtnl_port_fill(skb, dev, ext_filter_mask))
57b61080
SF
1916 goto nla_put_failure;
1917
d1fdd913
BB
1918 if (rtnl_xdp_fill(skb, dev))
1919 goto nla_put_failure;
1920
ba7d49b1 1921 if (dev->rtnl_link_ops || rtnl_have_link_slave_info(dev)) {
38f7b870
PM
1922 if (rtnl_link_fill(skb, dev) < 0)
1923 goto nla_put_failure;
1924 }
1925
d4e4fdf9 1926 if (rtnl_fill_link_netnsid(skb, dev, src_net, gfp))
b1e66b9a 1927 goto nla_put_failure;
d37512a2 1928
6621dd29
ND
1929 if (new_nsid &&
1930 nla_put_s32(skb, IFLA_NEW_NETNSID, *new_nsid) < 0)
1931 goto nla_put_failure;
38e01b30
ND
1932 if (new_ifindex &&
1933 nla_put_s32(skb, IFLA_NEW_IFINDEX, new_ifindex) < 0)
1934 goto nla_put_failure;
1935
f74877a5
MK
1936 if (memchr_inv(dev->perm_addr, '\0', dev->addr_len) &&
1937 nla_put(skb, IFLA_PERM_ADDRESS, dev->addr_len, dev->perm_addr))
1938 goto nla_put_failure;
6621dd29 1939
5fa85a09 1940 rcu_read_lock();
698419ff
ED
1941 qdisc = rcu_dereference(dev->qdisc);
1942 if (qdisc && nla_put_string(skb, IFLA_QDISC, qdisc->ops->id))
1943 goto nla_put_failure_rcu;
070cbf5b 1944 if (rtnl_fill_link_af(skb, dev, ext_filter_mask))
5fa85a09 1945 goto nla_put_failure_rcu;
74808e72
ED
1946 if (rtnl_fill_link_ifmap(skb, dev))
1947 goto nla_put_failure_rcu;
88f4fb0c 1948 if (rtnl_fill_prop_list(skb, dev))
0ec4e48c
ED
1949 goto nla_put_failure_rcu;
1950 rcu_read_unlock();
88f4fb0c 1951
00e77ed8
JB
1952 if (dev->dev.parent &&
1953 nla_put_string(skb, IFLA_PARENT_DEV_NAME,
1954 dev_name(dev->dev.parent)))
1955 goto nla_put_failure;
1956
1957 if (dev->dev.parent && dev->dev.parent->bus &&
1958 nla_put_string(skb, IFLA_PARENT_DEV_BUS_NAME,
1959 dev->dev.parent->bus->name))
1960 goto nla_put_failure;
1961
dca56c30
JP
1962 if (rtnl_fill_devlink_port(skb, dev))
1963 goto nla_put_failure;
1964
5f184269
JP
1965 if (rtnl_fill_dpll_pin(skb, dev))
1966 goto nla_put_failure;
1967
053c095a
JB
1968 nlmsg_end(skb, nlh);
1969 return 0;
b60c5115 1970
5fa85a09
FW
1971nla_put_failure_rcu:
1972 rcu_read_unlock();
b60c5115 1973nla_put_failure:
26932566
PM
1974 nlmsg_cancel(skb, nlh);
1975 return -EMSGSIZE;
1da177e4
LT
1976}
1977
f7b12606 1978static const struct nla_policy ifla_policy[IFLA_MAX+1] = {
5176f91e 1979 [IFLA_IFNAME] = { .type = NLA_STRING, .len = IFNAMSIZ-1 },
38f7b870
PM
1980 [IFLA_ADDRESS] = { .type = NLA_BINARY, .len = MAX_ADDR_LEN },
1981 [IFLA_BROADCAST] = { .type = NLA_BINARY, .len = MAX_ADDR_LEN },
5176f91e 1982 [IFLA_MAP] = { .len = sizeof(struct rtnl_link_ifmap) },
da5e0494 1983 [IFLA_MTU] = { .type = NLA_U32 },
76e87306 1984 [IFLA_LINK] = { .type = NLA_U32 },
fbaec0ea 1985 [IFLA_MASTER] = { .type = NLA_U32 },
9a57247f 1986 [IFLA_CARRIER] = { .type = NLA_U8 },
da5e0494
TG
1987 [IFLA_TXQLEN] = { .type = NLA_U32 },
1988 [IFLA_WEIGHT] = { .type = NLA_U32 },
1989 [IFLA_OPERSTATE] = { .type = NLA_U8 },
1990 [IFLA_LINKMODE] = { .type = NLA_U8 },
76e87306 1991 [IFLA_LINKINFO] = { .type = NLA_NESTED },
d8a5ec67 1992 [IFLA_NET_NS_PID] = { .type = NLA_U32 },
f0630529 1993 [IFLA_NET_NS_FD] = { .type = NLA_U32 },
2459b4c6
ND
 1994 /* IFLA_IFALIAS is a string, but the policy is set to NLA_BINARY to
 1995 * allow a 0-length string (needed to remove an alias).
1996 */
1997 [IFLA_IFALIAS] = { .type = NLA_BINARY, .len = IFALIASZ - 1 },
c02db8c6 1998 [IFLA_VFINFO_LIST] = {. type = NLA_NESTED },
57b61080
SF
1999 [IFLA_VF_PORTS] = { .type = NLA_NESTED },
2000 [IFLA_PORT_SELF] = { .type = NLA_NESTED },
f8ff182c 2001 [IFLA_AF_SPEC] = { .type = NLA_NESTED },
115c9b81 2002 [IFLA_EXT_MASK] = { .type = NLA_U32 },
edbc0bb3 2003 [IFLA_PROMISCUITY] = { .type = NLA_U32 },
76ff5cc9
JP
2004 [IFLA_NUM_TX_QUEUES] = { .type = NLA_U32 },
2005 [IFLA_NUM_RX_QUEUES] = { .type = NLA_U32 },
46e6b992
SH
2006 [IFLA_GSO_MAX_SEGS] = { .type = NLA_U32 },
2007 [IFLA_GSO_MAX_SIZE] = { .type = NLA_U32 },
02637fce 2008 [IFLA_PHYS_PORT_ID] = { .type = NLA_BINARY, .len = MAX_PHYS_ITEM_ID_LEN },
2d3b479d 2009 [IFLA_CARRIER_CHANGES] = { .type = NLA_U32 }, /* ignored */
82f28412 2010 [IFLA_PHYS_SWITCH_ID] = { .type = NLA_BINARY, .len = MAX_PHYS_ITEM_ID_LEN },
317f4810 2011 [IFLA_LINK_NETNSID] = { .type = NLA_S32 },
88d6378b 2012 [IFLA_PROTO_DOWN] = { .type = NLA_U8 },
d1fdd913 2013 [IFLA_XDP] = { .type = NLA_NESTED },
3d3ea5af 2014 [IFLA_EVENT] = { .type = NLA_U32 },
db833d40 2015 [IFLA_GROUP] = { .type = NLA_U32 },
7e4a8d5a 2016 [IFLA_TARGET_NETNSID] = { .type = NLA_S32 },
b2d3bcfa
DD
2017 [IFLA_CARRIER_UP_COUNT] = { .type = NLA_U32 },
2018 [IFLA_CARRIER_DOWN_COUNT] = { .type = NLA_U32 },
3e7a50ce
SH
2019 [IFLA_MIN_MTU] = { .type = NLA_U32 },
2020 [IFLA_MAX_MTU] = { .type = NLA_U32 },
36fbf1e5
JP
2021 [IFLA_PROP_LIST] = { .type = NLA_NESTED },
2022 [IFLA_ALT_IFNAME] = { .type = NLA_STRING,
2023 .len = ALTIFNAMSIZ - 1 },
f74877a5 2024 [IFLA_PERM_ADDRESS] = { .type = NLA_REJECT },
829eb208 2025 [IFLA_PROTO_DOWN_REASON] = { .type = NLA_NESTED },
7e4a5131 2026 [IFLA_NEW_IFINDEX] = NLA_POLICY_MIN(NLA_S32, 1),
88b71053 2027 [IFLA_PARENT_DEV_NAME] = { .type = NLA_NUL_STRING },
eac1b93c 2028 [IFLA_GRO_MAX_SIZE] = { .type = NLA_U32 },
89527be8
ED
2029 [IFLA_TSO_MAX_SIZE] = { .type = NLA_REJECT },
2030 [IFLA_TSO_MAX_SEGS] = { .type = NLA_REJECT },
7e6e1b57 2031 [IFLA_ALLMULTI] = { .type = NLA_REJECT },
9eefedd5
XL
2032 [IFLA_GSO_IPV4_MAX_SIZE] = { .type = NLA_U32 },
2033 [IFLA_GRO_IPV4_MAX_SIZE] = { .type = NLA_U32 },
da5e0494
TG
2034};
2035
38f7b870
PM
2036static const struct nla_policy ifla_info_policy[IFLA_INFO_MAX+1] = {
2037 [IFLA_INFO_KIND] = { .type = NLA_STRING },
2038 [IFLA_INFO_DATA] = { .type = NLA_NESTED },
ba7d49b1
JP
2039 [IFLA_INFO_SLAVE_KIND] = { .type = NLA_STRING },
2040 [IFLA_INFO_SLAVE_DATA] = { .type = NLA_NESTED },
38f7b870
PM
2041};
2042
c02db8c6 2043static const struct nla_policy ifla_vf_policy[IFLA_VF_MAX+1] = {
364d5716 2044 [IFLA_VF_MAC] = { .len = sizeof(struct ifla_vf_mac) },
75345f88 2045 [IFLA_VF_BROADCAST] = { .type = NLA_REJECT },
364d5716 2046 [IFLA_VF_VLAN] = { .len = sizeof(struct ifla_vf_vlan) },
79aab093 2047 [IFLA_VF_VLAN_LIST] = { .type = NLA_NESTED },
364d5716
DB
2048 [IFLA_VF_TX_RATE] = { .len = sizeof(struct ifla_vf_tx_rate) },
2049 [IFLA_VF_SPOOFCHK] = { .len = sizeof(struct ifla_vf_spoofchk) },
2050 [IFLA_VF_RATE] = { .len = sizeof(struct ifla_vf_rate) },
2051 [IFLA_VF_LINK_STATE] = { .len = sizeof(struct ifla_vf_link_state) },
01a3d796 2052 [IFLA_VF_RSS_QUERY_EN] = { .len = sizeof(struct ifla_vf_rss_query_en) },
3b766cd8 2053 [IFLA_VF_STATS] = { .type = NLA_NESTED },
dd461d6a 2054 [IFLA_VF_TRUST] = { .len = sizeof(struct ifla_vf_trust) },
cc8e27cc
EC
2055 [IFLA_VF_IB_NODE_GUID] = { .len = sizeof(struct ifla_vf_guid) },
2056 [IFLA_VF_IB_PORT_GUID] = { .len = sizeof(struct ifla_vf_guid) },
3b766cd8
EBE
2057};
2058
57b61080
SF
2059static const struct nla_policy ifla_port_policy[IFLA_PORT_MAX+1] = {
2060 [IFLA_PORT_VF] = { .type = NLA_U32 },
2061 [IFLA_PORT_PROFILE] = { .type = NLA_STRING,
2062 .len = PORT_PROFILE_MAX },
57b61080
SF
2063 [IFLA_PORT_INSTANCE_UUID] = { .type = NLA_BINARY,
2064 .len = PORT_UUID_MAX },
2065 [IFLA_PORT_HOST_UUID] = { .type = NLA_STRING,
2066 .len = PORT_UUID_MAX },
2067 [IFLA_PORT_REQUEST] = { .type = NLA_U8, },
2068 [IFLA_PORT_RESPONSE] = { .type = NLA_U16, },
025331df
DB
2069
2070 /* Unused, but we need to keep it here since user space could
2071 * fill it. It's also broken with regard to NLA_BINARY use in
2072 * combination with structs.
2073 */
2074 [IFLA_PORT_VSI_TYPE] = { .type = NLA_BINARY,
2075 .len = sizeof(struct ifla_port_vsi) },
57b61080
SF
2076};
2077
d1fdd913 2078static const struct nla_policy ifla_xdp_policy[IFLA_XDP_MAX + 1] = {
92234c8f 2079 [IFLA_XDP_UNSPEC] = { .strict_start_type = IFLA_XDP_EXPECTED_FD },
d1fdd913 2080 [IFLA_XDP_FD] = { .type = NLA_S32 },
92234c8f 2081 [IFLA_XDP_EXPECTED_FD] = { .type = NLA_S32 },
d1fdd913 2082 [IFLA_XDP_ATTACHED] = { .type = NLA_U8 },
85de8576 2083 [IFLA_XDP_FLAGS] = { .type = NLA_U32 },
58038695 2084 [IFLA_XDP_PROG_ID] = { .type = NLA_U32 },
d1fdd913
BB
2085};
2086
dc599f76
DA
2087static const struct rtnl_link_ops *linkinfo_to_kind_ops(const struct nlattr *nla)
2088{
2089 const struct rtnl_link_ops *ops = NULL;
2090 struct nlattr *linfo[IFLA_INFO_MAX + 1];
2091
8cb08174 2092 if (nla_parse_nested_deprecated(linfo, IFLA_INFO_MAX, nla, ifla_info_policy, NULL) < 0)
dc599f76
DA
2093 return NULL;
2094
2095 if (linfo[IFLA_INFO_KIND]) {
2096 char kind[MODULE_NAME_LEN];
2097
872f6903 2098 nla_strscpy(kind, linfo[IFLA_INFO_KIND], sizeof(kind));
dc599f76
DA
2099 ops = rtnl_link_ops_get(kind);
2100 }
2101
2102 return ops;
2103}
2104
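/* RTM_GETLINK dump filtering: user space may narrow a link dump by master
 * device (IFLA_MASTER, e.g. "ip link show master br0") and/or by link kind
 * (IFLA_LINKINFO, e.g. "ip link show type vlan"). A helper returning true
 * means the device is filtered out of the dump.
 */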
2105static bool link_master_filtered(struct net_device *dev, int master_idx)
2106{
2107 struct net_device *master;
2108
2109 if (!master_idx)
2110 return false;
2111
2112 master = netdev_master_upper_dev_get(dev);
d3432bf1
LS
2113
 2114 /* 0 is already used to denote that IFLA_MASTER wasn't passed, so we need
 2115 * another invalid ifindex value (-1) to denote "no master".
 2116 */
2117 if (master_idx == -1)
2118 return !!master;
2119
dc599f76
DA
2120 if (!master || master->ifindex != master_idx)
2121 return true;
2122
2123 return false;
2124}
2125
2126static bool link_kind_filtered(const struct net_device *dev,
2127 const struct rtnl_link_ops *kind_ops)
2128{
2129 if (kind_ops && dev->rtnl_link_ops != kind_ops)
2130 return true;
2131
2132 return false;
2133}
2134
2135static bool link_dump_filtered(struct net_device *dev,
2136 int master_idx,
2137 const struct rtnl_link_ops *kind_ops)
2138{
2139 if (link_master_filtered(dev, master_idx) ||
2140 link_kind_filtered(dev, kind_ops))
2141 return true;
2142
2143 return false;
2144}
2145
c383edc4
CB
2146/**
2147 * rtnl_get_net_ns_capable - Get netns if sufficiently privileged.
2148 * @sk: netlink socket
2149 * @netnsid: network namespace identifier
2150 *
2151 * Returns the network namespace identified by netnsid on success or an error
2152 * pointer on failure.
2153 */
2154struct net *rtnl_get_net_ns_capable(struct sock *sk, int netnsid)
79e1ad14
JB
2155{
2156 struct net *net;
2157
f428fe4a 2158 net = get_net_ns_by_id(sock_net(sk), netnsid);
79e1ad14
JB
2159 if (!net)
2160 return ERR_PTR(-EINVAL);
2161
2162 /* For now, the caller is required to have CAP_NET_ADMIN in
2163 * the user namespace owning the target net ns.
2164 */
f428fe4a 2165 if (!sk_ns_capable(sk, net->user_ns, CAP_NET_ADMIN)) {
79e1ad14
JB
2166 put_net(net);
2167 return ERR_PTR(-EACCES);
2168 }
2169 return net;
2170}
c383edc4 2171EXPORT_SYMBOL_GPL(rtnl_get_net_ns_capable);
79e1ad14 2172
905cf0ab
DA
2173static int rtnl_valid_dump_ifinfo_req(const struct nlmsghdr *nlh,
2174 bool strict_check, struct nlattr **tb,
2175 struct netlink_ext_ack *extack)
2176{
2177 int hdrlen;
2178
2179 if (strict_check) {
2180 struct ifinfomsg *ifm;
2181
2182 if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ifm))) {
2183 NL_SET_ERR_MSG(extack, "Invalid header for link dump");
2184 return -EINVAL;
2185 }
2186
2187 ifm = nlmsg_data(nlh);
2188 if (ifm->__ifi_pad || ifm->ifi_type || ifm->ifi_flags ||
2189 ifm->ifi_change) {
2190 NL_SET_ERR_MSG(extack, "Invalid values in header for link dump request");
2191 return -EINVAL;
2192 }
2193 if (ifm->ifi_index) {
2194 NL_SET_ERR_MSG(extack, "Filter by device index not supported for link dumps");
2195 return -EINVAL;
2196 }
2197
8cb08174
JB
2198 return nlmsg_parse_deprecated_strict(nlh, sizeof(*ifm), tb,
2199 IFLA_MAX, ifla_policy,
2200 extack);
905cf0ab
DA
2201 }
2202
 2203 /* A hack to preserve the kernel<->userspace interface.
 2204 * The correct header is ifinfomsg, consistent with rtnl_getlink.
 2205 * However, before Linux v3.9 the code here assumed rtgenmsg, and that is
 2206 * what iproute2 < v3.9.0 used.
 2207 * We can detect the old iproute2 because, even including the IFLA_EXT_MASK
 2208 * attribute, its netlink message is shorter than struct ifinfomsg.
 2209 */
2210 hdrlen = nlmsg_len(nlh) < sizeof(struct ifinfomsg) ?
2211 sizeof(struct rtgenmsg) : sizeof(struct ifinfomsg);
2212
8cb08174
JB
2213 return nlmsg_parse_deprecated(nlh, hdrlen, tb, IFLA_MAX, ifla_policy,
2214 extack);
905cf0ab
DA
2215}
2216
f7b12606
JP
2217static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
2218{
3e41af90 2219 const struct rtnl_link_ops *kind_ops = NULL;
905cf0ab
DA
2220 struct netlink_ext_ack *extack = cb->extack;
2221 const struct nlmsghdr *nlh = cb->nlh;
f7b12606 2222 struct net *net = sock_net(skb->sk);
3e41af90 2223 unsigned int flags = NLM_F_MULTI;
f7b12606 2224 struct nlattr *tb[IFLA_MAX+1];
3e41af90
ED
2225 struct {
2226 unsigned long ifindex;
2227 } *ctx = (void *)cb->ctx;
2228 struct net *tgt_net = net;
f7b12606 2229 u32 ext_filter_mask = 0;
3e41af90 2230 struct net_device *dev;
dc599f76 2231 int master_idx = 0;
79e1ad14 2232 int netnsid = -1;
905cf0ab 2233 int err, i;
f7b12606 2234
905cf0ab
DA
2235 err = rtnl_valid_dump_ifinfo_req(nlh, cb->strict_check, tb, extack);
2236 if (err < 0) {
2237 if (cb->strict_check)
2238 return err;
2239
2240 goto walk_entries;
2241 }
2242
2243 for (i = 0; i <= IFLA_MAX; ++i) {
2244 if (!tb[i])
2245 continue;
e5eca6d4 2246
905cf0ab
DA
2247 /* new attributes should only be added with strict checking */
2248 switch (i) {
2249 case IFLA_TARGET_NETNSID:
2250 netnsid = nla_get_s32(tb[i]);
c383edc4 2251 tgt_net = rtnl_get_net_ns_capable(skb->sk, netnsid);
905cf0ab
DA
2252 if (IS_ERR(tgt_net)) {
2253 NL_SET_ERR_MSG(extack, "Invalid target network namespace id");
893626d6 2254 return PTR_ERR(tgt_net);
905cf0ab
DA
2255 }
2256 break;
2257 case IFLA_EXT_MASK:
2258 ext_filter_mask = nla_get_u32(tb[i]);
2259 break;
2260 case IFLA_MASTER:
2261 master_idx = nla_get_u32(tb[i]);
2262 break;
2263 case IFLA_LINKINFO:
2264 kind_ops = linkinfo_to_kind_ops(tb[i]);
2265 break;
2266 default:
2267 if (cb->strict_check) {
2268 NL_SET_ERR_MSG(extack, "Unsupported attribute in link dump request");
2269 return -EINVAL;
2270 }
79e1ad14 2271 }
f7b12606
JP
2272 }
2273
905cf0ab
DA
2274 if (master_idx || kind_ops)
2275 flags |= NLM_F_DUMP_FILTERED;
2276
2277walk_entries:
3e41af90
ED
2278 err = 0;
2279 for_each_netdev_dump(tgt_net, dev, ctx->ifindex) {
2280 if (link_dump_filtered(dev, master_idx, kind_ops))
2281 continue;
2282 err = rtnl_fill_ifinfo(skb, dev, net, RTM_NEWLINK,
2283 NETLINK_CB(cb->skb).portid,
2284 nlh->nlmsg_seq, 0, flags,
2285 ext_filter_mask, 0, NULL, 0,
2286 netnsid, GFP_KERNEL);
02e24903 2287 if (err < 0)
3e41af90 2288 break;
f7b12606 2289 }
a9ecb0cb 2290 cb->seq = tgt_net->dev_base_seq;
d0225784 2291 nl_dump_check_consistent(cb, nlmsg_hdr(skb));
79e1ad14
JB
2292 if (netnsid >= 0)
2293 put_net(tgt_net);
f7b12606 2294
f6c5775f 2295 return err;
f7b12606
JP
2296}
2297
f534f658
JK
2298int rtnl_nla_parse_ifinfomsg(struct nlattr **tb, const struct nlattr *nla_peer,
2299 struct netlink_ext_ack *exterr)
f7b12606 2300{
f534f658
JK
2301 const struct ifinfomsg *ifmp;
2302 const struct nlattr *attrs;
2303 size_t len;
2304
2305 ifmp = nla_data(nla_peer);
2306 attrs = nla_data(nla_peer) + sizeof(struct ifinfomsg);
2307 len = nla_len(nla_peer) - sizeof(struct ifinfomsg);
2308
2309 if (ifmp->ifi_index < 0) {
2310 NL_SET_ERR_MSG_ATTR(exterr, nla_peer,
2311 "ifindex can't be negative");
2312 return -EINVAL;
2313 }
2314
2315 return nla_parse_deprecated(tb, IFLA_MAX, attrs, len, ifla_policy,
8cb08174 2316 exterr);
f7b12606 2317}
f534f658 2318EXPORT_SYMBOL(rtnl_nla_parse_ifinfomsg);
f7b12606 2319
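/*
 * Illustrative sketch only (kept compiled out): how a link type's ->newlink()
 * might parse a nested "peer" attribute (an ifinfomsg header followed by
 * IFLA_* attributes) with the exported helper above, in the style of
 * veth/vxcan. EXAMPLE_INFO_PEER is a made-up attribute constant.
 */
#if 0
static int example_parse_peer(struct nlattr *data[],
			      struct netlink_ext_ack *extack)
{
	struct nlattr *peer_tb[IFLA_MAX + 1];
	int err;

	if (!data || !data[EXAMPLE_INFO_PEER])
		return -EINVAL;

	err = rtnl_nla_parse_ifinfomsg(peer_tb, data[EXAMPLE_INFO_PEER], extack);
	if (err < 0)
		return err;

	/* peer_tb[] now holds the peer's IFLA_* attributes, e.g. IFLA_IFNAME. */
	return 0;
}
#endif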
81adee47
EB
2320struct net *rtnl_link_get_net(struct net *src_net, struct nlattr *tb[])
2321{
2322 struct net *net;
2323 /* Examine the link attributes and figure out which
2324 * network namespace we are talking about.
2325 */
2326 if (tb[IFLA_NET_NS_PID])
2327 net = get_net_ns_by_pid(nla_get_u32(tb[IFLA_NET_NS_PID]));
f0630529
EB
2328 else if (tb[IFLA_NET_NS_FD])
2329 net = get_net_ns_by_fd(nla_get_u32(tb[IFLA_NET_NS_FD]));
81adee47
EB
2330 else
2331 net = get_net(src_net);
2332 return net;
2333}
2334EXPORT_SYMBOL(rtnl_link_get_net);
2335
7c4f63ba
CB
2336/* Figure out which network namespace we are talking about by
2337 * examining the link attributes in the following order:
2338 *
2339 * 1. IFLA_NET_NS_PID
2340 * 2. IFLA_NET_NS_FD
7e4a8d5a 2341 * 3. IFLA_TARGET_NETNSID
7c4f63ba
CB
2342 */
2343static struct net *rtnl_link_get_net_by_nlattr(struct net *src_net,
2344 struct nlattr *tb[])
2345{
2346 struct net *net;
2347
2348 if (tb[IFLA_NET_NS_PID] || tb[IFLA_NET_NS_FD])
2349 return rtnl_link_get_net(src_net, tb);
2350
7e4a8d5a 2351 if (!tb[IFLA_TARGET_NETNSID])
7c4f63ba
CB
2352 return get_net(src_net);
2353
7e4a8d5a 2354 net = get_net_ns_by_id(src_net, nla_get_u32(tb[IFLA_TARGET_NETNSID]));
7c4f63ba
CB
2355 if (!net)
2356 return ERR_PTR(-EINVAL);
2357
2358 return net;
2359}
2360
2361static struct net *rtnl_link_get_net_capable(const struct sk_buff *skb,
2362 struct net *src_net,
2363 struct nlattr *tb[], int cap)
2364{
2365 struct net *net;
2366
2367 net = rtnl_link_get_net_by_nlattr(src_net, tb);
2368 if (IS_ERR(net))
2369 return net;
2370
2371 if (!netlink_ns_capable(skb, net->user_ns, cap)) {
2372 put_net(net);
2373 return ERR_PTR(-EPERM);
2374 }
2375
2376 return net;
2377}
2378
4ff66cae
CB
2379/* Verify that rtnetlink requests do not pass additional properties
2380 * potentially referring to different network namespaces.
2381 */
2382static int rtnl_ensure_unique_netns(struct nlattr *tb[],
2383 struct netlink_ext_ack *extack,
2384 bool netns_id_only)
2385{
2386
2387 if (netns_id_only) {
2388 if (!tb[IFLA_NET_NS_PID] && !tb[IFLA_NET_NS_FD])
2389 return 0;
2390
2391 NL_SET_ERR_MSG(extack, "specified netns attribute not supported");
2392 return -EOPNOTSUPP;
2393 }
2394
7e4a8d5a 2395 if (tb[IFLA_TARGET_NETNSID] && (tb[IFLA_NET_NS_PID] || tb[IFLA_NET_NS_FD]))
4ff66cae
CB
2396 goto invalid_attr;
2397
7e4a8d5a 2398 if (tb[IFLA_NET_NS_PID] && (tb[IFLA_TARGET_NETNSID] || tb[IFLA_NET_NS_FD]))
4ff66cae
CB
2399 goto invalid_attr;
2400
7e4a8d5a 2401 if (tb[IFLA_NET_NS_FD] && (tb[IFLA_TARGET_NETNSID] || tb[IFLA_NET_NS_PID]))
4ff66cae
CB
2402 goto invalid_attr;
2403
2404 return 0;
2405
2406invalid_attr:
2407 NL_SET_ERR_MSG(extack, "multiple netns identifying attributes specified");
2408 return -EINVAL;
2409}
2410
a14857c2
BC
2411static int rtnl_set_vf_rate(struct net_device *dev, int vf, int min_tx_rate,
2412 int max_tx_rate)
2413{
2414 const struct net_device_ops *ops = dev->netdev_ops;
2415
2416 if (!ops->ndo_set_vf_rate)
2417 return -EOPNOTSUPP;
2418 if (max_tx_rate && max_tx_rate < min_tx_rate)
2419 return -EINVAL;
2420
2421 return ops->ndo_set_vf_rate(dev, vf, min_tx_rate, max_tx_rate);
2422}
2423
8679c31e
RY
2424static int validate_linkmsg(struct net_device *dev, struct nlattr *tb[],
2425 struct netlink_ext_ack *extack)
1840bb13 2426{
89da780a
XL
2427 if (tb[IFLA_ADDRESS] &&
2428 nla_len(tb[IFLA_ADDRESS]) < dev->addr_len)
2429 return -EINVAL;
1840bb13 2430
89da780a
XL
2431 if (tb[IFLA_BROADCAST] &&
2432 nla_len(tb[IFLA_BROADCAST]) < dev->addr_len)
2433 return -EINVAL;
fef5b228 2434
89da780a
XL
2435 if (tb[IFLA_GSO_MAX_SIZE] &&
2436 nla_get_u32(tb[IFLA_GSO_MAX_SIZE]) > dev->tso_max_size) {
2437 NL_SET_ERR_MSG(extack, "too big gso_max_size");
2438 return -EINVAL;
2439 }
fef5b228 2440
89da780a
XL
2441 if (tb[IFLA_GSO_MAX_SEGS] &&
2442 (nla_get_u32(tb[IFLA_GSO_MAX_SEGS]) > GSO_MAX_SEGS ||
2443 nla_get_u32(tb[IFLA_GSO_MAX_SEGS]) > dev->tso_max_segs)) {
2444 NL_SET_ERR_MSG(extack, "too big gso_max_segs");
2445 return -EINVAL;
2446 }
fef5b228 2447
89da780a
XL
2448 if (tb[IFLA_GRO_MAX_SIZE] &&
2449 nla_get_u32(tb[IFLA_GRO_MAX_SIZE]) > GRO_MAX_SIZE) {
2450 NL_SET_ERR_MSG(extack, "too big gro_max_size");
2451 return -EINVAL;
2452 }
65d6914e 2453
89da780a
XL
2454 if (tb[IFLA_GSO_IPV4_MAX_SIZE] &&
2455 nla_get_u32(tb[IFLA_GSO_IPV4_MAX_SIZE]) > dev->tso_max_size) {
2456 NL_SET_ERR_MSG(extack, "too big gso_ipv4_max_size");
2457 return -EINVAL;
2458 }
65d6914e 2459
89da780a
XL
2460 if (tb[IFLA_GRO_IPV4_MAX_SIZE] &&
2461 nla_get_u32(tb[IFLA_GRO_IPV4_MAX_SIZE]) > GRO_MAX_SIZE) {
2462 NL_SET_ERR_MSG(extack, "too big gro_ipv4_max_size");
2463 return -EINVAL;
1840bb13
TG
2464 }
2465
cf7afbfe
TG
2466 if (tb[IFLA_AF_SPEC]) {
2467 struct nlattr *af;
2468 int rem, err;
2469
2470 nla_for_each_nested(af, tb[IFLA_AF_SPEC], rem) {
2471 const struct rtnl_af_ops *af_ops;
2472
5fa85a09 2473 af_ops = rtnl_af_lookup(nla_type(af));
a100243d 2474 if (!af_ops)
cf7afbfe
TG
2475 return -EAFNOSUPPORT;
2476
a100243d 2477 if (!af_ops->set_link_af)
cf7afbfe
TG
2478 return -EOPNOTSUPP;
2479
2480 if (af_ops->validate_link_af) {
8679c31e 2481 err = af_ops->validate_link_af(dev, af, extack);
a100243d 2482 if (err < 0)
cf7afbfe
TG
2483 return err;
2484 }
2485 }
2486 }
2487
1840bb13
TG
2488 return 0;
2489}
2490
cc8e27cc
EC
2491static int handle_infiniband_guid(struct net_device *dev, struct ifla_vf_guid *ivt,
2492 int guid_type)
2493{
2494 const struct net_device_ops *ops = dev->netdev_ops;
2495
2496 return ops->ndo_set_vf_guid(dev, ivt->vf, ivt->guid, guid_type);
2497}
2498
2499static int handle_vf_guid(struct net_device *dev, struct ifla_vf_guid *ivt, int guid_type)
2500{
2501 if (dev->type != ARPHRD_INFINIBAND)
2502 return -EOPNOTSUPP;
2503
2504 return handle_infiniband_guid(dev, ivt, guid_type);
2505}
2506
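/* Apply one IFLA_VF_INFO block of IFLA_VF_* attributes through the driver's
 * ndo_set_vf_*() callbacks. Stops at the first failure; returns -EINVAL when
 * no recognised attribute is present and -EOPNOTSUPP when the driver lacks
 * the required callback.
 */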
4f7d2cdf 2507static int do_setvfinfo(struct net_device *dev, struct nlattr **tb)
c02db8c6 2508{
c02db8c6 2509 const struct net_device_ops *ops = dev->netdev_ops;
4f7d2cdf 2510 int err = -EINVAL;
c02db8c6 2511
4f7d2cdf
DB
2512 if (tb[IFLA_VF_MAC]) {
2513 struct ifla_vf_mac *ivm = nla_data(tb[IFLA_VF_MAC]);
01a3d796 2514
ff08ddba
DC
2515 if (ivm->vf >= INT_MAX)
2516 return -EINVAL;
4f7d2cdf
DB
2517 err = -EOPNOTSUPP;
2518 if (ops->ndo_set_vf_mac)
2519 err = ops->ndo_set_vf_mac(dev, ivm->vf,
2520 ivm->mac);
2521 if (err < 0)
2522 return err;
2523 }
2524
2525 if (tb[IFLA_VF_VLAN]) {
2526 struct ifla_vf_vlan *ivv = nla_data(tb[IFLA_VF_VLAN]);
2527
ff08ddba
DC
2528 if (ivv->vf >= INT_MAX)
2529 return -EINVAL;
4f7d2cdf
DB
2530 err = -EOPNOTSUPP;
2531 if (ops->ndo_set_vf_vlan)
2532 err = ops->ndo_set_vf_vlan(dev, ivv->vf, ivv->vlan,
79aab093
MS
2533 ivv->qos,
2534 htons(ETH_P_8021Q));
2535 if (err < 0)
2536 return err;
2537 }
2538
2539 if (tb[IFLA_VF_VLAN_LIST]) {
2540 struct ifla_vf_vlan_info *ivvl[MAX_VLAN_LIST_LEN];
2541 struct nlattr *attr;
2542 int rem, len = 0;
2543
2544 err = -EOPNOTSUPP;
2545 if (!ops->ndo_set_vf_vlan)
2546 return err;
2547
2548 nla_for_each_nested(attr, tb[IFLA_VF_VLAN_LIST], rem) {
2549 if (nla_type(attr) != IFLA_VF_VLAN_INFO ||
2550 nla_len(attr) < NLA_HDRLEN) {
2551 return -EINVAL;
2552 }
2553 if (len >= MAX_VLAN_LIST_LEN)
2554 return -EOPNOTSUPP;
2555 ivvl[len] = nla_data(attr);
2556
2557 len++;
2558 }
fa34cd94
AB
2559 if (len == 0)
2560 return -EINVAL;
2561
ff08ddba
DC
2562 if (ivvl[0]->vf >= INT_MAX)
2563 return -EINVAL;
79aab093
MS
2564 err = ops->ndo_set_vf_vlan(dev, ivvl[0]->vf, ivvl[0]->vlan,
2565 ivvl[0]->qos, ivvl[0]->vlan_proto);
4f7d2cdf
DB
2566 if (err < 0)
2567 return err;
c02db8c6 2568 }
4f7d2cdf
DB
2569
2570 if (tb[IFLA_VF_TX_RATE]) {
2571 struct ifla_vf_tx_rate *ivt = nla_data(tb[IFLA_VF_TX_RATE]);
2572 struct ifla_vf_info ivf;
2573
ff08ddba
DC
2574 if (ivt->vf >= INT_MAX)
2575 return -EINVAL;
4f7d2cdf
DB
2576 err = -EOPNOTSUPP;
2577 if (ops->ndo_get_vf_config)
2578 err = ops->ndo_get_vf_config(dev, ivt->vf, &ivf);
2579 if (err < 0)
2580 return err;
2581
a14857c2
BC
2582 err = rtnl_set_vf_rate(dev, ivt->vf,
2583 ivf.min_tx_rate, ivt->rate);
4f7d2cdf
DB
2584 if (err < 0)
2585 return err;
2586 }
2587
2588 if (tb[IFLA_VF_RATE]) {
2589 struct ifla_vf_rate *ivt = nla_data(tb[IFLA_VF_RATE]);
2590
ff08ddba
DC
2591 if (ivt->vf >= INT_MAX)
2592 return -EINVAL;
a14857c2
BC
2593
2594 err = rtnl_set_vf_rate(dev, ivt->vf,
2595 ivt->min_tx_rate, ivt->max_tx_rate);
4f7d2cdf
DB
2596 if (err < 0)
2597 return err;
2598 }
2599
2600 if (tb[IFLA_VF_SPOOFCHK]) {
2601 struct ifla_vf_spoofchk *ivs = nla_data(tb[IFLA_VF_SPOOFCHK]);
2602
ff08ddba
DC
2603 if (ivs->vf >= INT_MAX)
2604 return -EINVAL;
4f7d2cdf
DB
2605 err = -EOPNOTSUPP;
2606 if (ops->ndo_set_vf_spoofchk)
2607 err = ops->ndo_set_vf_spoofchk(dev, ivs->vf,
2608 ivs->setting);
2609 if (err < 0)
2610 return err;
2611 }
2612
2613 if (tb[IFLA_VF_LINK_STATE]) {
2614 struct ifla_vf_link_state *ivl = nla_data(tb[IFLA_VF_LINK_STATE]);
2615
ff08ddba
DC
2616 if (ivl->vf >= INT_MAX)
2617 return -EINVAL;
4f7d2cdf
DB
2618 err = -EOPNOTSUPP;
2619 if (ops->ndo_set_vf_link_state)
2620 err = ops->ndo_set_vf_link_state(dev, ivl->vf,
2621 ivl->link_state);
2622 if (err < 0)
2623 return err;
2624 }
2625
2626 if (tb[IFLA_VF_RSS_QUERY_EN]) {
2627 struct ifla_vf_rss_query_en *ivrssq_en;
2628
2629 err = -EOPNOTSUPP;
2630 ivrssq_en = nla_data(tb[IFLA_VF_RSS_QUERY_EN]);
ff08ddba
DC
2631 if (ivrssq_en->vf >= INT_MAX)
2632 return -EINVAL;
4f7d2cdf
DB
2633 if (ops->ndo_set_vf_rss_query_en)
2634 err = ops->ndo_set_vf_rss_query_en(dev, ivrssq_en->vf,
2635 ivrssq_en->setting);
2636 if (err < 0)
2637 return err;
2638 }
2639
dd461d6a
HS
2640 if (tb[IFLA_VF_TRUST]) {
2641 struct ifla_vf_trust *ivt = nla_data(tb[IFLA_VF_TRUST]);
2642
ff08ddba
DC
2643 if (ivt->vf >= INT_MAX)
2644 return -EINVAL;
dd461d6a
HS
2645 err = -EOPNOTSUPP;
2646 if (ops->ndo_set_vf_trust)
2647 err = ops->ndo_set_vf_trust(dev, ivt->vf, ivt->setting);
2648 if (err < 0)
2649 return err;
2650 }
2651
cc8e27cc
EC
2652 if (tb[IFLA_VF_IB_NODE_GUID]) {
2653 struct ifla_vf_guid *ivt = nla_data(tb[IFLA_VF_IB_NODE_GUID]);
2654
ff08ddba
DC
2655 if (ivt->vf >= INT_MAX)
2656 return -EINVAL;
cc8e27cc
EC
2657 if (!ops->ndo_set_vf_guid)
2658 return -EOPNOTSUPP;
cc8e27cc
EC
2659 return handle_vf_guid(dev, ivt, IFLA_VF_IB_NODE_GUID);
2660 }
2661
2662 if (tb[IFLA_VF_IB_PORT_GUID]) {
2663 struct ifla_vf_guid *ivt = nla_data(tb[IFLA_VF_IB_PORT_GUID]);
2664
ff08ddba
DC
2665 if (ivt->vf >= INT_MAX)
2666 return -EINVAL;
cc8e27cc
EC
2667 if (!ops->ndo_set_vf_guid)
2668 return -EOPNOTSUPP;
2669
2670 return handle_vf_guid(dev, ivt, IFLA_VF_IB_PORT_GUID);
2671 }
2672
c02db8c6
CW
2673 return err;
2674}
2675
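/* Change @dev's master device. If @dev is already enslaved to @ifindex this
 * is a no-op; otherwise any existing upper device is released via
 * ndo_del_slave() and, when @ifindex is non-zero, @dev is enslaved to it via
 * ndo_add_slave(). @ifindex == 0 therefore just detaches.
 */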
33eaf2a6
DA
2676static int do_set_master(struct net_device *dev, int ifindex,
2677 struct netlink_ext_ack *extack)
fbaec0ea 2678{
898e5061 2679 struct net_device *upper_dev = netdev_master_upper_dev_get(dev);
fbaec0ea
JP
2680 const struct net_device_ops *ops;
2681 int err;
2682
898e5061
JP
2683 if (upper_dev) {
2684 if (upper_dev->ifindex == ifindex)
fbaec0ea 2685 return 0;
898e5061 2686 ops = upper_dev->netdev_ops;
fbaec0ea 2687 if (ops->ndo_del_slave) {
898e5061 2688 err = ops->ndo_del_slave(upper_dev, dev);
fbaec0ea
JP
2689 if (err)
2690 return err;
2691 } else {
2692 return -EOPNOTSUPP;
2693 }
2694 }
2695
2696 if (ifindex) {
898e5061
JP
2697 upper_dev = __dev_get_by_index(dev_net(dev), ifindex);
2698 if (!upper_dev)
fbaec0ea 2699 return -EINVAL;
898e5061 2700 ops = upper_dev->netdev_ops;
fbaec0ea 2701 if (ops->ndo_add_slave) {
33eaf2a6 2702 err = ops->ndo_add_slave(upper_dev, dev, extack);
fbaec0ea
JP
2703 if (err)
2704 return err;
2705 } else {
2706 return -EOPNOTSUPP;
2707 }
2708 }
2709 return 0;
2710}
2711
829eb208
RP
2712static const struct nla_policy ifla_proto_down_reason_policy[IFLA_PROTO_DOWN_REASON_VALUE + 1] = {
2713 [IFLA_PROTO_DOWN_REASON_MASK] = { .type = NLA_U32 },
2714 [IFLA_PROTO_DOWN_REASON_VALUE] = { .type = NLA_U32 },
2715};
2716
2717static int do_set_proto_down(struct net_device *dev,
2718 struct nlattr *nl_proto_down,
2719 struct nlattr *nl_proto_down_reason,
2720 struct netlink_ext_ack *extack)
2721{
2722 struct nlattr *pdreason[IFLA_PROTO_DOWN_REASON_MAX + 1];
829eb208
RP
2723 unsigned long mask = 0;
2724 u32 value;
2725 bool proto_down;
2726 int err;
2727
2106efda 2728 if (!(dev->priv_flags & IFF_CHANGE_PROTO_DOWN)) {
829eb208
RP
2729 NL_SET_ERR_MSG(extack, "Protodown not supported by device");
2730 return -EOPNOTSUPP;
2731 }
2732
2733 if (nl_proto_down_reason) {
2734 err = nla_parse_nested_deprecated(pdreason,
2735 IFLA_PROTO_DOWN_REASON_MAX,
2736 nl_proto_down_reason,
2737 ifla_proto_down_reason_policy,
2738 NULL);
2739 if (err < 0)
2740 return err;
2741
2742 if (!pdreason[IFLA_PROTO_DOWN_REASON_VALUE]) {
2743 NL_SET_ERR_MSG(extack, "Invalid protodown reason value");
2744 return -EINVAL;
2745 }
2746
2747 value = nla_get_u32(pdreason[IFLA_PROTO_DOWN_REASON_VALUE]);
2748
2749 if (pdreason[IFLA_PROTO_DOWN_REASON_MASK])
2750 mask = nla_get_u32(pdreason[IFLA_PROTO_DOWN_REASON_MASK]);
2751
2752 dev_change_proto_down_reason(dev, mask, value);
2753 }
2754
2755 if (nl_proto_down) {
2756 proto_down = nla_get_u8(nl_proto_down);
2757
d467d0bc 2758 /* Don't turn off protodown if there are active reasons */
829eb208
RP
2759 if (!proto_down && dev->proto_down_reason) {
2760 NL_SET_ERR_MSG(extack, "Cannot clear protodown, active reasons");
2761 return -EBUSY;
2762 }
2763 err = dev_change_proto_down(dev,
2764 proto_down);
2765 if (err)
2766 return err;
2767 }
2768
2769 return 0;
2770}
2771
90c325e3 2772#define DO_SETLINK_MODIFIED 0x01
ba998906
ND
2773/* notify flag means notify + modified. */
2774#define DO_SETLINK_NOTIFY 0x03
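/* Apply the IFLA_* attributes in @tb to an already registered device.
 * @status accumulates the DO_SETLINK_* flags above; on return a state change
 * notification is sent when a notify-worthy attribute changed, and a
 * ratelimited warning is logged if the request failed after some changes had
 * already been committed.
 */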
90f62cf3
EB
2775static int do_setlink(const struct sk_buff *skb,
2776 struct net_device *dev, struct ifinfomsg *ifm,
ddf9f970 2777 struct netlink_ext_ack *extack,
5ea08b52 2778 struct nlattr **tb, int status)
1da177e4 2779{
d314774c 2780 const struct net_device_ops *ops = dev->netdev_ops;
5ea08b52 2781 char ifname[IFNAMSIZ];
0157f60c 2782 int err;
1da177e4 2783
5ea08b52
FF
2784 if (tb[IFLA_IFNAME])
2785 nla_strscpy(ifname, tb[IFLA_IFNAME], IFNAMSIZ);
2786 else
2787 ifname[0] = '\0';
2788
7e4a8d5a 2789 if (tb[IFLA_NET_NS_PID] || tb[IFLA_NET_NS_FD] || tb[IFLA_TARGET_NETNSID]) {
5ea08b52 2790 const char *pat = ifname[0] ? ifname : NULL;
eeb85a14
AV
2791 struct net *net;
2792 int new_ifindex;
2793
2794 net = rtnl_link_get_net_capable(skb, dev_net(dev),
2795 tb, CAP_NET_ADMIN);
d8a5ec67
EB
2796 if (IS_ERR(net)) {
2797 err = PTR_ERR(net);
2798 goto errout;
2799 }
7c4f63ba 2800
eeb85a14
AV
2801 if (tb[IFLA_NEW_IFINDEX])
2802 new_ifindex = nla_get_s32(tb[IFLA_NEW_IFINDEX]);
2803 else
2804 new_ifindex = 0;
2805
96a6b93b 2806 err = __dev_change_net_namespace(dev, net, pat, new_ifindex);
d8a5ec67
EB
2807 put_net(net);
2808 if (err)
2809 goto errout;
90c325e3 2810 status |= DO_SETLINK_MODIFIED;
d8a5ec67
EB
2811 }
2812
da5e0494 2813 if (tb[IFLA_MAP]) {
1da177e4
LT
2814 struct rtnl_link_ifmap *u_map;
2815 struct ifmap k_map;
2816
d314774c 2817 if (!ops->ndo_set_config) {
1da177e4 2818 err = -EOPNOTSUPP;
0157f60c 2819 goto errout;
1da177e4
LT
2820 }
2821
2822 if (!netif_device_present(dev)) {
2823 err = -ENODEV;
0157f60c 2824 goto errout;
1da177e4 2825 }
1da177e4 2826
da5e0494 2827 u_map = nla_data(tb[IFLA_MAP]);
1da177e4
LT
2828 k_map.mem_start = (unsigned long) u_map->mem_start;
2829 k_map.mem_end = (unsigned long) u_map->mem_end;
2830 k_map.base_addr = (unsigned short) u_map->base_addr;
2831 k_map.irq = (unsigned char) u_map->irq;
2832 k_map.dma = (unsigned char) u_map->dma;
2833 k_map.port = (unsigned char) u_map->port;
2834
d314774c 2835 err = ops->ndo_set_config(dev, &k_map);
da5e0494 2836 if (err < 0)
0157f60c 2837 goto errout;
1da177e4 2838
ba998906 2839 status |= DO_SETLINK_NOTIFY;
1da177e4
LT
2840 }
2841
da5e0494 2842 if (tb[IFLA_ADDRESS]) {
70f8e78e
DM
2843 struct sockaddr *sa;
2844 int len;
2845
153711f9
WC
2846 len = sizeof(sa_family_t) + max_t(size_t, dev->addr_len,
2847 sizeof(*sa));
70f8e78e
DM
2848 sa = kmalloc(len, GFP_KERNEL);
2849 if (!sa) {
2850 err = -ENOMEM;
0157f60c 2851 goto errout;
70f8e78e
DM
2852 }
2853 sa->sa_family = dev->type;
da5e0494 2854 memcpy(sa->sa_data, nla_data(tb[IFLA_ADDRESS]),
70f8e78e 2855 dev->addr_len);
3b23a32a 2856 err = dev_set_mac_address_user(dev, sa, extack);
70f8e78e 2857 kfree(sa);
1da177e4 2858 if (err)
0157f60c 2859 goto errout;
90c325e3 2860 status |= DO_SETLINK_MODIFIED;
1da177e4
LT
2861 }
2862
da5e0494 2863 if (tb[IFLA_MTU]) {
7a4c53be 2864 err = dev_set_mtu_ext(dev, nla_get_u32(tb[IFLA_MTU]), extack);
da5e0494 2865 if (err < 0)
0157f60c 2866 goto errout;
90c325e3 2867 status |= DO_SETLINK_MODIFIED;
1da177e4
LT
2868 }
2869
cbda10fa
VD
2870 if (tb[IFLA_GROUP]) {
2871 dev_set_group(dev, nla_get_u32(tb[IFLA_GROUP]));
ba998906 2872 status |= DO_SETLINK_NOTIFY;
cbda10fa
VD
2873 }
2874
da5e0494
TG
 2875 /*
 2876 * If the interface was selected by index but an interface
 2877 * name was also provided, a name change has been
 2878 * requested.
 2879 */
51055be8 2880 if (ifm->ifi_index > 0 && ifname[0]) {
da5e0494
TG
2881 err = dev_change_name(dev, ifname);
2882 if (err < 0)
0157f60c 2883 goto errout;
90c325e3 2884 status |= DO_SETLINK_MODIFIED;
1da177e4
LT
2885 }
2886
0b815a1a
SH
2887 if (tb[IFLA_IFALIAS]) {
2888 err = dev_set_alias(dev, nla_data(tb[IFLA_IFALIAS]),
2889 nla_len(tb[IFLA_IFALIAS]));
2890 if (err < 0)
2891 goto errout;
ba998906 2892 status |= DO_SETLINK_NOTIFY;
0b815a1a
SH
2893 }
2894
da5e0494
TG
2895 if (tb[IFLA_BROADCAST]) {
2896 nla_memcpy(dev->broadcast, tb[IFLA_BROADCAST], dev->addr_len);
e7c3273e 2897 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
1da177e4
LT
2898 }
2899
a4abfa62
PS
2900 if (ifm->ifi_flags || ifm->ifi_change) {
2901 err = dev_change_flags(dev, rtnl_dev_combine_flags(dev, ifm),
2902 extack);
2903 if (err < 0)
2904 goto errout;
2905 }
2906
ec4ffd10
ND
2907 if (tb[IFLA_MASTER]) {
2908 err = do_set_master(dev, nla_get_u32(tb[IFLA_MASTER]), extack);
2909 if (err)
2910 goto errout;
2911 status |= DO_SETLINK_MODIFIED;
2912 }
2913
9a57247f
JP
2914 if (tb[IFLA_CARRIER]) {
2915 err = dev_change_carrier(dev, nla_get_u8(tb[IFLA_CARRIER]));
2916 if (err)
2917 goto errout;
90c325e3 2918 status |= DO_SETLINK_MODIFIED;
9a57247f
JP
2919 }
2920
5d1180fc 2921 if (tb[IFLA_TXQLEN]) {
0cd29503 2922 unsigned int value = nla_get_u32(tb[IFLA_TXQLEN]);
6a643ddb
CW
2923
2924 err = dev_change_tx_queue_len(dev, value);
2925 if (err)
2926 goto errout;
2927 status |= DO_SETLINK_MODIFIED;
5d1180fc 2928 }
b00055aa 2929
46e6b992
SH
2930 if (tb[IFLA_GSO_MAX_SIZE]) {
2931 u32 max_size = nla_get_u32(tb[IFLA_GSO_MAX_SIZE]);
2932
46e6b992
SH
2933 if (dev->gso_max_size ^ max_size) {
2934 netif_set_gso_max_size(dev, max_size);
2935 status |= DO_SETLINK_MODIFIED;
2936 }
2937 }
2938
2939 if (tb[IFLA_GSO_MAX_SEGS]) {
2940 u32 max_segs = nla_get_u32(tb[IFLA_GSO_MAX_SEGS]);
2941
46e6b992 2942 if (dev->gso_max_segs ^ max_segs) {
6d872df3 2943 netif_set_gso_max_segs(dev, max_segs);
46e6b992
SH
2944 status |= DO_SETLINK_MODIFIED;
2945 }
2946 }
2947
eac1b93c
CL
2948 if (tb[IFLA_GRO_MAX_SIZE]) {
2949 u32 gro_max_size = nla_get_u32(tb[IFLA_GRO_MAX_SIZE]);
2950
2951 if (dev->gro_max_size ^ gro_max_size) {
2952 netif_set_gro_max_size(dev, gro_max_size);
2953 status |= DO_SETLINK_MODIFIED;
2954 }
2955 }
2956
9eefedd5
XL
2957 if (tb[IFLA_GSO_IPV4_MAX_SIZE]) {
2958 u32 max_size = nla_get_u32(tb[IFLA_GSO_IPV4_MAX_SIZE]);
2959
9eefedd5
XL
2960 if (dev->gso_ipv4_max_size ^ max_size) {
2961 netif_set_gso_ipv4_max_size(dev, max_size);
2962 status |= DO_SETLINK_MODIFIED;
2963 }
2964 }
2965
2966 if (tb[IFLA_GRO_IPV4_MAX_SIZE]) {
2967 u32 gro_max_size = nla_get_u32(tb[IFLA_GRO_IPV4_MAX_SIZE]);
2968
2969 if (dev->gro_ipv4_max_size ^ gro_max_size) {
2970 netif_set_gro_ipv4_max_size(dev, gro_max_size);
2971 status |= DO_SETLINK_MODIFIED;
2972 }
2973 }
2974
da5e0494 2975 if (tb[IFLA_OPERSTATE])
93b2d4a2 2976 set_operstate(dev, nla_get_u8(tb[IFLA_OPERSTATE]));
b00055aa 2977
da5e0494 2978 if (tb[IFLA_LINKMODE]) {
1889b0e7
ND
2979 unsigned char value = nla_get_u8(tb[IFLA_LINKMODE]);
2980
1889b0e7 2981 if (dev->link_mode ^ value)
ba998906 2982 status |= DO_SETLINK_NOTIFY;
a6473fe9 2983 WRITE_ONCE(dev->link_mode, value);
b00055aa
SR
2984 }
2985
c02db8c6 2986 if (tb[IFLA_VFINFO_LIST]) {
4f7d2cdf 2987 struct nlattr *vfinfo[IFLA_VF_MAX + 1];
c02db8c6
CW
2988 struct nlattr *attr;
2989 int rem;
4f7d2cdf 2990
c02db8c6 2991 nla_for_each_nested(attr, tb[IFLA_VFINFO_LIST], rem) {
4f7d2cdf
DB
2992 if (nla_type(attr) != IFLA_VF_INFO ||
2993 nla_len(attr) < NLA_HDRLEN) {
253683bb 2994 err = -EINVAL;
c02db8c6 2995 goto errout;
253683bb 2996 }
8cb08174
JB
2997 err = nla_parse_nested_deprecated(vfinfo, IFLA_VF_MAX,
2998 attr,
2999 ifla_vf_policy,
3000 NULL);
4f7d2cdf
DB
3001 if (err < 0)
3002 goto errout;
3003 err = do_setvfinfo(dev, vfinfo);
c02db8c6
CW
3004 if (err < 0)
3005 goto errout;
ba998906 3006 status |= DO_SETLINK_NOTIFY;
c02db8c6 3007 }
ebc08a6f 3008 }
1da177e4
LT
3009 err = 0;
3010
57b61080
SF
3011 if (tb[IFLA_VF_PORTS]) {
3012 struct nlattr *port[IFLA_PORT_MAX+1];
3013 struct nlattr *attr;
3014 int vf;
3015 int rem;
3016
3017 err = -EOPNOTSUPP;
3018 if (!ops->ndo_set_vf_port)
3019 goto errout;
3020
3021 nla_for_each_nested(attr, tb[IFLA_VF_PORTS], rem) {
035d210f
DB
3022 if (nla_type(attr) != IFLA_VF_PORT ||
3023 nla_len(attr) < NLA_HDRLEN) {
3024 err = -EINVAL;
3025 goto errout;
3026 }
8cb08174
JB
3027 err = nla_parse_nested_deprecated(port, IFLA_PORT_MAX,
3028 attr,
3029 ifla_port_policy,
3030 NULL);
57b61080
SF
3031 if (err < 0)
3032 goto errout;
3033 if (!port[IFLA_PORT_VF]) {
3034 err = -EOPNOTSUPP;
3035 goto errout;
3036 }
3037 vf = nla_get_u32(port[IFLA_PORT_VF]);
3038 err = ops->ndo_set_vf_port(dev, vf, port);
3039 if (err < 0)
3040 goto errout;
ba998906 3041 status |= DO_SETLINK_NOTIFY;
57b61080
SF
3042 }
3043 }
3044 err = 0;
3045
3046 if (tb[IFLA_PORT_SELF]) {
3047 struct nlattr *port[IFLA_PORT_MAX+1];
3048
8cb08174
JB
3049 err = nla_parse_nested_deprecated(port, IFLA_PORT_MAX,
3050 tb[IFLA_PORT_SELF],
3051 ifla_port_policy, NULL);
57b61080
SF
3052 if (err < 0)
3053 goto errout;
3054
3055 err = -EOPNOTSUPP;
3056 if (ops->ndo_set_vf_port)
3057 err = ops->ndo_set_vf_port(dev, PORT_SELF_VF, port);
3058 if (err < 0)
3059 goto errout;
ba998906 3060 status |= DO_SETLINK_NOTIFY;
57b61080 3061 }
f8ff182c
TG
3062
3063 if (tb[IFLA_AF_SPEC]) {
3064 struct nlattr *af;
3065 int rem;
3066
3067 nla_for_each_nested(af, tb[IFLA_AF_SPEC], rem) {
3068 const struct rtnl_af_ops *af_ops;
3069
058c8d59 3070 BUG_ON(!(af_ops = rtnl_af_lookup(nla_type(af))));
f8ff182c 3071
3583a4e8 3072 err = af_ops->set_link_af(dev, af, extack);
a100243d 3073 if (err < 0)
f8ff182c
TG
3074 goto errout;
3075
ba998906 3076 status |= DO_SETLINK_NOTIFY;
f8ff182c
TG
3077 }
3078 }
57b61080
SF
3079 err = 0;
3080
829eb208
RP
3081 if (tb[IFLA_PROTO_DOWN] || tb[IFLA_PROTO_DOWN_REASON]) {
3082 err = do_set_proto_down(dev, tb[IFLA_PROTO_DOWN],
3083 tb[IFLA_PROTO_DOWN_REASON], extack);
88d6378b
AK
3084 if (err)
3085 goto errout;
3086 status |= DO_SETLINK_NOTIFY;
3087 }
3088
d1fdd913
BB
3089 if (tb[IFLA_XDP]) {
3090 struct nlattr *xdp[IFLA_XDP_MAX + 1];
85de8576 3091 u32 xdp_flags = 0;
d1fdd913 3092
8cb08174
JB
3093 err = nla_parse_nested_deprecated(xdp, IFLA_XDP_MAX,
3094 tb[IFLA_XDP],
3095 ifla_xdp_policy, NULL);
d1fdd913
BB
3096 if (err < 0)
3097 goto errout;
3098
58038695 3099 if (xdp[IFLA_XDP_ATTACHED] || xdp[IFLA_XDP_PROG_ID]) {
262d8625
BB
3100 err = -EINVAL;
3101 goto errout;
3102 }
85de8576
DB
3103
3104 if (xdp[IFLA_XDP_FLAGS]) {
3105 xdp_flags = nla_get_u32(xdp[IFLA_XDP_FLAGS]);
3106 if (xdp_flags & ~XDP_FLAGS_MASK) {
3107 err = -EINVAL;
3108 goto errout;
3109 }
ee5d032f 3110 if (hweight32(xdp_flags & XDP_FLAGS_MODES) > 1) {
0489df9a
DB
3111 err = -EINVAL;
3112 goto errout;
3113 }
85de8576
DB
3114 }
3115
d1fdd913 3116 if (xdp[IFLA_XDP_FD]) {
92234c8f
THJ
3117 int expected_fd = -1;
3118
3119 if (xdp_flags & XDP_FLAGS_REPLACE) {
3120 if (!xdp[IFLA_XDP_EXPECTED_FD]) {
3121 err = -EINVAL;
3122 goto errout;
3123 }
3124 expected_fd =
3125 nla_get_s32(xdp[IFLA_XDP_EXPECTED_FD]);
3126 }
3127
ddf9f970 3128 err = dev_change_xdp_fd(dev, extack,
85de8576 3129 nla_get_s32(xdp[IFLA_XDP_FD]),
92234c8f 3130 expected_fd,
85de8576 3131 xdp_flags);
d1fdd913
BB
3132 if (err)
3133 goto errout;
3134 status |= DO_SETLINK_NOTIFY;
3135 }
3136 }
3137
0157f60c 3138errout:
ba998906 3139 if (status & DO_SETLINK_MODIFIED) {
64ff90cc 3140 if ((status & DO_SETLINK_NOTIFY) == DO_SETLINK_NOTIFY)
ba998906
ND
3141 netdev_state_change(dev);
3142
3143 if (err < 0)
3144 net_warn_ratelimited("A link change request failed with some changes committed already. Interface %s may have been left with an inconsistent configuration, please check.\n",
3145 dev->name);
3146 }
da5e0494 3147
0157f60c
PM
3148 return err;
3149}
1da177e4 3150
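/* Look up a device by IFLA_IFNAME or, failing that, IFLA_ALT_IFNAME.
 * Returns NULL when neither attribute is present or no such device exists.
 * Must be called with RTNL held (__dev_get_by_name() is used).
 */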
cc6090e9 3151static struct net_device *rtnl_dev_get(struct net *net,
5ea08b52
FF
3152 struct nlattr *tb[])
3153{
3154 char ifname[ALTIFNAMSIZ];
3155
3156 if (tb[IFLA_IFNAME])
3157 nla_strscpy(ifname, tb[IFLA_IFNAME], IFNAMSIZ);
3158 else if (tb[IFLA_ALT_IFNAME])
3159 nla_strscpy(ifname, tb[IFLA_ALT_IFNAME], ALTIFNAMSIZ);
3160 else
3161 return NULL;
cc6090e9
JP
3162
3163 return __dev_get_by_name(net, ifname);
3164}
3165
c21ef3e3
DA
3166static int rtnl_setlink(struct sk_buff *skb, struct nlmsghdr *nlh,
3167 struct netlink_ext_ack *extack)
0157f60c 3168{
3b1e0a65 3169 struct net *net = sock_net(skb->sk);
0157f60c
PM
3170 struct ifinfomsg *ifm;
3171 struct net_device *dev;
3172 int err;
3173 struct nlattr *tb[IFLA_MAX+1];
0157f60c 3174
8cb08174
JB
3175 err = nlmsg_parse_deprecated(nlh, sizeof(*ifm), tb, IFLA_MAX,
3176 ifla_policy, extack);
0157f60c
PM
3177 if (err < 0)
3178 goto errout;
3179
4ff66cae
CB
3180 err = rtnl_ensure_unique_netns(tb, extack, false);
3181 if (err < 0)
3182 goto errout;
3183
0157f60c
PM
3184 err = -EINVAL;
3185 ifm = nlmsg_data(nlh);
3186 if (ifm->ifi_index > 0)
a3d12891 3187 dev = __dev_get_by_index(net, ifm->ifi_index);
76c9ac0e 3188 else if (tb[IFLA_IFNAME] || tb[IFLA_ALT_IFNAME])
5ea08b52 3189 dev = rtnl_dev_get(net, tb);
0157f60c
PM
3190 else
3191 goto errout;
3192
3193 if (dev == NULL) {
3194 err = -ENODEV;
3195 goto errout;
3196 }
3197
89da780a
XL
3198 err = validate_linkmsg(dev, tb, extack);
3199 if (err < 0)
3200 goto errout;
3201
5ea08b52 3202 err = do_setlink(skb, dev, ifm, extack, tb, 0);
da5e0494 3203errout:
1da177e4
LT
3204 return err;
3205}
3206
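/* Delete every device belonging to @group. Group 0 is rejected with -EPERM;
 * -EOPNOTSUPP is returned (before anything is deleted) if any member's
 * rtnl_link_ops lacks ->dellink(), and -ENODEV if the group has no members.
 */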
66400d54
WC
3207static int rtnl_group_dellink(const struct net *net, int group)
3208{
3209 struct net_device *dev, *aux;
3210 LIST_HEAD(list_kill);
3211 bool found = false;
3212
3213 if (!group)
3214 return -EPERM;
3215
3216 for_each_netdev(net, dev) {
3217 if (dev->group == group) {
3218 const struct rtnl_link_ops *ops;
3219
3220 found = true;
3221 ops = dev->rtnl_link_ops;
3222 if (!ops || !ops->dellink)
3223 return -EOPNOTSUPP;
3224 }
3225 }
3226
3227 if (!found)
3228 return -ENODEV;
3229
3230 for_each_netdev_safe(net, dev, aux) {
3231 if (dev->group == group) {
3232 const struct rtnl_link_ops *ops;
3233
3234 ops = dev->rtnl_link_ops;
3235 ops->dellink(dev, &list_kill);
3236 }
3237 }
3238 unregister_netdevice_many(&list_kill);
3239
3240 return 0;
3241}
3242
f3a63cce 3243int rtnl_delete_link(struct net_device *dev, u32 portid, const struct nlmsghdr *nlh)
614732ea
TG
3244{
3245 const struct rtnl_link_ops *ops;
3246 LIST_HEAD(list_kill);
3247
3248 ops = dev->rtnl_link_ops;
3249 if (!ops || !ops->dellink)
3250 return -EOPNOTSUPP;
3251
3252 ops->dellink(dev, &list_kill);
f3a63cce 3253 unregister_netdevice_many_notify(&list_kill, portid, nlh);
614732ea
TG
3254
3255 return 0;
3256}
3257EXPORT_SYMBOL_GPL(rtnl_delete_link);
3258
c21ef3e3
DA
3259static int rtnl_dellink(struct sk_buff *skb, struct nlmsghdr *nlh,
3260 struct netlink_ext_ack *extack)
38f7b870 3261{
3b1e0a65 3262 struct net *net = sock_net(skb->sk);
f3a63cce 3263 u32 portid = NETLINK_CB(skb).portid;
b61ad68a
CB
3264 struct net *tgt_net = net;
3265 struct net_device *dev = NULL;
38f7b870 3266 struct ifinfomsg *ifm;
38f7b870
PM
3267 struct nlattr *tb[IFLA_MAX+1];
3268 int err;
b61ad68a 3269 int netnsid = -1;
38f7b870 3270
8cb08174
JB
3271 err = nlmsg_parse_deprecated(nlh, sizeof(*ifm), tb, IFLA_MAX,
3272 ifla_policy, extack);
38f7b870
PM
3273 if (err < 0)
3274 return err;
3275
4ff66cae
CB
3276 err = rtnl_ensure_unique_netns(tb, extack, true);
3277 if (err < 0)
3278 return err;
3279
7e4a8d5a
CB
3280 if (tb[IFLA_TARGET_NETNSID]) {
3281 netnsid = nla_get_s32(tb[IFLA_TARGET_NETNSID]);
c383edc4 3282 tgt_net = rtnl_get_net_ns_capable(NETLINK_CB(skb).sk, netnsid);
b61ad68a
CB
3283 if (IS_ERR(tgt_net))
3284 return PTR_ERR(tgt_net);
3285 }
3286
3287 err = -EINVAL;
38f7b870
PM
3288 ifm = nlmsg_data(nlh);
3289 if (ifm->ifi_index > 0)
b61ad68a 3290 dev = __dev_get_by_index(tgt_net, ifm->ifi_index);
76c9ac0e 3291 else if (tb[IFLA_IFNAME] || tb[IFLA_ALT_IFNAME])
5ea08b52 3292 dev = rtnl_dev_get(net, tb);
66400d54 3293 else if (tb[IFLA_GROUP])
b61ad68a 3294 err = rtnl_group_dellink(tgt_net, nla_get_u32(tb[IFLA_GROUP]));
38f7b870 3295 else
b61ad68a 3296 goto out;
38f7b870 3297
b61ad68a 3298 if (!dev) {
dee04163 3299 if (tb[IFLA_IFNAME] || tb[IFLA_ALT_IFNAME] || ifm->ifi_index > 0)
b61ad68a
CB
3300 err = -ENODEV;
3301
3302 goto out;
3303 }
3304
f3a63cce 3305 err = rtnl_delete_link(dev, portid, nlh);
38f7b870 3306
b61ad68a
CB
3307out:
3308 if (netnsid >= 0)
3309 put_net(tgt_net);
3310
3311 return err;
38f7b870
PM
3312}
3313
1d997f10
HL
3314int rtnl_configure_link(struct net_device *dev, const struct ifinfomsg *ifm,
3315 u32 portid, const struct nlmsghdr *nlh)
3729d502
PM
3316{
3317 unsigned int old_flags;
3318 int err;
3319
3320 old_flags = dev->flags;
3321 if (ifm && (ifm->ifi_flags || ifm->ifi_change)) {
6d040321
PM
3322 err = __dev_change_flags(dev, rtnl_dev_combine_flags(dev, ifm),
3323 NULL);
3729d502
PM
3324 if (err < 0)
3325 return err;
3326 }
3327
8d356b89 3328 if (dev->rtnl_link_state == RTNL_LINK_INITIALIZED) {
1d997f10 3329 __dev_notify_flags(dev, old_flags, (old_flags ^ dev->flags), portid, nlh);
8d356b89
RP
3330 } else {
3331 dev->rtnl_link_state = RTNL_LINK_INITIALIZED;
1d997f10 3332 __dev_notify_flags(dev, old_flags, ~0U, portid, nlh);
8d356b89 3333 }
3729d502
PM
3334 return 0;
3335}
3336EXPORT_SYMBOL(rtnl_configure_link);
3337
d0522f1c
DA
3338struct net_device *rtnl_create_link(struct net *net, const char *ifname,
3339 unsigned char name_assign_type,
3340 const struct rtnl_link_ops *ops,
3341 struct nlattr *tb[],
3342 struct netlink_ext_ack *extack)
e7199288 3343{
e7199288 3344 struct net_device *dev;
d40156aa
JP
3345 unsigned int num_tx_queues = 1;
3346 unsigned int num_rx_queues = 1;
b0ad3c17 3347 int err;
e7199288 3348
76ff5cc9
JP
3349 if (tb[IFLA_NUM_TX_QUEUES])
3350 num_tx_queues = nla_get_u32(tb[IFLA_NUM_TX_QUEUES]);
3351 else if (ops->get_num_tx_queues)
d40156aa 3352 num_tx_queues = ops->get_num_tx_queues();
76ff5cc9
JP
3353
3354 if (tb[IFLA_NUM_RX_QUEUES])
3355 num_rx_queues = nla_get_u32(tb[IFLA_NUM_RX_QUEUES]);
3356 else if (ops->get_num_rx_queues)
d40156aa 3357 num_rx_queues = ops->get_num_rx_queues();
efacb309 3358
d0522f1c
DA
3359 if (num_tx_queues < 1 || num_tx_queues > 4096) {
3360 NL_SET_ERR_MSG(extack, "Invalid number of transmit queues");
0e1d6eca 3361 return ERR_PTR(-EINVAL);
d0522f1c 3362 }
0e1d6eca 3363
d0522f1c
DA
3364 if (num_rx_queues < 1 || num_rx_queues > 4096) {
3365 NL_SET_ERR_MSG(extack, "Invalid number of receive queues");
0e1d6eca 3366 return ERR_PTR(-EINVAL);
d0522f1c 3367 }
0e1d6eca 3368
8c713dc9
JB
3369 if (ops->alloc) {
3370 dev = ops->alloc(tb, ifname, name_assign_type,
3371 num_tx_queues, num_rx_queues);
3372 if (IS_ERR(dev))
3373 return dev;
3374 } else {
3375 dev = alloc_netdev_mqs(ops->priv_size, ifname,
3376 name_assign_type, ops->setup,
3377 num_tx_queues, num_rx_queues);
3378 }
3379
e7199288 3380 if (!dev)
d1892e4e 3381 return ERR_PTR(-ENOMEM);
e7199288 3382
b0ad3c17
XL
3383 err = validate_linkmsg(dev, tb, extack);
3384 if (err < 0) {
3385 free_netdev(dev);
3386 return ERR_PTR(err);
3387 }
3388
81adee47
EB
3389 dev_net_set(dev, net);
3390 dev->rtnl_link_ops = ops;
3729d502 3391 dev->rtnl_link_state = RTNL_LINK_INITIALIZING;
81adee47 3392
d836f5c6
ED
3393 if (tb[IFLA_MTU]) {
3394 u32 mtu = nla_get_u32(tb[IFLA_MTU]);
d836f5c6
ED
3395
3396 err = dev_validate_mtu(dev, mtu, extack);
3397 if (err) {
3398 free_netdev(dev);
3399 return ERR_PTR(err);
3400 }
3401 dev->mtu = mtu;
3402 }
2afb9b53 3403 if (tb[IFLA_ADDRESS]) {
efd38f75
JK
3404 __dev_addr_set(dev, nla_data(tb[IFLA_ADDRESS]),
3405 nla_len(tb[IFLA_ADDRESS]));
2afb9b53
JP
3406 dev->addr_assign_type = NET_ADDR_SET;
3407 }
e7199288
PE
3408 if (tb[IFLA_BROADCAST])
3409 memcpy(dev->broadcast, nla_data(tb[IFLA_BROADCAST]),
3410 nla_len(tb[IFLA_BROADCAST]));
3411 if (tb[IFLA_TXQLEN])
3412 dev->tx_queue_len = nla_get_u32(tb[IFLA_TXQLEN]);
3413 if (tb[IFLA_OPERSTATE])
93b2d4a2 3414 set_operstate(dev, nla_get_u8(tb[IFLA_OPERSTATE]));
e7199288
PE
3415 if (tb[IFLA_LINKMODE])
3416 dev->link_mode = nla_get_u8(tb[IFLA_LINKMODE]);
ffa934f1
PM
3417 if (tb[IFLA_GROUP])
3418 dev_set_group(dev, nla_get_u32(tb[IFLA_GROUP]));
46e6b992
SH
3419 if (tb[IFLA_GSO_MAX_SIZE])
3420 netif_set_gso_max_size(dev, nla_get_u32(tb[IFLA_GSO_MAX_SIZE]));
3421 if (tb[IFLA_GSO_MAX_SEGS])
6d872df3 3422 netif_set_gso_max_segs(dev, nla_get_u32(tb[IFLA_GSO_MAX_SEGS]));
eac1b93c
CL
3423 if (tb[IFLA_GRO_MAX_SIZE])
3424 netif_set_gro_max_size(dev, nla_get_u32(tb[IFLA_GRO_MAX_SIZE]));
9eefedd5
XL
3425 if (tb[IFLA_GSO_IPV4_MAX_SIZE])
3426 netif_set_gso_ipv4_max_size(dev, nla_get_u32(tb[IFLA_GSO_IPV4_MAX_SIZE]));
3427 if (tb[IFLA_GRO_IPV4_MAX_SIZE])
3428 netif_set_gro_ipv4_max_size(dev, nla_get_u32(tb[IFLA_GRO_IPV4_MAX_SIZE]));
e7199288
PE
3429
3430 return dev;
e7199288 3431}
e0d087af 3432EXPORT_SYMBOL(rtnl_create_link);
e7199288 3433
90f62cf3
EB
3434static int rtnl_group_changelink(const struct sk_buff *skb,
3435 struct net *net, int group,
e7ed828f 3436 struct ifinfomsg *ifm,
ddf9f970 3437 struct netlink_ext_ack *extack,
e7ed828f
VD
3438 struct nlattr **tb)
3439{
d079535d 3440 struct net_device *dev, *aux;
e7ed828f
VD
3441 int err;
3442
d079535d 3443 for_each_netdev_safe(net, dev, aux) {
e7ed828f 3444 if (dev->group == group) {
89da780a
XL
3445 err = validate_linkmsg(dev, tb, extack);
3446 if (err < 0)
3447 return err;
5ea08b52 3448 err = do_setlink(skb, dev, ifm, extack, tb, 0);
e7ed828f
VD
3449 if (err < 0)
3450 return err;
3451 }
3452 }
3453
3454 return 0;
3455}
3456
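/* Create a new link from an RTM_NEWLINK request: allocate the netdev in the
 * namespace named by IFLA_LINK_NETNSID (if any, otherwise the destination
 * namespace), call the type's ->newlink() or register_netdevice(), then move
 * the device to the destination namespace (when IFLA_LINK_NETNSID was used)
 * and enslave it under IFLA_MASTER if requested.
 */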
63105e83
JK
3457static int rtnl_newlink_create(struct sk_buff *skb, struct ifinfomsg *ifm,
3458 const struct rtnl_link_ops *ops,
d88e136c 3459 const struct nlmsghdr *nlh,
63105e83 3460 struct nlattr **tb, struct nlattr **data,
02839cc8
JK
3461 struct netlink_ext_ack *extack)
3462{
3463 unsigned char name_assign_type = NET_NAME_USER;
3464 struct net *net = sock_net(skb->sk);
d88e136c 3465 u32 portid = NETLINK_CB(skb).portid;
02839cc8
JK
3466 struct net *dest_net, *link_net;
3467 struct net_device *dev;
3468 char ifname[IFNAMSIZ];
3469 int err;
3470
3471 if (!ops->alloc && !ops->setup)
3472 return -EOPNOTSUPP;
3473
3474 if (tb[IFLA_IFNAME]) {
3475 nla_strscpy(ifname, tb[IFLA_IFNAME], IFNAMSIZ);
3476 } else {
3477 snprintf(ifname, IFNAMSIZ, "%s%%d", ops->kind);
3478 name_assign_type = NET_NAME_ENUM;
3479 }
3480
3481 dest_net = rtnl_link_get_net_capable(skb, net, tb, CAP_NET_ADMIN);
3482 if (IS_ERR(dest_net))
3483 return PTR_ERR(dest_net);
3484
3485 if (tb[IFLA_LINK_NETNSID]) {
3486 int id = nla_get_s32(tb[IFLA_LINK_NETNSID]);
3487
3488 link_net = get_net_ns_by_id(dest_net, id);
3489 if (!link_net) {
3490 NL_SET_ERR_MSG(extack, "Unknown network namespace id");
3491 err = -EINVAL;
3492 goto out;
3493 }
3494 err = -EPERM;
3495 if (!netlink_ns_capable(skb, link_net->user_ns, CAP_NET_ADMIN))
3496 goto out;
3497 } else {
3498 link_net = NULL;
3499 }
3500
3501 dev = rtnl_create_link(link_net ? : dest_net, ifname,
3502 name_assign_type, ops, tb, extack);
3503 if (IS_ERR(dev)) {
3504 err = PTR_ERR(dev);
3505 goto out;
3506 }
3507
3508 dev->ifindex = ifm->ifi_index;
3509
3510 if (ops->newlink)
3511 err = ops->newlink(link_net ? : net, dev, tb, data, extack);
3512 else
3513 err = register_netdevice(dev);
3514 if (err < 0) {
3515 free_netdev(dev);
3516 goto out;
3517 }
3518
d88e136c 3519 err = rtnl_configure_link(dev, ifm, portid, nlh);
02839cc8
JK
3520 if (err < 0)
3521 goto out_unregister;
3522 if (link_net) {
3523 err = dev_change_net_namespace(dev, dest_net, ifname);
3524 if (err < 0)
3525 goto out_unregister;
3526 }
3527 if (tb[IFLA_MASTER]) {
3528 err = do_set_master(dev, nla_get_u32(tb[IFLA_MASTER]), extack);
3529 if (err)
3530 goto out_unregister;
3531 }
3532out:
3533 if (link_net)
3534 put_net(link_net);
3535 put_net(dest_net);
3536 return err;
3537out_unregister:
3538 if (ops->newlink) {
3539 LIST_HEAD(list_kill);
3540
3541 ops->dellink(dev, &list_kill);
3542 unregister_netdevice_many(&list_kill);
3543 } else {
3544 unregister_netdevice(dev);
3545 }
3546 goto out;
3547}
63105e83 3548
c92bf26c
JK
3549struct rtnl_newlink_tbs {
3550 struct nlattr *tb[IFLA_MAX + 1];
3551 struct nlattr *attr[RTNL_MAX_TYPE + 1];
3552 struct nlattr *slave_attr[RTNL_SLAVE_MAX_TYPE + 1];
3553};
3554
a2939745 3555static int __rtnl_newlink(struct sk_buff *skb, struct nlmsghdr *nlh,
c92bf26c
JK
3556 struct rtnl_newlink_tbs *tbs,
3557 struct netlink_ext_ack *extack)
38f7b870 3558{
420d0318 3559 struct nlattr *linkinfo[IFLA_INFO_MAX + 1];
c92bf26c 3560 struct nlattr ** const tb = tbs->tb;
c6f6f244
ED
3561 const struct rtnl_link_ops *m_ops;
3562 struct net_device *master_dev;
3b1e0a65 3563 struct net *net = sock_net(skb->sk);
38f7b870 3564 const struct rtnl_link_ops *ops;
420d0318
JK
3565 struct nlattr **slave_data;
3566 char kind[MODULE_NAME_LEN];
38f7b870
PM
3567 struct net_device *dev;
3568 struct ifinfomsg *ifm;
420d0318 3569 struct nlattr **data;
ef2a7c90 3570 bool link_specified;
38f7b870
PM
3571 int err;
3572
95a5afca 3573#ifdef CONFIG_MODULES
38f7b870 3574replay:
8072f085 3575#endif
8cb08174
JB
3576 err = nlmsg_parse_deprecated(nlh, sizeof(*ifm), tb, IFLA_MAX,
3577 ifla_policy, extack);
38f7b870
PM
3578 if (err < 0)
3579 return err;
3580
4ff66cae
CB
3581 err = rtnl_ensure_unique_netns(tb, extack, false);
3582 if (err < 0)
3583 return err;
3584
38f7b870 3585 ifm = nlmsg_data(nlh);
ef2a7c90
FF
3586 if (ifm->ifi_index > 0) {
3587 link_specified = true;
881d966b 3588 dev = __dev_get_by_index(net, ifm->ifi_index);
30188bd7
IS
3589 } else if (ifm->ifi_index < 0) {
3590 NL_SET_ERR_MSG(extack, "ifindex can't be negative");
3591 return -EINVAL;
ef2a7c90
FF
3592 } else if (tb[IFLA_IFNAME] || tb[IFLA_ALT_IFNAME]) {
3593 link_specified = true;
5ea08b52 3594 dev = rtnl_dev_get(net, tb);
ef2a7c90
FF
3595 } else {
3596 link_specified = false;
7af12cba 3597 dev = NULL;
ef2a7c90 3598 }
38f7b870 3599
c6f6f244
ED
3600 master_dev = NULL;
3601 m_ops = NULL;
ba7d49b1
JP
3602 if (dev) {
3603 master_dev = netdev_master_upper_dev_get(dev);
3604 if (master_dev)
3605 m_ops = master_dev->rtnl_link_ops;
3606 }
3607
38f7b870 3608 if (tb[IFLA_LINKINFO]) {
8cb08174
JB
3609 err = nla_parse_nested_deprecated(linkinfo, IFLA_INFO_MAX,
3610 tb[IFLA_LINKINFO],
3611 ifla_info_policy, NULL);
38f7b870
PM
3612 if (err < 0)
3613 return err;
3614 } else
3615 memset(linkinfo, 0, sizeof(linkinfo));
3616
3617 if (linkinfo[IFLA_INFO_KIND]) {
872f6903 3618 nla_strscpy(kind, linkinfo[IFLA_INFO_KIND], sizeof(kind));
38f7b870
PM
3619 ops = rtnl_link_ops_get(kind);
3620 } else {
3621 kind[0] = '\0';
3622 ops = NULL;
3623 }
3624
420d0318
JK
3625 data = NULL;
3626 if (ops) {
3627 if (ops->maxtype > RTNL_MAX_TYPE)
3628 return -EINVAL;
ccf8dbcd 3629
420d0318 3630 if (ops->maxtype && linkinfo[IFLA_INFO_DATA]) {
c92bf26c 3631 err = nla_parse_nested_deprecated(tbs->attr, ops->maxtype,
8cb08174
JB
3632 linkinfo[IFLA_INFO_DATA],
3633 ops->policy, extack);
420d0318
JK
3634 if (err < 0)
3635 return err;
c92bf26c 3636 data = tbs->attr;
38f7b870 3637 }
420d0318
JK
3638 if (ops->validate) {
3639 err = ops->validate(tb, data, extack);
3640 if (err < 0)
3641 return err;
3642 }
3643 }
38f7b870 3644
420d0318
JK
3645 slave_data = NULL;
3646 if (m_ops) {
3647 if (m_ops->slave_maxtype > RTNL_SLAVE_MAX_TYPE)
3648 return -EINVAL;
ccf8dbcd 3649
420d0318
JK
3650 if (m_ops->slave_maxtype &&
3651 linkinfo[IFLA_INFO_SLAVE_DATA]) {
c92bf26c 3652 err = nla_parse_nested_deprecated(tbs->slave_attr,
8cb08174
JB
3653 m_ops->slave_maxtype,
3654 linkinfo[IFLA_INFO_SLAVE_DATA],
3655 m_ops->slave_policy,
3656 extack);
420d0318
JK
3657 if (err < 0)
3658 return err;
c92bf26c 3659 slave_data = tbs->slave_attr;
ba7d49b1 3660 }
420d0318 3661 }
ba7d49b1 3662
420d0318
JK
3663 if (dev) {
3664 int status = 0;
38f7b870 3665
420d0318
JK
3666 if (nlh->nlmsg_flags & NLM_F_EXCL)
3667 return -EEXIST;
3668 if (nlh->nlmsg_flags & NLM_F_REPLACE)
3669 return -EOPNOTSUPP;
38f7b870 3670
89da780a
XL
3671 err = validate_linkmsg(dev, tb, extack);
3672 if (err < 0)
3673 return err;
3674
420d0318
JK
3675 if (linkinfo[IFLA_INFO_DATA]) {
3676 if (!ops || ops != dev->rtnl_link_ops ||
3677 !ops->changelink)
3678 return -EOPNOTSUPP;
38f7b870 3679
420d0318
JK
3680 err = ops->changelink(dev, tb, data, extack);
3681 if (err < 0)
3682 return err;
3683 status |= DO_SETLINK_NOTIFY;
3684 }
ba7d49b1 3685
420d0318
JK
3686 if (linkinfo[IFLA_INFO_SLAVE_DATA]) {
3687 if (!m_ops || !m_ops->slave_changelink)
3688 return -EOPNOTSUPP;
ba7d49b1 3689
420d0318
JK
3690 err = m_ops->slave_changelink(master_dev, dev, tb,
3691 slave_data, extack);
3692 if (err < 0)
3693 return err;
3694 status |= DO_SETLINK_NOTIFY;
38f7b870
PM
3695 }
3696
5ea08b52 3697 return do_setlink(skb, dev, ifm, extack, tb, status);
420d0318
JK
3698 }
3699
3700 if (!(nlh->nlmsg_flags & NLM_F_CREATE)) {
ef2a7c90
FF
3701 /* No dev found and NLM_F_CREATE not set. Requested dev does not exist,
3702 * or it's for a group
3703 */
3704 if (link_specified)
3705 return -ENODEV;
3706 if (tb[IFLA_GROUP])
420d0318 3707 return rtnl_group_changelink(skb, net,
ffa934f1 3708 nla_get_u32(tb[IFLA_GROUP]),
ddf9f970 3709 ifm, extack, tb);
6f37c9f9 3710 return -ENODEV;
420d0318 3711 }
38f7b870 3712
420d0318
JK
3713 if (tb[IFLA_MAP] || tb[IFLA_PROTINFO])
3714 return -EOPNOTSUPP;
38f7b870 3715
420d0318 3716 if (!ops) {
95a5afca 3717#ifdef CONFIG_MODULES
420d0318
JK
3718 if (kind[0]) {
3719 __rtnl_unlock();
3720 request_module("rtnl-link-%s", kind);
3721 rtnl_lock();
3722 ops = rtnl_link_ops_get(kind);
3723 if (ops)
3724 goto replay;
38f7b870 3725 }
420d0318
JK
3726#endif
3727 NL_SET_ERR_MSG(extack, "Unknown device type");
3728 return -EOPNOTSUPP;
3729 }
38f7b870 3730
d88e136c 3731 return rtnl_newlink_create(skb, ifm, ops, nlh, tb, data, extack);
63105e83
JK
3732}
3733
a2939745
JK
3734static int rtnl_newlink(struct sk_buff *skb, struct nlmsghdr *nlh,
3735 struct netlink_ext_ack *extack)
3736{
c92bf26c 3737 struct rtnl_newlink_tbs *tbs;
a2939745
JK
3738 int ret;
3739
c92bf26c
JK
3740 tbs = kmalloc(sizeof(*tbs), GFP_KERNEL);
3741 if (!tbs)
a2939745
JK
3742 return -ENOMEM;
3743
c92bf26c
JK
3744 ret = __rtnl_newlink(skb, nlh, tbs, extack);
3745 kfree(tbs);
a2939745
JK
3746 return ret;
3747}
3748
9b3757b0
JK
3749static int rtnl_valid_getlink_req(struct sk_buff *skb,
3750 const struct nlmsghdr *nlh,
3751 struct nlattr **tb,
3752 struct netlink_ext_ack *extack)
3753{
3754 struct ifinfomsg *ifm;
3755 int i, err;
3756
3757 if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ifm))) {
3758 NL_SET_ERR_MSG(extack, "Invalid header for get link");
3759 return -EINVAL;
3760 }
3761
3762 if (!netlink_strict_get_check(skb))
8cb08174
JB
3763 return nlmsg_parse_deprecated(nlh, sizeof(*ifm), tb, IFLA_MAX,
3764 ifla_policy, extack);
9b3757b0
JK
3765
3766 ifm = nlmsg_data(nlh);
3767 if (ifm->__ifi_pad || ifm->ifi_type || ifm->ifi_flags ||
3768 ifm->ifi_change) {
3769 NL_SET_ERR_MSG(extack, "Invalid values in header for get link request");
3770 return -EINVAL;
3771 }
3772
8cb08174
JB
3773 err = nlmsg_parse_deprecated_strict(nlh, sizeof(*ifm), tb, IFLA_MAX,
3774 ifla_policy, extack);
9b3757b0
JK
3775 if (err)
3776 return err;
3777
3778 for (i = 0; i <= IFLA_MAX; i++) {
3779 if (!tb[i])
3780 continue;
3781
3782 switch (i) {
3783 case IFLA_IFNAME:
76c9ac0e 3784 case IFLA_ALT_IFNAME:
9b3757b0
JK
3785 case IFLA_EXT_MASK:
3786 case IFLA_TARGET_NETNSID:
3787 break;
3788 default:
3789 NL_SET_ERR_MSG(extack, "Unsupported attribute in get link request");
3790 return -EINVAL;
3791 }
3792 }
3793
3794 return 0;
3795}
3796
c21ef3e3
DA
3797static int rtnl_getlink(struct sk_buff *skb, struct nlmsghdr *nlh,
3798 struct netlink_ext_ack *extack)
711e2c33 3799{
3b1e0a65 3800 struct net *net = sock_net(skb->sk);
79e1ad14 3801 struct net *tgt_net = net;
b60c5115
TG
3802 struct ifinfomsg *ifm;
3803 struct nlattr *tb[IFLA_MAX+1];
3804 struct net_device *dev = NULL;
3805 struct sk_buff *nskb;
79e1ad14 3806 int netnsid = -1;
339bf98f 3807 int err;
115c9b81 3808 u32 ext_filter_mask = 0;
711e2c33 3809
9b3757b0 3810 err = rtnl_valid_getlink_req(skb, nlh, tb, extack);
b60c5115 3811 if (err < 0)
9918f230 3812 return err;
b60c5115 3813
4ff66cae
CB
3814 err = rtnl_ensure_unique_netns(tb, extack, true);
3815 if (err < 0)
3816 return err;
3817
7e4a8d5a
CB
3818 if (tb[IFLA_TARGET_NETNSID]) {
3819 netnsid = nla_get_s32(tb[IFLA_TARGET_NETNSID]);
c383edc4 3820 tgt_net = rtnl_get_net_ns_capable(NETLINK_CB(skb).sk, netnsid);
79e1ad14
JB
3821 if (IS_ERR(tgt_net))
3822 return PTR_ERR(tgt_net);
3823 }
3824
115c9b81
GR
3825 if (tb[IFLA_EXT_MASK])
3826 ext_filter_mask = nla_get_u32(tb[IFLA_EXT_MASK]);
3827
79e1ad14 3828 err = -EINVAL;
b60c5115 3829 ifm = nlmsg_data(nlh);
a3d12891 3830 if (ifm->ifi_index > 0)
79e1ad14 3831 dev = __dev_get_by_index(tgt_net, ifm->ifi_index);
76c9ac0e 3832 else if (tb[IFLA_IFNAME] || tb[IFLA_ALT_IFNAME])
5ea08b52 3833 dev = rtnl_dev_get(tgt_net, tb);
a3d12891 3834 else
79e1ad14 3835 goto out;
711e2c33 3836
79e1ad14 3837 err = -ENODEV;
a3d12891 3838 if (dev == NULL)
79e1ad14 3839 goto out;
a3d12891 3840
79e1ad14 3841 err = -ENOBUFS;
ac40916a 3842 nskb = nlmsg_new_large(if_nlmsg_size(dev, ext_filter_mask));
a3d12891 3843 if (nskb == NULL)
79e1ad14 3844 goto out;
b60c5115 3845
facd15df
JB
3846 /* Synchronize the carrier state so we don't report a state
3847 * that we're not actually going to honour immediately; if
3848 * the driver just did a carrier off->on transition, we can
3849 * only TX if link watch work has run, but without this we'd
3850 * already report carrier on, even if it doesn't work yet.
3851 */
3852 linkwatch_sync_dev(dev);
3853
79e1ad14
JB
3854 err = rtnl_fill_ifinfo(nskb, dev, net,
3855 RTM_NEWLINK, NETLINK_CB(skb).portid,
3856 nlh->nlmsg_seq, 0, 0, ext_filter_mask,
d4e4fdf9 3857 0, NULL, 0, netnsid, GFP_KERNEL);
26932566
PM
3858 if (err < 0) {
3859 /* -EMSGSIZE implies BUG in if_nlmsg_size */
3860 WARN_ON(err == -EMSGSIZE);
3861 kfree_skb(nskb);
a3d12891 3862 } else
15e47304 3863 err = rtnl_unicast(nskb, net, NETLINK_CB(skb).portid);
79e1ad14
JB
3864out:
3865 if (netnsid >= 0)
3866 put_net(tgt_net);
711e2c33 3867
b60c5115 3868 return err;
711e2c33 3869}
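/*
 * Editorial example, not part of rtnetlink.c: a minimal sketch of the
 * RTM_GETLINK request handled by rtnl_getlink() above.  Under strict
 * checking (valid_getlink_req()) the ifinfomsg header must have its
 * pad/type/flags/change fields zeroed and only IFLA_IFNAME,
 * IFLA_ALT_IFNAME, IFLA_EXT_MASK and IFLA_TARGET_NETNSID are accepted;
 * here the device is selected purely by ifindex.  The caller-supplied
 * buffer must be large enough for the RTM_NEWLINK reply.
 */
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/netlink.h>
#include <linux/rtnetlink.h>

static int getlink_by_index(int ifindex, char *buf, size_t buflen)
{
	struct {
		struct nlmsghdr nh;
		struct ifinfomsg ifm;
	} req = {
		.nh.nlmsg_len = NLMSG_LENGTH(sizeof(struct ifinfomsg)),
		.nh.nlmsg_type = RTM_GETLINK,
		.nh.nlmsg_flags = NLM_F_REQUEST,
		.ifm.ifi_family = AF_UNSPEC,
		.ifm.ifi_index = ifindex,
	};
	struct sockaddr_nl sa = { .nl_family = AF_NETLINK };
	int fd, ret;

	fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);
	if (fd < 0)
		return -1;
	ret = sendto(fd, &req, req.nh.nlmsg_len, 0,
		     (struct sockaddr *)&sa, sizeof(sa));
	if (ret >= 0)
		ret = recv(fd, buf, buflen, 0); /* RTM_NEWLINK reply or nlmsgerr */
	close(fd);
	return ret;
}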
711e2c33 3870
36fbf1e5
JP
3871static int rtnl_alt_ifname(int cmd, struct net_device *dev, struct nlattr *attr,
3872 bool *changed, struct netlink_ext_ack *extack)
3873{
3874 char *alt_ifname;
155fb43b 3875 size_t size;
36fbf1e5
JP
3876 int err;
3877
3878 err = nla_validate(attr, attr->nla_len, IFLA_MAX, ifla_policy, extack);
3879 if (err)
3880 return err;
3881
155fb43b
JK
3882 if (cmd == RTM_NEWLINKPROP) {
3883 size = rtnl_prop_list_size(dev);
3884 size += nla_total_size(ALTIFNAMSIZ);
3885 if (size >= U16_MAX) {
3886 NL_SET_ERR_MSG(extack,
3887 "effective property list too long");
3888 return -EINVAL;
3889 }
3890 }
3891
5d26cff5 3892 alt_ifname = nla_strdup(attr, GFP_KERNEL_ACCOUNT);
44bfa9c5
ED
3893 if (!alt_ifname)
3894 return -ENOMEM;
3895
36fbf1e5 3896 if (cmd == RTM_NEWLINKPROP) {
36fbf1e5 3897 err = netdev_name_node_alt_create(dev, alt_ifname);
44bfa9c5
ED
3898 if (!err)
3899 alt_ifname = NULL;
36fbf1e5
JP
3900 } else if (cmd == RTM_DELLINKPROP) {
3901 err = netdev_name_node_alt_destroy(dev, alt_ifname);
36fbf1e5 3902 } else {
44bfa9c5
ED
3903 WARN_ON_ONCE(1);
3904 err = -EINVAL;
36fbf1e5
JP
3905 }
3906
44bfa9c5
ED
3907 kfree(alt_ifname);
3908 if (!err)
3909 *changed = true;
3910 return err;
36fbf1e5
JP
3911}
3912
3913static int rtnl_linkprop(int cmd, struct sk_buff *skb, struct nlmsghdr *nlh,
3914 struct netlink_ext_ack *extack)
3915{
3916 struct net *net = sock_net(skb->sk);
3917 struct nlattr *tb[IFLA_MAX + 1];
3918 struct net_device *dev;
3919 struct ifinfomsg *ifm;
3920 bool changed = false;
3921 struct nlattr *attr;
3922 int err, rem;
3923
3924 err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFLA_MAX, ifla_policy, extack);
3925 if (err)
3926 return err;
3927
3928 err = rtnl_ensure_unique_netns(tb, extack, true);
3929 if (err)
3930 return err;
3931
3932 ifm = nlmsg_data(nlh);
cc6090e9 3933 if (ifm->ifi_index > 0)
36fbf1e5 3934 dev = __dev_get_by_index(net, ifm->ifi_index);
76c9ac0e 3935 else if (tb[IFLA_IFNAME] || tb[IFLA_ALT_IFNAME])
5ea08b52 3936 dev = rtnl_dev_get(net, tb);
cc6090e9 3937 else
36fbf1e5 3938 return -EINVAL;
36fbf1e5
JP
3939
3940 if (!dev)
3941 return -ENODEV;
3942
3943 if (!tb[IFLA_PROP_LIST])
3944 return 0;
3945
3946 nla_for_each_nested(attr, tb[IFLA_PROP_LIST], rem) {
3947 switch (nla_type(attr)) {
3948 case IFLA_ALT_IFNAME:
3949 err = rtnl_alt_ifname(cmd, dev, attr, &changed, extack);
3950 if (err)
3951 return err;
3952 break;
3953 }
3954 }
3955
3956 if (changed)
3957 netdev_state_change(dev);
3958 return 0;
3959}
3960
3961static int rtnl_newlinkprop(struct sk_buff *skb, struct nlmsghdr *nlh,
3962 struct netlink_ext_ack *extack)
3963{
3964 return rtnl_linkprop(RTM_NEWLINKPROP, skb, nlh, extack);
3965}
3966
3967static int rtnl_dellinkprop(struct sk_buff *skb, struct nlmsghdr *nlh,
3968 struct netlink_ext_ack *extack)
3969{
3970 return rtnl_linkprop(RTM_DELLINKPROP, skb, nlh, extack);
3971}
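/*
 * Editorial example, not part of rtnetlink.c: a sketch of the
 * RTM_NEWLINKPROP request served by rtnl_linkprop() above.  The
 * alternative name travels inside a nested IFLA_PROP_LIST as an
 * IFLA_ALT_IFNAME attribute; the request is parsed with strict checking,
 * so the nest is marked NLA_F_NESTED.  Helper name is made up and error
 * handling is minimal.
 */
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/netlink.h>
#include <linux/rtnetlink.h>
#include <linux/if_link.h>

static int link_add_altname(int ifindex, const char *altname)
{
	struct {
		struct nlmsghdr nh;
		struct ifinfomsg ifm;
		char attrs[128];
	} req;
	struct sockaddr_nl sa = { .nl_family = AF_NETLINK };
	struct rtattr *nest, *rta;
	int fd, ret;

	memset(&req, 0, sizeof(req));
	req.nh.nlmsg_len = NLMSG_LENGTH(sizeof(struct ifinfomsg));
	req.nh.nlmsg_type = RTM_NEWLINKPROP;
	req.nh.nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK;
	req.ifm.ifi_family = AF_UNSPEC;
	req.ifm.ifi_index = ifindex;

	/* IFLA_PROP_LIST { IFLA_ALT_IFNAME } */
	nest = (struct rtattr *)((char *)&req + NLMSG_ALIGN(req.nh.nlmsg_len));
	nest->rta_type = IFLA_PROP_LIST | NLA_F_NESTED;
	rta = (struct rtattr *)((char *)nest + RTA_LENGTH(0));
	rta->rta_type = IFLA_ALT_IFNAME;
	rta->rta_len = RTA_LENGTH(strlen(altname) + 1);
	strcpy(RTA_DATA(rta), altname);
	nest->rta_len = RTA_LENGTH(0) + RTA_ALIGN(rta->rta_len);
	req.nh.nlmsg_len = NLMSG_ALIGN(req.nh.nlmsg_len) + RTA_ALIGN(nest->rta_len);

	fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);
	if (fd < 0)
		return -1;
	ret = sendto(fd, &req, req.nh.nlmsg_len, 0,
		     (struct sockaddr *)&sa, sizeof(sa));
	close(fd);
	return ret < 0 ? -1 : 0;
}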
3972
ebfe3c51 3973static u32 rtnl_calcit(struct sk_buff *skb, struct nlmsghdr *nlh)
c7ac8679 3974{
115c9b81 3975 struct net *net = sock_net(skb->sk);
ebfe3c51 3976 size_t min_ifinfo_dump_size = 0;
115c9b81
GR
3977 struct nlattr *tb[IFLA_MAX+1];
3978 u32 ext_filter_mask = 0;
ebfe3c51 3979 struct net_device *dev;
e5eca6d4
MS
3980 int hdrlen;
3981
3982 /* Same kernel<->userspace interface hack as in rtnl_dump_ifinfo. */
3983 hdrlen = nlmsg_len(nlh) < sizeof(struct ifinfomsg) ?
3984 sizeof(struct rtgenmsg) : sizeof(struct ifinfomsg);
115c9b81 3985
8cb08174 3986 if (nlmsg_parse_deprecated(nlh, hdrlen, tb, IFLA_MAX, ifla_policy, NULL) >= 0) {
a4b64fbe
ED
3987 if (tb[IFLA_EXT_MASK])
3988 ext_filter_mask = nla_get_u32(tb[IFLA_EXT_MASK]);
3989 }
115c9b81
GR
3990
3991 if (!ext_filter_mask)
3992 return NLMSG_GOODSIZE;
3993 /*
3994 * traverse the list of net devices and compute the minimum
3995 * buffer size based upon the filter mask.
3996 */
6853dd48
FW
3997 rcu_read_lock();
3998 for_each_netdev_rcu(net, dev) {
ebfe3c51
DZ
3999 min_ifinfo_dump_size = max(min_ifinfo_dump_size,
4000 if_nlmsg_size(dev, ext_filter_mask));
115c9b81 4001 }
6853dd48 4002 rcu_read_unlock();
115c9b81 4003
93af2056 4004 return nlmsg_total_size(min_ifinfo_dump_size);
c7ac8679
GR
4005}
4006
42bad1da 4007static int rtnl_dump_all(struct sk_buff *skb, struct netlink_callback *cb)
1da177e4
LT
4008{
4009 int idx;
4010 int s_idx = cb->family;
87ccbb1f 4011 int type = cb->nlh->nlmsg_type - RTM_BASE;
c63586dc 4012 int ret = 0;
1da177e4
LT
4013
4014 if (s_idx == 0)
4015 s_idx = 1;
6853dd48 4016
25239cee 4017 for (idx = 1; idx <= RTNL_FAMILY_MAX; idx++) {
51e13685 4018 struct rtnl_link __rcu **tab;
addf9b90 4019 struct rtnl_link *link;
6853dd48
FW
4020 rtnl_dumpit_func dumpit;
4021
1da177e4
LT
4022 if (idx < s_idx || idx == PF_PACKET)
4023 continue;
6853dd48 4024
addf9b90 4025 if (type < 0 || type >= RTM_NR_MSGTYPES)
1da177e4 4026 continue;
6853dd48 4027
addf9b90
FW
4028 tab = rcu_dereference_rtnl(rtnl_msg_handlers[idx]);
4029 if (!tab)
4030 continue;
4031
51e13685 4032 link = rcu_dereference_rtnl(tab[type]);
addf9b90
FW
4033 if (!link)
4034 continue;
4035
4036 dumpit = link->dumpit;
6853dd48
FW
4037 if (!dumpit)
4038 continue;
4039
0465277f 4040 if (idx > s_idx) {
1da177e4 4041 memset(&cb->args[0], 0, sizeof(cb->args));
0465277f
ND
4042 cb->prev_seq = 0;
4043 cb->seq = 0;
4044 }
c63586dc 4045 ret = dumpit(skb, cb);
5e1acb4a 4046 if (ret)
1da177e4
LT
4047 break;
4048 }
4049 cb->family = idx;
4050
c63586dc 4051 return skb->len ? : ret;
1da177e4
LT
4052}
4053
395eea6c 4054struct sk_buff *rtmsg_ifinfo_build_skb(int type, struct net_device *dev,
3d3ea5af 4055 unsigned int change,
38e01b30 4056 u32 event, gfp_t flags, int *new_nsid,
59d3efd2
MW
4057 int new_ifindex, u32 portid,
4058 const struct nlmsghdr *nlh)
1da177e4 4059{
c346dca1 4060 struct net *net = dev_net(dev);
1da177e4 4061 struct sk_buff *skb;
0ec6d3f4 4062 int err = -ENOBUFS;
59d3efd2 4063 u32 seq = 0;
1da177e4 4064
50af5969 4065 skb = nlmsg_new(if_nlmsg_size(dev, 0), flags);
0ec6d3f4
TG
4066 if (skb == NULL)
4067 goto errout;
1da177e4 4068
59d3efd2
MW
4069 if (nlmsg_report(nlh))
4070 seq = nlmsg_seq(nlh);
4071 else
4072 portid = 0;
4073
79e1ad14 4074 err = rtnl_fill_ifinfo(skb, dev, dev_net(dev),
1d997f10 4075 type, portid, seq, change, 0, 0, event,
d4e4fdf9 4076 new_nsid, new_ifindex, -1, flags);
26932566
PM
4077 if (err < 0) {
4078 /* -EMSGSIZE implies BUG in if_nlmsg_size() */
4079 WARN_ON(err == -EMSGSIZE);
4080 kfree_skb(skb);
4081 goto errout;
4082 }
395eea6c 4083 return skb;
0ec6d3f4
TG
4084errout:
4085 if (err < 0)
4b3da706 4086 rtnl_set_sk_err(net, RTNLGRP_LINK, err);
395eea6c
MB
4087 return NULL;
4088}
4089
1d997f10
HL
4090void rtmsg_ifinfo_send(struct sk_buff *skb, struct net_device *dev, gfp_t flags,
4091 u32 portid, const struct nlmsghdr *nlh)
395eea6c
MB
4092{
4093 struct net *net = dev_net(dev);
4094
1d997f10 4095 rtnl_notify(skb, net, portid, RTNLGRP_LINK, nlh, flags);
395eea6c
MB
4096}
4097
3d3ea5af
VY
4098static void rtmsg_ifinfo_event(int type, struct net_device *dev,
4099 unsigned int change, u32 event,
1d997f10
HL
4100 gfp_t flags, int *new_nsid, int new_ifindex,
4101 u32 portid, const struct nlmsghdr *nlh)
395eea6c
MB
4102{
4103 struct sk_buff *skb;
4104
ed2a80ab
ND
4105 if (dev->reg_state != NETREG_REGISTERED)
4106 return;
4107
38e01b30 4108 skb = rtmsg_ifinfo_build_skb(type, dev, change, event, flags, new_nsid,
59d3efd2 4109 new_ifindex, portid, nlh);
395eea6c 4110 if (skb)
1d997f10 4111 rtmsg_ifinfo_send(skb, dev, flags, portid, nlh);
1da177e4 4112}
3d3ea5af
VY
4113
4114void rtmsg_ifinfo(int type, struct net_device *dev, unsigned int change,
1d997f10 4115 gfp_t flags, u32 portid, const struct nlmsghdr *nlh)
3d3ea5af 4116{
38e01b30 4117 rtmsg_ifinfo_event(type, dev, change, rtnl_get_event(0), flags,
1d997f10 4118 NULL, 0, portid, nlh);
3d3ea5af 4119}
1da177e4 4120
6621dd29 4121void rtmsg_ifinfo_newnet(int type, struct net_device *dev, unsigned int change,
38e01b30 4122 gfp_t flags, int *new_nsid, int new_ifindex)
6621dd29
ND
4123{
4124 rtmsg_ifinfo_event(type, dev, change, rtnl_get_event(0), flags,
1d997f10 4125 new_nsid, new_ifindex, 0, NULL);
6621dd29
ND
4126}
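/*
 * Editorial example, not part of rtnetlink.c: the rtmsg_ifinfo*()
 * helpers above multicast RTM_NEWLINK/RTM_DELLINK notifications to the
 * RTNLGRP_LINK group.  A userspace monitor only has to bind its
 * NETLINK_ROUTE socket to that group and read; this minimal sketch just
 * counts the events and does no attribute parsing or error recovery.
 */
#include <stdio.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/netlink.h>
#include <linux/rtnetlink.h>

static int monitor_links(void)
{
	struct sockaddr_nl sa = {
		.nl_family = AF_NETLINK,
		.nl_groups = RTMGRP_LINK,	/* 1 << (RTNLGRP_LINK - 1) */
	};
	char buf[8192];
	int fd;

	fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);
	if (fd < 0 || bind(fd, (struct sockaddr *)&sa, sizeof(sa)) < 0)
		return -1;

	for (;;) {
		ssize_t len = recv(fd, buf, sizeof(buf), 0);
		struct nlmsghdr *nh;

		if (len <= 0)
			break;
		for (nh = (struct nlmsghdr *)buf; NLMSG_OK(nh, len);
		     nh = NLMSG_NEXT(nh, len)) {
			if (nh->nlmsg_type == RTM_NEWLINK ||
			    nh->nlmsg_type == RTM_DELLINK)
				printf("link event, type %u\n", nh->nlmsg_type);
		}
	}
	close(fd);
	return 0;
}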
4127
d83b0603
JF
4128static int nlmsg_populate_fdb_fill(struct sk_buff *skb,
4129 struct net_device *dev,
1e53d5bb 4130 u8 *addr, u16 vid, u32 pid, u32 seq,
1c104a6b 4131 int type, unsigned int flags,
b3379041 4132 int nlflags, u16 ndm_state)
d83b0603
JF
4133{
4134 struct nlmsghdr *nlh;
4135 struct ndmsg *ndm;
4136
1c104a6b 4137 nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndm), nlflags);
d83b0603
JF
4138 if (!nlh)
4139 return -EMSGSIZE;
4140
4141 ndm = nlmsg_data(nlh);
4142 ndm->ndm_family = AF_BRIDGE;
4143 ndm->ndm_pad1 = 0;
4144 ndm->ndm_pad2 = 0;
4145 ndm->ndm_flags = flags;
4146 ndm->ndm_type = 0;
4147 ndm->ndm_ifindex = dev->ifindex;
b3379041 4148 ndm->ndm_state = ndm_state;
d83b0603 4149
aa540695 4150 if (nla_put(skb, NDA_LLADDR, dev->addr_len, addr))
d83b0603 4151 goto nla_put_failure;
1e53d5bb
HS
4152 if (vid)
4153 if (nla_put(skb, NDA_VLAN, sizeof(u16), &vid))
4154 goto nla_put_failure;
d83b0603 4155
053c095a
JB
4156 nlmsg_end(skb, nlh);
4157 return 0;
d83b0603
JF
4158
4159nla_put_failure:
4160 nlmsg_cancel(skb, nlh);
4161 return -EMSGSIZE;
4162}
4163
aa540695 4164static inline size_t rtnl_fdb_nlmsg_size(const struct net_device *dev)
3ff661c3 4165{
f82ef3e1 4166 return NLMSG_ALIGN(sizeof(struct ndmsg)) +
aa540695 4167 nla_total_size(dev->addr_len) + /* NDA_LLADDR */
f82ef3e1
SD
4168 nla_total_size(sizeof(u16)) + /* NDA_VLAN */
4169 0;
3ff661c3
JF
4170}
4171
b3379041
HS
4172static void rtnl_fdb_notify(struct net_device *dev, u8 *addr, u16 vid, int type,
4173 u16 ndm_state)
3ff661c3
JF
4174{
4175 struct net *net = dev_net(dev);
4176 struct sk_buff *skb;
4177 int err = -ENOBUFS;
4178
aa540695 4179 skb = nlmsg_new(rtnl_fdb_nlmsg_size(dev), GFP_ATOMIC);
3ff661c3
JF
4180 if (!skb)
4181 goto errout;
4182
1e53d5bb 4183 err = nlmsg_populate_fdb_fill(skb, dev, addr, vid,
b3379041 4184 0, 0, type, NTF_SELF, 0, ndm_state);
3ff661c3
JF
4185 if (err < 0) {
4186 kfree_skb(skb);
4187 goto errout;
4188 }
4189
4190 rtnl_notify(skb, net, 0, RTNLGRP_NEIGH, NULL, GFP_ATOMIC);
4191 return;
4192errout:
4193 rtnl_set_sk_err(net, RTNLGRP_NEIGH, err);
4194}
4195
a986967e 4196/*
090096bf
VY
4197 * ndo_dflt_fdb_add - default netdevice operation to add an FDB entry
4198 */
4199int ndo_dflt_fdb_add(struct ndmsg *ndm,
4200 struct nlattr *tb[],
4201 struct net_device *dev,
f6f6424b 4202 const unsigned char *addr, u16 vid,
090096bf
VY
4203 u16 flags)
4204{
4205 int err = -EINVAL;
4206
 4207 /* If aging addresses are supported, the device will need to
 4208 * implement its own handler for this.
4209 */
4210 if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) {
78ecc890 4211 netdev_info(dev, "default FDB implementation only supports local addresses\n");
090096bf
VY
4212 return err;
4213 }
4214
a35ec8e3
HS
4215 if (tb[NDA_FLAGS_EXT]) {
4216 netdev_info(dev, "invalid flags given to default FDB implementation\n");
4217 return err;
4218 }
4219
65891fea 4220 if (vid) {
23ac0b42 4221 netdev_info(dev, "vlans aren't supported yet for dev_uc|mc_add()\n");
65891fea
OG
4222 return err;
4223 }
4224
090096bf
VY
4225 if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr))
4226 err = dev_uc_add_excl(dev, addr);
4227 else if (is_multicast_ether_addr(addr))
4228 err = dev_mc_add_excl(dev, addr);
4229
4230 /* Only return duplicate errors if NLM_F_EXCL is set */
4231 if (err == -EEXIST && !(flags & NLM_F_EXCL))
4232 err = 0;
4233
4234 return err;
4235}
4236EXPORT_SYMBOL(ndo_dflt_fdb_add);
4237
b88d12e4
FW
4238static int fdb_vid_parse(struct nlattr *vlan_attr, u16 *p_vid,
4239 struct netlink_ext_ack *extack)
f6f6424b
JP
4240{
4241 u16 vid = 0;
4242
4243 if (vlan_attr) {
4244 if (nla_len(vlan_attr) != sizeof(u16)) {
b88d12e4 4245 NL_SET_ERR_MSG(extack, "invalid vlan attribute size");
f6f6424b
JP
4246 return -EINVAL;
4247 }
4248
4249 vid = nla_get_u16(vlan_attr);
4250
4251 if (!vid || vid >= VLAN_VID_MASK) {
b88d12e4 4252 NL_SET_ERR_MSG(extack, "invalid vlan id");
f6f6424b
JP
4253 return -EINVAL;
4254 }
4255 }
4256 *p_vid = vid;
4257 return 0;
4258}
4259
c21ef3e3
DA
4260static int rtnl_fdb_add(struct sk_buff *skb, struct nlmsghdr *nlh,
4261 struct netlink_ext_ack *extack)
77162022
JF
4262{
4263 struct net *net = sock_net(skb->sk);
77162022
JF
4264 struct ndmsg *ndm;
4265 struct nlattr *tb[NDA_MAX+1];
4266 struct net_device *dev;
4267 u8 *addr;
f6f6424b 4268 u16 vid;
77162022
JF
4269 int err;
4270
8cb08174
JB
4271 err = nlmsg_parse_deprecated(nlh, sizeof(*ndm), tb, NDA_MAX, NULL,
4272 extack);
77162022
JF
4273 if (err < 0)
4274 return err;
4275
4276 ndm = nlmsg_data(nlh);
4277 if (ndm->ndm_ifindex == 0) {
b88d12e4 4278 NL_SET_ERR_MSG(extack, "invalid ifindex");
77162022
JF
4279 return -EINVAL;
4280 }
4281
4282 dev = __dev_get_by_index(net, ndm->ndm_ifindex);
4283 if (dev == NULL) {
b88d12e4 4284 NL_SET_ERR_MSG(extack, "unknown ifindex");
77162022
JF
4285 return -ENODEV;
4286 }
4287
4288 if (!tb[NDA_LLADDR] || nla_len(tb[NDA_LLADDR]) != ETH_ALEN) {
b88d12e4 4289 NL_SET_ERR_MSG(extack, "invalid address");
77162022
JF
4290 return -EINVAL;
4291 }
4292
da715775
IS
4293 if (dev->type != ARPHRD_ETHER) {
4294 NL_SET_ERR_MSG(extack, "FDB add only supported for Ethernet devices");
4295 return -EINVAL;
4296 }
4297
77162022 4298 addr = nla_data(tb[NDA_LLADDR]);
77162022 4299
b88d12e4 4300 err = fdb_vid_parse(tb[NDA_VLAN], &vid, extack);
f6f6424b
JP
4301 if (err)
4302 return err;
4303
77162022
JF
4304 err = -EOPNOTSUPP;
4305
 4306 /* Support fdb on the master device - the net/bridge default case */
4307 if ((!ndm->ndm_flags || ndm->ndm_flags & NTF_MASTER) &&
2e92a2d0 4308 netif_is_bridge_port(dev)) {
898e5061
JP
4309 struct net_device *br_dev = netdev_master_upper_dev_get(dev);
4310 const struct net_device_ops *ops = br_dev->netdev_ops;
4311
f6f6424b 4312 err = ops->ndo_fdb_add(ndm, tb, dev, addr, vid,
87b0984e 4313 nlh->nlmsg_flags, extack);
77162022
JF
4314 if (err)
4315 goto out;
4316 else
4317 ndm->ndm_flags &= ~NTF_MASTER;
4318 }
4319
4320 /* Embedded bridge, macvlan, and any other device support */
090096bf
VY
4321 if ((ndm->ndm_flags & NTF_SELF)) {
4322 if (dev->netdev_ops->ndo_fdb_add)
4323 err = dev->netdev_ops->ndo_fdb_add(ndm, tb, dev, addr,
f6f6424b 4324 vid,
87b0984e
PM
4325 nlh->nlmsg_flags,
4326 extack);
090096bf 4327 else
f6f6424b 4328 err = ndo_dflt_fdb_add(ndm, tb, dev, addr, vid,
090096bf 4329 nlh->nlmsg_flags);
77162022 4330
3ff661c3 4331 if (!err) {
b3379041
HS
4332 rtnl_fdb_notify(dev, addr, vid, RTM_NEWNEIGH,
4333 ndm->ndm_state);
77162022 4334 ndm->ndm_flags &= ~NTF_SELF;
3ff661c3 4335 }
77162022
JF
4336 }
4337out:
4338 return err;
4339}
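/*
 * Editorial example, not part of rtnetlink.c: a minimal sketch of the
 * RTM_NEWNEIGH request handled by rtnl_fdb_add() above, in the spirit of
 * "bridge fdb add ... master": an AF_BRIDGE ndmsg selects the port by
 * ifindex, NDA_LLADDR carries the MAC, and NTF_MASTER hands the entry to
 * the bridge above the port.  Helper name is made up; the ACK reply is
 * not read back.
 */
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/netlink.h>
#include <linux/rtnetlink.h>
#include <linux/neighbour.h>
#include <linux/if_ether.h>

static int fdb_add(int port_ifindex, const unsigned char mac[ETH_ALEN])
{
	struct {
		struct nlmsghdr nh;
		struct ndmsg ndm;
		char attrs[64];
	} req;
	struct sockaddr_nl sa = { .nl_family = AF_NETLINK };
	struct rtattr *rta;
	int fd, ret;

	memset(&req, 0, sizeof(req));
	req.nh.nlmsg_len = NLMSG_LENGTH(sizeof(struct ndmsg));
	req.nh.nlmsg_type = RTM_NEWNEIGH;
	req.nh.nlmsg_flags = NLM_F_REQUEST | NLM_F_CREATE | NLM_F_ACK;
	req.ndm.ndm_family = AF_BRIDGE;
	req.ndm.ndm_ifindex = port_ifindex;
	req.ndm.ndm_state = NUD_PERMANENT;
	req.ndm.ndm_flags = NTF_MASTER;

	/* NDA_LLADDR */
	rta = (struct rtattr *)((char *)&req + NLMSG_ALIGN(req.nh.nlmsg_len));
	rta->rta_type = NDA_LLADDR;
	rta->rta_len = RTA_LENGTH(ETH_ALEN);
	memcpy(RTA_DATA(rta), mac, ETH_ALEN);
	req.nh.nlmsg_len = NLMSG_ALIGN(req.nh.nlmsg_len) + RTA_ALIGN(rta->rta_len);

	fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);
	if (fd < 0)
		return -1;
	ret = sendto(fd, &req, req.nh.nlmsg_len, 0,
		     (struct sockaddr *)&sa, sizeof(sa));
	close(fd);
	return ret < 0 ? -1 : 0;
}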
4340
a986967e 4341/*
090096bf
VY
4342 * ndo_dflt_fdb_del - default netdevice operation to delete an FDB entry
4343 */
4344int ndo_dflt_fdb_del(struct ndmsg *ndm,
4345 struct nlattr *tb[],
4346 struct net_device *dev,
f6f6424b 4347 const unsigned char *addr, u16 vid)
090096bf 4348{
c8a89c4a 4349 int err = -EINVAL;
090096bf
VY
4350
 4351 /* If aging addresses are supported, the device will need to
 4352 * implement its own handler for this.
4353 */
64535993 4354 if (!(ndm->ndm_state & NUD_PERMANENT)) {
78ecc890 4355 netdev_info(dev, "default FDB implementation only supports local addresses\n");
c8a89c4a 4356 return err;
090096bf
VY
4357 }
4358
4359 if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr))
4360 err = dev_uc_del(dev, addr);
4361 else if (is_multicast_ether_addr(addr))
4362 err = dev_mc_del(dev, addr);
090096bf
VY
4363
4364 return err;
4365}
4366EXPORT_SYMBOL(ndo_dflt_fdb_del);
4367
c21ef3e3
DA
4368static int rtnl_fdb_del(struct sk_buff *skb, struct nlmsghdr *nlh,
4369 struct netlink_ext_ack *extack)
77162022 4370{
9e834259 4371 bool del_bulk = !!(nlh->nlmsg_flags & NLM_F_BULK);
77162022 4372 struct net *net = sock_net(skb->sk);
9e834259 4373 const struct net_device_ops *ops;
77162022 4374 struct ndmsg *ndm;
1690be63 4375 struct nlattr *tb[NDA_MAX+1];
77162022 4376 struct net_device *dev;
9e834259 4377 __u8 *addr = NULL;
7d311801 4378 int err;
f6f6424b 4379 u16 vid;
77162022 4380
90f62cf3 4381 if (!netlink_capable(skb, CAP_NET_ADMIN))
1690be63
VY
4382 return -EPERM;
4383
9e834259
NA
4384 if (!del_bulk) {
4385 err = nlmsg_parse_deprecated(nlh, sizeof(*ndm), tb, NDA_MAX,
4386 NULL, extack);
4387 } else {
38985e8c
AC
4388 /* For bulk delete, the drivers will parse the message with
4389 * policy.
4390 */
4391 err = nlmsg_parse(nlh, sizeof(*ndm), tb, NDA_MAX, NULL, extack);
9e834259 4392 }
1690be63
VY
4393 if (err < 0)
4394 return err;
77162022
JF
4395
4396 ndm = nlmsg_data(nlh);
4397 if (ndm->ndm_ifindex == 0) {
b88d12e4 4398 NL_SET_ERR_MSG(extack, "invalid ifindex");
77162022
JF
4399 return -EINVAL;
4400 }
4401
4402 dev = __dev_get_by_index(net, ndm->ndm_ifindex);
4403 if (dev == NULL) {
b88d12e4 4404 NL_SET_ERR_MSG(extack, "unknown ifindex");
77162022
JF
4405 return -ENODEV;
4406 }
4407
9e834259
NA
4408 if (!del_bulk) {
4409 if (!tb[NDA_LLADDR] || nla_len(tb[NDA_LLADDR]) != ETH_ALEN) {
4410 NL_SET_ERR_MSG(extack, "invalid address");
4411 return -EINVAL;
4412 }
4413 addr = nla_data(tb[NDA_LLADDR]);
38985e8c
AC
4414
4415 err = fdb_vid_parse(tb[NDA_VLAN], &vid, extack);
4416 if (err)
4417 return err;
1690be63
VY
4418 }
4419
da715775
IS
4420 if (dev->type != ARPHRD_ETHER) {
4421 NL_SET_ERR_MSG(extack, "FDB delete only supported for Ethernet devices");
4422 return -EINVAL;
4423 }
4424
77162022
JF
4425 err = -EOPNOTSUPP;
4426
 4427 /* Support fdb on the master device - the net/bridge default case */
4428 if ((!ndm->ndm_flags || ndm->ndm_flags & NTF_MASTER) &&
2e92a2d0 4429 netif_is_bridge_port(dev)) {
898e5061 4430 struct net_device *br_dev = netdev_master_upper_dev_get(dev);
77162022 4431
9e834259
NA
4432 ops = br_dev->netdev_ops;
4433 if (!del_bulk) {
4434 if (ops->ndo_fdb_del)
ca4567f1 4435 err = ops->ndo_fdb_del(ndm, tb, dev, addr, vid, extack);
9e834259
NA
4436 } else {
4437 if (ops->ndo_fdb_del_bulk)
38985e8c 4438 err = ops->ndo_fdb_del_bulk(nlh, dev, extack);
9e834259 4439 }
77162022
JF
4440
4441 if (err)
4442 goto out;
4443 else
4444 ndm->ndm_flags &= ~NTF_MASTER;
4445 }
4446
4447 /* Embedded bridge, macvlan, and any other device support */
090096bf 4448 if (ndm->ndm_flags & NTF_SELF) {
9e834259
NA
4449 ops = dev->netdev_ops;
4450 if (!del_bulk) {
4451 if (ops->ndo_fdb_del)
ca4567f1 4452 err = ops->ndo_fdb_del(ndm, tb, dev, addr, vid, extack);
9e834259
NA
4453 else
4454 err = ndo_dflt_fdb_del(ndm, tb, dev, addr, vid);
4455 } else {
4456 /* in case err was cleared by NTF_MASTER call */
4457 err = -EOPNOTSUPP;
4458 if (ops->ndo_fdb_del_bulk)
38985e8c 4459 err = ops->ndo_fdb_del_bulk(nlh, dev, extack);
9e834259 4460 }
77162022 4461
3ff661c3 4462 if (!err) {
9e834259
NA
4463 if (!del_bulk)
4464 rtnl_fdb_notify(dev, addr, vid, RTM_DELNEIGH,
4465 ndm->ndm_state);
77162022 4466 ndm->ndm_flags &= ~NTF_SELF;
3ff661c3 4467 }
77162022
JF
4468 }
4469out:
4470 return err;
4471}
4472
d83b0603
JF
4473static int nlmsg_populate_fdb(struct sk_buff *skb,
4474 struct netlink_callback *cb,
4475 struct net_device *dev,
4476 int *idx,
4477 struct netdev_hw_addr_list *list)
4478{
4479 struct netdev_hw_addr *ha;
4480 int err;
15e47304 4481 u32 portid, seq;
d83b0603 4482
15e47304 4483 portid = NETLINK_CB(cb->skb).portid;
d83b0603
JF
4484 seq = cb->nlh->nlmsg_seq;
4485
4486 list_for_each_entry(ha, &list->list, list) {
d297653d 4487 if (*idx < cb->args[2])
d83b0603
JF
4488 goto skip;
4489
1e53d5bb 4490 err = nlmsg_populate_fdb_fill(skb, dev, ha->addr, 0,
a7a558fe 4491 portid, seq,
1c104a6b 4492 RTM_NEWNEIGH, NTF_SELF,
b3379041 4493 NLM_F_MULTI, NUD_PERMANENT);
d83b0603
JF
4494 if (err < 0)
4495 return err;
4496skip:
4497 *idx += 1;
4498 }
4499 return 0;
4500}
4501
4502/**
2c53040f 4503 * ndo_dflt_fdb_dump - default netdevice operation to dump an FDB table.
a986967e
BVA
4504 * @skb: socket buffer to store message in
4505 * @cb: netlink callback
d83b0603 4506 * @dev: netdevice
a986967e
BVA
4507 * @filter_dev: ignored
4508 * @idx: the number of FDB table entries dumped is added to *@idx
d83b0603
JF
4509 *
4510 * Default netdevice operation to dump the existing unicast address list.
91f3e7b1 4511 * Returns number of addresses from list put in skb.
d83b0603
JF
4512 */
4513int ndo_dflt_fdb_dump(struct sk_buff *skb,
4514 struct netlink_callback *cb,
4515 struct net_device *dev,
5d5eacb3 4516 struct net_device *filter_dev,
d297653d 4517 int *idx)
d83b0603
JF
4518{
4519 int err;
4520
68883893
ED
4521 if (dev->type != ARPHRD_ETHER)
4522 return -EINVAL;
4523
d83b0603 4524 netif_addr_lock_bh(dev);
d297653d 4525 err = nlmsg_populate_fdb(skb, cb, dev, idx, &dev->uc);
d83b0603
JF
4526 if (err)
4527 goto out;
2934c9db 4528 err = nlmsg_populate_fdb(skb, cb, dev, idx, &dev->mc);
d83b0603
JF
4529out:
4530 netif_addr_unlock_bh(dev);
d297653d 4531 return err;
d83b0603
JF
4532}
4533EXPORT_SYMBOL(ndo_dflt_fdb_dump);
4534
8c6e137f
DA
4535static int valid_fdb_dump_strict(const struct nlmsghdr *nlh,
4536 int *br_idx, int *brport_idx,
4537 struct netlink_ext_ack *extack)
4538{
4539 struct nlattr *tb[NDA_MAX + 1];
4540 struct ndmsg *ndm;
4541 int err, i;
4542
4543 if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ndm))) {
4544 NL_SET_ERR_MSG(extack, "Invalid header for fdb dump request");
4545 return -EINVAL;
4546 }
4547
4548 ndm = nlmsg_data(nlh);
4549 if (ndm->ndm_pad1 || ndm->ndm_pad2 || ndm->ndm_state ||
4550 ndm->ndm_flags || ndm->ndm_type) {
8b73018f 4551 NL_SET_ERR_MSG(extack, "Invalid values in header for fdb dump request");
8c6e137f
DA
4552 return -EINVAL;
4553 }
4554
8cb08174
JB
4555 err = nlmsg_parse_deprecated_strict(nlh, sizeof(struct ndmsg), tb,
4556 NDA_MAX, NULL, extack);
8c6e137f
DA
4557 if (err < 0)
4558 return err;
4559
4560 *brport_idx = ndm->ndm_ifindex;
4561 for (i = 0; i <= NDA_MAX; ++i) {
4562 if (!tb[i])
4563 continue;
4564
4565 switch (i) {
4566 case NDA_IFINDEX:
4567 if (nla_len(tb[i]) != sizeof(u32)) {
4568 NL_SET_ERR_MSG(extack, "Invalid IFINDEX attribute in fdb dump request");
4569 return -EINVAL;
4570 }
4571 *brport_idx = nla_get_u32(tb[NDA_IFINDEX]);
4572 break;
4573 case NDA_MASTER:
4574 if (nla_len(tb[i]) != sizeof(u32)) {
4575 NL_SET_ERR_MSG(extack, "Invalid MASTER attribute in fdb dump request");
4576 return -EINVAL;
4577 }
4578 *br_idx = nla_get_u32(tb[NDA_MASTER]);
4579 break;
4580 default:
4581 NL_SET_ERR_MSG(extack, "Unsupported attribute in fdb dump request");
4582 return -EINVAL;
4583 }
4584 }
4585
4586 return 0;
4587}
4588
8dfbda19
DA
4589static int valid_fdb_dump_legacy(const struct nlmsghdr *nlh,
4590 int *br_idx, int *brport_idx,
4591 struct netlink_ext_ack *extack)
77162022 4592{
5e6d2435 4593 struct nlattr *tb[IFLA_MAX+1];
8dfbda19 4594 int err;
5e6d2435 4595
bd961c9b
MFO
4596 /* A hack to preserve kernel<->userspace interface.
 4597 * Before Linux v4.12 this code accepted ndmsg (sent by iproute2 since v3.3.0).
 4598 * However, ndmsg is shorter than ifinfomsg, so nlmsg_parse() bails.
4599 * So, check for ndmsg with an optional u32 attribute (not used here).
4600 * Fortunately these sizes don't conflict with the size of ifinfomsg
4601 * with an optional attribute.
4602 */
8dfbda19
DA
4603 if (nlmsg_len(nlh) != sizeof(struct ndmsg) &&
4604 (nlmsg_len(nlh) != sizeof(struct ndmsg) +
bd961c9b 4605 nla_attr_size(sizeof(u32)))) {
4565d7e5
DA
4606 struct ifinfomsg *ifm;
4607
8cb08174
JB
4608 err = nlmsg_parse_deprecated(nlh, sizeof(struct ifinfomsg),
4609 tb, IFLA_MAX, ifla_policy,
4610 extack);
bd961c9b
MFO
4611 if (err < 0) {
4612 return -EINVAL;
4613 } else if (err == 0) {
4614 if (tb[IFLA_MASTER])
8dfbda19 4615 *br_idx = nla_get_u32(tb[IFLA_MASTER]);
bd961c9b 4616 }
5e6d2435 4617
4565d7e5 4618 ifm = nlmsg_data(nlh);
8dfbda19 4619 *brport_idx = ifm->ifi_index;
bd961c9b 4620 }
8dfbda19
DA
4621 return 0;
4622}
4623
4624static int rtnl_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb)
4625{
4626 struct net_device *dev;
4627 struct net_device *br_dev = NULL;
4628 const struct net_device_ops *ops = NULL;
4629 const struct net_device_ops *cops = NULL;
4630 struct net *net = sock_net(skb->sk);
4631 struct hlist_head *head;
4632 int brport_idx = 0;
4633 int br_idx = 0;
4634 int h, s_h;
4635 int idx = 0, s_idx;
4636 int err = 0;
4637 int fidx = 0;
4638
8c6e137f
DA
4639 if (cb->strict_check)
4640 err = valid_fdb_dump_strict(cb->nlh, &br_idx, &brport_idx,
4641 cb->extack);
4642 else
4643 err = valid_fdb_dump_legacy(cb->nlh, &br_idx, &brport_idx,
4644 cb->extack);
8dfbda19
DA
4645 if (err < 0)
4646 return err;
5e6d2435
JHS
4647
4648 if (br_idx) {
4649 br_dev = __dev_get_by_index(net, br_idx);
4650 if (!br_dev)
4651 return -ENODEV;
4652
4653 ops = br_dev->netdev_ops;
5e6d2435
JHS
4654 }
4655
d297653d
RP
4656 s_h = cb->args[0];
4657 s_idx = cb->args[1];
5e6d2435 4658
d297653d
RP
4659 for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
4660 idx = 0;
4661 head = &net->dev_index_head[h];
4662 hlist_for_each_entry(dev, head, index_hlist) {
5e6d2435 4663
d297653d 4664 if (brport_idx && (dev->ifindex != brport_idx))
5e6d2435
JHS
4665 continue;
4666
d297653d 4667 if (!br_idx) { /* user did not specify a specific bridge */
2e92a2d0 4668 if (netif_is_bridge_port(dev)) {
d297653d
RP
4669 br_dev = netdev_master_upper_dev_get(dev);
4670 cops = br_dev->netdev_ops;
4671 }
4672 } else {
4673 if (dev != br_dev &&
2e92a2d0 4674 !netif_is_bridge_port(dev))
d297653d 4675 continue;
5e6d2435 4676
d297653d 4677 if (br_dev != netdev_master_upper_dev_get(dev) &&
254ec036 4678 !netif_is_bridge_master(dev))
d297653d
RP
4679 continue;
4680 cops = ops;
4681 }
77162022 4682
d297653d
RP
4683 if (idx < s_idx)
4684 goto cont;
77162022 4685
2e92a2d0 4686 if (netif_is_bridge_port(dev)) {
d297653d
RP
4687 if (cops && cops->ndo_fdb_dump) {
4688 err = cops->ndo_fdb_dump(skb, cb,
4689 br_dev, dev,
4690 &fidx);
4691 if (err == -EMSGSIZE)
4692 goto out;
4693 }
4694 }
5e6d2435 4695
d297653d
RP
4696 if (dev->netdev_ops->ndo_fdb_dump)
4697 err = dev->netdev_ops->ndo_fdb_dump(skb, cb,
4698 dev, NULL,
4699 &fidx);
4700 else
4701 err = ndo_dflt_fdb_dump(skb, cb, dev, NULL,
4702 &fidx);
4703 if (err == -EMSGSIZE)
4704 goto out;
4705
4706 cops = NULL;
4707
4708 /* reset fdb offset to 0 for rest of the interfaces */
4709 cb->args[2] = 0;
4710 fidx = 0;
4711cont:
4712 idx++;
4713 }
77162022 4714 }
77162022 4715
d297653d
RP
4716out:
4717 cb->args[0] = h;
4718 cb->args[1] = idx;
4719 cb->args[2] = fidx;
4720
77162022
JF
4721 return skb->len;
4722}
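/*
 * Editorial example, not part of rtnetlink.c: a strict-checking FDB dump
 * request as accepted by valid_fdb_dump_strict() above: a plain ndmsg
 * header (optionally narrowed with NDA_IFINDEX or NDA_MASTER) plus
 * NLM_F_DUMP.  The kernel answers with a multipart stream of
 * RTM_NEWNEIGH messages built by nlmsg_populate_fdb_fill(), terminated
 * by NLMSG_DONE; this sketch merely counts the entries.
 */
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/netlink.h>
#include <linux/rtnetlink.h>
#include <linux/neighbour.h>

static int fdb_dump_count(void)
{
	struct {
		struct nlmsghdr nh;
		struct ndmsg ndm;
	} req = {
		.nh.nlmsg_len = NLMSG_LENGTH(sizeof(struct ndmsg)),
		.nh.nlmsg_type = RTM_GETNEIGH,
		.nh.nlmsg_flags = NLM_F_REQUEST | NLM_F_DUMP,
		.ndm.ndm_family = AF_BRIDGE,
	};
	struct sockaddr_nl sa = { .nl_family = AF_NETLINK };
	char buf[16384];
	int fd, count = 0, done = 0;

	fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);
	if (fd < 0)
		return -1;
	if (sendto(fd, &req, req.nh.nlmsg_len, 0,
		   (struct sockaddr *)&sa, sizeof(sa)) < 0) {
		count = -1;
		goto out;
	}

	while (!done) {
		ssize_t len = recv(fd, buf, sizeof(buf), 0);
		struct nlmsghdr *nh;

		if (len <= 0)
			break;
		for (nh = (struct nlmsghdr *)buf; NLMSG_OK(nh, len);
		     nh = NLMSG_NEXT(nh, len)) {
			if (nh->nlmsg_type == NLMSG_DONE) {
				done = 1;
				break;
			}
			if (nh->nlmsg_type == RTM_NEWNEIGH)
				count++;
		}
	}
out:
	close(fd);
	return count;
}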
4723
5b2f94b2
RP
4724static int valid_fdb_get_strict(const struct nlmsghdr *nlh,
4725 struct nlattr **tb, u8 *ndm_flags,
4726 int *br_idx, int *brport_idx, u8 **addr,
4727 u16 *vid, struct netlink_ext_ack *extack)
4728{
4729 struct ndmsg *ndm;
4730 int err, i;
4731
4732 if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ndm))) {
4733 NL_SET_ERR_MSG(extack, "Invalid header for fdb get request");
4734 return -EINVAL;
4735 }
4736
4737 ndm = nlmsg_data(nlh);
4738 if (ndm->ndm_pad1 || ndm->ndm_pad2 || ndm->ndm_state ||
4739 ndm->ndm_type) {
4740 NL_SET_ERR_MSG(extack, "Invalid values in header for fdb get request");
4741 return -EINVAL;
4742 }
4743
4744 if (ndm->ndm_flags & ~(NTF_MASTER | NTF_SELF)) {
4745 NL_SET_ERR_MSG(extack, "Invalid flags in header for fdb get request");
4746 return -EINVAL;
4747 }
4748
8cb08174
JB
4749 err = nlmsg_parse_deprecated_strict(nlh, sizeof(struct ndmsg), tb,
4750 NDA_MAX, nda_policy, extack);
5b2f94b2
RP
4751 if (err < 0)
4752 return err;
4753
4754 *ndm_flags = ndm->ndm_flags;
4755 *brport_idx = ndm->ndm_ifindex;
4756 for (i = 0; i <= NDA_MAX; ++i) {
4757 if (!tb[i])
4758 continue;
4759
4760 switch (i) {
4761 case NDA_MASTER:
4762 *br_idx = nla_get_u32(tb[i]);
4763 break;
4764 case NDA_LLADDR:
4765 if (nla_len(tb[i]) != ETH_ALEN) {
4766 NL_SET_ERR_MSG(extack, "Invalid address in fdb get request");
4767 return -EINVAL;
4768 }
4769 *addr = nla_data(tb[i]);
4770 break;
4771 case NDA_VLAN:
4772 err = fdb_vid_parse(tb[i], vid, extack);
4773 if (err)
4774 return err;
4775 break;
4776 case NDA_VNI:
4777 break;
4778 default:
4779 NL_SET_ERR_MSG(extack, "Unsupported attribute in fdb get request");
4780 return -EINVAL;
4781 }
4782 }
4783
4784 return 0;
4785}
4786
4787static int rtnl_fdb_get(struct sk_buff *in_skb, struct nlmsghdr *nlh,
4788 struct netlink_ext_ack *extack)
4789{
4790 struct net_device *dev = NULL, *br_dev = NULL;
4791 const struct net_device_ops *ops = NULL;
4792 struct net *net = sock_net(in_skb->sk);
4793 struct nlattr *tb[NDA_MAX + 1];
4794 struct sk_buff *skb;
4795 int brport_idx = 0;
4796 u8 ndm_flags = 0;
4797 int br_idx = 0;
4798 u8 *addr = NULL;
4799 u16 vid = 0;
4800 int err;
4801
4802 err = valid_fdb_get_strict(nlh, tb, &ndm_flags, &br_idx,
4803 &brport_idx, &addr, &vid, extack);
4804 if (err < 0)
4805 return err;
4806
f989d03e
NA
4807 if (!addr) {
4808 NL_SET_ERR_MSG(extack, "Missing lookup address for fdb get request");
4809 return -EINVAL;
4810 }
4811
5b2f94b2
RP
4812 if (brport_idx) {
4813 dev = __dev_get_by_index(net, brport_idx);
4814 if (!dev) {
4815 NL_SET_ERR_MSG(extack, "Unknown device ifindex");
4816 return -ENODEV;
4817 }
4818 }
4819
4820 if (br_idx) {
4821 if (dev) {
4822 NL_SET_ERR_MSG(extack, "Master and device are mutually exclusive");
4823 return -EINVAL;
4824 }
4825
4826 br_dev = __dev_get_by_index(net, br_idx);
4827 if (!br_dev) {
4828 NL_SET_ERR_MSG(extack, "Invalid master ifindex");
4829 return -EINVAL;
4830 }
4831 ops = br_dev->netdev_ops;
4832 }
4833
4834 if (dev) {
4835 if (!ndm_flags || (ndm_flags & NTF_MASTER)) {
2e92a2d0 4836 if (!netif_is_bridge_port(dev)) {
5b2f94b2
RP
4837 NL_SET_ERR_MSG(extack, "Device is not a bridge port");
4838 return -EINVAL;
4839 }
4840 br_dev = netdev_master_upper_dev_get(dev);
4841 if (!br_dev) {
4842 NL_SET_ERR_MSG(extack, "Master of device not found");
4843 return -EINVAL;
4844 }
4845 ops = br_dev->netdev_ops;
4846 } else {
4847 if (!(ndm_flags & NTF_SELF)) {
4848 NL_SET_ERR_MSG(extack, "Missing NTF_SELF");
4849 return -EINVAL;
4850 }
4851 ops = dev->netdev_ops;
4852 }
4853 }
4854
4855 if (!br_dev && !dev) {
4856 NL_SET_ERR_MSG(extack, "No device specified");
4857 return -ENODEV;
4858 }
4859
4860 if (!ops || !ops->ndo_fdb_get) {
4861 NL_SET_ERR_MSG(extack, "Fdb get operation not supported by device");
4862 return -EOPNOTSUPP;
4863 }
4864
4865 skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
4866 if (!skb)
4867 return -ENOBUFS;
4868
4869 if (br_dev)
4870 dev = br_dev;
4871 err = ops->ndo_fdb_get(skb, tb, dev, addr, vid,
4872 NETLINK_CB(in_skb).portid,
4873 nlh->nlmsg_seq, extack);
4874 if (err)
4875 goto out;
4876
4877 return rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid);
4878out:
4879 kfree_skb(skb);
4880 return err;
4881}
4882
2c3c031c
SF
4883static int brport_nla_put_flag(struct sk_buff *skb, u32 flags, u32 mask,
4884 unsigned int attrnum, unsigned int flag)
4885{
4886 if (mask & flag)
4887 return nla_put_u8(skb, attrnum, !!(flags & flag));
4888 return 0;
4889}
4890
815cccbf 4891int ndo_dflt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
2c3c031c 4892 struct net_device *dev, u16 mode,
7d4f8d87
SF
4893 u32 flags, u32 mask, int nlflags,
4894 u32 filter_mask,
4895 int (*vlan_fill)(struct sk_buff *skb,
4896 struct net_device *dev,
4897 u32 filter_mask))
815cccbf
JF
4898{
4899 struct nlmsghdr *nlh;
4900 struct ifinfomsg *ifm;
4901 struct nlattr *br_afspec;
2c3c031c 4902 struct nlattr *protinfo;
815cccbf 4903 u8 operstate = netif_running(dev) ? dev->operstate : IF_OPER_DOWN;
898e5061 4904 struct net_device *br_dev = netdev_master_upper_dev_get(dev);
7d4f8d87 4905 int err = 0;
815cccbf 4906
46c264da 4907 nlh = nlmsg_put(skb, pid, seq, RTM_NEWLINK, sizeof(*ifm), nlflags);
815cccbf
JF
4908 if (nlh == NULL)
4909 return -EMSGSIZE;
4910
4911 ifm = nlmsg_data(nlh);
4912 ifm->ifi_family = AF_BRIDGE;
4913 ifm->__ifi_pad = 0;
4914 ifm->ifi_type = dev->type;
4915 ifm->ifi_index = dev->ifindex;
4916 ifm->ifi_flags = dev_get_flags(dev);
4917 ifm->ifi_change = 0;
4918
4919
4920 if (nla_put_string(skb, IFLA_IFNAME, dev->name) ||
4921 nla_put_u32(skb, IFLA_MTU, dev->mtu) ||
4922 nla_put_u8(skb, IFLA_OPERSTATE, operstate) ||
898e5061
JP
4923 (br_dev &&
4924 nla_put_u32(skb, IFLA_MASTER, br_dev->ifindex)) ||
815cccbf
JF
4925 (dev->addr_len &&
4926 nla_put(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr)) ||
a54acb3a
ND
4927 (dev->ifindex != dev_get_iflink(dev) &&
4928 nla_put_u32(skb, IFLA_LINK, dev_get_iflink(dev))))
815cccbf
JF
4929 goto nla_put_failure;
4930
ae0be8de 4931 br_afspec = nla_nest_start_noflag(skb, IFLA_AF_SPEC);
815cccbf
JF
4932 if (!br_afspec)
4933 goto nla_put_failure;
4934
1d460b98 4935 if (nla_put_u16(skb, IFLA_BRIDGE_FLAGS, BRIDGE_FLAGS_SELF)) {
815cccbf
JF
4936 nla_nest_cancel(skb, br_afspec);
4937 goto nla_put_failure;
4938 }
1d460b98
RP
4939
4940 if (mode != BRIDGE_MODE_UNDEF) {
4941 if (nla_put_u16(skb, IFLA_BRIDGE_MODE, mode)) {
4942 nla_nest_cancel(skb, br_afspec);
4943 goto nla_put_failure;
4944 }
4945 }
7d4f8d87
SF
4946 if (vlan_fill) {
4947 err = vlan_fill(skb, dev, filter_mask);
4948 if (err) {
4949 nla_nest_cancel(skb, br_afspec);
4950 goto nla_put_failure;
4951 }
4952 }
815cccbf
JF
4953 nla_nest_end(skb, br_afspec);
4954
ae0be8de 4955 protinfo = nla_nest_start(skb, IFLA_PROTINFO);
2c3c031c
SF
4956 if (!protinfo)
4957 goto nla_put_failure;
4958
4959 if (brport_nla_put_flag(skb, flags, mask,
4960 IFLA_BRPORT_MODE, BR_HAIRPIN_MODE) ||
4961 brport_nla_put_flag(skb, flags, mask,
4962 IFLA_BRPORT_GUARD, BR_BPDU_GUARD) ||
4963 brport_nla_put_flag(skb, flags, mask,
4964 IFLA_BRPORT_FAST_LEAVE,
4965 BR_MULTICAST_FAST_LEAVE) ||
4966 brport_nla_put_flag(skb, flags, mask,
4967 IFLA_BRPORT_PROTECT, BR_ROOT_BLOCK) ||
4968 brport_nla_put_flag(skb, flags, mask,
4969 IFLA_BRPORT_LEARNING, BR_LEARNING) ||
4970 brport_nla_put_flag(skb, flags, mask,
4971 IFLA_BRPORT_LEARNING_SYNC, BR_LEARNING_SYNC) ||
4972 brport_nla_put_flag(skb, flags, mask,
4973 IFLA_BRPORT_UNICAST_FLOOD, BR_FLOOD) ||
4974 brport_nla_put_flag(skb, flags, mask,
583cb0b4
JW
4975 IFLA_BRPORT_PROXYARP, BR_PROXYARP) ||
4976 brport_nla_put_flag(skb, flags, mask,
4977 IFLA_BRPORT_MCAST_FLOOD, BR_MCAST_FLOOD) ||
4978 brport_nla_put_flag(skb, flags, mask,
4979 IFLA_BRPORT_BCAST_FLOOD, BR_BCAST_FLOOD)) {
2c3c031c
SF
4980 nla_nest_cancel(skb, protinfo);
4981 goto nla_put_failure;
4982 }
4983
4984 nla_nest_end(skb, protinfo);
4985
053c095a
JB
4986 nlmsg_end(skb, nlh);
4987 return 0;
815cccbf
JF
4988nla_put_failure:
4989 nlmsg_cancel(skb, nlh);
7d4f8d87 4990 return err ? err : -EMSGSIZE;
815cccbf 4991}
7d4f8d87 4992EXPORT_SYMBOL_GPL(ndo_dflt_bridge_getlink);
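/*
 * Editorial example, not part of rtnetlink.c: a sketch of how a driver
 * with an embedded switch typically answers AF_BRIDGE RTM_GETLINK by
 * delegating to ndo_dflt_bridge_getlink() above.  "foo" is a made-up
 * driver; a real implementation would report its actual hardware mode
 * and port flags instead of hard-coding BRIDGE_MODE_VEB and zeros.
 */
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/if_bridge.h>

static int foo_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
				  struct net_device *dev, u32 filter_mask,
				  int nlflags)
{
	u16 mode = BRIDGE_MODE_VEB;	/* assumption: device runs as a VEB */

	return ndo_dflt_bridge_getlink(skb, pid, seq, dev, mode, 0, 0,
				       nlflags, filter_mask, NULL);
}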
815cccbf 4993
2d011be8
DA
4994static int valid_bridge_getlink_req(const struct nlmsghdr *nlh,
4995 bool strict_check, u32 *filter_mask,
4996 struct netlink_ext_ack *extack)
4997{
4998 struct nlattr *tb[IFLA_MAX+1];
4999 int err, i;
5000
5001 if (strict_check) {
5002 struct ifinfomsg *ifm;
5003
5004 if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ifm))) {
5005 NL_SET_ERR_MSG(extack, "Invalid header for bridge link dump");
5006 return -EINVAL;
5007 }
5008
5009 ifm = nlmsg_data(nlh);
5010 if (ifm->__ifi_pad || ifm->ifi_type || ifm->ifi_flags ||
5011 ifm->ifi_change || ifm->ifi_index) {
5012 NL_SET_ERR_MSG(extack, "Invalid values in header for bridge link dump request");
5013 return -EINVAL;
5014 }
5015
8cb08174
JB
5016 err = nlmsg_parse_deprecated_strict(nlh,
5017 sizeof(struct ifinfomsg),
5018 tb, IFLA_MAX, ifla_policy,
5019 extack);
2d011be8 5020 } else {
8cb08174
JB
5021 err = nlmsg_parse_deprecated(nlh, sizeof(struct ifinfomsg),
5022 tb, IFLA_MAX, ifla_policy,
5023 extack);
2d011be8
DA
5024 }
5025 if (err < 0)
5026 return err;
5027
5028 /* new attributes should only be added with strict checking */
5029 for (i = 0; i <= IFLA_MAX; ++i) {
5030 if (!tb[i])
5031 continue;
5032
5033 switch (i) {
5034 case IFLA_EXT_MASK:
5035 *filter_mask = nla_get_u32(tb[i]);
5036 break;
5037 default:
5038 if (strict_check) {
5039 NL_SET_ERR_MSG(extack, "Unsupported attribute in bridge link dump request");
5040 return -EINVAL;
5041 }
5042 }
5043 }
5044
5045 return 0;
5046}
5047
e5a55a89
JF
5048static int rtnl_bridge_getlink(struct sk_buff *skb, struct netlink_callback *cb)
5049{
2d011be8 5050 const struct nlmsghdr *nlh = cb->nlh;
e5a55a89
JF
5051 struct net *net = sock_net(skb->sk);
5052 struct net_device *dev;
5053 int idx = 0;
5054 u32 portid = NETLINK_CB(cb->skb).portid;
2d011be8 5055 u32 seq = nlh->nlmsg_seq;
6cbdceeb 5056 u32 filter_mask = 0;
d64f69b0 5057 int err;
6cbdceeb 5058
2d011be8
DA
5059 err = valid_bridge_getlink_req(nlh, cb->strict_check, &filter_mask,
5060 cb->extack);
5061 if (err < 0 && cb->strict_check)
5062 return err;
e5a55a89
JF
5063
5064 rcu_read_lock();
5065 for_each_netdev_rcu(net, dev) {
5066 const struct net_device_ops *ops = dev->netdev_ops;
898e5061 5067 struct net_device *br_dev = netdev_master_upper_dev_get(dev);
e5a55a89 5068
898e5061 5069 if (br_dev && br_dev->netdev_ops->ndo_bridge_getlink) {
d64f69b0
RP
5070 if (idx >= cb->args[0]) {
5071 err = br_dev->netdev_ops->ndo_bridge_getlink(
5072 skb, portid, seq, dev,
5073 filter_mask, NLM_F_MULTI);
f6c5775f
DA
5074 if (err < 0 && err != -EOPNOTSUPP) {
5075 if (likely(skb->len))
5076 break;
5077
5078 goto out_err;
5079 }
d64f69b0 5080 }
25b1e679 5081 idx++;
e5a55a89
JF
5082 }
5083
5084 if (ops->ndo_bridge_getlink) {
d64f69b0
RP
5085 if (idx >= cb->args[0]) {
5086 err = ops->ndo_bridge_getlink(skb, portid,
5087 seq, dev,
5088 filter_mask,
5089 NLM_F_MULTI);
f6c5775f
DA
5090 if (err < 0 && err != -EOPNOTSUPP) {
5091 if (likely(skb->len))
5092 break;
5093
5094 goto out_err;
5095 }
d64f69b0 5096 }
25b1e679 5097 idx++;
e5a55a89
JF
5098 }
5099 }
f6c5775f
DA
5100 err = skb->len;
5101out_err:
e5a55a89
JF
5102 rcu_read_unlock();
5103 cb->args[0] = idx;
5104
f6c5775f 5105 return err;
e5a55a89
JF
5106}
5107
2469ffd7
JF
5108static inline size_t bridge_nlmsg_size(void)
5109{
5110 return NLMSG_ALIGN(sizeof(struct ifinfomsg))
5111 + nla_total_size(IFNAMSIZ) /* IFLA_IFNAME */
5112 + nla_total_size(MAX_ADDR_LEN) /* IFLA_ADDRESS */
5113 + nla_total_size(sizeof(u32)) /* IFLA_MASTER */
5114 + nla_total_size(sizeof(u32)) /* IFLA_MTU */
5115 + nla_total_size(sizeof(u32)) /* IFLA_LINK */
5116 + nla_total_size(sizeof(u32)) /* IFLA_OPERSTATE */
5117 + nla_total_size(sizeof(u8)) /* IFLA_PROTINFO */
5118 + nla_total_size(sizeof(struct nlattr)) /* IFLA_AF_SPEC */
5119 + nla_total_size(sizeof(u16)) /* IFLA_BRIDGE_FLAGS */
5120 + nla_total_size(sizeof(u16)); /* IFLA_BRIDGE_MODE */
5121}
5122
02dba438 5123static int rtnl_bridge_notify(struct net_device *dev)
2469ffd7
JF
5124{
5125 struct net *net = dev_net(dev);
2469ffd7
JF
5126 struct sk_buff *skb;
5127 int err = -EOPNOTSUPP;
5128
02dba438
RP
5129 if (!dev->netdev_ops->ndo_bridge_getlink)
5130 return 0;
5131
2469ffd7
JF
5132 skb = nlmsg_new(bridge_nlmsg_size(), GFP_ATOMIC);
5133 if (!skb) {
5134 err = -ENOMEM;
5135 goto errout;
5136 }
5137
46c264da 5138 err = dev->netdev_ops->ndo_bridge_getlink(skb, 0, 0, dev, 0, 0);
02dba438
RP
5139 if (err < 0)
5140 goto errout;
2469ffd7 5141
d2e381c4
IS
5142 /* Notification info is only filled for bridge ports, not the bridge
5143 * device itself. Therefore, a zero notification length is valid and
5144 * should not result in an error.
5145 */
5146 if (!skb->len)
59ccaaaa
RP
5147 goto errout;
5148
2469ffd7
JF
5149 rtnl_notify(skb, net, 0, RTNLGRP_LINK, NULL, GFP_ATOMIC);
5150 return 0;
5151errout:
5152 WARN_ON(err == -EMSGSIZE);
5153 kfree_skb(skb);
59ccaaaa
RP
5154 if (err)
5155 rtnl_set_sk_err(net, RTNLGRP_LINK, err);
2469ffd7
JF
5156 return err;
5157}
5158
c21ef3e3
DA
5159static int rtnl_bridge_setlink(struct sk_buff *skb, struct nlmsghdr *nlh,
5160 struct netlink_ext_ack *extack)
e5a55a89
JF
5161{
5162 struct net *net = sock_net(skb->sk);
5163 struct ifinfomsg *ifm;
5164 struct net_device *dev;
743ad091 5165 struct nlattr *br_spec, *attr, *br_flags_attr = NULL;
2469ffd7 5166 int rem, err = -EOPNOTSUPP;
4de8b413 5167 u16 flags = 0;
e5a55a89
JF
5168
5169 if (nlmsg_len(nlh) < sizeof(*ifm))
5170 return -EINVAL;
5171
5172 ifm = nlmsg_data(nlh);
5173 if (ifm->ifi_family != AF_BRIDGE)
5174 return -EPFNOSUPPORT;
5175
5176 dev = __dev_get_by_index(net, ifm->ifi_index);
5177 if (!dev) {
b88d12e4 5178 NL_SET_ERR_MSG(extack, "unknown ifindex");
e5a55a89
JF
5179 return -ENODEV;
5180 }
5181
2469ffd7
JF
5182 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
5183 if (br_spec) {
5184 nla_for_each_nested(attr, br_spec, rem) {
743ad091 5185 if (nla_type(attr) == IFLA_BRIDGE_FLAGS && !br_flags_attr) {
6e8d1c55
TG
5186 if (nla_len(attr) < sizeof(flags))
5187 return -EINVAL;
5188
743ad091 5189 br_flags_attr = attr;
2469ffd7 5190 flags = nla_get_u16(attr);
d73ef2d6
LM
5191 }
5192
5193 if (nla_type(attr) == IFLA_BRIDGE_MODE) {
5194 if (nla_len(attr) < sizeof(u16))
5195 return -EINVAL;
2469ffd7
JF
5196 }
5197 }
5198 }
5199
5200 if (!flags || (flags & BRIDGE_FLAGS_MASTER)) {
898e5061
JP
5201 struct net_device *br_dev = netdev_master_upper_dev_get(dev);
5202
5203 if (!br_dev || !br_dev->netdev_ops->ndo_bridge_setlink) {
2469ffd7
JF
5204 err = -EOPNOTSUPP;
5205 goto out;
5206 }
5207
2fd527b7
PM
5208 err = br_dev->netdev_ops->ndo_bridge_setlink(dev, nlh, flags,
5209 extack);
e5a55a89
JF
5210 if (err)
5211 goto out;
2469ffd7
JF
5212
5213 flags &= ~BRIDGE_FLAGS_MASTER;
e5a55a89
JF
5214 }
5215
2469ffd7
JF
5216 if ((flags & BRIDGE_FLAGS_SELF)) {
5217 if (!dev->netdev_ops->ndo_bridge_setlink)
5218 err = -EOPNOTSUPP;
5219 else
add511b3 5220 err = dev->netdev_ops->ndo_bridge_setlink(dev, nlh,
2fd527b7
PM
5221 flags,
5222 extack);
02dba438 5223 if (!err) {
2469ffd7 5224 flags &= ~BRIDGE_FLAGS_SELF;
02dba438
RP
5225
5226 /* Generate event to notify upper layer of bridge
5227 * change
5228 */
5229 err = rtnl_bridge_notify(dev);
5230 }
2469ffd7 5231 }
e5a55a89 5232
743ad091
LM
5233 if (br_flags_attr)
5234 memcpy(nla_data(br_flags_attr), &flags, sizeof(flags));
e5a55a89
JF
5235out:
5236 return err;
5237}
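/*
 * Editorial example, not part of rtnetlink.c: a sketch of the AF_BRIDGE
 * RTM_SETLINK request handled above, roughly what "bridge link set dev X
 * hwmode vepa self" sends.  IFLA_AF_SPEC carries IFLA_BRIDGE_FLAGS =
 * BRIDGE_FLAGS_SELF, so the port device's own ndo_bridge_setlink is
 * called, plus IFLA_BRIDGE_MODE.  Helper name is made up; devices
 * without ndo_bridge_setlink will return -EOPNOTSUPP.
 */
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/netlink.h>
#include <linux/rtnetlink.h>
#include <linux/if_bridge.h>

static int bridge_set_hwmode_vepa(int ifindex)
{
	struct {
		struct nlmsghdr nh;
		struct ifinfomsg ifm;
		char attrs[64];
	} req;
	struct sockaddr_nl sa = { .nl_family = AF_NETLINK };
	struct rtattr *afspec, *rta;
	__u16 flags = BRIDGE_FLAGS_SELF, mode = BRIDGE_MODE_VEPA;
	int fd, ret;

	memset(&req, 0, sizeof(req));
	req.nh.nlmsg_len = NLMSG_LENGTH(sizeof(struct ifinfomsg));
	req.nh.nlmsg_type = RTM_SETLINK;
	req.nh.nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK;
	req.ifm.ifi_family = AF_BRIDGE;
	req.ifm.ifi_index = ifindex;

	/* IFLA_AF_SPEC { IFLA_BRIDGE_FLAGS, IFLA_BRIDGE_MODE } */
	afspec = (struct rtattr *)((char *)&req + NLMSG_ALIGN(req.nh.nlmsg_len));
	afspec->rta_type = IFLA_AF_SPEC;
	afspec->rta_len = RTA_LENGTH(0);

	rta = (struct rtattr *)((char *)afspec + RTA_ALIGN(afspec->rta_len));
	rta->rta_type = IFLA_BRIDGE_FLAGS;
	rta->rta_len = RTA_LENGTH(sizeof(flags));
	memcpy(RTA_DATA(rta), &flags, sizeof(flags));
	afspec->rta_len += RTA_ALIGN(rta->rta_len);

	rta = (struct rtattr *)((char *)afspec + RTA_ALIGN(afspec->rta_len));
	rta->rta_type = IFLA_BRIDGE_MODE;
	rta->rta_len = RTA_LENGTH(sizeof(mode));
	memcpy(RTA_DATA(rta), &mode, sizeof(mode));
	afspec->rta_len += RTA_ALIGN(rta->rta_len);

	req.nh.nlmsg_len = NLMSG_ALIGN(req.nh.nlmsg_len) + RTA_ALIGN(afspec->rta_len);

	fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);
	if (fd < 0)
		return -1;
	ret = sendto(fd, &req, req.nh.nlmsg_len, 0,
		     (struct sockaddr *)&sa, sizeof(sa));
	close(fd);
	return ret < 0 ? -1 : 0;
}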
5238
c21ef3e3
DA
5239static int rtnl_bridge_dellink(struct sk_buff *skb, struct nlmsghdr *nlh,
5240 struct netlink_ext_ack *extack)
407af329
VY
5241{
5242 struct net *net = sock_net(skb->sk);
5243 struct ifinfomsg *ifm;
5244 struct net_device *dev;
5245 struct nlattr *br_spec, *attr = NULL;
5246 int rem, err = -EOPNOTSUPP;
4de8b413 5247 u16 flags = 0;
407af329
VY
5248 bool have_flags = false;
5249
5250 if (nlmsg_len(nlh) < sizeof(*ifm))
5251 return -EINVAL;
5252
5253 ifm = nlmsg_data(nlh);
5254 if (ifm->ifi_family != AF_BRIDGE)
5255 return -EPFNOSUPPORT;
5256
5257 dev = __dev_get_by_index(net, ifm->ifi_index);
5258 if (!dev) {
b88d12e4 5259 NL_SET_ERR_MSG(extack, "unknown ifindex");
407af329
VY
5260 return -ENODEV;
5261 }
5262
5263 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
5264 if (br_spec) {
e8058a49
JB
5265 nla_for_each_nested_type(attr, IFLA_BRIDGE_FLAGS, br_spec,
5266 rem) {
5267 if (nla_len(attr) < sizeof(flags))
5268 return -EINVAL;
6e8d1c55 5269
e8058a49
JB
5270 have_flags = true;
5271 flags = nla_get_u16(attr);
5272 break;
407af329
VY
5273 }
5274 }
5275
407af329
VY
5276 if (!flags || (flags & BRIDGE_FLAGS_MASTER)) {
5277 struct net_device *br_dev = netdev_master_upper_dev_get(dev);
5278
5279 if (!br_dev || !br_dev->netdev_ops->ndo_bridge_dellink) {
5280 err = -EOPNOTSUPP;
5281 goto out;
5282 }
5283
add511b3 5284 err = br_dev->netdev_ops->ndo_bridge_dellink(dev, nlh, flags);
407af329
VY
5285 if (err)
5286 goto out;
5287
5288 flags &= ~BRIDGE_FLAGS_MASTER;
5289 }
5290
5291 if ((flags & BRIDGE_FLAGS_SELF)) {
5292 if (!dev->netdev_ops->ndo_bridge_dellink)
5293 err = -EOPNOTSUPP;
5294 else
add511b3
RP
5295 err = dev->netdev_ops->ndo_bridge_dellink(dev, nlh,
5296 flags);
407af329 5297
02dba438 5298 if (!err) {
407af329 5299 flags &= ~BRIDGE_FLAGS_SELF;
02dba438
RP
5300
5301 /* Generate event to notify upper layer of bridge
5302 * change
5303 */
5304 err = rtnl_bridge_notify(dev);
5305 }
407af329
VY
5306 }
5307
5308 if (have_flags)
5309 memcpy(nla_data(attr), &flags, sizeof(flags));
407af329
VY
5310out:
5311 return err;
5312}
5313
e8872a25
NA
5314static bool stats_attr_valid(unsigned int mask, int attrid, int idxattr)
5315{
5316 return (mask & IFLA_STATS_FILTER_BIT(attrid)) &&
5317 (!idxattr || idxattr == attrid);
5318}
5319
f6e0fb81
PM
5320static bool
5321rtnl_offload_xstats_have_ndo(const struct net_device *dev, int attr_id)
69ae6ad2 5322{
f6e0fb81
PM
5323 return dev->netdev_ops &&
5324 dev->netdev_ops->ndo_has_offload_stats &&
5325 dev->netdev_ops->ndo_get_offload_stats &&
5326 dev->netdev_ops->ndo_has_offload_stats(dev, attr_id);
5327}
69ae6ad2 5328
f6e0fb81
PM
5329static unsigned int
5330rtnl_offload_xstats_get_size_ndo(const struct net_device *dev, int attr_id)
5331{
5332 return rtnl_offload_xstats_have_ndo(dev, attr_id) ?
5333 sizeof(struct rtnl_link_stats64) : 0;
69ae6ad2
NF
5334}
5335
f6e0fb81
PM
5336static int
5337rtnl_offload_xstats_fill_ndo(struct net_device *dev, int attr_id,
5338 struct sk_buff *skb)
69ae6ad2 5339{
f6e0fb81 5340 unsigned int size = rtnl_offload_xstats_get_size_ndo(dev, attr_id);
69ae6ad2 5341 struct nlattr *attr = NULL;
69ae6ad2
NF
5342 void *attr_data;
5343 int err;
5344
f6e0fb81 5345 if (!size)
69ae6ad2
NF
5346 return -ENODATA;
5347
f6e0fb81
PM
5348 attr = nla_reserve_64bit(skb, attr_id, size,
5349 IFLA_OFFLOAD_XSTATS_UNSPEC);
5350 if (!attr)
5351 return -EMSGSIZE;
69ae6ad2 5352
f6e0fb81
PM
5353 attr_data = nla_data(attr);
5354 memset(attr_data, 0, size);
69ae6ad2 5355
f6e0fb81
PM
5356 err = dev->netdev_ops->ndo_get_offload_stats(attr_id, dev, attr_data);
5357 if (err)
5358 return err;
69ae6ad2 5359
f6e0fb81
PM
5360 return 0;
5361}
69ae6ad2 5362
0e7788fd
PM
5363static unsigned int
5364rtnl_offload_xstats_get_size_stats(const struct net_device *dev,
5365 enum netdev_offload_xstats_type type)
5366{
5367 bool enabled = netdev_offload_xstats_enabled(dev, type);
5368
5369 return enabled ? sizeof(struct rtnl_hw_stats64) : 0;
5370}
5371
5372struct rtnl_offload_xstats_request_used {
5373 bool request;
5374 bool used;
5375};
5376
5377static int
5378rtnl_offload_xstats_get_stats(struct net_device *dev,
5379 enum netdev_offload_xstats_type type,
5380 struct rtnl_offload_xstats_request_used *ru,
5381 struct rtnl_hw_stats64 *stats,
5382 struct netlink_ext_ack *extack)
5383{
5384 bool request;
5385 bool used;
5386 int err;
5387
5388 request = netdev_offload_xstats_enabled(dev, type);
5389 if (!request) {
5390 used = false;
5391 goto out;
5392 }
5393
5394 err = netdev_offload_xstats_get(dev, type, stats, &used, extack);
5395 if (err)
5396 return err;
5397
5398out:
5399 if (ru) {
5400 ru->request = request;
5401 ru->used = used;
5402 }
5403 return 0;
5404}
5405
5406static int
5407rtnl_offload_xstats_fill_hw_s_info_one(struct sk_buff *skb, int attr_id,
5408 struct rtnl_offload_xstats_request_used *ru)
5409{
5410 struct nlattr *nest;
5411
5412 nest = nla_nest_start(skb, attr_id);
5413 if (!nest)
5414 return -EMSGSIZE;
5415
5416 if (nla_put_u8(skb, IFLA_OFFLOAD_XSTATS_HW_S_INFO_REQUEST, ru->request))
5417 goto nla_put_failure;
5418
5419 if (nla_put_u8(skb, IFLA_OFFLOAD_XSTATS_HW_S_INFO_USED, ru->used))
5420 goto nla_put_failure;
5421
5422 nla_nest_end(skb, nest);
5423 return 0;
5424
5425nla_put_failure:
5426 nla_nest_cancel(skb, nest);
5427 return -EMSGSIZE;
5428}
5429
5430static int
5431rtnl_offload_xstats_fill_hw_s_info(struct sk_buff *skb, struct net_device *dev,
5432 struct netlink_ext_ack *extack)
5433{
5434 enum netdev_offload_xstats_type t_l3 = NETDEV_OFFLOAD_XSTATS_TYPE_L3;
5435 struct rtnl_offload_xstats_request_used ru_l3;
5436 struct nlattr *nest;
5437 int err;
5438
5439 err = rtnl_offload_xstats_get_stats(dev, t_l3, &ru_l3, NULL, extack);
5440 if (err)
5441 return err;
5442
5443 nest = nla_nest_start(skb, IFLA_OFFLOAD_XSTATS_HW_S_INFO);
5444 if (!nest)
5445 return -EMSGSIZE;
5446
5447 if (rtnl_offload_xstats_fill_hw_s_info_one(skb,
5448 IFLA_OFFLOAD_XSTATS_L3_STATS,
5449 &ru_l3))
5450 goto nla_put_failure;
5451
5452 nla_nest_end(skb, nest);
5453 return 0;
5454
5455nla_put_failure:
5456 nla_nest_cancel(skb, nest);
5457 return -EMSGSIZE;
5458}
5459
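/* Fill the IFLA_STATS_LINK_OFFLOAD_XSTATS nest: driver CPU-hit counters,
 * the hardware-stats request/used info and, when enabled, the L3 stats.
 * @prividx remembers which attribute to resume from if the skb fills up.
 */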
f6e0fb81 5460static int rtnl_offload_xstats_fill(struct sk_buff *skb, struct net_device *dev,
05415bcc
PM
5461 int *prividx, u32 off_filter_mask,
5462 struct netlink_ext_ack *extack)
f6e0fb81 5463{
0e7788fd
PM
5464 enum netdev_offload_xstats_type t_l3 = NETDEV_OFFLOAD_XSTATS_TYPE_L3;
5465 int attr_id_hw_s_info = IFLA_OFFLOAD_XSTATS_HW_S_INFO;
5466 int attr_id_l3_stats = IFLA_OFFLOAD_XSTATS_L3_STATS;
f6e0fb81
PM
5467 int attr_id_cpu_hit = IFLA_OFFLOAD_XSTATS_CPU_HIT;
5468 bool have_data = false;
5469 int err;
5470
46efc97b
PM
5471 if (*prividx <= attr_id_cpu_hit &&
5472 (off_filter_mask &
5473 IFLA_STATS_FILTER_BIT(attr_id_cpu_hit))) {
f6e0fb81
PM
5474 err = rtnl_offload_xstats_fill_ndo(dev, attr_id_cpu_hit, skb);
5475 if (!err) {
5476 have_data = true;
5477 } else if (err != -ENODATA) {
5478 *prividx = attr_id_cpu_hit;
5479 return err;
5480 }
69ae6ad2
NF
5481 }
5482
0e7788fd
PM
5483 if (*prividx <= attr_id_hw_s_info &&
5484 (off_filter_mask & IFLA_STATS_FILTER_BIT(attr_id_hw_s_info))) {
5485 *prividx = attr_id_hw_s_info;
5486
5487 err = rtnl_offload_xstats_fill_hw_s_info(skb, dev, extack);
5488 if (err)
5489 return err;
5490
5491 have_data = true;
5492 *prividx = 0;
5493 }
5494
5495 if (*prividx <= attr_id_l3_stats &&
5496 (off_filter_mask & IFLA_STATS_FILTER_BIT(attr_id_l3_stats))) {
5497 unsigned int size_l3;
5498 struct nlattr *attr;
5499
5500 *prividx = attr_id_l3_stats;
5501
5502 size_l3 = rtnl_offload_xstats_get_size_stats(dev, t_l3);
23cfe941
PM
5503 if (!size_l3)
5504 goto skip_l3_stats;
0e7788fd
PM
5505 attr = nla_reserve_64bit(skb, attr_id_l3_stats, size_l3,
5506 IFLA_OFFLOAD_XSTATS_UNSPEC);
5507 if (!attr)
5508 return -EMSGSIZE;
5509
5510 err = rtnl_offload_xstats_get_stats(dev, t_l3, NULL,
5511 nla_data(attr), extack);
5512 if (err)
5513 return err;
5514
5515 have_data = true;
23cfe941 5516skip_l3_stats:
0e7788fd
PM
5517 *prividx = 0;
5518 }
5519
f6e0fb81 5520 if (!have_data)
69ae6ad2
NF
5521 return -ENODATA;
5522
5523 *prividx = 0;
5524 return 0;
69ae6ad2
NF
5525}
5526
0e7788fd
PM
5527static unsigned int
5528rtnl_offload_xstats_get_size_hw_s_info_one(const struct net_device *dev,
5529 enum netdev_offload_xstats_type type)
5530{
0e7788fd
PM
5531 return nla_total_size(0) +
5532 /* IFLA_OFFLOAD_XSTATS_HW_S_INFO_REQUEST */
5533 nla_total_size(sizeof(u8)) +
5534 /* IFLA_OFFLOAD_XSTATS_HW_S_INFO_USED */
503930f8 5535 nla_total_size(sizeof(u8)) +
0e7788fd
PM
5536 0;
5537}
5538
5539static unsigned int
5540rtnl_offload_xstats_get_size_hw_s_info(const struct net_device *dev)
5541{
5542 enum netdev_offload_xstats_type t_l3 = NETDEV_OFFLOAD_XSTATS_TYPE_L3;
5543
5544 return nla_total_size(0) +
5545 /* IFLA_OFFLOAD_XSTATS_L3_STATS */
5546 rtnl_offload_xstats_get_size_hw_s_info_one(dev, t_l3) +
5547 0;
5548}
5549
46efc97b
PM
5550static int rtnl_offload_xstats_get_size(const struct net_device *dev,
5551 u32 off_filter_mask)
69ae6ad2 5552{
0e7788fd 5553 enum netdev_offload_xstats_type t_l3 = NETDEV_OFFLOAD_XSTATS_TYPE_L3;
f6e0fb81 5554 int attr_id_cpu_hit = IFLA_OFFLOAD_XSTATS_CPU_HIT;
69ae6ad2 5555 int nla_size = 0;
69ae6ad2
NF
5556 int size;
5557
46efc97b
PM
5558 if (off_filter_mask &
5559 IFLA_STATS_FILTER_BIT(attr_id_cpu_hit)) {
5560 size = rtnl_offload_xstats_get_size_ndo(dev, attr_id_cpu_hit);
5561 nla_size += nla_total_size_64bit(size);
5562 }
69ae6ad2 5563
0e7788fd
PM
5564 if (off_filter_mask &
5565 IFLA_STATS_FILTER_BIT(IFLA_OFFLOAD_XSTATS_HW_S_INFO))
5566 nla_size += rtnl_offload_xstats_get_size_hw_s_info(dev);
5567
5568 if (off_filter_mask &
5569 IFLA_STATS_FILTER_BIT(IFLA_OFFLOAD_XSTATS_L3_STATS)) {
5570 size = rtnl_offload_xstats_get_size_stats(dev, t_l3);
5571 nla_size += nla_total_size_64bit(size);
5572 }
5573
69ae6ad2
NF
5574 if (nla_size != 0)
5575 nla_size += nla_total_size(0);
5576
5577 return nla_size;
5578}
5579
46efc97b
PM
5580struct rtnl_stats_dump_filters {
5581 /* mask[0] filters outer attributes. Then individual nests have their
5582 * filtering mask at the index of the nested attribute.
5583 */
5584 u32 mask[IFLA_STATS_MAX + 1];
5585};
5586
10c9ead9
RP
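/* Build one RTM_NEWSTATS message for @dev. @idxattr and @prividx track the
 * outer and nested attribute to resume from; a partially filled multipart
 * message is kept as long as some progress was made.
 */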
5587static int rtnl_fill_statsinfo(struct sk_buff *skb, struct net_device *dev,
5588 int type, u32 pid, u32 seq, u32 change,
46efc97b
PM
5589 unsigned int flags,
5590 const struct rtnl_stats_dump_filters *filters,
05415bcc
PM
5591 int *idxattr, int *prividx,
5592 struct netlink_ext_ack *extack)
10c9ead9 5593{
46efc97b 5594 unsigned int filter_mask = filters->mask[0];
10c9ead9
RP
5595 struct if_stats_msg *ifsm;
5596 struct nlmsghdr *nlh;
5597 struct nlattr *attr;
e8872a25 5598 int s_prividx = *prividx;
69ae6ad2 5599 int err;
10c9ead9
RP
5600
5601 ASSERT_RTNL();
5602
5603 nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ifsm), flags);
5604 if (!nlh)
5605 return -EMSGSIZE;
5606
5607 ifsm = nlmsg_data(nlh);
ce024f42
NA
5608 ifsm->family = PF_UNSPEC;
5609 ifsm->pad1 = 0;
5610 ifsm->pad2 = 0;
10c9ead9
RP
5611 ifsm->ifindex = dev->ifindex;
5612 ifsm->filter_mask = filter_mask;
5613
e8872a25 5614 if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_64, *idxattr)) {
10c9ead9 5615 struct rtnl_link_stats64 *sp;
10c9ead9 5616
58414d32
ND
5617 attr = nla_reserve_64bit(skb, IFLA_STATS_LINK_64,
5618 sizeof(struct rtnl_link_stats64),
5619 IFLA_STATS_UNSPEC);
216e6906
PM
5620 if (!attr) {
5621 err = -EMSGSIZE;
10c9ead9 5622 goto nla_put_failure;
216e6906 5623 }
10c9ead9
RP
5624
5625 sp = nla_data(attr);
5626 dev_get_stats(dev, sp);
5627 }
5628
97a47fac
NA
5629 if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_XSTATS, *idxattr)) {
5630 const struct rtnl_link_ops *ops = dev->rtnl_link_ops;
5631
5632 if (ops && ops->fill_linkxstats) {
97a47fac 5633 *idxattr = IFLA_STATS_LINK_XSTATS;
ae0be8de
MK
5634 attr = nla_nest_start_noflag(skb,
5635 IFLA_STATS_LINK_XSTATS);
216e6906
PM
5636 if (!attr) {
5637 err = -EMSGSIZE;
97a47fac 5638 goto nla_put_failure;
216e6906 5639 }
97a47fac 5640
80e73cc5
NA
5641 err = ops->fill_linkxstats(skb, dev, prividx, *idxattr);
5642 nla_nest_end(skb, attr);
5643 if (err)
5644 goto nla_put_failure;
5645 *idxattr = 0;
5646 }
5647 }
5648
5649 if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_XSTATS_SLAVE,
5650 *idxattr)) {
5651 const struct rtnl_link_ops *ops = NULL;
5652 const struct net_device *master;
5653
5654 master = netdev_master_upper_dev_get(dev);
5655 if (master)
5656 ops = master->rtnl_link_ops;
5657 if (ops && ops->fill_linkxstats) {
80e73cc5 5658 *idxattr = IFLA_STATS_LINK_XSTATS_SLAVE;
ae0be8de
MK
5659 attr = nla_nest_start_noflag(skb,
5660 IFLA_STATS_LINK_XSTATS_SLAVE);
216e6906
PM
5661 if (!attr) {
5662 err = -EMSGSIZE;
80e73cc5 5663 goto nla_put_failure;
216e6906 5664 }
80e73cc5
NA
5665
5666 err = ops->fill_linkxstats(skb, dev, prividx, *idxattr);
97a47fac
NA
5667 nla_nest_end(skb, attr);
5668 if (err)
5669 goto nla_put_failure;
5670 *idxattr = 0;
5671 }
5672 }
5673
69ae6ad2
NF
5674 if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_OFFLOAD_XSTATS,
5675 *idxattr)) {
46efc97b
PM
5676 u32 off_filter_mask;
5677
5678 off_filter_mask = filters->mask[IFLA_STATS_LINK_OFFLOAD_XSTATS];
69ae6ad2 5679 *idxattr = IFLA_STATS_LINK_OFFLOAD_XSTATS;
ae0be8de
MK
5680 attr = nla_nest_start_noflag(skb,
5681 IFLA_STATS_LINK_OFFLOAD_XSTATS);
216e6906
PM
5682 if (!attr) {
5683 err = -EMSGSIZE;
69ae6ad2 5684 goto nla_put_failure;
216e6906 5685 }
69ae6ad2 5686
46efc97b 5687 err = rtnl_offload_xstats_fill(skb, dev, prividx,
05415bcc 5688 off_filter_mask, extack);
69ae6ad2
NF
5689 if (err == -ENODATA)
5690 nla_nest_cancel(skb, attr);
5691 else
5692 nla_nest_end(skb, attr);
5693
5694 if (err && err != -ENODATA)
5695 goto nla_put_failure;
5696 *idxattr = 0;
5697 }
5698
aefb4d4a
RS
5699 if (stats_attr_valid(filter_mask, IFLA_STATS_AF_SPEC, *idxattr)) {
5700 struct rtnl_af_ops *af_ops;
5701
5702 *idxattr = IFLA_STATS_AF_SPEC;
ae0be8de 5703 attr = nla_nest_start_noflag(skb, IFLA_STATS_AF_SPEC);
216e6906
PM
5704 if (!attr) {
5705 err = -EMSGSIZE;
aefb4d4a 5706 goto nla_put_failure;
216e6906 5707 }
aefb4d4a 5708
5fa85a09
FW
5709 rcu_read_lock();
5710 list_for_each_entry_rcu(af_ops, &rtnl_af_ops, list) {
aefb4d4a
RS
5711 if (af_ops->fill_stats_af) {
5712 struct nlattr *af;
aefb4d4a 5713
ae0be8de
MK
5714 af = nla_nest_start_noflag(skb,
5715 af_ops->family);
5fa85a09
FW
5716 if (!af) {
5717 rcu_read_unlock();
57d29a29 5718 err = -EMSGSIZE;
aefb4d4a 5719 goto nla_put_failure;
5fa85a09 5720 }
aefb4d4a
RS
5721 err = af_ops->fill_stats_af(skb, dev);
5722
5fa85a09 5723 if (err == -ENODATA) {
aefb4d4a 5724 nla_nest_cancel(skb, af);
5fa85a09
FW
5725 } else if (err < 0) {
5726 rcu_read_unlock();
aefb4d4a 5727 goto nla_put_failure;
5fa85a09 5728 }
aefb4d4a
RS
5729
5730 nla_nest_end(skb, af);
5731 }
5732 }
5fa85a09 5733 rcu_read_unlock();
aefb4d4a
RS
5734
5735 nla_nest_end(skb, attr);
5736
5737 *idxattr = 0;
5738 }
5739
10c9ead9
RP
5740 nlmsg_end(skb, nlh);
5741
5742 return 0;
5743
5744nla_put_failure:
e8872a25
NA
5745	/* not a multipart message or no progress means a real error */
5746 if (!(flags & NLM_F_MULTI) || s_prividx == *prividx)
5747 nlmsg_cancel(skb, nlh);
5748 else
5749 nlmsg_end(skb, nlh);
10c9ead9 5750
216e6906 5751 return err;
10c9ead9
RP
5752}
5753
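/* Upper bound on the payload rtnl_fill_statsinfo() may emit for @dev under
 * @filters, used to size the reply skb.
 */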
10c9ead9 5754static size_t if_nlmsg_stats_size(const struct net_device *dev,
46efc97b 5755 const struct rtnl_stats_dump_filters *filters)
10c9ead9 5756{
d3436799 5757 size_t size = NLMSG_ALIGN(sizeof(struct if_stats_msg));
46efc97b 5758 unsigned int filter_mask = filters->mask[0];
10c9ead9 5759
e8872a25 5760 if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_64, 0))
10c9ead9
RP
5761 size += nla_total_size_64bit(sizeof(struct rtnl_link_stats64));
5762
97a47fac
NA
5763 if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_XSTATS, 0)) {
5764 const struct rtnl_link_ops *ops = dev->rtnl_link_ops;
80e73cc5 5765 int attr = IFLA_STATS_LINK_XSTATS;
97a47fac
NA
5766
5767 if (ops && ops->get_linkxstats_size) {
80e73cc5
NA
5768 size += nla_total_size(ops->get_linkxstats_size(dev,
5769 attr));
97a47fac
NA
5770 /* for IFLA_STATS_LINK_XSTATS */
5771 size += nla_total_size(0);
5772 }
5773 }
5774
80e73cc5
NA
5775 if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_XSTATS_SLAVE, 0)) {
5776 struct net_device *_dev = (struct net_device *)dev;
5777 const struct rtnl_link_ops *ops = NULL;
5778 const struct net_device *master;
5779
5780 /* netdev_master_upper_dev_get can't take const */
5781 master = netdev_master_upper_dev_get(_dev);
5782 if (master)
5783 ops = master->rtnl_link_ops;
5784 if (ops && ops->get_linkxstats_size) {
5785 int attr = IFLA_STATS_LINK_XSTATS_SLAVE;
5786
5787 size += nla_total_size(ops->get_linkxstats_size(dev,
5788 attr));
5789 /* for IFLA_STATS_LINK_XSTATS_SLAVE */
5790 size += nla_total_size(0);
5791 }
5792 }
5793
46efc97b
PM
5794 if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_OFFLOAD_XSTATS, 0)) {
5795 u32 off_filter_mask;
5796
5797 off_filter_mask = filters->mask[IFLA_STATS_LINK_OFFLOAD_XSTATS];
5798 size += rtnl_offload_xstats_get_size(dev, off_filter_mask);
5799 }
69ae6ad2 5800
aefb4d4a
RS
5801 if (stats_attr_valid(filter_mask, IFLA_STATS_AF_SPEC, 0)) {
5802 struct rtnl_af_ops *af_ops;
5803
5804 /* for IFLA_STATS_AF_SPEC */
5805 size += nla_total_size(0);
5806
5fa85a09
FW
5807 rcu_read_lock();
5808 list_for_each_entry_rcu(af_ops, &rtnl_af_ops, list) {
aefb4d4a
RS
5809 if (af_ops->get_stats_af_size) {
5810 size += nla_total_size(
5811 af_ops->get_stats_af_size(dev));
5812
5813 /* for AF_* */
5814 size += nla_total_size(0);
5815 }
5816 }
5fa85a09 5817 rcu_read_unlock();
aefb4d4a
RS
5818 }
5819
10c9ead9
RP
5820 return size;
5821}
5822
46efc97b
PM
5823#define RTNL_STATS_OFFLOAD_XSTATS_VALID ((1 << __IFLA_OFFLOAD_XSTATS_MAX) - 1)
5824
5825static const struct nla_policy
5826rtnl_stats_get_policy_filters[IFLA_STATS_MAX + 1] = {
5827 [IFLA_STATS_LINK_OFFLOAD_XSTATS] =
5828 NLA_POLICY_MASK(NLA_U32, RTNL_STATS_OFFLOAD_XSTATS_VALID),
5829};
5830
5831static const struct nla_policy
5832rtnl_stats_get_policy[IFLA_STATS_GETSET_MAX + 1] = {
5833 [IFLA_STATS_GET_FILTERS] =
5834 NLA_POLICY_NESTED(rtnl_stats_get_policy_filters),
5835};
5836
03ba3566
PM
5837static const struct nla_policy
5838ifla_stats_set_policy[IFLA_STATS_GETSET_MAX + 1] = {
5fd0b838 5839 [IFLA_STATS_SET_OFFLOAD_XSTATS_L3_STATS] = NLA_POLICY_MAX(NLA_U8, 1),
03ba3566
PM
5840};
5841
46efc97b
PM
5842static int rtnl_stats_get_parse_filters(struct nlattr *ifla_filters,
5843 struct rtnl_stats_dump_filters *filters,
5844 struct netlink_ext_ack *extack)
5845{
5846 struct nlattr *tb[IFLA_STATS_MAX + 1];
5847 int err;
5848 int at;
5849
5850 err = nla_parse_nested(tb, IFLA_STATS_MAX, ifla_filters,
5851 rtnl_stats_get_policy_filters, extack);
5852 if (err < 0)
5853 return err;
5854
5855 for (at = 1; at <= IFLA_STATS_MAX; at++) {
5856 if (tb[at]) {
5857 if (!(filters->mask[0] & IFLA_STATS_FILTER_BIT(at))) {
5858 NL_SET_ERR_MSG(extack, "Filtered attribute not enabled in filter_mask");
5859 return -EINVAL;
5860 }
5861 filters->mask[at] = nla_get_u32(tb[at]);
5862 }
5863 }
5864
5865 return 0;
5866}
5867
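/* Parse an RTM_GETSTATS request: seed the outer mask from the legacy
 * filter_mask header field, default every nested mask to "everything" and
 * narrow them from IFLA_STATS_GET_FILTERS when present.
 */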
5868static int rtnl_stats_get_parse(const struct nlmsghdr *nlh,
5869 u32 filter_mask,
5870 struct rtnl_stats_dump_filters *filters,
5871 struct netlink_ext_ack *extack)
5872{
5873 struct nlattr *tb[IFLA_STATS_GETSET_MAX + 1];
5874 int err;
5875 int i;
5876
5877 filters->mask[0] = filter_mask;
5878 for (i = 1; i < ARRAY_SIZE(filters->mask); i++)
5879 filters->mask[i] = -1U;
5880
5881 err = nlmsg_parse(nlh, sizeof(struct if_stats_msg), tb,
5882 IFLA_STATS_GETSET_MAX, rtnl_stats_get_policy, extack);
5883 if (err < 0)
5884 return err;
5885
5886 if (tb[IFLA_STATS_GET_FILTERS]) {
5887 err = rtnl_stats_get_parse_filters(tb[IFLA_STATS_GET_FILTERS],
5888 filters, extack);
5889 if (err)
5890 return err;
5891 }
5892
5893 return 0;
5894}
5895
51bc860d
JK
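/* Basic sanity check of an RTM_GETSTATS header; under strict checking also
 * reject non-zero padding, an ifindex in dump requests and unknown bits in
 * filter_mask.
 */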
5896static int rtnl_valid_stats_req(const struct nlmsghdr *nlh, bool strict_check,
5897 bool is_dump, struct netlink_ext_ack *extack)
5898{
5899 struct if_stats_msg *ifsm;
5900
69f23a09 5901 if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ifsm))) {
51bc860d
JK
5902 NL_SET_ERR_MSG(extack, "Invalid header for stats dump");
5903 return -EINVAL;
5904 }
5905
5906 if (!strict_check)
5907 return 0;
5908
5909 ifsm = nlmsg_data(nlh);
5910
5911 /* only requests using strict checks can pass data to influence
5912 * the dump. The legacy exception is filter_mask.
5913 */
5914 if (ifsm->pad1 || ifsm->pad2 || (is_dump && ifsm->ifindex)) {
5915 NL_SET_ERR_MSG(extack, "Invalid values in header for stats dump request");
5916 return -EINVAL;
5917 }
6300acb2
JK
5918 if (ifsm->filter_mask >= IFLA_STATS_FILTER_BIT(IFLA_STATS_MAX + 1)) {
5919 NL_SET_ERR_MSG(extack, "Invalid stats requested through filter mask");
5920 return -EINVAL;
5921 }
51bc860d
JK
5922
5923 return 0;
5924}
5925
c21ef3e3
DA
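/* RTM_GETSTATS doit handler: validate the request, look the device up by
 * ifindex and unicast a single statistics message back to the sender.
 */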
5926static int rtnl_stats_get(struct sk_buff *skb, struct nlmsghdr *nlh,
5927 struct netlink_ext_ack *extack)
10c9ead9 5928{
46efc97b 5929 struct rtnl_stats_dump_filters filters;
10c9ead9 5930 struct net *net = sock_net(skb->sk);
10c9ead9 5931 struct net_device *dev = NULL;
e8872a25
NA
5932 int idxattr = 0, prividx = 0;
5933 struct if_stats_msg *ifsm;
10c9ead9 5934 struct sk_buff *nskb;
10c9ead9
RP
5935 int err;
5936
51bc860d
JK
5937 err = rtnl_valid_stats_req(nlh, netlink_strict_get_check(skb),
5938 false, extack);
5939 if (err)
5940 return err;
4775cc1f 5941
10c9ead9
RP
5942 ifsm = nlmsg_data(nlh);
5943 if (ifsm->ifindex > 0)
5944 dev = __dev_get_by_index(net, ifsm->ifindex);
5945 else
5946 return -EINVAL;
5947
5948 if (!dev)
5949 return -ENODEV;
5950
46efc97b 5951 if (!ifsm->filter_mask) {
22b67d17 5952 NL_SET_ERR_MSG(extack, "Filter mask must be set for stats get");
10c9ead9 5953 return -EINVAL;
22b67d17 5954 }
10c9ead9 5955
46efc97b
PM
5956 err = rtnl_stats_get_parse(nlh, ifsm->filter_mask, &filters, extack);
5957 if (err)
5958 return err;
5959
5960 nskb = nlmsg_new(if_nlmsg_stats_size(dev, &filters), GFP_KERNEL);
10c9ead9
RP
5961 if (!nskb)
5962 return -ENOBUFS;
5963
5964 err = rtnl_fill_statsinfo(nskb, dev, RTM_NEWSTATS,
5965 NETLINK_CB(skb).portid, nlh->nlmsg_seq, 0,
05415bcc 5966 0, &filters, &idxattr, &prividx, extack);
10c9ead9
RP
5967 if (err < 0) {
5968 /* -EMSGSIZE implies BUG in if_nlmsg_stats_size */
5969 WARN_ON(err == -EMSGSIZE);
5970 kfree_skb(nskb);
5971 } else {
5972 err = rtnl_unicast(nskb, net, NETLINK_CB(skb).portid);
5973 }
5974
5975 return err;
5976}
5977
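/* RTM_GETSTATS dumpit handler: walk all devices in the namespace, resuming
 * from the ifindex and attribute offsets stashed in the dump context.
 */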
5978static int rtnl_stats_dump(struct sk_buff *skb, struct netlink_callback *cb)
5979{
841891ec 5980 struct netlink_ext_ack *extack = cb->extack;
46efc97b 5981 struct rtnl_stats_dump_filters filters;
10c9ead9 5982 struct net *net = sock_net(skb->sk);
e8872a25 5983 unsigned int flags = NLM_F_MULTI;
10c9ead9 5984 struct if_stats_msg *ifsm;
0feb396f
ED
5985 struct {
5986 unsigned long ifindex;
5987 int idxattr;
5988 int prividx;
5989 } *ctx = (void *)cb->ctx;
e8872a25 5990 struct net_device *dev;
0feb396f 5991 int err;
10c9ead9
RP
5992
5993 cb->seq = net->dev_base_seq;
5994
51bc860d
JK
5995 err = rtnl_valid_stats_req(cb->nlh, cb->strict_check, true, extack);
5996 if (err)
5997 return err;
4775cc1f 5998
10c9ead9 5999 ifsm = nlmsg_data(cb->nlh);
46efc97b 6000 if (!ifsm->filter_mask) {
841891ec 6001 NL_SET_ERR_MSG(extack, "Filter mask must be set for stats dump");
10c9ead9 6002 return -EINVAL;
841891ec 6003 }
10c9ead9 6004
46efc97b
PM
6005 err = rtnl_stats_get_parse(cb->nlh, ifsm->filter_mask, &filters,
6006 extack);
6007 if (err)
6008 return err;
6009
0feb396f
ED
6010 for_each_netdev_dump(net, dev, ctx->ifindex) {
6011 err = rtnl_fill_statsinfo(skb, dev, RTM_NEWSTATS,
6012 NETLINK_CB(cb->skb).portid,
6013 cb->nlh->nlmsg_seq, 0,
6014 flags, &filters,
6015 &ctx->idxattr, &ctx->prividx,
6016 extack);
6017 /* If we ran out of room on the first message,
6018 * we're in trouble.
6019 */
6020 WARN_ON((err == -EMSGSIZE) && (skb->len == 0));
10c9ead9 6021
0feb396f
ED
6022 if (err < 0)
6023 break;
6024 ctx->prividx = 0;
6025 ctx->idxattr = 0;
6026 nl_dump_check_consistent(cb, nlmsg_hdr(skb));
10c9ead9 6027 }
10c9ead9 6028
136c2a9a 6029 return err;
10c9ead9
RP
6030}
6031
5fd0b838
PM
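/* Notify RTNLGRP_STATS listeners that the hardware offload-xstats state of
 * @dev changed, e.g. after L3 stats were enabled or disabled. Caller must
 * hold RTNL.
 */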
6032void rtnl_offload_xstats_notify(struct net_device *dev)
6033{
6034 struct rtnl_stats_dump_filters response_filters = {};
6035 struct net *net = dev_net(dev);
6036 int idxattr = 0, prividx = 0;
6037 struct sk_buff *skb;
6038 int err = -ENOBUFS;
6039
6040 ASSERT_RTNL();
6041
6042 response_filters.mask[0] |=
6043 IFLA_STATS_FILTER_BIT(IFLA_STATS_LINK_OFFLOAD_XSTATS);
6044 response_filters.mask[IFLA_STATS_LINK_OFFLOAD_XSTATS] |=
6045 IFLA_STATS_FILTER_BIT(IFLA_OFFLOAD_XSTATS_HW_S_INFO);
6046
6047 skb = nlmsg_new(if_nlmsg_stats_size(dev, &response_filters),
6048 GFP_KERNEL);
6049 if (!skb)
6050 goto errout;
6051
6052 err = rtnl_fill_statsinfo(skb, dev, RTM_NEWSTATS, 0, 0, 0, 0,
6053 &response_filters, &idxattr, &prividx, NULL);
6054 if (err < 0) {
6055 kfree_skb(skb);
6056 goto errout;
6057 }
6058
6059 rtnl_notify(skb, net, 0, RTNLGRP_STATS, NULL, GFP_KERNEL);
6060 return;
6061
6062errout:
6063 rtnl_set_sk_err(net, RTNLGRP_STATS, err);
6064}
6065EXPORT_SYMBOL(rtnl_offload_xstats_notify);
6066
03ba3566
PM
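/* RTM_SETSTATS doit handler: currently only toggles L3 hardware stats
 * collection via IFLA_STATS_SET_OFFLOAD_XSTATS_L3_STATS and sends a
 * notification when the state changes.
 */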
6067static int rtnl_stats_set(struct sk_buff *skb, struct nlmsghdr *nlh,
6068 struct netlink_ext_ack *extack)
6069{
5fd0b838 6070 enum netdev_offload_xstats_type t_l3 = NETDEV_OFFLOAD_XSTATS_TYPE_L3;
03ba3566
PM
6071 struct rtnl_stats_dump_filters response_filters = {};
6072 struct nlattr *tb[IFLA_STATS_GETSET_MAX + 1];
6073 struct net *net = sock_net(skb->sk);
6074 struct net_device *dev = NULL;
03ba3566 6075 struct if_stats_msg *ifsm;
5fd0b838 6076 bool notify = false;
03ba3566
PM
6077 int err;
6078
6079 err = rtnl_valid_stats_req(nlh, netlink_strict_get_check(skb),
6080 false, extack);
6081 if (err)
6082 return err;
6083
6084 ifsm = nlmsg_data(nlh);
6085 if (ifsm->family != AF_UNSPEC) {
6086 NL_SET_ERR_MSG(extack, "Address family should be AF_UNSPEC");
6087 return -EINVAL;
6088 }
6089
6090 if (ifsm->ifindex > 0)
6091 dev = __dev_get_by_index(net, ifsm->ifindex);
6092 else
6093 return -EINVAL;
6094
6095 if (!dev)
6096 return -ENODEV;
6097
6098 if (ifsm->filter_mask) {
6099 NL_SET_ERR_MSG(extack, "Filter mask must be 0 for stats set");
6100 return -EINVAL;
6101 }
6102
6103 err = nlmsg_parse(nlh, sizeof(*ifsm), tb, IFLA_STATS_GETSET_MAX,
6104 ifla_stats_set_policy, extack);
6105 if (err < 0)
6106 return err;
6107
5fd0b838
PM
6108 if (tb[IFLA_STATS_SET_OFFLOAD_XSTATS_L3_STATS]) {
6109 u8 req = nla_get_u8(tb[IFLA_STATS_SET_OFFLOAD_XSTATS_L3_STATS]);
03ba3566 6110
5fd0b838
PM
6111 if (req)
6112 err = netdev_offload_xstats_enable(dev, t_l3, extack);
6113 else
6114 err = netdev_offload_xstats_disable(dev, t_l3);
6115
6116 if (!err)
6117 notify = true;
6118 else if (err != -EALREADY)
6119 return err;
6120
6121 response_filters.mask[0] |=
6122 IFLA_STATS_FILTER_BIT(IFLA_STATS_LINK_OFFLOAD_XSTATS);
6123 response_filters.mask[IFLA_STATS_LINK_OFFLOAD_XSTATS] |=
6124 IFLA_STATS_FILTER_BIT(IFLA_OFFLOAD_XSTATS_HW_S_INFO);
03ba3566
PM
6125 }
6126
5fd0b838
PM
6127 if (notify)
6128 rtnl_offload_xstats_notify(dev);
6129
6130 return 0;
03ba3566
PM
6131}
6132
cc7f5022
IS
6133static int rtnl_mdb_valid_dump_req(const struct nlmsghdr *nlh,
6134 struct netlink_ext_ack *extack)
6135{
6136 struct br_port_msg *bpm;
6137
6138 if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*bpm))) {
6139 NL_SET_ERR_MSG(extack, "Invalid header for mdb dump request");
6140 return -EINVAL;
6141 }
6142
6143 bpm = nlmsg_data(nlh);
6144 if (bpm->ifindex) {
6145 NL_SET_ERR_MSG(extack, "Filtering by device index is not supported for mdb dump request");
6146 return -EINVAL;
6147 }
6148 if (nlmsg_attrlen(nlh, sizeof(*bpm))) {
6149 NL_SET_ERR_MSG(extack, "Invalid data after header in mdb dump request");
6150 return -EINVAL;
6151 }
6152
6153 return 0;
6154}
6155
6156struct rtnl_mdb_dump_ctx {
6157 long idx;
6158};
6159
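/* RTM_GETMDB dumpit handler: iterate over devices implementing
 * ndo_mdb_dump and let each fill the skb, resetting the per-device dump
 * state in between.
 */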
6160static int rtnl_mdb_dump(struct sk_buff *skb, struct netlink_callback *cb)
6161{
6162 struct rtnl_mdb_dump_ctx *ctx = (void *)cb->ctx;
6163 struct net *net = sock_net(skb->sk);
6164 struct net_device *dev;
6165 int idx, s_idx;
6166 int err;
6167
6168 NL_ASSERT_DUMP_CTX_FITS(struct rtnl_mdb_dump_ctx);
6169
6170 if (cb->strict_check) {
6171 err = rtnl_mdb_valid_dump_req(cb->nlh, cb->extack);
6172 if (err)
6173 return err;
6174 }
6175
6176 s_idx = ctx->idx;
6177 idx = 0;
6178
6179 for_each_netdev(net, dev) {
6180 if (idx < s_idx)
6181 goto skip;
6182 if (!dev->netdev_ops->ndo_mdb_dump)
6183 goto skip;
6184
6185 err = dev->netdev_ops->ndo_mdb_dump(dev, skb, cb);
6186 if (err == -EMSGSIZE)
6187 goto out;
6188		/* Moving on to the next device, reset markers and sequence
6189 * counters since they are all maintained per-device.
6190 */
6191 memset(cb->ctx, 0, sizeof(cb->ctx));
6192 cb->prev_seq = 0;
6193 cb->seq = 0;
6194skip:
6195 idx++;
6196 }
6197
6198out:
6199 ctx->idx = idx;
6200 return skb->len;
6201}
6202
ddd17a54
IS
6203static int rtnl_validate_mdb_entry_get(const struct nlattr *attr,
6204 struct netlink_ext_ack *extack)
6205{
6206 struct br_mdb_entry *entry = nla_data(attr);
6207
6208 if (nla_len(attr) != sizeof(struct br_mdb_entry)) {
6209 NL_SET_ERR_MSG_ATTR(extack, attr, "Invalid attribute length");
6210 return -EINVAL;
6211 }
6212
6213 if (entry->ifindex) {
6214 NL_SET_ERR_MSG(extack, "Entry ifindex cannot be specified");
6215 return -EINVAL;
6216 }
6217
6218 if (entry->state) {
6219 NL_SET_ERR_MSG(extack, "Entry state cannot be specified");
6220 return -EINVAL;
6221 }
6222
6223 if (entry->flags) {
6224 NL_SET_ERR_MSG(extack, "Entry flags cannot be specified");
6225 return -EINVAL;
6226 }
6227
6228 if (entry->vid >= VLAN_VID_MASK) {
6229 NL_SET_ERR_MSG(extack, "Invalid entry VLAN id");
6230 return -EINVAL;
6231 }
6232
6233 if (entry->addr.proto != htons(ETH_P_IP) &&
6234 entry->addr.proto != htons(ETH_P_IPV6) &&
6235 entry->addr.proto != 0) {
6236 NL_SET_ERR_MSG(extack, "Unknown entry protocol");
6237 return -EINVAL;
6238 }
6239
6240 return 0;
6241}
6242
6243static const struct nla_policy mdba_get_policy[MDBA_GET_ENTRY_MAX + 1] = {
6244 [MDBA_GET_ENTRY] = NLA_POLICY_VALIDATE_FN(NLA_BINARY,
6245 rtnl_validate_mdb_entry_get,
6246 sizeof(struct br_mdb_entry)),
6247 [MDBA_GET_ENTRY_ATTRS] = { .type = NLA_NESTED },
6248};
6249
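/* RTM_GETMDB doit handler: resolve the target device and delegate the
 * single-entry lookup to its ndo_mdb_get callback.
 */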
6250static int rtnl_mdb_get(struct sk_buff *in_skb, struct nlmsghdr *nlh,
6251 struct netlink_ext_ack *extack)
6252{
6253 struct nlattr *tb[MDBA_GET_ENTRY_MAX + 1];
6254 struct net *net = sock_net(in_skb->sk);
6255 struct br_port_msg *bpm;
6256 struct net_device *dev;
6257 int err;
6258
6259 err = nlmsg_parse(nlh, sizeof(struct br_port_msg), tb,
6260 MDBA_GET_ENTRY_MAX, mdba_get_policy, extack);
6261 if (err)
6262 return err;
6263
6264 bpm = nlmsg_data(nlh);
6265 if (!bpm->ifindex) {
6266 NL_SET_ERR_MSG(extack, "Invalid ifindex");
6267 return -EINVAL;
6268 }
6269
6270 dev = __dev_get_by_index(net, bpm->ifindex);
6271 if (!dev) {
6272 NL_SET_ERR_MSG(extack, "Device doesn't exist");
6273 return -ENODEV;
6274 }
6275
6276 if (NL_REQ_ATTR_CHECK(extack, NULL, tb, MDBA_GET_ENTRY)) {
6277 NL_SET_ERR_MSG(extack, "Missing MDBA_GET_ENTRY attribute");
6278 return -EINVAL;
6279 }
6280
6281 if (!dev->netdev_ops->ndo_mdb_get) {
6282 NL_SET_ERR_MSG(extack, "Device does not support MDB operations");
6283 return -EOPNOTSUPP;
6284 }
6285
6286 return dev->netdev_ops->ndo_mdb_get(dev, tb, NETLINK_CB(in_skb).portid,
6287 nlh->nlmsg_seq, extack);
6288}
6289
cc7f5022
IS
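/* Validate a br_mdb_entry from an RTM_NEWMDB/RTM_DELMDB request: attribute
 * length, per-protocol group address (IPv4, IPv6 or L2 multicast MAC),
 * entry state and VLAN id.
 */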
6290static int rtnl_validate_mdb_entry(const struct nlattr *attr,
6291 struct netlink_ext_ack *extack)
6292{
6293 struct br_mdb_entry *entry = nla_data(attr);
6294
6295 if (nla_len(attr) != sizeof(struct br_mdb_entry)) {
6296 NL_SET_ERR_MSG_ATTR(extack, attr, "Invalid attribute length");
6297 return -EINVAL;
6298 }
6299
6300 if (entry->ifindex == 0) {
6301 NL_SET_ERR_MSG(extack, "Zero entry ifindex is not allowed");
6302 return -EINVAL;
6303 }
6304
6305 if (entry->addr.proto == htons(ETH_P_IP)) {
da654c80
IS
6306 if (!ipv4_is_multicast(entry->addr.u.ip4) &&
6307 !ipv4_is_zeronet(entry->addr.u.ip4)) {
6308 NL_SET_ERR_MSG(extack, "IPv4 entry group address is not multicast or 0.0.0.0");
cc7f5022
IS
6309 return -EINVAL;
6310 }
6311 if (ipv4_is_local_multicast(entry->addr.u.ip4)) {
6312 NL_SET_ERR_MSG(extack, "IPv4 entry group address is local multicast");
6313 return -EINVAL;
6314 }
6315#if IS_ENABLED(CONFIG_IPV6)
6316 } else if (entry->addr.proto == htons(ETH_P_IPV6)) {
6317 if (ipv6_addr_is_ll_all_nodes(&entry->addr.u.ip6)) {
6318 NL_SET_ERR_MSG(extack, "IPv6 entry group address is link-local all nodes");
6319 return -EINVAL;
6320 }
6321#endif
6322 } else if (entry->addr.proto == 0) {
6323 /* L2 mdb */
6324 if (!is_multicast_ether_addr(entry->addr.u.mac_addr)) {
6325 NL_SET_ERR_MSG(extack, "L2 entry group is not multicast");
6326 return -EINVAL;
6327 }
6328 } else {
6329 NL_SET_ERR_MSG(extack, "Unknown entry protocol");
6330 return -EINVAL;
6331 }
6332
6333 if (entry->state != MDB_PERMANENT && entry->state != MDB_TEMPORARY) {
6334 NL_SET_ERR_MSG(extack, "Unknown entry state");
6335 return -EINVAL;
6336 }
6337 if (entry->vid >= VLAN_VID_MASK) {
6338 NL_SET_ERR_MSG(extack, "Invalid entry VLAN id");
6339 return -EINVAL;
6340 }
6341
6342 return 0;
6343}
6344
6345static const struct nla_policy mdba_policy[MDBA_SET_ENTRY_MAX + 1] = {
6346 [MDBA_SET_ENTRY_UNSPEC] = { .strict_start_type = MDBA_SET_ENTRY_ATTRS + 1 },
6347 [MDBA_SET_ENTRY] = NLA_POLICY_VALIDATE_FN(NLA_BINARY,
6348 rtnl_validate_mdb_entry,
6349 sizeof(struct br_mdb_entry)),
6350 [MDBA_SET_ENTRY_ATTRS] = { .type = NLA_NESTED },
6351};
6352
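/* RTM_NEWMDB doit handler: parse the request, resolve the target device
 * and hand the entry to its ndo_mdb_add callback.
 */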
6353static int rtnl_mdb_add(struct sk_buff *skb, struct nlmsghdr *nlh,
6354 struct netlink_ext_ack *extack)
6355{
6356 struct nlattr *tb[MDBA_SET_ENTRY_MAX + 1];
6357 struct net *net = sock_net(skb->sk);
6358 struct br_port_msg *bpm;
6359 struct net_device *dev;
6360 int err;
6361
6362 err = nlmsg_parse_deprecated(nlh, sizeof(*bpm), tb,
6363 MDBA_SET_ENTRY_MAX, mdba_policy, extack);
6364 if (err)
6365 return err;
6366
6367 bpm = nlmsg_data(nlh);
6368 if (!bpm->ifindex) {
6369 NL_SET_ERR_MSG(extack, "Invalid ifindex");
6370 return -EINVAL;
6371 }
6372
6373 dev = __dev_get_by_index(net, bpm->ifindex);
6374 if (!dev) {
6375 NL_SET_ERR_MSG(extack, "Device doesn't exist");
6376 return -ENODEV;
6377 }
6378
6379 if (NL_REQ_ATTR_CHECK(extack, NULL, tb, MDBA_SET_ENTRY)) {
6380 NL_SET_ERR_MSG(extack, "Missing MDBA_SET_ENTRY attribute");
6381 return -EINVAL;
6382 }
6383
6384 if (!dev->netdev_ops->ndo_mdb_add) {
6385 NL_SET_ERR_MSG(extack, "Device does not support MDB operations");
6386 return -EOPNOTSUPP;
6387 }
6388
6389 return dev->netdev_ops->ndo_mdb_add(dev, tb, nlh->nlmsg_flags, extack);
6390}
6391
e0cd06f7
IS
6392static int rtnl_validate_mdb_entry_del_bulk(const struct nlattr *attr,
6393 struct netlink_ext_ack *extack)
6394{
6395 struct br_mdb_entry *entry = nla_data(attr);
6396 struct br_mdb_entry zero_entry = {};
6397
6398 if (nla_len(attr) != sizeof(struct br_mdb_entry)) {
6399 NL_SET_ERR_MSG_ATTR(extack, attr, "Invalid attribute length");
6400 return -EINVAL;
6401 }
6402
6403 if (entry->state != MDB_PERMANENT && entry->state != MDB_TEMPORARY) {
6404 NL_SET_ERR_MSG(extack, "Unknown entry state");
6405 return -EINVAL;
6406 }
6407
6408 if (entry->flags) {
6409 NL_SET_ERR_MSG(extack, "Entry flags cannot be set");
6410 return -EINVAL;
6411 }
6412
6413 if (entry->vid >= VLAN_N_VID - 1) {
6414 NL_SET_ERR_MSG(extack, "Invalid entry VLAN id");
6415 return -EINVAL;
6416 }
6417
6418 if (memcmp(&entry->addr, &zero_entry.addr, sizeof(entry->addr))) {
6419 NL_SET_ERR_MSG(extack, "Entry address cannot be set");
6420 return -EINVAL;
6421 }
6422
6423 return 0;
6424}
6425
6426static const struct nla_policy mdba_del_bulk_policy[MDBA_SET_ENTRY_MAX + 1] = {
6427 [MDBA_SET_ENTRY] = NLA_POLICY_VALIDATE_FN(NLA_BINARY,
6428 rtnl_validate_mdb_entry_del_bulk,
6429 sizeof(struct br_mdb_entry)),
6430 [MDBA_SET_ENTRY_ATTRS] = { .type = NLA_NESTED },
6431};
6432
cc7f5022
IS
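/* RTM_DELMDB doit handler: parse with the regular or bulk-delete policy
 * depending on NLM_F_BULK and dispatch to ndo_mdb_del or ndo_mdb_del_bulk.
 */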
6433static int rtnl_mdb_del(struct sk_buff *skb, struct nlmsghdr *nlh,
6434 struct netlink_ext_ack *extack)
6435{
e0cd06f7 6436 bool del_bulk = !!(nlh->nlmsg_flags & NLM_F_BULK);
cc7f5022
IS
6437 struct nlattr *tb[MDBA_SET_ENTRY_MAX + 1];
6438 struct net *net = sock_net(skb->sk);
6439 struct br_port_msg *bpm;
6440 struct net_device *dev;
6441 int err;
6442
e0cd06f7
IS
6443 if (!del_bulk)
6444 err = nlmsg_parse_deprecated(nlh, sizeof(*bpm), tb,
6445 MDBA_SET_ENTRY_MAX, mdba_policy,
6446 extack);
6447 else
6448 err = nlmsg_parse(nlh, sizeof(*bpm), tb, MDBA_SET_ENTRY_MAX,
6449 mdba_del_bulk_policy, extack);
cc7f5022
IS
6450 if (err)
6451 return err;
6452
6453 bpm = nlmsg_data(nlh);
6454 if (!bpm->ifindex) {
6455 NL_SET_ERR_MSG(extack, "Invalid ifindex");
6456 return -EINVAL;
6457 }
6458
6459 dev = __dev_get_by_index(net, bpm->ifindex);
6460 if (!dev) {
6461 NL_SET_ERR_MSG(extack, "Device doesn't exist");
6462 return -ENODEV;
6463 }
6464
6465 if (NL_REQ_ATTR_CHECK(extack, NULL, tb, MDBA_SET_ENTRY)) {
6466 NL_SET_ERR_MSG(extack, "Missing MDBA_SET_ENTRY attribute");
6467 return -EINVAL;
6468 }
6469
d8e81f13
IS
6470 if (del_bulk) {
6471 if (!dev->netdev_ops->ndo_mdb_del_bulk) {
6472 NL_SET_ERR_MSG(extack, "Device does not support MDB bulk deletion");
6473 return -EOPNOTSUPP;
6474 }
6475 return dev->netdev_ops->ndo_mdb_del_bulk(dev, tb, extack);
6476 }
6477
cc7f5022
IS
6478 if (!dev->netdev_ops->ndo_mdb_del) {
6479 NL_SET_ERR_MSG(extack, "Device does not support MDB operations");
6480 return -EOPNOTSUPP;
6481 }
6482
6483 return dev->netdev_ops->ndo_mdb_del(dev, tb, extack);
6484}
6485
1da177e4
LT
6486/* Process one rtnetlink message. */
6487
2d4bc933
JB
6488static int rtnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh,
6489 struct netlink_ext_ack *extack)
1da177e4 6490{
3b1e0a65 6491 struct net *net = sock_net(skb->sk);
addf9b90 6492 struct rtnl_link *link;
12dc5c2c 6493 enum rtnl_kinds kind;
e4202511 6494 struct module *owner;
6853dd48 6495 int err = -EOPNOTSUPP;
e2849863 6496 rtnl_doit_func doit;
62256f98 6497 unsigned int flags;
1da177e4
LT
6498 int family;
6499 int type;
1da177e4 6500
1da177e4 6501 type = nlh->nlmsg_type;
1da177e4 6502 if (type > RTM_MAX)
038890fe 6503 return -EOPNOTSUPP;
1da177e4
LT
6504
6505 type -= RTM_BASE;
6506
6507	/* All messages must carry at least 1 byte of payload (rtgen_family) */
573ce260 6508 if (nlmsg_len(nlh) < sizeof(struct rtgenmsg))
1da177e4
LT
6509 return 0;
6510
573ce260 6511 family = ((struct rtgenmsg *)nlmsg_data(nlh))->rtgen_family;
2e9ea3e3 6512 kind = rtnl_msgtype_kind(type);
1da177e4 6513
12dc5c2c 6514 if (kind != RTNL_KIND_GET && !netlink_net_capable(skb, CAP_NET_ADMIN))
1d00a4eb 6515 return -EPERM;
1da177e4 6516
6853dd48 6517 rcu_read_lock();
12dc5c2c 6518 if (kind == RTNL_KIND_GET && (nlh->nlmsg_flags & NLM_F_DUMP)) {
97c53cac 6519 struct sock *rtnl;
e2849863 6520 rtnl_dumpit_func dumpit;
ebfe3c51 6521 u32 min_dump_alloc = 0;
1da177e4 6522
addf9b90
FW
6523 link = rtnl_get_link(family, type);
6524 if (!link || !link->dumpit) {
6853dd48 6525 family = PF_UNSPEC;
addf9b90
FW
6526 link = rtnl_get_link(family, type);
6527 if (!link || !link->dumpit)
6853dd48
FW
6528 goto err_unlock;
6529 }
e4202511 6530 owner = link->owner;
addf9b90 6531 dumpit = link->dumpit;
386520e0 6532 flags = link->flags;
e1fa6d21 6533
5c2bb9b6 6534 if (type == RTM_GETLINK - RTM_BASE)
e1fa6d21 6535 min_dump_alloc = rtnl_calcit(skb, nlh);
9ac4a169 6536
e4202511
FW
6537 err = 0;
6538 /* need to do this before rcu_read_unlock() */
6539 if (!try_module_get(owner))
6540 err = -EPROTONOSUPPORT;
6541
6853dd48
FW
6542 rcu_read_unlock();
6543
97c53cac 6544 rtnl = net->rtnl;
e4202511 6545 if (err == 0) {
80d326fa
PNA
6546 struct netlink_dump_control c = {
6547 .dump = dumpit,
6548 .min_dump_alloc = min_dump_alloc,
e4202511 6549 .module = owner,
386520e0 6550 .flags = flags,
80d326fa
PNA
6551 };
6552 err = netlink_dump_start(rtnl, skb, nlh, &c);
e4202511
FW
6553 /* netlink_dump_start() will keep a reference on
6554			 * the module if the dump is still in progress.
6555 */
6556 module_put(owner);
80d326fa 6557 }
2907c35f 6558 return err;
1da177e4
LT
6559 }
6560
addf9b90
FW
6561 link = rtnl_get_link(family, type);
6562 if (!link || !link->doit) {
8caa38b5 6563 family = PF_UNSPEC;
addf9b90
FW
6564 link = rtnl_get_link(PF_UNSPEC, type);
6565 if (!link || !link->doit)
6566 goto out_unlock;
8caa38b5
FW
6567 }
6568
e4202511
FW
6569 owner = link->owner;
6570 if (!try_module_get(owner)) {
6571 err = -EPROTONOSUPPORT;
6572 goto out_unlock;
6573 }
6574
addf9b90 6575 flags = link->flags;
a6cec0bc
NA
6576 if (kind == RTNL_KIND_DEL && (nlh->nlmsg_flags & NLM_F_BULK) &&
6577 !(flags & RTNL_FLAG_BULK_DEL_SUPPORTED)) {
6578 NL_SET_ERR_MSG(extack, "Bulk delete is not supported");
5b22f627 6579 module_put(owner);
a6cec0bc
NA
6580 goto err_unlock;
6581 }
6582
62256f98 6583 if (flags & RTNL_FLAG_DOIT_UNLOCKED) {
addf9b90 6584 doit = link->doit;
62256f98
FW
6585 rcu_read_unlock();
6586 if (doit)
6587 err = doit(skb, nlh, extack);
e4202511 6588 module_put(owner);
62256f98
FW
6589 return err;
6590 }
6853dd48 6591 rcu_read_unlock();
1da177e4 6592
6853dd48 6593 rtnl_lock();
addf9b90
FW
6594 link = rtnl_get_link(family, type);
6595 if (link && link->doit)
6596 err = link->doit(skb, nlh, extack);
0cc09020 6597 rtnl_unlock();
addf9b90 6598
e4202511
FW
6599 module_put(owner);
6600
addf9b90
FW
6601 return err;
6602
6603out_unlock:
6604 rcu_read_unlock();
0cc09020
FW
6605 return err;
6606
6607err_unlock:
6853dd48 6608 rcu_read_unlock();
0cc09020 6609 return -EOPNOTSUPP;
1da177e4
LT
6610}
6611
cd40b7d3 6612static void rtnetlink_rcv(struct sk_buff *skb)
1da177e4 6613{
cd40b7d3 6614 netlink_rcv_skb(skb, &rtnetlink_rcv_msg);
1da177e4
LT
6615}
6616
5f729eaa
JG
6617static int rtnetlink_bind(struct net *net, int group)
6618{
6619 switch (group) {
6620 case RTNLGRP_IPV4_MROUTE_R:
6621 case RTNLGRP_IPV6_MROUTE_R:
6622 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
6623 return -EPERM;
6624 break;
6625 }
6626 return 0;
6627}
6628
1da177e4
LT
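/* Translate a subset of netdev notifier events into RTM_NEWLINK
 * notifications for userspace.
 */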
6629static int rtnetlink_event(struct notifier_block *this, unsigned long event, void *ptr)
6630{
351638e7 6631 struct net_device *dev = netdev_notifier_info_to_dev(ptr);
e9dc8653 6632
1da177e4 6633 switch (event) {
5138e86f 6634 case NETDEV_REBOOT:
8a212589 6635 case NETDEV_CHANGEMTU:
3753654e 6636 case NETDEV_CHANGEADDR:
5138e86f
VY
6637 case NETDEV_CHANGENAME:
6638 case NETDEV_FEAT_CHANGE:
6639 case NETDEV_BONDING_FAILOVER:
e6e66594 6640 case NETDEV_POST_TYPE_CHANGE:
5138e86f 6641 case NETDEV_NOTIFY_PEERS:
dc709f37 6642 case NETDEV_CHANGEUPPER:
5138e86f 6643 case NETDEV_RESEND_IGMP:
5138e86f 6644 case NETDEV_CHANGEINFODATA:
eeda3fb9 6645 case NETDEV_CHANGELOWERSTATE:
ebdcf045 6646 case NETDEV_CHANGE_TX_QUEUE_LEN:
3d3ea5af 6647 rtmsg_ifinfo_event(RTM_NEWLINK, dev, 0, rtnl_get_event(event),
1d997f10 6648 GFP_KERNEL, NULL, 0, 0, NULL);
1da177e4
LT
6649 break;
6650 default:
1da177e4
LT
6651 break;
6652 }
6653 return NOTIFY_DONE;
6654}
6655
6656static struct notifier_block rtnetlink_dev_notifier = {
6657 .notifier_call = rtnetlink_event,
6658};
6659
97c53cac 6660
2c8c1e72 6661static int __net_init rtnetlink_net_init(struct net *net)
97c53cac
DL
6662{
6663 struct sock *sk;
a31f2d17
PNA
6664 struct netlink_kernel_cfg cfg = {
6665 .groups = RTNLGRP_MAX,
6666 .input = rtnetlink_rcv,
6667 .cb_mutex = &rtnl_mutex,
9785e10a 6668 .flags = NL_CFG_F_NONROOT_RECV,
5f729eaa 6669 .bind = rtnetlink_bind,
a31f2d17
PNA
6670 };
6671
9f00d977 6672 sk = netlink_kernel_create(net, NETLINK_ROUTE, &cfg);
97c53cac
DL
6673 if (!sk)
6674 return -ENOMEM;
97c53cac
DL
6675 net->rtnl = sk;
6676 return 0;
6677}
6678
2c8c1e72 6679static void __net_exit rtnetlink_net_exit(struct net *net)
97c53cac 6680{
775516bf
DL
6681 netlink_kernel_release(net->rtnl);
6682 net->rtnl = NULL;
97c53cac
DL
6683}
6684
6685static struct pernet_operations rtnetlink_net_ops = {
6686 .init = rtnetlink_net_init,
6687 .exit = rtnetlink_net_exit,
6688};
6689
1da177e4
LT
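/* Register the per-netns NETLINK_ROUTE socket, the netdev notifier and the
 * core rtnetlink message handlers for PF_UNSPEC and PF_BRIDGE.
 */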
6690void __init rtnetlink_init(void)
6691{
97c53cac 6692 if (register_pernet_subsys(&rtnetlink_net_ops))
1da177e4 6693 panic("rtnetlink_init: cannot initialize rtnetlink\n");
97c53cac 6694
1da177e4 6695 register_netdevice_notifier(&rtnetlink_dev_notifier);
340d17fc 6696
c7ac8679 6697 rtnl_register(PF_UNSPEC, RTM_GETLINK, rtnl_getlink,
b97bac64
FW
6698 rtnl_dump_ifinfo, 0);
6699 rtnl_register(PF_UNSPEC, RTM_SETLINK, rtnl_setlink, NULL, 0);
6700 rtnl_register(PF_UNSPEC, RTM_NEWLINK, rtnl_newlink, NULL, 0);
6701 rtnl_register(PF_UNSPEC, RTM_DELLINK, rtnl_dellink, NULL, 0);
687ad8cc 6702
b97bac64
FW
6703 rtnl_register(PF_UNSPEC, RTM_GETADDR, NULL, rtnl_dump_all, 0);
6704 rtnl_register(PF_UNSPEC, RTM_GETROUTE, NULL, rtnl_dump_all, 0);
6705 rtnl_register(PF_UNSPEC, RTM_GETNETCONF, NULL, rtnl_dump_all, 0);
77162022 6706
36fbf1e5
JP
6707 rtnl_register(PF_UNSPEC, RTM_NEWLINKPROP, rtnl_newlinkprop, NULL, 0);
6708 rtnl_register(PF_UNSPEC, RTM_DELLINKPROP, rtnl_dellinkprop, NULL, 0);
6709
b97bac64 6710 rtnl_register(PF_BRIDGE, RTM_NEWNEIGH, rtnl_fdb_add, NULL, 0);
9e834259
NA
6711 rtnl_register(PF_BRIDGE, RTM_DELNEIGH, rtnl_fdb_del, NULL,
6712 RTNL_FLAG_BULK_DEL_SUPPORTED);
5b2f94b2 6713 rtnl_register(PF_BRIDGE, RTM_GETNEIGH, rtnl_fdb_get, rtnl_fdb_dump, 0);
e5a55a89 6714
b97bac64
FW
6715 rtnl_register(PF_BRIDGE, RTM_GETLINK, NULL, rtnl_bridge_getlink, 0);
6716 rtnl_register(PF_BRIDGE, RTM_DELLINK, rtnl_bridge_dellink, NULL, 0);
6717 rtnl_register(PF_BRIDGE, RTM_SETLINK, rtnl_bridge_setlink, NULL, 0);
10c9ead9
RP
6718
6719 rtnl_register(PF_UNSPEC, RTM_GETSTATS, rtnl_stats_get, rtnl_stats_dump,
b97bac64 6720 0);
03ba3566 6721 rtnl_register(PF_UNSPEC, RTM_SETSTATS, rtnl_stats_set, NULL, 0);
cc7f5022 6722
ddd17a54 6723 rtnl_register(PF_BRIDGE, RTM_GETMDB, rtnl_mdb_get, rtnl_mdb_dump, 0);
cc7f5022 6724 rtnl_register(PF_BRIDGE, RTM_NEWMDB, rtnl_mdb_add, NULL, 0);
2601e9c4
IS
6725 rtnl_register(PF_BRIDGE, RTM_DELMDB, rtnl_mdb_del, NULL,
6726 RTNL_FLAG_BULK_DEL_SUPPORTED);
1da177e4 6727}