net: bridge: mdb: move all port and bridge checks to br_mdb_add
net/bridge/br_mdb.c
// SPDX-License-Identifier: GPL-2.0
#include <linux/err.h>
#include <linux/igmp.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/rculist.h>
#include <linux/skbuff.h>
#include <linux/if_ether.h>
#include <net/ip.h>
#include <net/netlink.h>
#include <net/switchdev.h>
#if IS_ENABLED(CONFIG_IPV6)
#include <net/ipv6.h>
#include <net/addrconf.h>
#endif

#include "br_private.h"
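
/* Dump the bridge's multicast router port list as an MDBA_ROUTER nested
 * attribute, one MDBA_ROUTER_PORT entry per router port with its timer
 * value and configured router type.
 */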
static int br_rports_fill_info(struct sk_buff *skb, struct netlink_callback *cb,
			       struct net_device *dev)
{
	struct net_bridge *br = netdev_priv(dev);
	struct net_bridge_port *p;
	struct nlattr *nest, *port_nest;

	if (!br->multicast_router || hlist_empty(&br->router_list))
		return 0;

	nest = nla_nest_start_noflag(skb, MDBA_ROUTER);
	if (nest == NULL)
		return -EMSGSIZE;

	hlist_for_each_entry_rcu(p, &br->router_list, rlist) {
		if (!p)
			continue;
		port_nest = nla_nest_start_noflag(skb, MDBA_ROUTER_PORT);
		if (!port_nest)
			goto fail;
		if (nla_put_nohdr(skb, sizeof(u32), &p->dev->ifindex) ||
		    nla_put_u32(skb, MDBA_ROUTER_PATTR_TIMER,
				br_timer_value(&p->multicast_router_timer)) ||
		    nla_put_u8(skb, MDBA_ROUTER_PATTR_TYPE,
			       p->multicast_router)) {
			nla_nest_cancel(skb, port_nest);
			goto fail;
		}
		nla_nest_end(skb, port_nest);
	}

	nla_nest_end(skb, nest);
	return 0;
fail:
	nla_nest_cancel(skb, nest);
	return -EMSGSIZE;
}

static void __mdb_entry_fill_flags(struct br_mdb_entry *e, unsigned char flags)
{
	e->state = flags & MDB_PG_FLAGS_PERMANENT;
	e->flags = 0;
	if (flags & MDB_PG_FLAGS_OFFLOAD)
		e->flags |= MDB_FLAGS_OFFLOAD;
	if (flags & MDB_PG_FLAGS_FAST_LEAVE)
		e->flags |= MDB_FLAGS_FAST_LEAVE;
}

static void __mdb_entry_to_br_ip(struct br_mdb_entry *entry, struct br_ip *ip)
{
	memset(ip, 0, sizeof(struct br_ip));
	ip->vid = entry->vid;
	ip->proto = entry->addr.proto;
	if (ip->proto == htons(ETH_P_IP))
		ip->u.ip4 = entry->addr.u.ip4;
#if IS_ENABLED(CONFIG_IPV6)
	else
		ip->u.ip6 = entry->addr.u.ip6;
#endif
}
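
/* Dump the source list of a port group as an MDBA_MDB_EATTR_SRC_LIST nested
 * attribute: one MDBA_MDB_SRCLIST_ENTRY per source with its address and
 * remaining timer.
 */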
static int __mdb_fill_srcs(struct sk_buff *skb,
			   struct net_bridge_port_group *p)
{
	struct net_bridge_group_src *ent;
	struct nlattr *nest, *nest_ent;

	if (hlist_empty(&p->src_list))
		return 0;

	nest = nla_nest_start(skb, MDBA_MDB_EATTR_SRC_LIST);
	if (!nest)
		return -EMSGSIZE;

	hlist_for_each_entry_rcu(ent, &p->src_list, node,
				 lockdep_is_held(&p->port->br->multicast_lock)) {
		nest_ent = nla_nest_start(skb, MDBA_MDB_SRCLIST_ENTRY);
		if (!nest_ent)
			goto out_cancel_err;
		switch (ent->addr.proto) {
		case htons(ETH_P_IP):
			if (nla_put_in_addr(skb, MDBA_MDB_SRCATTR_ADDRESS,
					    ent->addr.u.ip4)) {
				nla_nest_cancel(skb, nest_ent);
				goto out_cancel_err;
			}
			break;
#if IS_ENABLED(CONFIG_IPV6)
		case htons(ETH_P_IPV6):
			if (nla_put_in6_addr(skb, MDBA_MDB_SRCATTR_ADDRESS,
					     &ent->addr.u.ip6)) {
				nla_nest_cancel(skb, nest_ent);
				goto out_cancel_err;
			}
			break;
#endif
		default:
			nla_nest_cancel(skb, nest_ent);
			continue;
		}
		if (nla_put_u32(skb, MDBA_MDB_SRCATTR_TIMER,
				br_timer_value(&ent->timer))) {
			nla_nest_cancel(skb, nest_ent);
			goto out_cancel_err;
		}
		nla_nest_end(skb, nest_ent);
	}

	nla_nest_end(skb, nest);

	return 0;

out_cancel_err:
	nla_nest_cancel(skb, nest);
	return -EMSGSIZE;
}
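
/* Fill a single MDBA_MDB_ENTRY_INFO attribute, either for a port group
 * (p != NULL) or for the bridge itself as a host-joined member (p == NULL),
 * including the timer and, for IGMPv3/MLDv2 port groups, the filter mode
 * and source list.
 */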
static int __mdb_fill_info(struct sk_buff *skb,
			   struct net_bridge_mdb_entry *mp,
			   struct net_bridge_port_group *p)
{
	bool dump_srcs_mode = false;
	struct timer_list *mtimer;
	struct nlattr *nest_ent;
	struct br_mdb_entry e;
	u8 flags = 0;
	int ifindex;

	memset(&e, 0, sizeof(e));
	if (p) {
		ifindex = p->port->dev->ifindex;
		mtimer = &p->timer;
		flags = p->flags;
	} else {
		ifindex = mp->br->dev->ifindex;
		mtimer = &mp->timer;
	}

	__mdb_entry_fill_flags(&e, flags);
	e.ifindex = ifindex;
	e.vid = mp->addr.vid;
	if (mp->addr.proto == htons(ETH_P_IP))
		e.addr.u.ip4 = mp->addr.u.ip4;
#if IS_ENABLED(CONFIG_IPV6)
	if (mp->addr.proto == htons(ETH_P_IPV6))
		e.addr.u.ip6 = mp->addr.u.ip6;
#endif
	e.addr.proto = mp->addr.proto;
	nest_ent = nla_nest_start_noflag(skb,
					 MDBA_MDB_ENTRY_INFO);
	if (!nest_ent)
		return -EMSGSIZE;

	if (nla_put_nohdr(skb, sizeof(e), &e) ||
	    nla_put_u32(skb,
			MDBA_MDB_EATTR_TIMER,
			br_timer_value(mtimer))) {
		nla_nest_cancel(skb, nest_ent);
		return -EMSGSIZE;
	}
	switch (mp->addr.proto) {
	case htons(ETH_P_IP):
		dump_srcs_mode = !!(p && mp->br->multicast_igmp_version == 3);
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		dump_srcs_mode = !!(p && mp->br->multicast_mld_version == 2);
		break;
#endif
	}
	if (dump_srcs_mode &&
	    (__mdb_fill_srcs(skb, p) ||
	     nla_put_u8(skb, MDBA_MDB_EATTR_GROUP_MODE, p->filter_mode))) {
		nla_nest_cancel(skb, nest_ent);
		return -EMSGSIZE;
	}

	nla_nest_end(skb, nest_ent);

	return 0;
}
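
/* Walk this bridge's MDB and fill MDBA_MDB/MDBA_MDB_ENTRY attributes into a
 * dump skb, resuming from the entry and port-group indexes saved in
 * cb->args[1] and cb->args[2].
 */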
static int br_mdb_fill_info(struct sk_buff *skb, struct netlink_callback *cb,
			    struct net_device *dev)
{
	int idx = 0, s_idx = cb->args[1], err = 0, pidx = 0, s_pidx = cb->args[2];
	struct net_bridge *br = netdev_priv(dev);
	struct net_bridge_mdb_entry *mp;
	struct nlattr *nest, *nest2;

	if (!br_opt_get(br, BROPT_MULTICAST_ENABLED))
		return 0;

	nest = nla_nest_start_noflag(skb, MDBA_MDB);
	if (nest == NULL)
		return -EMSGSIZE;

	hlist_for_each_entry_rcu(mp, &br->mdb_list, mdb_node) {
		struct net_bridge_port_group *p;
		struct net_bridge_port_group __rcu **pp;

		if (idx < s_idx)
			goto skip;

		nest2 = nla_nest_start_noflag(skb, MDBA_MDB_ENTRY);
		if (!nest2) {
			err = -EMSGSIZE;
			break;
		}

		if (!s_pidx && mp->host_joined) {
			err = __mdb_fill_info(skb, mp, NULL);
			if (err) {
				nla_nest_cancel(skb, nest2);
				break;
			}
		}

		for (pp = &mp->ports; (p = rcu_dereference(*pp)) != NULL;
		     pp = &p->next) {
			if (!p->port)
				continue;
			if (pidx < s_pidx)
				goto skip_pg;

			err = __mdb_fill_info(skb, mp, p);
			if (err) {
				nla_nest_end(skb, nest2);
				goto out;
			}
skip_pg:
			pidx++;
		}
		pidx = 0;
		s_pidx = 0;
		nla_nest_end(skb, nest2);
skip:
		idx++;
	}

out:
	cb->args[1] = idx;
	cb->args[2] = pidx;
	nla_nest_end(skb, nest);
	return err;
}

static int br_mdb_valid_dump_req(const struct nlmsghdr *nlh,
				 struct netlink_ext_ack *extack)
{
	struct br_port_msg *bpm;

	if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*bpm))) {
		NL_SET_ERR_MSG_MOD(extack, "Invalid header for mdb dump request");
		return -EINVAL;
	}

	bpm = nlmsg_data(nlh);
	if (bpm->ifindex) {
		NL_SET_ERR_MSG_MOD(extack, "Filtering by device index is not supported for mdb dump request");
		return -EINVAL;
	}
	if (nlmsg_attrlen(nlh, sizeof(*bpm))) {
		NL_SET_ERR_MSG(extack, "Invalid data after header in mdb dump request");
		return -EINVAL;
	}

	return 0;
}
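
/* RTM_GETMDB dump handler: iterate over all bridge devices in the netns and
 * emit one RTM_GETMDB message per bridge carrying its MDB and router port
 * lists.
 */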
static int br_mdb_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net_device *dev;
	struct net *net = sock_net(skb->sk);
	struct nlmsghdr *nlh = NULL;
	int idx = 0, s_idx;

	if (cb->strict_check) {
		int err = br_mdb_valid_dump_req(cb->nlh, cb->extack);

		if (err < 0)
			return err;
	}

	s_idx = cb->args[0];

	rcu_read_lock();

	cb->seq = net->dev_base_seq;

	for_each_netdev_rcu(net, dev) {
		if (dev->priv_flags & IFF_EBRIDGE) {
			struct br_port_msg *bpm;

			if (idx < s_idx)
				goto skip;

			nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid,
					cb->nlh->nlmsg_seq, RTM_GETMDB,
					sizeof(*bpm), NLM_F_MULTI);
			if (nlh == NULL)
				break;

			bpm = nlmsg_data(nlh);
			memset(bpm, 0, sizeof(*bpm));
			bpm->ifindex = dev->ifindex;
			if (br_mdb_fill_info(skb, cb, dev) < 0)
				goto out;
			if (br_rports_fill_info(skb, cb, dev) < 0)
				goto out;

			cb->args[1] = 0;
			nlmsg_end(skb, nlh);
		skip:
			idx++;
		}
	}

out:
	if (nlh)
		nlmsg_end(skb, nlh);
	rcu_read_unlock();
	cb->args[0] = idx;
	return skb->len;
}

static int nlmsg_populate_mdb_fill(struct sk_buff *skb,
				   struct net_device *dev,
				   struct net_bridge_mdb_entry *mp,
				   struct net_bridge_port_group *pg,
				   int type)
{
	struct nlmsghdr *nlh;
	struct br_port_msg *bpm;
	struct nlattr *nest, *nest2;

	nlh = nlmsg_put(skb, 0, 0, type, sizeof(*bpm), 0);
	if (!nlh)
		return -EMSGSIZE;

	bpm = nlmsg_data(nlh);
	memset(bpm, 0, sizeof(*bpm));
	bpm->family = AF_BRIDGE;
	bpm->ifindex = dev->ifindex;
	nest = nla_nest_start_noflag(skb, MDBA_MDB);
	if (nest == NULL)
		goto cancel;
	nest2 = nla_nest_start_noflag(skb, MDBA_MDB_ENTRY);
	if (nest2 == NULL)
		goto end;

	if (__mdb_fill_info(skb, mp, pg))
		goto end;

	nla_nest_end(skb, nest2);
	nla_nest_end(skb, nest);
	nlmsg_end(skb, nlh);
	return 0;

end:
	nla_nest_end(skb, nest);
cancel:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}
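
/* Size an MDB notification: the base message plus, for IGMPv3/MLDv2 port
 * groups, room for the filter mode and each source list entry.
 */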
static size_t rtnl_mdb_nlmsg_size(struct net_bridge_port_group *pg)
{
	size_t nlmsg_size = NLMSG_ALIGN(sizeof(struct br_port_msg)) +
			    nla_total_size(sizeof(struct br_mdb_entry)) +
			    nla_total_size(sizeof(u32));
	struct net_bridge_group_src *ent;
	size_t addr_size = 0;

	if (!pg)
		goto out;

	switch (pg->addr.proto) {
	case htons(ETH_P_IP):
		if (pg->port->br->multicast_igmp_version == 2)
			goto out;
		addr_size = sizeof(__be32);
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		if (pg->port->br->multicast_mld_version == 1)
			goto out;
		addr_size = sizeof(struct in6_addr);
		break;
#endif
	}

	/* MDBA_MDB_EATTR_GROUP_MODE */
	nlmsg_size += nla_total_size(sizeof(u8));

	/* MDBA_MDB_EATTR_SRC_LIST nested attr */
	if (!hlist_empty(&pg->src_list))
		nlmsg_size += nla_total_size(0);

	hlist_for_each_entry(ent, &pg->src_list, node) {
		/* MDBA_MDB_SRCLIST_ENTRY nested attr +
		 * MDBA_MDB_SRCATTR_ADDRESS + MDBA_MDB_SRCATTR_TIMER
		 */
		nlmsg_size += nla_total_size(0) +
			      nla_total_size(addr_size) +
			      nla_total_size(sizeof(u32));
	}
out:
	return nlmsg_size;
}
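
/* Deferred switchdev completion callback: once the hardware has accepted the
 * MDB entry, mark the matching port group as offloaded.
 */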
struct br_mdb_complete_info {
	struct net_bridge_port *port;
	struct br_ip ip;
};

static void br_mdb_complete(struct net_device *dev, int err, void *priv)
{
	struct br_mdb_complete_info *data = priv;
	struct net_bridge_port_group __rcu **pp;
	struct net_bridge_port_group *p;
	struct net_bridge_mdb_entry *mp;
	struct net_bridge_port *port = data->port;
	struct net_bridge *br = port->br;

	if (err)
		goto err;

	spin_lock_bh(&br->multicast_lock);
	mp = br_mdb_ip_get(br, &data->ip);
	if (!mp)
		goto out;
	for (pp = &mp->ports; (p = mlock_dereference(*pp, br)) != NULL;
	     pp = &p->next) {
		if (p->port != port)
			continue;
		p->flags |= MDB_PG_FLAGS_OFFLOAD;
	}
out:
	spin_unlock_bh(&br->multicast_lock);
err:
	kfree(priv);
}

static void br_mdb_switchdev_host_port(struct net_device *dev,
				       struct net_device *lower_dev,
				       struct net_bridge_mdb_entry *mp,
				       int type)
{
	struct switchdev_obj_port_mdb mdb = {
		.obj = {
			.id = SWITCHDEV_OBJ_ID_HOST_MDB,
			.flags = SWITCHDEV_F_DEFER,
		},
		.vid = mp->addr.vid,
	};

	if (mp->addr.proto == htons(ETH_P_IP))
		ip_eth_mc_map(mp->addr.u.ip4, mdb.addr);
#if IS_ENABLED(CONFIG_IPV6)
	else
		ipv6_eth_mc_map(&mp->addr.u.ip6, mdb.addr);
#endif

	mdb.obj.orig_dev = dev;
	switch (type) {
	case RTM_NEWMDB:
		switchdev_port_obj_add(lower_dev, &mdb.obj, NULL);
		break;
	case RTM_DELMDB:
		switchdev_port_obj_del(lower_dev, &mdb.obj);
		break;
	}
}

static void br_mdb_switchdev_host(struct net_device *dev,
				  struct net_bridge_mdb_entry *mp, int type)
{
	struct net_device *lower_dev;
	struct list_head *iter;

	netdev_for_each_lower_dev(dev, lower_dev, iter)
		br_mdb_switchdev_host_port(dev, lower_dev, mp, type);
}
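
/* Notify about an MDB change: program the entry into switchdev hardware
 * (port group, or host-joined entry via the bridge's lower devices) and send
 * an RTM_NEWMDB/RTM_DELMDB notification on RTNLGRP_MDB.
 */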
void br_mdb_notify(struct net_device *dev,
		   struct net_bridge_mdb_entry *mp,
		   struct net_bridge_port_group *pg,
		   int type)
{
	struct br_mdb_complete_info *complete_info;
	struct switchdev_obj_port_mdb mdb = {
		.obj = {
			.id = SWITCHDEV_OBJ_ID_PORT_MDB,
			.flags = SWITCHDEV_F_DEFER,
		},
		.vid = mp->addr.vid,
	};
	struct net *net = dev_net(dev);
	struct sk_buff *skb;
	int err = -ENOBUFS;

	if (pg) {
		if (mp->addr.proto == htons(ETH_P_IP))
			ip_eth_mc_map(mp->addr.u.ip4, mdb.addr);
#if IS_ENABLED(CONFIG_IPV6)
		else
			ipv6_eth_mc_map(&mp->addr.u.ip6, mdb.addr);
#endif
		mdb.obj.orig_dev = pg->port->dev;
		switch (type) {
		case RTM_NEWMDB:
			complete_info = kmalloc(sizeof(*complete_info), GFP_ATOMIC);
			if (!complete_info)
				break;
			complete_info->port = pg->port;
			complete_info->ip = mp->addr;
			mdb.obj.complete_priv = complete_info;
			mdb.obj.complete = br_mdb_complete;
			if (switchdev_port_obj_add(pg->port->dev, &mdb.obj, NULL))
				kfree(complete_info);
			break;
		case RTM_DELMDB:
			switchdev_port_obj_del(pg->port->dev, &mdb.obj);
			break;
		}
	} else {
		br_mdb_switchdev_host(dev, mp, type);
	}

	skb = nlmsg_new(rtnl_mdb_nlmsg_size(pg), GFP_ATOMIC);
	if (!skb)
		goto errout;

	err = nlmsg_populate_mdb_fill(skb, dev, mp, pg, type);
	if (err < 0) {
		kfree_skb(skb);
		goto errout;
	}

	rtnl_notify(skb, net, 0, RTNLGRP_MDB, NULL, GFP_ATOMIC);
	return;
errout:
	rtnl_set_sk_err(net, RTNLGRP_MDB, err);
}

static int nlmsg_populate_rtr_fill(struct sk_buff *skb,
				   struct net_device *dev,
				   int ifindex, u32 pid,
				   u32 seq, int type, unsigned int flags)
{
	struct br_port_msg *bpm;
	struct nlmsghdr *nlh;
	struct nlattr *nest;

	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*bpm), 0);
	if (!nlh)
		return -EMSGSIZE;

	bpm = nlmsg_data(nlh);
	memset(bpm, 0, sizeof(*bpm));
	bpm->family = AF_BRIDGE;
	bpm->ifindex = dev->ifindex;
	nest = nla_nest_start_noflag(skb, MDBA_ROUTER);
	if (!nest)
		goto cancel;

	if (nla_put_u32(skb, MDBA_ROUTER_PORT, ifindex))
		goto end;

	nla_nest_end(skb, nest);
	nlmsg_end(skb, nlh);
	return 0;

end:
	nla_nest_end(skb, nest);
cancel:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

static inline size_t rtnl_rtr_nlmsg_size(void)
{
	return NLMSG_ALIGN(sizeof(struct br_port_msg))
		+ nla_total_size(sizeof(__u32));
}

void br_rtr_notify(struct net_device *dev, struct net_bridge_port *port,
		   int type)
{
	struct net *net = dev_net(dev);
	struct sk_buff *skb;
	int err = -ENOBUFS;
	int ifindex;

	ifindex = port ? port->dev->ifindex : 0;
	skb = nlmsg_new(rtnl_rtr_nlmsg_size(), GFP_ATOMIC);
	if (!skb)
		goto errout;

	err = nlmsg_populate_rtr_fill(skb, dev, ifindex, 0, 0, type, NTF_SELF);
	if (err < 0) {
		kfree_skb(skb);
		goto errout;
	}

	rtnl_notify(skb, net, 0, RTNLGRP_MDB, NULL, GFP_ATOMIC);
	return;

errout:
	rtnl_set_sk_err(net, RTNLGRP_MDB, err);
}
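
/* Sanity-check a user supplied br_mdb_entry: non-zero port ifindex, a
 * routable multicast group address, a known state and a valid VLAN id.
 */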
static bool is_valid_mdb_entry(struct br_mdb_entry *entry,
			       struct netlink_ext_ack *extack)
{
	if (entry->ifindex == 0) {
		NL_SET_ERR_MSG_MOD(extack, "Zero entry ifindex is not allowed");
		return false;
	}

	if (entry->addr.proto == htons(ETH_P_IP)) {
		if (!ipv4_is_multicast(entry->addr.u.ip4)) {
			NL_SET_ERR_MSG_MOD(extack, "IPv4 entry group address is not multicast");
			return false;
		}
		if (ipv4_is_local_multicast(entry->addr.u.ip4)) {
			NL_SET_ERR_MSG_MOD(extack, "IPv4 entry group address is local multicast");
			return false;
		}
#if IS_ENABLED(CONFIG_IPV6)
	} else if (entry->addr.proto == htons(ETH_P_IPV6)) {
		if (ipv6_addr_is_ll_all_nodes(&entry->addr.u.ip6)) {
			NL_SET_ERR_MSG_MOD(extack, "IPv6 entry group address is link-local all nodes");
			return false;
		}
#endif
	} else {
		NL_SET_ERR_MSG_MOD(extack, "Unknown entry protocol");
		return false;
	}

	if (entry->state != MDB_PERMANENT && entry->state != MDB_TEMPORARY) {
		NL_SET_ERR_MSG_MOD(extack, "Unknown entry state");
		return false;
	}
	if (entry->vid >= VLAN_VID_MASK) {
		NL_SET_ERR_MSG_MOD(extack, "Invalid entry VLAN id");
		return false;
	}

	return true;
}

static int br_mdb_parse(struct sk_buff *skb, struct nlmsghdr *nlh,
			struct net_device **pdev, struct br_mdb_entry **pentry,
			struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct br_mdb_entry *entry;
	struct br_port_msg *bpm;
	struct nlattr *tb[MDBA_SET_ENTRY_MAX+1];
	struct net_device *dev;
	int err;

	err = nlmsg_parse_deprecated(nlh, sizeof(*bpm), tb,
				     MDBA_SET_ENTRY_MAX, NULL, NULL);
	if (err < 0)
		return err;

	bpm = nlmsg_data(nlh);
	if (bpm->ifindex == 0) {
		NL_SET_ERR_MSG_MOD(extack, "Invalid bridge ifindex");
		return -EINVAL;
	}

	dev = __dev_get_by_index(net, bpm->ifindex);
	if (dev == NULL) {
		NL_SET_ERR_MSG_MOD(extack, "Bridge device doesn't exist");
		return -ENODEV;
	}

	if (!(dev->priv_flags & IFF_EBRIDGE)) {
		NL_SET_ERR_MSG_MOD(extack, "Device is not a bridge");
		return -EOPNOTSUPP;
	}

	*pdev = dev;

	if (!tb[MDBA_SET_ENTRY]) {
		NL_SET_ERR_MSG_MOD(extack, "Missing MDBA_SET_ENTRY attribute");
		return -EINVAL;
	}
	if (nla_len(tb[MDBA_SET_ENTRY]) != sizeof(struct br_mdb_entry)) {
		NL_SET_ERR_MSG_MOD(extack, "Invalid MDBA_SET_ENTRY attribute length");
		return -EINVAL;
	}

	entry = nla_data(tb[MDBA_SET_ENTRY]);
	if (!is_valid_mdb_entry(entry, extack))
		return -EINVAL;
	*pentry = entry;

	return 0;
}
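
/* Add a multicast group membership for @port, or a host join when @port is
 * NULL, under br->multicast_lock; temporary entries get a membership timer.
 */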
static int br_mdb_add_group(struct net_bridge *br, struct net_bridge_port *port,
			    struct br_ip *group, struct br_mdb_entry *entry)
{
	struct net_bridge_mdb_entry *mp;
	struct net_bridge_port_group *p;
	struct net_bridge_port_group __rcu **pp;
	unsigned long now = jiffies;
	int err;

	mp = br_mdb_ip_get(br, group);
	if (!mp) {
		mp = br_multicast_new_group(br, group);
		err = PTR_ERR_OR_ZERO(mp);
		if (err)
			return err;
	}

	/* host join */
	if (!port) {
		/* don't allow any flags for host-joined groups */
		if (entry->state)
			return -EINVAL;
		if (mp->host_joined)
			return -EEXIST;

		br_multicast_host_join(mp, false);
		br_mdb_notify(br->dev, mp, NULL, RTM_NEWMDB);

		return 0;
	}

	for (pp = &mp->ports;
	     (p = mlock_dereference(*pp, br)) != NULL;
	     pp = &p->next) {
		if (p->port == port)
			return -EEXIST;
		if ((unsigned long)p->port < (unsigned long)port)
			break;
	}

	p = br_multicast_new_port_group(port, group, *pp, entry->state, NULL,
					MCAST_EXCLUDE);
	if (unlikely(!p))
		return -ENOMEM;
	rcu_assign_pointer(*pp, p);
	if (entry->state == MDB_TEMPORARY)
		mod_timer(&p->timer, now + br->multicast_membership_interval);
	br_mdb_notify(br->dev, mp, p, RTM_NEWMDB);

	return 0;
}

static int __br_mdb_add(struct net *net, struct net_bridge *br,
			struct net_bridge_port *p,
			struct br_mdb_entry *entry)
{
	struct br_ip ip;
	int ret;

	__mdb_entry_to_br_ip(entry, &ip);

	spin_lock_bh(&br->multicast_lock);
	ret = br_mdb_add_group(br, p, &ip, entry);
	spin_unlock_bh(&br->multicast_lock);

	return ret;
}
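
/* RTM_NEWMDB handler (e.g. iproute2's "bridge mdb add dev br0 port swp1
 * grp 239.1.1.1 permanent"): validate the request, resolve the bridge and
 * port, and add the entry; with VLAN filtering enabled and no VLAN given,
 * the entry is installed on every VLAN configured on the port.
 */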
static int br_mdb_add(struct sk_buff *skb, struct nlmsghdr *nlh,
		      struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct net_bridge_vlan_group *vg;
	struct net_bridge_port *p = NULL;
	struct net_device *dev, *pdev;
	struct br_mdb_entry *entry;
	struct net_bridge_vlan *v;
	struct net_bridge *br;
	int err;

	err = br_mdb_parse(skb, nlh, &dev, &entry, extack);
	if (err < 0)
		return err;

	br = netdev_priv(dev);

	if (!netif_running(br->dev) || !br_opt_get(br, BROPT_MULTICAST_ENABLED))
		return -EINVAL;

	if (entry->ifindex != br->dev->ifindex) {
		pdev = __dev_get_by_index(net, entry->ifindex);
		if (!pdev)
			return -ENODEV;

		p = br_port_get_rtnl(pdev);
		if (!p || p->br != br || p->state == BR_STATE_DISABLED)
			return -EINVAL;
		vg = nbp_vlan_group(p);
	} else {
		vg = br_vlan_group(br);
	}

	/* If vlan filtering is enabled and VLAN is not specified
	 * install mdb entry on all vlans configured on the port.
	 */
	if (br_vlan_enabled(br->dev) && vg && entry->vid == 0) {
		list_for_each_entry(v, &vg->vlan_list, vlist) {
			entry->vid = v->vid;
			err = __br_mdb_add(net, br, p, entry);
			if (err)
				break;
		}
	} else {
		err = __br_mdb_add(net, br, p, entry);
	}

	return err;
}
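
/* Remove a single MDB entry under br->multicast_lock: either drop the
 * bridge's host-joined membership or delete the matching port group.
 */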
static int __br_mdb_del(struct net_bridge *br, struct br_mdb_entry *entry)
{
	struct net_bridge_mdb_entry *mp;
	struct net_bridge_port_group *p;
	struct net_bridge_port_group __rcu **pp;
	struct br_ip ip;
	int err = -EINVAL;

	if (!netif_running(br->dev) || !br_opt_get(br, BROPT_MULTICAST_ENABLED))
		return -EINVAL;

	__mdb_entry_to_br_ip(entry, &ip);

	spin_lock_bh(&br->multicast_lock);
	mp = br_mdb_ip_get(br, &ip);
	if (!mp)
		goto unlock;

	/* host leave */
	if (entry->ifindex == mp->br->dev->ifindex && mp->host_joined) {
		br_multicast_host_leave(mp, false);
		err = 0;
		br_mdb_notify(br->dev, mp, NULL, RTM_DELMDB);
		if (!mp->ports && netif_running(br->dev))
			mod_timer(&mp->timer, jiffies);
		goto unlock;
	}

	for (pp = &mp->ports;
	     (p = mlock_dereference(*pp, br)) != NULL;
	     pp = &p->next) {
		if (!p->port || p->port->dev->ifindex != entry->ifindex)
			continue;

		if (p->port->state == BR_STATE_DISABLED)
			goto unlock;

		br_multicast_del_pg(mp, p, pp);
		err = 0;
		break;
	}

unlock:
	spin_unlock_bh(&br->multicast_lock);
	return err;
}

static int br_mdb_del(struct sk_buff *skb, struct nlmsghdr *nlh,
		      struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct net_bridge_vlan_group *vg;
	struct net_bridge_port *p = NULL;
	struct net_device *dev, *pdev;
	struct br_mdb_entry *entry;
	struct net_bridge_vlan *v;
	struct net_bridge *br;
	int err;

	err = br_mdb_parse(skb, nlh, &dev, &entry, extack);
	if (err < 0)
		return err;

	br = netdev_priv(dev);

	if (entry->ifindex != br->dev->ifindex) {
		pdev = __dev_get_by_index(net, entry->ifindex);
		if (!pdev)
			return -ENODEV;

		p = br_port_get_rtnl(pdev);
		if (!p || p->br != br || p->state == BR_STATE_DISABLED)
			return -EINVAL;
		vg = nbp_vlan_group(p);
	} else {
		vg = br_vlan_group(br);
	}

	/* If vlan filtering is enabled and VLAN is not specified
	 * delete mdb entry on all vlans configured on the port.
	 */
	if (br_vlan_enabled(br->dev) && vg && entry->vid == 0) {
		list_for_each_entry(v, &vg->vlan_list, vlist) {
			entry->vid = v->vid;
			err = __br_mdb_del(br, entry);
		}
	} else {
		err = __br_mdb_del(br, entry);
	}

	return err;
}

void br_mdb_init(void)
{
	rtnl_register_module(THIS_MODULE, PF_BRIDGE, RTM_GETMDB, NULL, br_mdb_dump, 0);
	rtnl_register_module(THIS_MODULE, PF_BRIDGE, RTM_NEWMDB, br_mdb_add, NULL, 0);
	rtnl_register_module(THIS_MODULE, PF_BRIDGE, RTM_DELMDB, br_mdb_del, NULL, 0);
}

void br_mdb_uninit(void)
{
	rtnl_unregister(PF_BRIDGE, RTM_GETMDB);
	rtnl_unregister(PF_BRIDGE, RTM_NEWMDB);
	rtnl_unregister(PF_BRIDGE, RTM_DELMDB);
}