net: bridge: vlan: enable mcast snooping for existing master vlans
net/bridge/br_mdb.c
// SPDX-License-Identifier: GPL-2.0
#include <linux/err.h>
#include <linux/igmp.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/rculist.h>
#include <linux/skbuff.h>
#include <linux/if_ether.h>
#include <net/ip.h>
#include <net/netlink.h>
#include <net/switchdev.h>
#if IS_ENABLED(CONFIG_IPV6)
#include <net/ipv6.h>
#include <net/addrconf.h>
#endif

#include "br_private.h"

static bool
br_ip4_rports_get_timer(struct net_bridge_mcast_port *pmctx,
			unsigned long *timer)
{
	*timer = br_timer_value(&pmctx->ip4_mc_router_timer);
	return !hlist_unhashed(&pmctx->ip4_rlist);
}

static bool
br_ip6_rports_get_timer(struct net_bridge_mcast_port *pmctx,
			unsigned long *timer)
{
#if IS_ENABLED(CONFIG_IPV6)
	*timer = br_timer_value(&pmctx->ip6_mc_router_timer);
	return !hlist_unhashed(&pmctx->ip6_rlist);
#else
	*timer = 0;
	return false;
#endif
}

int br_rports_fill_info(struct sk_buff *skb,
			const struct net_bridge_mcast *brmctx)
{
	u16 vid = brmctx->vlan ? brmctx->vlan->vid : 0;
	bool have_ip4_mc_rtr, have_ip6_mc_rtr;
	unsigned long ip4_timer, ip6_timer;
	struct nlattr *nest, *port_nest;
	struct net_bridge_port *p;

	if (!brmctx->multicast_router || !br_rports_have_mc_router(brmctx))
		return 0;

	nest = nla_nest_start_noflag(skb, MDBA_ROUTER);
	if (nest == NULL)
		return -EMSGSIZE;

	list_for_each_entry_rcu(p, &brmctx->br->port_list, list) {
		struct net_bridge_mcast_port *pmctx;

		if (vid) {
			struct net_bridge_vlan *v;

			v = br_vlan_find(nbp_vlan_group(p), vid);
			if (!v)
				continue;
			pmctx = &v->port_mcast_ctx;
		} else {
			pmctx = &p->multicast_ctx;
		}

		have_ip4_mc_rtr = br_ip4_rports_get_timer(pmctx, &ip4_timer);
		have_ip6_mc_rtr = br_ip6_rports_get_timer(pmctx, &ip6_timer);

		if (!have_ip4_mc_rtr && !have_ip6_mc_rtr)
			continue;

		port_nest = nla_nest_start_noflag(skb, MDBA_ROUTER_PORT);
		if (!port_nest)
			goto fail;

		if (nla_put_nohdr(skb, sizeof(u32), &p->dev->ifindex) ||
		    nla_put_u32(skb, MDBA_ROUTER_PATTR_TIMER,
				max(ip4_timer, ip6_timer)) ||
		    nla_put_u8(skb, MDBA_ROUTER_PATTR_TYPE,
			       p->multicast_ctx.multicast_router) ||
		    (have_ip4_mc_rtr &&
		     nla_put_u32(skb, MDBA_ROUTER_PATTR_INET_TIMER,
				 ip4_timer)) ||
		    (have_ip6_mc_rtr &&
		     nla_put_u32(skb, MDBA_ROUTER_PATTR_INET6_TIMER,
				 ip6_timer)) ||
		    (vid && nla_put_u16(skb, MDBA_ROUTER_PATTR_VID, vid))) {
			nla_nest_cancel(skb, port_nest);
			goto fail;
		}
		nla_nest_end(skb, port_nest);
	}

	nla_nest_end(skb, nest);
	return 0;
fail:
	nla_nest_cancel(skb, nest);
	return -EMSGSIZE;
}

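/* Editor's sketch (not part of the original file): the attribute layout that
 * br_rports_fill_info() above emits, reconstructed from its nla_put*() calls:
 *
 *	MDBA_ROUTER (nest)
 *	  MDBA_ROUTER_PORT (nest, one per detected router port)
 *	    ifindex                       (u32, no header via nla_put_nohdr())
 *	    MDBA_ROUTER_PATTR_TIMER       (u32, max of the IPv4/IPv6 timers)
 *	    MDBA_ROUTER_PATTR_TYPE        (u8, configured multicast_router mode)
 *	    MDBA_ROUTER_PATTR_INET_TIMER  (u32, only if an IPv4 router was learned)
 *	    MDBA_ROUTER_PATTR_INET6_TIMER (u32, only if an IPv6 router was learned)
 *	    MDBA_ROUTER_PATTR_VID         (u16, only for per-vlan contexts)
 */
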
static void __mdb_entry_fill_flags(struct br_mdb_entry *e, unsigned char flags)
{
	e->state = flags & MDB_PG_FLAGS_PERMANENT;
	e->flags = 0;
	if (flags & MDB_PG_FLAGS_OFFLOAD)
		e->flags |= MDB_FLAGS_OFFLOAD;
	if (flags & MDB_PG_FLAGS_FAST_LEAVE)
		e->flags |= MDB_FLAGS_FAST_LEAVE;
	if (flags & MDB_PG_FLAGS_STAR_EXCL)
		e->flags |= MDB_FLAGS_STAR_EXCL;
	if (flags & MDB_PG_FLAGS_BLOCKED)
		e->flags |= MDB_FLAGS_BLOCKED;
}

static void __mdb_entry_to_br_ip(struct br_mdb_entry *entry, struct br_ip *ip,
				 struct nlattr **mdb_attrs)
{
	memset(ip, 0, sizeof(struct br_ip));
	ip->vid = entry->vid;
	ip->proto = entry->addr.proto;
	switch (ip->proto) {
	case htons(ETH_P_IP):
		ip->dst.ip4 = entry->addr.u.ip4;
		if (mdb_attrs && mdb_attrs[MDBE_ATTR_SOURCE])
			ip->src.ip4 = nla_get_in_addr(mdb_attrs[MDBE_ATTR_SOURCE]);
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		ip->dst.ip6 = entry->addr.u.ip6;
		if (mdb_attrs && mdb_attrs[MDBE_ATTR_SOURCE])
			ip->src.ip6 = nla_get_in6_addr(mdb_attrs[MDBE_ATTR_SOURCE]);
		break;
#endif
	default:
		ether_addr_copy(ip->dst.mac_addr, entry->addr.u.mac_addr);
	}
}

static int __mdb_fill_srcs(struct sk_buff *skb,
			   struct net_bridge_port_group *p)
{
	struct net_bridge_group_src *ent;
	struct nlattr *nest, *nest_ent;

	if (hlist_empty(&p->src_list))
		return 0;

	nest = nla_nest_start(skb, MDBA_MDB_EATTR_SRC_LIST);
	if (!nest)
		return -EMSGSIZE;

	hlist_for_each_entry_rcu(ent, &p->src_list, node,
				 lockdep_is_held(&p->key.port->br->multicast_lock)) {
		nest_ent = nla_nest_start(skb, MDBA_MDB_SRCLIST_ENTRY);
		if (!nest_ent)
			goto out_cancel_err;
		switch (ent->addr.proto) {
		case htons(ETH_P_IP):
			if (nla_put_in_addr(skb, MDBA_MDB_SRCATTR_ADDRESS,
					    ent->addr.src.ip4)) {
				nla_nest_cancel(skb, nest_ent);
				goto out_cancel_err;
			}
			break;
#if IS_ENABLED(CONFIG_IPV6)
		case htons(ETH_P_IPV6):
			if (nla_put_in6_addr(skb, MDBA_MDB_SRCATTR_ADDRESS,
					     &ent->addr.src.ip6)) {
				nla_nest_cancel(skb, nest_ent);
				goto out_cancel_err;
			}
			break;
#endif
		default:
			nla_nest_cancel(skb, nest_ent);
			continue;
		}
		if (nla_put_u32(skb, MDBA_MDB_SRCATTR_TIMER,
				br_timer_value(&ent->timer))) {
			nla_nest_cancel(skb, nest_ent);
			goto out_cancel_err;
		}
		nla_nest_end(skb, nest_ent);
	}

	nla_nest_end(skb, nest);

	return 0;

out_cancel_err:
	nla_nest_cancel(skb, nest);
	return -EMSGSIZE;
}

static int __mdb_fill_info(struct sk_buff *skb,
			   struct net_bridge_mdb_entry *mp,
			   struct net_bridge_port_group *p)
{
	bool dump_srcs_mode = false;
	struct timer_list *mtimer;
	struct nlattr *nest_ent;
	struct br_mdb_entry e;
	u8 flags = 0;
	int ifindex;

	memset(&e, 0, sizeof(e));
	if (p) {
		ifindex = p->key.port->dev->ifindex;
		mtimer = &p->timer;
		flags = p->flags;
	} else {
		ifindex = mp->br->dev->ifindex;
		mtimer = &mp->timer;
	}

	__mdb_entry_fill_flags(&e, flags);
	e.ifindex = ifindex;
	e.vid = mp->addr.vid;
	if (mp->addr.proto == htons(ETH_P_IP))
		e.addr.u.ip4 = mp->addr.dst.ip4;
#if IS_ENABLED(CONFIG_IPV6)
	else if (mp->addr.proto == htons(ETH_P_IPV6))
		e.addr.u.ip6 = mp->addr.dst.ip6;
#endif
	else
		ether_addr_copy(e.addr.u.mac_addr, mp->addr.dst.mac_addr);
	e.addr.proto = mp->addr.proto;
	nest_ent = nla_nest_start_noflag(skb,
					 MDBA_MDB_ENTRY_INFO);
	if (!nest_ent)
		return -EMSGSIZE;

	if (nla_put_nohdr(skb, sizeof(e), &e) ||
	    nla_put_u32(skb,
			MDBA_MDB_EATTR_TIMER,
			br_timer_value(mtimer)))
		goto nest_err;

	switch (mp->addr.proto) {
	case htons(ETH_P_IP):
		dump_srcs_mode = !!(mp->br->multicast_ctx.multicast_igmp_version == 3);
		if (mp->addr.src.ip4) {
			if (nla_put_in_addr(skb, MDBA_MDB_EATTR_SOURCE,
					    mp->addr.src.ip4))
				goto nest_err;
			break;
		}
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		dump_srcs_mode = !!(mp->br->multicast_ctx.multicast_mld_version == 2);
		if (!ipv6_addr_any(&mp->addr.src.ip6)) {
			if (nla_put_in6_addr(skb, MDBA_MDB_EATTR_SOURCE,
					     &mp->addr.src.ip6))
				goto nest_err;
			break;
		}
		break;
#endif
	default:
		ether_addr_copy(e.addr.u.mac_addr, mp->addr.dst.mac_addr);
	}
	if (p) {
		if (nla_put_u8(skb, MDBA_MDB_EATTR_RTPROT, p->rt_protocol))
			goto nest_err;
		if (dump_srcs_mode &&
		    (__mdb_fill_srcs(skb, p) ||
		     nla_put_u8(skb, MDBA_MDB_EATTR_GROUP_MODE,
				p->filter_mode)))
			goto nest_err;
	}
	nla_nest_end(skb, nest_ent);

	return 0;

nest_err:
	nla_nest_cancel(skb, nest_ent);
	return -EMSGSIZE;
}

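/* Editor's sketch (not part of the original file): one dumped group entry as
 * built by __mdb_fill_info()/__mdb_fill_srcs() above:
 *
 *	MDBA_MDB_ENTRY_INFO (nest)
 *	  struct br_mdb_entry            (no header via nla_put_nohdr())
 *	  MDBA_MDB_EATTR_TIMER           (u32)
 *	  MDBA_MDB_EATTR_SOURCE          (only for S,G entries)
 *	  MDBA_MDB_EATTR_RTPROT          (u8, only for port entries)
 *	  MDBA_MDB_EATTR_GROUP_MODE      (u8, only with IGMPv3/MLDv2)
 *	  MDBA_MDB_EATTR_SRC_LIST (nest, only with IGMPv3/MLDv2)
 *	    MDBA_MDB_SRCLIST_ENTRY (nest, one per source)
 *	      MDBA_MDB_SRCATTR_ADDRESS
 *	      MDBA_MDB_SRCATTR_TIMER
 */
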
static int br_mdb_fill_info(struct sk_buff *skb, struct netlink_callback *cb,
			    struct net_device *dev)
{
	int idx = 0, s_idx = cb->args[1], err = 0, pidx = 0, s_pidx = cb->args[2];
	struct net_bridge *br = netdev_priv(dev);
	struct net_bridge_mdb_entry *mp;
	struct nlattr *nest, *nest2;

	if (!br_opt_get(br, BROPT_MULTICAST_ENABLED))
		return 0;

	nest = nla_nest_start_noflag(skb, MDBA_MDB);
	if (nest == NULL)
		return -EMSGSIZE;

	hlist_for_each_entry_rcu(mp, &br->mdb_list, mdb_node) {
		struct net_bridge_port_group *p;
		struct net_bridge_port_group __rcu **pp;

		if (idx < s_idx)
			goto skip;

		nest2 = nla_nest_start_noflag(skb, MDBA_MDB_ENTRY);
		if (!nest2) {
			err = -EMSGSIZE;
			break;
		}

		if (!s_pidx && mp->host_joined) {
			err = __mdb_fill_info(skb, mp, NULL);
			if (err) {
				nla_nest_cancel(skb, nest2);
				break;
			}
		}

		for (pp = &mp->ports; (p = rcu_dereference(*pp)) != NULL;
		     pp = &p->next) {
			if (!p->key.port)
				continue;
			if (pidx < s_pidx)
				goto skip_pg;

			err = __mdb_fill_info(skb, mp, p);
			if (err) {
				nla_nest_end(skb, nest2);
				goto out;
			}
skip_pg:
			pidx++;
		}
		pidx = 0;
		s_pidx = 0;
		nla_nest_end(skb, nest2);
skip:
		idx++;
	}

out:
	cb->args[1] = idx;
	cb->args[2] = pidx;
	nla_nest_end(skb, nest);
	return err;
}

static int br_mdb_valid_dump_req(const struct nlmsghdr *nlh,
				 struct netlink_ext_ack *extack)
{
	struct br_port_msg *bpm;

	if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*bpm))) {
		NL_SET_ERR_MSG_MOD(extack, "Invalid header for mdb dump request");
		return -EINVAL;
	}

	bpm = nlmsg_data(nlh);
	if (bpm->ifindex) {
		NL_SET_ERR_MSG_MOD(extack, "Filtering by device index is not supported for mdb dump request");
		return -EINVAL;
	}
	if (nlmsg_attrlen(nlh, sizeof(*bpm))) {
		NL_SET_ERR_MSG(extack, "Invalid data after header in mdb dump request");
		return -EINVAL;
	}

	return 0;
}

static int br_mdb_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net_device *dev;
	struct net *net = sock_net(skb->sk);
	struct nlmsghdr *nlh = NULL;
	int idx = 0, s_idx;

	if (cb->strict_check) {
		int err = br_mdb_valid_dump_req(cb->nlh, cb->extack);

		if (err < 0)
			return err;
	}

	s_idx = cb->args[0];

	rcu_read_lock();

	cb->seq = net->dev_base_seq;

	for_each_netdev_rcu(net, dev) {
		if (dev->priv_flags & IFF_EBRIDGE) {
			struct net_bridge *br = netdev_priv(dev);
			struct br_port_msg *bpm;

			if (idx < s_idx)
				goto skip;

			nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid,
					cb->nlh->nlmsg_seq, RTM_GETMDB,
					sizeof(*bpm), NLM_F_MULTI);
			if (nlh == NULL)
				break;

			bpm = nlmsg_data(nlh);
			memset(bpm, 0, sizeof(*bpm));
			bpm->ifindex = dev->ifindex;
			if (br_mdb_fill_info(skb, cb, dev) < 0)
				goto out;
			if (br_rports_fill_info(skb, &br->multicast_ctx) < 0)
				goto out;

			cb->args[1] = 0;
			nlmsg_end(skb, nlh);
		skip:
			idx++;
		}
	}

out:
	if (nlh)
		nlmsg_end(skb, nlh);
	rcu_read_unlock();
	cb->args[0] = idx;
	return skb->len;
}

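/* Editor's note (not part of the original file): this dump handler services
 * RTM_GETMDB requests such as iproute2's "bridge mdb show". Resume state for
 * multi-part dumps lives in cb->args[]: args[0] is the netdev iteration index,
 * args[1] the mdb entry index within the current bridge and args[2] the
 * port-group index within the current entry, matching the skip logic in
 * br_mdb_fill_info() above.
 */
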
static int nlmsg_populate_mdb_fill(struct sk_buff *skb,
				   struct net_device *dev,
				   struct net_bridge_mdb_entry *mp,
				   struct net_bridge_port_group *pg,
				   int type)
{
	struct nlmsghdr *nlh;
	struct br_port_msg *bpm;
	struct nlattr *nest, *nest2;

	nlh = nlmsg_put(skb, 0, 0, type, sizeof(*bpm), 0);
	if (!nlh)
		return -EMSGSIZE;

	bpm = nlmsg_data(nlh);
	memset(bpm, 0, sizeof(*bpm));
	bpm->family  = AF_BRIDGE;
	bpm->ifindex = dev->ifindex;
	nest = nla_nest_start_noflag(skb, MDBA_MDB);
	if (nest == NULL)
		goto cancel;
	nest2 = nla_nest_start_noflag(skb, MDBA_MDB_ENTRY);
	if (nest2 == NULL)
		goto end;

	if (__mdb_fill_info(skb, mp, pg))
		goto end;

	nla_nest_end(skb, nest2);
	nla_nest_end(skb, nest);
	nlmsg_end(skb, nlh);
	return 0;

end:
	nla_nest_end(skb, nest);
cancel:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

static size_t rtnl_mdb_nlmsg_size(struct net_bridge_port_group *pg)
{
	size_t nlmsg_size = NLMSG_ALIGN(sizeof(struct br_port_msg)) +
			    nla_total_size(sizeof(struct br_mdb_entry)) +
			    nla_total_size(sizeof(u32));
	struct net_bridge_group_src *ent;
	size_t addr_size = 0;

	if (!pg)
		goto out;

	/* MDBA_MDB_EATTR_RTPROT */
	nlmsg_size += nla_total_size(sizeof(u8));

	switch (pg->key.addr.proto) {
	case htons(ETH_P_IP):
		/* MDBA_MDB_EATTR_SOURCE */
		if (pg->key.addr.src.ip4)
			nlmsg_size += nla_total_size(sizeof(__be32));
		if (pg->key.port->br->multicast_ctx.multicast_igmp_version == 2)
			goto out;
		addr_size = sizeof(__be32);
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		/* MDBA_MDB_EATTR_SOURCE */
		if (!ipv6_addr_any(&pg->key.addr.src.ip6))
			nlmsg_size += nla_total_size(sizeof(struct in6_addr));
		if (pg->key.port->br->multicast_ctx.multicast_mld_version == 1)
			goto out;
		addr_size = sizeof(struct in6_addr);
		break;
#endif
	}

	/* MDBA_MDB_EATTR_GROUP_MODE */
	nlmsg_size += nla_total_size(sizeof(u8));

	/* MDBA_MDB_EATTR_SRC_LIST nested attr */
	if (!hlist_empty(&pg->src_list))
		nlmsg_size += nla_total_size(0);

	hlist_for_each_entry(ent, &pg->src_list, node) {
		/* MDBA_MDB_SRCLIST_ENTRY nested attr +
		 * MDBA_MDB_SRCATTR_ADDRESS + MDBA_MDB_SRCATTR_TIMER
		 */
		nlmsg_size += nla_total_size(0) +
			      nla_total_size(addr_size) +
			      nla_total_size(sizeof(u32));
	}
out:
	return nlmsg_size;
}

struct br_mdb_complete_info {
	struct net_bridge_port *port;
	struct br_ip ip;
};

static void br_mdb_complete(struct net_device *dev, int err, void *priv)
{
	struct br_mdb_complete_info *data = priv;
	struct net_bridge_port_group __rcu **pp;
	struct net_bridge_port_group *p;
	struct net_bridge_mdb_entry *mp;
	struct net_bridge_port *port = data->port;
	struct net_bridge *br = port->br;

	if (err)
		goto err;

	spin_lock_bh(&br->multicast_lock);
	mp = br_mdb_ip_get(br, &data->ip);
	if (!mp)
		goto out;
	for (pp = &mp->ports; (p = mlock_dereference(*pp, br)) != NULL;
	     pp = &p->next) {
		if (p->key.port != port)
			continue;
		p->flags |= MDB_PG_FLAGS_OFFLOAD;
	}
out:
	spin_unlock_bh(&br->multicast_lock);
err:
	kfree(priv);
}

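/* Editor's note (not part of the original file): br_mdb_complete() above runs
 * as the switchdev completion callback of the deferred SWITCHDEV_OBJ_ID_PORT_MDB
 * add issued from br_mdb_notify(); on success it tags the matching port group
 * with MDB_PG_FLAGS_OFFLOAD, which user space later sees as the "offload" flag
 * via __mdb_entry_fill_flags().
 */
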
static void br_switchdev_mdb_populate(struct switchdev_obj_port_mdb *mdb,
				      const struct net_bridge_mdb_entry *mp)
{
	if (mp->addr.proto == htons(ETH_P_IP))
		ip_eth_mc_map(mp->addr.dst.ip4, mdb->addr);
#if IS_ENABLED(CONFIG_IPV6)
	else if (mp->addr.proto == htons(ETH_P_IPV6))
		ipv6_eth_mc_map(&mp->addr.dst.ip6, mdb->addr);
#endif
	else
		ether_addr_copy(mdb->addr, mp->addr.dst.mac_addr);

	mdb->vid = mp->addr.vid;
}

static int br_mdb_replay_one(struct notifier_block *nb, struct net_device *dev,
			     const struct switchdev_obj_port_mdb *mdb,
			     unsigned long action, const void *ctx,
			     struct netlink_ext_ack *extack)
{
	struct switchdev_notifier_port_obj_info obj_info = {
		.info = {
			.dev = dev,
			.extack = extack,
			.ctx = ctx,
		},
		.obj = &mdb->obj,
	};
	int err;

	err = nb->notifier_call(nb, action, &obj_info);
	return notifier_to_errno(err);
}

static int br_mdb_queue_one(struct list_head *mdb_list,
			    enum switchdev_obj_id id,
			    const struct net_bridge_mdb_entry *mp,
			    struct net_device *orig_dev)
{
	struct switchdev_obj_port_mdb *mdb;

	mdb = kzalloc(sizeof(*mdb), GFP_ATOMIC);
	if (!mdb)
		return -ENOMEM;

	mdb->obj.id = id;
	mdb->obj.orig_dev = orig_dev;
	br_switchdev_mdb_populate(mdb, mp);
	list_add_tail(&mdb->obj.list, mdb_list);

	return 0;
}

int br_mdb_replay(struct net_device *br_dev, struct net_device *dev,
		  const void *ctx, bool adding, struct notifier_block *nb,
		  struct netlink_ext_ack *extack)
{
	const struct net_bridge_mdb_entry *mp;
	struct switchdev_obj *obj, *tmp;
	struct net_bridge *br;
	unsigned long action;
	LIST_HEAD(mdb_list);
	int err = 0;

	ASSERT_RTNL();

	if (!nb)
		return 0;

	if (!netif_is_bridge_master(br_dev) || !netif_is_bridge_port(dev))
		return -EINVAL;

	br = netdev_priv(br_dev);

	if (!br_opt_get(br, BROPT_MULTICAST_ENABLED))
		return 0;

	/* We cannot walk over br->mdb_list protected just by the rtnl_mutex,
	 * because the write-side protection is br->multicast_lock. But we
	 * need to emulate the [ blocking ] calling context of a regular
	 * switchdev event, so since both br->multicast_lock and RCU read side
	 * critical sections are atomic, we have no choice but to pick the RCU
	 * read side lock, queue up all our events, leave the critical section
	 * and notify switchdev from blocking context.
	 */
	rcu_read_lock();

	hlist_for_each_entry_rcu(mp, &br->mdb_list, mdb_node) {
		struct net_bridge_port_group __rcu * const *pp;
		const struct net_bridge_port_group *p;

		if (mp->host_joined) {
			err = br_mdb_queue_one(&mdb_list,
					       SWITCHDEV_OBJ_ID_HOST_MDB,
					       mp, br_dev);
			if (err) {
				rcu_read_unlock();
				goto out_free_mdb;
			}
		}

		for (pp = &mp->ports; (p = rcu_dereference(*pp)) != NULL;
		     pp = &p->next) {
			if (p->key.port->dev != dev)
				continue;

			err = br_mdb_queue_one(&mdb_list,
					       SWITCHDEV_OBJ_ID_PORT_MDB,
					       mp, dev);
			if (err) {
				rcu_read_unlock();
				goto out_free_mdb;
			}
		}
	}

	rcu_read_unlock();

	if (adding)
		action = SWITCHDEV_PORT_OBJ_ADD;
	else
		action = SWITCHDEV_PORT_OBJ_DEL;

	list_for_each_entry(obj, &mdb_list, list) {
		err = br_mdb_replay_one(nb, dev, SWITCHDEV_OBJ_PORT_MDB(obj),
					action, ctx, extack);
		if (err)
			goto out_free_mdb;
	}

out_free_mdb:
	list_for_each_entry_safe(obj, tmp, &mdb_list, list) {
		list_del(&obj->list);
		kfree(SWITCHDEV_OBJ_PORT_MDB(obj));
	}

	return err;
}

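/* Editor's sketch (hypothetical caller, not from the original file): a
 * switchdev driver that starts offloading a bridge port could replay the
 * existing MDB state so that its blocking notifier sees the same
 * SWITCHDEV_PORT_OBJ_ADD events it would have received from the beginning;
 * "priv" and the notifier block name below are placeholders:
 *
 *	err = br_mdb_replay(br_dev, brport_dev, priv, true,
 *			    &drv_switchdev_blocking_nb, extack);
 *	if (err && err != -EOPNOTSUPP)
 *		return err;
 */
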
static void br_mdb_switchdev_host_port(struct net_device *dev,
				       struct net_device *lower_dev,
				       struct net_bridge_mdb_entry *mp,
				       int type)
{
	struct switchdev_obj_port_mdb mdb = {
		.obj = {
			.id = SWITCHDEV_OBJ_ID_HOST_MDB,
			.flags = SWITCHDEV_F_DEFER,
			.orig_dev = dev,
		},
	};

	br_switchdev_mdb_populate(&mdb, mp);

	switch (type) {
	case RTM_NEWMDB:
		switchdev_port_obj_add(lower_dev, &mdb.obj, NULL);
		break;
	case RTM_DELMDB:
		switchdev_port_obj_del(lower_dev, &mdb.obj);
		break;
	}
}

static void br_mdb_switchdev_host(struct net_device *dev,
				  struct net_bridge_mdb_entry *mp, int type)
{
	struct net_device *lower_dev;
	struct list_head *iter;

	netdev_for_each_lower_dev(dev, lower_dev, iter)
		br_mdb_switchdev_host_port(dev, lower_dev, mp, type);
}

void br_mdb_notify(struct net_device *dev,
		   struct net_bridge_mdb_entry *mp,
		   struct net_bridge_port_group *pg,
		   int type)
{
	struct br_mdb_complete_info *complete_info;
	struct switchdev_obj_port_mdb mdb = {
		.obj = {
			.id = SWITCHDEV_OBJ_ID_PORT_MDB,
			.flags = SWITCHDEV_F_DEFER,
		},
	};
	struct net *net = dev_net(dev);
	struct sk_buff *skb;
	int err = -ENOBUFS;

	if (pg) {
		br_switchdev_mdb_populate(&mdb, mp);

		mdb.obj.orig_dev = pg->key.port->dev;
		switch (type) {
		case RTM_NEWMDB:
			complete_info = kmalloc(sizeof(*complete_info), GFP_ATOMIC);
			if (!complete_info)
				break;
			complete_info->port = pg->key.port;
			complete_info->ip = mp->addr;
			mdb.obj.complete_priv = complete_info;
			mdb.obj.complete = br_mdb_complete;
			if (switchdev_port_obj_add(pg->key.port->dev, &mdb.obj, NULL))
				kfree(complete_info);
			break;
		case RTM_DELMDB:
			switchdev_port_obj_del(pg->key.port->dev, &mdb.obj);
			break;
		}
	} else {
		br_mdb_switchdev_host(dev, mp, type);
	}

	skb = nlmsg_new(rtnl_mdb_nlmsg_size(pg), GFP_ATOMIC);
	if (!skb)
		goto errout;

	err = nlmsg_populate_mdb_fill(skb, dev, mp, pg, type);
	if (err < 0) {
		kfree_skb(skb);
		goto errout;
	}

	rtnl_notify(skb, net, 0, RTNLGRP_MDB, NULL, GFP_ATOMIC);
	return;
errout:
	rtnl_set_sk_err(net, RTNLGRP_MDB, err);
}

static int nlmsg_populate_rtr_fill(struct sk_buff *skb,
				   struct net_device *dev,
				   int ifindex, u16 vid, u32 pid,
				   u32 seq, int type, unsigned int flags)
{
	struct nlattr *nest, *port_nest;
	struct br_port_msg *bpm;
	struct nlmsghdr *nlh;

	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*bpm), 0);
	if (!nlh)
		return -EMSGSIZE;

	bpm = nlmsg_data(nlh);
	memset(bpm, 0, sizeof(*bpm));
	bpm->family = AF_BRIDGE;
	bpm->ifindex = dev->ifindex;
	nest = nla_nest_start_noflag(skb, MDBA_ROUTER);
	if (!nest)
		goto cancel;

	port_nest = nla_nest_start_noflag(skb, MDBA_ROUTER_PORT);
	if (!port_nest)
		goto end;
	if (nla_put_nohdr(skb, sizeof(u32), &ifindex)) {
		nla_nest_cancel(skb, port_nest);
		goto end;
	}
	if (vid && nla_put_u16(skb, MDBA_ROUTER_PATTR_VID, vid)) {
		nla_nest_cancel(skb, port_nest);
		goto end;
	}
	nla_nest_end(skb, port_nest);

	nla_nest_end(skb, nest);
	nlmsg_end(skb, nlh);
	return 0;

end:
	nla_nest_end(skb, nest);
cancel:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

static inline size_t rtnl_rtr_nlmsg_size(void)
{
	return NLMSG_ALIGN(sizeof(struct br_port_msg))
		+ nla_total_size(sizeof(__u32))
		+ nla_total_size(sizeof(u16));
}

void br_rtr_notify(struct net_device *dev, struct net_bridge_mcast_port *pmctx,
		   int type)
{
	struct net *net = dev_net(dev);
	struct sk_buff *skb;
	int err = -ENOBUFS;
	int ifindex;
	u16 vid;

	ifindex = pmctx ? pmctx->port->dev->ifindex : 0;
	vid = pmctx && br_multicast_port_ctx_is_vlan(pmctx) ? pmctx->vlan->vid :
							      0;
	skb = nlmsg_new(rtnl_rtr_nlmsg_size(), GFP_ATOMIC);
	if (!skb)
		goto errout;

	err = nlmsg_populate_rtr_fill(skb, dev, ifindex, vid, 0, 0, type,
				      NTF_SELF);
	if (err < 0) {
		kfree_skb(skb);
		goto errout;
	}

	rtnl_notify(skb, net, 0, RTNLGRP_MDB, NULL, GFP_ATOMIC);
	return;

errout:
	rtnl_set_sk_err(net, RTNLGRP_MDB, err);
}

static bool is_valid_mdb_entry(struct br_mdb_entry *entry,
			       struct netlink_ext_ack *extack)
{
	if (entry->ifindex == 0) {
		NL_SET_ERR_MSG_MOD(extack, "Zero entry ifindex is not allowed");
		return false;
	}

	if (entry->addr.proto == htons(ETH_P_IP)) {
		if (!ipv4_is_multicast(entry->addr.u.ip4)) {
			NL_SET_ERR_MSG_MOD(extack, "IPv4 entry group address is not multicast");
			return false;
		}
		if (ipv4_is_local_multicast(entry->addr.u.ip4)) {
			NL_SET_ERR_MSG_MOD(extack, "IPv4 entry group address is local multicast");
			return false;
		}
#if IS_ENABLED(CONFIG_IPV6)
	} else if (entry->addr.proto == htons(ETH_P_IPV6)) {
		if (ipv6_addr_is_ll_all_nodes(&entry->addr.u.ip6)) {
			NL_SET_ERR_MSG_MOD(extack, "IPv6 entry group address is link-local all nodes");
			return false;
		}
#endif
	} else if (entry->addr.proto == 0) {
		/* L2 mdb */
		if (!is_multicast_ether_addr(entry->addr.u.mac_addr)) {
			NL_SET_ERR_MSG_MOD(extack, "L2 entry group is not multicast");
			return false;
		}
	} else {
		NL_SET_ERR_MSG_MOD(extack, "Unknown entry protocol");
		return false;
	}

	if (entry->state != MDB_PERMANENT && entry->state != MDB_TEMPORARY) {
		NL_SET_ERR_MSG_MOD(extack, "Unknown entry state");
		return false;
	}
	if (entry->vid >= VLAN_VID_MASK) {
		NL_SET_ERR_MSG_MOD(extack, "Invalid entry VLAN id");
		return false;
	}

	return true;
}

static bool is_valid_mdb_source(struct nlattr *attr, __be16 proto,
				struct netlink_ext_ack *extack)
{
	switch (proto) {
	case htons(ETH_P_IP):
		if (nla_len(attr) != sizeof(struct in_addr)) {
			NL_SET_ERR_MSG_MOD(extack, "IPv4 invalid source address length");
			return false;
		}
		if (ipv4_is_multicast(nla_get_in_addr(attr))) {
			NL_SET_ERR_MSG_MOD(extack, "IPv4 multicast source address is not allowed");
			return false;
		}
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6): {
		struct in6_addr src;

		if (nla_len(attr) != sizeof(struct in6_addr)) {
			NL_SET_ERR_MSG_MOD(extack, "IPv6 invalid source address length");
			return false;
		}
		src = nla_get_in6_addr(attr);
		if (ipv6_addr_is_multicast(&src)) {
			NL_SET_ERR_MSG_MOD(extack, "IPv6 multicast source address is not allowed");
			return false;
		}
		break;
	}
#endif
	default:
		NL_SET_ERR_MSG_MOD(extack, "Invalid protocol used with source address");
		return false;
	}

	return true;
}

static const struct nla_policy br_mdbe_attrs_pol[MDBE_ATTR_MAX + 1] = {
	[MDBE_ATTR_SOURCE] = NLA_POLICY_RANGE(NLA_BINARY,
					      sizeof(struct in_addr),
					      sizeof(struct in6_addr)),
};

static int br_mdb_parse(struct sk_buff *skb, struct nlmsghdr *nlh,
			struct net_device **pdev, struct br_mdb_entry **pentry,
			struct nlattr **mdb_attrs, struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct br_mdb_entry *entry;
	struct br_port_msg *bpm;
	struct nlattr *tb[MDBA_SET_ENTRY_MAX+1];
	struct net_device *dev;
	int err;

	err = nlmsg_parse_deprecated(nlh, sizeof(*bpm), tb,
				     MDBA_SET_ENTRY_MAX, NULL, NULL);
	if (err < 0)
		return err;

	bpm = nlmsg_data(nlh);
	if (bpm->ifindex == 0) {
		NL_SET_ERR_MSG_MOD(extack, "Invalid bridge ifindex");
		return -EINVAL;
	}

	dev = __dev_get_by_index(net, bpm->ifindex);
	if (dev == NULL) {
		NL_SET_ERR_MSG_MOD(extack, "Bridge device doesn't exist");
		return -ENODEV;
	}

	if (!(dev->priv_flags & IFF_EBRIDGE)) {
		NL_SET_ERR_MSG_MOD(extack, "Device is not a bridge");
		return -EOPNOTSUPP;
	}

	*pdev = dev;

	if (!tb[MDBA_SET_ENTRY]) {
		NL_SET_ERR_MSG_MOD(extack, "Missing MDBA_SET_ENTRY attribute");
		return -EINVAL;
	}
	if (nla_len(tb[MDBA_SET_ENTRY]) != sizeof(struct br_mdb_entry)) {
		NL_SET_ERR_MSG_MOD(extack, "Invalid MDBA_SET_ENTRY attribute length");
		return -EINVAL;
	}

	entry = nla_data(tb[MDBA_SET_ENTRY]);
	if (!is_valid_mdb_entry(entry, extack))
		return -EINVAL;
	*pentry = entry;

	if (tb[MDBA_SET_ENTRY_ATTRS]) {
		err = nla_parse_nested(mdb_attrs, MDBE_ATTR_MAX,
				       tb[MDBA_SET_ENTRY_ATTRS],
				       br_mdbe_attrs_pol, extack);
		if (err)
			return err;
		if (mdb_attrs[MDBE_ATTR_SOURCE] &&
		    !is_valid_mdb_source(mdb_attrs[MDBE_ATTR_SOURCE],
					 entry->addr.proto, extack))
			return -EINVAL;
	} else {
		memset(mdb_attrs, 0,
		       sizeof(struct nlattr *) * (MDBE_ATTR_MAX + 1));
	}

	return 0;
}

static struct net_bridge_mcast *
__br_mdb_choose_context(struct net_bridge *br,
			const struct br_mdb_entry *entry,
			struct netlink_ext_ack *extack)
{
	struct net_bridge_mcast *brmctx = NULL;
	struct net_bridge_vlan *v;

	if (!br_opt_get(br, BROPT_MCAST_VLAN_SNOOPING_ENABLED)) {
		brmctx = &br->multicast_ctx;
		goto out;
	}

	if (!entry->vid) {
		NL_SET_ERR_MSG_MOD(extack, "Cannot add an entry without a vlan when vlan snooping is enabled");
		goto out;
	}

	v = br_vlan_find(br_vlan_group(br), entry->vid);
	if (!v) {
		NL_SET_ERR_MSG_MOD(extack, "Vlan is not configured");
		goto out;
	}
	if (br_multicast_ctx_vlan_global_disabled(&v->br_mcast_ctx)) {
		NL_SET_ERR_MSG_MOD(extack, "Vlan's multicast processing is disabled");
		goto out;
	}
	brmctx = &v->br_mcast_ctx;
out:
	return brmctx;
}

static int br_mdb_add_group(struct net_bridge *br, struct net_bridge_port *port,
			    struct br_mdb_entry *entry,
			    struct nlattr **mdb_attrs,
			    struct netlink_ext_ack *extack)
{
	struct net_bridge_mdb_entry *mp, *star_mp;
	struct net_bridge_port_group __rcu **pp;
	struct net_bridge_port_group *p;
	struct net_bridge_mcast *brmctx;
	struct br_ip group, star_group;
	unsigned long now = jiffies;
	unsigned char flags = 0;
	u8 filter_mode;
	int err;

	__mdb_entry_to_br_ip(entry, &group, mdb_attrs);

	brmctx = __br_mdb_choose_context(br, entry, extack);
	if (!brmctx)
		return -EINVAL;

	/* host join errors which can happen before creating the group */
	if (!port) {
		/* don't allow any flags for host-joined groups */
		if (entry->state) {
			NL_SET_ERR_MSG_MOD(extack, "Flags are not allowed for host groups");
			return -EINVAL;
		}
		if (!br_multicast_is_star_g(&group)) {
			NL_SET_ERR_MSG_MOD(extack, "Groups with sources cannot be manually host joined");
			return -EINVAL;
		}
	}

	if (br_group_is_l2(&group) && entry->state != MDB_PERMANENT) {
		NL_SET_ERR_MSG_MOD(extack, "Only permanent L2 entries allowed");
		return -EINVAL;
	}

	mp = br_mdb_ip_get(br, &group);
	if (!mp) {
		mp = br_multicast_new_group(br, &group);
		err = PTR_ERR_OR_ZERO(mp);
		if (err)
			return err;
	}

	/* host join */
	if (!port) {
		if (mp->host_joined) {
			NL_SET_ERR_MSG_MOD(extack, "Group is already joined by host");
			return -EEXIST;
		}

		br_multicast_host_join(brmctx, mp, false);
		br_mdb_notify(br->dev, mp, NULL, RTM_NEWMDB);

		return 0;
	}

	for (pp = &mp->ports;
	     (p = mlock_dereference(*pp, br)) != NULL;
	     pp = &p->next) {
		if (p->key.port == port) {
			NL_SET_ERR_MSG_MOD(extack, "Group is already joined by port");
			return -EEXIST;
		}
		if ((unsigned long)p->key.port < (unsigned long)port)
			break;
	}

	filter_mode = br_multicast_is_star_g(&group) ? MCAST_EXCLUDE :
						       MCAST_INCLUDE;

	if (entry->state == MDB_PERMANENT)
		flags |= MDB_PG_FLAGS_PERMANENT;

	p = br_multicast_new_port_group(port, &group, *pp, flags, NULL,
					filter_mode, RTPROT_STATIC);
	if (unlikely(!p)) {
		NL_SET_ERR_MSG_MOD(extack, "Couldn't allocate new port group");
		return -ENOMEM;
	}
	rcu_assign_pointer(*pp, p);
	if (entry->state == MDB_TEMPORARY)
		mod_timer(&p->timer,
			  now + brmctx->multicast_membership_interval);
	br_mdb_notify(br->dev, mp, p, RTM_NEWMDB);
	/* if we are adding a new EXCLUDE port group (*,G) it needs to be also
	 * added to all S,G entries for proper replication, if we are adding
	 * a new INCLUDE port (S,G) then all of *,G EXCLUDE ports need to be
	 * added to it for proper replication
	 */
	if (br_multicast_should_handle_mode(brmctx, group.proto)) {
		switch (filter_mode) {
		case MCAST_EXCLUDE:
			br_multicast_star_g_handle_mode(p, MCAST_EXCLUDE);
			break;
		case MCAST_INCLUDE:
			star_group = p->key.addr;
			memset(&star_group.src, 0, sizeof(star_group.src));
			star_mp = br_mdb_ip_get(br, &star_group);
			if (star_mp)
				br_multicast_sg_add_exclude_ports(star_mp, p);
			break;
		}
	}

	return 0;
}

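/* Editor's note (not part of the original file): a concrete reading of the
 * replication comment in br_mdb_add_group() above: adding port P to (*, G) in
 * EXCLUDE mode also attaches P to every existing (S, G) entry of the same
 * group, while adding P to a specific (S, G) pulls the EXCLUDE ports of the
 * matching (*, G) entry into it, so forwarding stays consistent.
 */
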
static int __br_mdb_add(struct net *net, struct net_bridge *br,
			struct net_bridge_port *p,
			struct br_mdb_entry *entry,
			struct nlattr **mdb_attrs,
			struct netlink_ext_ack *extack)
{
	int ret;

	spin_lock_bh(&br->multicast_lock);
	ret = br_mdb_add_group(br, p, entry, mdb_attrs, extack);
	spin_unlock_bh(&br->multicast_lock);

	return ret;
}

static int br_mdb_add(struct sk_buff *skb, struct nlmsghdr *nlh,
		      struct netlink_ext_ack *extack)
{
	struct nlattr *mdb_attrs[MDBE_ATTR_MAX + 1];
	struct net *net = sock_net(skb->sk);
	struct net_bridge_vlan_group *vg;
	struct net_bridge_port *p = NULL;
	struct net_device *dev, *pdev;
	struct br_mdb_entry *entry;
	struct net_bridge_vlan *v;
	struct net_bridge *br;
	int err;

	err = br_mdb_parse(skb, nlh, &dev, &entry, mdb_attrs, extack);
	if (err < 0)
		return err;

	br = netdev_priv(dev);

	if (!netif_running(br->dev)) {
		NL_SET_ERR_MSG_MOD(extack, "Bridge device is not running");
		return -EINVAL;
	}

	if (!br_opt_get(br, BROPT_MULTICAST_ENABLED)) {
		NL_SET_ERR_MSG_MOD(extack, "Bridge's multicast processing is disabled");
		return -EINVAL;
	}

	if (entry->ifindex != br->dev->ifindex) {
		pdev = __dev_get_by_index(net, entry->ifindex);
		if (!pdev) {
			NL_SET_ERR_MSG_MOD(extack, "Port net device doesn't exist");
			return -ENODEV;
		}

		p = br_port_get_rtnl(pdev);
		if (!p) {
			NL_SET_ERR_MSG_MOD(extack, "Net device is not a bridge port");
			return -EINVAL;
		}

		if (p->br != br) {
			NL_SET_ERR_MSG_MOD(extack, "Port belongs to a different bridge device");
			return -EINVAL;
		}
		if (p->state == BR_STATE_DISABLED) {
			NL_SET_ERR_MSG_MOD(extack, "Port is in disabled state");
			return -EINVAL;
		}
		vg = nbp_vlan_group(p);
	} else {
		vg = br_vlan_group(br);
	}

	/* If vlan filtering is enabled and VLAN is not specified
	 * install mdb entry on all vlans configured on the port.
	 */
	if (br_vlan_enabled(br->dev) && vg && entry->vid == 0) {
		list_for_each_entry(v, &vg->vlan_list, vlist) {
			entry->vid = v->vid;
			err = __br_mdb_add(net, br, p, entry, mdb_attrs, extack);
			if (err)
				break;
		}
	} else {
		err = __br_mdb_add(net, br, p, entry, mdb_attrs, extack);
	}

	return err;
}

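/* Editor's example (illustrative only; iproute2 syntax, made-up device names):
 * a request that lands in br_mdb_add() and is mapped to a per-vlan multicast
 * context by __br_mdb_choose_context():
 *
 *	bridge mdb add dev br0 port swp1 grp 239.1.1.1 permanent vid 10
 *
 * Omitting "vid" on a vlan-filtering bridge installs the entry on every vlan
 * configured on the port, as the loop above shows.
 */
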
static int __br_mdb_del(struct net_bridge *br, struct br_mdb_entry *entry,
			struct nlattr **mdb_attrs)
{
	struct net_bridge_mdb_entry *mp;
	struct net_bridge_port_group *p;
	struct net_bridge_port_group __rcu **pp;
	struct br_ip ip;
	int err = -EINVAL;

	if (!netif_running(br->dev) || !br_opt_get(br, BROPT_MULTICAST_ENABLED))
		return -EINVAL;

	__mdb_entry_to_br_ip(entry, &ip, mdb_attrs);

	spin_lock_bh(&br->multicast_lock);
	mp = br_mdb_ip_get(br, &ip);
	if (!mp)
		goto unlock;

	/* host leave */
	if (entry->ifindex == mp->br->dev->ifindex && mp->host_joined) {
		br_multicast_host_leave(mp, false);
		err = 0;
		br_mdb_notify(br->dev, mp, NULL, RTM_DELMDB);
		if (!mp->ports && netif_running(br->dev))
			mod_timer(&mp->timer, jiffies);
		goto unlock;
	}

	for (pp = &mp->ports;
	     (p = mlock_dereference(*pp, br)) != NULL;
	     pp = &p->next) {
		if (!p->key.port || p->key.port->dev->ifindex != entry->ifindex)
			continue;

		if (p->key.port->state == BR_STATE_DISABLED)
			goto unlock;

		br_multicast_del_pg(mp, p, pp);
		err = 0;
		break;
	}

unlock:
	spin_unlock_bh(&br->multicast_lock);
	return err;
}

static int br_mdb_del(struct sk_buff *skb, struct nlmsghdr *nlh,
		      struct netlink_ext_ack *extack)
{
	struct nlattr *mdb_attrs[MDBE_ATTR_MAX + 1];
	struct net *net = sock_net(skb->sk);
	struct net_bridge_vlan_group *vg;
	struct net_bridge_port *p = NULL;
	struct net_device *dev, *pdev;
	struct br_mdb_entry *entry;
	struct net_bridge_vlan *v;
	struct net_bridge *br;
	int err;

	err = br_mdb_parse(skb, nlh, &dev, &entry, mdb_attrs, extack);
	if (err < 0)
		return err;

	br = netdev_priv(dev);

	if (entry->ifindex != br->dev->ifindex) {
		pdev = __dev_get_by_index(net, entry->ifindex);
		if (!pdev)
			return -ENODEV;

		p = br_port_get_rtnl(pdev);
		if (!p || p->br != br || p->state == BR_STATE_DISABLED)
			return -EINVAL;
		vg = nbp_vlan_group(p);
	} else {
		vg = br_vlan_group(br);
	}

	/* If vlan filtering is enabled and VLAN is not specified
	 * delete mdb entry on all vlans configured on the port.
	 */
	if (br_vlan_enabled(br->dev) && vg && entry->vid == 0) {
		list_for_each_entry(v, &vg->vlan_list, vlist) {
			entry->vid = v->vid;
			err = __br_mdb_del(br, entry, mdb_attrs);
		}
	} else {
		err = __br_mdb_del(br, entry, mdb_attrs);
	}

	return err;
}

void br_mdb_init(void)
{
	rtnl_register_module(THIS_MODULE, PF_BRIDGE, RTM_GETMDB, NULL, br_mdb_dump, 0);
	rtnl_register_module(THIS_MODULE, PF_BRIDGE, RTM_NEWMDB, br_mdb_add, NULL, 0);
	rtnl_register_module(THIS_MODULE, PF_BRIDGE, RTM_DELMDB, br_mdb_del, NULL, 0);
}

void br_mdb_uninit(void)
{
	rtnl_unregister(PF_BRIDGE, RTM_GETMDB);
	rtnl_unregister(PF_BRIDGE, RTM_NEWMDB);
	rtnl_unregister(PF_BRIDGE, RTM_DELMDB);
}