// SPDX-License-Identifier: GPL-2.0
#include <linux/err.h>
#include <linux/igmp.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/rculist.h>
#include <linux/skbuff.h>
#include <linux/if_ether.h>
#include <net/ip.h>
#include <net/netlink.h>
#include <net/switchdev.h>
#if IS_ENABLED(CONFIG_IPV6)
#include <net/ipv6.h>
#include <net/addrconf.h>
#endif

#include "br_private.h"

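/* Helpers for router port dumps: report whether @pmctx is currently linked
 * in the per-protocol multicast router list and return the remaining router
 * timer value via @timer.
 */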
static bool
br_ip4_rports_get_timer(struct net_bridge_mcast_port *pmctx,
			unsigned long *timer)
{
	*timer = br_timer_value(&pmctx->ip4_mc_router_timer);
	return !hlist_unhashed(&pmctx->ip4_rlist);
}

static bool
br_ip6_rports_get_timer(struct net_bridge_mcast_port *pmctx,
			unsigned long *timer)
{
#if IS_ENABLED(CONFIG_IPV6)
	*timer = br_timer_value(&pmctx->ip6_mc_router_timer);
	return !hlist_unhashed(&pmctx->ip6_rlist);
#else
	*timer = 0;
	return false;
#endif
}

static size_t __br_rports_one_size(void)
{
	return nla_total_size(sizeof(u32)) + /* MDBA_ROUTER_PORT */
	       nla_total_size(sizeof(u32)) + /* MDBA_ROUTER_PATTR_TIMER */
	       nla_total_size(sizeof(u8)) +  /* MDBA_ROUTER_PATTR_TYPE */
	       nla_total_size(sizeof(u32)) + /* MDBA_ROUTER_PATTR_INET_TIMER */
	       nla_total_size(sizeof(u32)) + /* MDBA_ROUTER_PATTR_INET6_TIMER */
	       nla_total_size(sizeof(u32)); /* MDBA_ROUTER_PATTR_VID */
}

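/* Size of the MDBA_ROUTER nest with one __br_rports_one_size() entry per
 * IPv4/IPv6 router port, so callers can presize netlink skbs.
 */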
size_t br_rports_size(const struct net_bridge_mcast *brmctx)
{
	struct net_bridge_mcast_port *pmctx;
	size_t size = nla_total_size(0); /* MDBA_ROUTER */

	rcu_read_lock();
	hlist_for_each_entry_rcu(pmctx, &brmctx->ip4_mc_router_list,
				 ip4_rlist)
		size += __br_rports_one_size();

#if IS_ENABLED(CONFIG_IPV6)
	hlist_for_each_entry_rcu(pmctx, &brmctx->ip6_mc_router_list,
				 ip6_rlist)
		size += __br_rports_one_size();
#endif
	rcu_read_unlock();

	return size;
}

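/* Dump the MDBA_ROUTER nest: one MDBA_ROUTER_PORT entry per port that is
 * currently an IPv4 and/or IPv6 multicast router, with per-protocol timers
 * and, in per-vlan context, the vlan id.
 */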
int br_rports_fill_info(struct sk_buff *skb,
			const struct net_bridge_mcast *brmctx)
{
	u16 vid = brmctx->vlan ? brmctx->vlan->vid : 0;
	bool have_ip4_mc_rtr, have_ip6_mc_rtr;
	unsigned long ip4_timer, ip6_timer;
	struct nlattr *nest, *port_nest;
	struct net_bridge_port *p;

	if (!brmctx->multicast_router || !br_rports_have_mc_router(brmctx))
		return 0;

	nest = nla_nest_start_noflag(skb, MDBA_ROUTER);
	if (nest == NULL)
		return -EMSGSIZE;

	list_for_each_entry_rcu(p, &brmctx->br->port_list, list) {
		struct net_bridge_mcast_port *pmctx;

		if (vid) {
			struct net_bridge_vlan *v;

			v = br_vlan_find(nbp_vlan_group(p), vid);
			if (!v)
				continue;
			pmctx = &v->port_mcast_ctx;
		} else {
			pmctx = &p->multicast_ctx;
		}

		have_ip4_mc_rtr = br_ip4_rports_get_timer(pmctx, &ip4_timer);
		have_ip6_mc_rtr = br_ip6_rports_get_timer(pmctx, &ip6_timer);

		if (!have_ip4_mc_rtr && !have_ip6_mc_rtr)
			continue;

		port_nest = nla_nest_start_noflag(skb, MDBA_ROUTER_PORT);
		if (!port_nest)
			goto fail;

		if (nla_put_nohdr(skb, sizeof(u32), &p->dev->ifindex) ||
		    nla_put_u32(skb, MDBA_ROUTER_PATTR_TIMER,
				max(ip4_timer, ip6_timer)) ||
		    nla_put_u8(skb, MDBA_ROUTER_PATTR_TYPE,
			       p->multicast_ctx.multicast_router) ||
		    (have_ip4_mc_rtr &&
		     nla_put_u32(skb, MDBA_ROUTER_PATTR_INET_TIMER,
				 ip4_timer)) ||
		    (have_ip6_mc_rtr &&
		     nla_put_u32(skb, MDBA_ROUTER_PATTR_INET6_TIMER,
				 ip6_timer)) ||
		    (vid && nla_put_u16(skb, MDBA_ROUTER_PATTR_VID, vid))) {
			nla_nest_cancel(skb, port_nest);
			goto fail;
		}

		nla_nest_end(skb, port_nest);
	}

	nla_nest_end(skb, nest);
	return 0;
fail:
	nla_nest_cancel(skb, nest);
	return -EMSGSIZE;
}

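/* Translate internal MDB_PG_FLAGS_* port group flags into the MDB_FLAGS_*
 * values exposed to user space in struct br_mdb_entry.
 */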
static void __mdb_entry_fill_flags(struct br_mdb_entry *e, unsigned char flags)
{
	e->state = flags & MDB_PG_FLAGS_PERMANENT;
	e->flags = 0;
	if (flags & MDB_PG_FLAGS_OFFLOAD)
		e->flags |= MDB_FLAGS_OFFLOAD;
	if (flags & MDB_PG_FLAGS_FAST_LEAVE)
		e->flags |= MDB_FLAGS_FAST_LEAVE;
	if (flags & MDB_PG_FLAGS_STAR_EXCL)
		e->flags |= MDB_FLAGS_STAR_EXCL;
	if (flags & MDB_PG_FLAGS_BLOCKED)
		e->flags |= MDB_FLAGS_BLOCKED;
	if (flags & MDB_PG_FLAGS_OFFLOAD_FAILED)
		e->flags |= MDB_FLAGS_OFFLOAD_FAILED;
}

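/* Convert a user space struct br_mdb_entry (plus an optional
 * MDBE_ATTR_SOURCE attribute) into the bridge's internal struct br_ip key.
 */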
static void __mdb_entry_to_br_ip(struct br_mdb_entry *entry, struct br_ip *ip,
				 struct nlattr **mdb_attrs)
{
	memset(ip, 0, sizeof(struct br_ip));
	ip->vid = entry->vid;
	ip->proto = entry->addr.proto;
	switch (ip->proto) {
	case htons(ETH_P_IP):
		ip->dst.ip4 = entry->addr.u.ip4;
		if (mdb_attrs && mdb_attrs[MDBE_ATTR_SOURCE])
			ip->src.ip4 = nla_get_in_addr(mdb_attrs[MDBE_ATTR_SOURCE]);
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		ip->dst.ip6 = entry->addr.u.ip6;
		if (mdb_attrs && mdb_attrs[MDBE_ATTR_SOURCE])
			ip->src.ip6 = nla_get_in6_addr(mdb_attrs[MDBE_ATTR_SOURCE]);
		break;
#endif
	default:
		ether_addr_copy(ip->dst.mac_addr, entry->addr.u.mac_addr);
	}
}

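/* Dump the MDBA_MDB_EATTR_SRC_LIST nest: one MDBA_MDB_SRCLIST_ENTRY with
 * address and timer per source entry of the port group.
 */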
static int __mdb_fill_srcs(struct sk_buff *skb,
			   struct net_bridge_port_group *p)
{
	struct net_bridge_group_src *ent;
	struct nlattr *nest, *nest_ent;

	if (hlist_empty(&p->src_list))
		return 0;

	nest = nla_nest_start(skb, MDBA_MDB_EATTR_SRC_LIST);
	if (!nest)
		return -EMSGSIZE;

	hlist_for_each_entry_rcu(ent, &p->src_list, node,
				 lockdep_is_held(&p->key.port->br->multicast_lock)) {
		nest_ent = nla_nest_start(skb, MDBA_MDB_SRCLIST_ENTRY);
		if (!nest_ent)
			goto out_cancel_err;
		switch (ent->addr.proto) {
		case htons(ETH_P_IP):
			if (nla_put_in_addr(skb, MDBA_MDB_SRCATTR_ADDRESS,
					    ent->addr.src.ip4)) {
				nla_nest_cancel(skb, nest_ent);
				goto out_cancel_err;
			}
			break;
#if IS_ENABLED(CONFIG_IPV6)
		case htons(ETH_P_IPV6):
			if (nla_put_in6_addr(skb, MDBA_MDB_SRCATTR_ADDRESS,
					     &ent->addr.src.ip6)) {
				nla_nest_cancel(skb, nest_ent);
				goto out_cancel_err;
			}
			break;
#endif
		default:
			nla_nest_cancel(skb, nest_ent);
			continue;
		}
		if (nla_put_u32(skb, MDBA_MDB_SRCATTR_TIMER,
				br_timer_value(&ent->timer))) {
			nla_nest_cancel(skb, nest_ent);
			goto out_cancel_err;
		}
		nla_nest_end(skb, nest_ent);
	}

	nla_nest_end(skb, nest);

	return 0;

out_cancel_err:
	nla_nest_cancel(skb, nest);
	return -EMSGSIZE;
}

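/* Fill a single MDBA_MDB_ENTRY_INFO nest for either a port group entry
 * (@p != NULL) or the bridge itself as a host-joined member (@p == NULL).
 * Source lists are only dumped when IGMPv3/MLDv2 is in use.
 */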
static int __mdb_fill_info(struct sk_buff *skb,
			   struct net_bridge_mdb_entry *mp,
			   struct net_bridge_port_group *p)
{
	bool dump_srcs_mode = false;
	struct timer_list *mtimer;
	struct nlattr *nest_ent;
	struct br_mdb_entry e;
	u8 flags = 0;
	int ifindex;

	memset(&e, 0, sizeof(e));
	if (p) {
		ifindex = p->key.port->dev->ifindex;
		mtimer = &p->timer;
		flags = p->flags;
	} else {
		ifindex = mp->br->dev->ifindex;
		mtimer = &mp->timer;
	}

	__mdb_entry_fill_flags(&e, flags);
	e.ifindex = ifindex;
	e.vid = mp->addr.vid;
	if (mp->addr.proto == htons(ETH_P_IP)) {
		e.addr.u.ip4 = mp->addr.dst.ip4;
#if IS_ENABLED(CONFIG_IPV6)
	} else if (mp->addr.proto == htons(ETH_P_IPV6)) {
		e.addr.u.ip6 = mp->addr.dst.ip6;
#endif
	} else {
		ether_addr_copy(e.addr.u.mac_addr, mp->addr.dst.mac_addr);
		e.state = MDB_PERMANENT;
	}
	e.addr.proto = mp->addr.proto;
	nest_ent = nla_nest_start_noflag(skb,
					 MDBA_MDB_ENTRY_INFO);
	if (!nest_ent)
		return -EMSGSIZE;

	if (nla_put_nohdr(skb, sizeof(e), &e) ||
	    nla_put_u32(skb,
			MDBA_MDB_EATTR_TIMER,
			br_timer_value(mtimer)))
		goto nest_err;

	switch (mp->addr.proto) {
	case htons(ETH_P_IP):
		dump_srcs_mode = !!(mp->br->multicast_ctx.multicast_igmp_version == 3);
		if (mp->addr.src.ip4) {
			if (nla_put_in_addr(skb, MDBA_MDB_EATTR_SOURCE,
					    mp->addr.src.ip4))
				goto nest_err;
			break;
		}
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		dump_srcs_mode = !!(mp->br->multicast_ctx.multicast_mld_version == 2);
		if (!ipv6_addr_any(&mp->addr.src.ip6)) {
			if (nla_put_in6_addr(skb, MDBA_MDB_EATTR_SOURCE,
					     &mp->addr.src.ip6))
				goto nest_err;
			break;
		}
		break;
#endif
	default:
		ether_addr_copy(e.addr.u.mac_addr, mp->addr.dst.mac_addr);
	}
	if (p) {
		if (nla_put_u8(skb, MDBA_MDB_EATTR_RTPROT, p->rt_protocol))
			goto nest_err;
		if (dump_srcs_mode &&
		    (__mdb_fill_srcs(skb, p) ||
		     nla_put_u8(skb, MDBA_MDB_EATTR_GROUP_MODE,
				p->filter_mode)))
			goto nest_err;
	}
	nla_nest_end(skb, nest_ent);

	return 0;

nest_err:
	nla_nest_cancel(skb, nest_ent);
	return -EMSGSIZE;
}

static int br_mdb_fill_info(struct sk_buff *skb, struct netlink_callback *cb,
			    struct net_device *dev)
{
	int idx = 0, s_idx = cb->args[1], err = 0, pidx = 0, s_pidx = cb->args[2];
	struct net_bridge *br = netdev_priv(dev);
	struct net_bridge_mdb_entry *mp;
	struct nlattr *nest, *nest2;

	nest = nla_nest_start_noflag(skb, MDBA_MDB);
	if (nest == NULL)
		return -EMSGSIZE;

	hlist_for_each_entry_rcu(mp, &br->mdb_list, mdb_node) {
		struct net_bridge_port_group *p;
		struct net_bridge_port_group __rcu **pp;

		if (idx < s_idx)
			goto skip;

		nest2 = nla_nest_start_noflag(skb, MDBA_MDB_ENTRY);
		if (!nest2) {
			err = -EMSGSIZE;
			break;
		}

		if (!s_pidx && mp->host_joined) {
			err = __mdb_fill_info(skb, mp, NULL);
			if (err) {
				nla_nest_cancel(skb, nest2);
				break;
			}
		}

		for (pp = &mp->ports; (p = rcu_dereference(*pp)) != NULL;
		     pp = &p->next) {
			if (!p->key.port)
				continue;
			if (pidx < s_pidx)
				goto skip_pg;

			err = __mdb_fill_info(skb, mp, p);
			if (err) {
				nla_nest_end(skb, nest2);
				goto out;
			}
skip_pg:
			pidx++;
		}
		pidx = 0;
		s_pidx = 0;
		nla_nest_end(skb, nest2);
skip:
		idx++;
	}

out:
	cb->args[1] = idx;
	cb->args[2] = pidx;
	nla_nest_end(skb, nest);
	return err;
}

int br_mdb_dump(struct net_device *dev, struct sk_buff *skb,
		struct netlink_callback *cb)
{
	struct net_bridge *br = netdev_priv(dev);
	struct br_port_msg *bpm;
	struct nlmsghdr *nlh;
	int err;

	nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid,
			cb->nlh->nlmsg_seq, RTM_GETMDB, sizeof(*bpm),
			NLM_F_MULTI);
	if (!nlh)
		return -EMSGSIZE;

	bpm = nlmsg_data(nlh);
	memset(bpm, 0, sizeof(*bpm));
	bpm->ifindex = dev->ifindex;

	rcu_read_lock();

	err = br_mdb_fill_info(skb, cb, dev);
	if (err)
		goto out;
	err = br_rports_fill_info(skb, &br->multicast_ctx);
	if (err)
		goto out;

out:
	rcu_read_unlock();
	nlmsg_end(skb, nlh);
	return err;
}

static int nlmsg_populate_mdb_fill(struct sk_buff *skb,
				   struct net_device *dev,
				   struct net_bridge_mdb_entry *mp,
				   struct net_bridge_port_group *pg,
				   int type)
{
	struct nlmsghdr *nlh;
	struct br_port_msg *bpm;
	struct nlattr *nest, *nest2;

	nlh = nlmsg_put(skb, 0, 0, type, sizeof(*bpm), 0);
	if (!nlh)
		return -EMSGSIZE;

	bpm = nlmsg_data(nlh);
	memset(bpm, 0, sizeof(*bpm));
	bpm->family  = AF_BRIDGE;
	bpm->ifindex = dev->ifindex;
	nest = nla_nest_start_noflag(skb, MDBA_MDB);
	if (nest == NULL)
		goto cancel;
	nest2 = nla_nest_start_noflag(skb, MDBA_MDB_ENTRY);
	if (nest2 == NULL)
		goto end;

	if (__mdb_fill_info(skb, mp, pg))
		goto end;

	nla_nest_end(skb, nest2);
	nla_nest_end(skb, nest);
	nlmsg_end(skb, nlh);
	return 0;

end:
	nla_nest_end(skb, nest);
cancel:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

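/* Worst-case netlink attribute size of a single port group entry (or of a
 * host-joined entry when @pg is NULL), mirroring what __mdb_fill_info() may
 * emit for it.
 */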
static size_t rtnl_mdb_nlmsg_pg_size(const struct net_bridge_port_group *pg)
{
	struct net_bridge_group_src *ent;
	size_t nlmsg_size, addr_size = 0;

	/* MDBA_MDB_ENTRY_INFO */
	nlmsg_size = nla_total_size(sizeof(struct br_mdb_entry)) +
		     /* MDBA_MDB_EATTR_TIMER */
		     nla_total_size(sizeof(u32));

	if (!pg)
		goto out;

	/* MDBA_MDB_EATTR_RTPROT */
	nlmsg_size += nla_total_size(sizeof(u8));

	switch (pg->key.addr.proto) {
	case htons(ETH_P_IP):
		/* MDBA_MDB_EATTR_SOURCE */
		if (pg->key.addr.src.ip4)
			nlmsg_size += nla_total_size(sizeof(__be32));
		if (pg->key.port->br->multicast_ctx.multicast_igmp_version == 2)
			goto out;
		addr_size = sizeof(__be32);
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		/* MDBA_MDB_EATTR_SOURCE */
		if (!ipv6_addr_any(&pg->key.addr.src.ip6))
			nlmsg_size += nla_total_size(sizeof(struct in6_addr));
		if (pg->key.port->br->multicast_ctx.multicast_mld_version == 1)
			goto out;
		addr_size = sizeof(struct in6_addr);
		break;
#endif
	}

	/* MDBA_MDB_EATTR_GROUP_MODE */
	nlmsg_size += nla_total_size(sizeof(u8));

	/* MDBA_MDB_EATTR_SRC_LIST nested attr */
	if (!hlist_empty(&pg->src_list))
		nlmsg_size += nla_total_size(0);

	hlist_for_each_entry(ent, &pg->src_list, node) {
		/* MDBA_MDB_SRCLIST_ENTRY nested attr +
		 * MDBA_MDB_SRCATTR_ADDRESS + MDBA_MDB_SRCATTR_TIMER
		 */
		nlmsg_size += nla_total_size(0) +
			      nla_total_size(addr_size) +
			      nla_total_size(sizeof(u32));
	}

out:
	return nlmsg_size;
}

static size_t rtnl_mdb_nlmsg_size(const struct net_bridge_port_group *pg)
{
	return NLMSG_ALIGN(sizeof(struct br_port_msg)) +
	       /* MDBA_MDB */
	       nla_total_size(0) +
	       /* MDBA_MDB_ENTRY */
	       nla_total_size(0) +
	       /* Port group entry */
	       rtnl_mdb_nlmsg_pg_size(pg);
}

static void __br_mdb_notify(struct net_device *dev,
			    struct net_bridge_mdb_entry *mp,
			    struct net_bridge_port_group *pg,
			    int type, bool notify_switchdev)
{
	struct net *net = dev_net(dev);
	struct sk_buff *skb;
	int err = -ENOBUFS;

	if (notify_switchdev)
		br_switchdev_mdb_notify(dev, mp, pg, type);

	skb = nlmsg_new(rtnl_mdb_nlmsg_size(pg), GFP_ATOMIC);
	if (!skb)
		goto errout;

	err = nlmsg_populate_mdb_fill(skb, dev, mp, pg, type);
	if (err < 0) {
		kfree_skb(skb);
		goto errout;
	}

	rtnl_notify(skb, net, 0, RTNLGRP_MDB, NULL, GFP_ATOMIC);
	return;
errout:
	rtnl_set_sk_err(net, RTNLGRP_MDB, err);
}

void br_mdb_notify(struct net_device *dev,
		   struct net_bridge_mdb_entry *mp,
		   struct net_bridge_port_group *pg,
		   int type)
{
	__br_mdb_notify(dev, mp, pg, type, true);
}

void br_mdb_flag_change_notify(struct net_device *dev,
			       struct net_bridge_mdb_entry *mp,
			       struct net_bridge_port_group *pg)
{
	__br_mdb_notify(dev, mp, pg, RTM_NEWMDB, false);
}

static int nlmsg_populate_rtr_fill(struct sk_buff *skb,
				   struct net_device *dev,
				   int ifindex, u16 vid, u32 pid,
				   u32 seq, int type, unsigned int flags)
{
	struct nlattr *nest, *port_nest;
	struct br_port_msg *bpm;
	struct nlmsghdr *nlh;

	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*bpm), 0);
	if (!nlh)
		return -EMSGSIZE;

	bpm = nlmsg_data(nlh);
	memset(bpm, 0, sizeof(*bpm));
	bpm->family = AF_BRIDGE;
	bpm->ifindex = dev->ifindex;
	nest = nla_nest_start_noflag(skb, MDBA_ROUTER);
	if (!nest)
		goto cancel;

	port_nest = nla_nest_start_noflag(skb, MDBA_ROUTER_PORT);
	if (!port_nest)
		goto end;
	if (nla_put_nohdr(skb, sizeof(u32), &ifindex)) {
		nla_nest_cancel(skb, port_nest);
		goto end;
	}
	if (vid && nla_put_u16(skb, MDBA_ROUTER_PATTR_VID, vid)) {
		nla_nest_cancel(skb, port_nest);
		goto end;
	}
	nla_nest_end(skb, port_nest);

	nla_nest_end(skb, nest);
	nlmsg_end(skb, nlh);
	return 0;

end:
	nla_nest_end(skb, nest);
cancel:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

static inline size_t rtnl_rtr_nlmsg_size(void)
{
	return NLMSG_ALIGN(sizeof(struct br_port_msg))
		+ nla_total_size(sizeof(__u32))
		+ nla_total_size(sizeof(u16));
}

void br_rtr_notify(struct net_device *dev, struct net_bridge_mcast_port *pmctx,
		   int type)
{
	struct net *net = dev_net(dev);
	struct sk_buff *skb;
	int err = -ENOBUFS;
	int ifindex;
	u16 vid;

	ifindex = pmctx ? pmctx->port->dev->ifindex : 0;
	vid = pmctx && br_multicast_port_ctx_is_vlan(pmctx) ? pmctx->vlan->vid :
							      0;
	skb = nlmsg_new(rtnl_rtr_nlmsg_size(), GFP_ATOMIC);
	if (!skb)
		goto errout;

	err = nlmsg_populate_rtr_fill(skb, dev, ifindex, vid, 0, 0, type,
				      NTF_SELF);
	if (err < 0) {
		kfree_skb(skb);
		goto errout;
	}

	rtnl_notify(skb, net, 0, RTNLGRP_MDB, NULL, GFP_ATOMIC);
	return;

errout:
	rtnl_set_sk_err(net, RTNLGRP_MDB, err);
}

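/* Netlink policies for the MDBE_ATTR_* attributes accepted on RTM_NEWMDB
 * and RTM_DELMDB requests.
 */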
static const struct nla_policy
br_mdbe_src_list_entry_pol[MDBE_SRCATTR_MAX + 1] = {
	[MDBE_SRCATTR_ADDRESS] = NLA_POLICY_RANGE(NLA_BINARY,
						  sizeof(struct in_addr),
						  sizeof(struct in6_addr)),
};

static const struct nla_policy
br_mdbe_src_list_pol[MDBE_SRC_LIST_MAX + 1] = {
	[MDBE_SRC_LIST_ENTRY] = NLA_POLICY_NESTED(br_mdbe_src_list_entry_pol),
};

static const struct nla_policy br_mdbe_attrs_pol[MDBE_ATTR_MAX + 1] = {
	[MDBE_ATTR_SOURCE] = NLA_POLICY_RANGE(NLA_BINARY,
					      sizeof(struct in_addr),
					      sizeof(struct in6_addr)),
	[MDBE_ATTR_GROUP_MODE] = NLA_POLICY_RANGE(NLA_U8, MCAST_EXCLUDE,
						  MCAST_INCLUDE),
	[MDBE_ATTR_SRC_LIST] = NLA_POLICY_NESTED(br_mdbe_src_list_pol),
	[MDBE_ATTR_RTPROT] = NLA_POLICY_MIN(NLA_U8, RTPROT_STATIC),
};

static bool is_valid_mdb_source(struct nlattr *attr, __be16 proto,
				struct netlink_ext_ack *extack)
{
	switch (proto) {
	case htons(ETH_P_IP):
		if (nla_len(attr) != sizeof(struct in_addr)) {
			NL_SET_ERR_MSG_MOD(extack, "IPv4 invalid source address length");
			return false;
		}
		if (ipv4_is_multicast(nla_get_in_addr(attr))) {
			NL_SET_ERR_MSG_MOD(extack, "IPv4 multicast source address is not allowed");
			return false;
		}
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6): {
		struct in6_addr src;

		if (nla_len(attr) != sizeof(struct in6_addr)) {
			NL_SET_ERR_MSG_MOD(extack, "IPv6 invalid source address length");
			return false;
		}
		src = nla_get_in6_addr(attr);
		if (ipv6_addr_is_multicast(&src)) {
			NL_SET_ERR_MSG_MOD(extack, "IPv6 multicast source address is not allowed");
			return false;
		}
		break;
	}
#endif
	default:
		NL_SET_ERR_MSG_MOD(extack, "Invalid protocol used with source address");
		return false;
	}

	return true;
}

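/* Select the multicast context for a new entry: the global bridge context
 * unless per-vlan snooping is enabled, in which case the entry must carry a
 * vid with a configured, multicast-enabled vlan.
 */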
static struct net_bridge_mcast *
__br_mdb_choose_context(struct net_bridge *br,
			const struct br_mdb_entry *entry,
			struct netlink_ext_ack *extack)
{
	struct net_bridge_mcast *brmctx = NULL;
	struct net_bridge_vlan *v;

	if (!br_opt_get(br, BROPT_MCAST_VLAN_SNOOPING_ENABLED)) {
		brmctx = &br->multicast_ctx;
		goto out;
	}

	if (!entry->vid) {
		NL_SET_ERR_MSG_MOD(extack, "Cannot add an entry without a vlan when vlan snooping is enabled");
		goto out;
	}

	v = br_vlan_find(br_vlan_group(br), entry->vid);
	if (!v) {
		NL_SET_ERR_MSG_MOD(extack, "Vlan is not configured");
		goto out;
	}
	if (br_multicast_ctx_vlan_global_disabled(&v->br_mcast_ctx)) {
		NL_SET_ERR_MSG_MOD(extack, "Vlan's multicast processing is disabled");
		goto out;
	}
	brmctx = &v->br_mcast_ctx;
out:
	return brmctx;
}

static int br_mdb_replace_group_sg(const struct br_mdb_config *cfg,
				   struct net_bridge_mdb_entry *mp,
				   struct net_bridge_port_group *pg,
				   struct net_bridge_mcast *brmctx,
				   unsigned char flags)
{
	unsigned long now = jiffies;

	pg->flags = flags;
	pg->rt_protocol = cfg->rt_protocol;
	if (!(flags & MDB_PG_FLAGS_PERMANENT) && !cfg->src_entry)
		mod_timer(&pg->timer,
			  now + brmctx->multicast_membership_interval);
	else
		timer_delete(&pg->timer);

	br_mdb_notify(cfg->br->dev, mp, pg, RTM_NEWMDB);

	return 0;
}

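/* Add (or, with NLM_F_REPLACE, update) an (S, G) port group entry, keeping
 * the port list ordered and inheriting (*, G) EXCLUDE ports for proper
 * replication.
 */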
static int br_mdb_add_group_sg(const struct br_mdb_config *cfg,
			       struct net_bridge_mdb_entry *mp,
			       struct net_bridge_mcast *brmctx,
			       unsigned char flags,
			       struct netlink_ext_ack *extack)
{
	struct net_bridge_port_group __rcu **pp;
	struct net_bridge_port_group *p;
	unsigned long now = jiffies;

	for (pp = &mp->ports;
	     (p = mlock_dereference(*pp, cfg->br)) != NULL;
	     pp = &p->next) {
		if (p->key.port == cfg->p) {
			if (!(cfg->nlflags & NLM_F_REPLACE)) {
				NL_SET_ERR_MSG_MOD(extack, "(S, G) group is already joined by port");
				return -EEXIST;
			}
			return br_mdb_replace_group_sg(cfg, mp, p, brmctx,
						       flags);
		}
		if ((unsigned long)p->key.port < (unsigned long)cfg->p)
			break;
	}

	p = br_multicast_new_port_group(cfg->p, &cfg->group, *pp, flags, NULL,
					MCAST_INCLUDE, cfg->rt_protocol, extack);
	if (unlikely(!p))
		return -ENOMEM;

	rcu_assign_pointer(*pp, p);
	if (!(flags & MDB_PG_FLAGS_PERMANENT) && !cfg->src_entry)
		mod_timer(&p->timer,
			  now + brmctx->multicast_membership_interval);
	br_mdb_notify(cfg->br->dev, mp, p, RTM_NEWMDB);

	/* All of (*, G) EXCLUDE ports need to be added to the new (S, G) for
	 * proper replication.
	 */
	if (br_multicast_should_handle_mode(brmctx, cfg->group.proto)) {
		struct net_bridge_mdb_entry *star_mp;
		struct br_ip star_group;

		star_group = p->key.addr;
		memset(&star_group.src, 0, sizeof(star_group.src));
		star_mp = br_mdb_ip_get(cfg->br, &star_group);
		if (star_mp)
			br_multicast_sg_add_exclude_ports(star_mp, p);
	}

	return 0;
}

static int br_mdb_add_group_src_fwd(const struct br_mdb_config *cfg,
				    struct br_ip *src_ip,
				    struct net_bridge_mcast *brmctx,
				    struct netlink_ext_ack *extack)
{
	struct net_bridge_mdb_entry *sgmp;
	struct br_mdb_config sg_cfg;
	struct br_ip sg_ip;
	u8 flags = 0;

	sg_ip = cfg->group;
	sg_ip.src = src_ip->src;
	sgmp = br_multicast_new_group(cfg->br, &sg_ip);
	if (IS_ERR(sgmp)) {
		NL_SET_ERR_MSG_MOD(extack, "Failed to add (S, G) MDB entry");
		return PTR_ERR(sgmp);
	}

	if (cfg->entry->state == MDB_PERMANENT)
		flags |= MDB_PG_FLAGS_PERMANENT;
	if (cfg->filter_mode == MCAST_EXCLUDE)
		flags |= MDB_PG_FLAGS_BLOCKED;

	memset(&sg_cfg, 0, sizeof(sg_cfg));
	sg_cfg.br = cfg->br;
	sg_cfg.p = cfg->p;
	sg_cfg.entry = cfg->entry;
	sg_cfg.group = sg_ip;
	sg_cfg.src_entry = true;
	sg_cfg.filter_mode = MCAST_INCLUDE;
	sg_cfg.rt_protocol = cfg->rt_protocol;
	sg_cfg.nlflags = cfg->nlflags;
	return br_mdb_add_group_sg(&sg_cfg, sgmp, brmctx, flags, extack);
}

static int br_mdb_add_group_src(const struct br_mdb_config *cfg,
				struct net_bridge_port_group *pg,
				struct net_bridge_mcast *brmctx,
				struct br_mdb_src_entry *src,
				struct netlink_ext_ack *extack)
{
	struct net_bridge_group_src *ent;
	unsigned long now = jiffies;
	int err;

	ent = br_multicast_find_group_src(pg, &src->addr);
	if (!ent) {
		ent = br_multicast_new_group_src(pg, &src->addr);
		if (!ent) {
			NL_SET_ERR_MSG_MOD(extack, "Failed to add new source entry");
			return -ENOSPC;
		}
	} else if (!(cfg->nlflags & NLM_F_REPLACE)) {
		NL_SET_ERR_MSG_MOD(extack, "Source entry already exists");
		return -EEXIST;
	}

	if (cfg->filter_mode == MCAST_INCLUDE &&
	    cfg->entry->state == MDB_TEMPORARY)
		mod_timer(&ent->timer, now + br_multicast_gmi(brmctx));
	else
		timer_delete(&ent->timer);

	/* Install a (S, G) forwarding entry for the source. */
	err = br_mdb_add_group_src_fwd(cfg, &src->addr, brmctx, extack);
	if (err)
		goto err_del_sg;

	ent->flags = BR_SGRP_F_INSTALLED | BR_SGRP_F_USER_ADDED;

	return 0;

err_del_sg:
	__br_multicast_del_group_src(ent);
	return err;
}

static void br_mdb_del_group_src(struct net_bridge_port_group *pg,
				 struct br_mdb_src_entry *src)
{
	struct net_bridge_group_src *ent;

	ent = br_multicast_find_group_src(pg, &src->addr);
	if (WARN_ON_ONCE(!ent))
		return;
	br_multicast_del_group_src(ent, false);
}

static int br_mdb_add_group_srcs(const struct br_mdb_config *cfg,
				 struct net_bridge_port_group *pg,
				 struct net_bridge_mcast *brmctx,
				 struct netlink_ext_ack *extack)
{
	int i, err;

	for (i = 0; i < cfg->num_src_entries; i++) {
		err = br_mdb_add_group_src(cfg, pg, brmctx,
					   &cfg->src_entries[i], extack);
		if (err)
			goto err_del_group_srcs;
	}

	return 0;

err_del_group_srcs:
	for (i--; i >= 0; i--)
		br_mdb_del_group_src(pg, &cfg->src_entries[i]);
	return err;
}

static int br_mdb_replace_group_srcs(const struct br_mdb_config *cfg,
				     struct net_bridge_port_group *pg,
				     struct net_bridge_mcast *brmctx,
				     struct netlink_ext_ack *extack)
{
	struct net_bridge_group_src *ent;
	struct hlist_node *tmp;
	int err;

	hlist_for_each_entry(ent, &pg->src_list, node)
		ent->flags |= BR_SGRP_F_DELETE;

	err = br_mdb_add_group_srcs(cfg, pg, brmctx, extack);
	if (err)
		goto err_clear_delete;

	hlist_for_each_entry_safe(ent, tmp, &pg->src_list, node) {
		if (ent->flags & BR_SGRP_F_DELETE)
			br_multicast_del_group_src(ent, false);
	}

	return 0;

err_clear_delete:
	hlist_for_each_entry(ent, &pg->src_list, node)
		ent->flags &= ~BR_SGRP_F_DELETE;
	return err;
}

static int br_mdb_replace_group_star_g(const struct br_mdb_config *cfg,
				       struct net_bridge_mdb_entry *mp,
				       struct net_bridge_port_group *pg,
				       struct net_bridge_mcast *brmctx,
				       unsigned char flags,
				       struct netlink_ext_ack *extack)
{
	unsigned long now = jiffies;
	int err;

	err = br_mdb_replace_group_srcs(cfg, pg, brmctx, extack);
	if (err)
		return err;

	pg->flags = flags;
	pg->filter_mode = cfg->filter_mode;
	pg->rt_protocol = cfg->rt_protocol;
	if (!(flags & MDB_PG_FLAGS_PERMANENT) &&
	    cfg->filter_mode == MCAST_EXCLUDE)
		mod_timer(&pg->timer,
			  now + brmctx->multicast_membership_interval);
	else
		timer_delete(&pg->timer);

	br_mdb_notify(cfg->br->dev, mp, pg, RTM_NEWMDB);

	if (br_multicast_should_handle_mode(brmctx, cfg->group.proto))
		br_multicast_star_g_handle_mode(pg, cfg->filter_mode);

	return 0;
}

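/* Add (or, with NLM_F_REPLACE, update) a (*, G) port group entry together
 * with its user-supplied source list.
 */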
static int br_mdb_add_group_star_g(const struct br_mdb_config *cfg,
				   struct net_bridge_mdb_entry *mp,
				   struct net_bridge_mcast *brmctx,
				   unsigned char flags,
				   struct netlink_ext_ack *extack)
{
	struct net_bridge_port_group __rcu **pp;
	struct net_bridge_port_group *p;
	unsigned long now = jiffies;
	int err;

	for (pp = &mp->ports;
	     (p = mlock_dereference(*pp, cfg->br)) != NULL;
	     pp = &p->next) {
		if (p->key.port == cfg->p) {
			if (!(cfg->nlflags & NLM_F_REPLACE)) {
				NL_SET_ERR_MSG_MOD(extack, "(*, G) group is already joined by port");
				return -EEXIST;
			}
			return br_mdb_replace_group_star_g(cfg, mp, p, brmctx,
							   flags, extack);
		}
		if ((unsigned long)p->key.port < (unsigned long)cfg->p)
			break;
	}

	p = br_multicast_new_port_group(cfg->p, &cfg->group, *pp, flags, NULL,
					cfg->filter_mode, cfg->rt_protocol,
					extack);
	if (unlikely(!p))
		return -ENOMEM;

	err = br_mdb_add_group_srcs(cfg, p, brmctx, extack);
	if (err)
		goto err_del_port_group;

	rcu_assign_pointer(*pp, p);
	if (!(flags & MDB_PG_FLAGS_PERMANENT) &&
	    cfg->filter_mode == MCAST_EXCLUDE)
		mod_timer(&p->timer,
			  now + brmctx->multicast_membership_interval);
	br_mdb_notify(cfg->br->dev, mp, p, RTM_NEWMDB);
	/* If we are adding a new EXCLUDE port group (*, G), it needs to be
	 * also added to all (S, G) entries for proper replication.
	 */
	if (br_multicast_should_handle_mode(brmctx, cfg->group.proto) &&
	    cfg->filter_mode == MCAST_EXCLUDE)
		br_multicast_star_g_handle_mode(p, MCAST_EXCLUDE);

	return 0;

err_del_port_group:
	br_multicast_del_port_group(p);
	return err;
}

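/* Common path for RTM_NEWMDB: resolve the multicast context, create the MDB
 * entry and dispatch to host join, (*, G) or (S, G) handling.
 */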
static int br_mdb_add_group(const struct br_mdb_config *cfg,
			    struct netlink_ext_ack *extack)
{
	struct br_mdb_entry *entry = cfg->entry;
	struct net_bridge_port *port = cfg->p;
	struct net_bridge_mdb_entry *mp;
	struct net_bridge *br = cfg->br;
	struct net_bridge_mcast *brmctx;
	struct br_ip group = cfg->group;
	unsigned char flags = 0;

	brmctx = __br_mdb_choose_context(br, entry, extack);
	if (!brmctx)
		return -EINVAL;

	mp = br_multicast_new_group(br, &group);
	if (IS_ERR(mp))
		return PTR_ERR(mp);

	/* host join */
	if (!port) {
		if (mp->host_joined && !(cfg->nlflags & NLM_F_REPLACE)) {
			NL_SET_ERR_MSG_MOD(extack, "Group is already joined by host");
			return -EEXIST;
		}

		br_multicast_host_join(brmctx, mp, false);
		br_mdb_notify(br->dev, mp, NULL, RTM_NEWMDB);

		return 0;
	}

	if (entry->state == MDB_PERMANENT)
		flags |= MDB_PG_FLAGS_PERMANENT;

	if (br_multicast_is_star_g(&group))
		return br_mdb_add_group_star_g(cfg, mp, brmctx, flags, extack);
	else
		return br_mdb_add_group_sg(cfg, mp, brmctx, flags, extack);
}

static int __br_mdb_add(const struct br_mdb_config *cfg,
			struct netlink_ext_ack *extack)
{
	int ret;

	spin_lock_bh(&cfg->br->multicast_lock);
	ret = br_mdb_add_group(cfg, extack);
	spin_unlock_bh(&cfg->br->multicast_lock);

	return ret;
}

static int br_mdb_config_src_entry_init(struct nlattr *src_entry,
					struct br_mdb_src_entry *src,
					__be16 proto,
					struct netlink_ext_ack *extack)
{
	struct nlattr *tb[MDBE_SRCATTR_MAX + 1];
	int err;

	err = nla_parse_nested(tb, MDBE_SRCATTR_MAX, src_entry,
			       br_mdbe_src_list_entry_pol, extack);
	if (err)
		return err;

	if (NL_REQ_ATTR_CHECK(extack, src_entry, tb, MDBE_SRCATTR_ADDRESS))
		return -EINVAL;

	if (!is_valid_mdb_source(tb[MDBE_SRCATTR_ADDRESS], proto, extack))
		return -EINVAL;

	src->addr.proto = proto;
	nla_memcpy(&src->addr.src, tb[MDBE_SRCATTR_ADDRESS],
		   nla_len(tb[MDBE_SRCATTR_ADDRESS]));

	return 0;
}

static int br_mdb_config_src_list_init(struct nlattr *src_list,
				       struct br_mdb_config *cfg,
				       struct netlink_ext_ack *extack)
{
	struct nlattr *src_entry;
	int rem, err;
	int i = 0;

	nla_for_each_nested(src_entry, src_list, rem)
		cfg->num_src_entries++;

	if (cfg->num_src_entries >= PG_SRC_ENT_LIMIT) {
		NL_SET_ERR_MSG_FMT_MOD(extack, "Exceeded maximum number of source entries (%u)",
				       PG_SRC_ENT_LIMIT - 1);
		return -EINVAL;
	}

	cfg->src_entries = kcalloc(cfg->num_src_entries,
				   sizeof(struct br_mdb_src_entry), GFP_KERNEL);
	if (!cfg->src_entries)
		return -ENOMEM;

	nla_for_each_nested(src_entry, src_list, rem) {
		err = br_mdb_config_src_entry_init(src_entry,
						   &cfg->src_entries[i],
						   cfg->entry->addr.proto,
						   extack);
		if (err)
			goto err_src_entry_init;
		i++;
	}

	return 0;

err_src_entry_init:
	kfree(cfg->src_entries);
	return err;
}

static void br_mdb_config_src_list_fini(struct br_mdb_config *cfg)
{
	kfree(cfg->src_entries);
}

static int br_mdb_config_attrs_init(struct nlattr *set_attrs,
				    struct br_mdb_config *cfg,
				    struct netlink_ext_ack *extack)
{
	struct nlattr *mdb_attrs[MDBE_ATTR_MAX + 1];
	int err;

	err = nla_parse_nested(mdb_attrs, MDBE_ATTR_MAX, set_attrs,
			       br_mdbe_attrs_pol, extack);
	if (err)
		return err;

	if (mdb_attrs[MDBE_ATTR_SOURCE] &&
	    !is_valid_mdb_source(mdb_attrs[MDBE_ATTR_SOURCE],
				 cfg->entry->addr.proto, extack))
		return -EINVAL;

	__mdb_entry_to_br_ip(cfg->entry, &cfg->group, mdb_attrs);

	if (mdb_attrs[MDBE_ATTR_GROUP_MODE]) {
		if (!cfg->p) {
			NL_SET_ERR_MSG_MOD(extack, "Filter mode cannot be set for host groups");
			return -EINVAL;
		}
		if (!br_multicast_is_star_g(&cfg->group)) {
			NL_SET_ERR_MSG_MOD(extack, "Filter mode can only be set for (*, G) entries");
			return -EINVAL;
		}
		cfg->filter_mode = nla_get_u8(mdb_attrs[MDBE_ATTR_GROUP_MODE]);
	} else {
		cfg->filter_mode = MCAST_EXCLUDE;
	}

	if (mdb_attrs[MDBE_ATTR_SRC_LIST]) {
		if (!cfg->p) {
			NL_SET_ERR_MSG_MOD(extack, "Source list cannot be set for host groups");
			return -EINVAL;
		}
		if (!br_multicast_is_star_g(&cfg->group)) {
			NL_SET_ERR_MSG_MOD(extack, "Source list can only be set for (*, G) entries");
			return -EINVAL;
		}
		if (!mdb_attrs[MDBE_ATTR_GROUP_MODE]) {
			NL_SET_ERR_MSG_MOD(extack, "Source list cannot be set without filter mode");
			return -EINVAL;
		}
		err = br_mdb_config_src_list_init(mdb_attrs[MDBE_ATTR_SRC_LIST],
						  cfg, extack);
		if (err)
			return err;
	}

	if (!cfg->num_src_entries && cfg->filter_mode == MCAST_INCLUDE) {
		NL_SET_ERR_MSG_MOD(extack, "Cannot add (*, G) INCLUDE with an empty source list");
		return -EINVAL;
	}

	if (mdb_attrs[MDBE_ATTR_RTPROT]) {
		if (!cfg->p) {
			NL_SET_ERR_MSG_MOD(extack, "Protocol cannot be set for host groups");
			return -EINVAL;
		}
		cfg->rt_protocol = nla_get_u8(mdb_attrs[MDBE_ATTR_RTPROT]);
	}

	return 0;
}

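/* Parse and validate an RTM_NEWMDB/RTM_DELMDB request into struct
 * br_mdb_config. Callers must pair this with br_mdb_config_fini() to free
 * the source list.
 */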
static int br_mdb_config_init(struct br_mdb_config *cfg, struct net_device *dev,
			      struct nlattr *tb[], u16 nlmsg_flags,
			      struct netlink_ext_ack *extack)
{
	struct net *net = dev_net(dev);

	memset(cfg, 0, sizeof(*cfg));
	cfg->filter_mode = MCAST_EXCLUDE;
	cfg->rt_protocol = RTPROT_STATIC;
	cfg->nlflags = nlmsg_flags;

	cfg->br = netdev_priv(dev);

	if (!netif_running(cfg->br->dev)) {
		NL_SET_ERR_MSG_MOD(extack, "Bridge device is not running");
		return -EINVAL;
	}

	if (!br_opt_get(cfg->br, BROPT_MULTICAST_ENABLED)) {
		NL_SET_ERR_MSG_MOD(extack, "Bridge's multicast processing is disabled");
		return -EINVAL;
	}

	cfg->entry = nla_data(tb[MDBA_SET_ENTRY]);

	if (cfg->entry->ifindex != cfg->br->dev->ifindex) {
		struct net_device *pdev;

		pdev = __dev_get_by_index(net, cfg->entry->ifindex);
		if (!pdev) {
			NL_SET_ERR_MSG_MOD(extack, "Port net device doesn't exist");
			return -ENODEV;
		}

		cfg->p = br_port_get_rtnl(pdev);
		if (!cfg->p) {
			NL_SET_ERR_MSG_MOD(extack, "Net device is not a bridge port");
			return -EINVAL;
		}

		if (cfg->p->br != cfg->br) {
			NL_SET_ERR_MSG_MOD(extack, "Port belongs to a different bridge device");
			return -EINVAL;
		}
	}

	if (cfg->entry->addr.proto == htons(ETH_P_IP) &&
	    ipv4_is_zeronet(cfg->entry->addr.u.ip4)) {
		NL_SET_ERR_MSG_MOD(extack, "IPv4 entry group address 0.0.0.0 is not allowed");
		return -EINVAL;
	}

	if (tb[MDBA_SET_ENTRY_ATTRS])
		return br_mdb_config_attrs_init(tb[MDBA_SET_ENTRY_ATTRS], cfg,
						extack);
	else
		__mdb_entry_to_br_ip(cfg->entry, &cfg->group, NULL);

	return 0;
}

static void br_mdb_config_fini(struct br_mdb_config *cfg)
{
	br_mdb_config_src_list_fini(cfg);
}

int br_mdb_add(struct net_device *dev, struct nlattr *tb[], u16 nlmsg_flags,
	       struct netlink_ext_ack *extack)
{
	struct net_bridge_vlan_group *vg;
	struct net_bridge_vlan *v;
	struct br_mdb_config cfg;
	int err;

	err = br_mdb_config_init(&cfg, dev, tb, nlmsg_flags, extack);
	if (err)
		return err;

	err = -EINVAL;
	/* host join errors which can happen before creating the group */
	if (!cfg.p && !br_group_is_l2(&cfg.group)) {
		/* don't allow any flags for host-joined IP groups */
		if (cfg.entry->state) {
			NL_SET_ERR_MSG_MOD(extack, "Flags are not allowed for host groups");
			goto out;
		}
		if (!br_multicast_is_star_g(&cfg.group)) {
			NL_SET_ERR_MSG_MOD(extack, "Groups with sources cannot be manually host joined");
			goto out;
		}
	}

	if (br_group_is_l2(&cfg.group) && cfg.entry->state != MDB_PERMANENT) {
		NL_SET_ERR_MSG_MOD(extack, "Only permanent L2 entries allowed");
		goto out;
	}

	if (cfg.p) {
		if (cfg.p->state == BR_STATE_DISABLED && cfg.entry->state != MDB_PERMANENT) {
			NL_SET_ERR_MSG_MOD(extack, "Port is in disabled state and entry is not permanent");
			goto out;
		}
		vg = nbp_vlan_group(cfg.p);
	} else {
		vg = br_vlan_group(cfg.br);
	}

	/* If vlan filtering is enabled and VLAN is not specified
	 * install mdb entry on all vlans configured on the port.
	 */
	if (br_vlan_enabled(cfg.br->dev) && vg && cfg.entry->vid == 0) {
		list_for_each_entry(v, &vg->vlan_list, vlist) {
			cfg.entry->vid = v->vid;
			cfg.group.vid = v->vid;
			err = __br_mdb_add(&cfg, extack);
			if (err)
				break;
		}
	} else {
		err = __br_mdb_add(&cfg, extack);
	}

out:
	br_mdb_config_fini(&cfg);
	return err;
}

static int __br_mdb_del(const struct br_mdb_config *cfg)
{
	struct br_mdb_entry *entry = cfg->entry;
	struct net_bridge *br = cfg->br;
	struct net_bridge_mdb_entry *mp;
	struct net_bridge_port_group *p;
	struct net_bridge_port_group __rcu **pp;
	struct br_ip ip = cfg->group;
	int err = -EINVAL;

	spin_lock_bh(&br->multicast_lock);
	mp = br_mdb_ip_get(br, &ip);
	if (!mp)
		goto unlock;

	/* host leave */
	if (entry->ifindex == mp->br->dev->ifindex && mp->host_joined) {
		br_multicast_host_leave(mp, false);
		err = 0;
		br_mdb_notify(br->dev, mp, NULL, RTM_DELMDB);
		if (!mp->ports && netif_running(br->dev))
			mod_timer(&mp->timer, jiffies);
		goto unlock;
	}

	for (pp = &mp->ports;
	     (p = mlock_dereference(*pp, br)) != NULL;
	     pp = &p->next) {
		if (!p->key.port || p->key.port->dev->ifindex != entry->ifindex)
			continue;

		br_multicast_del_pg(mp, p, pp);
		err = 0;
		break;
	}

unlock:
	spin_unlock_bh(&br->multicast_lock);
	return err;
}

int br_mdb_del(struct net_device *dev, struct nlattr *tb[],
	       struct netlink_ext_ack *extack)
{
	struct net_bridge_vlan_group *vg;
	struct net_bridge_vlan *v;
	struct br_mdb_config cfg;
	int err;

	err = br_mdb_config_init(&cfg, dev, tb, 0, extack);
	if (err)
		return err;

	if (cfg.p)
		vg = nbp_vlan_group(cfg.p);
	else
		vg = br_vlan_group(cfg.br);

	/* If vlan filtering is enabled and VLAN is not specified
	 * delete mdb entry on all vlans configured on the port.
	 */
	if (br_vlan_enabled(cfg.br->dev) && vg && cfg.entry->vid == 0) {
		list_for_each_entry(v, &vg->vlan_list, vlist) {
			cfg.entry->vid = v->vid;
			cfg.group.vid = v->vid;
			err = __br_mdb_del(&cfg);
		}
	} else {
		err = __br_mdb_del(&cfg);
	}

	br_mdb_config_fini(&cfg);
	return err;
}

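/* Filter describing which entries an RTM_DELMDB bulk delete should match;
 * zero fields act as wildcards.
 */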
struct br_mdb_flush_desc {
	u32 port_ifindex;
	u16 vid;
	u8 rt_protocol;
	u8 state;
	u8 state_mask;
};

static const struct nla_policy br_mdbe_attrs_del_bulk_pol[MDBE_ATTR_MAX + 1] = {
	[MDBE_ATTR_RTPROT] = NLA_POLICY_MIN(NLA_U8, RTPROT_STATIC),
	[MDBE_ATTR_STATE_MASK] = NLA_POLICY_MASK(NLA_U8, MDB_PERMANENT),
};

static int br_mdb_flush_desc_init(struct br_mdb_flush_desc *desc,
				  struct nlattr *tb[],
				  struct netlink_ext_ack *extack)
{
	struct br_mdb_entry *entry = nla_data(tb[MDBA_SET_ENTRY]);
	struct nlattr *mdbe_attrs[MDBE_ATTR_MAX + 1];
	int err;

	desc->port_ifindex = entry->ifindex;
	desc->vid = entry->vid;
	desc->state = entry->state;

	if (!tb[MDBA_SET_ENTRY_ATTRS])
		return 0;

	err = nla_parse_nested(mdbe_attrs, MDBE_ATTR_MAX,
			       tb[MDBA_SET_ENTRY_ATTRS],
			       br_mdbe_attrs_del_bulk_pol, extack);
	if (err)
		return err;

	if (mdbe_attrs[MDBE_ATTR_STATE_MASK])
		desc->state_mask = nla_get_u8(mdbe_attrs[MDBE_ATTR_STATE_MASK]);

	if (mdbe_attrs[MDBE_ATTR_RTPROT])
		desc->rt_protocol = nla_get_u8(mdbe_attrs[MDBE_ATTR_RTPROT]);

	return 0;
}

static void br_mdb_flush_host(struct net_bridge *br,
			      struct net_bridge_mdb_entry *mp,
			      const struct br_mdb_flush_desc *desc)
{
	u8 state;

	if (desc->port_ifindex && desc->port_ifindex != br->dev->ifindex)
		return;

	if (desc->rt_protocol)
		return;

	state = br_group_is_l2(&mp->addr) ? MDB_PERMANENT : 0;
	if (desc->state_mask && (state & desc->state_mask) != desc->state)
		return;

	br_multicast_host_leave(mp, true);
	if (!mp->ports && netif_running(br->dev))
		mod_timer(&mp->timer, jiffies);
}

static void br_mdb_flush_pgs(struct net_bridge *br,
			     struct net_bridge_mdb_entry *mp,
			     const struct br_mdb_flush_desc *desc)
{
	struct net_bridge_port_group __rcu **pp;
	struct net_bridge_port_group *p;

	for (pp = &mp->ports; (p = mlock_dereference(*pp, br)) != NULL;) {
		u8 state;

		if (desc->port_ifindex &&
		    desc->port_ifindex != p->key.port->dev->ifindex) {
			pp = &p->next;
			continue;
		}

		if (desc->rt_protocol && desc->rt_protocol != p->rt_protocol) {
			pp = &p->next;
			continue;
		}

		state = p->flags & MDB_PG_FLAGS_PERMANENT ? MDB_PERMANENT : 0;
		if (desc->state_mask &&
		    (state & desc->state_mask) != desc->state) {
			pp = &p->next;
			continue;
		}

		br_multicast_del_pg(mp, p, pp);
	}
}

static void br_mdb_flush(struct net_bridge *br,
			 const struct br_mdb_flush_desc *desc)
{
	struct net_bridge_mdb_entry *mp;

	spin_lock_bh(&br->multicast_lock);

	/* Safe variant is not needed because entries are removed from the list
	 * upon group timer expiration or bridge deletion.
	 */
	hlist_for_each_entry(mp, &br->mdb_list, mdb_node) {
		if (desc->vid && desc->vid != mp->addr.vid)
			continue;

		br_mdb_flush_host(br, mp, desc);
		br_mdb_flush_pgs(br, mp, desc);
	}

	spin_unlock_bh(&br->multicast_lock);
}

int br_mdb_del_bulk(struct net_device *dev, struct nlattr *tb[],
		    struct netlink_ext_ack *extack)
{
	struct net_bridge *br = netdev_priv(dev);
	struct br_mdb_flush_desc desc = {};
	int err;

	err = br_mdb_flush_desc_init(&desc, tb, extack);
	if (err)
		return err;

	br_mdb_flush(br, &desc);

	return 0;
}

static const struct nla_policy br_mdbe_attrs_get_pol[MDBE_ATTR_MAX + 1] = {
	[MDBE_ATTR_SOURCE] = NLA_POLICY_RANGE(NLA_BINARY,
					      sizeof(struct in_addr),
					      sizeof(struct in6_addr)),
};

static int br_mdb_get_parse(struct net_device *dev, struct nlattr *tb[],
			    struct br_ip *group, struct netlink_ext_ack *extack)
{
	struct br_mdb_entry *entry = nla_data(tb[MDBA_GET_ENTRY]);
	struct nlattr *mdbe_attrs[MDBE_ATTR_MAX + 1];
	int err;

	if (!tb[MDBA_GET_ENTRY_ATTRS]) {
		__mdb_entry_to_br_ip(entry, group, NULL);
		return 0;
	}

	err = nla_parse_nested(mdbe_attrs, MDBE_ATTR_MAX,
			       tb[MDBA_GET_ENTRY_ATTRS], br_mdbe_attrs_get_pol,
			       extack);
	if (err)
		return err;

	if (mdbe_attrs[MDBE_ATTR_SOURCE] &&
	    !is_valid_mdb_source(mdbe_attrs[MDBE_ATTR_SOURCE],
				 entry->addr.proto, extack))
		return -EINVAL;

	__mdb_entry_to_br_ip(entry, group, mdbe_attrs);

	return 0;
}

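/* Allocate a reply skb large enough for the entry's host-joined state and
 * all of its port groups.
 */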
static struct sk_buff *
br_mdb_get_reply_alloc(const struct net_bridge_mdb_entry *mp)
{
	struct net_bridge_port_group *pg;
	size_t nlmsg_size;

	nlmsg_size = NLMSG_ALIGN(sizeof(struct br_port_msg)) +
		     /* MDBA_MDB */
		     nla_total_size(0) +
		     /* MDBA_MDB_ENTRY */
		     nla_total_size(0);

	if (mp->host_joined)
		nlmsg_size += rtnl_mdb_nlmsg_pg_size(NULL);

	for (pg = mlock_dereference(mp->ports, mp->br); pg;
	     pg = mlock_dereference(pg->next, mp->br))
		nlmsg_size += rtnl_mdb_nlmsg_pg_size(pg);

	return nlmsg_new(nlmsg_size, GFP_ATOMIC);
}

static int br_mdb_get_reply_fill(struct sk_buff *skb,
				 struct net_bridge_mdb_entry *mp, u32 portid,
				 u32 seq)
{
	struct nlattr *mdb_nest, *mdb_entry_nest;
	struct net_bridge_port_group *pg;
	struct br_port_msg *bpm;
	struct nlmsghdr *nlh;
	int err;

	nlh = nlmsg_put(skb, portid, seq, RTM_NEWMDB, sizeof(*bpm), 0);
	if (!nlh)
		return -EMSGSIZE;

	bpm = nlmsg_data(nlh);
	memset(bpm, 0, sizeof(*bpm));
	bpm->family = AF_BRIDGE;
	bpm->ifindex = mp->br->dev->ifindex;
	mdb_nest = nla_nest_start_noflag(skb, MDBA_MDB);
	if (!mdb_nest) {
		err = -EMSGSIZE;
		goto cancel;
	}
	mdb_entry_nest = nla_nest_start_noflag(skb, MDBA_MDB_ENTRY);
	if (!mdb_entry_nest) {
		err = -EMSGSIZE;
		goto cancel;
	}

	if (mp->host_joined) {
		err = __mdb_fill_info(skb, mp, NULL);
		if (err)
			goto cancel;
	}

	for (pg = mlock_dereference(mp->ports, mp->br); pg;
	     pg = mlock_dereference(pg->next, mp->br)) {
		err = __mdb_fill_info(skb, mp, pg);
		if (err)
			goto cancel;
	}

	nla_nest_end(skb, mdb_entry_nest);
	nla_nest_end(skb, mdb_nest);
	nlmsg_end(skb, nlh);

	return 0;

cancel:
	nlmsg_cancel(skb, nlh);
	return err;
}

int br_mdb_get(struct net_device *dev, struct nlattr *tb[], u32 portid, u32 seq,
	       struct netlink_ext_ack *extack)
{
	struct net_bridge *br = netdev_priv(dev);
	struct net_bridge_mdb_entry *mp;
	struct sk_buff *skb;
	struct br_ip group;
	int err;

	err = br_mdb_get_parse(dev, tb, &group, extack);
	if (err)
		return err;

	/* Hold the multicast lock to ensure that the MDB entry does not change
	 * between the time the reply size is determined and when the reply is
	 * filled in.
	 */
	spin_lock_bh(&br->multicast_lock);

	mp = br_mdb_ip_get(br, &group);
	if (!mp || (!mp->ports && !mp->host_joined)) {
		NL_SET_ERR_MSG_MOD(extack, "MDB entry not found");
		err = -ENOENT;
		goto unlock;
	}

	skb = br_mdb_get_reply_alloc(mp);
	if (!skb) {
		err = -ENOMEM;
		goto unlock;
	}

	err = br_mdb_get_reply_fill(skb, mp, portid, seq);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack, "Failed to fill MDB get reply");
		goto free;
	}

	spin_unlock_bh(&br->multicast_lock);

	return rtnl_unicast(skb, dev_net(dev), portid);

free:
	kfree_skb(skb);
unlock:
	spin_unlock_bh(&br->multicast_lock);
	return err;
}