net/bridge/br_multicast.c
1 /*
2  * Bridge multicast support.
3  *
4  * Copyright (c) 2010 Herbert Xu <herbert@gondor.apana.org.au>
5  *
6  * This program is free software; you can redistribute it and/or modify it
7  * under the terms of the GNU General Public License as published by the Free
8  * Software Foundation; either version 2 of the License, or (at your option)
9  * any later version.
10  *
11  */
12
13 #include <linux/err.h>
14 #include <linux/export.h>
15 #include <linux/if_ether.h>
16 #include <linux/igmp.h>
17 #include <linux/in.h>
18 #include <linux/jhash.h>
19 #include <linux/kernel.h>
20 #include <linux/log2.h>
21 #include <linux/netdevice.h>
22 #include <linux/netfilter_bridge.h>
23 #include <linux/random.h>
24 #include <linux/rculist.h>
25 #include <linux/skbuff.h>
26 #include <linux/slab.h>
27 #include <linux/timer.h>
28 #include <linux/inetdevice.h>
29 #include <linux/mroute.h>
30 #include <net/ip.h>
31 #include <net/switchdev.h>
32 #if IS_ENABLED(CONFIG_IPV6)
33 #include <linux/icmpv6.h>
34 #include <net/ipv6.h>
35 #include <net/mld.h>
36 #include <net/ip6_checksum.h>
37 #include <net/addrconf.h>
38 #endif
39
40 #include "br_private.h"
41
42 static const struct rhashtable_params br_mdb_rht_params = {
43         .head_offset = offsetof(struct net_bridge_mdb_entry, rhnode),
44         .key_offset = offsetof(struct net_bridge_mdb_entry, addr),
45         .key_len = sizeof(struct br_ip),
46         .automatic_shrinking = true,
47 };
48
49 static void br_multicast_start_querier(struct net_bridge *br,
50                                        struct bridge_mcast_own_query *query);
51 static void br_multicast_add_router(struct net_bridge *br,
52                                     struct net_bridge_port *port);
53 static void br_ip4_multicast_leave_group(struct net_bridge *br,
54                                          struct net_bridge_port *port,
55                                          __be32 group,
56                                          __u16 vid,
57                                          const unsigned char *src);
58
59 static void __del_port_router(struct net_bridge_port *p);
60 #if IS_ENABLED(CONFIG_IPV6)
61 static void br_ip6_multicast_leave_group(struct net_bridge *br,
62                                          struct net_bridge_port *port,
63                                          const struct in6_addr *group,
64                                          __u16 vid, const unsigned char *src);
65 #endif
66
67 static struct net_bridge_mdb_entry *br_mdb_ip_get_rcu(struct net_bridge *br,
68                                                       struct br_ip *dst)
69 {
70         return rhashtable_lookup(&br->mdb_hash_tbl, dst, br_mdb_rht_params);
71 }
72
73 struct net_bridge_mdb_entry *br_mdb_ip_get(struct net_bridge *br,
74                                            struct br_ip *dst)
75 {
76         struct net_bridge_mdb_entry *ent;
77
78         lockdep_assert_held_once(&br->multicast_lock);
79
80         rcu_read_lock();
81         ent = rhashtable_lookup(&br->mdb_hash_tbl, dst, br_mdb_rht_params);
82         rcu_read_unlock();
83
84         return ent;
85 }
86
87 static struct net_bridge_mdb_entry *br_mdb_ip4_get(struct net_bridge *br,
88                                                    __be32 dst, __u16 vid)
89 {
90         struct br_ip br_dst;
91
92         memset(&br_dst, 0, sizeof(br_dst));
93         br_dst.u.ip4 = dst;
94         br_dst.proto = htons(ETH_P_IP);
95         br_dst.vid = vid;
96
97         return br_mdb_ip_get(br, &br_dst);
98 }
99
100 #if IS_ENABLED(CONFIG_IPV6)
101 static struct net_bridge_mdb_entry *br_mdb_ip6_get(struct net_bridge *br,
102                                                    const struct in6_addr *dst,
103                                                    __u16 vid)
104 {
105         struct br_ip br_dst;
106
107         memset(&br_dst, 0, sizeof(br_dst));
108         br_dst.u.ip6 = *dst;
109         br_dst.proto = htons(ETH_P_IPV6);
110         br_dst.vid = vid;
111
112         return br_mdb_ip_get(br, &br_dst);
113 }
114 #endif
115
116 struct net_bridge_mdb_entry *br_mdb_get(struct net_bridge *br,
117                                         struct sk_buff *skb, u16 vid)
118 {
119         struct br_ip ip;
120
121         if (!br_opt_get(br, BROPT_MULTICAST_ENABLED))
122                 return NULL;
123
124         if (BR_INPUT_SKB_CB(skb)->igmp)
125                 return NULL;
126
127         memset(&ip, 0, sizeof(ip));
128         ip.proto = skb->protocol;
129         ip.vid = vid;
130
131         switch (skb->protocol) {
132         case htons(ETH_P_IP):
133                 ip.u.ip4 = ip_hdr(skb)->daddr;
134                 break;
135 #if IS_ENABLED(CONFIG_IPV6)
136         case htons(ETH_P_IPV6):
137                 ip.u.ip6 = ipv6_hdr(skb)->daddr;
138                 break;
139 #endif
140         default:
141                 return NULL;
142         }
143
144         return br_mdb_ip_get_rcu(br, &ip);
145 }
146
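/* Runs when an mdb entry's membership timer fires: clear the host-joined
 * state, notify userspace via RTM_DELMDB and, if no port groups remain,
 * unlink and free the entry.
 */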
147 static void br_multicast_group_expired(struct timer_list *t)
148 {
149         struct net_bridge_mdb_entry *mp = from_timer(mp, t, timer);
150         struct net_bridge *br = mp->br;
151
152         spin_lock(&br->multicast_lock);
153         if (!netif_running(br->dev) || timer_pending(&mp->timer))
154                 goto out;
155
156         mp->host_joined = false;
157         br_mdb_notify(br->dev, NULL, &mp->addr, RTM_DELMDB, 0);
158
159         if (mp->ports)
160                 goto out;
161
162         rhashtable_remove_fast(&br->mdb_hash_tbl, &mp->rhnode,
163                                br_mdb_rht_params);
164         hlist_del_rcu(&mp->mdb_node);
165
166         kfree_rcu(mp, rcu);
167
168 out:
169         spin_unlock(&br->multicast_lock);
170 }
171
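/* Unlink a port group from its mdb entry, stop its timer and notify
 * userspace; if the entry is left with neither ports nor a host join,
 * arm the group timer for immediate expiry.
 */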
172 static void br_multicast_del_pg(struct net_bridge *br,
173                                 struct net_bridge_port_group *pg)
174 {
175         struct net_bridge_mdb_entry *mp;
176         struct net_bridge_port_group *p;
177         struct net_bridge_port_group __rcu **pp;
178
179         mp = br_mdb_ip_get(br, &pg->addr);
180         if (WARN_ON(!mp))
181                 return;
182
183         for (pp = &mp->ports;
184              (p = mlock_dereference(*pp, br)) != NULL;
185              pp = &p->next) {
186                 if (p != pg)
187                         continue;
188
189                 rcu_assign_pointer(*pp, p->next);
190                 hlist_del_init(&p->mglist);
191                 del_timer(&p->timer);
192                 br_mdb_notify(br->dev, p->port, &pg->addr, RTM_DELMDB,
193                               p->flags);
194                 kfree_rcu(p, rcu);
195
196                 if (!mp->ports && !mp->host_joined &&
197                     netif_running(br->dev))
198                         mod_timer(&mp->timer, jiffies);
199
200                 return;
201         }
202
203         WARN_ON(1);
204 }
205
206 static void br_multicast_port_group_expired(struct timer_list *t)
207 {
208         struct net_bridge_port_group *pg = from_timer(pg, t, timer);
209         struct net_bridge *br = pg->port->br;
210
211         spin_lock(&br->multicast_lock);
212         if (!netif_running(br->dev) || timer_pending(&pg->timer) ||
213             hlist_unhashed(&pg->mglist) || pg->flags & MDB_PG_FLAGS_PERMANENT)
214                 goto out;
215
216         br_multicast_del_pg(br, pg);
217
218 out:
219         spin_unlock(&br->multicast_lock);
220 }
221
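/* Build an IGMPv2 or IGMPv3 membership query for @group (0 for a general
 * query), with the Ethernet and IP headers and the Router Alert option
 * already filled in.
 */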
222 static struct sk_buff *br_ip4_multicast_alloc_query(struct net_bridge *br,
223                                                     __be32 group,
224                                                     u8 *igmp_type)
225 {
226         struct igmpv3_query *ihv3;
227         size_t igmp_hdr_size;
228         struct sk_buff *skb;
229         struct igmphdr *ih;
230         struct ethhdr *eth;
231         struct iphdr *iph;
232
233         igmp_hdr_size = sizeof(*ih);
234         if (br->multicast_igmp_version == 3)
235                 igmp_hdr_size = sizeof(*ihv3);
236         skb = netdev_alloc_skb_ip_align(br->dev, sizeof(*eth) + sizeof(*iph) +
237                                                  igmp_hdr_size + 4);
238         if (!skb)
239                 goto out;
240
241         skb->protocol = htons(ETH_P_IP);
242
243         skb_reset_mac_header(skb);
244         eth = eth_hdr(skb);
245
246         ether_addr_copy(eth->h_source, br->dev->dev_addr);
247         eth->h_dest[0] = 1;
248         eth->h_dest[1] = 0;
249         eth->h_dest[2] = 0x5e;
250         eth->h_dest[3] = 0;
251         eth->h_dest[4] = 0;
252         eth->h_dest[5] = 1;
253         eth->h_proto = htons(ETH_P_IP);
254         skb_put(skb, sizeof(*eth));
255
256         skb_set_network_header(skb, skb->len);
257         iph = ip_hdr(skb);
258
259         iph->version = 4;
260         iph->ihl = 6;
261         iph->tos = 0xc0;
262         iph->tot_len = htons(sizeof(*iph) + igmp_hdr_size + 4);
263         iph->id = 0;
264         iph->frag_off = htons(IP_DF);
265         iph->ttl = 1;
266         iph->protocol = IPPROTO_IGMP;
267         iph->saddr = br_opt_get(br, BROPT_MULTICAST_QUERY_USE_IFADDR) ?
268                      inet_select_addr(br->dev, 0, RT_SCOPE_LINK) : 0;
269         iph->daddr = htonl(INADDR_ALLHOSTS_GROUP);
270         ((u8 *)&iph[1])[0] = IPOPT_RA;
271         ((u8 *)&iph[1])[1] = 4;
272         ((u8 *)&iph[1])[2] = 0;
273         ((u8 *)&iph[1])[3] = 0;
274         ip_send_check(iph);
275         skb_put(skb, 24);
276
277         skb_set_transport_header(skb, skb->len);
278         *igmp_type = IGMP_HOST_MEMBERSHIP_QUERY;
279
280         switch (br->multicast_igmp_version) {
281         case 2:
282                 ih = igmp_hdr(skb);
283                 ih->type = IGMP_HOST_MEMBERSHIP_QUERY;
284                 ih->code = (group ? br->multicast_last_member_interval :
285                                     br->multicast_query_response_interval) /
286                            (HZ / IGMP_TIMER_SCALE);
287                 ih->group = group;
288                 ih->csum = 0;
289                 ih->csum = ip_compute_csum((void *)ih, sizeof(*ih));
290                 break;
291         case 3:
292                 ihv3 = igmpv3_query_hdr(skb);
293                 ihv3->type = IGMP_HOST_MEMBERSHIP_QUERY;
294                 ihv3->code = (group ? br->multicast_last_member_interval :
295                                       br->multicast_query_response_interval) /
296                              (HZ / IGMP_TIMER_SCALE);
297                 ihv3->group = group;
298                 ihv3->qqic = br->multicast_query_interval / HZ;
299                 ihv3->nsrcs = 0;
300                 ihv3->resv = 0;
301                 ihv3->suppress = 0;
302                 ihv3->qrv = 2;
303                 ihv3->csum = 0;
304                 ihv3->csum = ip_compute_csum((void *)ihv3, sizeof(*ihv3));
305                 break;
306         }
307
308         skb_put(skb, igmp_hdr_size);
309         __skb_pull(skb, sizeof(*eth));
310
311 out:
312         return skb;
313 }
314
315 #if IS_ENABLED(CONFIG_IPV6)
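/* MLDv1/MLDv2 counterpart of the query builder: emits a query with a
 * hop-by-hop Router Alert option; returns NULL when no IPv6 source
 * address can be selected for the bridge device.
 */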
316 static struct sk_buff *br_ip6_multicast_alloc_query(struct net_bridge *br,
317                                                     const struct in6_addr *grp,
318                                                     u8 *igmp_type)
319 {
320         struct mld2_query *mld2q;
321         unsigned long interval;
322         struct ipv6hdr *ip6h;
323         struct mld_msg *mldq;
324         size_t mld_hdr_size;
325         struct sk_buff *skb;
326         struct ethhdr *eth;
327         u8 *hopopt;
328
329         mld_hdr_size = sizeof(*mldq);
330         if (br->multicast_mld_version == 2)
331                 mld_hdr_size = sizeof(*mld2q);
332         skb = netdev_alloc_skb_ip_align(br->dev, sizeof(*eth) + sizeof(*ip6h) +
333                                                  8 + mld_hdr_size);
334         if (!skb)
335                 goto out;
336
337         skb->protocol = htons(ETH_P_IPV6);
338
339         /* Ethernet header */
340         skb_reset_mac_header(skb);
341         eth = eth_hdr(skb);
342
343         ether_addr_copy(eth->h_source, br->dev->dev_addr);
344         eth->h_proto = htons(ETH_P_IPV6);
345         skb_put(skb, sizeof(*eth));
346
347         /* IPv6 header + HbH option */
348         skb_set_network_header(skb, skb->len);
349         ip6h = ipv6_hdr(skb);
350
351         *(__force __be32 *)ip6h = htonl(0x60000000);
352         ip6h->payload_len = htons(8 + mld_hdr_size);
353         ip6h->nexthdr = IPPROTO_HOPOPTS;
354         ip6h->hop_limit = 1;
355         ipv6_addr_set(&ip6h->daddr, htonl(0xff020000), 0, 0, htonl(1));
356         if (ipv6_dev_get_saddr(dev_net(br->dev), br->dev, &ip6h->daddr, 0,
357                                &ip6h->saddr)) {
358                 kfree_skb(skb);
359                 br_opt_toggle(br, BROPT_HAS_IPV6_ADDR, false);
360                 return NULL;
361         }
362
363         br_opt_toggle(br, BROPT_HAS_IPV6_ADDR, true);
364         ipv6_eth_mc_map(&ip6h->daddr, eth->h_dest);
365
366         hopopt = (u8 *)(ip6h + 1);
367         hopopt[0] = IPPROTO_ICMPV6;             /* next hdr */
368         hopopt[1] = 0;                          /* length of HbH */
369         hopopt[2] = IPV6_TLV_ROUTERALERT;       /* Router Alert */
370         hopopt[3] = 2;                          /* Length of RA Option */
371         hopopt[4] = 0;                          /* Type = 0x0000 (MLD) */
372         hopopt[5] = 0;
373         hopopt[6] = IPV6_TLV_PAD1;              /* Pad1 */
374         hopopt[7] = IPV6_TLV_PAD1;              /* Pad1 */
375
376         skb_put(skb, sizeof(*ip6h) + 8);
377
378         /* ICMPv6 */
379         skb_set_transport_header(skb, skb->len);
380         interval = ipv6_addr_any(grp) ?
381                         br->multicast_query_response_interval :
382                         br->multicast_last_member_interval;
383         *igmp_type = ICMPV6_MGM_QUERY;
384         switch (br->multicast_mld_version) {
385         case 1:
386                 mldq = (struct mld_msg *)icmp6_hdr(skb);
387                 mldq->mld_type = ICMPV6_MGM_QUERY;
388                 mldq->mld_code = 0;
389                 mldq->mld_cksum = 0;
390                 mldq->mld_maxdelay = htons((u16)jiffies_to_msecs(interval));
391                 mldq->mld_reserved = 0;
392                 mldq->mld_mca = *grp;
393                 mldq->mld_cksum = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
394                                                   sizeof(*mldq), IPPROTO_ICMPV6,
395                                                   csum_partial(mldq,
396                                                                sizeof(*mldq),
397                                                                0));
398                 break;
399         case 2:
400                 mld2q = (struct mld2_query *)icmp6_hdr(skb);
401                 mld2q->mld2q_mrc = htons((u16)jiffies_to_msecs(interval));
402                 mld2q->mld2q_type = ICMPV6_MGM_QUERY;
403                 mld2q->mld2q_code = 0;
404                 mld2q->mld2q_cksum = 0;
405                 mld2q->mld2q_resv1 = 0;
406                 mld2q->mld2q_resv2 = 0;
407                 mld2q->mld2q_suppress = 0;
408                 mld2q->mld2q_qrv = 2;
409                 mld2q->mld2q_nsrcs = 0;
410                 mld2q->mld2q_qqic = br->multicast_query_interval / HZ;
411                 mld2q->mld2q_mca = *grp;
412                 mld2q->mld2q_cksum = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
413                                                      sizeof(*mld2q),
414                                                      IPPROTO_ICMPV6,
415                                                      csum_partial(mld2q,
416                                                                   sizeof(*mld2q),
417                                                                   0));
418                 break;
419         }
420         skb_put(skb, mld_hdr_size);
421
422         __skb_pull(skb, sizeof(*eth));
423
424 out:
425         return skb;
426 }
427 #endif
428
429 static struct sk_buff *br_multicast_alloc_query(struct net_bridge *br,
430                                                 struct br_ip *addr,
431                                                 u8 *igmp_type)
432 {
433         switch (addr->proto) {
434         case htons(ETH_P_IP):
435                 return br_ip4_multicast_alloc_query(br, addr->u.ip4, igmp_type);
436 #if IS_ENABLED(CONFIG_IPV6)
437         case htons(ETH_P_IPV6):
438                 return br_ip6_multicast_alloc_query(br, &addr->u.ip6,
439                                                     igmp_type);
440 #endif
441         }
442         return NULL;
443 }
444
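/* Look up or create the mdb entry for @group.  Once hash_max entries
 * exist, snooping is turned off (BROPT_MULTICAST_ENABLED cleared) and
 * -E2BIG is returned.
 */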
445 struct net_bridge_mdb_entry *br_multicast_new_group(struct net_bridge *br,
446                                                     struct br_ip *group)
447 {
448         struct net_bridge_mdb_entry *mp;
449         int err;
450
451         mp = br_mdb_ip_get(br, group);
452         if (mp)
453                 return mp;
454
455         if (atomic_read(&br->mdb_hash_tbl.nelems) >= br->hash_max) {
456                 br_opt_toggle(br, BROPT_MULTICAST_ENABLED, false);
457                 return ERR_PTR(-E2BIG);
458         }
459
460         mp = kzalloc(sizeof(*mp), GFP_ATOMIC);
461         if (unlikely(!mp))
462                 return ERR_PTR(-ENOMEM);
463
464         mp->br = br;
465         mp->addr = *group;
466         timer_setup(&mp->timer, br_multicast_group_expired, 0);
467         err = rhashtable_lookup_insert_fast(&br->mdb_hash_tbl, &mp->rhnode,
468                                             br_mdb_rht_params);
469         if (err) {
470                 kfree(mp);
471                 mp = ERR_PTR(err);
472         } else {
473                 hlist_add_head_rcu(&mp->mdb_node, &br->mdb_list);
474         }
475
476         return mp;
477 }
478
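/* Allocate a port group for @group on @port, link it into the port's
 * mglist and record the reporting host's MAC address in eth_addr
 * (broadcast when no source address was supplied).
 */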
479 struct net_bridge_port_group *br_multicast_new_port_group(
480                         struct net_bridge_port *port,
481                         struct br_ip *group,
482                         struct net_bridge_port_group __rcu *next,
483                         unsigned char flags,
484                         const unsigned char *src)
485 {
486         struct net_bridge_port_group *p;
487
488         p = kzalloc(sizeof(*p), GFP_ATOMIC);
489         if (unlikely(!p))
490                 return NULL;
491
492         p->addr = *group;
493         p->port = port;
494         p->flags = flags;
495         rcu_assign_pointer(p->next, next);
496         hlist_add_head(&p->mglist, &port->mglist);
497         timer_setup(&p->timer, br_multicast_port_group_expired, 0);
498
499         if (src)
500                 memcpy(p->eth_addr, src, ETH_ALEN);
501         else
502                 eth_broadcast_addr(p->eth_addr);
503
504         return p;
505 }
506
507 static bool br_port_group_equal(struct net_bridge_port_group *p,
508                                 struct net_bridge_port *port,
509                                 const unsigned char *src)
510 {
511         if (p->port != port)
512                 return false;
513
514         if (!(port->flags & BR_MULTICAST_TO_UNICAST))
515                 return true;
516
517         return ether_addr_equal(src, p->eth_addr);
518 }
519
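/* Process a join: refresh the host-join state when @port is NULL,
 * otherwise find or create the matching port group and re-arm its
 * membership timer.
 */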
520 static int br_multicast_add_group(struct net_bridge *br,
521                                   struct net_bridge_port *port,
522                                   struct br_ip *group,
523                                   const unsigned char *src)
524 {
525         struct net_bridge_port_group __rcu **pp;
526         struct net_bridge_port_group *p;
527         struct net_bridge_mdb_entry *mp;
528         unsigned long now = jiffies;
529         int err;
530
531         spin_lock(&br->multicast_lock);
532         if (!netif_running(br->dev) ||
533             (port && port->state == BR_STATE_DISABLED))
534                 goto out;
535
536         mp = br_multicast_new_group(br, group);
537         err = PTR_ERR(mp);
538         if (IS_ERR(mp))
539                 goto err;
540
541         if (!port) {
542                 if (!mp->host_joined) {
543                         mp->host_joined = true;
544                         br_mdb_notify(br->dev, NULL, &mp->addr, RTM_NEWMDB, 0);
545                 }
546                 mod_timer(&mp->timer, now + br->multicast_membership_interval);
547                 goto out;
548         }
549
550         for (pp = &mp->ports;
551              (p = mlock_dereference(*pp, br)) != NULL;
552              pp = &p->next) {
553                 if (br_port_group_equal(p, port, src))
554                         goto found;
555                 if ((unsigned long)p->port < (unsigned long)port)
556                         break;
557         }
558
559         p = br_multicast_new_port_group(port, group, *pp, 0, src);
560         if (unlikely(!p))
561                 goto err;
562         rcu_assign_pointer(*pp, p);
563         br_mdb_notify(br->dev, port, group, RTM_NEWMDB, 0);
564
565 found:
566         mod_timer(&p->timer, now + br->multicast_membership_interval);
567 out:
568         err = 0;
569
570 err:
571         spin_unlock(&br->multicast_lock);
572         return err;
573 }
574
575 static int br_ip4_multicast_add_group(struct net_bridge *br,
576                                       struct net_bridge_port *port,
577                                       __be32 group,
578                                       __u16 vid,
579                                       const unsigned char *src)
580 {
581         struct br_ip br_group;
582
583         if (ipv4_is_local_multicast(group))
584                 return 0;
585
586         memset(&br_group, 0, sizeof(br_group));
587         br_group.u.ip4 = group;
588         br_group.proto = htons(ETH_P_IP);
589         br_group.vid = vid;
590
591         return br_multicast_add_group(br, port, &br_group, src);
592 }
593
594 #if IS_ENABLED(CONFIG_IPV6)
595 static int br_ip6_multicast_add_group(struct net_bridge *br,
596                                       struct net_bridge_port *port,
597                                       const struct in6_addr *group,
598                                       __u16 vid,
599                                       const unsigned char *src)
600 {
601         struct br_ip br_group;
602
603         if (ipv6_addr_is_ll_all_nodes(group))
604                 return 0;
605
606         memset(&br_group, 0, sizeof(br_group));
607         br_group.u.ip6 = *group;
608         br_group.proto = htons(ETH_P_IPV6);
609         br_group.vid = vid;
610
611         return br_multicast_add_group(br, port, &br_group, src);
612 }
613 #endif
614
615 static void br_multicast_router_expired(struct timer_list *t)
616 {
617         struct net_bridge_port *port =
618                         from_timer(port, t, multicast_router_timer);
619         struct net_bridge *br = port->br;
620
621         spin_lock(&br->multicast_lock);
622         if (port->multicast_router == MDB_RTR_TYPE_DISABLED ||
623             port->multicast_router == MDB_RTR_TYPE_PERM ||
624             timer_pending(&port->multicast_router_timer))
625                 goto out;
626
627         __del_port_router(port);
628 out:
629         spin_unlock(&br->multicast_lock);
630 }
631
632 static void br_mc_router_state_change(struct net_bridge *p,
633                                       bool is_mc_router)
634 {
635         struct switchdev_attr attr = {
636                 .orig_dev = p->dev,
637                 .id = SWITCHDEV_ATTR_ID_BRIDGE_MROUTER,
638                 .flags = SWITCHDEV_F_DEFER,
639                 .u.mrouter = is_mc_router,
640         };
641
642         switchdev_port_attr_set(p->dev, &attr);
643 }
644
645 static void br_multicast_local_router_expired(struct timer_list *t)
646 {
647         struct net_bridge *br = from_timer(br, t, multicast_router_timer);
648
649         spin_lock(&br->multicast_lock);
650         if (br->multicast_router == MDB_RTR_TYPE_DISABLED ||
651             br->multicast_router == MDB_RTR_TYPE_PERM ||
652             timer_pending(&br->multicast_router_timer))
653                 goto out;
654
655         br_mc_router_state_change(br, false);
656 out:
657         spin_unlock(&br->multicast_lock);
658 }
659
660 static void br_multicast_querier_expired(struct net_bridge *br,
661                                          struct bridge_mcast_own_query *query)
662 {
663         spin_lock(&br->multicast_lock);
664         if (!netif_running(br->dev) || !br_opt_get(br, BROPT_MULTICAST_ENABLED))
665                 goto out;
666
667         br_multicast_start_querier(br, query);
668
669 out:
670         spin_unlock(&br->multicast_lock);
671 }
672
673 static void br_ip4_multicast_querier_expired(struct timer_list *t)
674 {
675         struct net_bridge *br = from_timer(br, t, ip4_other_query.timer);
676
677         br_multicast_querier_expired(br, &br->ip4_own_query);
678 }
679
680 #if IS_ENABLED(CONFIG_IPV6)
681 static void br_ip6_multicast_querier_expired(struct timer_list *t)
682 {
683         struct net_bridge *br = from_timer(br, t, ip6_other_query.timer);
684
685         br_multicast_querier_expired(br, &br->ip6_own_query);
686 }
687 #endif
688
689 static void br_multicast_select_own_querier(struct net_bridge *br,
690                                             struct br_ip *ip,
691                                             struct sk_buff *skb)
692 {
693         if (ip->proto == htons(ETH_P_IP))
694                 br->ip4_querier.addr.u.ip4 = ip_hdr(skb)->saddr;
695 #if IS_ENABLED(CONFIG_IPV6)
696         else
697                 br->ip6_querier.addr.u.ip6 = ipv6_hdr(skb)->saddr;
698 #endif
699 }
700
701 static void __br_multicast_send_query(struct net_bridge *br,
702                                       struct net_bridge_port *port,
703                                       struct br_ip *ip)
704 {
705         struct sk_buff *skb;
706         u8 igmp_type;
707
708         skb = br_multicast_alloc_query(br, ip, &igmp_type);
709         if (!skb)
710                 return;
711
712         if (port) {
713                 skb->dev = port->dev;
714                 br_multicast_count(br, port, skb, igmp_type,
715                                    BR_MCAST_DIR_TX);
716                 NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_OUT,
717                         dev_net(port->dev), NULL, skb, NULL, skb->dev,
718                         br_dev_queue_push_xmit);
719         } else {
720                 br_multicast_select_own_querier(br, ip, skb);
721                 br_multicast_count(br, port, skb, igmp_type,
722                                    BR_MCAST_DIR_RX);
723                 netif_rx(skb);
724         }
725 }
726
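/* Send an own general query for the address family behind @own_query,
 * skipped while the other-querier timer is running, and schedule the
 * next query at the startup or regular interval.
 */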
727 static void br_multicast_send_query(struct net_bridge *br,
728                                     struct net_bridge_port *port,
729                                     struct bridge_mcast_own_query *own_query)
730 {
731         struct bridge_mcast_other_query *other_query = NULL;
732         struct br_ip br_group;
733         unsigned long time;
734
735         if (!netif_running(br->dev) ||
736             !br_opt_get(br, BROPT_MULTICAST_ENABLED) ||
737             !br_opt_get(br, BROPT_MULTICAST_QUERIER))
738                 return;
739
740         memset(&br_group.u, 0, sizeof(br_group.u));
741
742         if (port ? (own_query == &port->ip4_own_query) :
743                    (own_query == &br->ip4_own_query)) {
744                 other_query = &br->ip4_other_query;
745                 br_group.proto = htons(ETH_P_IP);
746 #if IS_ENABLED(CONFIG_IPV6)
747         } else {
748                 other_query = &br->ip6_other_query;
749                 br_group.proto = htons(ETH_P_IPV6);
750 #endif
751         }
752
753         if (!other_query || timer_pending(&other_query->timer))
754                 return;
755
756         __br_multicast_send_query(br, port, &br_group);
757
758         time = jiffies;
759         time += own_query->startup_sent < br->multicast_startup_query_count ?
760                 br->multicast_startup_query_interval :
761                 br->multicast_query_interval;
762         mod_timer(&own_query->timer, time);
763 }
764
765 static void
766 br_multicast_port_query_expired(struct net_bridge_port *port,
767                                 struct bridge_mcast_own_query *query)
768 {
769         struct net_bridge *br = port->br;
770
771         spin_lock(&br->multicast_lock);
772         if (port->state == BR_STATE_DISABLED ||
773             port->state == BR_STATE_BLOCKING)
774                 goto out;
775
776         if (query->startup_sent < br->multicast_startup_query_count)
777                 query->startup_sent++;
778
779         br_multicast_send_query(port->br, port, query);
780
781 out:
782         spin_unlock(&br->multicast_lock);
783 }
784
785 static void br_ip4_multicast_port_query_expired(struct timer_list *t)
786 {
787         struct net_bridge_port *port = from_timer(port, t, ip4_own_query.timer);
788
789         br_multicast_port_query_expired(port, &port->ip4_own_query);
790 }
791
792 #if IS_ENABLED(CONFIG_IPV6)
793 static void br_ip6_multicast_port_query_expired(struct timer_list *t)
794 {
795         struct net_bridge_port *port = from_timer(port, t, ip6_own_query.timer);
796
797         br_multicast_port_query_expired(port, &port->ip6_own_query);
798 }
799 #endif
800
801 static void br_mc_disabled_update(struct net_device *dev, bool value)
802 {
803         struct switchdev_attr attr = {
804                 .orig_dev = dev,
805                 .id = SWITCHDEV_ATTR_ID_BRIDGE_MC_DISABLED,
806                 .flags = SWITCHDEV_F_DEFER,
807                 .u.mc_disabled = !value,
808         };
809
810         switchdev_port_attr_set(dev, &attr);
811 }
812
813 int br_multicast_add_port(struct net_bridge_port *port)
814 {
815         port->multicast_router = MDB_RTR_TYPE_TEMP_QUERY;
816
817         timer_setup(&port->multicast_router_timer,
818                     br_multicast_router_expired, 0);
819         timer_setup(&port->ip4_own_query.timer,
820                     br_ip4_multicast_port_query_expired, 0);
821 #if IS_ENABLED(CONFIG_IPV6)
822         timer_setup(&port->ip6_own_query.timer,
823                     br_ip6_multicast_port_query_expired, 0);
824 #endif
825         br_mc_disabled_update(port->dev,
826                               br_opt_get(port->br, BROPT_MULTICAST_ENABLED));
827
828         port->mcast_stats = netdev_alloc_pcpu_stats(struct bridge_mcast_stats);
829         if (!port->mcast_stats)
830                 return -ENOMEM;
831
832         return 0;
833 }
834
835 void br_multicast_del_port(struct net_bridge_port *port)
836 {
837         struct net_bridge *br = port->br;
838         struct net_bridge_port_group *pg;
839         struct hlist_node *n;
840
841         /* Take care of the remaining groups, only perm ones should be left */
842         spin_lock_bh(&br->multicast_lock);
843         hlist_for_each_entry_safe(pg, n, &port->mglist, mglist)
844                 br_multicast_del_pg(br, pg);
845         spin_unlock_bh(&br->multicast_lock);
846         del_timer_sync(&port->multicast_router_timer);
847         free_percpu(port->mcast_stats);
848 }
849
850 static void br_multicast_enable(struct bridge_mcast_own_query *query)
851 {
852         query->startup_sent = 0;
853
854         if (try_to_del_timer_sync(&query->timer) >= 0 ||
855             del_timer(&query->timer))
856                 mod_timer(&query->timer, jiffies);
857 }
858
859 static void __br_multicast_enable_port(struct net_bridge_port *port)
860 {
861         struct net_bridge *br = port->br;
862
863         if (!br_opt_get(br, BROPT_MULTICAST_ENABLED) || !netif_running(br->dev))
864                 return;
865
866         br_multicast_enable(&port->ip4_own_query);
867 #if IS_ENABLED(CONFIG_IPV6)
868         br_multicast_enable(&port->ip6_own_query);
869 #endif
870         if (port->multicast_router == MDB_RTR_TYPE_PERM &&
871             hlist_unhashed(&port->rlist))
872                 br_multicast_add_router(br, port);
873 }
874
875 void br_multicast_enable_port(struct net_bridge_port *port)
876 {
877         struct net_bridge *br = port->br;
878
879         spin_lock(&br->multicast_lock);
880         __br_multicast_enable_port(port);
881         spin_unlock(&br->multicast_lock);
882 }
883
884 void br_multicast_disable_port(struct net_bridge_port *port)
885 {
886         struct net_bridge *br = port->br;
887         struct net_bridge_port_group *pg;
888         struct hlist_node *n;
889
890         spin_lock(&br->multicast_lock);
891         hlist_for_each_entry_safe(pg, n, &port->mglist, mglist)
892                 if (!(pg->flags & MDB_PG_FLAGS_PERMANENT))
893                         br_multicast_del_pg(br, pg);
894
895         __del_port_router(port);
896
897         del_timer(&port->multicast_router_timer);
898         del_timer(&port->ip4_own_query.timer);
899 #if IS_ENABLED(CONFIG_IPV6)
900         del_timer(&port->ip6_own_query.timer);
901 #endif
902         spin_unlock(&br->multicast_lock);
903 }
904
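/* Walk the group records of an IGMPv3 report.  Each record is treated
 * like an IGMPv2 join, except that IS_INCLUDE/TO_INCLUDE with an empty
 * source list is handled as a leave.
 */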
905 static int br_ip4_multicast_igmp3_report(struct net_bridge *br,
906                                          struct net_bridge_port *port,
907                                          struct sk_buff *skb,
908                                          u16 vid)
909 {
910         const unsigned char *src;
911         struct igmpv3_report *ih;
912         struct igmpv3_grec *grec;
913         int i;
914         int len;
915         int num;
916         int type;
917         int err = 0;
918         __be32 group;
919
920         ih = igmpv3_report_hdr(skb);
921         num = ntohs(ih->ngrec);
922         len = skb_transport_offset(skb) + sizeof(*ih);
923
924         for (i = 0; i < num; i++) {
925                 len += sizeof(*grec);
926                 if (!ip_mc_may_pull(skb, len))
927                         return -EINVAL;
928
929                 grec = (void *)(skb->data + len - sizeof(*grec));
930                 group = grec->grec_mca;
931                 type = grec->grec_type;
932
933                 len += ntohs(grec->grec_nsrcs) * 4;
934                 if (!ip_mc_may_pull(skb, len))
935                         return -EINVAL;
936
937                 /* We treat this as an IGMPv2 report for now. */
938                 switch (type) {
939                 case IGMPV3_MODE_IS_INCLUDE:
940                 case IGMPV3_MODE_IS_EXCLUDE:
941                 case IGMPV3_CHANGE_TO_INCLUDE:
942                 case IGMPV3_CHANGE_TO_EXCLUDE:
943                 case IGMPV3_ALLOW_NEW_SOURCES:
944                 case IGMPV3_BLOCK_OLD_SOURCES:
945                         break;
946
947                 default:
948                         continue;
949                 }
950
951                 src = eth_hdr(skb)->h_source;
952                 if ((type == IGMPV3_CHANGE_TO_INCLUDE ||
953                      type == IGMPV3_MODE_IS_INCLUDE) &&
954                     ntohs(grec->grec_nsrcs) == 0) {
955                         br_ip4_multicast_leave_group(br, port, group, vid, src);
956                 } else {
957                         err = br_ip4_multicast_add_group(br, port, group, vid,
958                                                          src);
959                         if (err)
960                                 break;
961                 }
962         }
963
964         return err;
965 }
966
967 #if IS_ENABLED(CONFIG_IPV6)
968 static int br_ip6_multicast_mld2_report(struct net_bridge *br,
969                                         struct net_bridge_port *port,
970                                         struct sk_buff *skb,
971                                         u16 vid)
972 {
973         unsigned int nsrcs_offset;
974         const unsigned char *src;
975         struct icmp6hdr *icmp6h;
976         struct mld2_grec *grec;
977         unsigned int grec_len;
978         int i;
979         int len;
980         int num;
981         int err = 0;
982
983         if (!ipv6_mc_may_pull(skb, sizeof(*icmp6h)))
984                 return -EINVAL;
985
986         icmp6h = icmp6_hdr(skb);
987         num = ntohs(icmp6h->icmp6_dataun.un_data16[1]);
988         len = skb_transport_offset(skb) + sizeof(*icmp6h);
989
990         for (i = 0; i < num; i++) {
991                 __be16 *nsrcs, _nsrcs;
992
993                 nsrcs_offset = len + offsetof(struct mld2_grec, grec_nsrcs);
994
995                 if (skb_transport_offset(skb) + ipv6_transport_len(skb) <
996                     nsrcs_offset + sizeof(_nsrcs))
997                         return -EINVAL;
998
999                 nsrcs = skb_header_pointer(skb, nsrcs_offset,
1000                                            sizeof(_nsrcs), &_nsrcs);
1001                 if (!nsrcs)
1002                         return -EINVAL;
1003
1004                 grec_len = struct_size(grec, grec_src, ntohs(*nsrcs));
1005
1006                 if (!ipv6_mc_may_pull(skb, len + grec_len))
1007                         return -EINVAL;
1008
1009                 grec = (struct mld2_grec *)(skb->data + len);
1010                 len += grec_len;
1011
1012                 /* We treat these as MLDv1 reports for now. */
1013                 switch (grec->grec_type) {
1014                 case MLD2_MODE_IS_INCLUDE:
1015                 case MLD2_MODE_IS_EXCLUDE:
1016                 case MLD2_CHANGE_TO_INCLUDE:
1017                 case MLD2_CHANGE_TO_EXCLUDE:
1018                 case MLD2_ALLOW_NEW_SOURCES:
1019                 case MLD2_BLOCK_OLD_SOURCES:
1020                         break;
1021
1022                 default:
1023                         continue;
1024                 }
1025
1026                 src = eth_hdr(skb)->h_source;
1027                 if ((grec->grec_type == MLD2_CHANGE_TO_INCLUDE ||
1028                      grec->grec_type == MLD2_MODE_IS_INCLUDE) &&
1029                     ntohs(*nsrcs) == 0) {
1030                         br_ip6_multicast_leave_group(br, port, &grec->grec_mca,
1031                                                      vid, src);
1032                 } else {
1033                         err = br_ip6_multicast_add_group(br, port,
1034                                                          &grec->grec_mca, vid,
1035                                                          src);
1036                         if (err)
1037                                 break;
1038                 }
1039         }
1040
1041         return err;
1042 }
1043 #endif
1044
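/* IPv4 querier election: adopt @saddr when there is no active election
 * state, when no querier address has been recorded yet, or when @saddr
 * sorts lower than (or equal to) the recorded one.
 */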
1045 static bool br_ip4_multicast_select_querier(struct net_bridge *br,
1046                                             struct net_bridge_port *port,
1047                                             __be32 saddr)
1048 {
1049         if (!timer_pending(&br->ip4_own_query.timer) &&
1050             !timer_pending(&br->ip4_other_query.timer))
1051                 goto update;
1052
1053         if (!br->ip4_querier.addr.u.ip4)
1054                 goto update;
1055
1056         if (ntohl(saddr) <= ntohl(br->ip4_querier.addr.u.ip4))
1057                 goto update;
1058
1059         return false;
1060
1061 update:
1062         br->ip4_querier.addr.u.ip4 = saddr;
1063
1064         /* update protected by general multicast_lock by caller */
1065         rcu_assign_pointer(br->ip4_querier.port, port);
1066
1067         return true;
1068 }
1069
1070 #if IS_ENABLED(CONFIG_IPV6)
1071 static bool br_ip6_multicast_select_querier(struct net_bridge *br,
1072                                             struct net_bridge_port *port,
1073                                             struct in6_addr *saddr)
1074 {
1075         if (!timer_pending(&br->ip6_own_query.timer) &&
1076             !timer_pending(&br->ip6_other_query.timer))
1077                 goto update;
1078
1079         if (ipv6_addr_cmp(saddr, &br->ip6_querier.addr.u.ip6) <= 0)
1080                 goto update;
1081
1082         return false;
1083
1084 update:
1085         br->ip6_querier.addr.u.ip6 = *saddr;
1086
1087         /* update protected by general multicast_lock by caller */
1088         rcu_assign_pointer(br->ip6_querier.port, port);
1089
1090         return true;
1091 }
1092 #endif
1093
1094 static bool br_multicast_select_querier(struct net_bridge *br,
1095                                         struct net_bridge_port *port,
1096                                         struct br_ip *saddr)
1097 {
1098         switch (saddr->proto) {
1099         case htons(ETH_P_IP):
1100                 return br_ip4_multicast_select_querier(br, port, saddr->u.ip4);
1101 #if IS_ENABLED(CONFIG_IPV6)
1102         case htons(ETH_P_IPV6):
1103                 return br_ip6_multicast_select_querier(br, port, &saddr->u.ip6);
1104 #endif
1105         }
1106
1107         return false;
1108 }
1109
1110 static void
1111 br_multicast_update_query_timer(struct net_bridge *br,
1112                                 struct bridge_mcast_other_query *query,
1113                                 unsigned long max_delay)
1114 {
1115         if (!timer_pending(&query->timer))
1116                 query->delay_time = jiffies + max_delay;
1117
1118         mod_timer(&query->timer, jiffies + br->multicast_querier_interval);
1119 }
1120
1121 static void br_port_mc_router_state_change(struct net_bridge_port *p,
1122                                            bool is_mc_router)
1123 {
1124         struct switchdev_attr attr = {
1125                 .orig_dev = p->dev,
1126                 .id = SWITCHDEV_ATTR_ID_PORT_MROUTER,
1127                 .flags = SWITCHDEV_F_DEFER,
1128                 .u.mrouter = is_mc_router,
1129         };
1130
1131         switchdev_port_attr_set(p->dev, &attr);
1132 }
1133
1134 /*
1135  * Add port to router_list
1136  *  list is maintained ordered by pointer value
1137  *  and locked by br->multicast_lock and RCU
1138  */
1139 static void br_multicast_add_router(struct net_bridge *br,
1140                                     struct net_bridge_port *port)
1141 {
1142         struct net_bridge_port *p;
1143         struct hlist_node *slot = NULL;
1144
1145         if (!hlist_unhashed(&port->rlist))
1146                 return;
1147
1148         hlist_for_each_entry(p, &br->router_list, rlist) {
1149                 if ((unsigned long) port >= (unsigned long) p)
1150                         break;
1151                 slot = &p->rlist;
1152         }
1153
1154         if (slot)
1155                 hlist_add_behind_rcu(&port->rlist, slot);
1156         else
1157                 hlist_add_head_rcu(&port->rlist, &br->router_list);
1158         br_rtr_notify(br->dev, port, RTM_NEWMDB);
1159         br_port_mc_router_state_change(port, true);
1160 }
1161
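/* Note the presence of a multicast router behind @port (or on the bridge
 * itself when @port is NULL) and restart the corresponding router timer,
 * honouring the configured router type.
 */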
1162 static void br_multicast_mark_router(struct net_bridge *br,
1163                                      struct net_bridge_port *port)
1164 {
1165         unsigned long now = jiffies;
1166
1167         if (!port) {
1168                 if (br->multicast_router == MDB_RTR_TYPE_TEMP_QUERY) {
1169                         if (!timer_pending(&br->multicast_router_timer))
1170                                 br_mc_router_state_change(br, true);
1171                         mod_timer(&br->multicast_router_timer,
1172                                   now + br->multicast_querier_interval);
1173                 }
1174                 return;
1175         }
1176
1177         if (port->multicast_router == MDB_RTR_TYPE_DISABLED ||
1178             port->multicast_router == MDB_RTR_TYPE_PERM)
1179                 return;
1180
1181         br_multicast_add_router(br, port);
1182
1183         mod_timer(&port->multicast_router_timer,
1184                   now + br->multicast_querier_interval);
1185 }
1186
1187 static void br_multicast_query_received(struct net_bridge *br,
1188                                         struct net_bridge_port *port,
1189                                         struct bridge_mcast_other_query *query,
1190                                         struct br_ip *saddr,
1191                                         unsigned long max_delay)
1192 {
1193         if (!br_multicast_select_querier(br, port, saddr))
1194                 return;
1195
1196         br_multicast_update_query_timer(br, query, max_delay);
1197         br_multicast_mark_router(br, port);
1198 }
1199
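/* Process an incoming IGMP query: general queries feed querier election,
 * router discovery and the other-querier timer; group-specific queries
 * shorten the timers of the matching mdb entry and its port groups.
 */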
1200 static void br_ip4_multicast_query(struct net_bridge *br,
1201                                    struct net_bridge_port *port,
1202                                    struct sk_buff *skb,
1203                                    u16 vid)
1204 {
1205         unsigned int transport_len = ip_transport_len(skb);
1206         const struct iphdr *iph = ip_hdr(skb);
1207         struct igmphdr *ih = igmp_hdr(skb);
1208         struct net_bridge_mdb_entry *mp;
1209         struct igmpv3_query *ih3;
1210         struct net_bridge_port_group *p;
1211         struct net_bridge_port_group __rcu **pp;
1212         struct br_ip saddr;
1213         unsigned long max_delay;
1214         unsigned long now = jiffies;
1215         __be32 group;
1216
1217         spin_lock(&br->multicast_lock);
1218         if (!netif_running(br->dev) ||
1219             (port && port->state == BR_STATE_DISABLED))
1220                 goto out;
1221
1222         group = ih->group;
1223
1224         if (transport_len == sizeof(*ih)) {
1225                 max_delay = ih->code * (HZ / IGMP_TIMER_SCALE);
1226
1227                 if (!max_delay) {
1228                         max_delay = 10 * HZ;
1229                         group = 0;
1230                 }
1231         } else if (transport_len >= sizeof(*ih3)) {
1232                 ih3 = igmpv3_query_hdr(skb);
1233                 if (ih3->nsrcs)
1234                         goto out;
1235
1236                 max_delay = ih3->code ?
1237                             IGMPV3_MRC(ih3->code) * (HZ / IGMP_TIMER_SCALE) : 1;
1238         } else {
1239                 goto out;
1240         }
1241
1242         if (!group) {
1243                 saddr.proto = htons(ETH_P_IP);
1244                 saddr.u.ip4 = iph->saddr;
1245
1246                 br_multicast_query_received(br, port, &br->ip4_other_query,
1247                                             &saddr, max_delay);
1248                 goto out;
1249         }
1250
1251         mp = br_mdb_ip4_get(br, group, vid);
1252         if (!mp)
1253                 goto out;
1254
1255         max_delay *= br->multicast_last_member_count;
1256
1257         if (mp->host_joined &&
1258             (timer_pending(&mp->timer) ?
1259              time_after(mp->timer.expires, now + max_delay) :
1260              try_to_del_timer_sync(&mp->timer) >= 0))
1261                 mod_timer(&mp->timer, now + max_delay);
1262
1263         for (pp = &mp->ports;
1264              (p = mlock_dereference(*pp, br)) != NULL;
1265              pp = &p->next) {
1266                 if (timer_pending(&p->timer) ?
1267                     time_after(p->timer.expires, now + max_delay) :
1268                     try_to_del_timer_sync(&p->timer) >= 0)
1269                         mod_timer(&p->timer, now + max_delay);
1270         }
1271
1272 out:
1273         spin_unlock(&br->multicast_lock);
1274 }
1275
1276 #if IS_ENABLED(CONFIG_IPV6)
1277 static int br_ip6_multicast_query(struct net_bridge *br,
1278                                   struct net_bridge_port *port,
1279                                   struct sk_buff *skb,
1280                                   u16 vid)
1281 {
1282         unsigned int transport_len = ipv6_transport_len(skb);
1283         const struct ipv6hdr *ip6h = ipv6_hdr(skb);
1284         struct mld_msg *mld;
1285         struct net_bridge_mdb_entry *mp;
1286         struct mld2_query *mld2q;
1287         struct net_bridge_port_group *p;
1288         struct net_bridge_port_group __rcu **pp;
1289         struct br_ip saddr;
1290         unsigned long max_delay;
1291         unsigned long now = jiffies;
1292         unsigned int offset = skb_transport_offset(skb);
1293         const struct in6_addr *group = NULL;
1294         bool is_general_query;
1295         int err = 0;
1296
1297         spin_lock(&br->multicast_lock);
1298         if (!netif_running(br->dev) ||
1299             (port && port->state == BR_STATE_DISABLED))
1300                 goto out;
1301
1302         if (transport_len == sizeof(*mld)) {
1303                 if (!pskb_may_pull(skb, offset + sizeof(*mld))) {
1304                         err = -EINVAL;
1305                         goto out;
1306                 }
1307                 mld = (struct mld_msg *) icmp6_hdr(skb);
1308                 max_delay = msecs_to_jiffies(ntohs(mld->mld_maxdelay));
1309                 if (max_delay)
1310                         group = &mld->mld_mca;
1311         } else {
1312                 if (!pskb_may_pull(skb, offset + sizeof(*mld2q))) {
1313                         err = -EINVAL;
1314                         goto out;
1315                 }
1316                 mld2q = (struct mld2_query *)icmp6_hdr(skb);
1317                 if (!mld2q->mld2q_nsrcs)
1318                         group = &mld2q->mld2q_mca;
1319
1320                 max_delay = max(msecs_to_jiffies(mldv2_mrc(mld2q)), 1UL);
1321         }
1322
1323         is_general_query = group && ipv6_addr_any(group);
1324
1325         if (is_general_query) {
1326                 saddr.proto = htons(ETH_P_IPV6);
1327                 saddr.u.ip6 = ip6h->saddr;
1328
1329                 br_multicast_query_received(br, port, &br->ip6_other_query,
1330                                             &saddr, max_delay);
1331                 goto out;
1332         } else if (!group) {
1333                 goto out;
1334         }
1335
1336         mp = br_mdb_ip6_get(br, group, vid);
1337         if (!mp)
1338                 goto out;
1339
1340         max_delay *= br->multicast_last_member_count;
1341         if (mp->host_joined &&
1342             (timer_pending(&mp->timer) ?
1343              time_after(mp->timer.expires, now + max_delay) :
1344              try_to_del_timer_sync(&mp->timer) >= 0))
1345                 mod_timer(&mp->timer, now + max_delay);
1346
1347         for (pp = &mp->ports;
1348              (p = mlock_dereference(*pp, br)) != NULL;
1349              pp = &p->next) {
1350                 if (timer_pending(&p->timer) ?
1351                     time_after(p->timer.expires, now + max_delay) :
1352                     try_to_del_timer_sync(&p->timer) >= 0)
1353                         mod_timer(&p->timer, now + max_delay);
1354         }
1355
1356 out:
1357         spin_unlock(&br->multicast_lock);
1358         return err;
1359 }
1360 #endif
1361
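/* Handle a leave: with fast-leave the port group is removed at once,
 * otherwise the group/port timers are shortened to last_member_count *
 * last_member_interval and, if the bridge acts as querier, a
 * group-specific query is sent.
 */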
1362 static void
1363 br_multicast_leave_group(struct net_bridge *br,
1364                          struct net_bridge_port *port,
1365                          struct br_ip *group,
1366                          struct bridge_mcast_other_query *other_query,
1367                          struct bridge_mcast_own_query *own_query,
1368                          const unsigned char *src)
1369 {
1370         struct net_bridge_mdb_entry *mp;
1371         struct net_bridge_port_group *p;
1372         unsigned long now;
1373         unsigned long time;
1374
1375         spin_lock(&br->multicast_lock);
1376         if (!netif_running(br->dev) ||
1377             (port && port->state == BR_STATE_DISABLED))
1378                 goto out;
1379
1380         mp = br_mdb_ip_get(br, group);
1381         if (!mp)
1382                 goto out;
1383
1384         if (port && (port->flags & BR_MULTICAST_FAST_LEAVE)) {
1385                 struct net_bridge_port_group __rcu **pp;
1386
1387                 for (pp = &mp->ports;
1388                      (p = mlock_dereference(*pp, br)) != NULL;
1389                      pp = &p->next) {
1390                         if (!br_port_group_equal(p, port, src))
1391                                 continue;
1392
1393                         rcu_assign_pointer(*pp, p->next);
1394                         hlist_del_init(&p->mglist);
1395                         del_timer(&p->timer);
1396                         kfree_rcu(p, rcu);
1397                         br_mdb_notify(br->dev, port, group, RTM_DELMDB,
1398                                       p->flags);
1399
1400                         if (!mp->ports && !mp->host_joined &&
1401                             netif_running(br->dev))
1402                                 mod_timer(&mp->timer, jiffies);
1403                 }
1404                 goto out;
1405         }
1406
1407         if (timer_pending(&other_query->timer))
1408                 goto out;
1409
1410         if (br_opt_get(br, BROPT_MULTICAST_QUERIER)) {
1411                 __br_multicast_send_query(br, port, &mp->addr);
1412
1413                 time = jiffies + br->multicast_last_member_count *
1414                                  br->multicast_last_member_interval;
1415
1416                 mod_timer(&own_query->timer, time);
1417
1418                 for (p = mlock_dereference(mp->ports, br);
1419                      p != NULL;
1420                      p = mlock_dereference(p->next, br)) {
1421                         if (!br_port_group_equal(p, port, src))
1422                                 continue;
1423
1424                         if (!hlist_unhashed(&p->mglist) &&
1425                             (timer_pending(&p->timer) ?
1426                              time_after(p->timer.expires, time) :
1427                              try_to_del_timer_sync(&p->timer) >= 0)) {
1428                                 mod_timer(&p->timer, time);
1429                         }
1430
1431                         break;
1432                 }
1433         }
1434
1435         now = jiffies;
1436         time = now + br->multicast_last_member_count *
1437                      br->multicast_last_member_interval;
1438
1439         if (!port) {
1440                 if (mp->host_joined &&
1441                     (timer_pending(&mp->timer) ?
1442                      time_after(mp->timer.expires, time) :
1443                      try_to_del_timer_sync(&mp->timer) >= 0)) {
1444                         mod_timer(&mp->timer, time);
1445                 }
1446
1447                 goto out;
1448         }
1449
1450         for (p = mlock_dereference(mp->ports, br);
1451              p != NULL;
1452              p = mlock_dereference(p->next, br)) {
1453                 if (p->port != port)
1454                         continue;
1455
1456                 if (!hlist_unhashed(&p->mglist) &&
1457                     (timer_pending(&p->timer) ?
1458                      time_after(p->timer.expires, time) :
1459                      try_to_del_timer_sync(&p->timer) >= 0)) {
1460                         mod_timer(&p->timer, time);
1461                 }
1462
1463                 break;
1464         }
1465 out:
1466         spin_unlock(&br->multicast_lock);
1467 }
1468
1469 static void br_ip4_multicast_leave_group(struct net_bridge *br,
1470                                          struct net_bridge_port *port,
1471                                          __be32 group,
1472                                          __u16 vid,
1473                                          const unsigned char *src)
1474 {
1475         struct br_ip br_group;
1476         struct bridge_mcast_own_query *own_query;
1477
1478         if (ipv4_is_local_multicast(group))
1479                 return;
1480
1481         own_query = port ? &port->ip4_own_query : &br->ip4_own_query;
1482
1483         memset(&br_group, 0, sizeof(br_group));
1484         br_group.u.ip4 = group;
1485         br_group.proto = htons(ETH_P_IP);
1486         br_group.vid = vid;
1487
1488         br_multicast_leave_group(br, port, &br_group, &br->ip4_other_query,
1489                                  own_query, src);
1490 }
1491
1492 #if IS_ENABLED(CONFIG_IPV6)
1493 static void br_ip6_multicast_leave_group(struct net_bridge *br,
1494                                          struct net_bridge_port *port,
1495                                          const struct in6_addr *group,
1496                                          __u16 vid,
1497                                          const unsigned char *src)
1498 {
1499         struct br_ip br_group;
1500         struct bridge_mcast_own_query *own_query;
1501
1502         if (ipv6_addr_is_ll_all_nodes(group))
1503                 return;
1504
1505         own_query = port ? &port->ip6_own_query : &br->ip6_own_query;
1506
1507         memset(&br_group, 0, sizeof(br_group));
1508         br_group.u.ip6 = *group;
1509         br_group.proto = htons(ETH_P_IPV6);
1510         br_group.vid = vid;
1511
1512         br_multicast_leave_group(br, port, &br_group, &br->ip6_other_query,
1513                                  own_query, src);
1514 }
1515 #endif
1516
1517 static void br_multicast_err_count(const struct net_bridge *br,
1518                                    const struct net_bridge_port *p,
1519                                    __be16 proto)
1520 {
1521         struct bridge_mcast_stats __percpu *stats;
1522         struct bridge_mcast_stats *pstats;
1523
1524         if (!br_opt_get(br, BROPT_MULTICAST_STATS_ENABLED))
1525                 return;
1526
1527         if (p)
1528                 stats = p->mcast_stats;
1529         else
1530                 stats = br->mcast_stats;
1531         if (WARN_ON(!stats))
1532                 return;
1533
1534         pstats = this_cpu_ptr(stats);
1535
1536         u64_stats_update_begin(&pstats->syncp);
1537         switch (proto) {
1538         case htons(ETH_P_IP):
1539                 pstats->mstats.igmp_parse_errors++;
1540                 break;
1541 #if IS_ENABLED(CONFIG_IPV6)
1542         case htons(ETH_P_IPV6):
1543                 pstats->mstats.mld_parse_errors++;
1544                 break;
1545 #endif
1546         }
1547         u64_stats_update_end(&pstats->syncp);
1548 }
1549
1550 static void br_multicast_pim(struct net_bridge *br,
1551                              struct net_bridge_port *port,
1552                              const struct sk_buff *skb)
1553 {
1554         unsigned int offset = skb_transport_offset(skb);
1555         struct pimhdr *pimhdr, _pimhdr;
1556
1557         pimhdr = skb_header_pointer(skb, offset, sizeof(_pimhdr), &_pimhdr);
1558         if (!pimhdr || pim_hdr_version(pimhdr) != PIM_VERSION ||
1559             pim_hdr_type(pimhdr) != PIM_TYPE_HELLO)
1560                 return;
1561
1562         br_multicast_mark_router(br, port);
1563 }
1564
1565 static int br_ip4_multicast_mrd_rcv(struct net_bridge *br,
1566                                     struct net_bridge_port *port,
1567                                     struct sk_buff *skb)
1568 {
1569         if (ip_hdr(skb)->protocol != IPPROTO_IGMP ||
1570             igmp_hdr(skb)->type != IGMP_MRDISC_ADV)
1571                 return -ENOMSG;
1572
1573         br_multicast_mark_router(br, port);
1574
1575         return 0;
1576 }
1577
1578 static int br_multicast_ipv4_rcv(struct net_bridge *br,
1579                                  struct net_bridge_port *port,
1580                                  struct sk_buff *skb,
1581                                  u16 vid)
1582 {
1583         const unsigned char *src;
1584         struct igmphdr *ih;
1585         int err;
1586
1587         err = ip_mc_check_igmp(skb);
1588
1589         if (err == -ENOMSG) {
1590                 if (!ipv4_is_local_multicast(ip_hdr(skb)->daddr)) {
1591                         BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
1592                 } else if (pim_ipv4_all_pim_routers(ip_hdr(skb)->daddr)) {
1593                         if (ip_hdr(skb)->protocol == IPPROTO_PIM)
1594                                 br_multicast_pim(br, port, skb);
1595                 } else if (ipv4_is_all_snoopers(ip_hdr(skb)->daddr)) {
1596                         br_ip4_multicast_mrd_rcv(br, port, skb);
1597                 }
1598
1599                 return 0;
1600         } else if (err < 0) {
1601                 br_multicast_err_count(br, port, skb->protocol);
1602                 return err;
1603         }
1604
1605         ih = igmp_hdr(skb);
1606         src = eth_hdr(skb)->h_source;
1607         BR_INPUT_SKB_CB(skb)->igmp = ih->type;
1608
1609         switch (ih->type) {
1610         case IGMP_HOST_MEMBERSHIP_REPORT:
1611         case IGMPV2_HOST_MEMBERSHIP_REPORT:
1612                 BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
1613                 err = br_ip4_multicast_add_group(br, port, ih->group, vid, src);
1614                 break;
1615         case IGMPV3_HOST_MEMBERSHIP_REPORT:
1616                 err = br_ip4_multicast_igmp3_report(br, port, skb, vid);
1617                 break;
1618         case IGMP_HOST_MEMBERSHIP_QUERY:
1619                 br_ip4_multicast_query(br, port, skb, vid);
1620                 break;
1621         case IGMP_HOST_LEAVE_MESSAGE:
1622                 br_ip4_multicast_leave_group(br, port, ih->group, vid, src);
1623                 break;
1624         }
1625
1626         br_multicast_count(br, port, skb, BR_INPUT_SKB_CB(skb)->igmp,
1627                            BR_MCAST_DIR_RX);
1628
1629         return err;
1630 }
1631
1632 #if IS_ENABLED(CONFIG_IPV6)
1633 static int br_ip6_multicast_mrd_rcv(struct net_bridge *br,
1634                                     struct net_bridge_port *port,
1635                                     struct sk_buff *skb)
1636 {
1637         int ret;
1638
1639         if (ipv6_hdr(skb)->nexthdr != IPPROTO_ICMPV6)
1640                 return -ENOMSG;
1641
1642         ret = ipv6_mc_check_icmpv6(skb);
1643         if (ret < 0)
1644                 return ret;
1645
1646         if (icmp6_hdr(skb)->icmp6_type != ICMPV6_MRDISC_ADV)
1647                 return -ENOMSG;
1648
1649         br_multicast_mark_router(br, port);
1650
1651         return 0;
1652 }
1653
1654 static int br_multicast_ipv6_rcv(struct net_bridge *br,
1655                                  struct net_bridge_port *port,
1656                                  struct sk_buff *skb,
1657                                  u16 vid)
1658 {
1659         const unsigned char *src;
1660         struct mld_msg *mld;
1661         int err;
1662
1663         err = ipv6_mc_check_mld(skb);
1664
1665         if (err == -ENOMSG) {
1666                 if (!ipv6_addr_is_ll_all_nodes(&ipv6_hdr(skb)->daddr))
1667                         BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
1668
1669                 if (ipv6_addr_is_all_snoopers(&ipv6_hdr(skb)->daddr)) {
1670                         err = br_ip6_multicast_mrd_rcv(br, port, skb);
1671
1672                         if (err < 0 && err != -ENOMSG) {
1673                                 br_multicast_err_count(br, port, skb->protocol);
1674                                 return err;
1675                         }
1676                 }
1677
1678                 return 0;
1679         } else if (err < 0) {
1680                 br_multicast_err_count(br, port, skb->protocol);
1681                 return err;
1682         }
1683
1684         mld = (struct mld_msg *)skb_transport_header(skb);
1685         BR_INPUT_SKB_CB(skb)->igmp = mld->mld_type;
1686
1687         switch (mld->mld_type) {
1688         case ICMPV6_MGM_REPORT:
1689                 src = eth_hdr(skb)->h_source;
1690                 BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
1691                 err = br_ip6_multicast_add_group(br, port, &mld->mld_mca, vid,
1692                                                  src);
1693                 break;
1694         case ICMPV6_MLD2_REPORT:
1695                 err = br_ip6_multicast_mld2_report(br, port, skb, vid);
1696                 break;
1697         case ICMPV6_MGM_QUERY:
1698                 err = br_ip6_multicast_query(br, port, skb, vid);
1699                 break;
1700         case ICMPV6_MGM_REDUCTION:
1701                 src = eth_hdr(skb)->h_source;
1702                 br_ip6_multicast_leave_group(br, port, &mld->mld_mca, vid, src);
1703                 break;
1704         }
1705
1706         br_multicast_count(br, port, skb, BR_INPUT_SKB_CB(skb)->igmp,
1707                            BR_MCAST_DIR_RX);
1708
1709         return err;
1710 }
1711 #endif
1712
1713 int br_multicast_rcv(struct net_bridge *br, struct net_bridge_port *port,
1714                      struct sk_buff *skb, u16 vid)
1715 {
1716         int ret = 0;
1717
1718         BR_INPUT_SKB_CB(skb)->igmp = 0;
1719         BR_INPUT_SKB_CB(skb)->mrouters_only = 0;
1720
1721         if (!br_opt_get(br, BROPT_MULTICAST_ENABLED))
1722                 return 0;
1723
1724         switch (skb->protocol) {
1725         case htons(ETH_P_IP):
1726                 ret = br_multicast_ipv4_rcv(br, port, skb, vid);
1727                 break;
1728 #if IS_ENABLED(CONFIG_IPV6)
1729         case htons(ETH_P_IPV6):
1730                 ret = br_multicast_ipv6_rcv(br, port, skb, vid);
1731                 break;
1732 #endif
1733         }
1734
1735         return ret;
1736 }
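/*
 * Illustrative sketch, not part of the original file: how a caller in the
 * bridge input path might consume br_multicast_rcv().  A non-zero return
 * indicates a malformed IGMP/MLD packet that should be dropped; the helper
 * name below is hypothetical.
 */
static int __maybe_unused example_snoop_on_rx(struct net_bridge *br,
                                              struct net_bridge_port *p,
                                              struct sk_buff *skb, u16 vid)
{
        if (br_multicast_rcv(br, p, skb, vid)) {
                kfree_skb(skb);
                return -EINVAL;
        }

        /* BR_INPUT_SKB_CB(skb)->mrouters_only can now steer forwarding. */
        return 0;
}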
1737
1738 static void br_multicast_query_expired(struct net_bridge *br,
1739                                        struct bridge_mcast_own_query *query,
1740                                        struct bridge_mcast_querier *querier)
1741 {
1742         spin_lock(&br->multicast_lock);
1743         if (query->startup_sent < br->multicast_startup_query_count)
1744                 query->startup_sent++;
1745
1746         RCU_INIT_POINTER(querier->port, NULL);
1747         br_multicast_send_query(br, NULL, query);
1748         spin_unlock(&br->multicast_lock);
1749 }
1750
1751 static void br_ip4_multicast_query_expired(struct timer_list *t)
1752 {
1753         struct net_bridge *br = from_timer(br, t, ip4_own_query.timer);
1754
1755         br_multicast_query_expired(br, &br->ip4_own_query, &br->ip4_querier);
1756 }
1757
1758 #if IS_ENABLED(CONFIG_IPV6)
1759 static void br_ip6_multicast_query_expired(struct timer_list *t)
1760 {
1761         struct net_bridge *br = from_timer(br, t, ip6_own_query.timer);
1762
1763         br_multicast_query_expired(br, &br->ip6_own_query, &br->ip6_querier);
1764 }
1765 #endif
1766
1767 void br_multicast_init(struct net_bridge *br)
1768 {
1769         br->hash_max = BR_MULTICAST_DEFAULT_HASH_MAX;
1770
1771         br->multicast_router = MDB_RTR_TYPE_TEMP_QUERY;
1772         br->multicast_last_member_count = 2;
1773         br->multicast_startup_query_count = 2;
1774
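        /*
         * The interval defaults below mirror the IGMPv2/MLDv1 protocol
         * defaults, e.g. membership interval = robustness (2) * query
         * interval (125s) + query response interval (10s) = 260s, and
         * other-querier-present interval = 2 * 125s + 10s / 2 = 255s.
         */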
1775         br->multicast_last_member_interval = HZ;
1776         br->multicast_query_response_interval = 10 * HZ;
1777         br->multicast_startup_query_interval = 125 * HZ / 4;
1778         br->multicast_query_interval = 125 * HZ;
1779         br->multicast_querier_interval = 255 * HZ;
1780         br->multicast_membership_interval = 260 * HZ;
1781
1782         br->ip4_other_query.delay_time = 0;
1783         br->ip4_querier.port = NULL;
1784         br->multicast_igmp_version = 2;
1785 #if IS_ENABLED(CONFIG_IPV6)
1786         br->multicast_mld_version = 1;
1787         br->ip6_other_query.delay_time = 0;
1788         br->ip6_querier.port = NULL;
1789 #endif
1790         br_opt_toggle(br, BROPT_MULTICAST_ENABLED, true);
1791         br_opt_toggle(br, BROPT_HAS_IPV6_ADDR, true);
1792
1793         spin_lock_init(&br->multicast_lock);
1794         timer_setup(&br->multicast_router_timer,
1795                     br_multicast_local_router_expired, 0);
1796         timer_setup(&br->ip4_other_query.timer,
1797                     br_ip4_multicast_querier_expired, 0);
1798         timer_setup(&br->ip4_own_query.timer,
1799                     br_ip4_multicast_query_expired, 0);
1800 #if IS_ENABLED(CONFIG_IPV6)
1801         timer_setup(&br->ip6_other_query.timer,
1802                     br_ip6_multicast_querier_expired, 0);
1803         timer_setup(&br->ip6_own_query.timer,
1804                     br_ip6_multicast_query_expired, 0);
1805 #endif
1806         INIT_HLIST_HEAD(&br->mdb_list);
1807 }
1808
1809 static void br_ip4_multicast_join_snoopers(struct net_bridge *br)
1810 {
1811         struct in_device *in_dev = in_dev_get(br->dev);
1812
1813         if (!in_dev)
1814                 return;
1815
1816         __ip_mc_inc_group(in_dev, htonl(INADDR_ALLSNOOPERS_GROUP), GFP_ATOMIC);
1817         in_dev_put(in_dev);
1818 }
1819
1820 #if IS_ENABLED(CONFIG_IPV6)
1821 static void br_ip6_multicast_join_snoopers(struct net_bridge *br)
1822 {
1823         struct in6_addr addr;
1824
1825         ipv6_addr_set(&addr, htonl(0xff020000), 0, 0, htonl(0x6a));
1826         ipv6_dev_mc_inc(br->dev, &addr);
1827 }
1828 #else
1829 static inline void br_ip6_multicast_join_snoopers(struct net_bridge *br)
1830 {
1831 }
1832 #endif
1833
1834 static void br_multicast_join_snoopers(struct net_bridge *br)
1835 {
1836         br_ip4_multicast_join_snoopers(br);
1837         br_ip6_multicast_join_snoopers(br);
1838 }
1839
1840 static void br_ip4_multicast_leave_snoopers(struct net_bridge *br)
1841 {
1842         struct in_device *in_dev = in_dev_get(br->dev);
1843
1844         if (WARN_ON(!in_dev))
1845                 return;
1846
1847         __ip_mc_dec_group(in_dev, htonl(INADDR_ALLSNOOPERS_GROUP), GFP_ATOMIC);
1848         in_dev_put(in_dev);
1849 }
1850
1851 #if IS_ENABLED(CONFIG_IPV6)
1852 static void br_ip6_multicast_leave_snoopers(struct net_bridge *br)
1853 {
1854         struct in6_addr addr;
1855
1856         ipv6_addr_set(&addr, htonl(0xff020000), 0, 0, htonl(0x6a));
1857         ipv6_dev_mc_dec(br->dev, &addr);
1858 }
1859 #else
1860 static inline void br_ip6_multicast_leave_snoopers(struct net_bridge *br)
1861 {
1862 }
1863 #endif
1864
1865 static void br_multicast_leave_snoopers(struct net_bridge *br)
1866 {
1867         br_ip4_multicast_leave_snoopers(br);
1868         br_ip6_multicast_leave_snoopers(br);
1869 }
1870
1871 static void __br_multicast_open(struct net_bridge *br,
1872                                 struct bridge_mcast_own_query *query)
1873 {
1874         query->startup_sent = 0;
1875
1876         if (!br_opt_get(br, BROPT_MULTICAST_ENABLED))
1877                 return;
1878
1879         mod_timer(&query->timer, jiffies);
1880 }
1881
1882 void br_multicast_open(struct net_bridge *br)
1883 {
1884         if (br_opt_get(br, BROPT_MULTICAST_ENABLED))
1885                 br_multicast_join_snoopers(br);
1886
1887         __br_multicast_open(br, &br->ip4_own_query);
1888 #if IS_ENABLED(CONFIG_IPV6)
1889         __br_multicast_open(br, &br->ip6_own_query);
1890 #endif
1891 }
1892
1893 void br_multicast_stop(struct net_bridge *br)
1894 {
1895         del_timer_sync(&br->multicast_router_timer);
1896         del_timer_sync(&br->ip4_other_query.timer);
1897         del_timer_sync(&br->ip4_own_query.timer);
1898 #if IS_ENABLED(CONFIG_IPV6)
1899         del_timer_sync(&br->ip6_other_query.timer);
1900         del_timer_sync(&br->ip6_own_query.timer);
1901 #endif
1902
1903         if (br_opt_get(br, BROPT_MULTICAST_ENABLED))
1904                 br_multicast_leave_snoopers(br);
1905 }
1906
1907 void br_multicast_dev_del(struct net_bridge *br)
1908 {
1909         struct net_bridge_mdb_entry *mp;
1910         struct hlist_node *tmp;
1911
1912         spin_lock_bh(&br->multicast_lock);
1913         hlist_for_each_entry_safe(mp, tmp, &br->mdb_list, mdb_node) {
1914                 del_timer(&mp->timer);
1915                 rhashtable_remove_fast(&br->mdb_hash_tbl, &mp->rhnode,
1916                                        br_mdb_rht_params);
1917                 hlist_del_rcu(&mp->mdb_node);
1918                 kfree_rcu(mp, rcu);
1919         }
1920         spin_unlock_bh(&br->multicast_lock);
1921
1922         rcu_barrier();
1923 }
1924
1925 int br_multicast_set_router(struct net_bridge *br, unsigned long val)
1926 {
1927         int err = -EINVAL;
1928
1929         spin_lock_bh(&br->multicast_lock);
1930
1931         switch (val) {
1932         case MDB_RTR_TYPE_DISABLED:
1933         case MDB_RTR_TYPE_PERM:
1934                 br_mc_router_state_change(br, val == MDB_RTR_TYPE_PERM);
1935                 del_timer(&br->multicast_router_timer);
1936                 br->multicast_router = val;
1937                 err = 0;
1938                 break;
1939         case MDB_RTR_TYPE_TEMP_QUERY:
1940                 if (br->multicast_router != MDB_RTR_TYPE_TEMP_QUERY)
1941                         br_mc_router_state_change(br, false);
1942                 br->multicast_router = val;
1943                 err = 0;
1944                 break;
1945         }
1946
1947         spin_unlock_bh(&br->multicast_lock);
1948
1949         return err;
1950 }
1951
1952 static void __del_port_router(struct net_bridge_port *p)
1953 {
1954         if (hlist_unhashed(&p->rlist))
1955                 return;
1956         hlist_del_init_rcu(&p->rlist);
1957         br_rtr_notify(p->br->dev, p, RTM_DELMDB);
1958         br_port_mc_router_state_change(p, false);
1959
1960         /* don't allow timer refresh */
1961         if (p->multicast_router == MDB_RTR_TYPE_TEMP)
1962                 p->multicast_router = MDB_RTR_TYPE_TEMP_QUERY;
1963 }
1964
1965 int br_multicast_set_port_router(struct net_bridge_port *p, unsigned long val)
1966 {
1967         struct net_bridge *br = p->br;
1968         unsigned long now = jiffies;
1969         int err = -EINVAL;
1970
1971         spin_lock(&br->multicast_lock);
1972         if (p->multicast_router == val) {
1973                 /* Refresh the temp router port timer */
1974                 if (p->multicast_router == MDB_RTR_TYPE_TEMP)
1975                         mod_timer(&p->multicast_router_timer,
1976                                   now + br->multicast_querier_interval);
1977                 err = 0;
1978                 goto unlock;
1979         }
1980         switch (val) {
1981         case MDB_RTR_TYPE_DISABLED:
1982                 p->multicast_router = MDB_RTR_TYPE_DISABLED;
1983                 __del_port_router(p);
1984                 del_timer(&p->multicast_router_timer);
1985                 break;
1986         case MDB_RTR_TYPE_TEMP_QUERY:
1987                 p->multicast_router = MDB_RTR_TYPE_TEMP_QUERY;
1988                 __del_port_router(p);
1989                 break;
1990         case MDB_RTR_TYPE_PERM:
1991                 p->multicast_router = MDB_RTR_TYPE_PERM;
1992                 del_timer(&p->multicast_router_timer);
1993                 br_multicast_add_router(br, p);
1994                 break;
1995         case MDB_RTR_TYPE_TEMP:
1996                 p->multicast_router = MDB_RTR_TYPE_TEMP;
1997                 br_multicast_mark_router(br, p);
1998                 break;
1999         default:
2000                 goto unlock;
2001         }
2002         err = 0;
2003 unlock:
2004         spin_unlock(&br->multicast_lock);
2005
2006         return err;
2007 }
2008
2009 static void br_multicast_start_querier(struct net_bridge *br,
2010                                        struct bridge_mcast_own_query *query)
2011 {
2012         struct net_bridge_port *port;
2013
2014         __br_multicast_open(br, query);
2015
2016         rcu_read_lock();
2017         list_for_each_entry_rcu(port, &br->port_list, list) {
2018                 if (port->state == BR_STATE_DISABLED ||
2019                     port->state == BR_STATE_BLOCKING)
2020                         continue;
2021
2022                 if (query == &br->ip4_own_query)
2023                         br_multicast_enable(&port->ip4_own_query);
2024 #if IS_ENABLED(CONFIG_IPV6)
2025                 else
2026                         br_multicast_enable(&port->ip6_own_query);
2027 #endif
2028         }
2029         rcu_read_unlock();
2030 }
2031
2032 int br_multicast_toggle(struct net_bridge *br, unsigned long val)
2033 {
2034         struct net_bridge_port *port;
2035
2036         spin_lock_bh(&br->multicast_lock);
2037         if (!!br_opt_get(br, BROPT_MULTICAST_ENABLED) == !!val)
2038                 goto unlock;
2039
2040         br_mc_disabled_update(br->dev, val);
2041         br_opt_toggle(br, BROPT_MULTICAST_ENABLED, !!val);
2042         if (!br_opt_get(br, BROPT_MULTICAST_ENABLED)) {
2043                 br_multicast_leave_snoopers(br);
2044                 goto unlock;
2045         }
2046
2047         if (!netif_running(br->dev))
2048                 goto unlock;
2049
2050         br_multicast_open(br);
2051         list_for_each_entry(port, &br->port_list, list)
2052                 __br_multicast_enable_port(port);
2053
2054 unlock:
2055         spin_unlock_bh(&br->multicast_lock);
2056
2057         return 0;
2058 }
2059
2060 bool br_multicast_enabled(const struct net_device *dev)
2061 {
2062         struct net_bridge *br = netdev_priv(dev);
2063
2064         return !!br_opt_get(br, BROPT_MULTICAST_ENABLED);
2065 }
2066 EXPORT_SYMBOL_GPL(br_multicast_enabled);
2067
2068 bool br_multicast_router(const struct net_device *dev)
2069 {
2070         struct net_bridge *br = netdev_priv(dev);
2071         bool is_router;
2072
2073         spin_lock_bh(&br->multicast_lock);
2074         is_router = br_multicast_is_router(br);
2075         spin_unlock_bh(&br->multicast_lock);
2076         return is_router;
2077 }
2078 EXPORT_SYMBOL_GPL(br_multicast_router);
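/*
 * Illustrative sketch, not part of the original file: a driver stacked on
 * top of a bridge might gate its own multicast offload on the two exported
 * helpers above.  The function name is hypothetical.
 */
static bool __maybe_unused example_bridge_wants_mcast_flood(const struct net_device *br_dev)
{
        /* Flood everything if snooping is off or the bridge itself acts
         * as a multicast router.
         */
        return !br_multicast_enabled(br_dev) || br_multicast_router(br_dev);
}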
2079
2080 int br_multicast_set_querier(struct net_bridge *br, unsigned long val)
2081 {
2082         unsigned long max_delay;
2083
2084         val = !!val;
2085
2086         spin_lock_bh(&br->multicast_lock);
2087         if (br_opt_get(br, BROPT_MULTICAST_QUERIER) == val)
2088                 goto unlock;
2089
2090         br_opt_toggle(br, BROPT_MULTICAST_QUERIER, !!val);
2091         if (!val)
2092                 goto unlock;
2093
2094         max_delay = br->multicast_query_response_interval;
2095
2096         if (!timer_pending(&br->ip4_other_query.timer))
2097                 br->ip4_other_query.delay_time = jiffies + max_delay;
2098
2099         br_multicast_start_querier(br, &br->ip4_own_query);
2100
2101 #if IS_ENABLED(CONFIG_IPV6)
2102         if (!timer_pending(&br->ip6_other_query.timer))
2103                 br->ip6_other_query.delay_time = jiffies + max_delay;
2104
2105         br_multicast_start_querier(br, &br->ip6_own_query);
2106 #endif
2107
2108 unlock:
2109         spin_unlock_bh(&br->multicast_lock);
2110
2111         return 0;
2112 }
2113
2114 int br_multicast_set_igmp_version(struct net_bridge *br, unsigned long val)
2115 {
2116         /* Currently we support only versions 2 and 3 */
2117         switch (val) {
2118         case 2:
2119         case 3:
2120                 break;
2121         default:
2122                 return -EINVAL;
2123         }
2124
2125         spin_lock_bh(&br->multicast_lock);
2126         br->multicast_igmp_version = val;
2127         spin_unlock_bh(&br->multicast_lock);
2128
2129         return 0;
2130 }
2131
2132 #if IS_ENABLED(CONFIG_IPV6)
2133 int br_multicast_set_mld_version(struct net_bridge *br, unsigned long val)
2134 {
2135         /* Currently we support only versions 1 and 2 */
2136         switch (val) {
2137         case 1:
2138         case 2:
2139                 break;
2140         default:
2141                 return -EINVAL;
2142         }
2143
2144         spin_lock_bh(&br->multicast_lock);
2145         br->multicast_mld_version = val;
2146         spin_unlock_bh(&br->multicast_lock);
2147
2148         return 0;
2149 }
2150 #endif
2151
2152 /**
2153  * br_multicast_list_adjacent - Returns snooped multicast addresses
2154  * @dev:        The bridge port adjacent to which to retrieve addresses
2155  * @br_ip_list: The list in which to store the snooped multicast IP addresses
2156  *
2157  * Creates a list of IP addresses (struct br_ip_list) sensed by the multicast
2158  * snooping feature on all bridge ports of dev's bridge device, excluding
2159  * the addresses from dev itself.
2160  *
2161  * Returns the number of items added to br_ip_list.
2162  *
2163  * Notes:
2164  * - br_ip_list needs to be initialized by caller
2165  * - br_ip_list might contain duplicates in the end
2166  *   (needs to be taken care of by caller)
2167  * - the entries added to br_ip_list need to be freed by the caller
2168  */
2169 int br_multicast_list_adjacent(struct net_device *dev,
2170                                struct list_head *br_ip_list)
2171 {
2172         struct net_bridge *br;
2173         struct net_bridge_port *port;
2174         struct net_bridge_port_group *group;
2175         struct br_ip_list *entry;
2176         int count = 0;
2177
2178         rcu_read_lock();
2179         if (!br_ip_list || !netif_is_bridge_port(dev))
2180                 goto unlock;
2181
2182         port = br_port_get_rcu(dev);
2183         if (!port || !port->br)
2184                 goto unlock;
2185
2186         br = port->br;
2187
2188         list_for_each_entry_rcu(port, &br->port_list, list) {
2189                 if (!port->dev || port->dev == dev)
2190                         continue;
2191
2192                 hlist_for_each_entry_rcu(group, &port->mglist, mglist) {
2193                         entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
2194                         if (!entry)
2195                                 goto unlock;
2196
2197                         entry->addr = group->addr;
2198                         list_add(&entry->list, br_ip_list);
2199                         count++;
2200                 }
2201         }
2202
2203 unlock:
2204         rcu_read_unlock();
2205         return count;
2206 }
2207 EXPORT_SYMBOL_GPL(br_multicast_list_adjacent);
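/*
 * Illustrative sketch, not part of the original file: one way a caller might
 * walk and release the list filled in by br_multicast_list_adjacent().  The
 * helper name is hypothetical.
 */
static void __maybe_unused example_dump_adjacent_groups(struct net_device *dev)
{
        struct br_ip_list *entry, *tmp;
        LIST_HEAD(mcast_list);
        int count;

        count = br_multicast_list_adjacent(dev, &mcast_list);
        pr_debug("%s: %d snooped group(s) on other bridge ports\n",
                 dev->name, count);

        /* The caller owns the entries and must free them. */
        list_for_each_entry_safe(entry, tmp, &mcast_list, list) {
                list_del(&entry->list);
                kfree(entry);
        }
}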
2208
2209 /**
2210  * br_multicast_has_querier_anywhere - Checks for a querier on a bridge
2211  * @dev: The bridge port providing the bridge on which to check for a querier
2212  * @proto: The protocol family to check for: IGMP -> ETH_P_IP, MLD -> ETH_P_IPV6
2213  *
2214  * Checks whether the given interface has a bridge on top and if so returns
2215  * true if a valid querier exists anywhere on the bridged link layer.
2216  * Otherwise returns false.
2217  */
2218 bool br_multicast_has_querier_anywhere(struct net_device *dev, int proto)
2219 {
2220         struct net_bridge *br;
2221         struct net_bridge_port *port;
2222         struct ethhdr eth;
2223         bool ret = false;
2224
2225         rcu_read_lock();
2226         if (!netif_is_bridge_port(dev))
2227                 goto unlock;
2228
2229         port = br_port_get_rcu(dev);
2230         if (!port || !port->br)
2231                 goto unlock;
2232
2233         br = port->br;
2234
2235         memset(&eth, 0, sizeof(eth));
2236         eth.h_proto = htons(proto);
2237
2238         ret = br_multicast_querier_exists(br, &eth);
2239
2240 unlock:
2241         rcu_read_unlock();
2242         return ret;
2243 }
2244 EXPORT_SYMBOL_GPL(br_multicast_has_querier_anywhere);
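/*
 * Illustrative sketch, not part of the original file: a protocol that prunes
 * multicast traffic might only do so when both an IGMP and an MLD querier
 * have been seen on the bridged segment.  The wrapper name is hypothetical.
 */
static bool __maybe_unused example_segment_has_queriers(struct net_device *port_dev)
{
        return br_multicast_has_querier_anywhere(port_dev, ETH_P_IP) &&
               br_multicast_has_querier_anywhere(port_dev, ETH_P_IPV6);
}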
2245
2246 /**
2247  * br_multicast_has_querier_adjacent - Checks for a querier behind a bridge port
2248  * @dev: The bridge port adjacent to which to check for a querier
2249  * @proto: The protocol family to check for: IGMP -> ETH_P_IP, MLD -> ETH_P_IPV6
2250  *
2251  * Checks whether the given interface has a bridge on top and if so returns
2252  * true if a selected querier is behind one of the other ports of this
2253  * bridge. Otherwise returns false.
2254  */
2255 bool br_multicast_has_querier_adjacent(struct net_device *dev, int proto)
2256 {
2257         struct net_bridge *br;
2258         struct net_bridge_port *port;
2259         bool ret = false;
2260
2261         rcu_read_lock();
2262         if (!netif_is_bridge_port(dev))
2263                 goto unlock;
2264
2265         port = br_port_get_rcu(dev);
2266         if (!port || !port->br)
2267                 goto unlock;
2268
2269         br = port->br;
2270
2271         switch (proto) {
2272         case ETH_P_IP:
2273                 if (!timer_pending(&br->ip4_other_query.timer) ||
2274                     rcu_dereference(br->ip4_querier.port) == port)
2275                         goto unlock;
2276                 break;
2277 #if IS_ENABLED(CONFIG_IPV6)
2278         case ETH_P_IPV6:
2279                 if (!timer_pending(&br->ip6_other_query.timer) ||
2280                     rcu_dereference(br->ip6_querier.port) == port)
2281                         goto unlock;
2282                 break;
2283 #endif
2284         default:
2285                 goto unlock;
2286         }
2287
2288         ret = true;
2289 unlock:
2290         rcu_read_unlock();
2291         return ret;
2292 }
2293 EXPORT_SYMBOL_GPL(br_multicast_has_querier_adjacent);
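/*
 * Illustrative sketch, not part of the original file: unlike the "anywhere"
 * variant above, this check returns false when the selected querier sits on
 * the port device itself, so a caller on that port can tell whether reports
 * it suppresses would still be seen by a querier behind another port.  The
 * wrapper name is hypothetical.
 */
static bool __maybe_unused example_querier_behind_other_port(const struct sk_buff *skb,
                                                             struct net_device *port_dev)
{
        int proto = skb->protocol == htons(ETH_P_IPV6) ? ETH_P_IPV6 : ETH_P_IP;

        return br_multicast_has_querier_adjacent(port_dev, proto);
}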
2294
2295 static void br_mcast_stats_add(struct bridge_mcast_stats __percpu *stats,
2296                                const struct sk_buff *skb, u8 type, u8 dir)
2297 {
2298         struct bridge_mcast_stats *pstats = this_cpu_ptr(stats);
2299         __be16 proto = skb->protocol;
2300         unsigned int t_len;
2301
2302         u64_stats_update_begin(&pstats->syncp);
2303         switch (proto) {
2304         case htons(ETH_P_IP):
2305                 t_len = ntohs(ip_hdr(skb)->tot_len) - ip_hdrlen(skb);
2306                 switch (type) {
2307                 case IGMP_HOST_MEMBERSHIP_REPORT:
2308                         pstats->mstats.igmp_v1reports[dir]++;
2309                         break;
2310                 case IGMPV2_HOST_MEMBERSHIP_REPORT:
2311                         pstats->mstats.igmp_v2reports[dir]++;
2312                         break;
2313                 case IGMPV3_HOST_MEMBERSHIP_REPORT:
2314                         pstats->mstats.igmp_v3reports[dir]++;
2315                         break;
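                /*
                 * RFC 3376 7.1: an IGMPv3 query is longer than the 8-byte
                 * IGMPv1/v2 header; among 8-byte queries, a zero Max Resp
                 * Code identifies IGMPv1.
                 */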
2316                 case IGMP_HOST_MEMBERSHIP_QUERY:
2317                         if (t_len != sizeof(struct igmphdr)) {
2318                                 pstats->mstats.igmp_v3queries[dir]++;
2319                         } else {
2320                                 unsigned int offset = skb_transport_offset(skb);
2321                                 struct igmphdr *ih, _ihdr;
2322
2323                                 ih = skb_header_pointer(skb, offset,
2324                                                         sizeof(_ihdr), &_ihdr);
2325                                 if (!ih)
2326                                         break;
2327                                 if (!ih->code)
2328                                         pstats->mstats.igmp_v1queries[dir]++;
2329                                 else
2330                                         pstats->mstats.igmp_v2queries[dir]++;
2331                         }
2332                         break;
2333                 case IGMP_HOST_LEAVE_MESSAGE:
2334                         pstats->mstats.igmp_leaves[dir]++;
2335                         break;
2336                 }
2337                 break;
2338 #if IS_ENABLED(CONFIG_IPV6)
2339         case htons(ETH_P_IPV6):
2340                 t_len = ntohs(ipv6_hdr(skb)->payload_len) +
2341                         sizeof(struct ipv6hdr);
2342                 t_len -= skb_network_header_len(skb);
2343                 switch (type) {
2344                 case ICMPV6_MGM_REPORT:
2345                         pstats->mstats.mld_v1reports[dir]++;
2346                         break;
2347                 case ICMPV6_MLD2_REPORT:
2348                         pstats->mstats.mld_v2reports[dir]++;
2349                         break;
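                /*
                 * RFC 3810 8.1: an MLDv1 query is exactly the 24-byte
                 * mld_msg, while MLDv2 queries carry additional fields and
                 * are at least 28 bytes long.
                 */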
2350                 case ICMPV6_MGM_QUERY:
2351                         if (t_len != sizeof(struct mld_msg))
2352                                 pstats->mstats.mld_v2queries[dir]++;
2353                         else
2354                                 pstats->mstats.mld_v1queries[dir]++;
2355                         break;
2356                 case ICMPV6_MGM_REDUCTION:
2357                         pstats->mstats.mld_leaves[dir]++;
2358                         break;
2359                 }
2360                 break;
2361 #endif /* CONFIG_IPV6 */
2362         }
2363         u64_stats_update_end(&pstats->syncp);
2364 }
2365
2366 void br_multicast_count(struct net_bridge *br, const struct net_bridge_port *p,
2367                         const struct sk_buff *skb, u8 type, u8 dir)
2368 {
2369         struct bridge_mcast_stats __percpu *stats;
2370
2371         /* if multicast snooping is disabled then the igmp type is never set */
2372         if (!type || !br_opt_get(br, BROPT_MULTICAST_STATS_ENABLED))
2373                 return;
2374
2375         if (p)
2376                 stats = p->mcast_stats;
2377         else
2378                 stats = br->mcast_stats;
2379         if (WARN_ON(!stats))
2380                 return;
2381
2382         br_mcast_stats_add(stats, skb, type, dir);
2383 }
2384
2385 int br_multicast_init_stats(struct net_bridge *br)
2386 {
2387         br->mcast_stats = netdev_alloc_pcpu_stats(struct bridge_mcast_stats);
2388         if (!br->mcast_stats)
2389                 return -ENOMEM;
2390
2391         return 0;
2392 }
2393
2394 void br_multicast_uninit_stats(struct net_bridge *br)
2395 {
2396         free_percpu(br->mcast_stats);
2397 }
2398
2399 static void mcast_stats_add_dir(u64 *dst, u64 *src)
2400 {
2401         dst[BR_MCAST_DIR_RX] += src[BR_MCAST_DIR_RX];
2402         dst[BR_MCAST_DIR_TX] += src[BR_MCAST_DIR_TX];
2403 }
2404
2405 void br_multicast_get_stats(const struct net_bridge *br,
2406                             const struct net_bridge_port *p,
2407                             struct br_mcast_stats *dest)
2408 {
2409         struct bridge_mcast_stats __percpu *stats;
2410         struct br_mcast_stats tdst;
2411         int i;
2412
2413         memset(dest, 0, sizeof(*dest));
2414         if (p)
2415                 stats = p->mcast_stats;
2416         else
2417                 stats = br->mcast_stats;
2418         if (WARN_ON(!stats))
2419                 return;
2420
2421         memset(&tdst, 0, sizeof(tdst));
2422         for_each_possible_cpu(i) {
2423                 struct bridge_mcast_stats *cpu_stats = per_cpu_ptr(stats, i);
2424                 struct br_mcast_stats temp;
2425                 unsigned int start;
2426
2427                 do {
2428                         start = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
2429                         memcpy(&temp, &cpu_stats->mstats, sizeof(temp));
2430                 } while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start));
2431
2432                 mcast_stats_add_dir(tdst.igmp_v1queries, temp.igmp_v1queries);
2433                 mcast_stats_add_dir(tdst.igmp_v2queries, temp.igmp_v2queries);
2434                 mcast_stats_add_dir(tdst.igmp_v3queries, temp.igmp_v3queries);
2435                 mcast_stats_add_dir(tdst.igmp_leaves, temp.igmp_leaves);
2436                 mcast_stats_add_dir(tdst.igmp_v1reports, temp.igmp_v1reports);
2437                 mcast_stats_add_dir(tdst.igmp_v2reports, temp.igmp_v2reports);
2438                 mcast_stats_add_dir(tdst.igmp_v3reports, temp.igmp_v3reports);
2439                 tdst.igmp_parse_errors += temp.igmp_parse_errors;
2440
2441                 mcast_stats_add_dir(tdst.mld_v1queries, temp.mld_v1queries);
2442                 mcast_stats_add_dir(tdst.mld_v2queries, temp.mld_v2queries);
2443                 mcast_stats_add_dir(tdst.mld_leaves, temp.mld_leaves);
2444                 mcast_stats_add_dir(tdst.mld_v1reports, temp.mld_v1reports);
2445                 mcast_stats_add_dir(tdst.mld_v2reports, temp.mld_v2reports);
2446                 tdst.mld_parse_errors += temp.mld_parse_errors;
2447         }
2448         memcpy(dest, &tdst, sizeof(*dest));
2449 }
2450
2451 int br_mdb_hash_init(struct net_bridge *br)
2452 {
2453         return rhashtable_init(&br->mdb_hash_tbl, &br_mdb_rht_params);
2454 }
2455
2456 void br_mdb_hash_fini(struct net_bridge *br)
2457 {
2458         rhashtable_destroy(&br->mdb_hash_tbl);
2459 }