[linux-2.6-block.git] / net / ipv4 / ipmr.c
1/*
2 * IP multicast routing support for mrouted 3.6/3.8
3 *
4 * (c) 1995 Alan Cox, <alan@lxorguk.ukuu.org.uk>
5 * Linux Consultancy and Custom Driver Development
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License
9 * as published by the Free Software Foundation; either version
10 * 2 of the License, or (at your option) any later version.
11 *
12 * Fixes:
13 * Michael Chastain : Incorrect size of copying.
14 * Alan Cox : Added the cache manager code
15 * Alan Cox : Fixed the clone/copy bug and device race.
16 * Mike McLagan : Routing by source
17 * Malcolm Beattie : Buffer handling fixes.
18 * Alexey Kuznetsov : Double buffer free and other fixes.
19 * SVR Anand : Fixed several multicast bugs and problems.
20 * Alexey Kuznetsov : Status, optimisations and more.
21 * Brad Parker : Better behaviour on mrouted upcall
22 * overflow.
23 * Carlos Picoto : PIMv1 Support
24 * Pavlin Ivanov Radoslavov: PIMv2 Registers must checksum only PIM header
25 * Relax this requirement to work with older peers.
26 *
27 */
28
29#include <asm/uaccess.h>
30#include <linux/types.h>
31#include <linux/capability.h>
32#include <linux/errno.h>
33#include <linux/timer.h>
34#include <linux/mm.h>
35#include <linux/kernel.h>
36#include <linux/fcntl.h>
37#include <linux/stat.h>
38#include <linux/socket.h>
39#include <linux/in.h>
40#include <linux/inet.h>
41#include <linux/netdevice.h>
42#include <linux/inetdevice.h>
43#include <linux/igmp.h>
44#include <linux/proc_fs.h>
45#include <linux/seq_file.h>
46#include <linux/mroute.h>
47#include <linux/init.h>
48#include <linux/if_ether.h>
49#include <linux/slab.h>
50#include <net/net_namespace.h>
51#include <net/ip.h>
52#include <net/protocol.h>
53#include <linux/skbuff.h>
54#include <net/route.h>
55#include <net/sock.h>
56#include <net/icmp.h>
57#include <net/udp.h>
58#include <net/raw.h>
59#include <linux/notifier.h>
60#include <linux/if_arp.h>
61#include <linux/netfilter_ipv4.h>
62#include <linux/compat.h>
63#include <linux/export.h>
64#include <net/ip_tunnels.h>
65#include <net/checksum.h>
66#include <net/netlink.h>
67#include <net/fib_rules.h>
68#include <linux/netconf.h>
69#include <net/nexthop.h>
70
71struct ipmr_rule {
72 struct fib_rule common;
73};
74
75struct ipmr_result {
76 struct mr_table *mrt;
77};
78
79/* Big lock, protecting vif table, mrt cache and mroute socket state.
80 * Note that the changes are semaphored via rtnl_lock.
81 */
82
83static DEFINE_RWLOCK(mrt_lock);
84
85/* Multicast router control variables */
86
87/* Special spinlock for queue of unresolved entries */
88static DEFINE_SPINLOCK(mfc_unres_lock);
89
90/* We return to Alan's original scheme. The hash table of resolved
91 * entries is changed only in process context and protected
92 * with the weak lock mrt_lock. The queue of unresolved entries is
93 * protected with the strong spinlock mfc_unres_lock.
94 *
95 * In this case the data path is entirely free of exclusive locks.
96 */
97
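/* A minimal sketch of the pattern described above (illustrative only; the
 * example_* helpers are hypothetical and do not exist in this file).
 * Packet-path readers take mrt_lock shared, configuration code takes it
 * exclusive in process context, and the unresolved queue is always
 * serialized by mfc_unres_lock:
 *
 *	static void example_fast_path_reader(void)
 *	{
 *		read_lock(&mrt_lock);		// shared: vif/resolved lookups
 *		// ... read vif_table or resolved cache entries ...
 *		read_unlock(&mrt_lock);
 *	}
 *
 *	static void example_config_writer(void)
 *	{
 *		write_lock_bh(&mrt_lock);	// exclusive, process context
 *		// ... modify vif_table or resolved cache entries ...
 *		write_unlock_bh(&mrt_lock);
 *
 *		spin_lock_bh(&mfc_unres_lock);	// unresolved queue
 *		// ... queue or expire unresolved entries ...
 *		spin_unlock_bh(&mfc_unres_lock);
 *	}
 */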
e18b890b 98static struct kmem_cache *mrt_cachep __read_mostly;
1da177e4 99
f0ad0860 100static struct mr_table *ipmr_new_table(struct net *net, u32 id);
acbb219d
FR
101static void ipmr_free_table(struct mr_table *mrt);
102
c4854ec8
RR
103static void ip_mr_forward(struct net *net, struct mr_table *mrt,
104 struct sk_buff *skb, struct mfc_cache *cache,
105 int local);
0c12295a 106static int ipmr_cache_report(struct mr_table *mrt,
4feb88e5 107 struct sk_buff *pkt, vifi_t vifi, int assert);
cb6a4e46
PM
108static int __ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
109 struct mfc_cache *c, struct rtmsg *rtm);
8cd3ac9f
ND
110static void mroute_netlink_event(struct mr_table *mrt, struct mfc_cache *mfc,
111 int cmd);
0e615e96 112static void mroute_clean_tables(struct mr_table *mrt, bool all);
f0ad0860
PM
113static void ipmr_expire_process(unsigned long arg);
114
115#ifdef CONFIG_IP_MROUTE_MULTIPLE_TABLES
116#define ipmr_for_each_table(mrt, net) \
117 list_for_each_entry_rcu(mrt, &net->ipv4.mr_tables, list)
118
119static struct mr_table *ipmr_get_table(struct net *net, u32 id)
120{
121 struct mr_table *mrt;
122
123 ipmr_for_each_table(mrt, net) {
124 if (mrt->id == id)
125 return mrt;
126 }
127 return NULL;
128}
129
da91981b 130static int ipmr_fib_lookup(struct net *net, struct flowi4 *flp4,
f0ad0860
PM
131 struct mr_table **mrt)
132{
f0ad0860 133 int err;
95f4a45d
HFS
134 struct ipmr_result res;
135 struct fib_lookup_arg arg = {
136 .result = &res,
137 .flags = FIB_LOOKUP_NOREF,
138 };
f0ad0860 139
da91981b
DM
140 err = fib_rules_lookup(net->ipv4.mr_rules_ops,
141 flowi4_to_flowi(flp4), 0, &arg);
f0ad0860
PM
142 if (err < 0)
143 return err;
144 *mrt = res.mrt;
145 return 0;
146}
147
148static int ipmr_rule_action(struct fib_rule *rule, struct flowi *flp,
149 int flags, struct fib_lookup_arg *arg)
150{
151 struct ipmr_result *res = arg->result;
152 struct mr_table *mrt;
1da177e4 153
f0ad0860
PM
154 switch (rule->action) {
155 case FR_ACT_TO_TBL:
156 break;
157 case FR_ACT_UNREACHABLE:
158 return -ENETUNREACH;
159 case FR_ACT_PROHIBIT:
160 return -EACCES;
161 case FR_ACT_BLACKHOLE:
162 default:
163 return -EINVAL;
164 }
165
166 mrt = ipmr_get_table(rule->fr_net, rule->table);
51456b29 167 if (!mrt)
f0ad0860
PM
168 return -EAGAIN;
169 res->mrt = mrt;
170 return 0;
171}
172
173static int ipmr_rule_match(struct fib_rule *rule, struct flowi *fl, int flags)
174{
175 return 1;
176}
177
178static const struct nla_policy ipmr_rule_policy[FRA_MAX + 1] = {
179 FRA_GENERIC_POLICY,
180};
181
182static int ipmr_rule_configure(struct fib_rule *rule, struct sk_buff *skb,
183 struct fib_rule_hdr *frh, struct nlattr **tb)
184{
185 return 0;
186}
187
188static int ipmr_rule_compare(struct fib_rule *rule, struct fib_rule_hdr *frh,
189 struct nlattr **tb)
190{
191 return 1;
192}
193
194static int ipmr_rule_fill(struct fib_rule *rule, struct sk_buff *skb,
195 struct fib_rule_hdr *frh)
196{
197 frh->dst_len = 0;
198 frh->src_len = 0;
199 frh->tos = 0;
200 return 0;
201}
202
04a6f82c 203static const struct fib_rules_ops __net_initconst ipmr_rules_ops_template = {
25239cee 204 .family = RTNL_FAMILY_IPMR,
f0ad0860
PM
205 .rule_size = sizeof(struct ipmr_rule),
206 .addr_size = sizeof(u32),
207 .action = ipmr_rule_action,
208 .match = ipmr_rule_match,
209 .configure = ipmr_rule_configure,
210 .compare = ipmr_rule_compare,
f0ad0860
PM
211 .fill = ipmr_rule_fill,
212 .nlgroup = RTNLGRP_IPV4_RULE,
213 .policy = ipmr_rule_policy,
214 .owner = THIS_MODULE,
215};
216
217static int __net_init ipmr_rules_init(struct net *net)
218{
219 struct fib_rules_ops *ops;
220 struct mr_table *mrt;
221 int err;
222
223 ops = fib_rules_register(&ipmr_rules_ops_template, net);
224 if (IS_ERR(ops))
225 return PTR_ERR(ops);
226
227 INIT_LIST_HEAD(&net->ipv4.mr_tables);
228
229 mrt = ipmr_new_table(net, RT_TABLE_DEFAULT);
1113ebbc
NA
230 if (IS_ERR(mrt)) {
231 err = PTR_ERR(mrt);
f0ad0860
PM
232 goto err1;
233 }
234
235 err = fib_default_rule_add(ops, 0x7fff, RT_TABLE_DEFAULT, 0);
236 if (err < 0)
237 goto err2;
238
239 net->ipv4.mr_rules_ops = ops;
240 return 0;
241
242err2:
f243e5a7 243 ipmr_free_table(mrt);
f0ad0860
PM
244err1:
245 fib_rules_unregister(ops);
246 return err;
247}
248
249static void __net_exit ipmr_rules_exit(struct net *net)
250{
251 struct mr_table *mrt, *next;
252
ed785309 253 rtnl_lock();
035320d5
ED
254 list_for_each_entry_safe(mrt, next, &net->ipv4.mr_tables, list) {
255 list_del(&mrt->list);
acbb219d 256 ipmr_free_table(mrt);
035320d5 257 }
f0ad0860 258 fib_rules_unregister(net->ipv4.mr_rules_ops);
419df12f 259 rtnl_unlock();
f0ad0860
PM
260}
261#else
262#define ipmr_for_each_table(mrt, net) \
263 for (mrt = net->ipv4.mrt; mrt; mrt = NULL)
264
265static struct mr_table *ipmr_get_table(struct net *net, u32 id)
266{
267 return net->ipv4.mrt;
268}
269
da91981b 270static int ipmr_fib_lookup(struct net *net, struct flowi4 *flp4,
f0ad0860
PM
271 struct mr_table **mrt)
272{
273 *mrt = net->ipv4.mrt;
274 return 0;
275}
276
277static int __net_init ipmr_rules_init(struct net *net)
278{
1113ebbc
NA
279 struct mr_table *mrt;
280
281 mrt = ipmr_new_table(net, RT_TABLE_DEFAULT);
282 if (IS_ERR(mrt))
283 return PTR_ERR(mrt);
284 net->ipv4.mrt = mrt;
285 return 0;
f0ad0860
PM
286}
287
288static void __net_exit ipmr_rules_exit(struct net *net)
289{
ed785309 290 rtnl_lock();
acbb219d 291 ipmr_free_table(net->ipv4.mrt);
ed785309
WC
292 net->ipv4.mrt = NULL;
293 rtnl_unlock();
f0ad0860
PM
294}
295#endif
296
297static struct mr_table *ipmr_new_table(struct net *net, u32 id)
298{
299 struct mr_table *mrt;
300 unsigned int i;
1da177e4 301
1113ebbc
NA
302 /* "pimreg%u" should not exceed 16 bytes (IFNAMSIZ) */
303 if (id != RT_TABLE_DEFAULT && id >= 1000000000)
304 return ERR_PTR(-EINVAL);
305
f0ad0860 306 mrt = ipmr_get_table(net, id);
00db4124 307 if (mrt)
f0ad0860
PM
308 return mrt;
309
310 mrt = kzalloc(sizeof(*mrt), GFP_KERNEL);
51456b29 311 if (!mrt)
1113ebbc 312 return ERR_PTR(-ENOMEM);
8de53dfb 313 write_pnet(&mrt->net, net);
f0ad0860
PM
314 mrt->id = id;
315
316 /* Forwarding cache */
317 for (i = 0; i < MFC_LINES; i++)
318 INIT_LIST_HEAD(&mrt->mfc_cache_array[i]);
319
320 INIT_LIST_HEAD(&mrt->mfc_unres_queue);
321
322 setup_timer(&mrt->ipmr_expire_timer, ipmr_expire_process,
323 (unsigned long)mrt);
324
f0ad0860 325 mrt->mroute_reg_vif_num = -1;
f0ad0860
PM
326#ifdef CONFIG_IP_MROUTE_MULTIPLE_TABLES
327 list_add_tail_rcu(&mrt->list, &net->ipv4.mr_tables);
328#endif
329 return mrt;
330}
1da177e4 331
acbb219d
FR
332static void ipmr_free_table(struct mr_table *mrt)
333{
334 del_timer_sync(&mrt->ipmr_expire_timer);
0e615e96 335 mroute_clean_tables(mrt, true);
acbb219d
FR
336 kfree(mrt);
337}
338
1da177e4
LT
339/* Service routines creating virtual interfaces: DVMRP tunnels and PIMREG */
340
d607032d
WC
341static void ipmr_del_tunnel(struct net_device *dev, struct vifctl *v)
342{
4feb88e5
BT
343 struct net *net = dev_net(dev);
344
d607032d
WC
345 dev_close(dev);
346
4feb88e5 347 dev = __dev_get_by_name(net, "tunl0");
d607032d 348 if (dev) {
5bc3eb7e 349 const struct net_device_ops *ops = dev->netdev_ops;
d607032d 350 struct ifreq ifr;
d607032d
WC
351 struct ip_tunnel_parm p;
352
353 memset(&p, 0, sizeof(p));
354 p.iph.daddr = v->vifc_rmt_addr.s_addr;
355 p.iph.saddr = v->vifc_lcl_addr.s_addr;
356 p.iph.version = 4;
357 p.iph.ihl = 5;
358 p.iph.protocol = IPPROTO_IPIP;
359 sprintf(p.name, "dvmrp%d", v->vifc_vifi);
360 ifr.ifr_ifru.ifru_data = (__force void __user *)&p;
361
5bc3eb7e
SH
362 if (ops->ndo_do_ioctl) {
363 mm_segment_t oldfs = get_fs();
364
365 set_fs(KERNEL_DS);
366 ops->ndo_do_ioctl(dev, &ifr, SIOCDELTUNNEL);
367 set_fs(oldfs);
368 }
d607032d
WC
369 }
370}
371
a0b47736
NA
372/* Initialize ipmr pimreg/tunnel in_device */
373static bool ipmr_init_vif_indev(const struct net_device *dev)
374{
375 struct in_device *in_dev;
376
377 ASSERT_RTNL();
378
379 in_dev = __in_dev_get_rtnl(dev);
380 if (!in_dev)
381 return false;
382 ipv4_devconf_setall(in_dev);
383 neigh_parms_data_state_setall(in_dev->arp_parms);
384 IPV4_DEVCONF(in_dev->cnf, RP_FILTER) = 0;
385
386 return true;
387}
388
7ef8f65d 389static struct net_device *ipmr_new_tunnel(struct net *net, struct vifctl *v)
1da177e4
LT
390{
391 struct net_device *dev;
392
4feb88e5 393 dev = __dev_get_by_name(net, "tunl0");
1da177e4
LT
394
395 if (dev) {
5bc3eb7e 396 const struct net_device_ops *ops = dev->netdev_ops;
1da177e4
LT
397 int err;
398 struct ifreq ifr;
1da177e4 399 struct ip_tunnel_parm p;
1da177e4
LT
400
401 memset(&p, 0, sizeof(p));
402 p.iph.daddr = v->vifc_rmt_addr.s_addr;
403 p.iph.saddr = v->vifc_lcl_addr.s_addr;
404 p.iph.version = 4;
405 p.iph.ihl = 5;
406 p.iph.protocol = IPPROTO_IPIP;
407 sprintf(p.name, "dvmrp%d", v->vifc_vifi);
ba93ef74 408 ifr.ifr_ifru.ifru_data = (__force void __user *)&p;
1da177e4 409
5bc3eb7e
SH
410 if (ops->ndo_do_ioctl) {
411 mm_segment_t oldfs = get_fs();
412
413 set_fs(KERNEL_DS);
414 err = ops->ndo_do_ioctl(dev, &ifr, SIOCADDTUNNEL);
415 set_fs(oldfs);
a8cb16dd 416 } else {
5bc3eb7e 417 err = -EOPNOTSUPP;
a8cb16dd 418 }
1da177e4
LT
419 dev = NULL;
420
4feb88e5
BT
421 if (err == 0 &&
422 (dev = __dev_get_by_name(net, p.name)) != NULL) {
1da177e4 423 dev->flags |= IFF_MULTICAST;
a0b47736 424 if (!ipmr_init_vif_indev(dev))
1da177e4 425 goto failure;
1da177e4
LT
426 if (dev_open(dev))
427 goto failure;
7dc00c82 428 dev_hold(dev);
1da177e4
LT
429 }
430 }
431 return dev;
432
433failure:
1da177e4
LT
434 unregister_netdevice(dev);
435 return NULL;
436}
437
c316c629 438#if defined(CONFIG_IP_PIMSM_V1) || defined(CONFIG_IP_PIMSM_V2)
6fef4c0c 439static netdev_tx_t reg_vif_xmit(struct sk_buff *skb, struct net_device *dev)
1da177e4 440{
4feb88e5 441 struct net *net = dev_net(dev);
f0ad0860 442 struct mr_table *mrt;
da91981b
DM
443 struct flowi4 fl4 = {
444 .flowi4_oif = dev->ifindex,
6a662719 445 .flowi4_iif = skb->skb_iif ? : LOOPBACK_IFINDEX,
da91981b 446 .flowi4_mark = skb->mark,
f0ad0860
PM
447 };
448 int err;
449
da91981b 450 err = ipmr_fib_lookup(net, &fl4, &mrt);
e40dbc51
BG
451 if (err < 0) {
452 kfree_skb(skb);
f0ad0860 453 return err;
e40dbc51 454 }
4feb88e5 455
1da177e4 456 read_lock(&mrt_lock);
cf3677ae
PE
457 dev->stats.tx_bytes += skb->len;
458 dev->stats.tx_packets++;
0c12295a 459 ipmr_cache_report(mrt, skb, mrt->mroute_reg_vif_num, IGMPMSG_WHOLEPKT);
1da177e4
LT
460 read_unlock(&mrt_lock);
461 kfree_skb(skb);
6ed10654 462 return NETDEV_TX_OK;
1da177e4
LT
463}
464
ee9b9596
ND
465static int reg_vif_get_iflink(const struct net_device *dev)
466{
467 return 0;
468}
469
007c3838
SH
470static const struct net_device_ops reg_vif_netdev_ops = {
471 .ndo_start_xmit = reg_vif_xmit,
ee9b9596 472 .ndo_get_iflink = reg_vif_get_iflink,
007c3838
SH
473};
474
1da177e4
LT
475static void reg_vif_setup(struct net_device *dev)
476{
477 dev->type = ARPHRD_PIMREG;
46f25dff 478 dev->mtu = ETH_DATA_LEN - sizeof(struct iphdr) - 8;
1da177e4 479 dev->flags = IFF_NOARP;
70cb4a45 480 dev->netdev_ops = &reg_vif_netdev_ops;
1da177e4 481 dev->destructor = free_netdev;
403dbb97 482 dev->features |= NETIF_F_NETNS_LOCAL;
1da177e4
LT
483}
484
f0ad0860 485static struct net_device *ipmr_reg_vif(struct net *net, struct mr_table *mrt)
1da177e4
LT
486{
487 struct net_device *dev;
f0ad0860 488 char name[IFNAMSIZ];
1da177e4 489
f0ad0860
PM
490 if (mrt->id == RT_TABLE_DEFAULT)
491 sprintf(name, "pimreg");
492 else
493 sprintf(name, "pimreg%u", mrt->id);
1da177e4 494
c835a677 495 dev = alloc_netdev(0, name, NET_NAME_UNKNOWN, reg_vif_setup);
1da177e4 496
51456b29 497 if (!dev)
1da177e4
LT
498 return NULL;
499
403dbb97
TG
500 dev_net_set(dev, net);
501
1da177e4
LT
502 if (register_netdevice(dev)) {
503 free_netdev(dev);
504 return NULL;
505 }
1da177e4 506
a0b47736 507 if (!ipmr_init_vif_indev(dev))
1da177e4 508 goto failure;
1da177e4
LT
509 if (dev_open(dev))
510 goto failure;
511
7dc00c82
WC
512 dev_hold(dev);
513
1da177e4
LT
514 return dev;
515
516failure:
1da177e4
LT
517 unregister_netdevice(dev);
518 return NULL;
519}
c316c629
NA
520
521/* called with rcu_read_lock() */
522static int __pim_rcv(struct mr_table *mrt, struct sk_buff *skb,
523 unsigned int pimlen)
524{
525 struct net_device *reg_dev = NULL;
526 struct iphdr *encap;
527
528 encap = (struct iphdr *)(skb_transport_header(skb) + pimlen);
7ef8f65d 529 /* Check that:
c316c629
NA
530 * a. packet is really sent to a multicast group
531 * b. packet is not a NULL-REGISTER
532 * c. packet is not truncated
533 */
534 if (!ipv4_is_multicast(encap->daddr) ||
535 encap->tot_len == 0 ||
536 ntohs(encap->tot_len) + pimlen > skb->len)
537 return 1;
538
539 read_lock(&mrt_lock);
540 if (mrt->mroute_reg_vif_num >= 0)
541 reg_dev = mrt->vif_table[mrt->mroute_reg_vif_num].dev;
542 read_unlock(&mrt_lock);
543
544 if (!reg_dev)
545 return 1;
546
547 skb->mac_header = skb->network_header;
548 skb_pull(skb, (u8 *)encap - skb->data);
549 skb_reset_network_header(skb);
550 skb->protocol = htons(ETH_P_IP);
551 skb->ip_summed = CHECKSUM_NONE;
552
553 skb_tunnel_rx(skb, reg_dev, dev_net(reg_dev));
554
555 netif_rx(skb);
556
557 return NET_RX_SUCCESS;
558}
559#else
560static struct net_device *ipmr_reg_vif(struct net *net, struct mr_table *mrt)
561{
562 return NULL;
563}
1da177e4
LT
564#endif
565
566/**
567 * vif_delete - Delete a VIF entry
568 * @notify: Set to 1 if the caller is a notifier_call
569 */
0c12295a 570static int vif_delete(struct mr_table *mrt, int vifi, int notify,
d17fa6fa 571 struct list_head *head)
1da177e4
LT
572{
573 struct vif_device *v;
574 struct net_device *dev;
575 struct in_device *in_dev;
576
0c12295a 577 if (vifi < 0 || vifi >= mrt->maxvif)
1da177e4
LT
578 return -EADDRNOTAVAIL;
579
0c12295a 580 v = &mrt->vif_table[vifi];
1da177e4
LT
581
582 write_lock_bh(&mrt_lock);
583 dev = v->dev;
584 v->dev = NULL;
585
586 if (!dev) {
587 write_unlock_bh(&mrt_lock);
588 return -EADDRNOTAVAIL;
589 }
590
0c12295a
PM
591 if (vifi == mrt->mroute_reg_vif_num)
592 mrt->mroute_reg_vif_num = -1;
1da177e4 593
a8cb16dd 594 if (vifi + 1 == mrt->maxvif) {
1da177e4 595 int tmp;
a8cb16dd
ED
596
597 for (tmp = vifi - 1; tmp >= 0; tmp--) {
0c12295a 598 if (VIF_EXISTS(mrt, tmp))
1da177e4
LT
599 break;
600 }
0c12295a 601 mrt->maxvif = tmp+1;
1da177e4
LT
602 }
603
604 write_unlock_bh(&mrt_lock);
605
606 dev_set_allmulti(dev, -1);
607
a8cb16dd
ED
608 in_dev = __in_dev_get_rtnl(dev);
609 if (in_dev) {
42f811b8 610 IPV4_DEVCONF(in_dev->cnf, MC_FORWARDING)--;
d67b8c61
ND
611 inet_netconf_notify_devconf(dev_net(dev),
612 NETCONFA_MC_FORWARDING,
613 dev->ifindex, &in_dev->cnf);
1da177e4
LT
614 ip_rt_multicast_event(in_dev);
615 }
616
a8cb16dd 617 if (v->flags & (VIFF_TUNNEL | VIFF_REGISTER) && !notify)
d17fa6fa 618 unregister_netdevice_queue(dev, head);
1da177e4
LT
619
620 dev_put(dev);
621 return 0;
622}
623
a8c9486b 624static void ipmr_cache_free_rcu(struct rcu_head *head)
5c0a66f5 625{
a8c9486b
ED
626 struct mfc_cache *c = container_of(head, struct mfc_cache, rcu);
627
5c0a66f5
BT
628 kmem_cache_free(mrt_cachep, c);
629}
630
a8c9486b
ED
631static inline void ipmr_cache_free(struct mfc_cache *c)
632{
633 call_rcu(&c->rcu, ipmr_cache_free_rcu);
634}
635
1da177e4 636/* Destroy an unresolved cache entry, killing queued skbs
a8cb16dd 637 * and reporting error to netlink readers.
1da177e4 638 */
0c12295a 639static void ipmr_destroy_unres(struct mr_table *mrt, struct mfc_cache *c)
1da177e4 640{
8de53dfb 641 struct net *net = read_pnet(&mrt->net);
1da177e4 642 struct sk_buff *skb;
9ef1d4c7 643 struct nlmsgerr *e;
1da177e4 644
0c12295a 645 atomic_dec(&mrt->cache_resolve_queue_len);
1da177e4 646
c354e124 647 while ((skb = skb_dequeue(&c->mfc_un.unres.unresolved))) {
eddc9ec5 648 if (ip_hdr(skb)->version == 0) {
1da177e4
LT
649 struct nlmsghdr *nlh = (struct nlmsghdr *)skb_pull(skb, sizeof(struct iphdr));
650 nlh->nlmsg_type = NLMSG_ERROR;
573ce260 651 nlh->nlmsg_len = nlmsg_msg_size(sizeof(struct nlmsgerr));
1da177e4 652 skb_trim(skb, nlh->nlmsg_len);
573ce260 653 e = nlmsg_data(nlh);
9ef1d4c7
PM
654 e->error = -ETIMEDOUT;
655 memset(&e->msg, 0, sizeof(e->msg));
2942e900 656
15e47304 657 rtnl_unicast(skb, net, NETLINK_CB(skb).portid);
a8cb16dd 658 } else {
1da177e4 659 kfree_skb(skb);
a8cb16dd 660 }
1da177e4
LT
661 }
662
5c0a66f5 663 ipmr_cache_free(c);
1da177e4
LT
664}
665
e258beb2 666/* Timer process for the unresolved queue. */
e258beb2 667static void ipmr_expire_process(unsigned long arg)
1da177e4 668{
0c12295a 669 struct mr_table *mrt = (struct mr_table *)arg;
1da177e4
LT
670 unsigned long now;
671 unsigned long expires;
862465f2 672 struct mfc_cache *c, *next;
1da177e4
LT
673
674 if (!spin_trylock(&mfc_unres_lock)) {
0c12295a 675 mod_timer(&mrt->ipmr_expire_timer, jiffies+HZ/10);
1da177e4
LT
676 return;
677 }
678
0c12295a 679 if (list_empty(&mrt->mfc_unres_queue))
1da177e4
LT
680 goto out;
681
682 now = jiffies;
683 expires = 10*HZ;
1da177e4 684
0c12295a 685 list_for_each_entry_safe(c, next, &mrt->mfc_unres_queue, list) {
1da177e4
LT
686 if (time_after(c->mfc_un.unres.expires, now)) {
687 unsigned long interval = c->mfc_un.unres.expires - now;
688 if (interval < expires)
689 expires = interval;
1da177e4
LT
690 continue;
691 }
692
862465f2 693 list_del(&c->list);
8cd3ac9f 694 mroute_netlink_event(mrt, c, RTM_DELROUTE);
0c12295a 695 ipmr_destroy_unres(mrt, c);
1da177e4
LT
696 }
697
0c12295a
PM
698 if (!list_empty(&mrt->mfc_unres_queue))
699 mod_timer(&mrt->ipmr_expire_timer, jiffies + expires);
1da177e4
LT
700
701out:
702 spin_unlock(&mfc_unres_lock);
703}
704
705/* Fill oifs list. It is called under write locked mrt_lock. */
0c12295a 706static void ipmr_update_thresholds(struct mr_table *mrt, struct mfc_cache *cache,
d658f8a0 707 unsigned char *ttls)
1da177e4
LT
708{
709 int vifi;
710
711 cache->mfc_un.res.minvif = MAXVIFS;
712 cache->mfc_un.res.maxvif = 0;
713 memset(cache->mfc_un.res.ttls, 255, MAXVIFS);
714
0c12295a
PM
715 for (vifi = 0; vifi < mrt->maxvif; vifi++) {
716 if (VIF_EXISTS(mrt, vifi) &&
cf958ae3 717 ttls[vifi] && ttls[vifi] < 255) {
1da177e4
LT
718 cache->mfc_un.res.ttls[vifi] = ttls[vifi];
719 if (cache->mfc_un.res.minvif > vifi)
720 cache->mfc_un.res.minvif = vifi;
721 if (cache->mfc_un.res.maxvif <= vifi)
722 cache->mfc_un.res.maxvif = vifi + 1;
723 }
724 }
725}
726
0c12295a
PM
727static int vif_add(struct net *net, struct mr_table *mrt,
728 struct vifctl *vifc, int mrtsock)
1da177e4
LT
729{
730 int vifi = vifc->vifc_vifi;
0c12295a 731 struct vif_device *v = &mrt->vif_table[vifi];
1da177e4
LT
732 struct net_device *dev;
733 struct in_device *in_dev;
d607032d 734 int err;
1da177e4
LT
735
736 /* Is vif busy ? */
0c12295a 737 if (VIF_EXISTS(mrt, vifi))
1da177e4
LT
738 return -EADDRINUSE;
739
740 switch (vifc->vifc_flags) {
1da177e4 741 case VIFF_REGISTER:
1973a4ea 742 if (!ipmr_pimsm_enabled())
c316c629
NA
743 return -EINVAL;
744 /* Special Purpose VIF in PIM
1da177e4
LT
745 * All the packets will be sent to the daemon
746 */
0c12295a 747 if (mrt->mroute_reg_vif_num >= 0)
1da177e4 748 return -EADDRINUSE;
f0ad0860 749 dev = ipmr_reg_vif(net, mrt);
1da177e4
LT
750 if (!dev)
751 return -ENOBUFS;
d607032d
WC
752 err = dev_set_allmulti(dev, 1);
753 if (err) {
754 unregister_netdevice(dev);
7dc00c82 755 dev_put(dev);
d607032d
WC
756 return err;
757 }
1da177e4 758 break;
e905a9ed 759 case VIFF_TUNNEL:
4feb88e5 760 dev = ipmr_new_tunnel(net, vifc);
1da177e4
LT
761 if (!dev)
762 return -ENOBUFS;
d607032d
WC
763 err = dev_set_allmulti(dev, 1);
764 if (err) {
765 ipmr_del_tunnel(dev, vifc);
7dc00c82 766 dev_put(dev);
d607032d
WC
767 return err;
768 }
1da177e4 769 break;
ee5e81f0 770 case VIFF_USE_IFINDEX:
1da177e4 771 case 0:
ee5e81f0
I
772 if (vifc->vifc_flags == VIFF_USE_IFINDEX) {
773 dev = dev_get_by_index(net, vifc->vifc_lcl_ifindex);
51456b29 774 if (dev && !__in_dev_get_rtnl(dev)) {
ee5e81f0
I
775 dev_put(dev);
776 return -EADDRNOTAVAIL;
777 }
a8cb16dd 778 } else {
ee5e81f0 779 dev = ip_dev_find(net, vifc->vifc_lcl_addr.s_addr);
a8cb16dd 780 }
1da177e4
LT
781 if (!dev)
782 return -EADDRNOTAVAIL;
d607032d 783 err = dev_set_allmulti(dev, 1);
7dc00c82
WC
784 if (err) {
785 dev_put(dev);
d607032d 786 return err;
7dc00c82 787 }
1da177e4
LT
788 break;
789 default:
790 return -EINVAL;
791 }
792
a8cb16dd
ED
793 in_dev = __in_dev_get_rtnl(dev);
794 if (!in_dev) {
d0490cfd 795 dev_put(dev);
1da177e4 796 return -EADDRNOTAVAIL;
d0490cfd 797 }
42f811b8 798 IPV4_DEVCONF(in_dev->cnf, MC_FORWARDING)++;
d67b8c61
ND
799 inet_netconf_notify_devconf(net, NETCONFA_MC_FORWARDING, dev->ifindex,
800 &in_dev->cnf);
1da177e4
LT
801 ip_rt_multicast_event(in_dev);
802
a8cb16dd
ED
803 /* Fill in the VIF structures */
804
c354e124
JK
805 v->rate_limit = vifc->vifc_rate_limit;
806 v->local = vifc->vifc_lcl_addr.s_addr;
807 v->remote = vifc->vifc_rmt_addr.s_addr;
808 v->flags = vifc->vifc_flags;
1da177e4
LT
809 if (!mrtsock)
810 v->flags |= VIFF_STATIC;
c354e124 811 v->threshold = vifc->vifc_threshold;
1da177e4
LT
812 v->bytes_in = 0;
813 v->bytes_out = 0;
814 v->pkt_in = 0;
815 v->pkt_out = 0;
816 v->link = dev->ifindex;
a8cb16dd 817 if (v->flags & (VIFF_TUNNEL | VIFF_REGISTER))
a54acb3a 818 v->link = dev_get_iflink(dev);
1da177e4
LT
819
820 /* And finish update writing critical data */
821 write_lock_bh(&mrt_lock);
c354e124 822 v->dev = dev;
a8cb16dd 823 if (v->flags & VIFF_REGISTER)
0c12295a 824 mrt->mroute_reg_vif_num = vifi;
0c12295a
PM
825 if (vifi+1 > mrt->maxvif)
826 mrt->maxvif = vifi+1;
1da177e4
LT
827 write_unlock_bh(&mrt_lock);
828 return 0;
829}
830
a8c9486b 831/* called with rcu_read_lock() */
0c12295a 832static struct mfc_cache *ipmr_cache_find(struct mr_table *mrt,
4feb88e5
BT
833 __be32 origin,
834 __be32 mcastgrp)
1da177e4 835{
c354e124 836 int line = MFC_HASH(mcastgrp, origin);
1da177e4
LT
837 struct mfc_cache *c;
838
a8c9486b 839 list_for_each_entry_rcu(c, &mrt->mfc_cache_array[line], list) {
862465f2
PM
840 if (c->mfc_origin == origin && c->mfc_mcastgrp == mcastgrp)
841 return c;
1da177e4 842 }
862465f2 843 return NULL;
1da177e4
LT
844}
845
660b26dc
ND
846/* Look for a (*,*,oif) entry */
847static struct mfc_cache *ipmr_cache_find_any_parent(struct mr_table *mrt,
848 int vifi)
849{
360eb5da 850 int line = MFC_HASH(htonl(INADDR_ANY), htonl(INADDR_ANY));
660b26dc
ND
851 struct mfc_cache *c;
852
853 list_for_each_entry_rcu(c, &mrt->mfc_cache_array[line], list)
360eb5da
ND
854 if (c->mfc_origin == htonl(INADDR_ANY) &&
855 c->mfc_mcastgrp == htonl(INADDR_ANY) &&
660b26dc
ND
856 c->mfc_un.res.ttls[vifi] < 255)
857 return c;
858
859 return NULL;
860}
861
862/* Look for a (*,G) entry */
863static struct mfc_cache *ipmr_cache_find_any(struct mr_table *mrt,
864 __be32 mcastgrp, int vifi)
865{
360eb5da 866 int line = MFC_HASH(mcastgrp, htonl(INADDR_ANY));
660b26dc
ND
867 struct mfc_cache *c, *proxy;
868
360eb5da 869 if (mcastgrp == htonl(INADDR_ANY))
660b26dc
ND
870 goto skip;
871
872 list_for_each_entry_rcu(c, &mrt->mfc_cache_array[line], list)
360eb5da 873 if (c->mfc_origin == htonl(INADDR_ANY) &&
660b26dc
ND
874 c->mfc_mcastgrp == mcastgrp) {
875 if (c->mfc_un.res.ttls[vifi] < 255)
876 return c;
877
878 /* It's ok if the vifi is part of the static tree */
879 proxy = ipmr_cache_find_any_parent(mrt,
880 c->mfc_parent);
881 if (proxy && proxy->mfc_un.res.ttls[vifi] < 255)
882 return c;
883 }
884
885skip:
886 return ipmr_cache_find_any_parent(mrt, vifi);
887}
888
7ef8f65d 889/* Allocate a multicast cache entry */
d658f8a0 890static struct mfc_cache *ipmr_cache_alloc(void)
1da177e4 891{
c354e124 892 struct mfc_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_KERNEL);
a8c9486b
ED
893
894 if (c)
895 c->mfc_un.res.minvif = MAXVIFS;
1da177e4
LT
896 return c;
897}
898
d658f8a0 899static struct mfc_cache *ipmr_cache_alloc_unres(void)
1da177e4 900{
c354e124 901 struct mfc_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_ATOMIC);
a8c9486b
ED
902
903 if (c) {
904 skb_queue_head_init(&c->mfc_un.unres.unresolved);
905 c->mfc_un.unres.expires = jiffies + 10*HZ;
906 }
1da177e4
LT
907 return c;
908}
909
7ef8f65d 910/* A cache entry has gone into a resolved state from queued */
0c12295a
PM
911static void ipmr_cache_resolve(struct net *net, struct mr_table *mrt,
912 struct mfc_cache *uc, struct mfc_cache *c)
1da177e4
LT
913{
914 struct sk_buff *skb;
9ef1d4c7 915 struct nlmsgerr *e;
1da177e4 916
a8cb16dd 917 /* Play the pending entries through our router */
c354e124 918 while ((skb = __skb_dequeue(&uc->mfc_un.unres.unresolved))) {
eddc9ec5 919 if (ip_hdr(skb)->version == 0) {
1da177e4
LT
920 struct nlmsghdr *nlh = (struct nlmsghdr *)skb_pull(skb, sizeof(struct iphdr));
921
573ce260 922 if (__ipmr_fill_mroute(mrt, skb, c, nlmsg_data(nlh)) > 0) {
a8cb16dd
ED
923 nlh->nlmsg_len = skb_tail_pointer(skb) -
924 (u8 *)nlh;
1da177e4
LT
925 } else {
926 nlh->nlmsg_type = NLMSG_ERROR;
573ce260 927 nlh->nlmsg_len = nlmsg_msg_size(sizeof(struct nlmsgerr));
1da177e4 928 skb_trim(skb, nlh->nlmsg_len);
573ce260 929 e = nlmsg_data(nlh);
9ef1d4c7
PM
930 e->error = -EMSGSIZE;
931 memset(&e->msg, 0, sizeof(e->msg));
1da177e4 932 }
2942e900 933
15e47304 934 rtnl_unicast(skb, net, NETLINK_CB(skb).portid);
a8cb16dd 935 } else {
0c12295a 936 ip_mr_forward(net, mrt, skb, c, 0);
a8cb16dd 937 }
1da177e4
LT
938 }
939}
940
c316c629
NA
941/* Bounce a cache query up to mrouted. We could use netlink for this but mrouted
942 * expects the following bizarre scheme.
1da177e4 943 *
c316c629 944 * Called under mrt_lock.
1da177e4 945 */
0c12295a 946static int ipmr_cache_report(struct mr_table *mrt,
4feb88e5 947 struct sk_buff *pkt, vifi_t vifi, int assert)
1da177e4 948{
c9bdd4b5 949 const int ihl = ip_hdrlen(pkt);
c316c629 950 struct sock *mroute_sk;
1da177e4
LT
951 struct igmphdr *igmp;
952 struct igmpmsg *msg;
c316c629 953 struct sk_buff *skb;
1da177e4
LT
954 int ret;
955
1da177e4
LT
956 if (assert == IGMPMSG_WHOLEPKT)
957 skb = skb_realloc_headroom(pkt, sizeof(struct iphdr));
958 else
1da177e4
LT
959 skb = alloc_skb(128, GFP_ATOMIC);
960
132adf54 961 if (!skb)
1da177e4
LT
962 return -ENOBUFS;
963
1da177e4
LT
964 if (assert == IGMPMSG_WHOLEPKT) {
965 /* Ugly, but we have no choice with this interface.
966 * Duplicate old header, fix ihl, length etc.
967 * And all this only to mangle msg->im_msgtype and
968 * to set msg->im_mbz to "mbz" :-)
969 */
878c8145
ACM
970 skb_push(skb, sizeof(struct iphdr));
971 skb_reset_network_header(skb);
badff6d0 972 skb_reset_transport_header(skb);
0272ffc4 973 msg = (struct igmpmsg *)skb_network_header(skb);
d56f90a7 974 memcpy(msg, skb_network_header(pkt), sizeof(struct iphdr));
1da177e4
LT
975 msg->im_msgtype = IGMPMSG_WHOLEPKT;
976 msg->im_mbz = 0;
0c12295a 977 msg->im_vif = mrt->mroute_reg_vif_num;
eddc9ec5
ACM
978 ip_hdr(skb)->ihl = sizeof(struct iphdr) >> 2;
979 ip_hdr(skb)->tot_len = htons(ntohs(ip_hdr(pkt)->tot_len) +
980 sizeof(struct iphdr));
c316c629
NA
981 } else {
982 /* Copy the IP header */
983 skb_set_network_header(skb, skb->len);
984 skb_put(skb, ihl);
985 skb_copy_to_linear_data(skb, pkt->data, ihl);
986 /* Flag to the kernel this is a route add */
987 ip_hdr(skb)->protocol = 0;
988 msg = (struct igmpmsg *)skb_network_header(skb);
989 msg->im_vif = vifi;
990 skb_dst_set(skb, dst_clone(skb_dst(pkt)));
991 /* Add our header */
992 igmp = (struct igmphdr *)skb_put(skb, sizeof(struct igmphdr));
993 igmp->type = assert;
994 msg->im_msgtype = assert;
995 igmp->code = 0;
996 ip_hdr(skb)->tot_len = htons(skb->len); /* Fix the length */
997 skb->transport_header = skb->network_header;
e905a9ed 998 }
1da177e4 999
4c968709
ED
1000 rcu_read_lock();
1001 mroute_sk = rcu_dereference(mrt->mroute_sk);
51456b29 1002 if (!mroute_sk) {
4c968709 1003 rcu_read_unlock();
1da177e4
LT
1004 kfree_skb(skb);
1005 return -EINVAL;
1006 }
1007
a8cb16dd 1008 /* Deliver to mrouted */
4c968709
ED
1009 ret = sock_queue_rcv_skb(mroute_sk, skb);
1010 rcu_read_unlock();
70a269e6 1011 if (ret < 0) {
e87cc472 1012 net_warn_ratelimited("mroute: pending queue full, dropping entries\n");
1da177e4
LT
1013 kfree_skb(skb);
1014 }
1015
1016 return ret;
1017}
1018
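/* For reference, this is how a userspace daemon such as mrouted consumes the
 * upcall queued above: it reads the packet from the same raw IGMP socket it
 * passed to MRT_INIT and overlays struct igmpmsg on the IP header. A minimal,
 * hedged userspace sketch (error handling trimmed; "mroute_fd" is assumed to
 * be that socket and the helper name is a placeholder):
 *
 *	#include <linux/mroute.h>	// struct igmpmsg, IGMPMSG_*
 *	#include <unistd.h>
 *
 *	static void read_one_upcall(int mroute_fd)
 *	{
 *		char buf[2048];
 *		ssize_t len = read(mroute_fd, buf, sizeof(buf));
 *		struct igmpmsg *msg = (struct igmpmsg *)buf;
 *
 *		if (len < (ssize_t)sizeof(*msg))
 *			return;
 *		if (msg->im_msgtype == IGMPMSG_NOCACHE) {
 *			// No MFC entry yet: resolve (im_src, im_dst) seen on
 *			// vif msg->im_vif and install it with MRT_ADD_MFC.
 *		} else if (msg->im_msgtype == IGMPMSG_WRONGVIF) {
 *			// PIM assert: packet arrived on an unexpected vif.
 *		}
 *	}
 */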
7ef8f65d
NA
1019/* Queue a packet for resolution. It gets locked cache entry! */
1020static int ipmr_cache_unresolved(struct mr_table *mrt, vifi_t vifi,
1021 struct sk_buff *skb)
1da177e4 1022{
862465f2 1023 bool found = false;
1da177e4
LT
1024 int err;
1025 struct mfc_cache *c;
eddc9ec5 1026 const struct iphdr *iph = ip_hdr(skb);
1da177e4
LT
1027
1028 spin_lock_bh(&mfc_unres_lock);
0c12295a 1029 list_for_each_entry(c, &mrt->mfc_unres_queue, list) {
e258beb2 1030 if (c->mfc_mcastgrp == iph->daddr &&
862465f2
PM
1031 c->mfc_origin == iph->saddr) {
1032 found = true;
1da177e4 1033 break;
862465f2 1034 }
1da177e4
LT
1035 }
1036
862465f2 1037 if (!found) {
a8cb16dd 1038 /* Create a new entry if allowable */
0c12295a 1039 if (atomic_read(&mrt->cache_resolve_queue_len) >= 10 ||
d658f8a0 1040 (c = ipmr_cache_alloc_unres()) == NULL) {
1da177e4
LT
1041 spin_unlock_bh(&mfc_unres_lock);
1042
1043 kfree_skb(skb);
1044 return -ENOBUFS;
1045 }
1046
a8cb16dd 1047 /* Fill in the new cache entry */
eddc9ec5
ACM
1048 c->mfc_parent = -1;
1049 c->mfc_origin = iph->saddr;
1050 c->mfc_mcastgrp = iph->daddr;
1da177e4 1051
a8cb16dd 1052 /* Reflect first query at mrouted. */
0c12295a 1053 err = ipmr_cache_report(mrt, skb, vifi, IGMPMSG_NOCACHE);
4feb88e5 1054 if (err < 0) {
e905a9ed 1055 /* If the report failed throw the cache entry
1da177e4
LT
1056 out - Brad Parker
1057 */
1058 spin_unlock_bh(&mfc_unres_lock);
1059
5c0a66f5 1060 ipmr_cache_free(c);
1da177e4
LT
1061 kfree_skb(skb);
1062 return err;
1063 }
1064
0c12295a
PM
1065 atomic_inc(&mrt->cache_resolve_queue_len);
1066 list_add(&c->list, &mrt->mfc_unres_queue);
8cd3ac9f 1067 mroute_netlink_event(mrt, c, RTM_NEWROUTE);
1da177e4 1068
278554bd
DM
1069 if (atomic_read(&mrt->cache_resolve_queue_len) == 1)
1070 mod_timer(&mrt->ipmr_expire_timer, c->mfc_un.unres.expires);
1da177e4
LT
1071 }
1072
a8cb16dd 1073 /* See if we can append the packet */
a8cb16dd 1074 if (c->mfc_un.unres.unresolved.qlen > 3) {
1da177e4
LT
1075 kfree_skb(skb);
1076 err = -ENOBUFS;
1077 } else {
c354e124 1078 skb_queue_tail(&c->mfc_un.unres.unresolved, skb);
1da177e4
LT
1079 err = 0;
1080 }
1081
1082 spin_unlock_bh(&mfc_unres_lock);
1083 return err;
1084}
1085
7ef8f65d 1086/* MFC cache manipulation by user space mroute daemon */
1da177e4 1087
660b26dc 1088static int ipmr_mfc_delete(struct mr_table *mrt, struct mfcctl *mfc, int parent)
1da177e4
LT
1089{
1090 int line;
862465f2 1091 struct mfc_cache *c, *next;
1da177e4 1092
c354e124 1093 line = MFC_HASH(mfc->mfcc_mcastgrp.s_addr, mfc->mfcc_origin.s_addr);
1da177e4 1094
0c12295a 1095 list_for_each_entry_safe(c, next, &mrt->mfc_cache_array[line], list) {
1da177e4 1096 if (c->mfc_origin == mfc->mfcc_origin.s_addr &&
660b26dc
ND
1097 c->mfc_mcastgrp == mfc->mfcc_mcastgrp.s_addr &&
1098 (parent == -1 || parent == c->mfc_parent)) {
a8c9486b 1099 list_del_rcu(&c->list);
8cd3ac9f 1100 mroute_netlink_event(mrt, c, RTM_DELROUTE);
5c0a66f5 1101 ipmr_cache_free(c);
1da177e4
LT
1102 return 0;
1103 }
1104 }
1105 return -ENOENT;
1106}
1107
0c12295a 1108static int ipmr_mfc_add(struct net *net, struct mr_table *mrt,
660b26dc 1109 struct mfcctl *mfc, int mrtsock, int parent)
1da177e4 1110{
862465f2 1111 bool found = false;
1da177e4 1112 int line;
862465f2 1113 struct mfc_cache *uc, *c;
1da177e4 1114
a50436f2
PM
1115 if (mfc->mfcc_parent >= MAXVIFS)
1116 return -ENFILE;
1117
c354e124 1118 line = MFC_HASH(mfc->mfcc_mcastgrp.s_addr, mfc->mfcc_origin.s_addr);
1da177e4 1119
0c12295a 1120 list_for_each_entry(c, &mrt->mfc_cache_array[line], list) {
1da177e4 1121 if (c->mfc_origin == mfc->mfcc_origin.s_addr &&
660b26dc
ND
1122 c->mfc_mcastgrp == mfc->mfcc_mcastgrp.s_addr &&
1123 (parent == -1 || parent == c->mfc_parent)) {
862465f2 1124 found = true;
1da177e4 1125 break;
862465f2 1126 }
1da177e4
LT
1127 }
1128
862465f2 1129 if (found) {
1da177e4
LT
1130 write_lock_bh(&mrt_lock);
1131 c->mfc_parent = mfc->mfcc_parent;
0c12295a 1132 ipmr_update_thresholds(mrt, c, mfc->mfcc_ttls);
1da177e4
LT
1133 if (!mrtsock)
1134 c->mfc_flags |= MFC_STATIC;
1135 write_unlock_bh(&mrt_lock);
8cd3ac9f 1136 mroute_netlink_event(mrt, c, RTM_NEWROUTE);
1da177e4
LT
1137 return 0;
1138 }
1139
360eb5da 1140 if (mfc->mfcc_mcastgrp.s_addr != htonl(INADDR_ANY) &&
660b26dc 1141 !ipv4_is_multicast(mfc->mfcc_mcastgrp.s_addr))
1da177e4
LT
1142 return -EINVAL;
1143
d658f8a0 1144 c = ipmr_cache_alloc();
51456b29 1145 if (!c)
1da177e4
LT
1146 return -ENOMEM;
1147
c354e124
JK
1148 c->mfc_origin = mfc->mfcc_origin.s_addr;
1149 c->mfc_mcastgrp = mfc->mfcc_mcastgrp.s_addr;
1150 c->mfc_parent = mfc->mfcc_parent;
0c12295a 1151 ipmr_update_thresholds(mrt, c, mfc->mfcc_ttls);
1da177e4
LT
1152 if (!mrtsock)
1153 c->mfc_flags |= MFC_STATIC;
1154
a8c9486b 1155 list_add_rcu(&c->list, &mrt->mfc_cache_array[line]);
1da177e4 1156
7ef8f65d
NA
1157 /* Check to see if we resolved a queued list. If so we
1158 * need to send on the frames and tidy up.
1da177e4 1159 */
b0ebb739 1160 found = false;
1da177e4 1161 spin_lock_bh(&mfc_unres_lock);
0c12295a 1162 list_for_each_entry(uc, &mrt->mfc_unres_queue, list) {
e258beb2 1163 if (uc->mfc_origin == c->mfc_origin &&
1da177e4 1164 uc->mfc_mcastgrp == c->mfc_mcastgrp) {
862465f2 1165 list_del(&uc->list);
0c12295a 1166 atomic_dec(&mrt->cache_resolve_queue_len);
b0ebb739 1167 found = true;
1da177e4
LT
1168 break;
1169 }
1170 }
0c12295a
PM
1171 if (list_empty(&mrt->mfc_unres_queue))
1172 del_timer(&mrt->ipmr_expire_timer);
1da177e4
LT
1173 spin_unlock_bh(&mfc_unres_lock);
1174
b0ebb739 1175 if (found) {
0c12295a 1176 ipmr_cache_resolve(net, mrt, uc, c);
5c0a66f5 1177 ipmr_cache_free(uc);
1da177e4 1178 }
8cd3ac9f 1179 mroute_netlink_event(mrt, c, RTM_NEWROUTE);
1da177e4
LT
1180 return 0;
1181}
1182
7ef8f65d 1183/* Close the multicast socket, and clear the vif tables etc */
0e615e96 1184static void mroute_clean_tables(struct mr_table *mrt, bool all)
1da177e4
LT
1185{
1186 int i;
d17fa6fa 1187 LIST_HEAD(list);
862465f2 1188 struct mfc_cache *c, *next;
e905a9ed 1189
a8cb16dd 1190 /* Shut down all active vif entries */
0c12295a 1191 for (i = 0; i < mrt->maxvif; i++) {
0e615e96
NA
1192 if (!all && (mrt->vif_table[i].flags & VIFF_STATIC))
1193 continue;
1194 vif_delete(mrt, i, 0, &list);
1da177e4 1195 }
d17fa6fa 1196 unregister_netdevice_many(&list);
1da177e4 1197
a8cb16dd 1198 /* Wipe the cache */
862465f2 1199 for (i = 0; i < MFC_LINES; i++) {
0c12295a 1200 list_for_each_entry_safe(c, next, &mrt->mfc_cache_array[i], list) {
0e615e96 1201 if (!all && (c->mfc_flags & MFC_STATIC))
1da177e4 1202 continue;
a8c9486b 1203 list_del_rcu(&c->list);
8cd3ac9f 1204 mroute_netlink_event(mrt, c, RTM_DELROUTE);
5c0a66f5 1205 ipmr_cache_free(c);
1da177e4
LT
1206 }
1207 }
1208
0c12295a 1209 if (atomic_read(&mrt->cache_resolve_queue_len) != 0) {
1da177e4 1210 spin_lock_bh(&mfc_unres_lock);
0c12295a 1211 list_for_each_entry_safe(c, next, &mrt->mfc_unres_queue, list) {
862465f2 1212 list_del(&c->list);
8cd3ac9f 1213 mroute_netlink_event(mrt, c, RTM_DELROUTE);
0c12295a 1214 ipmr_destroy_unres(mrt, c);
1da177e4
LT
1215 }
1216 spin_unlock_bh(&mfc_unres_lock);
1217 }
1218}
1219
4c968709
ED
1220/* called from ip_ra_control(), before an RCU grace period,
1221 * we dont need to call synchronize_rcu() here
1222 */
1da177e4
LT
1223static void mrtsock_destruct(struct sock *sk)
1224{
4feb88e5 1225 struct net *net = sock_net(sk);
f0ad0860 1226 struct mr_table *mrt;
4feb88e5 1227
1da177e4 1228 rtnl_lock();
f0ad0860 1229 ipmr_for_each_table(mrt, net) {
4c968709 1230 if (sk == rtnl_dereference(mrt->mroute_sk)) {
f0ad0860 1231 IPV4_DEVCONF_ALL(net, MC_FORWARDING)--;
d67b8c61
ND
1232 inet_netconf_notify_devconf(net, NETCONFA_MC_FORWARDING,
1233 NETCONFA_IFINDEX_ALL,
1234 net->ipv4.devconf_all);
a9b3cd7f 1235 RCU_INIT_POINTER(mrt->mroute_sk, NULL);
0e615e96 1236 mroute_clean_tables(mrt, false);
f0ad0860 1237 }
1da177e4
LT
1238 }
1239 rtnl_unlock();
1240}
1241
7ef8f65d
NA
1242/* Socket options and virtual interface manipulation. The whole
1243 * virtual interface system is a complete heap, but unfortunately
1244 * that's how BSD mrouted happens to think. Maybe one day with a proper
1245 * MOSPF/PIM router set up we can clean this up.
1da177e4 1246 */
e905a9ed 1247
29e97d21
NA
1248int ip_mroute_setsockopt(struct sock *sk, int optname, char __user *optval,
1249 unsigned int optlen)
1da177e4 1250{
4feb88e5 1251 struct net *net = sock_net(sk);
29e97d21 1252 int val, ret = 0, parent = 0;
f0ad0860 1253 struct mr_table *mrt;
29e97d21
NA
1254 struct vifctl vif;
1255 struct mfcctl mfc;
1256 u32 uval;
f0ad0860 1257
29e97d21
NA
1258 /* There's one exception to the lock - MRT_DONE which needs to unlock */
1259 rtnl_lock();
5e1859fb 1260 if (sk->sk_type != SOCK_RAW ||
29e97d21
NA
1261 inet_sk(sk)->inet_num != IPPROTO_IGMP) {
1262 ret = -EOPNOTSUPP;
1263 goto out_unlock;
1264 }
5e1859fb 1265
f0ad0860 1266 mrt = ipmr_get_table(net, raw_sk(sk)->ipmr_table ? : RT_TABLE_DEFAULT);
29e97d21
NA
1267 if (!mrt) {
1268 ret = -ENOENT;
1269 goto out_unlock;
1270 }
132adf54 1271 if (optname != MRT_INIT) {
33d480ce 1272 if (sk != rcu_access_pointer(mrt->mroute_sk) &&
29e97d21
NA
1273 !ns_capable(net->user_ns, CAP_NET_ADMIN)) {
1274 ret = -EACCES;
1275 goto out_unlock;
1276 }
1da177e4
LT
1277 }
1278
132adf54
SH
1279 switch (optname) {
1280 case MRT_INIT:
42e6b89c 1281 if (optlen != sizeof(int)) {
29e97d21 1282 ret = -EINVAL;
42e6b89c
NA
1283 break;
1284 }
1285 if (rtnl_dereference(mrt->mroute_sk)) {
29e97d21 1286 ret = -EADDRINUSE;
29e97d21 1287 break;
42e6b89c 1288 }
132adf54
SH
1289
1290 ret = ip_ra_control(sk, 1, mrtsock_destruct);
1291 if (ret == 0) {
cf778b00 1292 rcu_assign_pointer(mrt->mroute_sk, sk);
4feb88e5 1293 IPV4_DEVCONF_ALL(net, MC_FORWARDING)++;
d67b8c61
ND
1294 inet_netconf_notify_devconf(net, NETCONFA_MC_FORWARDING,
1295 NETCONFA_IFINDEX_ALL,
1296 net->ipv4.devconf_all);
132adf54 1297 }
29e97d21 1298 break;
132adf54 1299 case MRT_DONE:
29e97d21
NA
1300 if (sk != rcu_access_pointer(mrt->mroute_sk)) {
1301 ret = -EACCES;
1302 } else {
1303 /* We need to unlock here because mrtsock_destruct takes
1304 * care of rtnl itself and we can't change that due to
1305 * the IP_ROUTER_ALERT setsockopt which runs without it.
1306 */
1307 rtnl_unlock();
1308 ret = ip_ra_control(sk, 0, NULL);
1309 goto out;
1310 }
1311 break;
132adf54
SH
1312 case MRT_ADD_VIF:
1313 case MRT_DEL_VIF:
29e97d21
NA
1314 if (optlen != sizeof(vif)) {
1315 ret = -EINVAL;
1316 break;
1317 }
1318 if (copy_from_user(&vif, optval, sizeof(vif))) {
1319 ret = -EFAULT;
1320 break;
1321 }
1322 if (vif.vifc_vifi >= MAXVIFS) {
1323 ret = -ENFILE;
1324 break;
1325 }
c354e124 1326 if (optname == MRT_ADD_VIF) {
4c968709
ED
1327 ret = vif_add(net, mrt, &vif,
1328 sk == rtnl_dereference(mrt->mroute_sk));
132adf54 1329 } else {
0c12295a 1330 ret = vif_delete(mrt, vif.vifc_vifi, 0, NULL);
132adf54 1331 }
29e97d21 1332 break;
7ef8f65d
NA
1333 /* Manipulate the forwarding caches. These live
1334 * in a sort of kernel/user symbiosis.
1335 */
132adf54
SH
1336 case MRT_ADD_MFC:
1337 case MRT_DEL_MFC:
660b26dc
ND
1338 parent = -1;
1339 case MRT_ADD_MFC_PROXY:
1340 case MRT_DEL_MFC_PROXY:
29e97d21
NA
1341 if (optlen != sizeof(mfc)) {
1342 ret = -EINVAL;
1343 break;
1344 }
1345 if (copy_from_user(&mfc, optval, sizeof(mfc))) {
1346 ret = -EFAULT;
1347 break;
1348 }
660b26dc
ND
1349 if (parent == 0)
1350 parent = mfc.mfcc_parent;
660b26dc
ND
1351 if (optname == MRT_DEL_MFC || optname == MRT_DEL_MFC_PROXY)
1352 ret = ipmr_mfc_delete(mrt, &mfc, parent);
132adf54 1353 else
4c968709 1354 ret = ipmr_mfc_add(net, mrt, &mfc,
660b26dc
ND
1355 sk == rtnl_dereference(mrt->mroute_sk),
1356 parent);
29e97d21 1357 break;
7ef8f65d 1358 /* Control PIM assert. */
132adf54 1359 case MRT_ASSERT:
29e97d21
NA
1360 if (optlen != sizeof(val)) {
1361 ret = -EINVAL;
1362 break;
1363 }
1364 if (get_user(val, (int __user *)optval)) {
1365 ret = -EFAULT;
1366 break;
1367 }
1368 mrt->mroute_do_assert = val;
1369 break;
132adf54 1370 case MRT_PIM:
1973a4ea 1371 if (!ipmr_pimsm_enabled()) {
29e97d21
NA
1372 ret = -ENOPROTOOPT;
1373 break;
1374 }
1375 if (optlen != sizeof(val)) {
1376 ret = -EINVAL;
1377 break;
1378 }
1379 if (get_user(val, (int __user *)optval)) {
1380 ret = -EFAULT;
1381 break;
1382 }
ba93ef74 1383
29e97d21
NA
1384 val = !!val;
1385 if (val != mrt->mroute_do_pim) {
1386 mrt->mroute_do_pim = val;
1387 mrt->mroute_do_assert = val;
1da177e4 1388 }
29e97d21 1389 break;
f0ad0860 1390 case MRT_TABLE:
29e97d21
NA
1391 if (!IS_BUILTIN(CONFIG_IP_MROUTE_MULTIPLE_TABLES)) {
1392 ret = -ENOPROTOOPT;
1393 break;
1394 }
1395 if (optlen != sizeof(uval)) {
1396 ret = -EINVAL;
1397 break;
1398 }
1399 if (get_user(uval, (u32 __user *)optval)) {
1400 ret = -EFAULT;
1401 break;
1402 }
f0ad0860 1403
4c968709
ED
1404 if (sk == rtnl_dereference(mrt->mroute_sk)) {
1405 ret = -EBUSY;
1406 } else {
29e97d21 1407 mrt = ipmr_new_table(net, uval);
1113ebbc
NA
1408 if (IS_ERR(mrt))
1409 ret = PTR_ERR(mrt);
5e1859fb 1410 else
29e97d21 1411 raw_sk(sk)->ipmr_table = uval;
4c968709 1412 }
29e97d21 1413 break;
7ef8f65d 1414 /* Spurious command, or MRT_VERSION which you cannot set. */
132adf54 1415 default:
29e97d21 1416 ret = -ENOPROTOOPT;
1da177e4 1417 }
29e97d21
NA
1418out_unlock:
1419 rtnl_unlock();
1420out:
1421 return ret;
1da177e4
LT
1422}
1423
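/* Typical use of the options handled above from a userspace routing daemon:
 * open a raw IGMP socket, become the mroute socket with MRT_INIT, then add
 * VIFs and MFC entries. A minimal, hedged sketch (the addresses, vif numbers
 * and the helper name are placeholders, not part of this file):
 *
 *	#include <linux/mroute.h>
 *	#include <netinet/in.h>
 *	#include <arpa/inet.h>
 *	#include <sys/socket.h>
 *	#include <string.h>
 *
 *	static int mroute_setup(void)
 *	{
 *		int one = 1;
 *		struct vifctl vc;
 *		struct mfcctl mc;
 *		int fd = socket(AF_INET, SOCK_RAW, IPPROTO_IGMP);
 *
 *		if (fd < 0 ||
 *		    setsockopt(fd, IPPROTO_IP, MRT_INIT, &one, sizeof(one)) < 0)
 *			return -1;
 *
 *		memset(&vc, 0, sizeof(vc));	// VIF 0 on a local address
 *		vc.vifc_vifi = 0;
 *		vc.vifc_threshold = 1;
 *		vc.vifc_lcl_addr.s_addr = inet_addr("192.0.2.1");
 *		setsockopt(fd, IPPROTO_IP, MRT_ADD_VIF, &vc, sizeof(vc));
 *
 *		memset(&mc, 0, sizeof(mc));	// (S,G) from VIF 0 to VIF 1
 *		mc.mfcc_origin.s_addr   = inet_addr("198.51.100.1");
 *		mc.mfcc_mcastgrp.s_addr = inet_addr("239.1.1.1");
 *		mc.mfcc_parent = 0;
 *		mc.mfcc_ttls[1] = 1;
 *		setsockopt(fd, IPPROTO_IP, MRT_ADD_MFC, &mc, sizeof(mc));
 *		return fd;
 *	}
 */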
1424/* Getsockopt support for the multicast routing system. */
c354e124 1425int ip_mroute_getsockopt(struct sock *sk, int optname, char __user *optval, int __user *optlen)
1da177e4
LT
1426{
1427 int olr;
1428 int val;
4feb88e5 1429 struct net *net = sock_net(sk);
f0ad0860
PM
1430 struct mr_table *mrt;
1431
5e1859fb
ED
1432 if (sk->sk_type != SOCK_RAW ||
1433 inet_sk(sk)->inet_num != IPPROTO_IGMP)
1434 return -EOPNOTSUPP;
1435
f0ad0860 1436 mrt = ipmr_get_table(net, raw_sk(sk)->ipmr_table ? : RT_TABLE_DEFAULT);
51456b29 1437 if (!mrt)
f0ad0860 1438 return -ENOENT;
1da177e4 1439
fe9ef3ce
NA
1440 switch (optname) {
1441 case MRT_VERSION:
1442 val = 0x0305;
1443 break;
1444 case MRT_PIM:
1973a4ea 1445 if (!ipmr_pimsm_enabled())
fe9ef3ce
NA
1446 return -ENOPROTOOPT;
1447 val = mrt->mroute_do_pim;
1448 break;
1449 case MRT_ASSERT:
1450 val = mrt->mroute_do_assert;
1451 break;
1452 default:
1da177e4 1453 return -ENOPROTOOPT;
fe9ef3ce 1454 }
1da177e4
LT
1455
1456 if (get_user(olr, optlen))
1457 return -EFAULT;
1da177e4
LT
1458 olr = min_t(unsigned int, olr, sizeof(int));
1459 if (olr < 0)
1460 return -EINVAL;
c354e124 1461 if (put_user(olr, optlen))
1da177e4 1462 return -EFAULT;
c354e124 1463 if (copy_to_user(optval, &val, olr))
1da177e4
LT
1464 return -EFAULT;
1465 return 0;
1466}
1467
7ef8f65d 1468/* The IP multicast ioctl support routines. */
1da177e4
LT
1469int ipmr_ioctl(struct sock *sk, int cmd, void __user *arg)
1470{
1471 struct sioc_sg_req sr;
1472 struct sioc_vif_req vr;
1473 struct vif_device *vif;
1474 struct mfc_cache *c;
4feb88e5 1475 struct net *net = sock_net(sk);
f0ad0860
PM
1476 struct mr_table *mrt;
1477
1478 mrt = ipmr_get_table(net, raw_sk(sk)->ipmr_table ? : RT_TABLE_DEFAULT);
51456b29 1479 if (!mrt)
f0ad0860 1480 return -ENOENT;
e905a9ed 1481
132adf54
SH
1482 switch (cmd) {
1483 case SIOCGETVIFCNT:
c354e124 1484 if (copy_from_user(&vr, arg, sizeof(vr)))
132adf54 1485 return -EFAULT;
0c12295a 1486 if (vr.vifi >= mrt->maxvif)
132adf54
SH
1487 return -EINVAL;
1488 read_lock(&mrt_lock);
0c12295a
PM
1489 vif = &mrt->vif_table[vr.vifi];
1490 if (VIF_EXISTS(mrt, vr.vifi)) {
c354e124
JK
1491 vr.icount = vif->pkt_in;
1492 vr.ocount = vif->pkt_out;
1493 vr.ibytes = vif->bytes_in;
1494 vr.obytes = vif->bytes_out;
1da177e4 1495 read_unlock(&mrt_lock);
1da177e4 1496
c354e124 1497 if (copy_to_user(arg, &vr, sizeof(vr)))
132adf54
SH
1498 return -EFAULT;
1499 return 0;
1500 }
1501 read_unlock(&mrt_lock);
1502 return -EADDRNOTAVAIL;
1503 case SIOCGETSGCNT:
c354e124 1504 if (copy_from_user(&sr, arg, sizeof(sr)))
132adf54
SH
1505 return -EFAULT;
1506
a8c9486b 1507 rcu_read_lock();
0c12295a 1508 c = ipmr_cache_find(mrt, sr.src.s_addr, sr.grp.s_addr);
132adf54
SH
1509 if (c) {
1510 sr.pktcnt = c->mfc_un.res.pkt;
1511 sr.bytecnt = c->mfc_un.res.bytes;
1512 sr.wrong_if = c->mfc_un.res.wrong_if;
a8c9486b 1513 rcu_read_unlock();
132adf54 1514
c354e124 1515 if (copy_to_user(arg, &sr, sizeof(sr)))
132adf54
SH
1516 return -EFAULT;
1517 return 0;
1518 }
a8c9486b 1519 rcu_read_unlock();
132adf54
SH
1520 return -EADDRNOTAVAIL;
1521 default:
1522 return -ENOIOCTLCMD;
1da177e4
LT
1523 }
1524}
1525
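/* For reference, reading the per-(S,G) counters exposed above from userspace;
 * a minimal, hedged sketch ("mroute_fd" is assumed to be the mroute socket
 * and the addresses are placeholders):
 *
 *	#include <linux/mroute.h>	// struct sioc_sg_req, SIOCGETSGCNT
 *	#include <arpa/inet.h>
 *	#include <sys/ioctl.h>
 *	#include <string.h>
 *	#include <stdio.h>
 *
 *	static void print_sg_counters(int mroute_fd)
 *	{
 *		struct sioc_sg_req sr;
 *
 *		memset(&sr, 0, sizeof(sr));
 *		sr.src.s_addr = inet_addr("198.51.100.1");
 *		sr.grp.s_addr = inet_addr("239.1.1.1");
 *		if (ioctl(mroute_fd, SIOCGETSGCNT, &sr) == 0)
 *			printf("pkt=%lu bytes=%lu wrong_if=%lu\n",
 *			       sr.pktcnt, sr.bytecnt, sr.wrong_if);
 *	}
 */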
709b46e8
EB
1526#ifdef CONFIG_COMPAT
1527struct compat_sioc_sg_req {
1528 struct in_addr src;
1529 struct in_addr grp;
1530 compat_ulong_t pktcnt;
1531 compat_ulong_t bytecnt;
1532 compat_ulong_t wrong_if;
1533};
1534
ca6b8bb0
DM
1535struct compat_sioc_vif_req {
1536 vifi_t vifi; /* Which iface */
1537 compat_ulong_t icount;
1538 compat_ulong_t ocount;
1539 compat_ulong_t ibytes;
1540 compat_ulong_t obytes;
1541};
1542
709b46e8
EB
1543int ipmr_compat_ioctl(struct sock *sk, unsigned int cmd, void __user *arg)
1544{
0033d5ad 1545 struct compat_sioc_sg_req sr;
ca6b8bb0
DM
1546 struct compat_sioc_vif_req vr;
1547 struct vif_device *vif;
709b46e8
EB
1548 struct mfc_cache *c;
1549 struct net *net = sock_net(sk);
1550 struct mr_table *mrt;
1551
1552 mrt = ipmr_get_table(net, raw_sk(sk)->ipmr_table ? : RT_TABLE_DEFAULT);
51456b29 1553 if (!mrt)
709b46e8
EB
1554 return -ENOENT;
1555
1556 switch (cmd) {
ca6b8bb0
DM
1557 case SIOCGETVIFCNT:
1558 if (copy_from_user(&vr, arg, sizeof(vr)))
1559 return -EFAULT;
1560 if (vr.vifi >= mrt->maxvif)
1561 return -EINVAL;
1562 read_lock(&mrt_lock);
1563 vif = &mrt->vif_table[vr.vifi];
1564 if (VIF_EXISTS(mrt, vr.vifi)) {
1565 vr.icount = vif->pkt_in;
1566 vr.ocount = vif->pkt_out;
1567 vr.ibytes = vif->bytes_in;
1568 vr.obytes = vif->bytes_out;
1569 read_unlock(&mrt_lock);
1570
1571 if (copy_to_user(arg, &vr, sizeof(vr)))
1572 return -EFAULT;
1573 return 0;
1574 }
1575 read_unlock(&mrt_lock);
1576 return -EADDRNOTAVAIL;
709b46e8
EB
1577 case SIOCGETSGCNT:
1578 if (copy_from_user(&sr, arg, sizeof(sr)))
1579 return -EFAULT;
1580
1581 rcu_read_lock();
1582 c = ipmr_cache_find(mrt, sr.src.s_addr, sr.grp.s_addr);
1583 if (c) {
1584 sr.pktcnt = c->mfc_un.res.pkt;
1585 sr.bytecnt = c->mfc_un.res.bytes;
1586 sr.wrong_if = c->mfc_un.res.wrong_if;
1587 rcu_read_unlock();
1588
1589 if (copy_to_user(arg, &sr, sizeof(sr)))
1590 return -EFAULT;
1591 return 0;
1592 }
1593 rcu_read_unlock();
1594 return -EADDRNOTAVAIL;
1595 default:
1596 return -ENOIOCTLCMD;
1597 }
1598}
1599#endif
1600
1da177e4
LT
1601static int ipmr_device_event(struct notifier_block *this, unsigned long event, void *ptr)
1602{
351638e7 1603 struct net_device *dev = netdev_notifier_info_to_dev(ptr);
4feb88e5 1604 struct net *net = dev_net(dev);
f0ad0860 1605 struct mr_table *mrt;
1da177e4
LT
1606 struct vif_device *v;
1607 int ct;
e9dc8653 1608
1da177e4
LT
1609 if (event != NETDEV_UNREGISTER)
1610 return NOTIFY_DONE;
f0ad0860
PM
1611
1612 ipmr_for_each_table(mrt, net) {
1613 v = &mrt->vif_table[0];
1614 for (ct = 0; ct < mrt->maxvif; ct++, v++) {
1615 if (v->dev == dev)
e92036a6 1616 vif_delete(mrt, ct, 1, NULL);
f0ad0860 1617 }
1da177e4
LT
1618 }
1619 return NOTIFY_DONE;
1620}
1621
c354e124 1622static struct notifier_block ip_mr_notifier = {
1da177e4
LT
1623 .notifier_call = ipmr_device_event,
1624};
1625
7ef8f65d
NA
1626/* Encapsulate a packet by attaching a valid IPIP header to it.
1627 * This avoids tunnel drivers and other mess and gives us the speed so
1628 * important for multicast video.
1da177e4 1629 */
b6a7719a
HFS
1630static void ip_encap(struct net *net, struct sk_buff *skb,
1631 __be32 saddr, __be32 daddr)
1da177e4 1632{
8856dfa3 1633 struct iphdr *iph;
b71d1d42 1634 const struct iphdr *old_iph = ip_hdr(skb);
8856dfa3
ACM
1635
1636 skb_push(skb, sizeof(struct iphdr));
b0e380b1 1637 skb->transport_header = skb->network_header;
8856dfa3 1638 skb_reset_network_header(skb);
eddc9ec5 1639 iph = ip_hdr(skb);
1da177e4 1640
a8cb16dd 1641 iph->version = 4;
e023dd64
ACM
1642 iph->tos = old_iph->tos;
1643 iph->ttl = old_iph->ttl;
1da177e4
LT
1644 iph->frag_off = 0;
1645 iph->daddr = daddr;
1646 iph->saddr = saddr;
1647 iph->protocol = IPPROTO_IPIP;
1648 iph->ihl = 5;
1649 iph->tot_len = htons(skb->len);
b6a7719a 1650 ip_select_ident(net, skb, NULL);
1da177e4
LT
1651 ip_send_check(iph);
1652
1da177e4
LT
1653 memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
1654 nf_reset(skb);
1655}
1656
0c4b51f0
EB
1657static inline int ipmr_forward_finish(struct net *net, struct sock *sk,
1658 struct sk_buff *skb)
1da177e4 1659{
a8cb16dd 1660 struct ip_options *opt = &(IPCB(skb)->opt);
1da177e4 1661
73186df8
DM
1662 IP_INC_STATS(net, IPSTATS_MIB_OUTFORWDATAGRAMS);
1663 IP_ADD_STATS(net, IPSTATS_MIB_OUTOCTETS, skb->len);
1da177e4
LT
1664
1665 if (unlikely(opt->optlen))
1666 ip_forward_options(skb);
1667
13206b6b 1668 return dst_output(net, sk, skb);
1da177e4
LT
1669}
1670
7ef8f65d 1671/* Processing handlers for ipmr_forward */
1da177e4 1672
0c12295a
PM
1673static void ipmr_queue_xmit(struct net *net, struct mr_table *mrt,
1674 struct sk_buff *skb, struct mfc_cache *c, int vifi)
1da177e4 1675{
eddc9ec5 1676 const struct iphdr *iph = ip_hdr(skb);
0c12295a 1677 struct vif_device *vif = &mrt->vif_table[vifi];
1da177e4
LT
1678 struct net_device *dev;
1679 struct rtable *rt;
31e4543d 1680 struct flowi4 fl4;
1da177e4
LT
1681 int encap = 0;
1682
51456b29 1683 if (!vif->dev)
1da177e4
LT
1684 goto out_free;
1685
1da177e4
LT
1686 if (vif->flags & VIFF_REGISTER) {
1687 vif->pkt_out++;
c354e124 1688 vif->bytes_out += skb->len;
cf3677ae
PE
1689 vif->dev->stats.tx_bytes += skb->len;
1690 vif->dev->stats.tx_packets++;
0c12295a 1691 ipmr_cache_report(mrt, skb, vifi, IGMPMSG_WHOLEPKT);
69ebbf58 1692 goto out_free;
1da177e4 1693 }
1da177e4 1694
a8cb16dd 1695 if (vif->flags & VIFF_TUNNEL) {
31e4543d 1696 rt = ip_route_output_ports(net, &fl4, NULL,
78fbfd8a
DM
1697 vif->remote, vif->local,
1698 0, 0,
1699 IPPROTO_IPIP,
1700 RT_TOS(iph->tos), vif->link);
b23dd4fe 1701 if (IS_ERR(rt))
1da177e4
LT
1702 goto out_free;
1703 encap = sizeof(struct iphdr);
1704 } else {
31e4543d 1705 rt = ip_route_output_ports(net, &fl4, NULL, iph->daddr, 0,
78fbfd8a
DM
1706 0, 0,
1707 IPPROTO_IPIP,
1708 RT_TOS(iph->tos), vif->link);
b23dd4fe 1709 if (IS_ERR(rt))
1da177e4
LT
1710 goto out_free;
1711 }
1712
d8d1f30b 1713 dev = rt->dst.dev;
1da177e4 1714
d8d1f30b 1715 if (skb->len+encap > dst_mtu(&rt->dst) && (ntohs(iph->frag_off) & IP_DF)) {
1716 /* Do not fragment multicasts. Alas, IPv4 does not
1717 * allow us to send ICMP here, so such packets will
1718 * disappear into a black hole.
1da177e4 1719 */
73186df8 1720 IP_INC_STATS(net, IPSTATS_MIB_FRAGFAILS);
1da177e4
LT
1721 ip_rt_put(rt);
1722 goto out_free;
1723 }
1724
d8d1f30b 1725 encap += LL_RESERVED_SPACE(dev) + rt->dst.header_len;
1da177e4
LT
1726
1727 if (skb_cow(skb, encap)) {
e905a9ed 1728 ip_rt_put(rt);
1da177e4
LT
1729 goto out_free;
1730 }
1731
1732 vif->pkt_out++;
c354e124 1733 vif->bytes_out += skb->len;
1da177e4 1734
adf30907 1735 skb_dst_drop(skb);
d8d1f30b 1736 skb_dst_set(skb, &rt->dst);
eddc9ec5 1737 ip_decrease_ttl(ip_hdr(skb));
1da177e4
LT
1738
1739 /* FIXME: forward and output firewalls used to be called here.
a8cb16dd
ED
1740 * What do we do with netfilter? -- RR
1741 */
1da177e4 1742 if (vif->flags & VIFF_TUNNEL) {
b6a7719a 1743 ip_encap(net, skb, vif->local, vif->remote);
1da177e4 1744 /* FIXME: extra output firewall step used to be here. --RR */
2f4c02d4
PE
1745 vif->dev->stats.tx_packets++;
1746 vif->dev->stats.tx_bytes += skb->len;
1da177e4
LT
1747 }
1748
1749 IPCB(skb)->flags |= IPSKB_FORWARDED;
1750
1751 /* RFC1584 teaches that a DVMRP/PIM router must deliver packets locally
1752 * not only before forwarding, but also after forwarding on all output
1753 * interfaces. Clearly, if the mrouter runs a multicasting
1754 * program, that program should receive packets regardless of which
1755 * interface it has joined on.
1756 * If we do not do this, the program would have to join on all
1757 * interfaces. On the other hand, a multihoming host (or a router, but
1758 * not an mrouter) cannot join on more than one interface - it would
1759 * result in receiving multiple copies of each packet.
1760 */
29a26a56
EB
1761 NF_HOOK(NFPROTO_IPV4, NF_INET_FORWARD,
1762 net, NULL, skb, skb->dev, dev,
1da177e4
LT
1763 ipmr_forward_finish);
1764 return;
1765
1766out_free:
1767 kfree_skb(skb);
1da177e4
LT
1768}
1769
0c12295a 1770static int ipmr_find_vif(struct mr_table *mrt, struct net_device *dev)
1da177e4
LT
1771{
1772 int ct;
0c12295a
PM
1773
1774 for (ct = mrt->maxvif-1; ct >= 0; ct--) {
1775 if (mrt->vif_table[ct].dev == dev)
1da177e4
LT
1776 break;
1777 }
1778 return ct;
1779}
1780
1781/* "local" means that we should preserve one skb (for local delivery) */
c4854ec8
RR
1782static void ip_mr_forward(struct net *net, struct mr_table *mrt,
1783 struct sk_buff *skb, struct mfc_cache *cache,
1784 int local)
1da177e4
LT
1785{
1786 int psend = -1;
1787 int vif, ct;
660b26dc 1788 int true_vifi = ipmr_find_vif(mrt, skb->dev);
1da177e4
LT
1789
1790 vif = cache->mfc_parent;
1791 cache->mfc_un.res.pkt++;
1792 cache->mfc_un.res.bytes += skb->len;
1793
360eb5da 1794 if (cache->mfc_origin == htonl(INADDR_ANY) && true_vifi >= 0) {
660b26dc
ND
1795 struct mfc_cache *cache_proxy;
1796
1797 /* For an (*,G) entry, we only check that the incomming
1798 * interface is part of the static tree.
1799 */
1800 cache_proxy = ipmr_cache_find_any_parent(mrt, vif);
1801 if (cache_proxy &&
1802 cache_proxy->mfc_un.res.ttls[true_vifi] < 255)
1803 goto forward;
1804 }
1805
7ef8f65d 1806 /* Wrong interface: drop packet and (maybe) send PIM assert. */
0c12295a 1807 if (mrt->vif_table[vif].dev != skb->dev) {
c7537967 1808 if (rt_is_output_route(skb_rtable(skb))) {
1da177e4 1809 /* It is our own packet, looped back.
a8cb16dd
ED
1810 * Very complicated situation...
1811 *
1812 * The best workaround until routing daemons will be
1813 * fixed is not to redistribute packet, if it was
1814 * send through wrong interface. It means, that
1815 * multicast applications WILL NOT work for
1816 * (S,G), which have default multicast route pointing
1817 * to wrong oif. In any case, it is not a good
1818 * idea to use multicasting applications on router.
1da177e4
LT
1819 */
1820 goto dont_forward;
1821 }
1822
1823 cache->mfc_un.res.wrong_if++;
1da177e4 1824
0c12295a 1825 if (true_vifi >= 0 && mrt->mroute_do_assert &&
1da177e4 1826 /* pimsm uses asserts, when switching from RPT to SPT,
a8cb16dd
ED
1827 * so that we cannot check that packet arrived on an oif.
1828 * It is bad, but otherwise we would need to move pretty
1829 * large chunk of pimd to kernel. Ough... --ANK
1da177e4 1830 */
0c12295a 1831 (mrt->mroute_do_pim ||
6f9374a9 1832 cache->mfc_un.res.ttls[true_vifi] < 255) &&
e905a9ed 1833 time_after(jiffies,
1da177e4
LT
1834 cache->mfc_un.res.last_assert + MFC_ASSERT_THRESH)) {
1835 cache->mfc_un.res.last_assert = jiffies;
0c12295a 1836 ipmr_cache_report(mrt, skb, true_vifi, IGMPMSG_WRONGVIF);
1da177e4
LT
1837 }
1838 goto dont_forward;
1839 }
1840
660b26dc 1841forward:
0c12295a
PM
1842 mrt->vif_table[vif].pkt_in++;
1843 mrt->vif_table[vif].bytes_in += skb->len;
1da177e4 1844
7ef8f65d 1845 /* Forward the frame */
360eb5da
ND
1846 if (cache->mfc_origin == htonl(INADDR_ANY) &&
1847 cache->mfc_mcastgrp == htonl(INADDR_ANY)) {
660b26dc
ND
1848 if (true_vifi >= 0 &&
1849 true_vifi != cache->mfc_parent &&
1850 ip_hdr(skb)->ttl >
1851 cache->mfc_un.res.ttls[cache->mfc_parent]) {
1852 /* It's an (*,*) entry and the packet is not coming from
1853 * the upstream: forward the packet to the upstream
1854 * only.
1855 */
1856 psend = cache->mfc_parent;
1857 goto last_forward;
1858 }
1859 goto dont_forward;
1860 }
a8cb16dd
ED
1861 for (ct = cache->mfc_un.res.maxvif - 1;
1862 ct >= cache->mfc_un.res.minvif; ct--) {
660b26dc 1863 /* For (*,G) entry, don't forward to the incoming interface */
360eb5da
ND
1864 if ((cache->mfc_origin != htonl(INADDR_ANY) ||
1865 ct != true_vifi) &&
660b26dc 1866 ip_hdr(skb)->ttl > cache->mfc_un.res.ttls[ct]) {
1da177e4
LT
1867 if (psend != -1) {
1868 struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
a8cb16dd 1869
1da177e4 1870 if (skb2)
0c12295a
PM
1871 ipmr_queue_xmit(net, mrt, skb2, cache,
1872 psend);
1da177e4 1873 }
c354e124 1874 psend = ct;
1da177e4
LT
1875 }
1876 }
660b26dc 1877last_forward:
1da177e4
LT
1878 if (psend != -1) {
1879 if (local) {
1880 struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
a8cb16dd 1881
1da177e4 1882 if (skb2)
0c12295a 1883 ipmr_queue_xmit(net, mrt, skb2, cache, psend);
1da177e4 1884 } else {
0c12295a 1885 ipmr_queue_xmit(net, mrt, skb, cache, psend);
c4854ec8 1886 return;
1da177e4
LT
1887 }
1888 }
1889
1890dont_forward:
1891 if (!local)
1892 kfree_skb(skb);
1da177e4
LT
1893}
1894
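/* Select the mr_table responsible for this packet: build a flowi4 key from
 * the IP header (treating locally generated, looped-back packets as arriving
 * on the loopback interface) and run it through the multicast fib rules.
 */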
static struct mr_table *ipmr_rt_fib_lookup(struct net *net, struct sk_buff *skb)
{
	struct rtable *rt = skb_rtable(skb);
	struct iphdr *iph = ip_hdr(skb);
	struct flowi4 fl4 = {
		.daddr = iph->daddr,
		.saddr = iph->saddr,
		.flowi4_tos = RT_TOS(iph->tos),
		.flowi4_oif = (rt_is_output_route(rt) ?
			       skb->dev->ifindex : 0),
		.flowi4_iif = (rt_is_output_route(rt) ?
			       LOOPBACK_IFINDEX :
			       skb->dev->ifindex),
		.flowi4_mark = skb->mark,
	};
	struct mr_table *mrt;
	int err;

	err = ipmr_fib_lookup(net, &fl4, &mrt);
	if (err)
		return ERR_PTR(err);
	return mrt;
}

/* Multicast packets for forwarding arrive here.
 * Called with rcu_read_lock().
 */
int ip_mr_input(struct sk_buff *skb)
{
	struct mfc_cache *cache;
	struct net *net = dev_net(skb->dev);
	int local = skb_rtable(skb)->rt_flags & RTCF_LOCAL;
	struct mr_table *mrt;

	/* Packet is looped back after forwarding, it should not be
	 * forwarded a second time, but it still can be delivered locally.
	 */
	if (IPCB(skb)->flags & IPSKB_FORWARDED)
		goto dont_forward;

	mrt = ipmr_rt_fib_lookup(net, skb);
	if (IS_ERR(mrt)) {
		kfree_skb(skb);
		return PTR_ERR(mrt);
	}
	if (!local) {
		if (IPCB(skb)->opt.router_alert) {
			if (ip_call_ra_chain(skb))
				return 0;
		} else if (ip_hdr(skb)->protocol == IPPROTO_IGMP) {
			/* IGMPv1 (and broken IGMPv2 implementations such as
			 * Cisco IOS <= 11.2(8)) do not put the router alert
			 * option into IGMP packets destined to routable
			 * groups. That is very bad, because it means
			 * we would be able to forward NO IGMP messages.
			 */
			struct sock *mroute_sk;

			mroute_sk = rcu_dereference(mrt->mroute_sk);
			if (mroute_sk) {
				nf_reset(skb);
				raw_rcv(mroute_sk, skb);
				return 0;
			}
		}
	}

	/* already under rcu_read_lock() */
	cache = ipmr_cache_find(mrt, ip_hdr(skb)->saddr, ip_hdr(skb)->daddr);
	if (!cache) {
		int vif = ipmr_find_vif(mrt, skb->dev);

		if (vif >= 0)
			cache = ipmr_cache_find_any(mrt, ip_hdr(skb)->daddr,
						    vif);
	}

	/* No usable cache entry */
	if (!cache) {
		int vif;

		if (local) {
			struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
			ip_local_deliver(skb);
			if (!skb2)
				return -ENOBUFS;
			skb = skb2;
		}

		read_lock(&mrt_lock);
		vif = ipmr_find_vif(mrt, skb->dev);
		if (vif >= 0) {
			int err2 = ipmr_cache_unresolved(mrt, vif, skb);
			read_unlock(&mrt_lock);

			return err2;
		}
		read_unlock(&mrt_lock);
		kfree_skb(skb);
		return -ENODEV;
	}

	read_lock(&mrt_lock);
	ip_mr_forward(net, mrt, skb, cache, local);
	read_unlock(&mrt_lock);

	if (local)
		return ip_local_deliver(skb);

	return 0;

dont_forward:
	if (local)
		return ip_local_deliver(skb);
	kfree_skb(skb);
	return 0;
}

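/* A minimal userspace sketch (for illustration only, not part of this file)
 * of the control path that feeds the cache used above. When ip_mr_input()
 * finds no MFC entry, ipmr_cache_unresolved() queues the skb and sends an
 * IGMPMSG_NOCACHE upcall on the mroute socket (the raw IGMP socket that
 * performed MRT_INIT); a daemon such as mrouted or pimd then resolves the
 * miss roughly like this:
 *
 *	char buf[1500];
 *	struct igmpmsg *msg = (struct igmpmsg *)buf;
 *
 *	read(mroute_sock, buf, sizeof(buf));	// upcall from the kernel
 *	if (msg->im_msgtype == IGMPMSG_NOCACHE) {
 *		struct mfcctl mc = {};
 *
 *		mc.mfcc_origin   = msg->im_src;	 // the (S,G) that missed
 *		mc.mfcc_mcastgrp = msg->im_dst;
 *		mc.mfcc_parent   = msg->im_vif;	 // arrival vif
 *		mc.mfcc_ttls[2]  = 1;		 // forward on vif 2 (example)
 *		setsockopt(mroute_sock, IPPROTO_IP, MRT_ADD_MFC,
 *			   &mc, sizeof(mc));
 *	}
 *
 * Once the entry is installed, the queued packets are replayed and later
 * packets hit ipmr_cache_find() directly.
 */
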
#ifdef CONFIG_IP_PIMSM_V1
/* Handle IGMP messages of PIMv1 */
int pim_rcv_v1(struct sk_buff *skb)
{
	struct igmphdr *pim;
	struct net *net = dev_net(skb->dev);
	struct mr_table *mrt;

	if (!pskb_may_pull(skb, sizeof(*pim) + sizeof(struct iphdr)))
		goto drop;

	pim = igmp_hdr(skb);

	mrt = ipmr_rt_fib_lookup(net, skb);
	if (IS_ERR(mrt))
		goto drop;
	if (!mrt->mroute_do_pim ||
	    pim->group != PIM_V1_VERSION || pim->code != PIM_V1_REGISTER)
		goto drop;

	if (__pim_rcv(mrt, skb, sizeof(*pim))) {
drop:
		kfree_skb(skb);
	}
	return 0;
}
#endif

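/* PIMv2 Register handling. Note the checksum rule below: a Register is
 * dropped only if both the header-only checksum (covering just the PIM
 * header) and the whole-packet checksum (as computed by some older peers)
 * fail to verify.
 */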
#ifdef CONFIG_IP_PIMSM_V2
static int pim_rcv(struct sk_buff *skb)
{
	struct pimreghdr *pim;
	struct net *net = dev_net(skb->dev);
	struct mr_table *mrt;

	if (!pskb_may_pull(skb, sizeof(*pim) + sizeof(struct iphdr)))
		goto drop;

	pim = (struct pimreghdr *)skb_transport_header(skb);
	if (pim->type != ((PIM_VERSION << 4) | (PIM_REGISTER)) ||
	    (pim->flags & PIM_NULL_REGISTER) ||
	    (ip_compute_csum((void *)pim, sizeof(*pim)) != 0 &&
	     csum_fold(skb_checksum(skb, 0, skb->len, 0))))
		goto drop;

	mrt = ipmr_rt_fib_lookup(net, skb);
	if (IS_ERR(mrt))
		goto drop;
	if (__pim_rcv(mrt, skb, sizeof(*pim))) {
drop:
		kfree_skb(skb);
	}
	return 0;
}
#endif

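/* Fill the route-specific attributes of an IPMR netlink message: RTA_IIF
 * for the parent vif, a nested RTA_MULTIPATH holding one rtnexthop per oif
 * (with the TTL threshold in rtnh_hops), and RTA_MFC_STATS counters.
 */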
static int __ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
			      struct mfc_cache *c, struct rtmsg *rtm)
{
	int ct;
	struct rtnexthop *nhp;
	struct nlattr *mp_attr;
	struct rta_mfc_stats mfcs;

	/* If cache is unresolved, don't try to parse IIF and OIF */
	if (c->mfc_parent >= MAXVIFS)
		return -ENOENT;

	if (VIF_EXISTS(mrt, c->mfc_parent) &&
	    nla_put_u32(skb, RTA_IIF, mrt->vif_table[c->mfc_parent].dev->ifindex) < 0)
		return -EMSGSIZE;

	if (!(mp_attr = nla_nest_start(skb, RTA_MULTIPATH)))
		return -EMSGSIZE;

	for (ct = c->mfc_un.res.minvif; ct < c->mfc_un.res.maxvif; ct++) {
		if (VIF_EXISTS(mrt, ct) && c->mfc_un.res.ttls[ct] < 255) {
			if (!(nhp = nla_reserve_nohdr(skb, sizeof(*nhp)))) {
				nla_nest_cancel(skb, mp_attr);
				return -EMSGSIZE;
			}

			nhp->rtnh_flags = 0;
			nhp->rtnh_hops = c->mfc_un.res.ttls[ct];
			nhp->rtnh_ifindex = mrt->vif_table[ct].dev->ifindex;
			nhp->rtnh_len = sizeof(*nhp);
		}
	}

	nla_nest_end(skb, mp_attr);

	mfcs.mfcs_packets = c->mfc_un.res.pkt;
	mfcs.mfcs_bytes = c->mfc_un.res.bytes;
	mfcs.mfcs_wrong_if = c->mfc_un.res.wrong_if;
	if (nla_put_64bit(skb, RTA_MFC_STATS, sizeof(mfcs), &mfcs, RTA_PAD) < 0)
		return -EMSGSIZE;

	rtm->rtm_type = RTN_MULTICAST;
	return 1;
}

int ipmr_get_route(struct net *net, struct sk_buff *skb,
		   __be32 saddr, __be32 daddr,
		   struct rtmsg *rtm, int nowait)
{
	struct mfc_cache *cache;
	struct mr_table *mrt;
	int err;

	mrt = ipmr_get_table(net, RT_TABLE_DEFAULT);
	if (!mrt)
		return -ENOENT;

	rcu_read_lock();
	cache = ipmr_cache_find(mrt, saddr, daddr);
	if (!cache && skb->dev) {
		int vif = ipmr_find_vif(mrt, skb->dev);

		if (vif >= 0)
			cache = ipmr_cache_find_any(mrt, daddr, vif);
	}
	if (!cache) {
		struct sk_buff *skb2;
		struct iphdr *iph;
		struct net_device *dev;
		int vif = -1;

		if (nowait) {
			rcu_read_unlock();
			return -EAGAIN;
		}

		dev = skb->dev;
		read_lock(&mrt_lock);
		if (dev)
			vif = ipmr_find_vif(mrt, dev);
		if (vif < 0) {
			read_unlock(&mrt_lock);
			rcu_read_unlock();
			return -ENODEV;
		}
		skb2 = skb_clone(skb, GFP_ATOMIC);
		if (!skb2) {
			read_unlock(&mrt_lock);
			rcu_read_unlock();
			return -ENOMEM;
		}

		skb_push(skb2, sizeof(struct iphdr));
		skb_reset_network_header(skb2);
		iph = ip_hdr(skb2);
		iph->ihl = sizeof(struct iphdr) >> 2;
		iph->saddr = saddr;
		iph->daddr = daddr;
		iph->version = 0;
		err = ipmr_cache_unresolved(mrt, vif, skb2);
		read_unlock(&mrt_lock);
		rcu_read_unlock();
		return err;
	}

	read_lock(&mrt_lock);
	err = __ipmr_fill_mroute(mrt, skb, cache, rtm);
	read_unlock(&mrt_lock);
	rcu_read_unlock();
	return err;
}

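/* Build a complete RTM_NEWROUTE/RTM_DELROUTE message (rtmsg header plus the
 * attributes filled in above) for one cache entry; used by both the
 * notification and the dump paths below.
 */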
static int ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
			    u32 portid, u32 seq, struct mfc_cache *c, int cmd,
			    int flags)
{
	struct nlmsghdr *nlh;
	struct rtmsg *rtm;
	int err;

	nlh = nlmsg_put(skb, portid, seq, cmd, sizeof(*rtm), flags);
	if (!nlh)
		return -EMSGSIZE;

	rtm = nlmsg_data(nlh);
	rtm->rtm_family   = RTNL_FAMILY_IPMR;
	rtm->rtm_dst_len  = 32;
	rtm->rtm_src_len  = 32;
	rtm->rtm_tos      = 0;
	rtm->rtm_table    = mrt->id;
	if (nla_put_u32(skb, RTA_TABLE, mrt->id))
		goto nla_put_failure;
	rtm->rtm_type     = RTN_MULTICAST;
	rtm->rtm_scope    = RT_SCOPE_UNIVERSE;
	if (c->mfc_flags & MFC_STATIC)
		rtm->rtm_protocol = RTPROT_STATIC;
	else
		rtm->rtm_protocol = RTPROT_MROUTED;
	rtm->rtm_flags    = 0;

	if (nla_put_in_addr(skb, RTA_SRC, c->mfc_origin) ||
	    nla_put_in_addr(skb, RTA_DST, c->mfc_mcastgrp))
		goto nla_put_failure;
	err = __ipmr_fill_mroute(mrt, skb, c, rtm);
	/* do not break the dump if cache is unresolved */
	if (err < 0 && err != -ENOENT)
		goto nla_put_failure;

	nlmsg_end(skb, nlh);
	return 0;

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

static size_t mroute_msgsize(bool unresolved, int maxvif)
{
	size_t len =
		NLMSG_ALIGN(sizeof(struct rtmsg))
		+ nla_total_size(4)			/* RTA_TABLE */
		+ nla_total_size(4)			/* RTA_SRC */
		+ nla_total_size(4)			/* RTA_DST */
		;

	if (!unresolved)
		len = len
		      + nla_total_size(4)		/* RTA_IIF */
		      + nla_total_size(0)		/* RTA_MULTIPATH */
		      + maxvif * NLA_ALIGN(sizeof(struct rtnexthop))
							/* RTA_MFC_STATS */
		      + nla_total_size_64bit(sizeof(struct rta_mfc_stats))
		      ;

	return len;
}

static void mroute_netlink_event(struct mr_table *mrt, struct mfc_cache *mfc,
				 int cmd)
{
	struct net *net = read_pnet(&mrt->net);
	struct sk_buff *skb;
	int err = -ENOBUFS;

	skb = nlmsg_new(mroute_msgsize(mfc->mfc_parent >= MAXVIFS, mrt->maxvif),
			GFP_ATOMIC);
	if (!skb)
		goto errout;

	err = ipmr_fill_mroute(mrt, skb, 0, 0, mfc, cmd, 0);
	if (err < 0)
		goto errout;

	rtnl_notify(skb, net, 0, RTNLGRP_IPV4_MROUTE, NULL, GFP_ATOMIC);
	return;

errout:
	kfree_skb(skb);
	if (err < 0)
		rtnl_set_sk_err(net, RTNLGRP_IPV4_MROUTE, err);
}

static int ipmr_rtm_dumproute(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	struct mr_table *mrt;
	struct mfc_cache *mfc;
	unsigned int t = 0, s_t;
	unsigned int h = 0, s_h;
	unsigned int e = 0, s_e;

	s_t = cb->args[0];
	s_h = cb->args[1];
	s_e = cb->args[2];

	rcu_read_lock();
	ipmr_for_each_table(mrt, net) {
		if (t < s_t)
			goto next_table;
		if (t > s_t)
			s_h = 0;
		for (h = s_h; h < MFC_LINES; h++) {
			list_for_each_entry_rcu(mfc, &mrt->mfc_cache_array[h], list) {
				if (e < s_e)
					goto next_entry;
				if (ipmr_fill_mroute(mrt, skb,
						     NETLINK_CB(cb->skb).portid,
						     cb->nlh->nlmsg_seq,
						     mfc, RTM_NEWROUTE,
						     NLM_F_MULTI) < 0)
					goto done;
next_entry:
				e++;
			}
			e = s_e = 0;
		}
		spin_lock_bh(&mfc_unres_lock);
		list_for_each_entry(mfc, &mrt->mfc_unres_queue, list) {
			if (e < s_e)
				goto next_entry2;
			if (ipmr_fill_mroute(mrt, skb,
					     NETLINK_CB(cb->skb).portid,
					     cb->nlh->nlmsg_seq,
					     mfc, RTM_NEWROUTE,
					     NLM_F_MULTI) < 0) {
				spin_unlock_bh(&mfc_unres_lock);
				goto done;
			}
next_entry2:
			e++;
		}
		spin_unlock_bh(&mfc_unres_lock);
		e = s_e = 0;
		s_h = 0;
next_table:
		t++;
	}
done:
	rcu_read_unlock();

	cb->args[2] = e;
	cb->args[1] = h;
	cb->args[0] = t;

	return skb->len;
}

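/* Userspace can manage MFC entries over rtnetlink as an alternative to the
 * MRT_ADD_MFC/MRT_DEL_MFC socket options: RTM_NEWROUTE/RTM_DELROUTE messages
 * with rtm_family RTNL_FAMILY_IPMR, validated against the policy below.
 * (iproute2's "ip mroute show" reads the corresponding dump interface above.)
 */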
static const struct nla_policy rtm_ipmr_policy[RTA_MAX + 1] = {
	[RTA_SRC]	= { .type = NLA_U32 },
	[RTA_DST]	= { .type = NLA_U32 },
	[RTA_IIF]	= { .type = NLA_U32 },
	[RTA_TABLE]	= { .type = NLA_U32 },
	[RTA_MULTIPATH]	= { .len = sizeof(struct rtnexthop) },
};

static bool ipmr_rtm_validate_proto(unsigned char rtm_protocol)
{
	switch (rtm_protocol) {
	case RTPROT_STATIC:
	case RTPROT_MROUTED:
		return true;
	}
	return false;
}

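/* Collect TTL thresholds from an RTA_MULTIPATH attribute: the rtnh_hops of
 * each rtnexthop is assigned positionally to consecutive vif slots,
 * mirroring the mfcc_ttls[] layout used by MRT_ADD_MFC.
 */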
static int ipmr_nla_get_ttls(const struct nlattr *nla, struct mfcctl *mfcc)
{
	struct rtnexthop *rtnh = nla_data(nla);
	int remaining = nla_len(nla), vifi = 0;

	while (rtnh_ok(rtnh, remaining)) {
		mfcc->mfcc_ttls[vifi] = rtnh->rtnh_hops;
		if (++vifi == MAXVIFS)
			break;
		rtnh = rtnh_next(rtnh, &remaining);
	}

	return remaining > 0 ? -EINVAL : vifi;
}

/* returns < 0 on error, 0 for ADD_MFC and 1 for ADD_MFC_PROXY */
static int rtm_to_ipmr_mfcc(struct net *net, struct nlmsghdr *nlh,
			    struct mfcctl *mfcc, int *mrtsock,
			    struct mr_table **mrtret)
{
	struct net_device *dev = NULL;
	u32 tblid = RT_TABLE_DEFAULT;
	struct mr_table *mrt;
	struct nlattr *attr;
	struct rtmsg *rtm;
	int ret, rem;

	ret = nlmsg_validate(nlh, sizeof(*rtm), RTA_MAX, rtm_ipmr_policy);
	if (ret < 0)
		goto out;
	rtm = nlmsg_data(nlh);

	ret = -EINVAL;
	if (rtm->rtm_family != RTNL_FAMILY_IPMR || rtm->rtm_dst_len != 32 ||
	    rtm->rtm_type != RTN_MULTICAST ||
	    rtm->rtm_scope != RT_SCOPE_UNIVERSE ||
	    !ipmr_rtm_validate_proto(rtm->rtm_protocol))
		goto out;

	memset(mfcc, 0, sizeof(*mfcc));
	mfcc->mfcc_parent = -1;
	ret = 0;
	nlmsg_for_each_attr(attr, nlh, sizeof(struct rtmsg), rem) {
		switch (nla_type(attr)) {
		case RTA_SRC:
			mfcc->mfcc_origin.s_addr = nla_get_be32(attr);
			break;
		case RTA_DST:
			mfcc->mfcc_mcastgrp.s_addr = nla_get_be32(attr);
			break;
		case RTA_IIF:
			dev = __dev_get_by_index(net, nla_get_u32(attr));
			if (!dev) {
				ret = -ENODEV;
				goto out;
			}
			break;
		case RTA_MULTIPATH:
			if (ipmr_nla_get_ttls(attr, mfcc) < 0) {
				ret = -EINVAL;
				goto out;
			}
			break;
		case RTA_PREFSRC:
			ret = 1;
			break;
		case RTA_TABLE:
			tblid = nla_get_u32(attr);
			break;
		}
	}
	mrt = ipmr_get_table(net, tblid);
	if (!mrt) {
		ret = -ENOENT;
		goto out;
	}
	*mrtret = mrt;
	*mrtsock = rtm->rtm_protocol == RTPROT_MROUTED ? 1 : 0;
	if (dev)
		mfcc->mfcc_parent = ipmr_find_vif(mrt, dev);

out:
	return ret;
}

/* takes care of both newroute and delroute */
static int ipmr_rtm_route(struct sk_buff *skb, struct nlmsghdr *nlh)
{
	struct net *net = sock_net(skb->sk);
	int ret, mrtsock, parent;
	struct mr_table *tbl;
	struct mfcctl mfcc;

	mrtsock = 0;
	tbl = NULL;
	ret = rtm_to_ipmr_mfcc(net, nlh, &mfcc, &mrtsock, &tbl);
	if (ret < 0)
		return ret;

	parent = ret ? mfcc.mfcc_parent : -1;
	if (nlh->nlmsg_type == RTM_NEWROUTE)
		return ipmr_mfc_add(net, tbl, &mfcc, mrtsock, parent);
	else
		return ipmr_mfc_delete(tbl, &mfcc, parent);
}

#ifdef CONFIG_PROC_FS
/* The /proc interfaces to multicast routing:
 * /proc/net/ip_mr_cache & /proc/net/ip_mr_vif
 */
struct ipmr_vif_iter {
	struct seq_net_private p;
	struct mr_table *mrt;
	int ct;
};

static struct vif_device *ipmr_vif_seq_idx(struct net *net,
					   struct ipmr_vif_iter *iter,
					   loff_t pos)
{
	struct mr_table *mrt = iter->mrt;

	for (iter->ct = 0; iter->ct < mrt->maxvif; ++iter->ct) {
		if (!VIF_EXISTS(mrt, iter->ct))
			continue;
		if (pos-- == 0)
			return &mrt->vif_table[iter->ct];
	}
	return NULL;
}

static void *ipmr_vif_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(mrt_lock)
{
	struct ipmr_vif_iter *iter = seq->private;
	struct net *net = seq_file_net(seq);
	struct mr_table *mrt;

	mrt = ipmr_get_table(net, RT_TABLE_DEFAULT);
	if (!mrt)
		return ERR_PTR(-ENOENT);

	iter->mrt = mrt;

	read_lock(&mrt_lock);
	return *pos ? ipmr_vif_seq_idx(net, seq->private, *pos - 1)
		: SEQ_START_TOKEN;
}

static void *ipmr_vif_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct ipmr_vif_iter *iter = seq->private;
	struct net *net = seq_file_net(seq);
	struct mr_table *mrt = iter->mrt;

	++*pos;
	if (v == SEQ_START_TOKEN)
		return ipmr_vif_seq_idx(net, iter, 0);

	while (++iter->ct < mrt->maxvif) {
		if (!VIF_EXISTS(mrt, iter->ct))
			continue;
		return &mrt->vif_table[iter->ct];
	}
	return NULL;
}

static void ipmr_vif_seq_stop(struct seq_file *seq, void *v)
	__releases(mrt_lock)
{
	read_unlock(&mrt_lock);
}

static int ipmr_vif_seq_show(struct seq_file *seq, void *v)
{
	struct ipmr_vif_iter *iter = seq->private;
	struct mr_table *mrt = iter->mrt;

	if (v == SEQ_START_TOKEN) {
		seq_puts(seq,
			 "Interface      BytesIn  PktsIn  BytesOut PktsOut Flags Local    Remote\n");
	} else {
		const struct vif_device *vif = v;
		const char *name = vif->dev ? vif->dev->name : "none";

		seq_printf(seq,
			   "%2Zd %-10s %8ld %7ld  %8ld %7ld %05X %08X %08X\n",
			   vif - mrt->vif_table,
			   name, vif->bytes_in, vif->pkt_in,
			   vif->bytes_out, vif->pkt_out,
			   vif->flags, vif->local, vif->remote);
	}
	return 0;
}

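/* Illustrative /proc/net/ip_mr_vif output (one line per vif; the values
 * here are made up for the example):
 *
 *	Interface      BytesIn  PktsIn  BytesOut PktsOut Flags Local    Remote
 *	 0 eth0           1500      10      3000      20 00000 0100000A 00000000
 *
 * Flags, Local and Remote are printed in hex; Local and Remote are the
 * tunnel endpoint addresses for VIFF_TUNNEL vifs.
 */
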
static const struct seq_operations ipmr_vif_seq_ops = {
	.start = ipmr_vif_seq_start,
	.next  = ipmr_vif_seq_next,
	.stop  = ipmr_vif_seq_stop,
	.show  = ipmr_vif_seq_show,
};

static int ipmr_vif_open(struct inode *inode, struct file *file)
{
	return seq_open_net(inode, file, &ipmr_vif_seq_ops,
			    sizeof(struct ipmr_vif_iter));
}

static const struct file_operations ipmr_vif_fops = {
	.owner	 = THIS_MODULE,
	.open	 = ipmr_vif_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = seq_release_net,
};

struct ipmr_mfc_iter {
	struct seq_net_private p;
	struct mr_table *mrt;
	struct list_head *cache;
	int ct;
};


static struct mfc_cache *ipmr_mfc_seq_idx(struct net *net,
					  struct ipmr_mfc_iter *it, loff_t pos)
{
	struct mr_table *mrt = it->mrt;
	struct mfc_cache *mfc;

	rcu_read_lock();
	for (it->ct = 0; it->ct < MFC_LINES; it->ct++) {
		it->cache = &mrt->mfc_cache_array[it->ct];
		list_for_each_entry_rcu(mfc, it->cache, list)
			if (pos-- == 0)
				return mfc;
	}
	rcu_read_unlock();

	spin_lock_bh(&mfc_unres_lock);
	it->cache = &mrt->mfc_unres_queue;
	list_for_each_entry(mfc, it->cache, list)
		if (pos-- == 0)
			return mfc;
	spin_unlock_bh(&mfc_unres_lock);

	it->cache = NULL;
	return NULL;
}


static void *ipmr_mfc_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct ipmr_mfc_iter *it = seq->private;
	struct net *net = seq_file_net(seq);
	struct mr_table *mrt;

	mrt = ipmr_get_table(net, RT_TABLE_DEFAULT);
	if (!mrt)
		return ERR_PTR(-ENOENT);

	it->mrt = mrt;
	it->cache = NULL;
	it->ct = 0;
	return *pos ? ipmr_mfc_seq_idx(net, seq->private, *pos - 1)
		: SEQ_START_TOKEN;
}

static void *ipmr_mfc_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct mfc_cache *mfc = v;
	struct ipmr_mfc_iter *it = seq->private;
	struct net *net = seq_file_net(seq);
	struct mr_table *mrt = it->mrt;

	++*pos;

	if (v == SEQ_START_TOKEN)
		return ipmr_mfc_seq_idx(net, seq->private, 0);

	if (mfc->list.next != it->cache)
		return list_entry(mfc->list.next, struct mfc_cache, list);

	if (it->cache == &mrt->mfc_unres_queue)
		goto end_of_list;

	BUG_ON(it->cache != &mrt->mfc_cache_array[it->ct]);

	while (++it->ct < MFC_LINES) {
		it->cache = &mrt->mfc_cache_array[it->ct];
		if (list_empty(it->cache))
			continue;
		return list_first_entry(it->cache, struct mfc_cache, list);
	}

	/* exhausted cache_array, show unresolved */
	rcu_read_unlock();
	it->cache = &mrt->mfc_unres_queue;
	it->ct = 0;

	spin_lock_bh(&mfc_unres_lock);
	if (!list_empty(it->cache))
		return list_first_entry(it->cache, struct mfc_cache, list);

end_of_list:
	spin_unlock_bh(&mfc_unres_lock);
	it->cache = NULL;

	return NULL;
}

static void ipmr_mfc_seq_stop(struct seq_file *seq, void *v)
{
	struct ipmr_mfc_iter *it = seq->private;
	struct mr_table *mrt = it->mrt;

	if (it->cache == &mrt->mfc_unres_queue)
		spin_unlock_bh(&mfc_unres_lock);
	else if (it->cache == &mrt->mfc_cache_array[it->ct])
		rcu_read_unlock();
}

static int ipmr_mfc_seq_show(struct seq_file *seq, void *v)
{
	int n;

	if (v == SEQ_START_TOKEN) {
		seq_puts(seq,
			 "Group    Origin   Iif     Pkts    Bytes    Wrong Oifs\n");
	} else {
		const struct mfc_cache *mfc = v;
		const struct ipmr_mfc_iter *it = seq->private;
		const struct mr_table *mrt = it->mrt;

		seq_printf(seq, "%08X %08X %-3hd",
			   (__force u32) mfc->mfc_mcastgrp,
			   (__force u32) mfc->mfc_origin,
			   mfc->mfc_parent);

		if (it->cache != &mrt->mfc_unres_queue) {
			seq_printf(seq, " %8lu %8lu %8lu",
				   mfc->mfc_un.res.pkt,
				   mfc->mfc_un.res.bytes,
				   mfc->mfc_un.res.wrong_if);
			for (n = mfc->mfc_un.res.minvif;
			     n < mfc->mfc_un.res.maxvif; n++) {
				if (VIF_EXISTS(mrt, n) &&
				    mfc->mfc_un.res.ttls[n] < 255)
					seq_printf(seq,
						   " %2d:%-3d",
						   n, mfc->mfc_un.res.ttls[n]);
			}
		} else {
			/* unresolved mfc_caches don't contain
			 * pkt, bytes and wrong_if values
			 */
			seq_printf(seq, " %8lu %8lu %8lu", 0ul, 0ul, 0ul);
		}
		seq_putc(seq, '\n');
	}
	return 0;
}

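/* Illustrative /proc/net/ip_mr_cache output (values made up): group and
 * origin in hex, then the input vif, the counters, and "vif:ttl" pairs for
 * each output interface:
 *
 *	Group    Origin   Iif     Pkts    Bytes    Wrong Oifs
 *	E10000FF 0100000A 0          5      540        0  1:1  2:1
 *
 * Unresolved entries print zeroed counters and no oif list.
 */
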
static const struct seq_operations ipmr_mfc_seq_ops = {
	.start = ipmr_mfc_seq_start,
	.next  = ipmr_mfc_seq_next,
	.stop  = ipmr_mfc_seq_stop,
	.show  = ipmr_mfc_seq_show,
};

static int ipmr_mfc_open(struct inode *inode, struct file *file)
{
	return seq_open_net(inode, file, &ipmr_mfc_seq_ops,
			    sizeof(struct ipmr_mfc_iter));
}

static const struct file_operations ipmr_mfc_fops = {
	.owner	 = THIS_MODULE,
	.open	 = ipmr_mfc_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = seq_release_net,
};
#endif

#ifdef CONFIG_IP_PIMSM_V2
static const struct net_protocol pim_protocol = {
	.handler = pim_rcv,
	.netns_ok = 1,
};
#endif

/* Setup for IP multicast routing */
static int __net_init ipmr_net_init(struct net *net)
{
	int err;

	err = ipmr_rules_init(net);
	if (err < 0)
		goto fail;

#ifdef CONFIG_PROC_FS
	err = -ENOMEM;
	if (!proc_create("ip_mr_vif", 0, net->proc_net, &ipmr_vif_fops))
		goto proc_vif_fail;
	if (!proc_create("ip_mr_cache", 0, net->proc_net, &ipmr_mfc_fops))
		goto proc_cache_fail;
#endif
	return 0;

#ifdef CONFIG_PROC_FS
proc_cache_fail:
	remove_proc_entry("ip_mr_vif", net->proc_net);
proc_vif_fail:
	ipmr_rules_exit(net);
#endif
fail:
	return err;
}

static void __net_exit ipmr_net_exit(struct net *net)
{
#ifdef CONFIG_PROC_FS
	remove_proc_entry("ip_mr_cache", net->proc_net);
	remove_proc_entry("ip_mr_vif", net->proc_net);
#endif
	ipmr_rules_exit(net);
}

static struct pernet_operations ipmr_net_ops = {
	.init = ipmr_net_init,
	.exit = ipmr_net_exit,
};

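/* Module init: create the MFC entry cache, then register the per-netns
 * state, the netdevice notifier, the PIMv2 protocol handler (when
 * configured) and the rtnetlink get/new/del handlers, unwinding in reverse
 * order on failure.
 */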
int __init ip_mr_init(void)
{
	int err;

	mrt_cachep = kmem_cache_create("ip_mrt_cache",
				       sizeof(struct mfc_cache),
				       0, SLAB_HWCACHE_ALIGN | SLAB_PANIC,
				       NULL);

	err = register_pernet_subsys(&ipmr_net_ops);
	if (err)
		goto reg_pernet_fail;

	err = register_netdevice_notifier(&ip_mr_notifier);
	if (err)
		goto reg_notif_fail;
#ifdef CONFIG_IP_PIMSM_V2
	if (inet_add_protocol(&pim_protocol, IPPROTO_PIM) < 0) {
		pr_err("%s: can't add PIM protocol\n", __func__);
		err = -EAGAIN;
		goto add_proto_fail;
	}
#endif
	rtnl_register(RTNL_FAMILY_IPMR, RTM_GETROUTE,
		      NULL, ipmr_rtm_dumproute, NULL);
	rtnl_register(RTNL_FAMILY_IPMR, RTM_NEWROUTE,
		      ipmr_rtm_route, NULL, NULL);
	rtnl_register(RTNL_FAMILY_IPMR, RTM_DELROUTE,
		      ipmr_rtm_route, NULL, NULL);
	return 0;

#ifdef CONFIG_IP_PIMSM_V2
add_proto_fail:
	unregister_netdevice_notifier(&ip_mr_notifier);
#endif
reg_notif_fail:
	unregister_pernet_subsys(&ipmr_net_ops);
reg_pernet_fail:
	kmem_cache_destroy(mrt_cachep);
	return err;
}