1 /*
2  * INET         An implementation of the TCP/IP protocol suite for the LINUX
3  *              operating system.  INET is implemented using the  BSD Socket
4  *              interface as the means of communication with the user level.
5  *
6  *              ROUTE - implementation of the IP router.
7  *
8  * Authors:     Ross Biro
9  *              Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
10  *              Alan Cox, <gw4pts@gw4pts.ampr.org>
11  *              Linus Torvalds, <Linus.Torvalds@helsinki.fi>
12  *              Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
13  *
14  * Fixes:
15  *              Alan Cox        :       Verify area fixes.
16  *              Alan Cox        :       cli() protects routing changes
17  *              Rui Oliveira    :       ICMP routing table updates
18  *              (rco@di.uminho.pt)      Routing table insertion and update
19  *              Linus Torvalds  :       Rewrote bits to be sensible
20  *              Alan Cox        :       Added BSD route gw semantics
21  *              Alan Cox        :       Super /proc >4K
22  *              Alan Cox        :       MTU in route table
23  *              Alan Cox        :       MSS actually. Also added the window
24  *                                      clamper.
25  *              Sam Lantinga    :       Fixed route matching in rt_del()
26  *              Alan Cox        :       Routing cache support.
27  *              Alan Cox        :       Removed compatibility cruft.
28  *              Alan Cox        :       RTF_REJECT support.
29  *              Alan Cox        :       TCP irtt support.
30  *              Jonathan Naylor :       Added Metric support.
31  *      Miquel van Smoorenburg  :       BSD API fixes.
32  *      Miquel van Smoorenburg  :       Metrics.
33  *              Alan Cox        :       Use __u32 properly
34  *              Alan Cox        :       Aligned routing errors more closely with BSD;
35  *                                      our system is still very different.
36  *              Alan Cox        :       Faster /proc handling
37  *      Alexey Kuznetsov        :       Massive rework to support tree based routing,
38  *                                      routing caches and better behaviour.
39  *
40  *              Olaf Erb        :       irtt wasn't being copied right.
41  *              Bjorn Ekwall    :       Kerneld route support.
42  *              Alan Cox        :       Multicast fixed (I hope)
43  *              Pavel Krauz     :       Limited broadcast fixed
44  *              Mike McLagan    :       Routing by source
45  *      Alexey Kuznetsov        :       End of old history. Split to fib.c and
46  *                                      route.c and rewritten from scratch.
47  *              Andi Kleen      :       Load-limit warning messages.
48  *      Vitaly E. Lavrov        :       Transparent proxy revived after year coma.
49  *      Vitaly E. Lavrov        :       Race condition in ip_route_input_slow.
50  *      Tobias Ringstrom        :       Uninitialized res.type in ip_route_output_slow.
51  *      Vladimir V. Ivanov      :       IP rule info (flowid) is really useful.
52  *              Marc Boucher    :       routing by fwmark
53  *      Robert Olsson           :       Added rt_cache statistics
54  *      Arnaldo C. Melo         :       Convert proc stuff to seq_file
55  *      Eric Dumazet            :       hashed spinlocks and rt_check_expire() fixes.
56  *      Ilia Sotnikov           :       Ignore TOS on PMTUD and Redirect
57  *      Ilia Sotnikov           :       Removed TOS from hash calculations
58  *
59  *              This program is free software; you can redistribute it and/or
60  *              modify it under the terms of the GNU General Public License
61  *              as published by the Free Software Foundation; either version
62  *              2 of the License, or (at your option) any later version.
63  */
64
65 #include <linux/module.h>
66 #include <asm/uaccess.h>
67 #include <asm/system.h>
68 #include <linux/bitops.h>
69 #include <linux/types.h>
70 #include <linux/kernel.h>
71 #include <linux/mm.h>
72 #include <linux/bootmem.h>
73 #include <linux/string.h>
74 #include <linux/socket.h>
75 #include <linux/sockios.h>
76 #include <linux/errno.h>
77 #include <linux/in.h>
78 #include <linux/inet.h>
79 #include <linux/netdevice.h>
80 #include <linux/proc_fs.h>
81 #include <linux/init.h>
82 #include <linux/workqueue.h>
83 #include <linux/skbuff.h>
84 #include <linux/inetdevice.h>
85 #include <linux/igmp.h>
86 #include <linux/pkt_sched.h>
87 #include <linux/mroute.h>
88 #include <linux/netfilter_ipv4.h>
89 #include <linux/random.h>
90 #include <linux/jhash.h>
91 #include <linux/rcupdate.h>
92 #include <linux/times.h>
93 #include <linux/slab.h>
94 #include <net/dst.h>
95 #include <net/net_namespace.h>
96 #include <net/protocol.h>
97 #include <net/ip.h>
98 #include <net/route.h>
99 #include <net/inetpeer.h>
100 #include <net/sock.h>
101 #include <net/ip_fib.h>
102 #include <net/arp.h>
103 #include <net/tcp.h>
104 #include <net/icmp.h>
105 #include <net/xfrm.h>
106 #include <net/netevent.h>
107 #include <net/rtnetlink.h>
108 #ifdef CONFIG_SYSCTL
109 #include <linux/sysctl.h>
110 #endif
111 #include <net/atmclip.h>
112
113 #define RT_FL_TOS(oldflp4) \
114     ((u32)(oldflp4->flowi4_tos & (IPTOS_RT_MASK | RTO_ONLINK)))
115
116 #define IP_MAX_MTU      0xFFF0
117
118 #define RT_GC_TIMEOUT (300*HZ)
119
120 static int ip_rt_max_size;
121 static int ip_rt_gc_timeout __read_mostly       = RT_GC_TIMEOUT;
122 static int ip_rt_gc_interval __read_mostly      = 60 * HZ;
123 static int ip_rt_gc_min_interval __read_mostly  = HZ / 2;
124 static int ip_rt_redirect_number __read_mostly  = 9;
125 static int ip_rt_redirect_load __read_mostly    = HZ / 50;
126 static int ip_rt_redirect_silence __read_mostly = ((HZ / 50) << (9 + 1));
127 static int ip_rt_error_cost __read_mostly       = HZ;
128 static int ip_rt_error_burst __read_mostly      = 5 * HZ;
129 static int ip_rt_gc_elasticity __read_mostly    = 8;
130 static int ip_rt_mtu_expires __read_mostly      = 10 * 60 * HZ;
131 static int ip_rt_min_pmtu __read_mostly         = 512 + 20 + 20;
132 static int ip_rt_min_advmss __read_mostly       = 256;
133 static int rt_chain_length_max __read_mostly    = 20;
134
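/*
 * For illustration (assuming HZ=1000, a common configuration): the redirect
 * limiter defaults above work out to a base interval of HZ/50 = 20ms between
 * redirects, doubling after each one sent, and a silence window of
 * (HZ/50) << 10 = 20480 jiffies, i.e. roughly 20 seconds, after which the
 * backoff state is reset.  See ip_rt_send_redirect() below.
 */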
135 /*
136  *      Interface to generic destination cache.
137  */
138
139 static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie);
140 static unsigned int      ipv4_default_advmss(const struct dst_entry *dst);
141 static unsigned int      ipv4_default_mtu(const struct dst_entry *dst);
142 static void              ipv4_dst_destroy(struct dst_entry *dst);
143 static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst);
144 static void              ipv4_link_failure(struct sk_buff *skb);
145 static void              ip_rt_update_pmtu(struct dst_entry *dst, u32 mtu);
146 static int rt_garbage_collect(struct dst_ops *ops);
147
148 static void ipv4_dst_ifdown(struct dst_entry *dst, struct net_device *dev,
149                             int how)
150 {
151 }
152
153 static u32 *ipv4_cow_metrics(struct dst_entry *dst, unsigned long old)
154 {
155         struct rtable *rt = (struct rtable *) dst;
156         struct inet_peer *peer;
157         u32 *p = NULL;
158
159         if (!rt->peer)
160                 rt_bind_peer(rt, rt->rt_dst, 1);
161
162         peer = rt->peer;
163         if (peer) {
164                 u32 *old_p = __DST_METRICS_PTR(old);
165                 unsigned long prev, new;
166
167                 p = peer->metrics;
168                 if (inet_metrics_new(peer))
169                         memcpy(p, old_p, sizeof(u32) * RTAX_MAX);
170
171                 new = (unsigned long) p;
172                 prev = cmpxchg(&dst->_metrics, old, new);
173
174                 if (prev != old) {
175                         p = __DST_METRICS_PTR(prev);
176                         if (prev & DST_METRICS_READ_ONLY)
177                                 p = NULL;
178                 } else {
179                         if (rt->fi) {
180                                 fib_info_put(rt->fi);
181                                 rt->fi = NULL;
182                         }
183                 }
184         }
185         return p;
186 }
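/*
 * Note on the cmpxchg in ipv4_cow_metrics(): if another CPU raced with us
 * and already switched dst->_metrics away from 'old', we return whatever
 * pointer won the race (or NULL if it is still a read-only template) rather
 * than the peer's copy.  If we won, the fib_info reference is dropped,
 * because the writable metrics now live in the inet_peer.
 */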
187
188 static struct dst_ops ipv4_dst_ops = {
189         .family =               AF_INET,
190         .protocol =             cpu_to_be16(ETH_P_IP),
191         .gc =                   rt_garbage_collect,
192         .check =                ipv4_dst_check,
193         .default_advmss =       ipv4_default_advmss,
194         .default_mtu =          ipv4_default_mtu,
195         .cow_metrics =          ipv4_cow_metrics,
196         .destroy =              ipv4_dst_destroy,
197         .ifdown =               ipv4_dst_ifdown,
198         .negative_advice =      ipv4_negative_advice,
199         .link_failure =         ipv4_link_failure,
200         .update_pmtu =          ip_rt_update_pmtu,
201         .local_out =            __ip_local_out,
202 };
203
204 #define ECN_OR_COST(class)      TC_PRIO_##class
205
206 const __u8 ip_tos2prio[16] = {
207         TC_PRIO_BESTEFFORT,
208         ECN_OR_COST(BESTEFFORT),
209         TC_PRIO_BESTEFFORT,
210         ECN_OR_COST(BESTEFFORT),
211         TC_PRIO_BULK,
212         ECN_OR_COST(BULK),
213         TC_PRIO_BULK,
214         ECN_OR_COST(BULK),
215         TC_PRIO_INTERACTIVE,
216         ECN_OR_COST(INTERACTIVE),
217         TC_PRIO_INTERACTIVE,
218         ECN_OR_COST(INTERACTIVE),
219         TC_PRIO_INTERACTIVE_BULK,
220         ECN_OR_COST(INTERACTIVE_BULK),
221         TC_PRIO_INTERACTIVE_BULK,
222         ECN_OR_COST(INTERACTIVE_BULK)
223 };
224
225
226 /*
227  * Route cache.
228  */
229
230 /* The locking scheme is rather straightforward:
231  *
232  * 1) Read-Copy Update protects the buckets of the central route hash.
233  * 2) Only writers remove entries, and they hold the lock
234  *    as they look at rtable reference counts.
235  * 3) Only readers acquire references to rtable entries,
236  *    they do so with atomic increments and with the
237  *    lock held.
238  */
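/*
 * A minimal sketch of the resulting access pattern, as used throughout
 * this file:
 *
 *	rcu_read_lock_bh();
 *	for (rth = rcu_dereference_bh(rt_hash_table[hash].chain); rth;
 *	     rth = rcu_dereference_bh(rth->dst.rt_next))
 *		...				lockless lookup
 *	rcu_read_unlock_bh();
 *
 *	spin_lock_bh(rt_hash_lock_addr(hash));
 *	...				unlink + rt_free() under the bucket lock
 *	spin_unlock_bh(rt_hash_lock_addr(hash));
 */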
239
240 struct rt_hash_bucket {
241         struct rtable __rcu     *chain;
242 };
243
244 #if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) || \
245         defined(CONFIG_PROVE_LOCKING)
246 /*
247  * Instead of using one spinlock for each rt_hash_bucket, we use a table of spinlocks.
248  * The size of this table is a power of two and depends on the number of CPUs.
249  * (on lockdep we have a quite big spinlock_t, so keep the size down there)
250  */
251 #ifdef CONFIG_LOCKDEP
252 # define RT_HASH_LOCK_SZ        256
253 #else
254 # if NR_CPUS >= 32
255 #  define RT_HASH_LOCK_SZ       4096
256 # elif NR_CPUS >= 16
257 #  define RT_HASH_LOCK_SZ       2048
258 # elif NR_CPUS >= 8
259 #  define RT_HASH_LOCK_SZ       1024
260 # elif NR_CPUS >= 4
261 #  define RT_HASH_LOCK_SZ       512
262 # else
263 #  define RT_HASH_LOCK_SZ       256
264 # endif
265 #endif
266
267 static spinlock_t       *rt_hash_locks;
268 # define rt_hash_lock_addr(slot) &rt_hash_locks[(slot) & (RT_HASH_LOCK_SZ - 1)]
269
270 static __init void rt_hash_lock_init(void)
271 {
272         int i;
273
274         rt_hash_locks = kmalloc(sizeof(spinlock_t) * RT_HASH_LOCK_SZ,
275                         GFP_KERNEL);
276         if (!rt_hash_locks)
277                 panic("IP: failed to allocate rt_hash_locks\n");
278
279         for (i = 0; i < RT_HASH_LOCK_SZ; i++)
280                 spin_lock_init(&rt_hash_locks[i]);
281 }
282 #else
283 # define rt_hash_lock_addr(slot) NULL
284
285 static inline void rt_hash_lock_init(void)
286 {
287 }
288 #endif
289
290 static struct rt_hash_bucket    *rt_hash_table __read_mostly;
291 static unsigned                 rt_hash_mask __read_mostly;
292 static unsigned int             rt_hash_log  __read_mostly;
293
294 static DEFINE_PER_CPU(struct rt_cache_stat, rt_cache_stat);
295 #define RT_CACHE_STAT_INC(field) __this_cpu_inc(rt_cache_stat.field)
296
297 static inline unsigned int rt_hash(__be32 daddr, __be32 saddr, int idx,
298                                    int genid)
299 {
300         return jhash_3words((__force u32)daddr, (__force u32)saddr,
301                             idx, genid)
302                 & rt_hash_mask;
303 }
304
305 static inline int rt_genid(struct net *net)
306 {
307         return atomic_read(&net->ipv4.rt_genid);
308 }
309
310 #ifdef CONFIG_PROC_FS
311 struct rt_cache_iter_state {
312         struct seq_net_private p;
313         int bucket;
314         int genid;
315 };
316
317 static struct rtable *rt_cache_get_first(struct seq_file *seq)
318 {
319         struct rt_cache_iter_state *st = seq->private;
320         struct rtable *r = NULL;
321
322         for (st->bucket = rt_hash_mask; st->bucket >= 0; --st->bucket) {
323                 if (!rcu_dereference_raw(rt_hash_table[st->bucket].chain))
324                         continue;
325                 rcu_read_lock_bh();
326                 r = rcu_dereference_bh(rt_hash_table[st->bucket].chain);
327                 while (r) {
328                         if (dev_net(r->dst.dev) == seq_file_net(seq) &&
329                             r->rt_genid == st->genid)
330                                 return r;
331                         r = rcu_dereference_bh(r->dst.rt_next);
332                 }
333                 rcu_read_unlock_bh();
334         }
335         return r;
336 }
337
338 static struct rtable *__rt_cache_get_next(struct seq_file *seq,
339                                           struct rtable *r)
340 {
341         struct rt_cache_iter_state *st = seq->private;
342
343         r = rcu_dereference_bh(r->dst.rt_next);
344         while (!r) {
345                 rcu_read_unlock_bh();
346                 do {
347                         if (--st->bucket < 0)
348                                 return NULL;
349                 } while (!rcu_dereference_raw(rt_hash_table[st->bucket].chain));
350                 rcu_read_lock_bh();
351                 r = rcu_dereference_bh(rt_hash_table[st->bucket].chain);
352         }
353         return r;
354 }
355
356 static struct rtable *rt_cache_get_next(struct seq_file *seq,
357                                         struct rtable *r)
358 {
359         struct rt_cache_iter_state *st = seq->private;
360         while ((r = __rt_cache_get_next(seq, r)) != NULL) {
361                 if (dev_net(r->dst.dev) != seq_file_net(seq))
362                         continue;
363                 if (r->rt_genid == st->genid)
364                         break;
365         }
366         return r;
367 }
368
369 static struct rtable *rt_cache_get_idx(struct seq_file *seq, loff_t pos)
370 {
371         struct rtable *r = rt_cache_get_first(seq);
372
373         if (r)
374                 while (pos && (r = rt_cache_get_next(seq, r)))
375                         --pos;
376         return pos ? NULL : r;
377 }
378
379 static void *rt_cache_seq_start(struct seq_file *seq, loff_t *pos)
380 {
381         struct rt_cache_iter_state *st = seq->private;
382         if (*pos)
383                 return rt_cache_get_idx(seq, *pos - 1);
384         st->genid = rt_genid(seq_file_net(seq));
385         return SEQ_START_TOKEN;
386 }
387
388 static void *rt_cache_seq_next(struct seq_file *seq, void *v, loff_t *pos)
389 {
390         struct rtable *r;
391
392         if (v == SEQ_START_TOKEN)
393                 r = rt_cache_get_first(seq);
394         else
395                 r = rt_cache_get_next(seq, v);
396         ++*pos;
397         return r;
398 }
399
400 static void rt_cache_seq_stop(struct seq_file *seq, void *v)
401 {
402         if (v && v != SEQ_START_TOKEN)
403                 rcu_read_unlock_bh();
404 }
405
406 static int rt_cache_seq_show(struct seq_file *seq, void *v)
407 {
408         if (v == SEQ_START_TOKEN)
409                 seq_printf(seq, "%-127s\n",
410                            "Iface\tDestination\tGateway \tFlags\t\tRefCnt\tUse\t"
411                            "Metric\tSource\t\tMTU\tWindow\tIRTT\tTOS\tHHRef\t"
412                            "HHUptod\tSpecDst");
413         else {
414                 struct rtable *r = v;
415                 int len;
416
417                 seq_printf(seq, "%s\t%08X\t%08X\t%8X\t%d\t%u\t%d\t"
418                               "%08X\t%d\t%u\t%u\t%02X\t%d\t%1d\t%08X%n",
419                         r->dst.dev ? r->dst.dev->name : "*",
420                         (__force u32)r->rt_dst,
421                         (__force u32)r->rt_gateway,
422                         r->rt_flags, atomic_read(&r->dst.__refcnt),
423                         r->dst.__use, 0, (__force u32)r->rt_src,
424                         dst_metric_advmss(&r->dst) + 40,
425                         dst_metric(&r->dst, RTAX_WINDOW),
426                         (int)((dst_metric(&r->dst, RTAX_RTT) >> 3) +
427                               dst_metric(&r->dst, RTAX_RTTVAR)),
428                         r->rt_key_tos,
429                         -1,
430                         (r->dst.neighbour ?
431                          (r->dst.neighbour->hh.hh_output ==
432                           dev_queue_xmit) : 0),
433                         r->rt_spec_dst, &len);
434
435                 seq_printf(seq, "%*s\n", 127 - len, "");
436         }
437         return 0;
438 }
439
440 static const struct seq_operations rt_cache_seq_ops = {
441         .start  = rt_cache_seq_start,
442         .next   = rt_cache_seq_next,
443         .stop   = rt_cache_seq_stop,
444         .show   = rt_cache_seq_show,
445 };
446
447 static int rt_cache_seq_open(struct inode *inode, struct file *file)
448 {
449         return seq_open_net(inode, file, &rt_cache_seq_ops,
450                         sizeof(struct rt_cache_iter_state));
451 }
452
453 static const struct file_operations rt_cache_seq_fops = {
454         .owner   = THIS_MODULE,
455         .open    = rt_cache_seq_open,
456         .read    = seq_read,
457         .llseek  = seq_lseek,
458         .release = seq_release_net,
459 };
460
461
462 static void *rt_cpu_seq_start(struct seq_file *seq, loff_t *pos)
463 {
464         int cpu;
465
466         if (*pos == 0)
467                 return SEQ_START_TOKEN;
468
469         for (cpu = *pos-1; cpu < nr_cpu_ids; ++cpu) {
470                 if (!cpu_possible(cpu))
471                         continue;
472                 *pos = cpu+1;
473                 return &per_cpu(rt_cache_stat, cpu);
474         }
475         return NULL;
476 }
477
478 static void *rt_cpu_seq_next(struct seq_file *seq, void *v, loff_t *pos)
479 {
480         int cpu;
481
482         for (cpu = *pos; cpu < nr_cpu_ids; ++cpu) {
483                 if (!cpu_possible(cpu))
484                         continue;
485                 *pos = cpu+1;
486                 return &per_cpu(rt_cache_stat, cpu);
487         }
488         return NULL;
489
490 }
491
492 static void rt_cpu_seq_stop(struct seq_file *seq, void *v)
493 {
494
495 }
496
497 static int rt_cpu_seq_show(struct seq_file *seq, void *v)
498 {
499         struct rt_cache_stat *st = v;
500
501         if (v == SEQ_START_TOKEN) {
502                 seq_printf(seq, "entries  in_hit in_slow_tot in_slow_mc in_no_route in_brd in_martian_dst in_martian_src  out_hit out_slow_tot out_slow_mc  gc_total gc_ignored gc_goal_miss gc_dst_overflow in_hlist_search out_hlist_search\n");
503                 return 0;
504         }
505
506         seq_printf(seq,"%08x  %08x %08x %08x %08x %08x %08x %08x "
507                    " %08x %08x %08x %08x %08x %08x %08x %08x %08x \n",
508                    dst_entries_get_slow(&ipv4_dst_ops),
509                    st->in_hit,
510                    st->in_slow_tot,
511                    st->in_slow_mc,
512                    st->in_no_route,
513                    st->in_brd,
514                    st->in_martian_dst,
515                    st->in_martian_src,
516
517                    st->out_hit,
518                    st->out_slow_tot,
519                    st->out_slow_mc,
520
521                    st->gc_total,
522                    st->gc_ignored,
523                    st->gc_goal_miss,
524                    st->gc_dst_overflow,
525                    st->in_hlist_search,
526                    st->out_hlist_search
527                 );
528         return 0;
529 }
530
531 static const struct seq_operations rt_cpu_seq_ops = {
532         .start  = rt_cpu_seq_start,
533         .next   = rt_cpu_seq_next,
534         .stop   = rt_cpu_seq_stop,
535         .show   = rt_cpu_seq_show,
536 };
537
538
539 static int rt_cpu_seq_open(struct inode *inode, struct file *file)
540 {
541         return seq_open(file, &rt_cpu_seq_ops);
542 }
543
544 static const struct file_operations rt_cpu_seq_fops = {
545         .owner   = THIS_MODULE,
546         .open    = rt_cpu_seq_open,
547         .read    = seq_read,
548         .llseek  = seq_lseek,
549         .release = seq_release,
550 };
551
552 #ifdef CONFIG_IP_ROUTE_CLASSID
553 static int rt_acct_proc_show(struct seq_file *m, void *v)
554 {
555         struct ip_rt_acct *dst, *src;
556         unsigned int i, j;
557
558         dst = kcalloc(256, sizeof(struct ip_rt_acct), GFP_KERNEL);
559         if (!dst)
560                 return -ENOMEM;
561
562         for_each_possible_cpu(i) {
563                 src = (struct ip_rt_acct *)per_cpu_ptr(ip_rt_acct, i);
564                 for (j = 0; j < 256; j++) {
565                         dst[j].o_bytes   += src[j].o_bytes;
566                         dst[j].o_packets += src[j].o_packets;
567                         dst[j].i_bytes   += src[j].i_bytes;
568                         dst[j].i_packets += src[j].i_packets;
569                 }
570         }
571
572         seq_write(m, dst, 256 * sizeof(struct ip_rt_acct));
573         kfree(dst);
574         return 0;
575 }
576
577 static int rt_acct_proc_open(struct inode *inode, struct file *file)
578 {
579         return single_open(file, rt_acct_proc_show, NULL);
580 }
581
582 static const struct file_operations rt_acct_proc_fops = {
583         .owner          = THIS_MODULE,
584         .open           = rt_acct_proc_open,
585         .read           = seq_read,
586         .llseek         = seq_lseek,
587         .release        = single_release,
588 };
589 #endif
590
591 static int __net_init ip_rt_do_proc_init(struct net *net)
592 {
593         struct proc_dir_entry *pde;
594
595         pde = proc_net_fops_create(net, "rt_cache", S_IRUGO,
596                         &rt_cache_seq_fops);
597         if (!pde)
598                 goto err1;
599
600         pde = proc_create("rt_cache", S_IRUGO,
601                           net->proc_net_stat, &rt_cpu_seq_fops);
602         if (!pde)
603                 goto err2;
604
605 #ifdef CONFIG_IP_ROUTE_CLASSID
606         pde = proc_create("rt_acct", 0, net->proc_net, &rt_acct_proc_fops);
607         if (!pde)
608                 goto err3;
609 #endif
610         return 0;
611
612 #ifdef CONFIG_IP_ROUTE_CLASSID
613 err3:
614         remove_proc_entry("rt_cache", net->proc_net_stat);
615 #endif
616 err2:
617         remove_proc_entry("rt_cache", net->proc_net);
618 err1:
619         return -ENOMEM;
620 }
621
622 static void __net_exit ip_rt_do_proc_exit(struct net *net)
623 {
624         remove_proc_entry("rt_cache", net->proc_net_stat);
625         remove_proc_entry("rt_cache", net->proc_net);
626 #ifdef CONFIG_IP_ROUTE_CLASSID
627         remove_proc_entry("rt_acct", net->proc_net);
628 #endif
629 }
630
631 static struct pernet_operations ip_rt_proc_ops __net_initdata =  {
632         .init = ip_rt_do_proc_init,
633         .exit = ip_rt_do_proc_exit,
634 };
635
636 static int __init ip_rt_proc_init(void)
637 {
638         return register_pernet_subsys(&ip_rt_proc_ops);
639 }
640
641 #else
642 static inline int ip_rt_proc_init(void)
643 {
644         return 0;
645 }
646 #endif /* CONFIG_PROC_FS */
647
648 static inline void rt_free(struct rtable *rt)
649 {
650         call_rcu_bh(&rt->dst.rcu_head, dst_rcu_free);
651 }
652
653 static inline void rt_drop(struct rtable *rt)
654 {
655         ip_rt_put(rt);
656         call_rcu_bh(&rt->dst.rcu_head, dst_rcu_free);
657 }
658
659 static inline int rt_fast_clean(struct rtable *rth)
660 {
661         /* Kill broadcast/multicast entries very aggressively, if they
662            collide in the hash table with more useful entries */
663         return (rth->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST)) &&
664                 rt_is_input_route(rth) && rth->dst.rt_next;
665 }
666
667 static inline int rt_valuable(struct rtable *rth)
668 {
669         return (rth->rt_flags & (RTCF_REDIRECTED | RTCF_NOTIFY)) ||
670                 (rth->peer && rth->peer->pmtu_expires);
671 }
672
673 static int rt_may_expire(struct rtable *rth, unsigned long tmo1, unsigned long tmo2)
674 {
675         unsigned long age;
676         int ret = 0;
677
678         if (atomic_read(&rth->dst.__refcnt))
679                 goto out;
680
681         age = jiffies - rth->dst.lastuse;
682         if ((age <= tmo1 && !rt_fast_clean(rth)) ||
683             (age <= tmo2 && rt_valuable(rth)))
684                 goto out;
685         ret = 1;
686 out:    return ret;
687 }
688
689 /* Bits of score are:
690  * 31: very valuable
691  * 30: not quite useless
692  * 29..0: usage counter
693  */
694 static inline u32 rt_score(struct rtable *rt)
695 {
696         u32 score = jiffies - rt->dst.lastuse;
697
698         score = ~score & ~(3<<30);
699
700         if (rt_valuable(rt))
701                 score |= (1<<31);
702
703         if (rt_is_output_route(rt) ||
704             !(rt->rt_flags & (RTCF_BROADCAST|RTCF_MULTICAST|RTCF_LOCAL)))
705                 score |= (1<<30);
706
707         return score;
708 }
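/*
 * Larger scores mean "more worth keeping": fresh, valuable, output/unicast
 * entries score highest.  rt_intern_hash() remembers the unreferenced entry
 * with the lowest score in a chain and evicts it when the chain grows
 * beyond ip_rt_gc_elasticity.
 */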
709
710 static inline bool rt_caching(const struct net *net)
711 {
712         return net->ipv4.current_rt_cache_rebuild_count <=
713                 net->ipv4.sysctl_rt_cache_rebuild_count;
714 }
715
716 static inline bool compare_hash_inputs(const struct rtable *rt1,
717                                        const struct rtable *rt2)
718 {
719         return ((((__force u32)rt1->rt_key_dst ^ (__force u32)rt2->rt_key_dst) |
720                 ((__force u32)rt1->rt_key_src ^ (__force u32)rt2->rt_key_src) |
721                 (rt1->rt_iif ^ rt2->rt_iif)) == 0);
722 }
723
724 static inline int compare_keys(struct rtable *rt1, struct rtable *rt2)
725 {
726         return (((__force u32)rt1->rt_key_dst ^ (__force u32)rt2->rt_key_dst) |
727                 ((__force u32)rt1->rt_key_src ^ (__force u32)rt2->rt_key_src) |
728                 (rt1->rt_mark ^ rt2->rt_mark) |
729                 (rt1->rt_key_tos ^ rt2->rt_key_tos) |
730                 (rt1->rt_oif ^ rt2->rt_oif) |
731                 (rt1->rt_iif ^ rt2->rt_iif)) == 0;
732 }
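/*
 * Both comparison helpers above fold the flow key fields together with
 * XOR/OR so the whole key is tested with a single comparison against zero:
 * the result is zero only if every field matches.
 */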
733
734 static inline int compare_netns(struct rtable *rt1, struct rtable *rt2)
735 {
736         return net_eq(dev_net(rt1->dst.dev), dev_net(rt2->dst.dev));
737 }
738
739 static inline int rt_is_expired(struct rtable *rth)
740 {
741         return rth->rt_genid != rt_genid(dev_net(rth->dst.dev));
742 }
743
744 /*
745  * Perform a full scan of the hash table and free all entries.
746  * Can be called from a softirq or from process context.
747  * In the latter case, we want to reschedule if necessary.
748  */
749 static void rt_do_flush(struct net *net, int process_context)
750 {
751         unsigned int i;
752         struct rtable *rth, *next;
753
754         for (i = 0; i <= rt_hash_mask; i++) {
755                 struct rtable __rcu **pprev;
756                 struct rtable *list;
757
758                 if (process_context && need_resched())
759                         cond_resched();
760                 rth = rcu_dereference_raw(rt_hash_table[i].chain);
761                 if (!rth)
762                         continue;
763
764                 spin_lock_bh(rt_hash_lock_addr(i));
765
766                 list = NULL;
767                 pprev = &rt_hash_table[i].chain;
768                 rth = rcu_dereference_protected(*pprev,
769                         lockdep_is_held(rt_hash_lock_addr(i)));
770
771                 while (rth) {
772                         next = rcu_dereference_protected(rth->dst.rt_next,
773                                 lockdep_is_held(rt_hash_lock_addr(i)));
774
775                         if (!net ||
776                             net_eq(dev_net(rth->dst.dev), net)) {
777                                 rcu_assign_pointer(*pprev, next);
778                                 rcu_assign_pointer(rth->dst.rt_next, list);
779                                 list = rth;
780                         } else {
781                                 pprev = &rth->dst.rt_next;
782                         }
783                         rth = next;
784                 }
785
786                 spin_unlock_bh(rt_hash_lock_addr(i));
787
788                 for (; list; list = next) {
789                         next = rcu_dereference_protected(list->dst.rt_next, 1);
790                         rt_free(list);
791                 }
792         }
793 }
794
795 /*
796  * While freeing expired entries, we compute average chain length
797  * and standard deviation, using fixed-point arithmetic.
798  * This gives an estimate of rt_chain_length_max:
799  *  rt_chain_length_max = max(elasticity, AVG + 4*SD)
800  * We use 3 bits for the fractional part, and 29 (or 61) for the magnitude.
801  */
802
803 #define FRACT_BITS 3
804 #define ONE (1UL << FRACT_BITS)
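/*
 * Worked example of this fixed-point format: with FRACT_BITS = 3, ONE = 8,
 * a chain length of 2.5 is stored as 2.5 * ONE = 20, and shifting right by
 * FRACT_BITS recovers the integer part (20 >> 3 = 2).
 */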
805
806 /*
807  * Given a hash chain and an item in this hash chain,
808  * find if a previous entry has the same hash_inputs
809  * (but differs on tos, mark or oif)
810  * Returns 0 if an alias is found.
811  * Returns ONE if rth has no alias before itself.
812  */
813 static int has_noalias(const struct rtable *head, const struct rtable *rth)
814 {
815         const struct rtable *aux = head;
816
817         while (aux != rth) {
818                 if (compare_hash_inputs(aux, rth))
819                         return 0;
820                 aux = rcu_dereference_protected(aux->dst.rt_next, 1);
821         }
822         return ONE;
823 }
824
825 /*
826  * Perturbation of rt_genid by a small quantity [1..256].
827  * Using 8 bits of shuffling ensures we can call rt_cache_invalidate()
828  * many times (2^24) before reusing a recent rt_genid.
829  * The Jenkins hash is strong enough that little changes of rt_genid are OK.
830  */
831 static void rt_cache_invalidate(struct net *net)
832 {
833         unsigned char shuffle;
834
835         get_random_bytes(&shuffle, sizeof(shuffle));
836         atomic_add(shuffle + 1U, &net->ipv4.rt_genid);
837 }
838
839 /*
840  * delay < 0  : invalidate cache (fast : entries will be deleted later)
841  * delay >= 0 : invalidate & flush cache (can be long)
842  */
843 void rt_cache_flush(struct net *net, int delay)
844 {
845         rt_cache_invalidate(net);
846         if (delay >= 0)
847                 rt_do_flush(net, !in_softirq());
848 }
849
850 /* Flush previous cache invalidated entries from the cache */
851 void rt_cache_flush_batch(struct net *net)
852 {
853         rt_do_flush(net, !in_softirq());
854 }
855
856 static void rt_emergency_hash_rebuild(struct net *net)
857 {
858         if (net_ratelimit())
859                 printk(KERN_WARNING "Route hash chain too long!\n");
860         rt_cache_invalidate(net);
861 }
862
863 /*
864    Short description of GC goals.
865
866    We want an algorithm that keeps the routing cache
867    at some equilibrium point, where the number of aged-off entries
868    is approximately equal to the number of newly generated ones.
869
870    The current expiration strength is the variable "expire".
871    We try to adjust it dynamically, so that when the network
872    is idle, expire is large enough to keep plenty of warm entries,
873    and when load increases it shrinks to limit the cache size.
874  */
875
876 static int rt_garbage_collect(struct dst_ops *ops)
877 {
878         static unsigned long expire = RT_GC_TIMEOUT;
879         static unsigned long last_gc;
880         static int rover;
881         static int equilibrium;
882         struct rtable *rth;
883         struct rtable __rcu **rthp;
884         unsigned long now = jiffies;
885         int goal;
886         int entries = dst_entries_get_fast(&ipv4_dst_ops);
887
888         /*
889          * Garbage collection is pretty expensive,
890          * so do not run it too frequently.
891          */
892
893         RT_CACHE_STAT_INC(gc_total);
894
895         if (now - last_gc < ip_rt_gc_min_interval &&
896             entries < ip_rt_max_size) {
897                 RT_CACHE_STAT_INC(gc_ignored);
898                 goto out;
899         }
900
901         entries = dst_entries_get_slow(&ipv4_dst_ops);
902         /* Calculate the number of entries we want to expire now. */
903         goal = entries - (ip_rt_gc_elasticity << rt_hash_log);
904         if (goal <= 0) {
905                 if (equilibrium < ipv4_dst_ops.gc_thresh)
906                         equilibrium = ipv4_dst_ops.gc_thresh;
907                 goal = entries - equilibrium;
908                 if (goal > 0) {
909                         equilibrium += min_t(unsigned int, goal >> 1, rt_hash_mask + 1);
910                         goal = entries - equilibrium;
911                 }
912         } else {
913                 /* We are in a dangerous area. Try to reduce the cache really
914                  * aggressively.
915                  */
916                 goal = max_t(unsigned int, goal >> 1, rt_hash_mask + 1);
917                 equilibrium = entries - goal;
918         }
919
920         if (now - last_gc >= ip_rt_gc_min_interval)
921                 last_gc = now;
922
923         if (goal <= 0) {
924                 equilibrium += goal;
925                 goto work_done;
926         }
927
928         do {
929                 int i, k;
930
931                 for (i = rt_hash_mask, k = rover; i >= 0; i--) {
932                         unsigned long tmo = expire;
933
934                         k = (k + 1) & rt_hash_mask;
935                         rthp = &rt_hash_table[k].chain;
936                         spin_lock_bh(rt_hash_lock_addr(k));
937                         while ((rth = rcu_dereference_protected(*rthp,
938                                         lockdep_is_held(rt_hash_lock_addr(k)))) != NULL) {
939                                 if (!rt_is_expired(rth) &&
940                                         !rt_may_expire(rth, tmo, expire)) {
941                                         tmo >>= 1;
942                                         rthp = &rth->dst.rt_next;
943                                         continue;
944                                 }
945                                 *rthp = rth->dst.rt_next;
946                                 rt_free(rth);
947                                 goal--;
948                         }
949                         spin_unlock_bh(rt_hash_lock_addr(k));
950                         if (goal <= 0)
951                                 break;
952                 }
953                 rover = k;
954
955                 if (goal <= 0)
956                         goto work_done;
957
958                 /* Goal is not achieved. We stop the process if:
959
960                    - expire is reduced to zero; otherwise, expire is halved.
961                    - the table is not full.
962                    - we are called from interrupt context.
963                    - the jiffies check is just a fallback/debug loop breaker;
964                      we will not spin here for a long time in any case.
965                  */
966
967                 RT_CACHE_STAT_INC(gc_goal_miss);
968
969                 if (expire == 0)
970                         break;
971
972                 expire >>= 1;
973
974                 if (dst_entries_get_fast(&ipv4_dst_ops) < ip_rt_max_size)
975                         goto out;
976         } while (!in_softirq() && time_before_eq(jiffies, now));
977
978         if (dst_entries_get_fast(&ipv4_dst_ops) < ip_rt_max_size)
979                 goto out;
980         if (dst_entries_get_slow(&ipv4_dst_ops) < ip_rt_max_size)
981                 goto out;
982         if (net_ratelimit())
983                 printk(KERN_WARNING "dst cache overflow\n");
984         RT_CACHE_STAT_INC(gc_dst_overflow);
985         return 1;
986
987 work_done:
988         expire += ip_rt_gc_min_interval;
989         if (expire > ip_rt_gc_timeout ||
990             dst_entries_get_fast(&ipv4_dst_ops) < ipv4_dst_ops.gc_thresh ||
991             dst_entries_get_slow(&ipv4_dst_ops) < ipv4_dst_ops.gc_thresh)
992                 expire = ip_rt_gc_timeout;
993 out:    return 0;
994 }
995
996 /*
997  * Returns the number of entries in a hash chain that have distinct hash_inputs
998  */
999 static int slow_chain_length(const struct rtable *head)
1000 {
1001         int length = 0;
1002         const struct rtable *rth = head;
1003
1004         while (rth) {
1005                 length += has_noalias(head, rth);
1006                 rth = rcu_dereference_protected(rth->dst.rt_next, 1);
1007         }
1008         return length >> FRACT_BITS;
1009 }
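/*
 * Since has_noalias() contributes ONE (1 << FRACT_BITS) per entry whose
 * hash inputs are unique within the chain, the final shift converts the
 * fixed-point sum back into a plain count of such entries.
 */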
1010
1011 static int rt_bind_neighbour(struct rtable *rt)
1012 {
1013         static const __be32 inaddr_any = 0;
1014         struct net_device *dev = rt->dst.dev;
1015         struct neigh_table *tbl = &arp_tbl;
1016         const __be32 *nexthop;
1017         struct neighbour *n;
1018
1019 #if defined(CONFIG_ATM_CLIP) || defined(CONFIG_ATM_CLIP_MODULE)
1020         if (dev->type == ARPHRD_ATM)
1021                 tbl = clip_tbl_hook;
1022 #endif
1023         nexthop = &rt->rt_gateway;
1024         if (dev->flags & (IFF_LOOPBACK | IFF_POINTOPOINT))
1025                 nexthop = &inaddr_any;
1026         n = ipv4_neigh_lookup(tbl, dev, nexthop);
1027         if (IS_ERR(n))
1028                 return PTR_ERR(n);
1029         rt->dst.neighbour = n;
1030
1031         return 0;
1032 }
1033
1034 static struct rtable *rt_intern_hash(unsigned hash, struct rtable *rt,
1035                                      struct sk_buff *skb, int ifindex)
1036 {
1037         struct rtable   *rth, *cand;
1038         struct rtable __rcu **rthp, **candp;
1039         unsigned long   now;
1040         u32             min_score;
1041         int             chain_length;
1042         int attempts = !in_softirq();
1043
1044 restart:
1045         chain_length = 0;
1046         min_score = ~(u32)0;
1047         cand = NULL;
1048         candp = NULL;
1049         now = jiffies;
1050
1051         if (!rt_caching(dev_net(rt->dst.dev))) {
1052                 /*
1053                  * If we're not caching, just tell the caller we
1054                  * were successful and don't touch the route.  The
1055                  * caller holds the sole reference to the cache entry, and
1056                  * it will be released when the caller is done with it.
1057                  * If we drop it here, the callers have no way to resolve routes
1058                  * when we're not caching.  Instead, just point *rp at rt, so
1059                  * the caller gets a single use out of the route
1060                  * Note that we do rt_free on this new route entry, so that
1061                  * once its refcount hits zero, we are still able to reap it
1062                  * (Thanks Alexey)
1063                  * Note: To avoid expensive rcu stuff for this uncached dst,
1064                  * we set DST_NOCACHE so that dst_release() can free dst without
1065                  * waiting for a grace period.
1066                  */
1067
1068                 rt->dst.flags |= DST_NOCACHE;
1069                 if (rt->rt_type == RTN_UNICAST || rt_is_output_route(rt)) {
1070                         int err = rt_bind_neighbour(rt);
1071                         if (err) {
1072                                 if (net_ratelimit())
1073                                         printk(KERN_WARNING
1074                                             "Neighbour table failure & not caching routes.\n");
1075                                 ip_rt_put(rt);
1076                                 return ERR_PTR(err);
1077                         }
1078                 }
1079
1080                 goto skip_hashing;
1081         }
1082
1083         rthp = &rt_hash_table[hash].chain;
1084
1085         spin_lock_bh(rt_hash_lock_addr(hash));
1086         while ((rth = rcu_dereference_protected(*rthp,
1087                         lockdep_is_held(rt_hash_lock_addr(hash)))) != NULL) {
1088                 if (rt_is_expired(rth)) {
1089                         *rthp = rth->dst.rt_next;
1090                         rt_free(rth);
1091                         continue;
1092                 }
1093                 if (compare_keys(rth, rt) && compare_netns(rth, rt)) {
1094                         /* Put it first */
1095                         *rthp = rth->dst.rt_next;
1096                         /*
1097                          * Since lookup is lockfree, the deletion
1098                          * must be visible to another weakly ordered CPU before
1099                          * the insertion at the start of the hash chain.
1100                          */
1101                         rcu_assign_pointer(rth->dst.rt_next,
1102                                            rt_hash_table[hash].chain);
1103                         /*
1104                          * Since lookup is lockfree, the update writes
1105                          * must be ordered for consistency on SMP.
1106                          */
1107                         rcu_assign_pointer(rt_hash_table[hash].chain, rth);
1108
1109                         dst_use(&rth->dst, now);
1110                         spin_unlock_bh(rt_hash_lock_addr(hash));
1111
1112                         rt_drop(rt);
1113                         if (skb)
1114                                 skb_dst_set(skb, &rth->dst);
1115                         return rth;
1116                 }
1117
1118                 if (!atomic_read(&rth->dst.__refcnt)) {
1119                         u32 score = rt_score(rth);
1120
1121                         if (score <= min_score) {
1122                                 cand = rth;
1123                                 candp = rthp;
1124                                 min_score = score;
1125                         }
1126                 }
1127
1128                 chain_length++;
1129
1130                 rthp = &rth->dst.rt_next;
1131         }
1132
1133         if (cand) {
1134                 /* ip_rt_gc_elasticity used to be the average chain length;
1135                  * when it is exceeded, gc becomes really aggressive.
1136                  *
1137                  * The second limit is less certain. At the moment it allows
1138                  * only 2 entries per bucket. We will see.
1139                  */
1140                 if (chain_length > ip_rt_gc_elasticity) {
1141                         *candp = cand->dst.rt_next;
1142                         rt_free(cand);
1143                 }
1144         } else {
1145                 if (chain_length > rt_chain_length_max &&
1146                     slow_chain_length(rt_hash_table[hash].chain) > rt_chain_length_max) {
1147                         struct net *net = dev_net(rt->dst.dev);
1148                         int num = ++net->ipv4.current_rt_cache_rebuild_count;
1149                         if (!rt_caching(net)) {
1150                                 printk(KERN_WARNING "%s: %d rebuilds is over limit, route caching disabled\n",
1151                                         rt->dst.dev->name, num);
1152                         }
1153                         rt_emergency_hash_rebuild(net);
1154                         spin_unlock_bh(rt_hash_lock_addr(hash));
1155
1156                         hash = rt_hash(rt->rt_key_dst, rt->rt_key_src,
1157                                         ifindex, rt_genid(net));
1158                         goto restart;
1159                 }
1160         }
1161
1162         /* Try to bind the route to an ARP neighbour only if it is an output
1163            route or on the unicast forwarding path.
1164          */
1165         if (rt->rt_type == RTN_UNICAST || rt_is_output_route(rt)) {
1166                 int err = rt_bind_neighbour(rt);
1167                 if (err) {
1168                         spin_unlock_bh(rt_hash_lock_addr(hash));
1169
1170                         if (err != -ENOBUFS) {
1171                                 rt_drop(rt);
1172                                 return ERR_PTR(err);
1173                         }
1174
1175                         /* Neighbour tables are full and nothing
1176                            can be released. Try to shrink the route cache;
1177                            it most likely holds some neighbour records.
1178                          */
1179                         if (attempts-- > 0) {
1180                                 int saved_elasticity = ip_rt_gc_elasticity;
1181                                 int saved_int = ip_rt_gc_min_interval;
1182                                 ip_rt_gc_elasticity     = 1;
1183                                 ip_rt_gc_min_interval   = 0;
1184                                 rt_garbage_collect(&ipv4_dst_ops);
1185                                 ip_rt_gc_min_interval   = saved_int;
1186                                 ip_rt_gc_elasticity     = saved_elasticity;
1187                                 goto restart;
1188                         }
1189
1190                         if (net_ratelimit())
1191                                 printk(KERN_WARNING "ipv4: Neighbour table overflow.\n");
1192                         rt_drop(rt);
1193                         return ERR_PTR(-ENOBUFS);
1194                 }
1195         }
1196
1197         rt->dst.rt_next = rt_hash_table[hash].chain;
1198
1199         /*
1200          * Since lookup is lockfree, we must make sure
1201          * previous writes to rt are committed to memory
1202          * before making rt visible to other CPUS.
1203          */
1204         rcu_assign_pointer(rt_hash_table[hash].chain, rt);
1205
1206         spin_unlock_bh(rt_hash_lock_addr(hash));
1207
1208 skip_hashing:
1209         if (skb)
1210                 skb_dst_set(skb, &rt->dst);
1211         return rt;
1212 }
1213
1214 static atomic_t __rt_peer_genid = ATOMIC_INIT(0);
1215
1216 static u32 rt_peer_genid(void)
1217 {
1218         return atomic_read(&__rt_peer_genid);
1219 }
1220
1221 void rt_bind_peer(struct rtable *rt, __be32 daddr, int create)
1222 {
1223         struct inet_peer *peer;
1224
1225         peer = inet_getpeer_v4(daddr, create);
1226
1227         if (peer && cmpxchg(&rt->peer, NULL, peer) != NULL)
1228                 inet_putpeer(peer);
1229         else
1230                 rt->rt_peer_genid = rt_peer_genid();
1231 }
1232
1233 /*
1234  * Peer allocation may fail only in serious out-of-memory conditions.  However
1235  * we can still generate some output.
1236  * Random ID selection looks a bit dangerous because we have no chance of
1237  * selecting an ID that stays unique over a reasonable period of time.
1238  * But a broken packet identifier may be better than no packet at all.
1239  */
1240 static void ip_select_fb_ident(struct iphdr *iph)
1241 {
1242         static DEFINE_SPINLOCK(ip_fb_id_lock);
1243         static u32 ip_fallback_id;
1244         u32 salt;
1245
1246         spin_lock_bh(&ip_fb_id_lock);
1247         salt = secure_ip_id((__force __be32)ip_fallback_id ^ iph->daddr);
1248         iph->id = htons(salt & 0xFFFF);
1249         ip_fallback_id = salt;
1250         spin_unlock_bh(&ip_fb_id_lock);
1251 }
1252
1253 void __ip_select_ident(struct iphdr *iph, struct dst_entry *dst, int more)
1254 {
1255         struct rtable *rt = (struct rtable *) dst;
1256
1257         if (rt) {
1258                 if (rt->peer == NULL)
1259                         rt_bind_peer(rt, rt->rt_dst, 1);
1260
1261                 /* If peer is attached to destination, it is never detached,
1262                    so we need not grab a lock to dereference it.
1263                  */
1264                 if (rt->peer) {
1265                         iph->id = htons(inet_getid(rt->peer, more));
1266                         return;
1267                 }
1268         } else
1269                 printk(KERN_DEBUG "rt_bind_peer(0) @%p\n",
1270                        __builtin_return_address(0));
1271
1272         ip_select_fb_ident(iph);
1273 }
1274 EXPORT_SYMBOL(__ip_select_ident);
1275
1276 static void rt_del(unsigned hash, struct rtable *rt)
1277 {
1278         struct rtable __rcu **rthp;
1279         struct rtable *aux;
1280
1281         rthp = &rt_hash_table[hash].chain;
1282         spin_lock_bh(rt_hash_lock_addr(hash));
1283         ip_rt_put(rt);
1284         while ((aux = rcu_dereference_protected(*rthp,
1285                         lockdep_is_held(rt_hash_lock_addr(hash)))) != NULL) {
1286                 if (aux == rt || rt_is_expired(aux)) {
1287                         *rthp = aux->dst.rt_next;
1288                         rt_free(aux);
1289                         continue;
1290                 }
1291                 rthp = &aux->dst.rt_next;
1292         }
1293         spin_unlock_bh(rt_hash_lock_addr(hash));
1294 }
1295
1296 /* called in rcu_read_lock() section */
1297 void ip_rt_redirect(__be32 old_gw, __be32 daddr, __be32 new_gw,
1298                     __be32 saddr, struct net_device *dev)
1299 {
1300         struct in_device *in_dev = __in_dev_get_rcu(dev);
1301         struct inet_peer *peer;
1302         struct net *net;
1303
1304         if (!in_dev)
1305                 return;
1306
1307         net = dev_net(dev);
1308         if (new_gw == old_gw || !IN_DEV_RX_REDIRECTS(in_dev) ||
1309             ipv4_is_multicast(new_gw) || ipv4_is_lbcast(new_gw) ||
1310             ipv4_is_zeronet(new_gw))
1311                 goto reject_redirect;
1312
1313         if (!IN_DEV_SHARED_MEDIA(in_dev)) {
1314                 if (!inet_addr_onlink(in_dev, new_gw, old_gw))
1315                         goto reject_redirect;
1316                 if (IN_DEV_SEC_REDIRECTS(in_dev) && ip_fib_check_default(new_gw, dev))
1317                         goto reject_redirect;
1318         } else {
1319                 if (inet_addr_type(net, new_gw) != RTN_UNICAST)
1320                         goto reject_redirect;
1321         }
1322
1323         peer = inet_getpeer_v4(daddr, 1);
1324         if (peer) {
1325                 peer->redirect_learned.a4 = new_gw;
1326
1327                 inet_putpeer(peer);
1328
1329                 atomic_inc(&__rt_peer_genid);
1330         }
1331         return;
1332
1333 reject_redirect:
1334 #ifdef CONFIG_IP_ROUTE_VERBOSE
1335         if (IN_DEV_LOG_MARTIANS(in_dev) && net_ratelimit())
1336                 printk(KERN_INFO "Redirect from %pI4 on %s about %pI4 ignored.\n"
1337                         "  Advised path = %pI4 -> %pI4\n",
1338                        &old_gw, dev->name, &new_gw,
1339                        &saddr, &daddr);
1340 #endif
1341         ;
1342 }
1343
1344 static bool peer_pmtu_expired(struct inet_peer *peer)
1345 {
1346         unsigned long orig = ACCESS_ONCE(peer->pmtu_expires);
1347
1348         return orig &&
1349                time_after_eq(jiffies, orig) &&
1350                cmpxchg(&peer->pmtu_expires, orig, 0) == orig;
1351 }
1352
1353 static bool peer_pmtu_cleaned(struct inet_peer *peer)
1354 {
1355         unsigned long orig = ACCESS_ONCE(peer->pmtu_expires);
1356
1357         return orig &&
1358                cmpxchg(&peer->pmtu_expires, orig, 0) == orig;
1359 }
1360
1361 static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst)
1362 {
1363         struct rtable *rt = (struct rtable *)dst;
1364         struct dst_entry *ret = dst;
1365
1366         if (rt) {
1367                 if (dst->obsolete > 0) {
1368                         ip_rt_put(rt);
1369                         ret = NULL;
1370                 } else if (rt->rt_flags & RTCF_REDIRECTED) {
1371                         unsigned hash = rt_hash(rt->rt_key_dst, rt->rt_key_src,
1372                                                 rt->rt_oif,
1373                                                 rt_genid(dev_net(dst->dev)));
1374                         rt_del(hash, rt);
1375                         ret = NULL;
1376                 } else if (rt->peer && peer_pmtu_expired(rt->peer)) {
1377                         dst_metric_set(dst, RTAX_MTU, rt->peer->pmtu_orig);
1378                 }
1379         }
1380         return ret;
1381 }
1382
1383 /*
1384  * Algorithm:
1385  *      1. The first ip_rt_redirect_number redirects are sent
1386  *         with exponential backoff, then we stop sending them at all,
1387  *         assuming that the host ignores our redirects.
1388  *      2. If we did not see packets requiring redirects
1389  *         during ip_rt_redirect_silence, we assume that the host
1390  *         forgot redirected route and start to send redirects again.
1391  *
1392  * This algorithm is much cheaper and more intelligent than dumb load limiting
1393  * in icmp.c.
1394  *
1395  * NOTE. Do not forget to inhibit load limiting for redirects (redundant)
1396  * and "frag. need" (breaks PMTU discovery) in icmp.c.
1397  */
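/*
 * Worked example with the defaults above (assuming HZ=1000): the first
 * redirect is sent immediately; each following one is held back by
 * ip_rt_redirect_load << rate_tokens, i.e. 40ms, 80ms, 160ms, ... up to
 * about 5.1s before the 9th.  Once ip_rt_redirect_number redirects have
 * been ignored, we stay silent until ip_rt_redirect_silence (~20s) passes.
 */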
1398
1399 void ip_rt_send_redirect(struct sk_buff *skb)
1400 {
1401         struct rtable *rt = skb_rtable(skb);
1402         struct in_device *in_dev;
1403         struct inet_peer *peer;
1404         int log_martians;
1405
1406         rcu_read_lock();
1407         in_dev = __in_dev_get_rcu(rt->dst.dev);
1408         if (!in_dev || !IN_DEV_TX_REDIRECTS(in_dev)) {
1409                 rcu_read_unlock();
1410                 return;
1411         }
1412         log_martians = IN_DEV_LOG_MARTIANS(in_dev);
1413         rcu_read_unlock();
1414
1415         if (!rt->peer)
1416                 rt_bind_peer(rt, rt->rt_dst, 1);
1417         peer = rt->peer;
1418         if (!peer) {
1419                 icmp_send(skb, ICMP_REDIRECT, ICMP_REDIR_HOST, rt->rt_gateway);
1420                 return;
1421         }
1422
1423         /* No redirected packets during ip_rt_redirect_silence;
1424          * reset the algorithm.
1425          */
1426         if (time_after(jiffies, peer->rate_last + ip_rt_redirect_silence))
1427                 peer->rate_tokens = 0;
1428
1429         /* Too many ignored redirects; do not send anything and
1430          * set dst.rate_last to the last seen redirected packet.
1431          */
1432         if (peer->rate_tokens >= ip_rt_redirect_number) {
1433                 peer->rate_last = jiffies;
1434                 return;
1435         }
1436
1437         /* Check for load limit; set rate_last to the latest sent
1438          * redirect.
1439          */
1440         if (peer->rate_tokens == 0 ||
1441             time_after(jiffies,
1442                        (peer->rate_last +
1443                         (ip_rt_redirect_load << peer->rate_tokens)))) {
1444                 icmp_send(skb, ICMP_REDIRECT, ICMP_REDIR_HOST, rt->rt_gateway);
1445                 peer->rate_last = jiffies;
1446                 ++peer->rate_tokens;
1447 #ifdef CONFIG_IP_ROUTE_VERBOSE
1448                 if (log_martians &&
1449                     peer->rate_tokens == ip_rt_redirect_number &&
1450                     net_ratelimit())
1451                         printk(KERN_WARNING "host %pI4/if%d ignores redirects for %pI4 to %pI4.\n",
1452                                &ip_hdr(skb)->saddr, rt->rt_iif,
1453                                 &rt->rt_dst, &rt->rt_gateway);
1454 #endif
1455         }
1456 }
1457
1458 static int ip_error(struct sk_buff *skb)
1459 {
1460         struct rtable *rt = skb_rtable(skb);
1461         struct inet_peer *peer;
1462         unsigned long now;
1463         bool send;
1464         int code;
1465
1466         switch (rt->dst.error) {
1467         case EINVAL:
1468         default:
1469                 goto out;
1470         case EHOSTUNREACH:
1471                 code = ICMP_HOST_UNREACH;
1472                 break;
1473         case ENETUNREACH:
1474                 code = ICMP_NET_UNREACH;
1475                 IP_INC_STATS_BH(dev_net(rt->dst.dev),
1476                                 IPSTATS_MIB_INNOROUTES);
1477                 break;
1478         case EACCES:
1479                 code = ICMP_PKT_FILTERED;
1480                 break;
1481         }
1482
1483         if (!rt->peer)
1484                 rt_bind_peer(rt, rt->rt_dst, 1);
1485         peer = rt->peer;
1486
1487         send = true;
1488         if (peer) {
1489                 now = jiffies;
1490                 peer->rate_tokens += now - peer->rate_last;
1491                 if (peer->rate_tokens > ip_rt_error_burst)
1492                         peer->rate_tokens = ip_rt_error_burst;
1493                 peer->rate_last = now;
1494                 if (peer->rate_tokens >= ip_rt_error_cost)
1495                         peer->rate_tokens -= ip_rt_error_cost;
1496                 else
1497                         send = false;
1498         }
1499         if (send)
1500                 icmp_send(skb, ICMP_DEST_UNREACH, code, 0);
1501
1502 out:    kfree_skb(skb);
1503         return 0;
1504 }
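
/*
 * The peer-based limiter above is a token bucket: rate_tokens accrues one
 * token per jiffy since rate_last, capped at ip_rt_error_burst, and every
 * ICMP_DEST_UNREACH actually sent costs ip_rt_error_cost tokens.  With the
 * usual defaults (assumed here: ip_rt_error_cost = HZ and ip_rt_error_burst
 * = 5 * HZ, both sysctl-tunable) this allows a burst of about five errors
 * and a sustained rate of roughly one per second per destination.
 */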
1505
1506 /*
1507  *      The last two values are not from the RFC but
1508  *      are needed for AMPRnet AX.25 paths.
1509  */
1510
1511 static const unsigned short mtu_plateau[] =
1512 {32000, 17914, 8166, 4352, 2002, 1492, 576, 296, 216, 128 };
1513
1514 static inline unsigned short guess_mtu(unsigned short old_mtu)
1515 {
1516         int i;
1517
1518         for (i = 0; i < ARRAY_SIZE(mtu_plateau); i++)
1519                 if (old_mtu > mtu_plateau[i])
1520                         return mtu_plateau[i];
1521         return 68;
1522 }
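
/*
 * guess_mtu() implements the RFC 1191 plateau search: it returns the first
 * plateau strictly below old_mtu (the total length of the datagram that
 * needed fragmenting), falling back to the IPv4 minimum of 68.  For example
 * an old_mtu of 1500 is guessed down to 1492, and 576 down to 296.
 */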
1523
1524 unsigned short ip_rt_frag_needed(struct net *net, const struct iphdr *iph,
1525                                  unsigned short new_mtu,
1526                                  struct net_device *dev)
1527 {
1528         unsigned short old_mtu = ntohs(iph->tot_len);
1529         unsigned short est_mtu = 0;
1530         struct inet_peer *peer;
1531
1532         peer = inet_getpeer_v4(iph->daddr, 1);
1533         if (peer) {
1534                 unsigned short mtu = new_mtu;
1535
1536                 if (new_mtu < 68 || new_mtu >= old_mtu) {
1537                         /* BSD 4.2 derived systems incorrectly adjust
1538                          * tot_len by the IP header length, and report
1539                          * a zero MTU in the ICMP message.
1540                          */
1541                         if (mtu == 0 &&
1542                             old_mtu >= 68 + (iph->ihl << 2))
1543                                 old_mtu -= iph->ihl << 2;
1544                         mtu = guess_mtu(old_mtu);
1545                 }
1546
1547                 if (mtu < ip_rt_min_pmtu)
1548                         mtu = ip_rt_min_pmtu;
1549                 if (!peer->pmtu_expires || mtu < peer->pmtu_learned) {
1550                         unsigned long pmtu_expires;
1551
1552                         pmtu_expires = jiffies + ip_rt_mtu_expires;
1553                         if (!pmtu_expires)
1554                                 pmtu_expires = 1UL;
1555
1556                         est_mtu = mtu;
1557                         peer->pmtu_learned = mtu;
1558                         peer->pmtu_expires = pmtu_expires;
1559                 }
1560
1561                 inet_putpeer(peer);
1562
1563                 atomic_inc(&__rt_peer_genid);
1564         }
1565         return est_mtu ? : new_mtu;
1566 }
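
/*
 * Learned path MTUs live on the shared inet_peer, not on an individual
 * cache entry: pmtu_learned plus a pmtu_expires deadline ip_rt_mtu_expires
 * from now.  Bumping __rt_peer_genid makes every cached route notice the
 * change on its next ipv4_dst_check()/check_peer_pmtu() and lower (or later
 * restore) its RTAX_MTU metric accordingly.
 */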
1567
1568 static void check_peer_pmtu(struct dst_entry *dst, struct inet_peer *peer)
1569 {
1570         unsigned long expires = ACCESS_ONCE(peer->pmtu_expires);
1571
1572         if (!expires)
1573                 return;
1574         if (time_before(jiffies, expires)) {
1575                 u32 orig_dst_mtu = dst_mtu(dst);
1576                 if (peer->pmtu_learned < orig_dst_mtu) {
1577                         if (!peer->pmtu_orig)
1578                                 peer->pmtu_orig = dst_metric_raw(dst, RTAX_MTU);
1579                         dst_metric_set(dst, RTAX_MTU, peer->pmtu_learned);
1580                 }
1581         } else if (cmpxchg(&peer->pmtu_expires, expires, 0) == expires)
1582                 dst_metric_set(dst, RTAX_MTU, peer->pmtu_orig);
1583 }
1584
1585 static void ip_rt_update_pmtu(struct dst_entry *dst, u32 mtu)
1586 {
1587         struct rtable *rt = (struct rtable *) dst;
1588         struct inet_peer *peer;
1589
1590         dst_confirm(dst);
1591
1592         if (!rt->peer)
1593                 rt_bind_peer(rt, rt->rt_dst, 1);
1594         peer = rt->peer;
1595         if (peer) {
1596                 unsigned long pmtu_expires = ACCESS_ONCE(peer->pmtu_expires);
1597
1598                 if (mtu < ip_rt_min_pmtu)
1599                         mtu = ip_rt_min_pmtu;
1600                 if (!pmtu_expires || mtu < peer->pmtu_learned) {
1601
1602                         pmtu_expires = jiffies + ip_rt_mtu_expires;
1603                         if (!pmtu_expires)
1604                                 pmtu_expires = 1UL;
1605
1606                         peer->pmtu_learned = mtu;
1607                         peer->pmtu_expires = pmtu_expires;
1608
1609                         atomic_inc(&__rt_peer_genid);
1610                         rt->rt_peer_genid = rt_peer_genid();
1611                 }
1612                 check_peer_pmtu(dst, peer);
1613         }
1614 }
1615
1616 static int check_peer_redir(struct dst_entry *dst, struct inet_peer *peer)
1617 {
1618         struct rtable *rt = (struct rtable *) dst;
1619         __be32 orig_gw = rt->rt_gateway;
1620
1621         dst_confirm(&rt->dst);
1622
1623         neigh_release(rt->dst.neighbour);
1624         rt->dst.neighbour = NULL;
1625
1626         rt->rt_gateway = peer->redirect_learned.a4;
1627         if (rt_bind_neighbour(rt) ||
1628             !(rt->dst.neighbour->nud_state & NUD_VALID)) {
1629                 if (rt->dst.neighbour)
1630                         neigh_event_send(rt->dst.neighbour, NULL);
1631                 rt->rt_gateway = orig_gw;
1632                 return -EAGAIN;
1633         } else {
1634                 rt->rt_flags |= RTCF_REDIRECTED;
1635                 call_netevent_notifiers(NETEVENT_NEIGH_UPDATE,
1636                                         rt->dst.neighbour);
1637         }
1638         return 0;
1639 }
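
/*
 * check_peer_redir() retargets an existing cache entry at a gateway learned
 * from an ICMP redirect: the old neighbour is released, a new one is bound
 * for peer->redirect_learned.a4, and if binding fails or the neighbour is
 * not yet NUD_VALID the original gateway is restored and -EAGAIN returned
 * so the caller can drop the entry instead.
 */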
1640
1641 static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie)
1642 {
1643         struct rtable *rt = (struct rtable *) dst;
1644
1645         if (rt_is_expired(rt))
1646                 return NULL;
1647         if (rt->rt_peer_genid != rt_peer_genid()) {
1648                 struct inet_peer *peer;
1649
1650                 if (!rt->peer)
1651                         rt_bind_peer(rt, rt->rt_dst, 0);
1652
1653                 peer = rt->peer;
1654                 if (peer) {
1655                         check_peer_pmtu(dst, peer);
1656
1657                         if (peer->redirect_learned.a4 &&
1658                             peer->redirect_learned.a4 != rt->rt_gateway) {
1659                                 if (check_peer_redir(dst, peer))
1660                                         return NULL;
1661                         }
1662                 }
1663
1664                 rt->rt_peer_genid = rt_peer_genid();
1665         }
1666         return dst;
1667 }
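
/*
 * ipv4_dst_check() is the dst_ops->check hook: a cached route remains valid
 * only while its rt_genid matches, and whenever the peer generation counter
 * has moved it re-applies any peer-learned PMTU or redirect before handing
 * the dst back (or returns NULL so a fresh lookup is done).
 */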
1668
1669 static void ipv4_dst_destroy(struct dst_entry *dst)
1670 {
1671         struct rtable *rt = (struct rtable *) dst;
1672         struct inet_peer *peer = rt->peer;
1673
1674         if (rt->fi) {
1675                 fib_info_put(rt->fi);
1676                 rt->fi = NULL;
1677         }
1678         if (peer) {
1679                 rt->peer = NULL;
1680                 inet_putpeer(peer);
1681         }
1682 }
1683
1684
1685 static void ipv4_link_failure(struct sk_buff *skb)
1686 {
1687         struct rtable *rt;
1688
1689         icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_UNREACH, 0);
1690
1691         rt = skb_rtable(skb);
1692         if (rt && rt->peer && peer_pmtu_cleaned(rt->peer))
1693                 dst_metric_set(&rt->dst, RTAX_MTU, rt->peer->pmtu_orig);
1694 }
1695
1696 static int ip_rt_bug(struct sk_buff *skb)
1697 {
1698         printk(KERN_DEBUG "ip_rt_bug: %pI4 -> %pI4, %s\n",
1699                 &ip_hdr(skb)->saddr, &ip_hdr(skb)->daddr,
1700                 skb->dev ? skb->dev->name : "?");
1701         kfree_skb(skb);
1702         WARN_ON(1);
1703         return 0;
1704 }
1705
1706 /*
1707    We do not cache the source address of the outgoing interface,
1708    because it is used only by the IP RR, TS and SRR options,
1709    so it is out of the fast path.
1710
1711    Also remember: "addr" is allowed to be unaligned
1712    in IP options!
1713  */
1714
1715 void ip_rt_get_source(u8 *addr, struct sk_buff *skb, struct rtable *rt)
1716 {
1717         __be32 src;
1718
1719         if (rt_is_output_route(rt))
1720                 src = ip_hdr(skb)->saddr;
1721         else {
1722                 struct fib_result res;
1723                 struct flowi4 fl4;
1724                 struct iphdr *iph;
1725
1726                 iph = ip_hdr(skb);
1727
1728                 memset(&fl4, 0, sizeof(fl4));
1729                 fl4.daddr = iph->daddr;
1730                 fl4.saddr = iph->saddr;
1731                 fl4.flowi4_tos = iph->tos;
1732                 fl4.flowi4_oif = rt->dst.dev->ifindex;
1733                 fl4.flowi4_iif = skb->dev->ifindex;
1734                 fl4.flowi4_mark = skb->mark;
1735
1736                 rcu_read_lock();
1737                 if (fib_lookup(dev_net(rt->dst.dev), &fl4, &res) == 0)
1738                         src = FIB_RES_PREFSRC(dev_net(rt->dst.dev), res);
1739                 else
1740                         src = inet_select_addr(rt->dst.dev, rt->rt_gateway,
1741                                         RT_SCOPE_UNIVERSE);
1742                 rcu_read_unlock();
1743         }
1744         memcpy(addr, &src, 4);
1745 }
1746
1747 #ifdef CONFIG_IP_ROUTE_CLASSID
1748 static void set_class_tag(struct rtable *rt, u32 tag)
1749 {
1750         if (!(rt->dst.tclassid & 0xFFFF))
1751                 rt->dst.tclassid |= tag & 0xFFFF;
1752         if (!(rt->dst.tclassid & 0xFFFF0000))
1753                 rt->dst.tclassid |= tag & 0xFFFF0000;
1754 }
1755 #endif
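
/*
 * set_class_tag() fills each 16-bit half of dst->tclassid only while it is
 * still zero, so when rt_set_nexthop() calls it first with the fib-rules
 * realm and then with the itag, the earlier value wins.
 */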
1756
1757 static unsigned int ipv4_default_advmss(const struct dst_entry *dst)
1758 {
1759         unsigned int advmss = dst_metric_raw(dst, RTAX_ADVMSS);
1760
1761         if (advmss == 0) {
1762                 advmss = max_t(unsigned int, dst->dev->mtu - 40,
1763                                ip_rt_min_advmss);
1764                 if (advmss > 65535 - 40)
1765                         advmss = 65535 - 40;
1766         }
1767         return advmss;
1768 }
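
/*
 * The default advertised MSS is the device MTU minus 40 bytes of IPv4 + TCP
 * headers, never below ip_rt_min_advmss and never above 65535 - 40; a
 * 1500-byte Ethernet MTU therefore yields an advmss of 1460.
 */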
1769
1770 static unsigned int ipv4_default_mtu(const struct dst_entry *dst)
1771 {
1772         unsigned int mtu = dst->dev->mtu;
1773
1774         if (unlikely(dst_metric_locked(dst, RTAX_MTU))) {
1775                 const struct rtable *rt = (const struct rtable *) dst;
1776
1777                 if (rt->rt_gateway != rt->rt_dst && mtu > 576)
1778                         mtu = 576;
1779         }
1780
1781         if (mtu > IP_MAX_MTU)
1782                 mtu = IP_MAX_MTU;
1783
1784         return mtu;
1785 }
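
/*
 * When the MTU metric is locked and the route is via a gateway (rt_gateway
 * differs from rt_dst), the classic conservative default of 576 is used;
 * otherwise the device MTU applies, capped at IP_MAX_MTU.
 */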
1786
1787 static void rt_init_metrics(struct rtable *rt, const struct flowi4 *fl4,
1788                             struct fib_info *fi)
1789 {
1790         struct inet_peer *peer;
1791         int create = 0;
1792
1793         /* If a peer entry exists for this destination, we must hook
1794          * it up in order to get at cached metrics.
1795          */
1796         if (fl4 && (fl4->flowi4_flags & FLOWI_FLAG_PRECOW_METRICS))
1797                 create = 1;
1798
1799         rt->peer = peer = inet_getpeer_v4(rt->rt_dst, create);
1800         if (peer) {
1801                 rt->rt_peer_genid = rt_peer_genid();
1802                 if (inet_metrics_new(peer))
1803                         memcpy(peer->metrics, fi->fib_metrics,
1804                                sizeof(u32) * RTAX_MAX);
1805                 dst_init_metrics(&rt->dst, peer->metrics, false);
1806
1807                 check_peer_pmtu(&rt->dst, peer);
1808                 if (peer->redirect_learned.a4 &&
1809                     peer->redirect_learned.a4 != rt->rt_gateway) {
1810                         rt->rt_gateway = peer->redirect_learned.a4;
1811                         rt->rt_flags |= RTCF_REDIRECTED;
1812                 }
1813         } else {
1814                 if (fi->fib_metrics != (u32 *) dst_default_metrics) {
1815                         rt->fi = fi;
1816                         atomic_inc(&fi->fib_clntref);
1817                 }
1818                 dst_init_metrics(&rt->dst, fi->fib_metrics, true);
1819         }
1820 }
1821
1822 static void rt_set_nexthop(struct rtable *rt, const struct flowi4 *fl4,
1823                            const struct fib_result *res,
1824                            struct fib_info *fi, u16 type, u32 itag)
1825 {
1826         struct dst_entry *dst = &rt->dst;
1827
1828         if (fi) {
1829                 if (FIB_RES_GW(*res) &&
1830                     FIB_RES_NH(*res).nh_scope == RT_SCOPE_LINK)
1831                         rt->rt_gateway = FIB_RES_GW(*res);
1832                 rt_init_metrics(rt, fl4, fi);
1833 #ifdef CONFIG_IP_ROUTE_CLASSID
1834                 dst->tclassid = FIB_RES_NH(*res).nh_tclassid;
1835 #endif
1836         }
1837
1838         if (dst_mtu(dst) > IP_MAX_MTU)
1839                 dst_metric_set(dst, RTAX_MTU, IP_MAX_MTU);
1840         if (dst_metric_raw(dst, RTAX_ADVMSS) > 65535 - 40)
1841                 dst_metric_set(dst, RTAX_ADVMSS, 65535 - 40);
1842
1843 #ifdef CONFIG_IP_ROUTE_CLASSID
1844 #ifdef CONFIG_IP_MULTIPLE_TABLES
1845         set_class_tag(rt, fib_rules_tclass(res));
1846 #endif
1847         set_class_tag(rt, itag);
1848 #endif
1849 }
1850
1851 static struct rtable *rt_dst_alloc(struct net_device *dev,
1852                                    bool nopolicy, bool noxfrm)
1853 {
1854         return dst_alloc(&ipv4_dst_ops, dev, 1, -1,
1855                          DST_HOST |
1856                          (nopolicy ? DST_NOPOLICY : 0) |
1857                          (noxfrm ? DST_NOXFRM : 0));
1858 }
1859
1860 /* called in rcu_read_lock() section */
1861 static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr,
1862                                 u8 tos, struct net_device *dev, int our)
1863 {
1864         unsigned int hash;
1865         struct rtable *rth;
1866         __be32 spec_dst;
1867         struct in_device *in_dev = __in_dev_get_rcu(dev);
1868         u32 itag = 0;
1869         int err;
1870
1871         /* Primary sanity checks. */
1872
1873         if (in_dev == NULL)
1874                 return -EINVAL;
1875
1876         if (ipv4_is_multicast(saddr) || ipv4_is_lbcast(saddr) ||
1877             ipv4_is_loopback(saddr) || skb->protocol != htons(ETH_P_IP))
1878                 goto e_inval;
1879
1880         if (ipv4_is_zeronet(saddr)) {
1881                 if (!ipv4_is_local_multicast(daddr))
1882                         goto e_inval;
1883                 spec_dst = inet_select_addr(dev, 0, RT_SCOPE_LINK);
1884         } else {
1885                 err = fib_validate_source(skb, saddr, 0, tos, 0, dev, &spec_dst,
1886                                           &itag);
1887                 if (err < 0)
1888                         goto e_err;
1889         }
1890         rth = rt_dst_alloc(init_net.loopback_dev,
1891                            IN_DEV_CONF_GET(in_dev, NOPOLICY), false);
1892         if (!rth)
1893                 goto e_nobufs;
1894
1895 #ifdef CONFIG_IP_ROUTE_CLASSID
1896         rth->dst.tclassid = itag;
1897 #endif
1898         rth->dst.output = ip_rt_bug;
1899
1900         rth->rt_key_dst = daddr;
1901         rth->rt_key_src = saddr;
1902         rth->rt_genid   = rt_genid(dev_net(dev));
1903         rth->rt_flags   = RTCF_MULTICAST;
1904         rth->rt_type    = RTN_MULTICAST;
1905         rth->rt_key_tos = tos;
1906         rth->rt_dst     = daddr;
1907         rth->rt_src     = saddr;
1908         rth->rt_route_iif = dev->ifindex;
1909         rth->rt_iif     = dev->ifindex;
1910         rth->rt_oif     = 0;
1911         rth->rt_mark    = skb->mark;
1912         rth->rt_gateway = daddr;
1913         rth->rt_spec_dst= spec_dst;
1914         rth->rt_peer_genid = 0;
1915         rth->peer = NULL;
1916         rth->fi = NULL;
1917         if (our) {
1918                 rth->dst.input= ip_local_deliver;
1919                 rth->rt_flags |= RTCF_LOCAL;
1920         }
1921
1922 #ifdef CONFIG_IP_MROUTE
1923         if (!ipv4_is_local_multicast(daddr) && IN_DEV_MFORWARD(in_dev))
1924                 rth->dst.input = ip_mr_input;
1925 #endif
1926         RT_CACHE_STAT_INC(in_slow_mc);
1927
1928         hash = rt_hash(daddr, saddr, dev->ifindex, rt_genid(dev_net(dev)));
1929         rth = rt_intern_hash(hash, rth, skb, dev->ifindex);
1930         return IS_ERR(rth) ? PTR_ERR(rth) : 0;
1931
1932 e_nobufs:
1933         return -ENOBUFS;
1934 e_inval:
1935         return -EINVAL;
1936 e_err:
1937         return err;
1938 }
1939
1940
1941 static void ip_handle_martian_source(struct net_device *dev,
1942                                      struct in_device *in_dev,
1943                                      struct sk_buff *skb,
1944                                      __be32 daddr,
1945                                      __be32 saddr)
1946 {
1947         RT_CACHE_STAT_INC(in_martian_src);
1948 #ifdef CONFIG_IP_ROUTE_VERBOSE
1949         if (IN_DEV_LOG_MARTIANS(in_dev) && net_ratelimit()) {
1950                 /*
1951                  *      RFC1812 recommendation: if the source is martian,
1952                  *      the only hint is the MAC header.
1953                  */
1954                 printk(KERN_WARNING "martian source %pI4 from %pI4, on dev %s\n",
1955                         &daddr, &saddr, dev->name);
1956                 if (dev->hard_header_len && skb_mac_header_was_set(skb)) {
1957                         int i;
1958                         const unsigned char *p = skb_mac_header(skb);
1959                         printk(KERN_WARNING "ll header: ");
1960                         for (i = 0; i < dev->hard_header_len; i++, p++) {
1961                                 printk("%02x", *p);
1962                                 if (i < (dev->hard_header_len - 1))
1963                                         printk(":");
1964                         }
1965                         printk("\n");
1966                 }
1967         }
1968 #endif
1969 }
1970
1971 /* called in rcu_read_lock() section */
1972 static int __mkroute_input(struct sk_buff *skb,
1973                            const struct fib_result *res,
1974                            struct in_device *in_dev,
1975                            __be32 daddr, __be32 saddr, u32 tos,
1976                            struct rtable **result)
1977 {
1978         struct rtable *rth;
1979         int err;
1980         struct in_device *out_dev;
1981         unsigned int flags = 0;
1982         __be32 spec_dst;
1983         u32 itag;
1984
1985         /* get a working reference to the output device */
1986         out_dev = __in_dev_get_rcu(FIB_RES_DEV(*res));
1987         if (out_dev == NULL) {
1988                 if (net_ratelimit())
1989                         printk(KERN_CRIT "Bug in ip_route_input" \
1990                                "_slow(). Please, report\n");
1991                 return -EINVAL;
1992         }
1993
1994
1995         err = fib_validate_source(skb, saddr, daddr, tos, FIB_RES_OIF(*res),
1996                                   in_dev->dev, &spec_dst, &itag);
1997         if (err < 0) {
1998                 ip_handle_martian_source(in_dev->dev, in_dev, skb, daddr,
1999                                          saddr);
2000
2001                 goto cleanup;
2002         }
2003
2004         if (err)
2005                 flags |= RTCF_DIRECTSRC;
2006
2007         if (out_dev == in_dev && err &&
2008             (IN_DEV_SHARED_MEDIA(out_dev) ||
2009              inet_addr_onlink(out_dev, saddr, FIB_RES_GW(*res))))
2010                 flags |= RTCF_DOREDIRECT;
2011
2012         if (skb->protocol != htons(ETH_P_IP)) {
2013                 /* Not IP (i.e. ARP). Do not create a route if it is
2014                  * invalid for proxy arp. DNAT routes are always valid.
2015                  *
2016                  * The proxy arp feature has been extended to allow ARP
2017                  * replies back to the same interface, to support
2018                  * Private VLAN switch technologies. See arp.c.
2019                  */
2020                 if (out_dev == in_dev &&
2021                     IN_DEV_PROXY_ARP_PVLAN(in_dev) == 0) {
2022                         err = -EINVAL;
2023                         goto cleanup;
2024                 }
2025         }
2026
2027         rth = rt_dst_alloc(out_dev->dev,
2028                            IN_DEV_CONF_GET(in_dev, NOPOLICY),
2029                            IN_DEV_CONF_GET(out_dev, NOXFRM));
2030         if (!rth) {
2031                 err = -ENOBUFS;
2032                 goto cleanup;
2033         }
2034
2035         rth->rt_key_dst = daddr;
2036         rth->rt_key_src = saddr;
2037         rth->rt_genid = rt_genid(dev_net(rth->dst.dev));
2038         rth->rt_flags = flags;
2039         rth->rt_type = res->type;
2040         rth->rt_key_tos = tos;
2041         rth->rt_dst     = daddr;
2042         rth->rt_src     = saddr;
2043         rth->rt_route_iif = in_dev->dev->ifindex;
2044         rth->rt_iif     = in_dev->dev->ifindex;
2045         rth->rt_oif     = 0;
2046         rth->rt_mark    = skb->mark;
2047         rth->rt_gateway = daddr;
2048         rth->rt_spec_dst= spec_dst;
2049         rth->rt_peer_genid = 0;
2050         rth->peer = NULL;
2051         rth->fi = NULL;
2052
2053         rth->dst.input = ip_forward;
2054         rth->dst.output = ip_output;
2055
2056         rt_set_nexthop(rth, NULL, res, res->fi, res->type, itag);
2057
2058         *result = rth;
2059         err = 0;
2060  cleanup:
2061         return err;
2062 }
2063
2064 static int ip_mkroute_input(struct sk_buff *skb,
2065                             struct fib_result *res,
2066                             const struct flowi4 *fl4,
2067                             struct in_device *in_dev,
2068                             __be32 daddr, __be32 saddr, u32 tos)
2069 {
2070         struct rtable* rth = NULL;
2071         int err;
2072         unsigned hash;
2073
2074 #ifdef CONFIG_IP_ROUTE_MULTIPATH
2075         if (res->fi && res->fi->fib_nhs > 1)
2076                 fib_select_multipath(res);
2077 #endif
2078
2079         /* create a routing cache entry */
2080         err = __mkroute_input(skb, res, in_dev, daddr, saddr, tos, &rth);
2081         if (err)
2082                 return err;
2083
2084         /* put it into the cache */
2085         hash = rt_hash(daddr, saddr, fl4->flowi4_iif,
2086                        rt_genid(dev_net(rth->dst.dev)));
2087         rth = rt_intern_hash(hash, rth, skb, fl4->flowi4_iif);
2088         if (IS_ERR(rth))
2089                 return PTR_ERR(rth);
2090         return 0;
2091 }
2092
2093 /*
2094  *      NOTE. We drop all packets that have local source
2095  *      addresses, because every properly looped-back packet
2096  *      must already have the correct destination attached by the output routine.
2097  *
2098  *      This approach solves two big problems:
2099  *      1. Non-simplex devices are handled properly.
2100  *      2. IP spoofing attempts are filtered with a 100% guarantee.
2101  *      called with rcu_read_lock()
2102  */
2103
2104 static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr,
2105                                u8 tos, struct net_device *dev)
2106 {
2107         struct fib_result res;
2108         struct in_device *in_dev = __in_dev_get_rcu(dev);
2109         struct flowi4   fl4;
2110         unsigned        flags = 0;
2111         u32             itag = 0;
2112         struct rtable * rth;
2113         unsigned        hash;
2114         __be32          spec_dst;
2115         int             err = -EINVAL;
2116         struct net    * net = dev_net(dev);
2117
2118         /* IP on this device is disabled. */
2119
2120         if (!in_dev)
2121                 goto out;
2122
2123         /* Check for the most weird martians, which cannot be detected
2124            by fib_lookup.
2125          */
2126
2127         if (ipv4_is_multicast(saddr) || ipv4_is_lbcast(saddr) ||
2128             ipv4_is_loopback(saddr))
2129                 goto martian_source;
2130
2131         if (ipv4_is_lbcast(daddr) || (saddr == 0 && daddr == 0))
2132                 goto brd_input;
2133
2134         /* Accept zero addresses only for limited broadcast;
2135          * it is not clear whether this should be fixed. Waiting for complaints :-)
2136          */
2137         if (ipv4_is_zeronet(saddr))
2138                 goto martian_source;
2139
2140         if (ipv4_is_zeronet(daddr) || ipv4_is_loopback(daddr))
2141                 goto martian_destination;
2142
2143         /*
2144          *      Now we are ready to route packet.
2145          */
2146         fl4.flowi4_oif = 0;
2147         fl4.flowi4_iif = dev->ifindex;
2148         fl4.flowi4_mark = skb->mark;
2149         fl4.flowi4_tos = tos;
2150         fl4.flowi4_scope = RT_SCOPE_UNIVERSE;
2151         fl4.daddr = daddr;
2152         fl4.saddr = saddr;
2153         err = fib_lookup(net, &fl4, &res);
2154         if (err != 0) {
2155                 if (!IN_DEV_FORWARD(in_dev))
2156                         goto e_hostunreach;
2157                 goto no_route;
2158         }
2159
2160         RT_CACHE_STAT_INC(in_slow_tot);
2161
2162         if (res.type == RTN_BROADCAST)
2163                 goto brd_input;
2164
2165         if (res.type == RTN_LOCAL) {
2166                 err = fib_validate_source(skb, saddr, daddr, tos,
2167                                           net->loopback_dev->ifindex,
2168                                           dev, &spec_dst, &itag);
2169                 if (err < 0)
2170                         goto martian_source_keep_err;
2171                 if (err)
2172                         flags |= RTCF_DIRECTSRC;
2173                 spec_dst = daddr;
2174                 goto local_input;
2175         }
2176
2177         if (!IN_DEV_FORWARD(in_dev))
2178                 goto e_hostunreach;
2179         if (res.type != RTN_UNICAST)
2180                 goto martian_destination;
2181
2182         err = ip_mkroute_input(skb, &res, &fl4, in_dev, daddr, saddr, tos);
2183 out:    return err;
2184
2185 brd_input:
2186         if (skb->protocol != htons(ETH_P_IP))
2187                 goto e_inval;
2188
2189         if (ipv4_is_zeronet(saddr))
2190                 spec_dst = inet_select_addr(dev, 0, RT_SCOPE_LINK);
2191         else {
2192                 err = fib_validate_source(skb, saddr, 0, tos, 0, dev, &spec_dst,
2193                                           &itag);
2194                 if (err < 0)
2195                         goto martian_source_keep_err;
2196                 if (err)
2197                         flags |= RTCF_DIRECTSRC;
2198         }
2199         flags |= RTCF_BROADCAST;
2200         res.type = RTN_BROADCAST;
2201         RT_CACHE_STAT_INC(in_brd);
2202
2203 local_input:
2204         rth = rt_dst_alloc(net->loopback_dev,
2205                            IN_DEV_CONF_GET(in_dev, NOPOLICY), false);
2206         if (!rth)
2207                 goto e_nobufs;
2208
2209         rth->dst.input= ip_local_deliver;
2210         rth->dst.output= ip_rt_bug;
2211 #ifdef CONFIG_IP_ROUTE_CLASSID
2212         rth->dst.tclassid = itag;
2213 #endif
2214
2215         rth->rt_key_dst = daddr;
2216         rth->rt_key_src = saddr;
2217         rth->rt_genid = rt_genid(net);
2218         rth->rt_flags   = flags|RTCF_LOCAL;
2219         rth->rt_type    = res.type;
2220         rth->rt_key_tos = tos;
2221         rth->rt_dst     = daddr;
2222         rth->rt_src     = saddr;
2223 #ifdef CONFIG_IP_ROUTE_CLASSID
2224         rth->dst.tclassid = itag;
2225 #endif
2226         rth->rt_route_iif = dev->ifindex;
2227         rth->rt_iif     = dev->ifindex;
2228         rth->rt_oif     = 0;
2229         rth->rt_mark    = skb->mark;
2230         rth->rt_gateway = daddr;
2231         rth->rt_spec_dst= spec_dst;
2232         rth->rt_peer_genid = 0;
2233         rth->peer = NULL;
2234         rth->fi = NULL;
2235         if (res.type == RTN_UNREACHABLE) {
2236                 rth->dst.input= ip_error;
2237                 rth->dst.error= -err;
2238                 rth->rt_flags   &= ~RTCF_LOCAL;
2239         }
2240         hash = rt_hash(daddr, saddr, fl4.flowi4_iif, rt_genid(net));
2241         rth = rt_intern_hash(hash, rth, skb, fl4.flowi4_iif);
2242         err = 0;
2243         if (IS_ERR(rth))
2244                 err = PTR_ERR(rth);
2245         goto out;
2246
2247 no_route:
2248         RT_CACHE_STAT_INC(in_no_route);
2249         spec_dst = inet_select_addr(dev, 0, RT_SCOPE_UNIVERSE);
2250         res.type = RTN_UNREACHABLE;
2251         if (err == -ESRCH)
2252                 err = -ENETUNREACH;
2253         goto local_input;
2254
2255         /*
2256          *      Do not cache martian addresses: they should be logged (RFC1812)
2257          */
2258 martian_destination:
2259         RT_CACHE_STAT_INC(in_martian_dst);
2260 #ifdef CONFIG_IP_ROUTE_VERBOSE
2261         if (IN_DEV_LOG_MARTIANS(in_dev) && net_ratelimit())
2262                 printk(KERN_WARNING "martian destination %pI4 from %pI4, dev %s\n",
2263                         &daddr, &saddr, dev->name);
2264 #endif
2265
2266 e_hostunreach:
2267         err = -EHOSTUNREACH;
2268         goto out;
2269
2270 e_inval:
2271         err = -EINVAL;
2272         goto out;
2273
2274 e_nobufs:
2275         err = -ENOBUFS;
2276         goto out;
2277
2278 martian_source:
2279         err = -EINVAL;
2280 martian_source_keep_err:
2281         ip_handle_martian_source(dev, in_dev, skb, daddr, saddr);
2282         goto out;
2283 }
2284
2285 int ip_route_input_common(struct sk_buff *skb, __be32 daddr, __be32 saddr,
2286                            u8 tos, struct net_device *dev, bool noref)
2287 {
2288         struct rtable * rth;
2289         unsigned        hash;
2290         int iif = dev->ifindex;
2291         struct net *net;
2292         int res;
2293
2294         net = dev_net(dev);
2295
2296         rcu_read_lock();
2297
2298         if (!rt_caching(net))
2299                 goto skip_cache;
2300
2301         tos &= IPTOS_RT_MASK;
2302         hash = rt_hash(daddr, saddr, iif, rt_genid(net));
2303
2304         for (rth = rcu_dereference(rt_hash_table[hash].chain); rth;
2305              rth = rcu_dereference(rth->dst.rt_next)) {
2306                 if ((((__force u32)rth->rt_key_dst ^ (__force u32)daddr) |
2307                      ((__force u32)rth->rt_key_src ^ (__force u32)saddr) |
2308                      (rth->rt_iif ^ iif) |
2309                      rth->rt_oif |
2310                      (rth->rt_key_tos ^ tos)) == 0 &&
2311                     rth->rt_mark == skb->mark &&
2312                     net_eq(dev_net(rth->dst.dev), net) &&
2313                     !rt_is_expired(rth)) {
2314                         if (noref) {
2315                                 dst_use_noref(&rth->dst, jiffies);
2316                                 skb_dst_set_noref(skb, &rth->dst);
2317                         } else {
2318                                 dst_use(&rth->dst, jiffies);
2319                                 skb_dst_set(skb, &rth->dst);
2320                         }
2321                         RT_CACHE_STAT_INC(in_hit);
2322                         rcu_read_unlock();
2323                         return 0;
2324                 }
2325                 RT_CACHE_STAT_INC(in_hlist_search);
2326         }
2327
2328 skip_cache:
2329         /* Multicast recognition logic is moved from the route cache to here.
2330            The problem was that too many Ethernet cards have broken/missing
2331            hardware multicast filters :-( As a result, a host on a multicast
2332            network acquires a lot of useless route cache entries, e.g. for
2333            SDR messages from all over the world. Now we try to get rid of them.
2334            Really, provided the software IP multicast filter is organized
2335            reasonably (at least, hashed), this does not result in a slowdown
2336            compared with route cache reject entries.
2337            Note that multicast routers are not affected, because a
2338            route cache entry is created eventually.
2339          */
2340         if (ipv4_is_multicast(daddr)) {
2341                 struct in_device *in_dev = __in_dev_get_rcu(dev);
2342
2343                 if (in_dev) {
2344                         int our = ip_check_mc_rcu(in_dev, daddr, saddr,
2345                                                   ip_hdr(skb)->protocol);
2346                         if (our
2347 #ifdef CONFIG_IP_MROUTE
2348                                 ||
2349                             (!ipv4_is_local_multicast(daddr) &&
2350                              IN_DEV_MFORWARD(in_dev))
2351 #endif
2352                            ) {
2353                                 int res = ip_route_input_mc(skb, daddr, saddr,
2354                                                             tos, dev, our);
2355                                 rcu_read_unlock();
2356                                 return res;
2357                         }
2358                 }
2359                 rcu_read_unlock();
2360                 return -EINVAL;
2361         }
2362         res = ip_route_input_slow(skb, daddr, saddr, tos, dev);
2363         rcu_read_unlock();
2364         return res;
2365 }
2366 EXPORT_SYMBOL(ip_route_input_common);
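
/*
 * Callers normally reach ip_route_input_common() through the
 * ip_route_input()/ip_route_input_noref() wrappers (declared in
 * include/net/route.h), e.g. from ip_rcv_finish().  A minimal sketch of the
 * usual call, assuming the IP header has already been validated:
 *
 *	const struct iphdr *iph = ip_hdr(skb);
 *
 *	if (ip_route_input_noref(skb, iph->daddr, iph->saddr,
 *				 iph->tos, skb->dev))
 *		goto drop;		(no route: free the skb)
 *	return dst_input(skb);		(invokes the dst.input set above)
 */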
2367
2368 /* called with rcu_read_lock() */
2369 static struct rtable *__mkroute_output(const struct fib_result *res,
2370                                        const struct flowi4 *fl4,
2371                                        __be32 orig_daddr, __be32 orig_saddr,
2372                                        int orig_oif, struct net_device *dev_out,
2373                                        unsigned int flags)
2374 {
2375         struct fib_info *fi = res->fi;
2376         u32 tos = RT_FL_TOS(fl4);
2377         struct in_device *in_dev;
2378         u16 type = res->type;
2379         struct rtable *rth;
2380
2381         if (ipv4_is_loopback(fl4->saddr) && !(dev_out->flags & IFF_LOOPBACK))
2382                 return ERR_PTR(-EINVAL);
2383
2384         if (ipv4_is_lbcast(fl4->daddr))
2385                 type = RTN_BROADCAST;
2386         else if (ipv4_is_multicast(fl4->daddr))
2387                 type = RTN_MULTICAST;
2388         else if (ipv4_is_zeronet(fl4->daddr))
2389                 return ERR_PTR(-EINVAL);
2390
2391         if (dev_out->flags & IFF_LOOPBACK)
2392                 flags |= RTCF_LOCAL;
2393
2394         in_dev = __in_dev_get_rcu(dev_out);
2395         if (!in_dev)
2396                 return ERR_PTR(-EINVAL);
2397
2398         if (type == RTN_BROADCAST) {
2399                 flags |= RTCF_BROADCAST | RTCF_LOCAL;
2400                 fi = NULL;
2401         } else if (type == RTN_MULTICAST) {
2402                 flags |= RTCF_MULTICAST | RTCF_LOCAL;
2403                 if (!ip_check_mc_rcu(in_dev, fl4->daddr, fl4->saddr,
2404                                      fl4->flowi4_proto))
2405                         flags &= ~RTCF_LOCAL;
2406                 /* If a multicast route does not exist, use the
2407                  * default one, but do not use the gateway in this case.
2408                  * Yes, it is a hack.
2409                  */
2410                 if (fi && res->prefixlen < 4)
2411                         fi = NULL;
2412         }
2413
2414         rth = rt_dst_alloc(dev_out,
2415                            IN_DEV_CONF_GET(in_dev, NOPOLICY),
2416                            IN_DEV_CONF_GET(in_dev, NOXFRM));
2417         if (!rth)
2418                 return ERR_PTR(-ENOBUFS);
2419
2420         rth->dst.output = ip_output;
2421
2422         rth->rt_key_dst = orig_daddr;
2423         rth->rt_key_src = orig_saddr;
2424         rth->rt_genid = rt_genid(dev_net(dev_out));
2425         rth->rt_flags   = flags;
2426         rth->rt_type    = type;
2427         rth->rt_key_tos = tos;
2428         rth->rt_dst     = fl4->daddr;
2429         rth->rt_src     = fl4->saddr;
2430         rth->rt_route_iif = 0;
2431         rth->rt_iif     = orig_oif ? : dev_out->ifindex;
2432         rth->rt_oif     = orig_oif;
2433         rth->rt_mark    = fl4->flowi4_mark;
2434         rth->rt_gateway = fl4->daddr;
2435         rth->rt_spec_dst= fl4->saddr;
2436         rth->rt_peer_genid = 0;
2437         rth->peer = NULL;
2438         rth->fi = NULL;
2439
2440         RT_CACHE_STAT_INC(out_slow_tot);
2441
2442         if (flags & RTCF_LOCAL) {
2443                 rth->dst.input = ip_local_deliver;
2444                 rth->rt_spec_dst = fl4->daddr;
2445         }
2446         if (flags & (RTCF_BROADCAST | RTCF_MULTICAST)) {
2447                 rth->rt_spec_dst = fl4->saddr;
2448                 if (flags & RTCF_LOCAL &&
2449                     !(dev_out->flags & IFF_LOOPBACK)) {
2450                         rth->dst.output = ip_mc_output;
2451                         RT_CACHE_STAT_INC(out_slow_mc);
2452                 }
2453 #ifdef CONFIG_IP_MROUTE
2454                 if (type == RTN_MULTICAST) {
2455                         if (IN_DEV_MFORWARD(in_dev) &&
2456                             !ipv4_is_local_multicast(fl4->daddr)) {
2457                                 rth->dst.input = ip_mr_input;
2458                                 rth->dst.output = ip_mc_output;
2459                         }
2460                 }
2461 #endif
2462         }
2463
2464         rt_set_nexthop(rth, fl4, res, fi, type, 0);
2465
2466         return rth;
2467 }
2468
2469 /*
2470  * Major route resolver routine.
2471  * called with rcu_read_lock();
2472  */
2473
2474 static struct rtable *ip_route_output_slow(struct net *net, struct flowi4 *fl4)
2475 {
2476         struct net_device *dev_out = NULL;
2477         u32 tos = RT_FL_TOS(fl4);
2478         unsigned int flags = 0;
2479         struct fib_result res;
2480         struct rtable *rth;
2481         __be32 orig_daddr;
2482         __be32 orig_saddr;
2483         int orig_oif;
2484
2485         res.fi          = NULL;
2486 #ifdef CONFIG_IP_MULTIPLE_TABLES
2487         res.r           = NULL;
2488 #endif
2489
2490         orig_daddr = fl4->daddr;
2491         orig_saddr = fl4->saddr;
2492         orig_oif = fl4->flowi4_oif;
2493
2494         fl4->flowi4_iif = net->loopback_dev->ifindex;
2495         fl4->flowi4_tos = tos & IPTOS_RT_MASK;
2496         fl4->flowi4_scope = ((tos & RTO_ONLINK) ?
2497                          RT_SCOPE_LINK : RT_SCOPE_UNIVERSE);
2498
2499         rcu_read_lock();
2500         if (fl4->saddr) {
2501                 rth = ERR_PTR(-EINVAL);
2502                 if (ipv4_is_multicast(fl4->saddr) ||
2503                     ipv4_is_lbcast(fl4->saddr) ||
2504                     ipv4_is_zeronet(fl4->saddr))
2505                         goto out;
2506
2507                 /* I removed the check for oif == dev_out->oif here.
2508                    It was wrong for two reasons:
2509                    1. ip_dev_find(net, saddr) can return the wrong iface if saddr
2510                       is assigned to multiple interfaces.
2511                    2. Moreover, we are allowed to send packets with the saddr
2512                       of another iface. --ANK
2513                  */
2514
2515                 if (fl4->flowi4_oif == 0 &&
2516                     (ipv4_is_multicast(fl4->daddr) ||
2517                      ipv4_is_lbcast(fl4->daddr))) {
2518                         /* It is equivalent to inet_addr_type(saddr) == RTN_LOCAL */
2519                         dev_out = __ip_dev_find(net, fl4->saddr, false);
2520                         if (dev_out == NULL)
2521                                 goto out;
2522
2523                         /* Special hack: the user can direct multicasts
2524                            and limited broadcast via the necessary interface
2525                            without fiddling with IP_MULTICAST_IF or IP_PKTINFO.
2526                            This hack is not just for fun, it allows
2527                            vic, vat and friends to work.
2528                            They bind a socket to loopback, set ttl to zero
2529                            and expect that it will work.
2530                            From the viewpoint of the routing cache they are broken,
2531                            because we are not allowed to build a multicast path
2532                            with a loopback source addr (the routing cache
2533                            cannot know that ttl is zero, so the packet
2534                            will not leave this host and the route is valid).
2535                            Luckily, this hack is a good workaround.
2536                          */
2537
2538                         fl4->flowi4_oif = dev_out->ifindex;
2539                         goto make_route;
2540                 }
2541
2542                 if (!(fl4->flowi4_flags & FLOWI_FLAG_ANYSRC)) {
2543                         /* It is equivalent to inet_addr_type(saddr) == RTN_LOCAL */
2544                         if (!__ip_dev_find(net, fl4->saddr, false))
2545                                 goto out;
2546                 }
2547         }
2548
2549
2550         if (fl4->flowi4_oif) {
2551                 dev_out = dev_get_by_index_rcu(net, fl4->flowi4_oif);
2552                 rth = ERR_PTR(-ENODEV);
2553                 if (dev_out == NULL)
2554                         goto out;
2555
2556                 /* RACE: Check return value of inet_select_addr instead. */
2557                 if (!(dev_out->flags & IFF_UP) || !__in_dev_get_rcu(dev_out)) {
2558                         rth = ERR_PTR(-ENETUNREACH);
2559                         goto out;
2560                 }
2561                 if (ipv4_is_local_multicast(fl4->daddr) ||
2562                     ipv4_is_lbcast(fl4->daddr)) {
2563                         if (!fl4->saddr)
2564                                 fl4->saddr = inet_select_addr(dev_out, 0,
2565                                                               RT_SCOPE_LINK);
2566                         goto make_route;
2567                 }
2568                 if (fl4->saddr) {
2569                         if (ipv4_is_multicast(fl4->daddr))
2570                                 fl4->saddr = inet_select_addr(dev_out, 0,
2571                                                               fl4->flowi4_scope);
2572                         else if (!fl4->daddr)
2573                                 fl4->saddr = inet_select_addr(dev_out, 0,
2574                                                               RT_SCOPE_HOST);
2575                 }
2576         }
2577
2578         if (!fl4->daddr) {
2579                 fl4->daddr = fl4->saddr;
2580                 if (!fl4->daddr)
2581                         fl4->daddr = fl4->saddr = htonl(INADDR_LOOPBACK);
2582                 dev_out = net->loopback_dev;
2583                 fl4->flowi4_oif = net->loopback_dev->ifindex;
2584                 res.type = RTN_LOCAL;
2585                 flags |= RTCF_LOCAL;
2586                 goto make_route;
2587         }
2588
2589         if (fib_lookup(net, fl4, &res)) {
2590                 res.fi = NULL;
2591                 if (fl4->flowi4_oif) {
2592                         /* Apparently, the routing tables are wrong. Assume
2593                            that the destination is on-link.
2594
2595                            WHY? DW.
2596                            Because we are allowed to send to an iface
2597                            even if it has NO routes and NO assigned
2598                            addresses. When oif is specified, the routing
2599                            tables are looked up with only one purpose:
2600                            to catch whether the destination is gatewayed, rather
2601                            than direct. Moreover, if MSG_DONTROUTE is set,
2602                            we send the packet, ignoring both routing tables
2603                            and ifaddr state. --ANK
2604
2605
2606                            We could do this even when oif is unknown,
2607                            as IPv6 likely does, but we do not.
2608                          */
2609
2610                         if (fl4->saddr == 0)
2611                                 fl4->saddr = inet_select_addr(dev_out, 0,
2612                                                               RT_SCOPE_LINK);
2613                         res.type = RTN_UNICAST;
2614                         goto make_route;
2615                 }
2616                 rth = ERR_PTR(-ENETUNREACH);
2617                 goto out;
2618         }
2619
2620         if (res.type == RTN_LOCAL) {
2621                 if (!fl4->saddr) {
2622                         if (res.fi->fib_prefsrc)
2623                                 fl4->saddr = res.fi->fib_prefsrc;
2624                         else
2625                                 fl4->saddr = fl4->daddr;
2626                 }
2627                 dev_out = net->loopback_dev;
2628                 fl4->flowi4_oif = dev_out->ifindex;
2629                 res.fi = NULL;
2630                 flags |= RTCF_LOCAL;
2631                 goto make_route;
2632         }
2633
2634 #ifdef CONFIG_IP_ROUTE_MULTIPATH
2635         if (res.fi->fib_nhs > 1 && fl4->flowi4_oif == 0)
2636                 fib_select_multipath(&res);
2637         else
2638 #endif
2639         if (!res.prefixlen &&
2640             res.table->tb_num_default > 1 &&
2641             res.type == RTN_UNICAST && !fl4->flowi4_oif)
2642                 fib_select_default(&res);
2643
2644         if (!fl4->saddr)
2645                 fl4->saddr = FIB_RES_PREFSRC(net, res);
2646
2647         dev_out = FIB_RES_DEV(res);
2648         fl4->flowi4_oif = dev_out->ifindex;
2649
2650
2651 make_route:
2652         rth = __mkroute_output(&res, fl4, orig_daddr, orig_saddr, orig_oif,
2653                                dev_out, flags);
2654         if (!IS_ERR(rth)) {
2655                 unsigned int hash;
2656
2657                 hash = rt_hash(orig_daddr, orig_saddr, orig_oif,
2658                                rt_genid(dev_net(dev_out)));
2659                 rth = rt_intern_hash(hash, rth, NULL, orig_oif);
2660         }
2661
2662 out:
2663         rcu_read_unlock();
2664         return rth;
2665 }
2666
2667 struct rtable *__ip_route_output_key(struct net *net, struct flowi4 *flp4)
2668 {
2669         struct rtable *rth;
2670         unsigned int hash;
2671
2672         if (!rt_caching(net))
2673                 goto slow_output;
2674
2675         hash = rt_hash(flp4->daddr, flp4->saddr, flp4->flowi4_oif, rt_genid(net));
2676
2677         rcu_read_lock_bh();
2678         for (rth = rcu_dereference_bh(rt_hash_table[hash].chain); rth;
2679                 rth = rcu_dereference_bh(rth->dst.rt_next)) {
2680                 if (rth->rt_key_dst == flp4->daddr &&
2681                     rth->rt_key_src == flp4->saddr &&
2682                     rt_is_output_route(rth) &&
2683                     rth->rt_oif == flp4->flowi4_oif &&
2684                     rth->rt_mark == flp4->flowi4_mark &&
2685                     !((rth->rt_key_tos ^ flp4->flowi4_tos) &
2686                             (IPTOS_RT_MASK | RTO_ONLINK)) &&
2687                     net_eq(dev_net(rth->dst.dev), net) &&
2688                     !rt_is_expired(rth)) {
2689                         dst_use(&rth->dst, jiffies);
2690                         RT_CACHE_STAT_INC(out_hit);
2691                         rcu_read_unlock_bh();
2692                         if (!flp4->saddr)
2693                                 flp4->saddr = rth->rt_src;
2694                         if (!flp4->daddr)
2695                                 flp4->daddr = rth->rt_dst;
2696                         return rth;
2697                 }
2698                 RT_CACHE_STAT_INC(out_hlist_search);
2699         }
2700         rcu_read_unlock_bh();
2701
2702 slow_output:
2703         return ip_route_output_slow(net, flp4);
2704 }
2705 EXPORT_SYMBOL_GPL(__ip_route_output_key);
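
/*
 * Output lookups typically go through ip_route_output_key() (a thin wrapper
 * around this function) or ip_route_output_flow() below.  A minimal sketch,
 * assuming the caller only cares about destination and TOS:
 *
 *	struct flowi4 fl4 = {
 *		.daddr = daddr,
 *		.flowi4_tos = RT_TOS(tos),
 *	};
 *	struct rtable *rt = ip_route_output_key(net, &fl4);
 *
 *	if (IS_ERR(rt))
 *		return PTR_ERR(rt);
 *	... use rt->dst ..., then ip_rt_put(rt);
 */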
2706
2707 static struct dst_entry *ipv4_blackhole_dst_check(struct dst_entry *dst, u32 cookie)
2708 {
2709         return NULL;
2710 }
2711
2712 static unsigned int ipv4_blackhole_default_mtu(const struct dst_entry *dst)
2713 {
2714         return 0;
2715 }
2716
2717 static void ipv4_rt_blackhole_update_pmtu(struct dst_entry *dst, u32 mtu)
2718 {
2719 }
2720
2721 static u32 *ipv4_rt_blackhole_cow_metrics(struct dst_entry *dst,
2722                                           unsigned long old)
2723 {
2724         return NULL;
2725 }
2726
2727 static struct dst_ops ipv4_dst_blackhole_ops = {
2728         .family                 =       AF_INET,
2729         .protocol               =       cpu_to_be16(ETH_P_IP),
2730         .destroy                =       ipv4_dst_destroy,
2731         .check                  =       ipv4_blackhole_dst_check,
2732         .default_mtu            =       ipv4_blackhole_default_mtu,
2733         .default_advmss         =       ipv4_default_advmss,
2734         .update_pmtu            =       ipv4_rt_blackhole_update_pmtu,
2735         .cow_metrics            =       ipv4_rt_blackhole_cow_metrics,
2736 };
2737
2738 struct dst_entry *ipv4_blackhole_route(struct net *net, struct dst_entry *dst_orig)
2739 {
2740         struct rtable *rt = dst_alloc(&ipv4_dst_blackhole_ops, NULL, 1, 0, 0);
2741         struct rtable *ort = (struct rtable *) dst_orig;
2742
2743         if (rt) {
2744                 struct dst_entry *new = &rt->dst;
2745
2746                 new->__use = 1;
2747                 new->input = dst_discard;
2748                 new->output = dst_discard;
2749                 dst_copy_metrics(new, &ort->dst);
2750
2751                 new->dev = ort->dst.dev;
2752                 if (new->dev)
2753                         dev_hold(new->dev);
2754
2755                 rt->rt_key_dst = ort->rt_key_dst;
2756                 rt->rt_key_src = ort->rt_key_src;
2757                 rt->rt_key_tos = ort->rt_key_tos;
2758                 rt->rt_route_iif = ort->rt_route_iif;
2759                 rt->rt_iif = ort->rt_iif;
2760                 rt->rt_oif = ort->rt_oif;
2761                 rt->rt_mark = ort->rt_mark;
2762
2763                 rt->rt_genid = rt_genid(net);
2764                 rt->rt_flags = ort->rt_flags;
2765                 rt->rt_type = ort->rt_type;
2766                 rt->rt_dst = ort->rt_dst;
2767                 rt->rt_src = ort->rt_src;
2768                 rt->rt_gateway = ort->rt_gateway;
2769                 rt->rt_spec_dst = ort->rt_spec_dst;
2770                 rt->peer = ort->peer;
2771                 if (rt->peer)
2772                         atomic_inc(&rt->peer->refcnt);
2773                 rt->fi = ort->fi;
2774                 if (rt->fi)
2775                         atomic_inc(&rt->fi->fib_clntref);
2776
2777                 dst_free(new);
2778         }
2779
2780         dst_release(dst_orig);
2781
2782         return rt ? &rt->dst : ERR_PTR(-ENOMEM);
2783 }
2784
2785 struct rtable *ip_route_output_flow(struct net *net, struct flowi4 *flp4,
2786                                     struct sock *sk)
2787 {
2788         struct rtable *rt = __ip_route_output_key(net, flp4);
2789
2790         if (IS_ERR(rt))
2791                 return rt;
2792
2793         if (flp4->flowi4_proto)
2794                 rt = (struct rtable *) xfrm_lookup(net, &rt->dst,
2795                                                    flowi4_to_flowi(flp4),
2796                                                    sk, 0);
2797
2798         return rt;
2799 }
2800 EXPORT_SYMBOL_GPL(ip_route_output_flow);
2801
2802 static int rt_fill_info(struct net *net,
2803                         struct sk_buff *skb, u32 pid, u32 seq, int event,
2804                         int nowait, unsigned int flags)
2805 {
2806         struct rtable *rt = skb_rtable(skb);
2807         struct rtmsg *r;
2808         struct nlmsghdr *nlh;
2809         long expires = 0;
2810         const struct inet_peer *peer = rt->peer;
2811         u32 id = 0, ts = 0, tsage = 0, error;
2812
2813         nlh = nlmsg_put(skb, pid, seq, event, sizeof(*r), flags);
2814         if (nlh == NULL)
2815                 return -EMSGSIZE;
2816
2817         r = nlmsg_data(nlh);
2818         r->rtm_family    = AF_INET;
2819         r->rtm_dst_len  = 32;
2820         r->rtm_src_len  = 0;
2821         r->rtm_tos      = rt->rt_key_tos;
2822         r->rtm_table    = RT_TABLE_MAIN;
2823         NLA_PUT_U32(skb, RTA_TABLE, RT_TABLE_MAIN);
2824         r->rtm_type     = rt->rt_type;
2825         r->rtm_scope    = RT_SCOPE_UNIVERSE;
2826         r->rtm_protocol = RTPROT_UNSPEC;
2827         r->rtm_flags    = (rt->rt_flags & ~0xFFFF) | RTM_F_CLONED;
2828         if (rt->rt_flags & RTCF_NOTIFY)
2829                 r->rtm_flags |= RTM_F_NOTIFY;
2830
2831         NLA_PUT_BE32(skb, RTA_DST, rt->rt_dst);
2832
2833         if (rt->rt_key_src) {
2834                 r->rtm_src_len = 32;
2835                 NLA_PUT_BE32(skb, RTA_SRC, rt->rt_key_src);
2836         }
2837         if (rt->dst.dev)
2838                 NLA_PUT_U32(skb, RTA_OIF, rt->dst.dev->ifindex);
2839 #ifdef CONFIG_IP_ROUTE_CLASSID
2840         if (rt->dst.tclassid)
2841                 NLA_PUT_U32(skb, RTA_FLOW, rt->dst.tclassid);
2842 #endif
2843         if (rt_is_input_route(rt))
2844                 NLA_PUT_BE32(skb, RTA_PREFSRC, rt->rt_spec_dst);
2845         else if (rt->rt_src != rt->rt_key_src)
2846                 NLA_PUT_BE32(skb, RTA_PREFSRC, rt->rt_src);
2847
2848         if (rt->rt_dst != rt->rt_gateway)
2849                 NLA_PUT_BE32(skb, RTA_GATEWAY, rt->rt_gateway);
2850
2851         if (rtnetlink_put_metrics(skb, dst_metrics_ptr(&rt->dst)) < 0)
2852                 goto nla_put_failure;
2853
2854         if (rt->rt_mark)
2855                 NLA_PUT_BE32(skb, RTA_MARK, rt->rt_mark);
2856
2857         error = rt->dst.error;
2858         if (peer) {
2859                 inet_peer_refcheck(rt->peer);
2860                 id = atomic_read(&peer->ip_id_count) & 0xffff;
2861                 if (peer->tcp_ts_stamp) {
2862                         ts = peer->tcp_ts;
2863                         tsage = get_seconds() - peer->tcp_ts_stamp;
2864                 }
2865                 expires = ACCESS_ONCE(peer->pmtu_expires);
2866                 if (expires)
2867                         expires -= jiffies;
2868         }
2869
2870         if (rt_is_input_route(rt)) {
2871 #ifdef CONFIG_IP_MROUTE
2872                 __be32 dst = rt->rt_dst;
2873
2874                 if (ipv4_is_multicast(dst) && !ipv4_is_local_multicast(dst) &&
2875                     IPV4_DEVCONF_ALL(net, MC_FORWARDING)) {
2876                         int err = ipmr_get_route(net, skb,
2877                                                  rt->rt_src, rt->rt_dst,
2878                                                  r, nowait);
2879                         if (err <= 0) {
2880                                 if (!nowait) {
2881                                         if (err == 0)
2882                                                 return 0;
2883                                         goto nla_put_failure;
2884                                 } else {
2885                                         if (err == -EMSGSIZE)
2886                                                 goto nla_put_failure;
2887                                         error = err;
2888                                 }
2889                         }
2890                 } else
2891 #endif
2892                         NLA_PUT_U32(skb, RTA_IIF, rt->rt_iif);
2893         }
2894
2895         if (rtnl_put_cacheinfo(skb, &rt->dst, id, ts, tsage,
2896                                expires, error) < 0)
2897                 goto nla_put_failure;
2898
2899         return nlmsg_end(skb, nlh);
2900
2901 nla_put_failure:
2902         nlmsg_cancel(skb, nlh);
2903         return -EMSGSIZE;
2904 }
2905
2906 static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr* nlh, void *arg)
2907 {
2908         struct net *net = sock_net(in_skb->sk);
2909         struct rtmsg *rtm;
2910         struct nlattr *tb[RTA_MAX+1];
2911         struct rtable *rt = NULL;
2912         __be32 dst = 0;
2913         __be32 src = 0;
2914         u32 iif;
2915         int err;
2916         int mark;
2917         struct sk_buff *skb;
2918
2919         err = nlmsg_parse(nlh, sizeof(*rtm), tb, RTA_MAX, rtm_ipv4_policy);
2920         if (err < 0)
2921                 goto errout;
2922
2923         rtm = nlmsg_data(nlh);
2924
2925         skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
2926         if (skb == NULL) {
2927                 err = -ENOBUFS;
2928                 goto errout;
2929         }
2930
2931         /* Reserve room for dummy headers; this skb can pass
2932            through a good chunk of the routing engine.
2933          */
2934         skb_reset_mac_header(skb);
2935         skb_reset_network_header(skb);
2936
2937         /* Bugfix: need to give ip_route_input enough of an IP header to not gag. */
2938         ip_hdr(skb)->protocol = IPPROTO_ICMP;
2939         skb_reserve(skb, MAX_HEADER + sizeof(struct iphdr));
2940
2941         src = tb[RTA_SRC] ? nla_get_be32(tb[RTA_SRC]) : 0;
2942         dst = tb[RTA_DST] ? nla_get_be32(tb[RTA_DST]) : 0;
2943         iif = tb[RTA_IIF] ? nla_get_u32(tb[RTA_IIF]) : 0;
2944         mark = tb[RTA_MARK] ? nla_get_u32(tb[RTA_MARK]) : 0;
2945
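        /*
         * Two lookup flavours: if RTA_IIF was supplied, emulate reception
         * of the packet on that device and use the input routing path
         * (ip_route_input() with BHs disabled); otherwise build a flowi4
         * key from the attributes and do a plain output route lookup.
         */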
2946         if (iif) {
2947                 struct net_device *dev;
2948
2949                 dev = __dev_get_by_index(net, iif);
2950                 if (dev == NULL) {
2951                         err = -ENODEV;
2952                         goto errout_free;
2953                 }
2954
2955                 skb->protocol   = htons(ETH_P_IP);
2956                 skb->dev        = dev;
2957                 skb->mark       = mark;
2958                 local_bh_disable();
2959                 err = ip_route_input(skb, dst, src, rtm->rtm_tos, dev);
2960                 local_bh_enable();
2961
2962                 rt = skb_rtable(skb);
2963                 if (err == 0 && rt->dst.error)
2964                         err = -rt->dst.error;
2965         } else {
2966                 struct flowi4 fl4 = {
2967                         .daddr = dst,
2968                         .saddr = src,
2969                         .flowi4_tos = rtm->rtm_tos,
2970                         .flowi4_oif = tb[RTA_OIF] ? nla_get_u32(tb[RTA_OIF]) : 0,
2971                         .flowi4_mark = mark,
2972                 };
2973                 rt = ip_route_output_key(net, &fl4);
2974
2975                 err = 0;
2976                 if (IS_ERR(rt))
2977                         err = PTR_ERR(rt);
2978         }
2979
2980         if (err)
2981                 goto errout_free;
2982
2983         skb_dst_set(skb, &rt->dst);
2984         if (rtm->rtm_flags & RTM_F_NOTIFY)
2985                 rt->rt_flags |= RTCF_NOTIFY;
2986
2987         err = rt_fill_info(net, skb, NETLINK_CB(in_skb).pid, nlh->nlmsg_seq,
2988                            RTM_NEWROUTE, 0, 0);
2989         if (err <= 0)
2990                 goto errout_free;
2991
2992         err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).pid);
2993 errout:
2994         return err;
2995
2996 errout_free:
2997         kfree_skb(skb);
2998         goto errout;
2999 }
3000
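/*
 * Dump the routing cache to userspace as a multipart RTM_NEWROUTE stream.
 * cb->args[0]/[1] record the hash bucket and the index within its chain so
 * an interrupted dump can resume where it left off; expired entries and
 * entries belonging to other network namespaces are skipped.
 */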
3001 int ip_rt_dump(struct sk_buff *skb,  struct netlink_callback *cb)
3002 {
3003         struct rtable *rt;
3004         int h, s_h;
3005         int idx, s_idx;
3006         struct net *net;
3007
3008         net = sock_net(skb->sk);
3009
3010         s_h = cb->args[0];
3011         if (s_h < 0)
3012                 s_h = 0;
3013         s_idx = idx = cb->args[1];
3014         for (h = s_h; h <= rt_hash_mask; h++, s_idx = 0) {
3015                 if (!rt_hash_table[h].chain)
3016                         continue;
3017                 rcu_read_lock_bh();
3018                 for (rt = rcu_dereference_bh(rt_hash_table[h].chain), idx = 0; rt;
3019                      rt = rcu_dereference_bh(rt->dst.rt_next), idx++) {
3020                         if (!net_eq(dev_net(rt->dst.dev), net) || idx < s_idx)
3021                                 continue;
3022                         if (rt_is_expired(rt))
3023                                 continue;
3024                         skb_dst_set_noref(skb, &rt->dst);
3025                         if (rt_fill_info(net, skb, NETLINK_CB(cb->skb).pid,
3026                                          cb->nlh->nlmsg_seq, RTM_NEWROUTE,
3027                                          1, NLM_F_MULTI) <= 0) {
3028                                 skb_dst_drop(skb);
3029                                 rcu_read_unlock_bh();
3030                                 goto done;
3031                         }
3032                         skb_dst_drop(skb);
3033                 }
3034                 rcu_read_unlock_bh();
3035         }
3036
3037 done:
3038         cb->args[0] = h;
3039         cb->args[1] = idx;
3040         return skb->len;
3041 }
3042
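/*
 * Multicast configuration on an interface changed: invalidate the whole
 * routing cache for that device's namespace.
 */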
3043 void ip_rt_multicast_event(struct in_device *in_dev)
3044 {
3045         rt_cache_flush(dev_net(in_dev->dev), 0);
3046 }
3047
3048 #ifdef CONFIG_SYSCTL
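/*
 * Write-only handler behind /proc/sys/net/ipv4/route/flush.  The written
 * value is parsed as a flush delay and the cache of the owning namespace
 * (stashed in ->extra1 by sysctl_route_net_init()) is flushed; reads are
 * rejected with -EINVAL.  A sketch of typical usage from a root shell:
 *
 *      echo 0 > /proc/sys/net/ipv4/route/flush
 */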
3049 static int ipv4_sysctl_rtcache_flush(ctl_table *__ctl, int write,
3050                                         void __user *buffer,
3051                                         size_t *lenp, loff_t *ppos)
3052 {
3053         if (write) {
3054                 int flush_delay;
3055                 ctl_table ctl;
3056                 struct net *net;
3057
3058                 memcpy(&ctl, __ctl, sizeof(ctl));
3059                 ctl.data = &flush_delay;
3060                 proc_dointvec(&ctl, write, buffer, lenp, ppos);
3061
3062                 net = (struct net *)__ctl->extra1;
3063                 rt_cache_flush(net, flush_delay);
3064                 return 0;
3065         }
3066
3067         return -EINVAL;
3068 }
3069
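/*
 * Global (non per-namespace) route cache tunables exported under
 * /proc/sys/net/ipv4/route/: garbage-collection thresholds and timing,
 * ICMP redirect and error rate limiting, and PMTU bounds.  Entries using
 * the *_jiffies handlers are stored in jiffies but presented to userspace
 * in seconds (or in milliseconds for gc_min_interval_ms).
 */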
3070 static ctl_table ipv4_route_table[] = {
3071         {
3072                 .procname       = "gc_thresh",
3073                 .data           = &ipv4_dst_ops.gc_thresh,
3074                 .maxlen         = sizeof(int),
3075                 .mode           = 0644,
3076                 .proc_handler   = proc_dointvec,
3077         },
3078         {
3079                 .procname       = "max_size",
3080                 .data           = &ip_rt_max_size,
3081                 .maxlen         = sizeof(int),
3082                 .mode           = 0644,
3083                 .proc_handler   = proc_dointvec,
3084         },
3085         {
3086                 /* Deprecated. Use gc_min_interval_ms */
3087
3088                 .procname       = "gc_min_interval",
3089                 .data           = &ip_rt_gc_min_interval,
3090                 .maxlen         = sizeof(int),
3091                 .mode           = 0644,
3092                 .proc_handler   = proc_dointvec_jiffies,
3093         },
3094         {
3095                 .procname       = "gc_min_interval_ms",
3096                 .data           = &ip_rt_gc_min_interval,
3097                 .maxlen         = sizeof(int),
3098                 .mode           = 0644,
3099                 .proc_handler   = proc_dointvec_ms_jiffies,
3100         },
3101         {
3102                 .procname       = "gc_timeout",
3103                 .data           = &ip_rt_gc_timeout,
3104                 .maxlen         = sizeof(int),
3105                 .mode           = 0644,
3106                 .proc_handler   = proc_dointvec_jiffies,
3107         },
3108         {
3109                 .procname       = "gc_interval",
3110                 .data           = &ip_rt_gc_interval,
3111                 .maxlen         = sizeof(int),
3112                 .mode           = 0644,
3113                 .proc_handler   = proc_dointvec_jiffies,
3114         },
3115         {
3116                 .procname       = "redirect_load",
3117                 .data           = &ip_rt_redirect_load,
3118                 .maxlen         = sizeof(int),
3119                 .mode           = 0644,
3120                 .proc_handler   = proc_dointvec,
3121         },
3122         {
3123                 .procname       = "redirect_number",
3124                 .data           = &ip_rt_redirect_number,
3125                 .maxlen         = sizeof(int),
3126                 .mode           = 0644,
3127                 .proc_handler   = proc_dointvec,
3128         },
3129         {
3130                 .procname       = "redirect_silence",
3131                 .data           = &ip_rt_redirect_silence,
3132                 .maxlen         = sizeof(int),
3133                 .mode           = 0644,
3134                 .proc_handler   = proc_dointvec,
3135         },
3136         {
3137                 .procname       = "error_cost",
3138                 .data           = &ip_rt_error_cost,
3139                 .maxlen         = sizeof(int),
3140                 .mode           = 0644,
3141                 .proc_handler   = proc_dointvec,
3142         },
3143         {
3144                 .procname       = "error_burst",
3145                 .data           = &ip_rt_error_burst,
3146                 .maxlen         = sizeof(int),
3147                 .mode           = 0644,
3148                 .proc_handler   = proc_dointvec,
3149         },
3150         {
3151                 .procname       = "gc_elasticity",
3152                 .data           = &ip_rt_gc_elasticity,
3153                 .maxlen         = sizeof(int),
3154                 .mode           = 0644,
3155                 .proc_handler   = proc_dointvec,
3156         },
3157         {
3158                 .procname       = "mtu_expires",
3159                 .data           = &ip_rt_mtu_expires,
3160                 .maxlen         = sizeof(int),
3161                 .mode           = 0644,
3162                 .proc_handler   = proc_dointvec_jiffies,
3163         },
3164         {
3165                 .procname       = "min_pmtu",
3166                 .data           = &ip_rt_min_pmtu,
3167                 .maxlen         = sizeof(int),
3168                 .mode           = 0644,
3169                 .proc_handler   = proc_dointvec,
3170         },
3171         {
3172                 .procname       = "min_adv_mss",
3173                 .data           = &ip_rt_min_advmss,
3174                 .maxlen         = sizeof(int),
3175                 .mode           = 0644,
3176                 .proc_handler   = proc_dointvec,
3177         },
3178         { }
3179 };
3180
3181 static struct ctl_table empty[1];
3182
3183 static struct ctl_table ipv4_skeleton[] =
3184 {
3185         { .procname = "route",
3186           .mode = 0555, .child = ipv4_route_table },
3187         { .procname = "neigh",
3188           .mode = 0555, .child = empty },
3189         { }
3190 };
3191
3192 static __net_initdata struct ctl_path ipv4_path[] = {
3193         { .procname = "net", },
3194         { .procname = "ipv4", },
3195         { },
3196 };
3197
3198 static struct ctl_table ipv4_route_flush_table[] = {
3199         {
3200                 .procname       = "flush",
3201                 .maxlen         = sizeof(int),
3202                 .mode           = 0200,
3203                 .proc_handler   = ipv4_sysctl_rtcache_flush,
3204         },
3205         { },
3206 };
3207
3208 static __net_initdata struct ctl_path ipv4_route_path[] = {
3209         { .procname = "net", },
3210         { .procname = "ipv4", },
3211         { .procname = "route", },
3212         { },
3213 };
3214
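/*
 * Per-namespace sysctl setup for the "flush" entry.  init_net uses the
 * static ipv4_route_flush_table directly; every other namespace gets its
 * own kmemdup() copy so that ->extra1 can point at that namespace's
 * struct net.  The table is registered under net/ipv4/route/.
 */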
3215 static __net_init int sysctl_route_net_init(struct net *net)
3216 {
3217         struct ctl_table *tbl;
3218
3219         tbl = ipv4_route_flush_table;
3220         if (!net_eq(net, &init_net)) {
3221                 tbl = kmemdup(tbl, sizeof(ipv4_route_flush_table), GFP_KERNEL);
3222                 if (tbl == NULL)
3223                         goto err_dup;
3224         }
3225         tbl[0].extra1 = net;
3226
3227         net->ipv4.route_hdr =
3228                 register_net_sysctl_table(net, ipv4_route_path, tbl);
3229         if (net->ipv4.route_hdr == NULL)
3230                 goto err_reg;
3231         return 0;
3232
3233 err_reg:
3234         if (tbl != ipv4_route_flush_table)
3235                 kfree(tbl);
3236 err_dup:
3237         return -ENOMEM;
3238 }
3239
3240 static __net_exit void sysctl_route_net_exit(struct net *net)
3241 {
3242         struct ctl_table *tbl;
3243
3244         tbl = net->ipv4.route_hdr->ctl_table_arg;
3245         unregister_net_sysctl_table(net->ipv4.route_hdr);
3246         BUG_ON(tbl == ipv4_route_flush_table);
3247         kfree(tbl);
3248 }
3249
3250 static __net_initdata struct pernet_operations sysctl_route_ops = {
3251         .init = sysctl_route_net_init,
3252         .exit = sysctl_route_net_exit,
3253 };
3254 #endif
3255
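/*
 * Seed the per-namespace generation counters with random values.  Cached
 * routes record the generation they were created under, so bumping
 * rt_genid elsewhere invalidates the whole cache for that namespace (see
 * the rt_is_expired() check in the dump loop above).
 */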
3256 static __net_init int rt_genid_init(struct net *net)
3257 {
3258         get_random_bytes(&net->ipv4.rt_genid,
3259                          sizeof(net->ipv4.rt_genid));
3260         get_random_bytes(&net->ipv4.dev_addr_genid,
3261                          sizeof(net->ipv4.dev_addr_genid));
3262         return 0;
3263 }
3264
3265 static __net_initdata struct pernet_operations rt_genid_ops = {
3266         .init = rt_genid_init,
3267 };
3268
3269
3270 #ifdef CONFIG_IP_ROUTE_CLASSID
3271 struct ip_rt_acct __percpu *ip_rt_acct __read_mostly;
3272 #endif /* CONFIG_IP_ROUTE_CLASSID */
3273
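/*
 * "rhash_entries=" kernel boot parameter: overrides the automatically
 * sized route cache hash table.  A minimal usage sketch (boot command
 * line):
 *
 *      rhash_entries=65536
 */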
3274 static __initdata unsigned long rhash_entries;
3275 static int __init set_rhash_entries(char *str)
3276 {
3277         if (!str)
3278                 return 0;
3279         rhash_entries = simple_strtoul(str, &str, 0);
3280         return 1;
3281 }
3282 __setup("rhash_entries=", set_rhash_entries);
3283
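/*
 * Boot-time initialisation of the IPv4 routing layer: allocate the dst
 * slab cache and entry counters, size and allocate the route cache hash
 * table (honouring rhash_entries= when given), derive gc_thresh and
 * ip_rt_max_size from the hash size, then bring up devinet, the FIB,
 * /proc files, xfrm, the RTM_GETROUTE handler and the pernet subsystems.
 */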
3284 int __init ip_rt_init(void)
3285 {
3286         int rc = 0;
3287
3288 #ifdef CONFIG_IP_ROUTE_CLASSID
3289         ip_rt_acct = __alloc_percpu(256 * sizeof(struct ip_rt_acct), __alignof__(struct ip_rt_acct));
3290         if (!ip_rt_acct)
3291                 panic("IP: failed to allocate ip_rt_acct\n");
3292 #endif
3293
3294         ipv4_dst_ops.kmem_cachep =
3295                 kmem_cache_create("ip_dst_cache", sizeof(struct rtable), 0,
3296                                   SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
3297
3298         ipv4_dst_blackhole_ops.kmem_cachep = ipv4_dst_ops.kmem_cachep;
3299
3300         if (dst_entries_init(&ipv4_dst_ops) < 0)
3301                 panic("IP: failed to allocate ipv4_dst_ops counter\n");
3302
3303         if (dst_entries_init(&ipv4_dst_blackhole_ops) < 0)
3304                 panic("IP: failed to allocate ipv4_dst_blackhole_ops counter\n");
3305
3306         rt_hash_table = (struct rt_hash_bucket *)
3307                 alloc_large_system_hash("IP route cache",
3308                                         sizeof(struct rt_hash_bucket),
3309                                         rhash_entries,
3310                                         (totalram_pages >= 128 * 1024) ?
3311                                         15 : 17,
3312                                         0,
3313                                         &rt_hash_log,
3314                                         &rt_hash_mask,
3315                                         rhash_entries ? 0 : 512 * 1024);
3316         memset(rt_hash_table, 0, (rt_hash_mask + 1) * sizeof(struct rt_hash_bucket));
3317         rt_hash_lock_init();
3318
3319         ipv4_dst_ops.gc_thresh = (rt_hash_mask + 1);
3320         ip_rt_max_size = (rt_hash_mask + 1) * 16;
3321
3322         devinet_init();
3323         ip_fib_init();
3324
3325         if (ip_rt_proc_init())
3326                 printk(KERN_ERR "Unable to create route proc files\n");
3327 #ifdef CONFIG_XFRM
3328         xfrm_init();
3329         xfrm4_init(ip_rt_max_size);
3330 #endif
3331         rtnl_register(PF_INET, RTM_GETROUTE, inet_rtm_getroute, NULL, NULL);
3332
3333 #ifdef CONFIG_SYSCTL
3334         register_pernet_subsys(&sysctl_route_ops);
3335 #endif
3336         register_pernet_subsys(&rt_genid_ops);
3337         return rc;
3338 }
3339
3340 #ifdef CONFIG_SYSCTL
3341 /*
3342  * We really need to sanitize the damn ipv4 init order, then all
3343  * this nonsense will go away.
3344  */
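/*
 * Registers the bare net.ipv4 sysctl skeleton (the "route" table above plus
 * an empty "neigh" directory), presumably so that sysctl entries registered
 * later under those paths find their parent directories already in place.
 */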
3345 void __init ip_static_sysctl_init(void)
3346 {
3347         register_sysctl_paths(ipv4_path, ipv4_skeleton);
3348 }
3349 #endif