net/ipv4/route.c  (linux-2.6-block.git, at commit "net: Embed hh_cache inside of struct neighbour.")
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the  BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		ROUTE - implementation of the IP router.
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *		Linus Torvalds, <Linus.Torvalds@helsinki.fi>
 *		Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 * Fixes:
 *		Alan Cox	:	Verify area fixes.
 *		Alan Cox	:	cli() protects routing changes
 *		Rui Oliveira	:	ICMP routing table updates
 *		(rco@di.uminho.pt)	Routing table insertion and update
 *		Linus Torvalds	:	Rewrote bits to be sensible
 *		Alan Cox	:	Added BSD route gw semantics
 *		Alan Cox	:	Super /proc >4K
 *		Alan Cox	:	MTU in route table
 *		Alan Cox	:	MSS actually. Also added the window
 *					clamper.
 *		Sam Lantinga	:	Fixed route matching in rt_del()
 *		Alan Cox	:	Routing cache support.
 *		Alan Cox	:	Removed compatibility cruft.
 *		Alan Cox	:	RTF_REJECT support.
 *		Alan Cox	:	TCP irtt support.
 *		Jonathan Naylor	:	Added Metric support.
 *	Miquel van Smoorenburg	:	BSD API fixes.
 *	Miquel van Smoorenburg	:	Metrics.
 *		Alan Cox	:	Use __u32 properly
 *		Alan Cox	:	Aligned routing errors more closely with BSD
 *					our system is still very different.
 *		Alan Cox	:	Faster /proc handling
 *	Alexey Kuznetsov	:	Massive rework to support tree based routing,
 *					routing caches and better behaviour.
 *
 *		Olaf Erb	:	irtt wasn't being copied right.
 *		Bjorn Ekwall	:	Kerneld route support.
 *		Alan Cox	:	Multicast fixed (I hope)
 *		Pavel Krauz	:	Limited broadcast fixed
 *		Mike McLagan	:	Routing by source
 *	Alexey Kuznetsov	:	End of old history. Split to fib.c and
 *					route.c and rewritten from scratch.
 *		Andi Kleen	:	Load-limit warning messages.
 *	Vitaly E. Lavrov	:	Transparent proxy revived after year coma.
 *	Vitaly E. Lavrov	:	Race condition in ip_route_input_slow.
 *	Tobias Ringstrom	:	Uninitialized res.type in ip_route_output_slow.
 *	Vladimir V. Ivanov	:	IP rule info (flowid) is really useful.
 *		Marc Boucher	:	routing by fwmark
 *	Robert Olsson		:	Added rt_cache statistics
 *	Arnaldo C. Melo		:	Convert proc stuff to seq_file
 *	Eric Dumazet		:	hashed spinlocks and rt_check_expire() fixes.
 *	Ilia Sotnikov		:	Ignore TOS on PMTUD and Redirect
 *	Ilia Sotnikov		:	Removed TOS from hash calculations
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 */

#include <linux/module.h>
#include <asm/uaccess.h>
#include <asm/system.h>
#include <linux/bitops.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/bootmem.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/errno.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/proc_fs.h>
#include <linux/init.h>
#include <linux/workqueue.h>
#include <linux/skbuff.h>
#include <linux/inetdevice.h>
#include <linux/igmp.h>
#include <linux/pkt_sched.h>
#include <linux/mroute.h>
#include <linux/netfilter_ipv4.h>
#include <linux/random.h>
#include <linux/jhash.h>
#include <linux/rcupdate.h>
#include <linux/times.h>
#include <linux/slab.h>
#include <net/dst.h>
#include <net/net_namespace.h>
#include <net/protocol.h>
#include <net/ip.h>
#include <net/route.h>
#include <net/inetpeer.h>
#include <net/sock.h>
#include <net/ip_fib.h>
#include <net/arp.h>
#include <net/tcp.h>
#include <net/icmp.h>
#include <net/xfrm.h>
#include <net/netevent.h>
#include <net/rtnetlink.h>
#ifdef CONFIG_SYSCTL
#include <linux/sysctl.h>
#endif
#include <net/atmclip.h>

#define RT_FL_TOS(oldflp4) \
	((u32)(oldflp4->flowi4_tos & (IPTOS_RT_MASK | RTO_ONLINK)))

#define IP_MAX_MTU	0xFFF0

#define RT_GC_TIMEOUT (300*HZ)

static int ip_rt_max_size;
static int ip_rt_gc_timeout __read_mostly	= RT_GC_TIMEOUT;
static int ip_rt_gc_interval __read_mostly	= 60 * HZ;
static int ip_rt_gc_min_interval __read_mostly	= HZ / 2;
static int ip_rt_redirect_number __read_mostly	= 9;
static int ip_rt_redirect_load __read_mostly	= HZ / 50;
static int ip_rt_redirect_silence __read_mostly	= ((HZ / 50) << (9 + 1));
static int ip_rt_error_cost __read_mostly	= HZ;
static int ip_rt_error_burst __read_mostly	= 5 * HZ;
static int ip_rt_gc_elasticity __read_mostly	= 8;
static int ip_rt_mtu_expires __read_mostly	= 10 * 60 * HZ;
static int ip_rt_min_pmtu __read_mostly		= 512 + 20 + 20;
static int ip_rt_min_advmss __read_mostly	= 256;
static int rt_chain_length_max __read_mostly	= 20;

/*
 *	Interface to generic destination cache.
 */

static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie);
static unsigned int	 ipv4_default_advmss(const struct dst_entry *dst);
static unsigned int	 ipv4_default_mtu(const struct dst_entry *dst);
static void		 ipv4_dst_destroy(struct dst_entry *dst);
static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst);
static void		 ipv4_link_failure(struct sk_buff *skb);
static void		 ip_rt_update_pmtu(struct dst_entry *dst, u32 mtu);
static int rt_garbage_collect(struct dst_ops *ops);

static void ipv4_dst_ifdown(struct dst_entry *dst, struct net_device *dev,
			    int how)
{
}

static u32 *ipv4_cow_metrics(struct dst_entry *dst, unsigned long old)
{
	struct rtable *rt = (struct rtable *) dst;
	struct inet_peer *peer;
	u32 *p = NULL;

	if (!rt->peer)
		rt_bind_peer(rt, rt->rt_dst, 1);

	peer = rt->peer;
	if (peer) {
		u32 *old_p = __DST_METRICS_PTR(old);
		unsigned long prev, new;

		p = peer->metrics;
		if (inet_metrics_new(peer))
			memcpy(p, old_p, sizeof(u32) * RTAX_MAX);

		new = (unsigned long) p;
		prev = cmpxchg(&dst->_metrics, old, new);

		if (prev != old) {
			p = __DST_METRICS_PTR(prev);
			if (prev & DST_METRICS_READ_ONLY)
				p = NULL;
		} else {
			if (rt->fi) {
				fib_info_put(rt->fi);
				rt->fi = NULL;
			}
		}
	}
	return p;
}

static struct dst_ops ipv4_dst_ops = {
	.family =		AF_INET,
	.protocol =		cpu_to_be16(ETH_P_IP),
	.gc =			rt_garbage_collect,
	.check =		ipv4_dst_check,
	.default_advmss =	ipv4_default_advmss,
	.default_mtu =		ipv4_default_mtu,
	.cow_metrics =		ipv4_cow_metrics,
	.destroy =		ipv4_dst_destroy,
	.ifdown =		ipv4_dst_ifdown,
	.negative_advice =	ipv4_negative_advice,
	.link_failure =		ipv4_link_failure,
	.update_pmtu =		ip_rt_update_pmtu,
	.local_out =		__ip_local_out,
};

#define ECN_OR_COST(class)	TC_PRIO_##class

const __u8 ip_tos2prio[16] = {
	TC_PRIO_BESTEFFORT,
	ECN_OR_COST(BESTEFFORT),
	TC_PRIO_BESTEFFORT,
	ECN_OR_COST(BESTEFFORT),
	TC_PRIO_BULK,
	ECN_OR_COST(BULK),
	TC_PRIO_BULK,
	ECN_OR_COST(BULK),
	TC_PRIO_INTERACTIVE,
	ECN_OR_COST(INTERACTIVE),
	TC_PRIO_INTERACTIVE,
	ECN_OR_COST(INTERACTIVE),
	TC_PRIO_INTERACTIVE_BULK,
	ECN_OR_COST(INTERACTIVE_BULK),
	TC_PRIO_INTERACTIVE_BULK,
	ECN_OR_COST(INTERACTIVE_BULK)
};
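
/*
 * Usage sketch (not part of the original file): route code indexes this
 * table with the four TOS bits shifted down by one, as the
 * rt_tos2priority() helper in <net/route.h> does.  A minimal equivalent,
 * kept disabled here as illustration:
 */
#if 0
static inline char example_tos2priority(u8 tos)
{
	/* 16 entries cover the even TOS values 0x00..0x1e */
	return ip_tos2prio[IPTOS_TOS(tos) >> 1];
}
#endif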


/*
 * Route cache.
 */

/* The locking scheme is rather straightforward:
 *
 * 1) Read-Copy Update protects the buckets of the central route hash.
 * 2) Only writers remove entries, and they hold the lock
 *    as they look at rtable reference counts.
 * 3) Only readers acquire references to rtable entries,
 *    they do so with atomic increments and with the
 *    lock held.
 */

struct rt_hash_bucket {
	struct rtable __rcu	*chain;
};

#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) || \
	defined(CONFIG_PROVE_LOCKING)
/*
 * Instead of using one spinlock for each rt_hash_bucket, we use a table
 * of spinlocks.  The size of this table is a power of two and depends on
 * the number of CPUs.
 * (on lockdep we have a quite big spinlock_t, so keep the size down there)
 */
#ifdef CONFIG_LOCKDEP
# define RT_HASH_LOCK_SZ	256
#else
# if NR_CPUS >= 32
#  define RT_HASH_LOCK_SZ	4096
# elif NR_CPUS >= 16
#  define RT_HASH_LOCK_SZ	2048
# elif NR_CPUS >= 8
#  define RT_HASH_LOCK_SZ	1024
# elif NR_CPUS >= 4
#  define RT_HASH_LOCK_SZ	512
# else
#  define RT_HASH_LOCK_SZ	256
# endif
#endif

static spinlock_t	*rt_hash_locks;
# define rt_hash_lock_addr(slot) &rt_hash_locks[(slot) & (RT_HASH_LOCK_SZ - 1)]

static __init void rt_hash_lock_init(void)
{
	int i;

	rt_hash_locks = kmalloc(sizeof(spinlock_t) * RT_HASH_LOCK_SZ,
				GFP_KERNEL);
	if (!rt_hash_locks)
		panic("IP: failed to allocate rt_hash_locks\n");

	for (i = 0; i < RT_HASH_LOCK_SZ; i++)
		spin_lock_init(&rt_hash_locks[i]);
}
#else
# define rt_hash_lock_addr(slot) NULL

static inline void rt_hash_lock_init(void)
{
}
#endif

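/*
 * Sketch of the resulting locking pattern (illustrative only, not from
 * the original source): a writer touching bucket "slot" serializes on
 * the hashed spinlock for that slot, while readers walk the chain
 * locklessly under rcu_read_lock_bh().
 */
#if 0
static void example_bucket_update(unsigned int slot)
{
	spin_lock_bh(rt_hash_lock_addr(slot));
	/* ... unlink or insert rtable entries on this bucket's chain ... */
	spin_unlock_bh(rt_hash_lock_addr(slot));
}
#endif
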
static struct rt_hash_bucket	*rt_hash_table __read_mostly;
static unsigned			rt_hash_mask __read_mostly;
static unsigned int		rt_hash_log  __read_mostly;

static DEFINE_PER_CPU(struct rt_cache_stat, rt_cache_stat);
#define RT_CACHE_STAT_INC(field) __this_cpu_inc(rt_cache_stat.field)

static inline unsigned int rt_hash(__be32 daddr, __be32 saddr, int idx,
				   int genid)
{
	return jhash_3words((__force u32)daddr, (__force u32)saddr,
			    idx, genid)
		& rt_hash_mask;
}

static inline int rt_genid(struct net *net)
{
	return atomic_read(&net->ipv4.rt_genid);
}
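
/*
 * Illustration (hypothetical helper, not in the original file): how a
 * lookup key is reduced to a bucket.  Because the per-netns genid is
 * mixed into the hash, bumping it in rt_cache_invalidate() instantly
 * strands every previously cached entry in unreachable buckets.
 */
#if 0
static struct rtable *example_bucket_head(struct net *net,
					  __be32 daddr, __be32 saddr)
{
	unsigned int hash = rt_hash(daddr, saddr, 0, rt_genid(net));

	/* Readers must hold rcu_read_lock_bh() across the traversal. */
	return rcu_dereference_bh(rt_hash_table[hash].chain);
}
#endif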

#ifdef CONFIG_PROC_FS
struct rt_cache_iter_state {
	struct seq_net_private p;
	int bucket;
	int genid;
};

static struct rtable *rt_cache_get_first(struct seq_file *seq)
{
	struct rt_cache_iter_state *st = seq->private;
	struct rtable *r = NULL;

	for (st->bucket = rt_hash_mask; st->bucket >= 0; --st->bucket) {
		if (!rcu_dereference_raw(rt_hash_table[st->bucket].chain))
			continue;
		rcu_read_lock_bh();
		r = rcu_dereference_bh(rt_hash_table[st->bucket].chain);
		while (r) {
			if (dev_net(r->dst.dev) == seq_file_net(seq) &&
			    r->rt_genid == st->genid)
				return r;
			r = rcu_dereference_bh(r->dst.rt_next);
		}
		rcu_read_unlock_bh();
	}
	return r;
}

static struct rtable *__rt_cache_get_next(struct seq_file *seq,
					  struct rtable *r)
{
	struct rt_cache_iter_state *st = seq->private;

	r = rcu_dereference_bh(r->dst.rt_next);
	while (!r) {
		rcu_read_unlock_bh();
		do {
			if (--st->bucket < 0)
				return NULL;
		} while (!rcu_dereference_raw(rt_hash_table[st->bucket].chain));
		rcu_read_lock_bh();
		r = rcu_dereference_bh(rt_hash_table[st->bucket].chain);
	}
	return r;
}

static struct rtable *rt_cache_get_next(struct seq_file *seq,
					struct rtable *r)
{
	struct rt_cache_iter_state *st = seq->private;
	while ((r = __rt_cache_get_next(seq, r)) != NULL) {
		if (dev_net(r->dst.dev) != seq_file_net(seq))
			continue;
		if (r->rt_genid == st->genid)
			break;
	}
	return r;
}

static struct rtable *rt_cache_get_idx(struct seq_file *seq, loff_t pos)
{
	struct rtable *r = rt_cache_get_first(seq);

	if (r)
		while (pos && (r = rt_cache_get_next(seq, r)))
			--pos;
	return pos ? NULL : r;
}

static void *rt_cache_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct rt_cache_iter_state *st = seq->private;
	if (*pos)
		return rt_cache_get_idx(seq, *pos - 1);
	st->genid = rt_genid(seq_file_net(seq));
	return SEQ_START_TOKEN;
}

static void *rt_cache_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct rtable *r;

	if (v == SEQ_START_TOKEN)
		r = rt_cache_get_first(seq);
	else
		r = rt_cache_get_next(seq, v);
	++*pos;
	return r;
}

static void rt_cache_seq_stop(struct seq_file *seq, void *v)
{
	if (v && v != SEQ_START_TOKEN)
		rcu_read_unlock_bh();
}

static int rt_cache_seq_show(struct seq_file *seq, void *v)
{
	if (v == SEQ_START_TOKEN)
		seq_printf(seq, "%-127s\n",
			   "Iface\tDestination\tGateway \tFlags\t\tRefCnt\tUse\t"
			   "Metric\tSource\t\tMTU\tWindow\tIRTT\tTOS\tHHRef\t"
			   "HHUptod\tSpecDst");
	else {
		struct rtable *r = v;
		int len;

		seq_printf(seq, "%s\t%08X\t%08X\t%8X\t%d\t%u\t%d\t"
			   "%08X\t%d\t%u\t%u\t%02X\t%d\t%1d\t%08X%n",
			   r->dst.dev ? r->dst.dev->name : "*",
			   (__force u32)r->rt_dst,
			   (__force u32)r->rt_gateway,
			   r->rt_flags, atomic_read(&r->dst.__refcnt),
			   r->dst.__use, 0, (__force u32)r->rt_src,
			   dst_metric_advmss(&r->dst) + 40,
			   dst_metric(&r->dst, RTAX_WINDOW),
			   (int)((dst_metric(&r->dst, RTAX_RTT) >> 3) +
				 dst_metric(&r->dst, RTAX_RTTVAR)),
			   r->rt_key_tos,
			   -1,
			   (r->dst.neighbour ?
			    (r->dst.neighbour->hh.hh_output ==
			     dev_queue_xmit) : 0),
			   r->rt_spec_dst, &len);

		seq_printf(seq, "%*s\n", 127 - len, "");
	}
	return 0;
}

static const struct seq_operations rt_cache_seq_ops = {
	.start  = rt_cache_seq_start,
	.next   = rt_cache_seq_next,
	.stop   = rt_cache_seq_stop,
	.show   = rt_cache_seq_show,
};

static int rt_cache_seq_open(struct inode *inode, struct file *file)
{
	return seq_open_net(inode, file, &rt_cache_seq_ops,
			sizeof(struct rt_cache_iter_state));
}

static const struct file_operations rt_cache_seq_fops = {
	.owner	 = THIS_MODULE,
	.open	 = rt_cache_seq_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = seq_release_net,
};


static void *rt_cpu_seq_start(struct seq_file *seq, loff_t *pos)
{
	int cpu;

	if (*pos == 0)
		return SEQ_START_TOKEN;

	for (cpu = *pos-1; cpu < nr_cpu_ids; ++cpu) {
		if (!cpu_possible(cpu))
			continue;
		*pos = cpu+1;
		return &per_cpu(rt_cache_stat, cpu);
	}
	return NULL;
}

static void *rt_cpu_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	int cpu;

	for (cpu = *pos; cpu < nr_cpu_ids; ++cpu) {
		if (!cpu_possible(cpu))
			continue;
		*pos = cpu+1;
		return &per_cpu(rt_cache_stat, cpu);
	}
	return NULL;

}

static void rt_cpu_seq_stop(struct seq_file *seq, void *v)
{

}

static int rt_cpu_seq_show(struct seq_file *seq, void *v)
{
	struct rt_cache_stat *st = v;

	if (v == SEQ_START_TOKEN) {
		seq_printf(seq, "entries  in_hit in_slow_tot in_slow_mc in_no_route in_brd in_martian_dst in_martian_src  out_hit out_slow_tot out_slow_mc  gc_total gc_ignored gc_goal_miss gc_dst_overflow in_hlist_search out_hlist_search\n");
		return 0;
	}

	seq_printf(seq,"%08x  %08x %08x %08x %08x %08x %08x %08x "
		   " %08x %08x %08x %08x %08x %08x %08x %08x %08x \n",
		   dst_entries_get_slow(&ipv4_dst_ops),
		   st->in_hit,
		   st->in_slow_tot,
		   st->in_slow_mc,
		   st->in_no_route,
		   st->in_brd,
		   st->in_martian_dst,
		   st->in_martian_src,

		   st->out_hit,
		   st->out_slow_tot,
		   st->out_slow_mc,

		   st->gc_total,
		   st->gc_ignored,
		   st->gc_goal_miss,
		   st->gc_dst_overflow,
		   st->in_hlist_search,
		   st->out_hlist_search
		);
	return 0;
}

static const struct seq_operations rt_cpu_seq_ops = {
	.start  = rt_cpu_seq_start,
	.next   = rt_cpu_seq_next,
	.stop   = rt_cpu_seq_stop,
	.show   = rt_cpu_seq_show,
};


static int rt_cpu_seq_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &rt_cpu_seq_ops);
}

static const struct file_operations rt_cpu_seq_fops = {
	.owner	 = THIS_MODULE,
	.open	 = rt_cpu_seq_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = seq_release,
};

#ifdef CONFIG_IP_ROUTE_CLASSID
static int rt_acct_proc_show(struct seq_file *m, void *v)
{
	struct ip_rt_acct *dst, *src;
	unsigned int i, j;

	dst = kcalloc(256, sizeof(struct ip_rt_acct), GFP_KERNEL);
	if (!dst)
		return -ENOMEM;

	for_each_possible_cpu(i) {
		src = (struct ip_rt_acct *)per_cpu_ptr(ip_rt_acct, i);
		for (j = 0; j < 256; j++) {
			dst[j].o_bytes   += src[j].o_bytes;
			dst[j].o_packets += src[j].o_packets;
			dst[j].i_bytes   += src[j].i_bytes;
			dst[j].i_packets += src[j].i_packets;
		}
	}

	seq_write(m, dst, 256 * sizeof(struct ip_rt_acct));
	kfree(dst);
	return 0;
}

static int rt_acct_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, rt_acct_proc_show, NULL);
}

static const struct file_operations rt_acct_proc_fops = {
	.owner	  = THIS_MODULE,
	.open	  = rt_acct_proc_open,
	.read	  = seq_read,
	.llseek	  = seq_lseek,
	.release  = single_release,
};
#endif

static int __net_init ip_rt_do_proc_init(struct net *net)
{
	struct proc_dir_entry *pde;

	pde = proc_net_fops_create(net, "rt_cache", S_IRUGO,
			&rt_cache_seq_fops);
	if (!pde)
		goto err1;

	pde = proc_create("rt_cache", S_IRUGO,
			  net->proc_net_stat, &rt_cpu_seq_fops);
	if (!pde)
		goto err2;

#ifdef CONFIG_IP_ROUTE_CLASSID
	pde = proc_create("rt_acct", 0, net->proc_net, &rt_acct_proc_fops);
	if (!pde)
		goto err3;
#endif
	return 0;

#ifdef CONFIG_IP_ROUTE_CLASSID
err3:
	remove_proc_entry("rt_cache", net->proc_net_stat);
#endif
err2:
	remove_proc_entry("rt_cache", net->proc_net);
err1:
	return -ENOMEM;
}

static void __net_exit ip_rt_do_proc_exit(struct net *net)
{
	remove_proc_entry("rt_cache", net->proc_net_stat);
	remove_proc_entry("rt_cache", net->proc_net);
#ifdef CONFIG_IP_ROUTE_CLASSID
	remove_proc_entry("rt_acct", net->proc_net);
#endif
}

static struct pernet_operations ip_rt_proc_ops __net_initdata = {
	.init = ip_rt_do_proc_init,
	.exit = ip_rt_do_proc_exit,
};

static int __init ip_rt_proc_init(void)
{
	return register_pernet_subsys(&ip_rt_proc_ops);
}

#else
static inline int ip_rt_proc_init(void)
{
	return 0;
}
#endif /* CONFIG_PROC_FS */

static inline void rt_free(struct rtable *rt)
{
	call_rcu_bh(&rt->dst.rcu_head, dst_rcu_free);
}

static inline void rt_drop(struct rtable *rt)
{
	ip_rt_put(rt);
	call_rcu_bh(&rt->dst.rcu_head, dst_rcu_free);
}

static inline int rt_fast_clean(struct rtable *rth)
{
	/* Kill broadcast/multicast entries very aggressively, if they
	   collide in the hash table with more useful entries */
	return (rth->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST)) &&
		rt_is_input_route(rth) && rth->dst.rt_next;
}

static inline int rt_valuable(struct rtable *rth)
{
	return (rth->rt_flags & (RTCF_REDIRECTED | RTCF_NOTIFY)) ||
		(rth->peer && rth->peer->pmtu_expires);
}

static int rt_may_expire(struct rtable *rth, unsigned long tmo1, unsigned long tmo2)
{
	unsigned long age;
	int ret = 0;

	if (atomic_read(&rth->dst.__refcnt))
		goto out;

	age = jiffies - rth->dst.lastuse;
	if ((age <= tmo1 && !rt_fast_clean(rth)) ||
	    (age <= tmo2 && rt_valuable(rth)))
		goto out;
	ret = 1;
out:	return ret;
}

/* Bits of score are:
 * 31: very valuable
 * 30: not quite useless
 * 29..0: usage counter
 */
static inline u32 rt_score(struct rtable *rt)
{
	u32 score = jiffies - rt->dst.lastuse;

	score = ~score & ~(3<<30);

	if (rt_valuable(rt))
		score |= (1<<31);

	if (rt_is_output_route(rt) ||
	    !(rt->rt_flags & (RTCF_BROADCAST|RTCF_MULTICAST|RTCF_LOCAL)))
		score |= (1<<30);

	return score;
}
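
/*
 * Worked example (illustrative numbers, not from the original source):
 * a redirected route last used 100 jiffies ago scores
 * (~100 & ~(3<<30)) | (1<<31), while an idle input broadcast route
 * scores only its inverted age.  Since bit 31 outranks bit 30, which
 * outranks the inverted-age counter, eviction in rt_intern_hash()
 * reclaims the stale broadcast entry first.
 */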

static inline bool rt_caching(const struct net *net)
{
	return net->ipv4.current_rt_cache_rebuild_count <=
		net->ipv4.sysctl_rt_cache_rebuild_count;
}

static inline bool compare_hash_inputs(const struct rtable *rt1,
				       const struct rtable *rt2)
{
	return ((((__force u32)rt1->rt_key_dst ^ (__force u32)rt2->rt_key_dst) |
		((__force u32)rt1->rt_key_src ^ (__force u32)rt2->rt_key_src) |
		(rt1->rt_iif ^ rt2->rt_iif)) == 0);
}

static inline int compare_keys(struct rtable *rt1, struct rtable *rt2)
{
	return (((__force u32)rt1->rt_key_dst ^ (__force u32)rt2->rt_key_dst) |
		((__force u32)rt1->rt_key_src ^ (__force u32)rt2->rt_key_src) |
		(rt1->rt_mark ^ rt2->rt_mark) |
		(rt1->rt_key_tos ^ rt2->rt_key_tos) |
		(rt1->rt_oif ^ rt2->rt_oif) |
		(rt1->rt_iif ^ rt2->rt_iif)) == 0;
}
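
/*
 * Note on the comparison idiom above (added commentary, not from the
 * original source): OR-ing together the XORs of all key fields yields
 * zero iff every field matches, i.e. it is a branch-free form of
 *
 *	rt1->rt_key_dst == rt2->rt_key_dst &&
 *	rt1->rt_key_src == rt2->rt_key_src &&
 *	rt1->rt_mark    == rt2->rt_mark    && ...
 *
 * compare_hash_inputs() checks only the fields fed to rt_hash(), so
 * entries it groups together may still differ on tos, mark or oif.
 */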

static inline int compare_netns(struct rtable *rt1, struct rtable *rt2)
{
	return net_eq(dev_net(rt1->dst.dev), dev_net(rt2->dst.dev));
}

static inline int rt_is_expired(struct rtable *rth)
{
	return rth->rt_genid != rt_genid(dev_net(rth->dst.dev));
}

/*
 * Perform a full scan of the hash table and free all entries.
 * Can be called by a softirq or a process.
 * In the latter case, we want to reschedule if necessary.
 */
static void rt_do_flush(struct net *net, int process_context)
{
	unsigned int i;
	struct rtable *rth, *next;

	for (i = 0; i <= rt_hash_mask; i++) {
		struct rtable __rcu **pprev;
		struct rtable *list;

		if (process_context && need_resched())
			cond_resched();
		rth = rcu_dereference_raw(rt_hash_table[i].chain);
		if (!rth)
			continue;

		spin_lock_bh(rt_hash_lock_addr(i));

		list = NULL;
		pprev = &rt_hash_table[i].chain;
		rth = rcu_dereference_protected(*pprev,
			lockdep_is_held(rt_hash_lock_addr(i)));

		while (rth) {
			next = rcu_dereference_protected(rth->dst.rt_next,
				lockdep_is_held(rt_hash_lock_addr(i)));

			if (!net ||
			    net_eq(dev_net(rth->dst.dev), net)) {
				rcu_assign_pointer(*pprev, next);
				rcu_assign_pointer(rth->dst.rt_next, list);
				list = rth;
			} else {
				pprev = &rth->dst.rt_next;
			}
			rth = next;
		}

		spin_unlock_bh(rt_hash_lock_addr(i));

		for (; list; list = next) {
			next = rcu_dereference_protected(list->dst.rt_next, 1);
			rt_free(list);
		}
	}
}

/*
 * While freeing expired entries, we compute the average chain length
 * and standard deviation, using fixed-point arithmetic.
 * This is to have an estimate of rt_chain_length_max:
 * rt_chain_length_max = max(elasticity, AVG + 4*SD)
 * We use 3 bits for the fractional part, and 29 (or 61) for the magnitude.
 */

#define FRACT_BITS 3
#define ONE (1UL << FRACT_BITS)
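
/*
 * Worked example of the fixed-point form (illustrative): with
 * FRACT_BITS == 3, ONE == 8, so an average chain length of 2.5 is
 * carried as 2.5 * 8 == 20, and slow_chain_length() below recovers the
 * integer part with "length >> FRACT_BITS".
 */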

/*
 * Given a hash chain and an item in this hash chain,
 * find if a previous entry has the same hash_inputs
 * (but differs on tos, mark or oif)
 * Returns 0 if an alias is found.
 * Returns ONE if rth has no alias before itself.
 */
static int has_noalias(const struct rtable *head, const struct rtable *rth)
{
	const struct rtable *aux = head;

	while (aux != rth) {
		if (compare_hash_inputs(aux, rth))
			return 0;
		aux = rcu_dereference_protected(aux->dst.rt_next, 1);
	}
	return ONE;
}

/*
 * Perturbation of rt_genid by a small quantity [1..256].
 * Using 8 bits of shuffling ensures we can call rt_cache_invalidate()
 * many times (2^24) without producing a recent rt_genid.
 * The Jenkins hash is strong enough that little changes of rt_genid are OK.
 */
static void rt_cache_invalidate(struct net *net)
{
	unsigned char shuffle;

	get_random_bytes(&shuffle, sizeof(shuffle));
	atomic_add(shuffle + 1U, &net->ipv4.rt_genid);
}
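
/*
 * Arithmetic behind the comment above (added commentary): each
 * invalidation advances rt_genid by 1..256, so the 32-bit counter needs
 * at least 2^32 / 256 == 2^24 invalidations before it can wrap back
 * onto a recently used generation and mistakenly revive stale entries.
 */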

/*
 * delay < 0  : invalidate cache (fast : entries will be deleted later)
 * delay >= 0 : invalidate & flush cache (can be long)
 */
void rt_cache_flush(struct net *net, int delay)
{
	rt_cache_invalidate(net);
	if (delay >= 0)
		rt_do_flush(net, !in_softirq());
}

/* Flush previously invalidated entries from the cache */
void rt_cache_flush_batch(struct net *net)
{
	rt_do_flush(net, !in_softirq());
}

static void rt_emergency_hash_rebuild(struct net *net)
{
	if (net_ratelimit())
		printk(KERN_WARNING "Route hash chain too long!\n");
	rt_cache_invalidate(net);
}

/*
   Short description of GC goals.

   We want to build an algorithm which will keep the routing cache
   at some equilibrium point, where the number of aged-off entries
   is kept approximately equal to the number of newly generated ones.

   The current expiration strength is the variable "expire".
   We try to adjust it dynamically, so that when the network is idle
   expire is large enough to keep enough warm entries, and when load
   increases it shrinks to limit the cache size.
 */

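/*
 * Worked example (hypothetical numbers, for illustration only): with
 * rt_hash_log == 17 and ip_rt_gc_elasticity == 8, the initial goal
 * computed below is entries - (8 << 17), i.e. the collector only turns
 * aggressive once the table averages more than eight entries per bucket.
 */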
static int rt_garbage_collect(struct dst_ops *ops)
{
	static unsigned long expire = RT_GC_TIMEOUT;
	static unsigned long last_gc;
	static int rover;
	static int equilibrium;
	struct rtable *rth;
	struct rtable __rcu **rthp;
	unsigned long now = jiffies;
	int goal;
	int entries = dst_entries_get_fast(&ipv4_dst_ops);

	/*
	 * Garbage collection is pretty expensive,
	 * do not make it too frequently.
	 */

	RT_CACHE_STAT_INC(gc_total);

	if (now - last_gc < ip_rt_gc_min_interval &&
	    entries < ip_rt_max_size) {
		RT_CACHE_STAT_INC(gc_ignored);
		goto out;
	}

	entries = dst_entries_get_slow(&ipv4_dst_ops);
	/* Calculate the number of entries which we want to expire now. */
	goal = entries - (ip_rt_gc_elasticity << rt_hash_log);
	if (goal <= 0) {
		if (equilibrium < ipv4_dst_ops.gc_thresh)
			equilibrium = ipv4_dst_ops.gc_thresh;
		goal = entries - equilibrium;
		if (goal > 0) {
			equilibrium += min_t(unsigned int, goal >> 1, rt_hash_mask + 1);
			goal = entries - equilibrium;
		}
	} else {
		/* We are in a dangerous area. Try to reduce the cache really
		 * aggressively.
		 */
		goal = max_t(unsigned int, goal >> 1, rt_hash_mask + 1);
		equilibrium = entries - goal;
	}

	if (now - last_gc >= ip_rt_gc_min_interval)
		last_gc = now;

	if (goal <= 0) {
		equilibrium += goal;
		goto work_done;
	}

	do {
		int i, k;

		for (i = rt_hash_mask, k = rover; i >= 0; i--) {
			unsigned long tmo = expire;

			k = (k + 1) & rt_hash_mask;
			rthp = &rt_hash_table[k].chain;
			spin_lock_bh(rt_hash_lock_addr(k));
			while ((rth = rcu_dereference_protected(*rthp,
					lockdep_is_held(rt_hash_lock_addr(k)))) != NULL) {
				if (!rt_is_expired(rth) &&
					!rt_may_expire(rth, tmo, expire)) {
					tmo >>= 1;
					rthp = &rth->dst.rt_next;
					continue;
				}
				*rthp = rth->dst.rt_next;
				rt_free(rth);
				goal--;
			}
			spin_unlock_bh(rt_hash_lock_addr(k));
			if (goal <= 0)
				break;
		}
		rover = k;

		if (goal <= 0)
			goto work_done;

		/* The goal was not achieved. We stop the process if:

		   - expire was reduced to zero; otherwise expire is halved.
		   - the table is not full.
		   - we are called from interrupt context.
		   - the jiffies check is just a fallback/debug loop breaker;
		     we will not spin here for a long time in any case.
		 */

		RT_CACHE_STAT_INC(gc_goal_miss);

		if (expire == 0)
			break;

		expire >>= 1;

		if (dst_entries_get_fast(&ipv4_dst_ops) < ip_rt_max_size)
			goto out;
	} while (!in_softirq() && time_before_eq(jiffies, now));

	if (dst_entries_get_fast(&ipv4_dst_ops) < ip_rt_max_size)
		goto out;
	if (dst_entries_get_slow(&ipv4_dst_ops) < ip_rt_max_size)
		goto out;
	if (net_ratelimit())
		printk(KERN_WARNING "dst cache overflow\n");
	RT_CACHE_STAT_INC(gc_dst_overflow);
	return 1;

work_done:
	expire += ip_rt_gc_min_interval;
	if (expire > ip_rt_gc_timeout ||
	    dst_entries_get_fast(&ipv4_dst_ops) < ipv4_dst_ops.gc_thresh ||
	    dst_entries_get_slow(&ipv4_dst_ops) < ipv4_dst_ops.gc_thresh)
		expire = ip_rt_gc_timeout;
out:	return 0;
}

/*
 * Returns the number of entries in a hash chain that have different hash_inputs
 */
static int slow_chain_length(const struct rtable *head)
{
	int length = 0;
	const struct rtable *rth = head;

	while (rth) {
		length += has_noalias(head, rth);
		rth = rcu_dereference_protected(rth->dst.rt_next, 1);
	}
	return length >> FRACT_BITS;
}

static int rt_bind_neighbour(struct rtable *rt)
{
	static const __be32 inaddr_any = 0;
	struct net_device *dev = rt->dst.dev;
	struct neigh_table *tbl = &arp_tbl;
	const __be32 *nexthop;
	struct neighbour *n;

#if defined(CONFIG_ATM_CLIP) || defined(CONFIG_ATM_CLIP_MODULE)
	if (dev->type == ARPHRD_ATM)
		tbl = clip_tbl_hook;
#endif
	nexthop = &rt->rt_gateway;
	if (dev->flags & (IFF_LOOPBACK | IFF_POINTOPOINT))
		nexthop = &inaddr_any;
	n = ipv4_neigh_lookup(tbl, dev, nexthop);
	if (IS_ERR(n))
		return PTR_ERR(n);
	rt->dst.neighbour = n;

	return 0;
}

static struct rtable *rt_intern_hash(unsigned hash, struct rtable *rt,
				     struct sk_buff *skb, int ifindex)
{
	struct rtable	*rth, *cand;
	struct rtable __rcu **rthp, **candp;
	unsigned long	now;
	u32		min_score;
	int		chain_length;
	int attempts = !in_softirq();

restart:
	chain_length = 0;
	min_score = ~(u32)0;
	cand = NULL;
	candp = NULL;
	now = jiffies;

	if (!rt_caching(dev_net(rt->dst.dev))) {
		/*
		 * If we're not caching, just tell the caller we
		 * were successful and don't touch the route.  The
		 * caller holds the sole reference to the cache entry, and
		 * it will be released when the caller is done with it.
		 * If we drop it here, the callers have no way to resolve routes
		 * when we're not caching.  Instead, just point *rp at rt, so
		 * the caller gets a single use out of the route.
		 * Note that we do rt_free on this new route entry, so that
		 * once its refcount hits zero, we are still able to reap it
		 * (Thanks Alexey)
		 * Note: To avoid expensive rcu stuff for this uncached dst,
		 * we set DST_NOCACHE so that dst_release() can free the dst
		 * without waiting for a grace period.
		 */

		rt->dst.flags |= DST_NOCACHE;
		if (rt->rt_type == RTN_UNICAST || rt_is_output_route(rt)) {
			int err = rt_bind_neighbour(rt);
			if (err) {
				if (net_ratelimit())
					printk(KERN_WARNING
					    "Neighbour table failure & not caching routes.\n");
				ip_rt_put(rt);
				return ERR_PTR(err);
			}
		}

		goto skip_hashing;
	}

	rthp = &rt_hash_table[hash].chain;

	spin_lock_bh(rt_hash_lock_addr(hash));
	while ((rth = rcu_dereference_protected(*rthp,
			lockdep_is_held(rt_hash_lock_addr(hash)))) != NULL) {
		if (rt_is_expired(rth)) {
			*rthp = rth->dst.rt_next;
			rt_free(rth);
			continue;
		}
		if (compare_keys(rth, rt) && compare_netns(rth, rt)) {
			/* Put it first */
			*rthp = rth->dst.rt_next;
			/*
			 * Since lookup is lockfree, the deletion
			 * must be visible to another weakly ordered CPU before
			 * the insertion at the start of the hash chain.
			 */
			rcu_assign_pointer(rth->dst.rt_next,
					   rt_hash_table[hash].chain);
			/*
			 * Since lookup is lockfree, the update writes
			 * must be ordered for consistency on SMP.
			 */
			rcu_assign_pointer(rt_hash_table[hash].chain, rth);

			dst_use(&rth->dst, now);
			spin_unlock_bh(rt_hash_lock_addr(hash));

			rt_drop(rt);
			if (skb)
				skb_dst_set(skb, &rth->dst);
			return rth;
		}

		if (!atomic_read(&rth->dst.__refcnt)) {
			u32 score = rt_score(rth);

			if (score <= min_score) {
				cand = rth;
				candp = rthp;
				min_score = score;
			}
		}

		chain_length++;

		rthp = &rth->dst.rt_next;
	}

	if (cand) {
		/* ip_rt_gc_elasticity used to be the average length of chain
		 * length, when exceeded gc becomes really aggressive.
		 *
		 * The second limit is less certain. At the moment it allows
		 * only 2 entries per bucket. We will see.
		 */
		if (chain_length > ip_rt_gc_elasticity) {
			*candp = cand->dst.rt_next;
			rt_free(cand);
		}
	} else {
		if (chain_length > rt_chain_length_max &&
		    slow_chain_length(rt_hash_table[hash].chain) > rt_chain_length_max) {
			struct net *net = dev_net(rt->dst.dev);
			int num = ++net->ipv4.current_rt_cache_rebuild_count;
			if (!rt_caching(net)) {
				printk(KERN_WARNING "%s: %d rebuilds is over limit, route caching disabled\n",
					rt->dst.dev->name, num);
			}
			rt_emergency_hash_rebuild(net);
			spin_unlock_bh(rt_hash_lock_addr(hash));

			hash = rt_hash(rt->rt_key_dst, rt->rt_key_src,
					ifindex, rt_genid(net));
			goto restart;
		}
	}

	/* Try to bind the route to arp only if it is an output
	   route or a unicast forwarding path.
	 */
	if (rt->rt_type == RTN_UNICAST || rt_is_output_route(rt)) {
		int err = rt_bind_neighbour(rt);
		if (err) {
			spin_unlock_bh(rt_hash_lock_addr(hash));

			if (err != -ENOBUFS) {
				rt_drop(rt);
				return ERR_PTR(err);
			}

			/* Neighbour tables are full and nothing
			   can be released. Try to shrink the route cache,
			   it is most likely it holds some neighbour records.
			 */
			if (attempts-- > 0) {
				int saved_elasticity = ip_rt_gc_elasticity;
				int saved_int = ip_rt_gc_min_interval;
				ip_rt_gc_elasticity	= 1;
				ip_rt_gc_min_interval	= 0;
				rt_garbage_collect(&ipv4_dst_ops);
				ip_rt_gc_min_interval	= saved_int;
				ip_rt_gc_elasticity	= saved_elasticity;
				goto restart;
			}

			if (net_ratelimit())
				printk(KERN_WARNING "ipv4: Neighbour table overflow.\n");
			rt_drop(rt);
			return ERR_PTR(-ENOBUFS);
		}
	}

	rt->dst.rt_next = rt_hash_table[hash].chain;

	/*
	 * Since lookup is lockfree, we must make sure
	 * previous writes to rt are committed to memory
	 * before making rt visible to other CPUS.
	 */
	rcu_assign_pointer(rt_hash_table[hash].chain, rt);

	spin_unlock_bh(rt_hash_lock_addr(hash));

skip_hashing:
	if (skb)
		skb_dst_set(skb, &rt->dst);
	return rt;
}

static atomic_t __rt_peer_genid = ATOMIC_INIT(0);

static u32 rt_peer_genid(void)
{
	return atomic_read(&__rt_peer_genid);
}

void rt_bind_peer(struct rtable *rt, __be32 daddr, int create)
{
	struct inet_peer *peer;

	peer = inet_getpeer_v4(daddr, create);

	if (peer && cmpxchg(&rt->peer, NULL, peer) != NULL)
		inet_putpeer(peer);
	else
		rt->rt_peer_genid = rt_peer_genid();
}

/*
 * Peer allocation may fail only in serious out-of-memory conditions.  However
 * we still can generate some output.
 * Random ID selection looks a bit dangerous because we have no chance to
 * select an ID that is unique over a reasonable period of time.
 * But a broken packet identifier may be better than no packet at all.
 */
static void ip_select_fb_ident(struct iphdr *iph)
{
	static DEFINE_SPINLOCK(ip_fb_id_lock);
	static u32 ip_fallback_id;
	u32 salt;

	spin_lock_bh(&ip_fb_id_lock);
	salt = secure_ip_id((__force __be32)ip_fallback_id ^ iph->daddr);
	iph->id = htons(salt & 0xFFFF);
	ip_fallback_id = salt;
	spin_unlock_bh(&ip_fb_id_lock);
}

void __ip_select_ident(struct iphdr *iph, struct dst_entry *dst, int more)
{
	struct rtable *rt = (struct rtable *) dst;

	if (rt) {
		if (rt->peer == NULL)
			rt_bind_peer(rt, rt->rt_dst, 1);

		/* If a peer is attached to the destination, it is never
		   detached, so we need not grab a lock to dereference it.
		 */
		if (rt->peer) {
			iph->id = htons(inet_getid(rt->peer, more));
			return;
		}
	} else
		printk(KERN_DEBUG "rt_bind_peer(0) @%p\n",
		       __builtin_return_address(0));

	ip_select_fb_ident(iph);
}
EXPORT_SYMBOL(__ip_select_ident);

static void rt_del(unsigned hash, struct rtable *rt)
{
	struct rtable __rcu **rthp;
	struct rtable *aux;

	rthp = &rt_hash_table[hash].chain;
	spin_lock_bh(rt_hash_lock_addr(hash));
	ip_rt_put(rt);
	while ((aux = rcu_dereference_protected(*rthp,
			lockdep_is_held(rt_hash_lock_addr(hash)))) != NULL) {
		if (aux == rt || rt_is_expired(aux)) {
			*rthp = aux->dst.rt_next;
			rt_free(aux);
			continue;
		}
		rthp = &aux->dst.rt_next;
	}
	spin_unlock_bh(rt_hash_lock_addr(hash));
}

/* called in rcu_read_lock() section */
void ip_rt_redirect(__be32 old_gw, __be32 daddr, __be32 new_gw,
		    __be32 saddr, struct net_device *dev)
{
	struct in_device *in_dev = __in_dev_get_rcu(dev);
	struct inet_peer *peer;
	struct net *net;

	if (!in_dev)
		return;

	net = dev_net(dev);
	if (new_gw == old_gw || !IN_DEV_RX_REDIRECTS(in_dev) ||
	    ipv4_is_multicast(new_gw) || ipv4_is_lbcast(new_gw) ||
	    ipv4_is_zeronet(new_gw))
		goto reject_redirect;

	if (!IN_DEV_SHARED_MEDIA(in_dev)) {
		if (!inet_addr_onlink(in_dev, new_gw, old_gw))
			goto reject_redirect;
		if (IN_DEV_SEC_REDIRECTS(in_dev) && ip_fib_check_default(new_gw, dev))
			goto reject_redirect;
	} else {
		if (inet_addr_type(net, new_gw) != RTN_UNICAST)
			goto reject_redirect;
	}

	peer = inet_getpeer_v4(daddr, 1);
	if (peer) {
		peer->redirect_learned.a4 = new_gw;

		inet_putpeer(peer);

		atomic_inc(&__rt_peer_genid);
	}
	return;

reject_redirect:
#ifdef CONFIG_IP_ROUTE_VERBOSE
	if (IN_DEV_LOG_MARTIANS(in_dev) && net_ratelimit())
		printk(KERN_INFO "Redirect from %pI4 on %s about %pI4 ignored.\n"
			"  Advised path = %pI4 -> %pI4\n",
		       &old_gw, dev->name, &new_gw,
		       &saddr, &daddr);
#endif
	;
}

static bool peer_pmtu_expired(struct inet_peer *peer)
{
	unsigned long orig = ACCESS_ONCE(peer->pmtu_expires);

	return orig &&
	       time_after_eq(jiffies, orig) &&
	       cmpxchg(&peer->pmtu_expires, orig, 0) == orig;
}

static bool peer_pmtu_cleaned(struct inet_peer *peer)
{
	unsigned long orig = ACCESS_ONCE(peer->pmtu_expires);

	return orig &&
	       cmpxchg(&peer->pmtu_expires, orig, 0) == orig;
}
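
/*
 * Note on the cmpxchg() idiom above (added commentary, not from the
 * original source): by atomically swapping pmtu_expires from the value
 * just observed to 0, only one CPU can win the transition for a given
 * expiry, so the single caller that sees "true" is the one responsible
 * for restoring the saved pmtu_orig metric.
 */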

static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst)
{
	struct rtable *rt = (struct rtable *)dst;
	struct dst_entry *ret = dst;

	if (rt) {
		if (dst->obsolete > 0) {
			ip_rt_put(rt);
			ret = NULL;
		} else if (rt->rt_flags & RTCF_REDIRECTED) {
			unsigned hash = rt_hash(rt->rt_key_dst, rt->rt_key_src,
						rt->rt_oif,
						rt_genid(dev_net(dst->dev)));
			rt_del(hash, rt);
			ret = NULL;
		} else if (rt->peer && peer_pmtu_expired(rt->peer)) {
			dst_metric_set(dst, RTAX_MTU, rt->peer->pmtu_orig);
		}
	}
	return ret;
}

/*
 * Algorithm:
 *	1. The first ip_rt_redirect_number redirects are sent
 *	   with exponential backoff, then we stop sending them at all,
 *	   assuming that the host ignores our redirects.
 *	2. If we did not see packets requiring redirects
 *	   during ip_rt_redirect_silence, we assume that the host
 *	   forgot the redirected route and start to send redirects again.
 *
 * This algorithm is much cheaper and more intelligent than dumb load limiting
 * in icmp.c.
 *
 * NOTE. Do not forget to inhibit load limiting for redirects (redundant)
 * and "frag. need" (breaks PMTU discovery) in icmp.c.
 */

void ip_rt_send_redirect(struct sk_buff *skb)
{
	struct rtable *rt = skb_rtable(skb);
	struct in_device *in_dev;
	struct inet_peer *peer;
	int log_martians;

	rcu_read_lock();
	in_dev = __in_dev_get_rcu(rt->dst.dev);
	if (!in_dev || !IN_DEV_TX_REDIRECTS(in_dev)) {
		rcu_read_unlock();
		return;
	}
	log_martians = IN_DEV_LOG_MARTIANS(in_dev);
	rcu_read_unlock();

	if (!rt->peer)
		rt_bind_peer(rt, rt->rt_dst, 1);
	peer = rt->peer;
	if (!peer) {
		icmp_send(skb, ICMP_REDIRECT, ICMP_REDIR_HOST, rt->rt_gateway);
		return;
	}

	/* No redirected packets during ip_rt_redirect_silence;
	 * reset the algorithm.
	 */
	if (time_after(jiffies, peer->rate_last + ip_rt_redirect_silence))
		peer->rate_tokens = 0;

	/* Too many ignored redirects; do not send anything;
	 * set dst.rate_last to the last seen redirected packet.
	 */
	if (peer->rate_tokens >= ip_rt_redirect_number) {
		peer->rate_last = jiffies;
		return;
	}

	/* Check for load limit; set rate_last to the latest sent
	 * redirect.
	 */
	if (peer->rate_tokens == 0 ||
	    time_after(jiffies,
		       (peer->rate_last +
			(ip_rt_redirect_load << peer->rate_tokens)))) {
		icmp_send(skb, ICMP_REDIRECT, ICMP_REDIR_HOST, rt->rt_gateway);
		peer->rate_last = jiffies;
		++peer->rate_tokens;
#ifdef CONFIG_IP_ROUTE_VERBOSE
		if (log_martians &&
		    peer->rate_tokens == ip_rt_redirect_number &&
		    net_ratelimit())
			printk(KERN_WARNING "host %pI4/if%d ignores redirects for %pI4 to %pI4.\n",
			       &ip_hdr(skb)->saddr, rt->rt_iif,
			       &rt->rt_dst, &rt->rt_gateway);
#endif
	}
}

static int ip_error(struct sk_buff *skb)
{
	struct rtable *rt = skb_rtable(skb);
	struct inet_peer *peer;
	unsigned long now;
	bool send;
	int code;

	switch (rt->dst.error) {
	case EINVAL:
	default:
		goto out;
	case EHOSTUNREACH:
		code = ICMP_HOST_UNREACH;
		break;
	case ENETUNREACH:
		code = ICMP_NET_UNREACH;
		IP_INC_STATS_BH(dev_net(rt->dst.dev),
				IPSTATS_MIB_INNOROUTES);
		break;
	case EACCES:
		code = ICMP_PKT_FILTERED;
		break;
	}

	if (!rt->peer)
		rt_bind_peer(rt, rt->rt_dst, 1);
	peer = rt->peer;

	send = true;
	if (peer) {
		now = jiffies;
		peer->rate_tokens += now - peer->rate_last;
		if (peer->rate_tokens > ip_rt_error_burst)
			peer->rate_tokens = ip_rt_error_burst;
		peer->rate_last = now;
		if (peer->rate_tokens >= ip_rt_error_cost)
			peer->rate_tokens -= ip_rt_error_cost;
		else
			send = false;
	}
	if (send)
		icmp_send(skb, ICMP_DEST_UNREACH, code, 0);

out:	kfree_skb(skb);
	return 0;
}

/*
 *	The last two values are not from the RFC but
 *	are needed for AMPRnet AX.25 paths.
 */

static const unsigned short mtu_plateau[] =
{32000, 17914, 8166, 4352, 2002, 1492, 576, 296, 216, 128 };

static inline unsigned short guess_mtu(unsigned short old_mtu)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(mtu_plateau); i++)
		if (old_mtu > mtu_plateau[i])
			return mtu_plateau[i];
	return 68;
}
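
/*
 * Worked example (illustrative): an ICMP "frag needed" quoting an
 * original length of 1500 with a zero next-hop MTU makes
 * ip_rt_frag_needed() below fall back to guess_mtu(1500), which returns
 * the next lower plateau, 1492 (typical of PPPoE links).
 */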

unsigned short ip_rt_frag_needed(struct net *net, const struct iphdr *iph,
				 unsigned short new_mtu,
				 struct net_device *dev)
{
	unsigned short old_mtu = ntohs(iph->tot_len);
	unsigned short est_mtu = 0;
	struct inet_peer *peer;

	peer = inet_getpeer_v4(iph->daddr, 1);
	if (peer) {
		unsigned short mtu = new_mtu;

		if (new_mtu < 68 || new_mtu >= old_mtu) {
			/* BSD 4.2 derived systems incorrectly adjust
			 * tot_len by the IP header length, and report
			 * a zero MTU in the ICMP message.
			 */
			if (mtu == 0 &&
			    old_mtu >= 68 + (iph->ihl << 2))
				old_mtu -= iph->ihl << 2;
			mtu = guess_mtu(old_mtu);
		}

		if (mtu < ip_rt_min_pmtu)
			mtu = ip_rt_min_pmtu;
		if (!peer->pmtu_expires || mtu < peer->pmtu_learned) {
			unsigned long pmtu_expires;

			pmtu_expires = jiffies + ip_rt_mtu_expires;
			if (!pmtu_expires)
				pmtu_expires = 1UL;

			est_mtu = mtu;
			peer->pmtu_learned = mtu;
			peer->pmtu_expires = pmtu_expires;
		}

		inet_putpeer(peer);

		atomic_inc(&__rt_peer_genid);
	}
	return est_mtu ? : new_mtu;
}

static void check_peer_pmtu(struct dst_entry *dst, struct inet_peer *peer)
{
	unsigned long expires = ACCESS_ONCE(peer->pmtu_expires);

	if (!expires)
		return;
	if (time_before(jiffies, expires)) {
		u32 orig_dst_mtu = dst_mtu(dst);
		if (peer->pmtu_learned < orig_dst_mtu) {
			if (!peer->pmtu_orig)
				peer->pmtu_orig = dst_metric_raw(dst, RTAX_MTU);
			dst_metric_set(dst, RTAX_MTU, peer->pmtu_learned);
		}
	} else if (cmpxchg(&peer->pmtu_expires, expires, 0) == expires)
		dst_metric_set(dst, RTAX_MTU, peer->pmtu_orig);
}

static void ip_rt_update_pmtu(struct dst_entry *dst, u32 mtu)
{
	struct rtable *rt = (struct rtable *) dst;
	struct inet_peer *peer;

	dst_confirm(dst);

	if (!rt->peer)
		rt_bind_peer(rt, rt->rt_dst, 1);
	peer = rt->peer;
	if (peer) {
		unsigned long pmtu_expires = ACCESS_ONCE(peer->pmtu_expires);

		if (mtu < ip_rt_min_pmtu)
			mtu = ip_rt_min_pmtu;
		if (!pmtu_expires || mtu < peer->pmtu_learned) {

			pmtu_expires = jiffies + ip_rt_mtu_expires;
			if (!pmtu_expires)
				pmtu_expires = 1UL;

			peer->pmtu_learned = mtu;
			peer->pmtu_expires = pmtu_expires;

			atomic_inc(&__rt_peer_genid);
			rt->rt_peer_genid = rt_peer_genid();
		}
		check_peer_pmtu(dst, peer);
	}
}

static int check_peer_redir(struct dst_entry *dst, struct inet_peer *peer)
{
	struct rtable *rt = (struct rtable *) dst;
	__be32 orig_gw = rt->rt_gateway;

	dst_confirm(&rt->dst);

	neigh_release(rt->dst.neighbour);
	rt->dst.neighbour = NULL;

	rt->rt_gateway = peer->redirect_learned.a4;
	if (rt_bind_neighbour(rt) ||
	    !(rt->dst.neighbour->nud_state & NUD_VALID)) {
		if (rt->dst.neighbour)
			neigh_event_send(rt->dst.neighbour, NULL);
		rt->rt_gateway = orig_gw;
		return -EAGAIN;
	} else {
		rt->rt_flags |= RTCF_REDIRECTED;
		call_netevent_notifiers(NETEVENT_NEIGH_UPDATE,
					rt->dst.neighbour);
	}
	return 0;
}

static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie)
{
	struct rtable *rt = (struct rtable *) dst;

	if (rt_is_expired(rt))
		return NULL;
	if (rt->rt_peer_genid != rt_peer_genid()) {
		struct inet_peer *peer;

		if (!rt->peer)
			rt_bind_peer(rt, rt->rt_dst, 0);

		peer = rt->peer;
		if (peer) {
			check_peer_pmtu(dst, peer);

			if (peer->redirect_learned.a4 &&
			    peer->redirect_learned.a4 != rt->rt_gateway) {
				if (check_peer_redir(dst, peer))
					return NULL;
			}
		}

		rt->rt_peer_genid = rt_peer_genid();
	}
	return dst;
}

static void ipv4_dst_destroy(struct dst_entry *dst)
{
	struct rtable *rt = (struct rtable *) dst;
	struct inet_peer *peer = rt->peer;

	if (rt->fi) {
		fib_info_put(rt->fi);
		rt->fi = NULL;
	}
	if (peer) {
		rt->peer = NULL;
		inet_putpeer(peer);
	}
}


static void ipv4_link_failure(struct sk_buff *skb)
{
	struct rtable *rt;

	icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_UNREACH, 0);

	rt = skb_rtable(skb);
	if (rt && rt->peer && peer_pmtu_cleaned(rt->peer))
		dst_metric_set(&rt->dst, RTAX_MTU, rt->peer->pmtu_orig);
}

static int ip_rt_bug(struct sk_buff *skb)
{
	printk(KERN_DEBUG "ip_rt_bug: %pI4 -> %pI4, %s\n",
		&ip_hdr(skb)->saddr, &ip_hdr(skb)->daddr,
		skb->dev ? skb->dev->name : "?");
	kfree_skb(skb);
	WARN_ON(1);
	return 0;
}

/*
   We do not cache the source address of the outgoing interface,
   because it is used only by IP RR, TS and SRR options,
   so it is out of the fast path.

   BTW remember: "addr" is allowed to be unaligned
   in IP options!
 */

void ip_rt_get_source(u8 *addr, struct sk_buff *skb, struct rtable *rt)
{
	__be32 src;

	if (rt_is_output_route(rt))
		src = ip_hdr(skb)->saddr;
	else {
		struct fib_result res;
		struct flowi4 fl4;
		struct iphdr *iph;

		iph = ip_hdr(skb);

		memset(&fl4, 0, sizeof(fl4));
		fl4.daddr = iph->daddr;
		fl4.saddr = iph->saddr;
		fl4.flowi4_tos = iph->tos;
		fl4.flowi4_oif = rt->dst.dev->ifindex;
		fl4.flowi4_iif = skb->dev->ifindex;
		fl4.flowi4_mark = skb->mark;

		rcu_read_lock();
		if (fib_lookup(dev_net(rt->dst.dev), &fl4, &res) == 0)
			src = FIB_RES_PREFSRC(dev_net(rt->dst.dev), res);
		else
			src = inet_select_addr(rt->dst.dev, rt->rt_gateway,
					RT_SCOPE_UNIVERSE);
		rcu_read_unlock();
	}
	memcpy(addr, &src, 4);
}

#ifdef CONFIG_IP_ROUTE_CLASSID
static void set_class_tag(struct rtable *rt, u32 tag)
{
	if (!(rt->dst.tclassid & 0xFFFF))
		rt->dst.tclassid |= tag & 0xFFFF;
	if (!(rt->dst.tclassid & 0xFFFF0000))
		rt->dst.tclassid |= tag & 0xFFFF0000;
}
#endif
1756
0dbaee3b
DM
1757static unsigned int ipv4_default_advmss(const struct dst_entry *dst)
1758{
1759 unsigned int advmss = dst_metric_raw(dst, RTAX_ADVMSS);
1760
1761 if (advmss == 0) {
1762 advmss = max_t(unsigned int, dst->dev->mtu - 40,
1763 ip_rt_min_advmss);
1764 if (advmss > 65535 - 40)
1765 advmss = 65535 - 40;
1766 }
1767 return advmss;
1768}
1769
d33e4553
DM
1770static unsigned int ipv4_default_mtu(const struct dst_entry *dst)
1771{
1772 unsigned int mtu = dst->dev->mtu;
1773
1774 if (unlikely(dst_metric_locked(dst, RTAX_MTU))) {
1775 const struct rtable *rt = (const struct rtable *) dst;
1776
1777 if (rt->rt_gateway != rt->rt_dst && mtu > 576)
1778 mtu = 576;
1779 }
1780
1781 if (mtu > IP_MAX_MTU)
1782 mtu = IP_MAX_MTU;
1783
1784 return mtu;
1785}
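/* Example (editor's illustration, not part of the original file): if an
 * administrator locks the MTU metric on a route whose next hop is a
 * gateway (rt_gateway != rt_dst), ipv4_default_mtu() conservatively caps
 * the result at 576, the minimum datagram size every host must accept
 * per RFC 791; directly connected routes keep the device MTU, bounded
 * above by IP_MAX_MTU.
 */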
1786
813b3b5d 1787static void rt_init_metrics(struct rtable *rt, const struct flowi4 *fl4,
5e2b61f7 1788 struct fib_info *fi)
a4daad6b 1789{
0131ba45
DM
1790 struct inet_peer *peer;
1791 int create = 0;
a4daad6b 1792
0131ba45
DM
1793 /* If a peer entry exists for this destination, we must hook
1794 * it up in order to get at cached metrics.
1795 */
813b3b5d 1796 if (fl4 && (fl4->flowi4_flags & FLOWI_FLAG_PRECOW_METRICS))
0131ba45
DM
1797 create = 1;
1798
3c0afdca 1799 rt->peer = peer = inet_getpeer_v4(rt->rt_dst, create);
0131ba45 1800 if (peer) {
3c0afdca 1801 rt->rt_peer_genid = rt_peer_genid();
a4daad6b
DM
1802 if (inet_metrics_new(peer))
1803 memcpy(peer->metrics, fi->fib_metrics,
1804 sizeof(u32) * RTAX_MAX);
1805 dst_init_metrics(&rt->dst, peer->metrics, false);
2c8cec5c 1806
fe6fe792 1807 check_peer_pmtu(&rt->dst, peer);
f39925db
DM
1808 if (peer->redirect_learned.a4 &&
1809 peer->redirect_learned.a4 != rt->rt_gateway) {
1810 rt->rt_gateway = peer->redirect_learned.a4;
1811 rt->rt_flags |= RTCF_REDIRECTED;
1812 }
0131ba45
DM
1813 } else {
1814 if (fi->fib_metrics != (u32 *) dst_default_metrics) {
1815 rt->fi = fi;
1816 atomic_inc(&fi->fib_clntref);
1817 }
1818 dst_init_metrics(&rt->dst, fi->fib_metrics, true);
a4daad6b
DM
1819 }
1820}
1821
813b3b5d 1822static void rt_set_nexthop(struct rtable *rt, const struct flowi4 *fl4,
5e2b61f7 1823 const struct fib_result *res,
982721f3 1824 struct fib_info *fi, u16 type, u32 itag)
1da177e4 1825{
defb3519 1826 struct dst_entry *dst = &rt->dst;
1da177e4
LT
1827
1828 if (fi) {
1829 if (FIB_RES_GW(*res) &&
1830 FIB_RES_NH(*res).nh_scope == RT_SCOPE_LINK)
1831 rt->rt_gateway = FIB_RES_GW(*res);
813b3b5d 1832 rt_init_metrics(rt, fl4, fi);
c7066f70 1833#ifdef CONFIG_IP_ROUTE_CLASSID
defb3519 1834 dst->tclassid = FIB_RES_NH(*res).nh_tclassid;
1da177e4 1835#endif
d33e4553 1836 }
defb3519 1837
defb3519
DM
1838 if (dst_mtu(dst) > IP_MAX_MTU)
1839 dst_metric_set(dst, RTAX_MTU, IP_MAX_MTU);
0dbaee3b 1840 if (dst_metric_raw(dst, RTAX_ADVMSS) > 65535 - 40)
defb3519 1841 dst_metric_set(dst, RTAX_ADVMSS, 65535 - 40);
1da177e4 1842
c7066f70 1843#ifdef CONFIG_IP_ROUTE_CLASSID
1da177e4
LT
1844#ifdef CONFIG_IP_MULTIPLE_TABLES
1845 set_class_tag(rt, fib_rules_tclass(res));
1846#endif
1847 set_class_tag(rt, itag);
1848#endif
1da177e4
LT
1849}
1850
5c1e6aa3
DM
1851static struct rtable *rt_dst_alloc(struct net_device *dev,
1852 bool nopolicy, bool noxfrm)
0c4dcd58 1853{
5c1e6aa3
DM
1854 return dst_alloc(&ipv4_dst_ops, dev, 1, -1,
1855 DST_HOST |
1856 (nopolicy ? DST_NOPOLICY : 0) |
1857 (noxfrm ? DST_NOXFRM : 0));
0c4dcd58
DM
1858}
1859
96d36220 1860/* called in rcu_read_lock() section */
9e12bb22 1861static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr,
1da177e4
LT
1862 u8 tos, struct net_device *dev, int our)
1863{
96d36220 1864 unsigned int hash;
1da177e4 1865 struct rtable *rth;
a61ced5d 1866 __be32 spec_dst;
96d36220 1867 struct in_device *in_dev = __in_dev_get_rcu(dev);
1da177e4 1868 u32 itag = 0;
b5f7e755 1869 int err;
1da177e4
LT
1870
1871 /* Primary sanity checks. */
1872
1873 if (in_dev == NULL)
1874 return -EINVAL;
1875
1e637c74 1876 if (ipv4_is_multicast(saddr) || ipv4_is_lbcast(saddr) ||
f97c1e0c 1877 ipv4_is_loopback(saddr) || skb->protocol != htons(ETH_P_IP))
1da177e4
LT
1878 goto e_inval;
1879
f97c1e0c
JP
1880 if (ipv4_is_zeronet(saddr)) {
1881 if (!ipv4_is_local_multicast(daddr))
1da177e4
LT
1882 goto e_inval;
1883 spec_dst = inet_select_addr(dev, 0, RT_SCOPE_LINK);
b5f7e755 1884 } else {
5c04c819
MS
1885 err = fib_validate_source(skb, saddr, 0, tos, 0, dev, &spec_dst,
1886 &itag);
b5f7e755
ED
1887 if (err < 0)
1888 goto e_err;
1889 }
5c1e6aa3
DM
1890 rth = rt_dst_alloc(init_net.loopback_dev,
1891 IN_DEV_CONF_GET(in_dev, NOPOLICY), false);
1da177e4
LT
1892 if (!rth)
1893 goto e_nobufs;
1894
cf911662
DM
1895#ifdef CONFIG_IP_ROUTE_CLASSID
1896 rth->dst.tclassid = itag;
1897#endif
d8d1f30b 1898 rth->dst.output = ip_rt_bug;
1da177e4 1899
5e2b61f7 1900 rth->rt_key_dst = daddr;
5e2b61f7 1901 rth->rt_key_src = saddr;
cf911662
DM
1902 rth->rt_genid = rt_genid(dev_net(dev));
1903 rth->rt_flags = RTCF_MULTICAST;
1904 rth->rt_type = RTN_MULTICAST;
475949d8 1905 rth->rt_key_tos = tos;
cf911662 1906 rth->rt_dst = daddr;
1da177e4 1907 rth->rt_src = saddr;
1b86a58f 1908 rth->rt_route_iif = dev->ifindex;
5e2b61f7 1909 rth->rt_iif = dev->ifindex;
5e2b61f7 1910 rth->rt_oif = 0;
cf911662 1911 rth->rt_mark = skb->mark;
1da177e4
LT
1912 rth->rt_gateway = daddr;
1913 rth->rt_spec_dst = spec_dst;
cf911662
DM
1914 rth->rt_peer_genid = 0;
1915 rth->peer = NULL;
1916 rth->fi = NULL;
1da177e4 1917 if (our) {
d8d1f30b 1918 rth->dst.input = ip_local_deliver;
1da177e4
LT
1919 rth->rt_flags |= RTCF_LOCAL;
1920 }
1921
1922#ifdef CONFIG_IP_MROUTE
f97c1e0c 1923 if (!ipv4_is_local_multicast(daddr) && IN_DEV_MFORWARD(in_dev))
d8d1f30b 1924 rth->dst.input = ip_mr_input;
1da177e4
LT
1925#endif
1926 RT_CACHE_STAT_INC(in_slow_mc);
1927
e84f84f2 1928 hash = rt_hash(daddr, saddr, dev->ifindex, rt_genid(dev_net(dev)));
b23dd4fe 1929 rth = rt_intern_hash(hash, rth, skb, dev->ifindex);
9aa3c94c 1930 return IS_ERR(rth) ? PTR_ERR(rth) : 0;
1da177e4
LT
1931
1932e_nobufs:
1da177e4 1933 return -ENOBUFS;
1da177e4 1934e_inval:
96d36220 1935 return -EINVAL;
b5f7e755 1936e_err:
b5f7e755 1937 return err;
1da177e4
LT
1938}
1939
1940
1941static void ip_handle_martian_source(struct net_device *dev,
1942 struct in_device *in_dev,
1943 struct sk_buff *skb,
9e12bb22
AV
1944 __be32 daddr,
1945 __be32 saddr)
1da177e4
LT
1946{
1947 RT_CACHE_STAT_INC(in_martian_src);
1948#ifdef CONFIG_IP_ROUTE_VERBOSE
1949 if (IN_DEV_LOG_MARTIANS(in_dev) && net_ratelimit()) {
1950 /*
1951 * RFC1812 recommendation: if the source is martian,
1952 * the only hint is the MAC header.
1953 */
673d57e7
HH
1954 printk(KERN_WARNING "martian source %pI4 from %pI4, on dev %s\n",
1955 &daddr, &saddr, dev->name);
98e399f8 1956 if (dev->hard_header_len && skb_mac_header_was_set(skb)) {
1da177e4 1957 int i;
98e399f8 1958 const unsigned char *p = skb_mac_header(skb);
1da177e4
LT
1959 printk(KERN_WARNING "ll header: ");
1960 for (i = 0; i < dev->hard_header_len; i++, p++) {
1961 printk("%02x", *p);
1962 if (i < (dev->hard_header_len - 1))
1963 printk(":");
1964 }
1965 printk("\n");
1966 }
1967 }
1968#endif
1969}
1970
47360228 1971/* called in rcu_read_lock() section */
5969f71d 1972static int __mkroute_input(struct sk_buff *skb,
982721f3 1973 const struct fib_result *res,
5969f71d
SH
1974 struct in_device *in_dev,
1975 __be32 daddr, __be32 saddr, u32 tos,
1976 struct rtable **result)
1da177e4 1977{
1da177e4
LT
1978 struct rtable *rth;
1979 int err;
1980 struct in_device *out_dev;
47360228 1981 unsigned int flags = 0;
d9c9df8c
AV
1982 __be32 spec_dst;
1983 u32 itag;
1da177e4
LT
1984
1985 /* get a working reference to the output device */
47360228 1986 out_dev = __in_dev_get_rcu(FIB_RES_DEV(*res));
1da177e4
LT
1987 if (out_dev == NULL) {
1988 if (net_ratelimit())
1989 printk(KERN_CRIT "Bug in ip_route_input"
1990 "_slow(). Please, report\n");
1991 return -EINVAL;
1992 }
1993
1994
5c04c819
MS
1995 err = fib_validate_source(skb, saddr, daddr, tos, FIB_RES_OIF(*res),
1996 in_dev->dev, &spec_dst, &itag);
1da177e4 1997 if (err < 0) {
e905a9ed 1998 ip_handle_martian_source(in_dev->dev, in_dev, skb, daddr,
1da177e4 1999 saddr);
e905a9ed 2000
1da177e4
LT
2001 goto cleanup;
2002 }
2003
2004 if (err)
2005 flags |= RTCF_DIRECTSRC;
2006
51b77cae 2007 if (out_dev == in_dev && err &&
1da177e4
LT
2008 (IN_DEV_SHARED_MEDIA(out_dev) ||
2009 inet_addr_onlink(out_dev, saddr, FIB_RES_GW(*res))))
2010 flags |= RTCF_DOREDIRECT;
2011
2012 if (skb->protocol != htons(ETH_P_IP)) {
2013 /* Not IP (i.e. ARP). Do not create a route if it is
2014 * invalid for proxy arp. DNAT routes are always valid.
65324144
JDB
2015 *
2016 * The proxy arp feature has been extended to allow ARP
2017 * replies back to the same interface, to support
2018 * Private VLAN switch technologies. See arp.c.
1da177e4 2019 */
65324144
JDB
2020 if (out_dev == in_dev &&
2021 IN_DEV_PROXY_ARP_PVLAN(in_dev) == 0) {
1da177e4
LT
2022 err = -EINVAL;
2023 goto cleanup;
2024 }
2025 }
2026
5c1e6aa3
DM
2027 rth = rt_dst_alloc(out_dev->dev,
2028 IN_DEV_CONF_GET(in_dev, NOPOLICY),
0c4dcd58 2029 IN_DEV_CONF_GET(out_dev, NOXFRM));
1da177e4
LT
2030 if (!rth) {
2031 err = -ENOBUFS;
2032 goto cleanup;
2033 }
2034
5e2b61f7 2035 rth->rt_key_dst = daddr;
5e2b61f7 2036 rth->rt_key_src = saddr;
cf911662
DM
2037 rth->rt_genid = rt_genid(dev_net(rth->dst.dev));
2038 rth->rt_flags = flags;
2039 rth->rt_type = res->type;
475949d8 2040 rth->rt_key_tos = tos;
cf911662 2041 rth->rt_dst = daddr;
1da177e4 2042 rth->rt_src = saddr;
1b86a58f 2043 rth->rt_route_iif = in_dev->dev->ifindex;
5e2b61f7 2044 rth->rt_iif = in_dev->dev->ifindex;
5e2b61f7 2045 rth->rt_oif = 0;
cf911662
DM
2046 rth->rt_mark = skb->mark;
2047 rth->rt_gateway = daddr;
1da177e4 2048 rth->rt_spec_dst = spec_dst;
cf911662
DM
2049 rth->rt_peer_genid = 0;
2050 rth->peer = NULL;
2051 rth->fi = NULL;
1da177e4 2052
d8d1f30b
CG
2053 rth->dst.input = ip_forward;
2054 rth->dst.output = ip_output;
1da177e4 2055
5e2b61f7 2056 rt_set_nexthop(rth, NULL, res, res->fi, res->type, itag);
1da177e4 2057
1da177e4
LT
2058 *result = rth;
2059 err = 0;
2060 cleanup:
1da177e4 2061 return err;
e905a9ed 2062}
1da177e4 2063
5969f71d
SH
2064static int ip_mkroute_input(struct sk_buff *skb,
2065 struct fib_result *res,
68a5e3dd 2066 const struct flowi4 *fl4,
5969f71d
SH
2067 struct in_device *in_dev,
2068 __be32 daddr, __be32 saddr, u32 tos)
1da177e4 2069{
7abaa27c 2070 struct rtable* rth = NULL;
1da177e4
LT
2071 int err;
2072 unsigned hash;
2073
2074#ifdef CONFIG_IP_ROUTE_MULTIPATH
ff3fccb3 2075 if (res->fi && res->fi->fib_nhs > 1)
1b7fe593 2076 fib_select_multipath(res);
1da177e4
LT
2077#endif
2078
2079 /* create a routing cache entry */
2080 err = __mkroute_input(skb, res, in_dev, daddr, saddr, tos, &rth);
2081 if (err)
2082 return err;
1da177e4
LT
2083
2084 /* put it into the cache */
68a5e3dd 2085 hash = rt_hash(daddr, saddr, fl4->flowi4_iif,
d8d1f30b 2086 rt_genid(dev_net(rth->dst.dev)));
68a5e3dd 2087 rth = rt_intern_hash(hash, rth, skb, fl4->flowi4_iif);
b23dd4fe
DM
2088 if (IS_ERR(rth))
2089 return PTR_ERR(rth);
2090 return 0;
1da177e4
LT
2091}
2092
1da177e4
LT
2093/*
2094 * NOTE. We drop all packets that have a local source
2095 * address, because every properly looped-back packet
2096 * must already have the correct destination attached by the output routine.
2097 *
2098 * This approach solves two big problems:
2099 * 1. Non-simplex devices are handled properly.
2100 * 2. IP spoofing attempts are filtered with a 100% guarantee.
ebc0ffae 2101 * called with rcu_read_lock()
1da177e4
LT
2102 */
2103
9e12bb22 2104static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr,
1da177e4
LT
2105 u8 tos, struct net_device *dev)
2106{
2107 struct fib_result res;
96d36220 2108 struct in_device *in_dev = __in_dev_get_rcu(dev);
68a5e3dd 2109 struct flowi4 fl4;
1da177e4
LT
2110 unsigned flags = 0;
2111 u32 itag = 0;
2112 struct rtable * rth;
2113 unsigned hash;
9e12bb22 2114 __be32 spec_dst;
1da177e4 2115 int err = -EINVAL;
c346dca1 2116 struct net * net = dev_net(dev);
1da177e4
LT
2117
2118 /* IP on this device is disabled. */
2119
2120 if (!in_dev)
2121 goto out;
2122
2123 /* Check for the most weird martians, which cannot be detected
2124 by fib_lookup.
2125 */
2126
1e637c74 2127 if (ipv4_is_multicast(saddr) || ipv4_is_lbcast(saddr) ||
f97c1e0c 2128 ipv4_is_loopback(saddr))
1da177e4
LT
2129 goto martian_source;
2130
27a954bd 2131 if (ipv4_is_lbcast(daddr) || (saddr == 0 && daddr == 0))
1da177e4
LT
2132 goto brd_input;
2133
2134 /* Accept zero addresses only for limited broadcast;
2135 * I do not even know whether to fix this or not. Waiting for complaints :-)
2136 */
f97c1e0c 2137 if (ipv4_is_zeronet(saddr))
1da177e4
LT
2138 goto martian_source;
2139
27a954bd 2140 if (ipv4_is_zeronet(daddr) || ipv4_is_loopback(daddr))
1da177e4
LT
2141 goto martian_destination;
2142
2143 /*
2144 * Now we are ready to route the packet.
2145 */
68a5e3dd
DM
2146 fl4.flowi4_oif = 0;
2147 fl4.flowi4_iif = dev->ifindex;
2148 fl4.flowi4_mark = skb->mark;
2149 fl4.flowi4_tos = tos;
2150 fl4.flowi4_scope = RT_SCOPE_UNIVERSE;
2151 fl4.daddr = daddr;
2152 fl4.saddr = saddr;
2153 err = fib_lookup(net, &fl4, &res);
ebc0ffae 2154 if (err != 0) {
1da177e4 2155 if (!IN_DEV_FORWARD(in_dev))
2c2910a4 2156 goto e_hostunreach;
1da177e4
LT
2157 goto no_route;
2158 }
1da177e4
LT
2159
2160 RT_CACHE_STAT_INC(in_slow_tot);
2161
2162 if (res.type == RTN_BROADCAST)
2163 goto brd_input;
2164
2165 if (res.type == RTN_LOCAL) {
5c04c819 2166 err = fib_validate_source(skb, saddr, daddr, tos,
ebc0ffae 2167 net->loopback_dev->ifindex,
5c04c819 2168 dev, &spec_dst, &itag);
b5f7e755
ED
2169 if (err < 0)
2170 goto martian_source_keep_err;
2171 if (err)
1da177e4
LT
2172 flags |= RTCF_DIRECTSRC;
2173 spec_dst = daddr;
2174 goto local_input;
2175 }
2176
2177 if (!IN_DEV_FORWARD(in_dev))
2c2910a4 2178 goto e_hostunreach;
1da177e4
LT
2179 if (res.type != RTN_UNICAST)
2180 goto martian_destination;
2181
68a5e3dd 2182 err = ip_mkroute_input(skb, &res, &fl4, in_dev, daddr, saddr, tos);
1da177e4
LT
2183out: return err;
2184
2185brd_input:
2186 if (skb->protocol != htons(ETH_P_IP))
2187 goto e_inval;
2188
f97c1e0c 2189 if (ipv4_is_zeronet(saddr))
1da177e4
LT
2190 spec_dst = inet_select_addr(dev, 0, RT_SCOPE_LINK);
2191 else {
5c04c819
MS
2192 err = fib_validate_source(skb, saddr, 0, tos, 0, dev, &spec_dst,
2193 &itag);
1da177e4 2194 if (err < 0)
b5f7e755 2195 goto martian_source_keep_err;
1da177e4
LT
2196 if (err)
2197 flags |= RTCF_DIRECTSRC;
2198 }
2199 flags |= RTCF_BROADCAST;
2200 res.type = RTN_BROADCAST;
2201 RT_CACHE_STAT_INC(in_brd);
2202
2203local_input:
5c1e6aa3
DM
2204 rth = rt_dst_alloc(net->loopback_dev,
2205 IN_DEV_CONF_GET(in_dev, NOPOLICY), false);
1da177e4
LT
2206 if (!rth)
2207 goto e_nobufs;
2208
cf911662 2209 rth->dst.input = ip_local_deliver;
d8d1f30b 2210 rth->dst.output = ip_rt_bug;
cf911662
DM
2211#ifdef CONFIG_IP_ROUTE_CLASSID
2212 rth->dst.tclassid = itag;
2213#endif
1da177e4 2214
5e2b61f7 2215 rth->rt_key_dst = daddr;
5e2b61f7 2216 rth->rt_key_src = saddr;
cf911662
DM
2217 rth->rt_genid = rt_genid(net);
2218 rth->rt_flags = flags|RTCF_LOCAL;
2219 rth->rt_type = res.type;
475949d8 2220 rth->rt_key_tos = tos;
cf911662 2221 rth->rt_dst = daddr;
1da177e4 2222 rth->rt_src = saddr;
1b86a58f 2226 rth->rt_route_iif = dev->ifindex;
5e2b61f7 2227 rth->rt_iif = dev->ifindex;
cf911662
DM
2228 rth->rt_oif = 0;
2229 rth->rt_mark = skb->mark;
1da177e4
LT
2230 rth->rt_gateway = daddr;
2231 rth->rt_spec_dst = spec_dst;
cf911662
DM
2232 rth->rt_peer_genid = 0;
2233 rth->peer = NULL;
2234 rth->fi = NULL;
1da177e4 2235 if (res.type == RTN_UNREACHABLE) {
d8d1f30b
CG
2236 rth->dst.input = ip_error;
2237 rth->dst.error = -err;
1da177e4
LT
2238 rth->rt_flags &= ~RTCF_LOCAL;
2239 }
68a5e3dd
DM
2240 hash = rt_hash(daddr, saddr, fl4.flowi4_iif, rt_genid(net));
2241 rth = rt_intern_hash(hash, rth, skb, fl4.flowi4_iif);
b23dd4fe
DM
2242 err = 0;
2243 if (IS_ERR(rth))
2244 err = PTR_ERR(rth);
ebc0ffae 2245 goto out;
1da177e4
LT
2246
2247no_route:
2248 RT_CACHE_STAT_INC(in_no_route);
2249 spec_dst = inet_select_addr(dev, 0, RT_SCOPE_UNIVERSE);
2250 res.type = RTN_UNREACHABLE;
7f53878d
MC
2251 if (err == -ESRCH)
2252 err = -ENETUNREACH;
1da177e4
LT
2253 goto local_input;
2254
2255 /*
2256 * Do not cache martian addresses: they should be logged (RFC1812)
2257 */
2258martian_destination:
2259 RT_CACHE_STAT_INC(in_martian_dst);
2260#ifdef CONFIG_IP_ROUTE_VERBOSE
2261 if (IN_DEV_LOG_MARTIANS(in_dev) && net_ratelimit())
673d57e7
HH
2262 printk(KERN_WARNING "martian destination %pI4 from %pI4, dev %s\n",
2263 &daddr, &saddr, dev->name);
1da177e4 2264#endif
2c2910a4
DE
2265
2266e_hostunreach:
e905a9ed 2267 err = -EHOSTUNREACH;
ebc0ffae 2268 goto out;
2c2910a4 2269
1da177e4
LT
2270e_inval:
2271 err = -EINVAL;
ebc0ffae 2272 goto out;
1da177e4
LT
2273
2274e_nobufs:
2275 err = -ENOBUFS;
ebc0ffae 2276 goto out;
1da177e4
LT
2277
2278martian_source:
b5f7e755
ED
2279 err = -EINVAL;
2280martian_source_keep_err:
1da177e4 2281 ip_handle_martian_source(dev, in_dev, skb, daddr, saddr);
ebc0ffae 2282 goto out;
1da177e4
LT
2283}
2284
407eadd9
ED
2285int ip_route_input_common(struct sk_buff *skb, __be32 daddr, __be32 saddr,
2286 u8 tos, struct net_device *dev, bool noref)
1da177e4
LT
2287{
2288 struct rtable * rth;
2289 unsigned hash;
2290 int iif = dev->ifindex;
b5921910 2291 struct net *net;
96d36220 2292 int res;
1da177e4 2293
c346dca1 2294 net = dev_net(dev);
1080d709 2295
96d36220
ED
2296 rcu_read_lock();
2297
1080d709
NH
2298 if (!rt_caching(net))
2299 goto skip_cache;
2300
1da177e4 2301 tos &= IPTOS_RT_MASK;
e84f84f2 2302 hash = rt_hash(daddr, saddr, iif, rt_genid(net));
1da177e4 2303
1da177e4 2304 for (rth = rcu_dereference(rt_hash_table[hash].chain); rth;
d8d1f30b 2305 rth = rcu_dereference(rth->dst.rt_next)) {
5e2b61f7
DM
2306 if ((((__force u32)rth->rt_key_dst ^ (__force u32)daddr) |
2307 ((__force u32)rth->rt_key_src ^ (__force u32)saddr) |
2308 (rth->rt_iif ^ iif) |
2309 rth->rt_oif |
475949d8 2310 (rth->rt_key_tos ^ tos)) == 0 &&
5e2b61f7 2311 rth->rt_mark == skb->mark &&
d8d1f30b 2312 net_eq(dev_net(rth->dst.dev), net) &&
e84f84f2 2313 !rt_is_expired(rth)) {
407eadd9 2314 if (noref) {
d8d1f30b
CG
2315 dst_use_noref(&rth->dst, jiffies);
2316 skb_dst_set_noref(skb, &rth->dst);
407eadd9 2317 } else {
d8d1f30b
CG
2318 dst_use(&rth->dst, jiffies);
2319 skb_dst_set(skb, &rth->dst);
407eadd9 2320 }
1da177e4
LT
2321 RT_CACHE_STAT_INC(in_hit);
2322 rcu_read_unlock();
1da177e4
LT
2323 return 0;
2324 }
2325 RT_CACHE_STAT_INC(in_hlist_search);
2326 }
1da177e4 2327
1080d709 2328skip_cache:
1da177e4
LT
2329 /* Multicast recognition logic was moved from the route cache to here.
2330 The problem was that too many Ethernet cards have broken/missing
2331 hardware multicast filters :-( As a result, a host on a multicast
2332 network acquires a lot of useless route cache entries, e.g. for
2333 SDR messages from all over the world. Now we try to get rid of them.
2334 Really, provided the software IP multicast filter is organized
2335 reasonably (at least, hashed), it does not result in a slowdown
2336 compared with route cache reject entries.
2337 Note that multicast routers are not affected, because a
2338 route cache entry is created eventually.
2339 */
f97c1e0c 2340 if (ipv4_is_multicast(daddr)) {
96d36220 2341 struct in_device *in_dev = __in_dev_get_rcu(dev);
1da177e4 2342
96d36220 2343 if (in_dev) {
dbdd9a52
DM
2344 int our = ip_check_mc_rcu(in_dev, daddr, saddr,
2345 ip_hdr(skb)->protocol);
1da177e4
LT
2346 if (our
2347#ifdef CONFIG_IP_MROUTE
9d4fb27d
JP
2348 ||
2349 (!ipv4_is_local_multicast(daddr) &&
2350 IN_DEV_MFORWARD(in_dev))
1da177e4 2351#endif
9d4fb27d 2352 ) {
96d36220
ED
2353 int res = ip_route_input_mc(skb, daddr, saddr,
2354 tos, dev, our);
1da177e4 2355 rcu_read_unlock();
96d36220 2356 return res;
1da177e4
LT
2357 }
2358 }
2359 rcu_read_unlock();
2360 return -EINVAL;
2361 }
96d36220
ED
2362 res = ip_route_input_slow(skb, daddr, saddr, tos, dev);
2363 rcu_read_unlock();
2364 return res;
1da177e4 2365}
407eadd9 2366EXPORT_SYMBOL(ip_route_input_common);
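/* Usage sketch (editor's illustration, not part of the original file):
 * roughly how a receive path attaches a route to an incoming packet.
 * ip_route_input() is assumed to be the noref == false wrapper around
 * ip_route_input_common() declared in route.h.
 */
static int example_route_incoming(struct sk_buff *skb, struct net_device *dev)
{
	const struct iphdr *iph = ip_hdr(skb);
	int err;

	err = ip_route_input(skb, iph->daddr, iph->saddr, iph->tos, dev);
	if (err)
		return err;	/* martian source, no route, -ENOBUFS, ... */

	/* On success, skb_dst(skb) now points at the cached rtable. */
	return 0;
}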
1da177e4 2367
ebc0ffae 2368/* called with rcu_read_lock() */
982721f3 2369static struct rtable *__mkroute_output(const struct fib_result *res,
68a5e3dd 2370 const struct flowi4 *fl4,
813b3b5d
DM
2371 __be32 orig_daddr, __be32 orig_saddr,
2372 int orig_oif, struct net_device *dev_out,
5ada5527 2373 unsigned int flags)
1da177e4 2374{
982721f3 2375 struct fib_info *fi = res->fi;
813b3b5d 2376 u32 tos = RT_FL_TOS(fl4);
5ada5527 2377 struct in_device *in_dev;
982721f3 2378 u16 type = res->type;
5ada5527 2379 struct rtable *rth;
1da177e4 2380
68a5e3dd 2381 if (ipv4_is_loopback(fl4->saddr) && !(dev_out->flags & IFF_LOOPBACK))
5ada5527 2382 return ERR_PTR(-EINVAL);
1da177e4 2383
68a5e3dd 2384 if (ipv4_is_lbcast(fl4->daddr))
982721f3 2385 type = RTN_BROADCAST;
68a5e3dd 2386 else if (ipv4_is_multicast(fl4->daddr))
982721f3 2387 type = RTN_MULTICAST;
68a5e3dd 2388 else if (ipv4_is_zeronet(fl4->daddr))
5ada5527 2389 return ERR_PTR(-EINVAL);
1da177e4
LT
2390
2391 if (dev_out->flags & IFF_LOOPBACK)
2392 flags |= RTCF_LOCAL;
2393
dd28d1a0 2394 in_dev = __in_dev_get_rcu(dev_out);
ebc0ffae 2395 if (!in_dev)
5ada5527 2396 return ERR_PTR(-EINVAL);
ebc0ffae 2397
982721f3 2398 if (type == RTN_BROADCAST) {
1da177e4 2399 flags |= RTCF_BROADCAST | RTCF_LOCAL;
982721f3
DM
2400 fi = NULL;
2401 } else if (type == RTN_MULTICAST) {
dd28d1a0 2402 flags |= RTCF_MULTICAST | RTCF_LOCAL;
813b3b5d
DM
2403 if (!ip_check_mc_rcu(in_dev, fl4->daddr, fl4->saddr,
2404 fl4->flowi4_proto))
1da177e4
LT
2405 flags &= ~RTCF_LOCAL;
2406 /* If a multicast route does not exist, use
dd28d1a0
ED
2407 * the default one, but do not gateway in this case.
2408 * Yes, it is a hack.
1da177e4 2409 */
982721f3
DM
2410 if (fi && res->prefixlen < 4)
2411 fi = NULL;
1da177e4
LT
2412 }
2413
5c1e6aa3
DM
2414 rth = rt_dst_alloc(dev_out,
2415 IN_DEV_CONF_GET(in_dev, NOPOLICY),
0c4dcd58 2416 IN_DEV_CONF_GET(in_dev, NOXFRM));
8391d07b 2417 if (!rth)
5ada5527 2418 return ERR_PTR(-ENOBUFS);
8391d07b 2419
cf911662
DM
2420 rth->dst.output = ip_output;
2421
813b3b5d
DM
2422 rth->rt_key_dst = orig_daddr;
2423 rth->rt_key_src = orig_saddr;
cf911662
DM
2424 rth->rt_genid = rt_genid(dev_net(dev_out));
2425 rth->rt_flags = flags;
2426 rth->rt_type = type;
475949d8 2427 rth->rt_key_tos = tos;
68a5e3dd
DM
2428 rth->rt_dst = fl4->daddr;
2429 rth->rt_src = fl4->saddr;
1b86a58f 2430 rth->rt_route_iif = 0;
813b3b5d
DM
2431 rth->rt_iif = orig_oif ? : dev_out->ifindex;
2432 rth->rt_oif = orig_oif;
2433 rth->rt_mark = fl4->flowi4_mark;
68a5e3dd
DM
2434 rth->rt_gateway = fl4->daddr;
2435 rth->rt_spec_dst = fl4->saddr;
cf911662
DM
2436 rth->rt_peer_genid = 0;
2437 rth->peer = NULL;
2438 rth->fi = NULL;
1da177e4
LT
2439
2440 RT_CACHE_STAT_INC(out_slow_tot);
2441
2442 if (flags & RTCF_LOCAL) {
d8d1f30b 2443 rth->dst.input = ip_local_deliver;
68a5e3dd 2444 rth->rt_spec_dst = fl4->daddr;
1da177e4
LT
2445 }
2446 if (flags & (RTCF_BROADCAST | RTCF_MULTICAST)) {
68a5e3dd 2447 rth->rt_spec_dst = fl4->saddr;
e905a9ed 2448 if (flags & RTCF_LOCAL &&
1da177e4 2449 !(dev_out->flags & IFF_LOOPBACK)) {
d8d1f30b 2450 rth->dst.output = ip_mc_output;
1da177e4
LT
2451 RT_CACHE_STAT_INC(out_slow_mc);
2452 }
2453#ifdef CONFIG_IP_MROUTE
982721f3 2454 if (type == RTN_MULTICAST) {
1da177e4 2455 if (IN_DEV_MFORWARD(in_dev) &&
813b3b5d 2456 !ipv4_is_local_multicast(fl4->daddr)) {
d8d1f30b
CG
2457 rth->dst.input = ip_mr_input;
2458 rth->dst.output = ip_mc_output;
1da177e4
LT
2459 }
2460 }
2461#endif
2462 }
2463
813b3b5d 2464 rt_set_nexthop(rth, fl4, res, fi, type, 0);
1da177e4 2465
5ada5527 2466 return rth;
1da177e4
LT
2467}
2468
1da177e4
LT
2469/*
2470 * Major route resolver routine.
0197aa38 2471 * called with rcu_read_lock();
1da177e4
LT
2472 */
2473
813b3b5d 2474static struct rtable *ip_route_output_slow(struct net *net, struct flowi4 *fl4)
1da177e4 2475{
1da177e4 2476 struct net_device *dev_out = NULL;
813b3b5d
DM
2477 u32 tos = RT_FL_TOS(fl4);
2478 unsigned int flags = 0;
2479 struct fib_result res;
5ada5527 2480 struct rtable *rth;
813b3b5d
DM
2481 __be32 orig_daddr;
2482 __be32 orig_saddr;
2483 int orig_oif;
1da177e4
LT
2484
2485 res.fi = NULL;
2486#ifdef CONFIG_IP_MULTIPLE_TABLES
2487 res.r = NULL;
2488#endif
2489
813b3b5d
DM
2490 orig_daddr = fl4->daddr;
2491 orig_saddr = fl4->saddr;
2492 orig_oif = fl4->flowi4_oif;
2493
2494 fl4->flowi4_iif = net->loopback_dev->ifindex;
2495 fl4->flowi4_tos = tos & IPTOS_RT_MASK;
2496 fl4->flowi4_scope = ((tos & RTO_ONLINK) ?
2497 RT_SCOPE_LINK : RT_SCOPE_UNIVERSE);
44713b67 2498
010c2708 2499 rcu_read_lock();
813b3b5d 2500 if (fl4->saddr) {
b23dd4fe 2501 rth = ERR_PTR(-EINVAL);
813b3b5d
DM
2502 if (ipv4_is_multicast(fl4->saddr) ||
2503 ipv4_is_lbcast(fl4->saddr) ||
2504 ipv4_is_zeronet(fl4->saddr))
1da177e4
LT
2505 goto out;
2506
1da177e4
LT
2507 /* I removed the check for oif == dev_out->oif here.
2508 It was wrong for two reasons:
1ab35276
DL
2509 1. ip_dev_find(net, saddr) can return the wrong iface if saddr
2510 is assigned to multiple interfaces.
1da177e4
LT
2511 2. Moreover, we are allowed to send packets with the saddr
2512 of another iface. --ANK
2513 */
2514
813b3b5d
DM
2515 if (fl4->flowi4_oif == 0 &&
2516 (ipv4_is_multicast(fl4->daddr) ||
2517 ipv4_is_lbcast(fl4->daddr))) {
a210d01a 2518 /* It is equivalent to inet_addr_type(saddr) == RTN_LOCAL */
813b3b5d 2519 dev_out = __ip_dev_find(net, fl4->saddr, false);
a210d01a
JA
2520 if (dev_out == NULL)
2521 goto out;
2522
1da177e4
LT
2523 /* Special hack: the user can direct multicasts
2524 and limited broadcasts via the necessary interface
2525 without fiddling with IP_MULTICAST_IF or IP_PKTINFO.
2526 This hack is not just for fun, it allows
2527 vic, vat and friends to work.
2528 They bind a socket to loopback, set ttl to zero
2529 and expect that it will work.
2530 From the viewpoint of the routing cache they are broken,
2531 because we are not allowed to build a multicast path
2532 with a loopback source addr (look, the routing cache
2533 cannot know that ttl is zero, so the packet
2534 will not leave this host and the route is valid).
2535 Luckily, this hack is a good workaround.
2536 */
2537
813b3b5d 2538 fl4->flowi4_oif = dev_out->ifindex;
1da177e4
LT
2539 goto make_route;
2540 }
a210d01a 2541
813b3b5d 2542 if (!(fl4->flowi4_flags & FLOWI_FLAG_ANYSRC)) {
a210d01a 2543 /* It is equivalent to inet_addr_type(saddr) == RTN_LOCAL */
813b3b5d 2544 if (!__ip_dev_find(net, fl4->saddr, false))
a210d01a 2545 goto out;
a210d01a 2546 }
1da177e4
LT
2547 }
2548
2549
813b3b5d
DM
2550 if (fl4->flowi4_oif) {
2551 dev_out = dev_get_by_index_rcu(net, fl4->flowi4_oif);
b23dd4fe 2552 rth = ERR_PTR(-ENODEV);
1da177e4
LT
2553 if (dev_out == NULL)
2554 goto out;
e5ed6399
HX
2555
2556 /* RACE: Check return value of inet_select_addr instead. */
fc75fc83 2557 if (!(dev_out->flags & IFF_UP) || !__in_dev_get_rcu(dev_out)) {
b23dd4fe 2558 rth = ERR_PTR(-ENETUNREACH);
fc75fc83
ED
2559 goto out;
2560 }
813b3b5d
DM
2561 if (ipv4_is_local_multicast(fl4->daddr) ||
2562 ipv4_is_lbcast(fl4->daddr)) {
2563 if (!fl4->saddr)
2564 fl4->saddr = inet_select_addr(dev_out, 0,
2565 RT_SCOPE_LINK);
1da177e4
LT
2566 goto make_route;
2567 }
813b3b5d
DM
2568 if (fl4->saddr) {
2569 if (ipv4_is_multicast(fl4->daddr))
2570 fl4->saddr = inet_select_addr(dev_out, 0,
2571 fl4->flowi4_scope);
2572 else if (!fl4->daddr)
2573 fl4->saddr = inet_select_addr(dev_out, 0,
2574 RT_SCOPE_HOST);
1da177e4
LT
2575 }
2576 }
2577
813b3b5d
DM
2578 if (!fl4->daddr) {
2579 fl4->daddr = fl4->saddr;
2580 if (!fl4->daddr)
2581 fl4->daddr = fl4->saddr = htonl(INADDR_LOOPBACK);
b40afd0e 2582 dev_out = net->loopback_dev;
813b3b5d 2583 fl4->flowi4_oif = net->loopback_dev->ifindex;
1da177e4
LT
2584 res.type = RTN_LOCAL;
2585 flags |= RTCF_LOCAL;
2586 goto make_route;
2587 }
2588
813b3b5d 2589 if (fib_lookup(net, fl4, &res)) {
1da177e4 2590 res.fi = NULL;
813b3b5d 2591 if (fl4->flowi4_oif) {
1da177e4
LT
2592 /* Apparently, routing tables are wrong. Assume
2593 that the destination is on-link.
2594
2595 WHY? DW.
2596 Because we are allowed to send to an iface
2597 even if it has NO routes and NO assigned
2598 addresses. When oif is specified, routing
2599 tables are looked up with only one purpose:
2600 to catch whether the destination is gatewayed rather than
2601 direct. Moreover, if MSG_DONTROUTE is set,
2602 we send the packet, ignoring both routing tables
2603 and ifaddr state. --ANK
2604
2605
2606 We could do this even if oif is unknown,
2607 as IPv6 likely does, but we do not.
2608 */
2609
813b3b5d
DM
2610 if (fl4->saddr == 0)
2611 fl4->saddr = inet_select_addr(dev_out, 0,
2612 RT_SCOPE_LINK);
1da177e4
LT
2613 res.type = RTN_UNICAST;
2614 goto make_route;
2615 }
b23dd4fe 2616 rth = ERR_PTR(-ENETUNREACH);
1da177e4
LT
2617 goto out;
2618 }
1da177e4
LT
2619
2620 if (res.type == RTN_LOCAL) {
813b3b5d 2621 if (!fl4->saddr) {
9fc3bbb4 2622 if (res.fi->fib_prefsrc)
813b3b5d 2623 fl4->saddr = res.fi->fib_prefsrc;
9fc3bbb4 2624 else
813b3b5d 2625 fl4->saddr = fl4->daddr;
9fc3bbb4 2626 }
b40afd0e 2627 dev_out = net->loopback_dev;
813b3b5d 2628 fl4->flowi4_oif = dev_out->ifindex;
1da177e4
LT
2629 res.fi = NULL;
2630 flags |= RTCF_LOCAL;
2631 goto make_route;
2632 }
2633
2634#ifdef CONFIG_IP_ROUTE_MULTIPATH
813b3b5d 2635 if (res.fi->fib_nhs > 1 && fl4->flowi4_oif == 0)
1b7fe593 2636 fib_select_multipath(&res);
1da177e4
LT
2637 else
2638#endif
21d8c49e
DM
2639 if (!res.prefixlen &&
2640 res.table->tb_num_default > 1 &&
813b3b5d 2641 res.type == RTN_UNICAST && !fl4->flowi4_oif)
0c838ff1 2642 fib_select_default(&res);
1da177e4 2643
813b3b5d
DM
2644 if (!fl4->saddr)
2645 fl4->saddr = FIB_RES_PREFSRC(net, res);
1da177e4 2646
1da177e4 2647 dev_out = FIB_RES_DEV(res);
813b3b5d 2648 fl4->flowi4_oif = dev_out->ifindex;
1da177e4
LT
2649
2650
2651make_route:
813b3b5d
DM
2652 rth = __mkroute_output(&res, fl4, orig_daddr, orig_saddr, orig_oif,
2653 dev_out, flags);
b23dd4fe 2654 if (!IS_ERR(rth)) {
5ada5527
DM
2655 unsigned int hash;
2656
813b3b5d 2657 hash = rt_hash(orig_daddr, orig_saddr, orig_oif,
5ada5527 2658 rt_genid(dev_net(dev_out)));
813b3b5d 2659 rth = rt_intern_hash(hash, rth, NULL, orig_oif);
5ada5527 2660 }
1da177e4 2661
010c2708
DM
2662out:
2663 rcu_read_unlock();
b23dd4fe 2664 return rth;
1da177e4
LT
2665}
2666
813b3b5d 2667struct rtable *__ip_route_output_key(struct net *net, struct flowi4 *flp4)
1da177e4 2668{
1da177e4 2669 struct rtable *rth;
010c2708 2670 unsigned int hash;
1da177e4 2671
1080d709
NH
2672 if (!rt_caching(net))
2673 goto slow_output;
2674
9d6ec938 2675 hash = rt_hash(flp4->daddr, flp4->saddr, flp4->flowi4_oif, rt_genid(net));
1da177e4
LT
2676
2677 rcu_read_lock_bh();
a898def2 2678 for (rth = rcu_dereference_bh(rt_hash_table[hash].chain); rth;
d8d1f30b 2679 rth = rcu_dereference_bh(rth->dst.rt_next)) {
9d6ec938
DM
2680 if (rth->rt_key_dst == flp4->daddr &&
2681 rth->rt_key_src == flp4->saddr &&
c7537967 2682 rt_is_output_route(rth) &&
9d6ec938
DM
2683 rth->rt_oif == flp4->flowi4_oif &&
2684 rth->rt_mark == flp4->flowi4_mark &&
475949d8 2685 !((rth->rt_key_tos ^ flp4->flowi4_tos) &
b5921910 2686 (IPTOS_RT_MASK | RTO_ONLINK)) &&
d8d1f30b 2687 net_eq(dev_net(rth->dst.dev), net) &&
e84f84f2 2688 !rt_is_expired(rth)) {
d8d1f30b 2689 dst_use(&rth->dst, jiffies);
1da177e4
LT
2690 RT_CACHE_STAT_INC(out_hit);
2691 rcu_read_unlock_bh();
56157872
DM
2692 if (!flp4->saddr)
2693 flp4->saddr = rth->rt_src;
2694 if (!flp4->daddr)
2695 flp4->daddr = rth->rt_dst;
b23dd4fe 2696 return rth;
1da177e4
LT
2697 }
2698 RT_CACHE_STAT_INC(out_hlist_search);
2699 }
2700 rcu_read_unlock_bh();
2701
1080d709 2702slow_output:
9d6ec938 2703 return ip_route_output_slow(net, flp4);
1da177e4 2704}
d8c97a94
ACM
2705EXPORT_SYMBOL_GPL(__ip_route_output_key);
2706
ae2688d5
JW
2707static struct dst_entry *ipv4_blackhole_dst_check(struct dst_entry *dst, u32 cookie)
2708{
2709 return NULL;
2710}
2711
ec831ea7
RD
2712static unsigned int ipv4_blackhole_default_mtu(const struct dst_entry *dst)
2713{
2714 return 0;
2715}
2716
14e50e57
DM
2717static void ipv4_rt_blackhole_update_pmtu(struct dst_entry *dst, u32 mtu)
2718{
2719}
2720
0972ddb2
HB
2721static u32 *ipv4_rt_blackhole_cow_metrics(struct dst_entry *dst,
2722 unsigned long old)
2723{
2724 return NULL;
2725}
2726
14e50e57
DM
2727static struct dst_ops ipv4_dst_blackhole_ops = {
2728 .family = AF_INET,
09640e63 2729 .protocol = cpu_to_be16(ETH_P_IP),
14e50e57 2730 .destroy = ipv4_dst_destroy,
ae2688d5 2731 .check = ipv4_blackhole_dst_check,
ec831ea7 2732 .default_mtu = ipv4_blackhole_default_mtu,
214f45c9 2733 .default_advmss = ipv4_default_advmss,
14e50e57 2734 .update_pmtu = ipv4_rt_blackhole_update_pmtu,
0972ddb2 2735 .cow_metrics = ipv4_rt_blackhole_cow_metrics,
14e50e57
DM
2736};
2737
2774c131 2738struct dst_entry *ipv4_blackhole_route(struct net *net, struct dst_entry *dst_orig)
14e50e57 2739{
5c1e6aa3 2740 struct rtable *rt = dst_alloc(&ipv4_dst_blackhole_ops, NULL, 1, 0, 0);
2774c131 2741 struct rtable *ort = (struct rtable *) dst_orig;
14e50e57
DM
2742
2743 if (rt) {
d8d1f30b 2744 struct dst_entry *new = &rt->dst;
14e50e57 2745
14e50e57 2746 new->__use = 1;
352e512c
HX
2747 new->input = dst_discard;
2748 new->output = dst_discard;
defb3519 2749 dst_copy_metrics(new, &ort->dst);
14e50e57 2750
d8d1f30b 2751 new->dev = ort->dst.dev;
14e50e57
DM
2752 if (new->dev)
2753 dev_hold(new->dev);
2754
5e2b61f7
DM
2755 rt->rt_key_dst = ort->rt_key_dst;
2756 rt->rt_key_src = ort->rt_key_src;
475949d8 2757 rt->rt_key_tos = ort->rt_key_tos;
1b86a58f 2758 rt->rt_route_iif = ort->rt_route_iif;
5e2b61f7
DM
2759 rt->rt_iif = ort->rt_iif;
2760 rt->rt_oif = ort->rt_oif;
2761 rt->rt_mark = ort->rt_mark;
14e50e57 2762
e84f84f2 2763 rt->rt_genid = rt_genid(net);
14e50e57
DM
2764 rt->rt_flags = ort->rt_flags;
2765 rt->rt_type = ort->rt_type;
2766 rt->rt_dst = ort->rt_dst;
2767 rt->rt_src = ort->rt_src;
14e50e57
DM
2768 rt->rt_gateway = ort->rt_gateway;
2769 rt->rt_spec_dst = ort->rt_spec_dst;
2770 rt->peer = ort->peer;
2771 if (rt->peer)
2772 atomic_inc(&rt->peer->refcnt);
62fa8a84
DM
2773 rt->fi = ort->fi;
2774 if (rt->fi)
2775 atomic_inc(&rt->fi->fib_clntref);
14e50e57
DM
2776
2777 dst_free(new);
2778 }
2779
2774c131
DM
2780 dst_release(dst_orig);
2781
2782 return rt ? &rt->dst : ERR_PTR(-ENOMEM);
14e50e57
DM
2783}
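/* Editor's note (hedged, not part of the original file): the blackhole
 * route built above keeps the flow key and metrics of the original
 * route but silently discards all traffic, since both new->input and
 * new->output point at dst_discard; the xfrm code paths that must hand
 * back a valid-but-dead dst are the expected consumer (assumption).
 */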
2784
9d6ec938 2785struct rtable *ip_route_output_flow(struct net *net, struct flowi4 *flp4,
b23dd4fe 2786 struct sock *sk)
1da177e4 2787{
9d6ec938 2788 struct rtable *rt = __ip_route_output_key(net, flp4);
1da177e4 2789
b23dd4fe
DM
2790 if (IS_ERR(rt))
2791 return rt;
1da177e4 2792
56157872 2793 if (flp4->flowi4_proto)
9d6ec938
DM
2794 rt = (struct rtable *) xfrm_lookup(net, &rt->dst,
2795 flowi4_to_flowi(flp4),
2796 sk, 0);
1da177e4 2797
b23dd4fe 2798 return rt;
1da177e4 2799}
d8c97a94
ACM
2800EXPORT_SYMBOL_GPL(ip_route_output_flow);
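/* Usage sketch (editor's illustration, not part of the original file):
 * resolving an output route by key, much as inet_rtm_getroute() below
 * does. The destination is a placeholder; ip_rt_put() is assumed to be
 * the usual release helper from route.h.
 */
static int example_route_to(struct net *net, __be32 daddr)
{
	struct flowi4 fl4 = {
		.daddr = daddr,
	};
	struct rtable *rt = ip_route_output_key(net, &fl4);

	if (IS_ERR(rt))
		return PTR_ERR(rt);	/* e.g. -ENETUNREACH */

	/* fl4.saddr now holds the source address chosen for the route. */
	ip_rt_put(rt);
	return 0;
}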
2801
4feb88e5
BT
2802static int rt_fill_info(struct net *net,
2803 struct sk_buff *skb, u32 pid, u32 seq, int event,
b6544c0b 2804 int nowait, unsigned int flags)
1da177e4 2805{
511c3f92 2806 struct rtable *rt = skb_rtable(skb);
1da177e4 2807 struct rtmsg *r;
be403ea1 2808 struct nlmsghdr *nlh;
fe6fe792
ED
2809 long expires = 0;
2810 const struct inet_peer *peer = rt->peer;
e3703b3d 2811 u32 id = 0, ts = 0, tsage = 0, error;
be403ea1
TG
2812
2813 nlh = nlmsg_put(skb, pid, seq, event, sizeof(*r), flags);
2814 if (nlh == NULL)
26932566 2815 return -EMSGSIZE;
be403ea1
TG
2816
2817 r = nlmsg_data(nlh);
1da177e4
LT
2818 r->rtm_family = AF_INET;
2819 r->rtm_dst_len = 32;
2820 r->rtm_src_len = 0;
475949d8 2821 r->rtm_tos = rt->rt_key_tos;
1da177e4 2822 r->rtm_table = RT_TABLE_MAIN;
be403ea1 2823 NLA_PUT_U32(skb, RTA_TABLE, RT_TABLE_MAIN);
1da177e4
LT
2824 r->rtm_type = rt->rt_type;
2825 r->rtm_scope = RT_SCOPE_UNIVERSE;
2826 r->rtm_protocol = RTPROT_UNSPEC;
2827 r->rtm_flags = (rt->rt_flags & ~0xFFFF) | RTM_F_CLONED;
2828 if (rt->rt_flags & RTCF_NOTIFY)
2829 r->rtm_flags |= RTM_F_NOTIFY;
be403ea1 2830
17fb2c64 2831 NLA_PUT_BE32(skb, RTA_DST, rt->rt_dst);
be403ea1 2832
5e2b61f7 2833 if (rt->rt_key_src) {
1da177e4 2834 r->rtm_src_len = 32;
5e2b61f7 2835 NLA_PUT_BE32(skb, RTA_SRC, rt->rt_key_src);
1da177e4 2836 }
d8d1f30b
CG
2837 if (rt->dst.dev)
2838 NLA_PUT_U32(skb, RTA_OIF, rt->dst.dev->ifindex);
c7066f70 2839#ifdef CONFIG_IP_ROUTE_CLASSID
d8d1f30b
CG
2840 if (rt->dst.tclassid)
2841 NLA_PUT_U32(skb, RTA_FLOW, rt->dst.tclassid);
1da177e4 2842#endif
c7537967 2843 if (rt_is_input_route(rt))
17fb2c64 2844 NLA_PUT_BE32(skb, RTA_PREFSRC, rt->rt_spec_dst);
5e2b61f7 2845 else if (rt->rt_src != rt->rt_key_src)
17fb2c64 2846 NLA_PUT_BE32(skb, RTA_PREFSRC, rt->rt_src);
be403ea1 2847
1da177e4 2848 if (rt->rt_dst != rt->rt_gateway)
17fb2c64 2849 NLA_PUT_BE32(skb, RTA_GATEWAY, rt->rt_gateway);
be403ea1 2850
defb3519 2851 if (rtnetlink_put_metrics(skb, dst_metrics_ptr(&rt->dst)) < 0)
be403ea1
TG
2852 goto nla_put_failure;
2853
5e2b61f7
DM
2854 if (rt->rt_mark)
2855 NLA_PUT_BE32(skb, RTA_MARK, rt->rt_mark);
963bfeee 2856
d8d1f30b 2857 error = rt->dst.error;
fe6fe792 2858 if (peer) {
317fe0e6 2859 inet_peer_refcheck(rt->peer);
fe6fe792
ED
2860 id = atomic_read(&peer->ip_id_count) & 0xffff;
2861 if (peer->tcp_ts_stamp) {
2862 ts = peer->tcp_ts;
2863 tsage = get_seconds() - peer->tcp_ts_stamp;
1da177e4 2864 }
fe6fe792
ED
2865 expires = ACCESS_ONCE(peer->pmtu_expires);
2866 if (expires)
2867 expires -= jiffies;
1da177e4 2868 }
be403ea1 2869
c7537967 2870 if (rt_is_input_route(rt)) {
1da177e4 2871#ifdef CONFIG_IP_MROUTE
e448515c 2872 __be32 dst = rt->rt_dst;
1da177e4 2873
f97c1e0c 2874 if (ipv4_is_multicast(dst) && !ipv4_is_local_multicast(dst) &&
4feb88e5 2875 IPV4_DEVCONF_ALL(net, MC_FORWARDING)) {
9a1b9496
DM
2876 int err = ipmr_get_route(net, skb,
2877 rt->rt_src, rt->rt_dst,
2878 r, nowait);
1da177e4
LT
2879 if (err <= 0) {
2880 if (!nowait) {
2881 if (err == 0)
2882 return 0;
be403ea1 2883 goto nla_put_failure;
1da177e4
LT
2884 } else {
2885 if (err == -EMSGSIZE)
be403ea1 2886 goto nla_put_failure;
e3703b3d 2887 error = err;
1da177e4
LT
2888 }
2889 }
2890 } else
2891#endif
5e2b61f7 2892 NLA_PUT_U32(skb, RTA_IIF, rt->rt_iif);
1da177e4
LT
2893 }
2894
d8d1f30b 2895 if (rtnl_put_cacheinfo(skb, &rt->dst, id, ts, tsage,
e3703b3d
TG
2896 expires, error) < 0)
2897 goto nla_put_failure;
be403ea1
TG
2898
2899 return nlmsg_end(skb, nlh);
1da177e4 2900
be403ea1 2901nla_put_failure:
26932566
PM
2902 nlmsg_cancel(skb, nlh);
2903 return -EMSGSIZE;
1da177e4
LT
2904}
2905
63f3444f 2906static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr* nlh, void *arg)
1da177e4 2907{
3b1e0a65 2908 struct net *net = sock_net(in_skb->sk);
d889ce3b
TG
2909 struct rtmsg *rtm;
2910 struct nlattr *tb[RTA_MAX+1];
1da177e4 2911 struct rtable *rt = NULL;
9e12bb22
AV
2912 __be32 dst = 0;
2913 __be32 src = 0;
2914 u32 iif;
d889ce3b 2915 int err;
963bfeee 2916 int mark;
1da177e4
LT
2917 struct sk_buff *skb;
2918
d889ce3b
TG
2919 err = nlmsg_parse(nlh, sizeof(*rtm), tb, RTA_MAX, rtm_ipv4_policy);
2920 if (err < 0)
2921 goto errout;
2922
2923 rtm = nlmsg_data(nlh);
2924
1da177e4 2925 skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
d889ce3b
TG
2926 if (skb == NULL) {
2927 err = -ENOBUFS;
2928 goto errout;
2929 }
1da177e4
LT
2930
2931 /* Reserve room for dummy headers; this skb can pass
2932 through a good chunk of the routing engine.
2933 */
459a98ed 2934 skb_reset_mac_header(skb);
c1d2bbe1 2935 skb_reset_network_header(skb);
d2c962b8
SH
2936
2937 /* Bugfix: need to give ip_route_input enough of an IP header to not gag. */
eddc9ec5 2938 ip_hdr(skb)->protocol = IPPROTO_ICMP;
1da177e4
LT
2939 skb_reserve(skb, MAX_HEADER + sizeof(struct iphdr));
2940
17fb2c64
AV
2941 src = tb[RTA_SRC] ? nla_get_be32(tb[RTA_SRC]) : 0;
2942 dst = tb[RTA_DST] ? nla_get_be32(tb[RTA_DST]) : 0;
d889ce3b 2943 iif = tb[RTA_IIF] ? nla_get_u32(tb[RTA_IIF]) : 0;
963bfeee 2944 mark = tb[RTA_MARK] ? nla_get_u32(tb[RTA_MARK]) : 0;
1da177e4
LT
2945
2946 if (iif) {
d889ce3b
TG
2947 struct net_device *dev;
2948
1937504d 2949 dev = __dev_get_by_index(net, iif);
d889ce3b
TG
2950 if (dev == NULL) {
2951 err = -ENODEV;
2952 goto errout_free;
2953 }
2954
1da177e4
LT
2955 skb->protocol = htons(ETH_P_IP);
2956 skb->dev = dev;
963bfeee 2957 skb->mark = mark;
1da177e4
LT
2958 local_bh_disable();
2959 err = ip_route_input(skb, dst, src, rtm->rtm_tos, dev);
2960 local_bh_enable();
d889ce3b 2961
511c3f92 2962 rt = skb_rtable(skb);
d8d1f30b
CG
2963 if (err == 0 && rt->dst.error)
2964 err = -rt->dst.error;
1da177e4 2965 } else {
68a5e3dd
DM
2966 struct flowi4 fl4 = {
2967 .daddr = dst,
2968 .saddr = src,
2969 .flowi4_tos = rtm->rtm_tos,
2970 .flowi4_oif = tb[RTA_OIF] ? nla_get_u32(tb[RTA_OIF]) : 0,
2971 .flowi4_mark = mark,
d889ce3b 2972 };
9d6ec938 2973 rt = ip_route_output_key(net, &fl4);
b23dd4fe
DM
2974
2975 err = 0;
2976 if (IS_ERR(rt))
2977 err = PTR_ERR(rt);
1da177e4 2978 }
d889ce3b 2979
1da177e4 2980 if (err)
d889ce3b 2981 goto errout_free;
1da177e4 2982
d8d1f30b 2983 skb_dst_set(skb, &rt->dst);
1da177e4
LT
2984 if (rtm->rtm_flags & RTM_F_NOTIFY)
2985 rt->rt_flags |= RTCF_NOTIFY;
2986
4feb88e5 2987 err = rt_fill_info(net, skb, NETLINK_CB(in_skb).pid, nlh->nlmsg_seq,
1937504d 2988 RTM_NEWROUTE, 0, 0);
d889ce3b
TG
2989 if (err <= 0)
2990 goto errout_free;
1da177e4 2991
1937504d 2992 err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).pid);
d889ce3b 2993errout:
2942e900 2994 return err;
1da177e4 2995
d889ce3b 2996errout_free:
1da177e4 2997 kfree_skb(skb);
d889ce3b 2998 goto errout;
1da177e4
LT
2999}
3000
3001int ip_rt_dump(struct sk_buff *skb, struct netlink_callback *cb)
3002{
3003 struct rtable *rt;
3004 int h, s_h;
3005 int idx, s_idx;
1937504d
DL
3006 struct net *net;
3007
3b1e0a65 3008 net = sock_net(skb->sk);
1da177e4
LT
3009
3010 s_h = cb->args[0];
d8c92830
ED
3011 if (s_h < 0)
3012 s_h = 0;
1da177e4 3013 s_idx = idx = cb->args[1];
a6272665
ED
3014 for (h = s_h; h <= rt_hash_mask; h++, s_idx = 0) {
3015 if (!rt_hash_table[h].chain)
3016 continue;
1da177e4 3017 rcu_read_lock_bh();
a898def2 3018 for (rt = rcu_dereference_bh(rt_hash_table[h].chain), idx = 0; rt;
d8d1f30b
CG
3019 rt = rcu_dereference_bh(rt->dst.rt_next), idx++) {
3020 if (!net_eq(dev_net(rt->dst.dev), net) || idx < s_idx)
1da177e4 3021 continue;
e84f84f2 3022 if (rt_is_expired(rt))
29e75252 3023 continue;
d8d1f30b 3024 skb_dst_set_noref(skb, &rt->dst);
4feb88e5 3025 if (rt_fill_info(net, skb, NETLINK_CB(cb->skb).pid,
e905a9ed 3026 cb->nlh->nlmsg_seq, RTM_NEWROUTE,
b6544c0b 3027 1, NLM_F_MULTI) <= 0) {
adf30907 3028 skb_dst_drop(skb);
1da177e4
LT
3029 rcu_read_unlock_bh();
3030 goto done;
3031 }
adf30907 3032 skb_dst_drop(skb);
1da177e4
LT
3033 }
3034 rcu_read_unlock_bh();
3035 }
3036
3037done:
3038 cb->args[0] = h;
3039 cb->args[1] = idx;
3040 return skb->len;
3041}
3042
3043void ip_rt_multicast_event(struct in_device *in_dev)
3044{
76e6ebfb 3045 rt_cache_flush(dev_net(in_dev->dev), 0);
1da177e4
LT
3046}
3047
3048#ifdef CONFIG_SYSCTL
81c684d1 3049static int ipv4_sysctl_rtcache_flush(ctl_table *__ctl, int write,
8d65af78 3050 void __user *buffer,
1da177e4
LT
3051 size_t *lenp, loff_t *ppos)
3052{
3053 if (write) {
639e104f 3054 int flush_delay;
81c684d1 3055 ctl_table ctl;
39a23e75 3056 struct net *net;
639e104f 3057
81c684d1
DL
3058 memcpy(&ctl, __ctl, sizeof(ctl));
3059 ctl.data = &flush_delay;
8d65af78 3060 proc_dointvec(&ctl, write, buffer, lenp, ppos);
639e104f 3061
81c684d1 3062 net = (struct net *)__ctl->extra1;
39a23e75 3063 rt_cache_flush(net, flush_delay);
1da177e4 3064 return 0;
e905a9ed 3065 }
1da177e4
LT
3066
3067 return -EINVAL;
3068}
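/* Example (editor's illustration): from userspace the flush is triggered
 * by writing a delay value to the sysctl file registered below, e.g.
 *
 *	echo 0 > /proc/sys/net/ipv4/route/flush
 *
 * which lands here with write == 1 and ends in rt_cache_flush().
 */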
3069
eeb61f71 3070static ctl_table ipv4_route_table[] = {
1da177e4 3071 {
1da177e4
LT
3072 .procname = "gc_thresh",
3073 .data = &ipv4_dst_ops.gc_thresh,
3074 .maxlen = sizeof(int),
3075 .mode = 0644,
6d9f239a 3076 .proc_handler = proc_dointvec,
1da177e4
LT
3077 },
3078 {
1da177e4
LT
3079 .procname = "max_size",
3080 .data = &ip_rt_max_size,
3081 .maxlen = sizeof(int),
3082 .mode = 0644,
6d9f239a 3083 .proc_handler = proc_dointvec,
1da177e4
LT
3084 },
3085 {
3086 /* Deprecated. Use gc_min_interval_ms */
e905a9ed 3087
1da177e4
LT
3088 .procname = "gc_min_interval",
3089 .data = &ip_rt_gc_min_interval,
3090 .maxlen = sizeof(int),
3091 .mode = 0644,
6d9f239a 3092 .proc_handler = proc_dointvec_jiffies,
1da177e4
LT
3093 },
3094 {
1da177e4
LT
3095 .procname = "gc_min_interval_ms",
3096 .data = &ip_rt_gc_min_interval,
3097 .maxlen = sizeof(int),
3098 .mode = 0644,
6d9f239a 3099 .proc_handler = proc_dointvec_ms_jiffies,
1da177e4
LT
3100 },
3101 {
1da177e4
LT
3102 .procname = "gc_timeout",
3103 .data = &ip_rt_gc_timeout,
3104 .maxlen = sizeof(int),
3105 .mode = 0644,
6d9f239a 3106 .proc_handler = proc_dointvec_jiffies,
1da177e4
LT
3107 },
3108 {
1da177e4
LT
3109 .procname = "gc_interval",
3110 .data = &ip_rt_gc_interval,
3111 .maxlen = sizeof(int),
3112 .mode = 0644,
6d9f239a 3113 .proc_handler = proc_dointvec_jiffies,
1da177e4
LT
3114 },
3115 {
1da177e4
LT
3116 .procname = "redirect_load",
3117 .data = &ip_rt_redirect_load,
3118 .maxlen = sizeof(int),
3119 .mode = 0644,
6d9f239a 3120 .proc_handler = proc_dointvec,
1da177e4
LT
3121 },
3122 {
1da177e4
LT
3123 .procname = "redirect_number",
3124 .data = &ip_rt_redirect_number,
3125 .maxlen = sizeof(int),
3126 .mode = 0644,
6d9f239a 3127 .proc_handler = proc_dointvec,
1da177e4
LT
3128 },
3129 {
1da177e4
LT
3130 .procname = "redirect_silence",
3131 .data = &ip_rt_redirect_silence,
3132 .maxlen = sizeof(int),
3133 .mode = 0644,
6d9f239a 3134 .proc_handler = proc_dointvec,
1da177e4
LT
3135 },
3136 {
1da177e4
LT
3137 .procname = "error_cost",
3138 .data = &ip_rt_error_cost,
3139 .maxlen = sizeof(int),
3140 .mode = 0644,
6d9f239a 3141 .proc_handler = proc_dointvec,
1da177e4
LT
3142 },
3143 {
1da177e4
LT
3144 .procname = "error_burst",
3145 .data = &ip_rt_error_burst,
3146 .maxlen = sizeof(int),
3147 .mode = 0644,
6d9f239a 3148 .proc_handler = proc_dointvec,
1da177e4
LT
3149 },
3150 {
1da177e4
LT
3151 .procname = "gc_elasticity",
3152 .data = &ip_rt_gc_elasticity,
3153 .maxlen = sizeof(int),
3154 .mode = 0644,
6d9f239a 3155 .proc_handler = proc_dointvec,
1da177e4
LT
3156 },
3157 {
1da177e4
LT
3158 .procname = "mtu_expires",
3159 .data = &ip_rt_mtu_expires,
3160 .maxlen = sizeof(int),
3161 .mode = 0644,
6d9f239a 3162 .proc_handler = proc_dointvec_jiffies,
1da177e4
LT
3163 },
3164 {
1da177e4
LT
3165 .procname = "min_pmtu",
3166 .data = &ip_rt_min_pmtu,
3167 .maxlen = sizeof(int),
3168 .mode = 0644,
6d9f239a 3169 .proc_handler = proc_dointvec,
1da177e4
LT
3170 },
3171 {
1da177e4
LT
3172 .procname = "min_adv_mss",
3173 .data = &ip_rt_min_advmss,
3174 .maxlen = sizeof(int),
3175 .mode = 0644,
6d9f239a 3176 .proc_handler = proc_dointvec,
1da177e4 3177 },
f8572d8f 3178 { }
1da177e4 3179};
39a23e75 3180
2f4520d3
AV
3181static struct ctl_table empty[1];
3182
3183static struct ctl_table ipv4_skeleton[] =
3184{
f8572d8f 3185 { .procname = "route",
d994af0d 3186 .mode = 0555, .child = ipv4_route_table},
f8572d8f 3187 { .procname = "neigh",
d994af0d 3188 .mode = 0555, .child = empty},
2f4520d3
AV
3189 { }
3190};
3191
3192static __net_initdata struct ctl_path ipv4_path[] = {
f8572d8f
EB
3193 { .procname = "net", },
3194 { .procname = "ipv4", },
39a23e75
DL
3195 { },
3196};
3197
39a23e75
DL
3198static struct ctl_table ipv4_route_flush_table[] = {
3199 {
39a23e75
DL
3200 .procname = "flush",
3201 .maxlen = sizeof(int),
3202 .mode = 0200,
6d9f239a 3203 .proc_handler = ipv4_sysctl_rtcache_flush,
39a23e75 3204 },
f8572d8f 3205 { },
39a23e75
DL
3206};
3207
2f4520d3 3208static __net_initdata struct ctl_path ipv4_route_path[] = {
f8572d8f
EB
3209 { .procname = "net", },
3210 { .procname = "ipv4", },
3211 { .procname = "route", },
2f4520d3
AV
3212 { },
3213};
3214
39a23e75
DL
3215static __net_init int sysctl_route_net_init(struct net *net)
3216{
3217 struct ctl_table *tbl;
3218
3219 tbl = ipv4_route_flush_table;
09ad9bc7 3220 if (!net_eq(net, &init_net)) {
39a23e75
DL
3221 tbl = kmemdup(tbl, sizeof(ipv4_route_flush_table), GFP_KERNEL);
3222 if (tbl == NULL)
3223 goto err_dup;
3224 }
3225 tbl[0].extra1 = net;
3226
3227 net->ipv4.route_hdr =
3228 register_net_sysctl_table(net, ipv4_route_path, tbl);
3229 if (net->ipv4.route_hdr == NULL)
3230 goto err_reg;
3231 return 0;
3232
3233err_reg:
3234 if (tbl != ipv4_route_flush_table)
3235 kfree(tbl);
3236err_dup:
3237 return -ENOMEM;
3238}
3239
3240static __net_exit void sysctl_route_net_exit(struct net *net)
3241{
3242 struct ctl_table *tbl;
3243
3244 tbl = net->ipv4.route_hdr->ctl_table_arg;
3245 unregister_net_sysctl_table(net->ipv4.route_hdr);
3246 BUG_ON(tbl == ipv4_route_flush_table);
3247 kfree(tbl);
3248}
3249
3250static __net_initdata struct pernet_operations sysctl_route_ops = {
3251 .init = sysctl_route_net_init,
3252 .exit = sysctl_route_net_exit,
3253};
1da177e4
LT
3254#endif
3255
3ee94372 3256static __net_init int rt_genid_init(struct net *net)
9f5e97e5 3257{
3ee94372
NH
3258 get_random_bytes(&net->ipv4.rt_genid,
3259 sizeof(net->ipv4.rt_genid));
436c3b66
DM
3260 get_random_bytes(&net->ipv4.dev_addr_genid,
3261 sizeof(net->ipv4.dev_addr_genid));
9f5e97e5
DL
3262 return 0;
3263}
3264
3ee94372
NH
3265static __net_initdata struct pernet_operations rt_genid_ops = {
3266 .init = rt_genid_init,
9f5e97e5
DL
3267};
3268
3269
c7066f70 3270#ifdef CONFIG_IP_ROUTE_CLASSID
7d720c3e 3271struct ip_rt_acct __percpu *ip_rt_acct __read_mostly;
c7066f70 3272#endif /* CONFIG_IP_ROUTE_CLASSID */
1da177e4
LT
3273
3274static __initdata unsigned long rhash_entries;
3275static int __init set_rhash_entries(char *str)
3276{
3277 if (!str)
3278 return 0;
3279 rhash_entries = simple_strtoul(str, &str, 0);
3280 return 1;
3281}
3282__setup("rhash_entries=", set_rhash_entries);
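/* Example (editor's illustration): the route cache hash table size can
 * be pinned on the kernel command line, e.g.
 *
 *	rhash_entries=262144
 *
 * otherwise alloc_large_system_hash() in ip_rt_init() below sizes it
 * from available memory.
 */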
3283
3284int __init ip_rt_init(void)
3285{
424c4b70 3286 int rc = 0;
1da177e4 3287
c7066f70 3288#ifdef CONFIG_IP_ROUTE_CLASSID
0dcec8c2 3289 ip_rt_acct = __alloc_percpu(256 * sizeof(struct ip_rt_acct), __alignof__(struct ip_rt_acct));
1da177e4
LT
3290 if (!ip_rt_acct)
3291 panic("IP: failed to allocate ip_rt_acct\n");
1da177e4
LT
3292#endif
3293
e5d679f3
AD
3294 ipv4_dst_ops.kmem_cachep =
3295 kmem_cache_create("ip_dst_cache", sizeof(struct rtable), 0,
20c2df83 3296 SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
1da177e4 3297
14e50e57
DM
3298 ipv4_dst_blackhole_ops.kmem_cachep = ipv4_dst_ops.kmem_cachep;
3299
fc66f95c
ED
3300 if (dst_entries_init(&ipv4_dst_ops) < 0)
3301 panic("IP: failed to allocate ipv4_dst_ops counter\n");
3302
3303 if (dst_entries_init(&ipv4_dst_blackhole_ops) < 0)
3304 panic("IP: failed to allocate ipv4_dst_blackhole_ops counter\n");
3305
424c4b70
ED
3306 rt_hash_table = (struct rt_hash_bucket *)
3307 alloc_large_system_hash("IP route cache",
3308 sizeof(struct rt_hash_bucket),
3309 rhash_entries,
4481374c 3310 (totalram_pages >= 128 * 1024) ?
18955cfc 3311 15 : 17,
8d1502de 3312 0,
424c4b70
ED
3313 &rt_hash_log,
3314 &rt_hash_mask,
c9503e0f 3315 rhash_entries ? 0 : 512 * 1024);
22c047cc
ED
3316 memset(rt_hash_table, 0, (rt_hash_mask + 1) * sizeof(struct rt_hash_bucket));
3317 rt_hash_lock_init();
1da177e4
LT
3318
3319 ipv4_dst_ops.gc_thresh = (rt_hash_mask + 1);
3320 ip_rt_max_size = (rt_hash_mask + 1) * 16;
3321
1da177e4
LT
3322 devinet_init();
3323 ip_fib_init();
3324
73b38711 3325 if (ip_rt_proc_init())
107f1634 3326 printk(KERN_ERR "Unable to create route proc files\n");
1da177e4
LT
3327#ifdef CONFIG_XFRM
3328 xfrm_init();
a33bc5c1 3329 xfrm4_init(ip_rt_max_size);
1da177e4 3330#endif
c7ac8679 3331 rtnl_register(PF_INET, RTM_GETROUTE, inet_rtm_getroute, NULL, NULL);
63f3444f 3332
39a23e75
DL
3333#ifdef CONFIG_SYSCTL
3334 register_pernet_subsys(&sysctl_route_ops);
3335#endif
3ee94372 3336 register_pernet_subsys(&rt_genid_ops);
1da177e4
LT
3337 return rc;
3338}
3339
a1bc6eb4 3340#ifdef CONFIG_SYSCTL
eeb61f71
AV
3341/*
3342 * We really need to sanitize the damn ipv4 init order, then all
3343 * this nonsense will go away.
3344 */
3345void __init ip_static_sysctl_init(void)
3346{
2f4520d3 3347 register_sysctl_paths(ipv4_path, ipv4_skeleton);
eeb61f71 3348}
a1bc6eb4 3349#endif