xfrm: Return dst directly from xfrm_lookup()
[linux-2.6-block.git] / net / ipv4 / route.c
1/*
2 * INET An implementation of the TCP/IP protocol suite for the LINUX
3 * operating system. INET is implemented using the BSD Socket
4 * interface as the means of communication with the user level.
5 *
6 * ROUTE - implementation of the IP router.
7 *
02c30a84 8 * Authors: Ross Biro
1da177e4
LT
9 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
10 * Alan Cox, <gw4pts@gw4pts.ampr.org>
11 * Linus Torvalds, <Linus.Torvalds@helsinki.fi>
12 * Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
13 *
14 * Fixes:
15 * Alan Cox : Verify area fixes.
16 * Alan Cox : cli() protects routing changes
17 * Rui Oliveira : ICMP routing table updates
18 * (rco@di.uminho.pt) Routing table insertion and update
19 * Linus Torvalds : Rewrote bits to be sensible
20 * Alan Cox : Added BSD route gw semantics
e905a9ed 21 * Alan Cox : Super /proc >4K
1da177e4
LT
22 * Alan Cox : MTU in route table
23 * Alan Cox : MSS actually. Also added the window
24 * clamper.
25 * Sam Lantinga : Fixed route matching in rt_del()
26 * Alan Cox : Routing cache support.
27 * Alan Cox : Removed compatibility cruft.
28 * Alan Cox : RTF_REJECT support.
29 * Alan Cox : TCP irtt support.
30 * Jonathan Naylor : Added Metric support.
31 * Miquel van Smoorenburg : BSD API fixes.
32 * Miquel van Smoorenburg : Metrics.
33 * Alan Cox : Use __u32 properly
34 * Alan Cox : Aligned routing errors more closely with BSD
35 * our system is still very different.
36 * Alan Cox : Faster /proc handling
37 * Alexey Kuznetsov : Massive rework to support tree based routing,
38 * routing caches and better behaviour.
e905a9ed 39 *
1da177e4
LT
40 * Olaf Erb : irtt wasn't being copied right.
41 * Bjorn Ekwall : Kerneld route support.
42 * Alan Cox : Multicast fixed (I hope)
43 * Pavel Krauz : Limited broadcast fixed
44 * Mike McLagan : Routing by source
45 * Alexey Kuznetsov : End of old history. Split to fib.c and
46 * route.c and rewritten from scratch.
47 * Andi Kleen : Load-limit warning messages.
48 * Vitaly E. Lavrov : Transparent proxy revived after year coma.
49 * Vitaly E. Lavrov : Race condition in ip_route_input_slow.
50 * Tobias Ringstrom : Uninitialized res.type in ip_route_output_slow.
51 * Vladimir V. Ivanov : IP rule info (flowid) is really useful.
52 * Marc Boucher : routing by fwmark
53 * Robert Olsson : Added rt_cache statistics
54 * Arnaldo C. Melo : Convert proc stuff to seq_file
bb1d23b0 55 * Eric Dumazet : hashed spinlocks and rt_check_expire() fixes.
cef2685e
IS
56 * Ilia Sotnikov : Ignore TOS on PMTUD and Redirect
57 * Ilia Sotnikov : Removed TOS from hash calculations
1da177e4
LT
58 *
59 * This program is free software; you can redistribute it and/or
60 * modify it under the terms of the GNU General Public License
61 * as published by the Free Software Foundation; either version
62 * 2 of the License, or (at your option) any later version.
63 */
64
1da177e4
LT
65#include <linux/module.h>
66#include <asm/uaccess.h>
67#include <asm/system.h>
68#include <linux/bitops.h>
69#include <linux/types.h>
70#include <linux/kernel.h>
1da177e4 71#include <linux/mm.h>
424c4b70 72#include <linux/bootmem.h>
1da177e4
LT
73#include <linux/string.h>
74#include <linux/socket.h>
75#include <linux/sockios.h>
76#include <linux/errno.h>
77#include <linux/in.h>
78#include <linux/inet.h>
79#include <linux/netdevice.h>
80#include <linux/proc_fs.h>
81#include <linux/init.h>
39c90ece 82#include <linux/workqueue.h>
1da177e4 83#include <linux/skbuff.h>
1da177e4
LT
84#include <linux/inetdevice.h>
85#include <linux/igmp.h>
86#include <linux/pkt_sched.h>
87#include <linux/mroute.h>
88#include <linux/netfilter_ipv4.h>
89#include <linux/random.h>
90#include <linux/jhash.h>
91#include <linux/rcupdate.h>
92#include <linux/times.h>
5a0e3ad6 93#include <linux/slab.h>
352e512c 94#include <net/dst.h>
457c4cbc 95#include <net/net_namespace.h>
1da177e4
LT
96#include <net/protocol.h>
97#include <net/ip.h>
98#include <net/route.h>
99#include <net/inetpeer.h>
100#include <net/sock.h>
101#include <net/ip_fib.h>
102#include <net/arp.h>
103#include <net/tcp.h>
104#include <net/icmp.h>
105#include <net/xfrm.h>
8d71740c 106#include <net/netevent.h>
63f3444f 107#include <net/rtnetlink.h>
1da177e4
LT
108#ifdef CONFIG_SYSCTL
109#include <linux/sysctl.h>
110#endif
111
112#define RT_FL_TOS(oldflp) \
113 ((u32)(oldflp->fl4_tos & (IPTOS_RT_MASK | RTO_ONLINK)))
114
115#define IP_MAX_MTU 0xFFF0
116
117#define RT_GC_TIMEOUT (300*HZ)
118
1da177e4 119static int ip_rt_max_size;
817bc4db
SH
120static int ip_rt_gc_timeout __read_mostly = RT_GC_TIMEOUT;
121static int ip_rt_gc_interval __read_mostly = 60 * HZ;
122static int ip_rt_gc_min_interval __read_mostly = HZ / 2;
123static int ip_rt_redirect_number __read_mostly = 9;
124static int ip_rt_redirect_load __read_mostly = HZ / 50;
125static int ip_rt_redirect_silence __read_mostly = ((HZ / 50) << (9 + 1));
126static int ip_rt_error_cost __read_mostly = HZ;
127static int ip_rt_error_burst __read_mostly = 5 * HZ;
128static int ip_rt_gc_elasticity __read_mostly = 8;
129static int ip_rt_mtu_expires __read_mostly = 10 * 60 * HZ;
130static int ip_rt_min_pmtu __read_mostly = 512 + 20 + 20;
131static int ip_rt_min_advmss __read_mostly = 256;
1080d709 132static int rt_chain_length_max __read_mostly = 20;
1da177e4 133
1da177e4
LT
134/*
135 * Interface to generic destination cache.
136 */
137
138static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie);
0dbaee3b 139static unsigned int ipv4_default_advmss(const struct dst_entry *dst);
d33e4553 140static unsigned int ipv4_default_mtu(const struct dst_entry *dst);
1da177e4 141static void ipv4_dst_destroy(struct dst_entry *dst);
1da177e4
LT
142static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst);
143static void ipv4_link_failure(struct sk_buff *skb);
144static void ip_rt_update_pmtu(struct dst_entry *dst, u32 mtu);
569d3645 145static int rt_garbage_collect(struct dst_ops *ops);
1da177e4 146
72cdd1d9
ED
147static void ipv4_dst_ifdown(struct dst_entry *dst, struct net_device *dev,
148 int how)
149{
150}
1da177e4 151
62fa8a84
DM
152static u32 *ipv4_cow_metrics(struct dst_entry *dst, unsigned long old)
153{
06582540
DM
154 struct rtable *rt = (struct rtable *) dst;
155 struct inet_peer *peer;
156 u32 *p = NULL;
157
158 if (!rt->peer)
159 rt_bind_peer(rt, 1);
62fa8a84 160
06582540
DM
161 peer = rt->peer;
162 if (peer) {
62fa8a84
DM
163 u32 *old_p = __DST_METRICS_PTR(old);
164 unsigned long prev, new;
165
06582540
DM
166 p = peer->metrics;
167 if (inet_metrics_new(peer))
168 memcpy(p, old_p, sizeof(u32) * RTAX_MAX);
62fa8a84
DM
169
170 new = (unsigned long) p;
171 prev = cmpxchg(&dst->_metrics, old, new);
172
173 if (prev != old) {
62fa8a84
DM
174 p = __DST_METRICS_PTR(prev);
175 if (prev & DST_METRICS_READ_ONLY)
176 p = NULL;
177 } else {
62fa8a84
DM
178 if (rt->fi) {
179 fib_info_put(rt->fi);
180 rt->fi = NULL;
181 }
182 }
183 }
184 return p;
185}
186
1da177e4
LT
187static struct dst_ops ipv4_dst_ops = {
188 .family = AF_INET,
09640e63 189 .protocol = cpu_to_be16(ETH_P_IP),
1da177e4
LT
190 .gc = rt_garbage_collect,
191 .check = ipv4_dst_check,
0dbaee3b 192 .default_advmss = ipv4_default_advmss,
d33e4553 193 .default_mtu = ipv4_default_mtu,
62fa8a84 194 .cow_metrics = ipv4_cow_metrics,
1da177e4
LT
195 .destroy = ipv4_dst_destroy,
196 .ifdown = ipv4_dst_ifdown,
197 .negative_advice = ipv4_negative_advice,
198 .link_failure = ipv4_link_failure,
199 .update_pmtu = ip_rt_update_pmtu,
1ac06e03 200 .local_out = __ip_local_out,
1da177e4
LT
201};
202
203#define ECN_OR_COST(class) TC_PRIO_##class
204
4839c52b 205const __u8 ip_tos2prio[16] = {
1da177e4
LT
206 TC_PRIO_BESTEFFORT,
207 ECN_OR_COST(FILLER),
208 TC_PRIO_BESTEFFORT,
209 ECN_OR_COST(BESTEFFORT),
210 TC_PRIO_BULK,
211 ECN_OR_COST(BULK),
212 TC_PRIO_BULK,
213 ECN_OR_COST(BULK),
214 TC_PRIO_INTERACTIVE,
215 ECN_OR_COST(INTERACTIVE),
216 TC_PRIO_INTERACTIVE,
217 ECN_OR_COST(INTERACTIVE),
218 TC_PRIO_INTERACTIVE_BULK,
219 ECN_OR_COST(INTERACTIVE_BULK),
220 TC_PRIO_INTERACTIVE_BULK,
221 ECN_OR_COST(INTERACTIVE_BULK)
222};
223
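/* Added illustrative note (not in the original source): this table is
 * normally consumed by shifting the masked TOS byte right by one, e.g.
 * rt_tos2priority() in include/net/route.h does roughly
 *
 *	return ip_tos2prio[IPTOS_TOS(tos) >> 1];
 *
 * so a TOS of 0x10 (IPTOS_LOWDELAY) indexes entry 8 above and maps to
 * TC_PRIO_INTERACTIVE.  This is only a hedged sketch of the lookup, shown
 * to clarify how the sixteen entries are indexed.
 */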
224
225/*
226 * Route cache.
227 */
228
229/* The locking scheme is rather straightforward:
230 *
231 * 1) Read-Copy Update protects the buckets of the central route hash.
232 * 2) Only writers remove entries, and they hold the lock
233 * as they look at rtable reference counts.
234 * 3) Only readers acquire references to rtable entries,
235 * they do so with atomic increments and with the
236 * lock held.
237 */
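/* Illustrative sketch (added, not part of the original file) of what the two
 * sides of the scheme above look like in practice; the helpers named here are
 * the ones defined later in this file, "matches()" is a hypothetical predicate
 * standing in for the real key comparison, and rthp is the predecessor's next
 * pointer obtained while walking under the bucket lock:
 *
 *	rcu_read_lock_bh();				// reader side
 *	for (rth = rcu_dereference_bh(rt_hash_table[hash].chain); rth;
 *	     rth = rcu_dereference_bh(rth->dst.rt_next)) {
 *		if (matches(rth)) {
 *			dst_use(&rth->dst, jiffies);	// atomic refcount bump
 *			break;
 *		}
 *	}
 *	rcu_read_unlock_bh();
 *
 *	spin_lock_bh(rt_hash_lock_addr(hash));		// writer side
 *	*rthp = rth->dst.rt_next;			// unlink
 *	rt_free(rth);					// freed after an RCU grace period
 *	spin_unlock_bh(rt_hash_lock_addr(hash));
 */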
238
239struct rt_hash_bucket {
1c31720a 240 struct rtable __rcu *chain;
22c047cc 241};
1080d709 242
8a25d5de
IM
243#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) || \
244 defined(CONFIG_PROVE_LOCKING)
22c047cc
ED
245/*
246 * Instead of using one spinlock for each rt_hash_bucket, we use a table of spinlocks.
247 * The size of this table is a power of two and depends on the number of CPUs.
62051200 248 * (on lockdep we have a quite big spinlock_t, so keep the size down there)
22c047cc 249 */
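/* Added example (hedged): the lock table is indexed by masking the bucket
 * number, i.e. rt_hash_lock_addr(slot) below expands to
 * &rt_hash_locks[slot & (RT_HASH_LOCK_SZ - 1)], so with RT_HASH_LOCK_SZ == 256
 * buckets 5 and 261 share one spinlock, while writers touching buckets that
 * map to different locks can still run concurrently.
 */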
62051200
IM
250#ifdef CONFIG_LOCKDEP
251# define RT_HASH_LOCK_SZ 256
22c047cc 252#else
62051200
IM
253# if NR_CPUS >= 32
254# define RT_HASH_LOCK_SZ 4096
255# elif NR_CPUS >= 16
256# define RT_HASH_LOCK_SZ 2048
257# elif NR_CPUS >= 8
258# define RT_HASH_LOCK_SZ 1024
259# elif NR_CPUS >= 4
260# define RT_HASH_LOCK_SZ 512
261# else
262# define RT_HASH_LOCK_SZ 256
263# endif
22c047cc
ED
264#endif
265
266static spinlock_t *rt_hash_locks;
267# define rt_hash_lock_addr(slot) &rt_hash_locks[(slot) & (RT_HASH_LOCK_SZ - 1)]
1ff1cc20
PE
268
269static __init void rt_hash_lock_init(void)
270{
271 int i;
272
273 rt_hash_locks = kmalloc(sizeof(spinlock_t) * RT_HASH_LOCK_SZ,
274 GFP_KERNEL);
275 if (!rt_hash_locks)
276 panic("IP: failed to allocate rt_hash_locks\n");
277
278 for (i = 0; i < RT_HASH_LOCK_SZ; i++)
279 spin_lock_init(&rt_hash_locks[i]);
280}
22c047cc
ED
281#else
282# define rt_hash_lock_addr(slot) NULL
1ff1cc20
PE
283
284static inline void rt_hash_lock_init(void)
285{
286}
22c047cc 287#endif
1da177e4 288
817bc4db
SH
289static struct rt_hash_bucket *rt_hash_table __read_mostly;
290static unsigned rt_hash_mask __read_mostly;
291static unsigned int rt_hash_log __read_mostly;
1da177e4 292
2f970d83 293static DEFINE_PER_CPU(struct rt_cache_stat, rt_cache_stat);
27f39c73 294#define RT_CACHE_STAT_INC(field) __this_cpu_inc(rt_cache_stat.field)
1da177e4 295
b00180de 296static inline unsigned int rt_hash(__be32 daddr, __be32 saddr, int idx,
0eae88f3 297 int genid)
1da177e4 298{
0eae88f3 299 return jhash_3words((__force u32)daddr, (__force u32)saddr,
b00180de 300 idx, genid)
29e75252 301 & rt_hash_mask;
1da177e4
LT
302}
303
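/* Added note (hedged): mixing the per-namespace generation id into the hash
 * means that, after rt_cache_invalidate() bumps rt_genid, new lookups for the
 * same (daddr, saddr, idx) triple will, with overwhelming probability, land in
 * a different bucket than the stale entries, e.g.
 *
 *	old = rt_hash(daddr, saddr, ifindex, old_genid);
 *	new = rt_hash(daddr, saddr, ifindex, rt_genid(net));	// old != new almost surely
 *
 * Stale entries are then detected via rt_is_expired() and reaped lazily.
 */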
e84f84f2
DL
304static inline int rt_genid(struct net *net)
305{
306 return atomic_read(&net->ipv4.rt_genid);
307}
308
1da177e4
LT
309#ifdef CONFIG_PROC_FS
310struct rt_cache_iter_state {
a75e936f 311 struct seq_net_private p;
1da177e4 312 int bucket;
29e75252 313 int genid;
1da177e4
LT
314};
315
1218854a 316static struct rtable *rt_cache_get_first(struct seq_file *seq)
1da177e4 317{
1218854a 318 struct rt_cache_iter_state *st = seq->private;
1da177e4 319 struct rtable *r = NULL;
1da177e4
LT
320
321 for (st->bucket = rt_hash_mask; st->bucket >= 0; --st->bucket) {
1c31720a 322 if (!rcu_dereference_raw(rt_hash_table[st->bucket].chain))
a6272665 323 continue;
1da177e4 324 rcu_read_lock_bh();
a898def2 325 r = rcu_dereference_bh(rt_hash_table[st->bucket].chain);
29e75252 326 while (r) {
d8d1f30b 327 if (dev_net(r->dst.dev) == seq_file_net(seq) &&
a75e936f 328 r->rt_genid == st->genid)
29e75252 329 return r;
d8d1f30b 330 r = rcu_dereference_bh(r->dst.rt_next);
29e75252 331 }
1da177e4
LT
332 rcu_read_unlock_bh();
333 }
29e75252 334 return r;
1da177e4
LT
335}
336
1218854a 337static struct rtable *__rt_cache_get_next(struct seq_file *seq,
642d6318 338 struct rtable *r)
1da177e4 339{
1218854a 340 struct rt_cache_iter_state *st = seq->private;
a6272665 341
1c31720a 342 r = rcu_dereference_bh(r->dst.rt_next);
1da177e4
LT
343 while (!r) {
344 rcu_read_unlock_bh();
a6272665
ED
345 do {
346 if (--st->bucket < 0)
347 return NULL;
1c31720a 348 } while (!rcu_dereference_raw(rt_hash_table[st->bucket].chain));
1da177e4 349 rcu_read_lock_bh();
1c31720a 350 r = rcu_dereference_bh(rt_hash_table[st->bucket].chain);
1da177e4 351 }
1c31720a 352 return r;
1da177e4
LT
353}
354
1218854a 355static struct rtable *rt_cache_get_next(struct seq_file *seq,
642d6318
DL
356 struct rtable *r)
357{
1218854a
YH
358 struct rt_cache_iter_state *st = seq->private;
359 while ((r = __rt_cache_get_next(seq, r)) != NULL) {
d8d1f30b 360 if (dev_net(r->dst.dev) != seq_file_net(seq))
a75e936f 361 continue;
642d6318
DL
362 if (r->rt_genid == st->genid)
363 break;
364 }
365 return r;
366}
367
1218854a 368static struct rtable *rt_cache_get_idx(struct seq_file *seq, loff_t pos)
1da177e4 369{
1218854a 370 struct rtable *r = rt_cache_get_first(seq);
1da177e4
LT
371
372 if (r)
1218854a 373 while (pos && (r = rt_cache_get_next(seq, r)))
1da177e4
LT
374 --pos;
375 return pos ? NULL : r;
376}
377
378static void *rt_cache_seq_start(struct seq_file *seq, loff_t *pos)
379{
29e75252 380 struct rt_cache_iter_state *st = seq->private;
29e75252 381 if (*pos)
1218854a 382 return rt_cache_get_idx(seq, *pos - 1);
e84f84f2 383 st->genid = rt_genid(seq_file_net(seq));
29e75252 384 return SEQ_START_TOKEN;
1da177e4
LT
385}
386
387static void *rt_cache_seq_next(struct seq_file *seq, void *v, loff_t *pos)
388{
29e75252 389 struct rtable *r;
1da177e4
LT
390
391 if (v == SEQ_START_TOKEN)
1218854a 392 r = rt_cache_get_first(seq);
1da177e4 393 else
1218854a 394 r = rt_cache_get_next(seq, v);
1da177e4
LT
395 ++*pos;
396 return r;
397}
398
399static void rt_cache_seq_stop(struct seq_file *seq, void *v)
400{
401 if (v && v != SEQ_START_TOKEN)
402 rcu_read_unlock_bh();
403}
404
405static int rt_cache_seq_show(struct seq_file *seq, void *v)
406{
407 if (v == SEQ_START_TOKEN)
408 seq_printf(seq, "%-127s\n",
409 "Iface\tDestination\tGateway \tFlags\t\tRefCnt\tUse\t"
410 "Metric\tSource\t\tMTU\tWindow\tIRTT\tTOS\tHHRef\t"
411 "HHUptod\tSpecDst");
412 else {
413 struct rtable *r = v;
5e659e4c 414 int len;
1da177e4 415
0eae88f3
ED
416 seq_printf(seq, "%s\t%08X\t%08X\t%8X\t%d\t%u\t%d\t"
417 "%08X\t%d\t%u\t%u\t%02X\t%d\t%1d\t%08X%n",
d8d1f30b 418 r->dst.dev ? r->dst.dev->name : "*",
0eae88f3
ED
419 (__force u32)r->rt_dst,
420 (__force u32)r->rt_gateway,
d8d1f30b
CG
421 r->rt_flags, atomic_read(&r->dst.__refcnt),
422 r->dst.__use, 0, (__force u32)r->rt_src,
0dbaee3b 423 dst_metric_advmss(&r->dst) + 40,
d8d1f30b
CG
424 dst_metric(&r->dst, RTAX_WINDOW),
425 (int)((dst_metric(&r->dst, RTAX_RTT) >> 3) +
426 dst_metric(&r->dst, RTAX_RTTVAR)),
1da177e4 427 r->fl.fl4_tos,
d8d1f30b
CG
428 r->dst.hh ? atomic_read(&r->dst.hh->hh_refcnt) : -1,
429 r->dst.hh ? (r->dst.hh->hh_output ==
1da177e4 430 dev_queue_xmit) : 0,
5e659e4c
PE
431 r->rt_spec_dst, &len);
432
433 seq_printf(seq, "%*s\n", 127 - len, "");
e905a9ed
YH
434 }
435 return 0;
1da177e4
LT
436}
437
f690808e 438static const struct seq_operations rt_cache_seq_ops = {
1da177e4
LT
439 .start = rt_cache_seq_start,
440 .next = rt_cache_seq_next,
441 .stop = rt_cache_seq_stop,
442 .show = rt_cache_seq_show,
443};
444
445static int rt_cache_seq_open(struct inode *inode, struct file *file)
446{
a75e936f 447 return seq_open_net(inode, file, &rt_cache_seq_ops,
cf7732e4 448 sizeof(struct rt_cache_iter_state));
1da177e4
LT
449}
450
9a32144e 451static const struct file_operations rt_cache_seq_fops = {
1da177e4
LT
452 .owner = THIS_MODULE,
453 .open = rt_cache_seq_open,
454 .read = seq_read,
455 .llseek = seq_lseek,
a75e936f 456 .release = seq_release_net,
1da177e4
LT
457};
458
459
460static void *rt_cpu_seq_start(struct seq_file *seq, loff_t *pos)
461{
462 int cpu;
463
464 if (*pos == 0)
465 return SEQ_START_TOKEN;
466
0f23174a 467 for (cpu = *pos-1; cpu < nr_cpu_ids; ++cpu) {
1da177e4
LT
468 if (!cpu_possible(cpu))
469 continue;
470 *pos = cpu+1;
2f970d83 471 return &per_cpu(rt_cache_stat, cpu);
1da177e4
LT
472 }
473 return NULL;
474}
475
476static void *rt_cpu_seq_next(struct seq_file *seq, void *v, loff_t *pos)
477{
478 int cpu;
479
0f23174a 480 for (cpu = *pos; cpu < nr_cpu_ids; ++cpu) {
1da177e4
LT
481 if (!cpu_possible(cpu))
482 continue;
483 *pos = cpu+1;
2f970d83 484 return &per_cpu(rt_cache_stat, cpu);
1da177e4
LT
485 }
486 return NULL;
e905a9ed 487
1da177e4
LT
488}
489
490static void rt_cpu_seq_stop(struct seq_file *seq, void *v)
491{
492
493}
494
495static int rt_cpu_seq_show(struct seq_file *seq, void *v)
496{
497 struct rt_cache_stat *st = v;
498
499 if (v == SEQ_START_TOKEN) {
5bec0039 500 seq_printf(seq, "entries in_hit in_slow_tot in_slow_mc in_no_route in_brd in_martian_dst in_martian_src out_hit out_slow_tot out_slow_mc gc_total gc_ignored gc_goal_miss gc_dst_overflow in_hlist_search out_hlist_search\n");
1da177e4
LT
501 return 0;
502 }
e905a9ed 503
1da177e4
LT
504 seq_printf(seq,"%08x %08x %08x %08x %08x %08x %08x %08x "
505 " %08x %08x %08x %08x %08x %08x %08x %08x %08x \n",
fc66f95c 506 dst_entries_get_slow(&ipv4_dst_ops),
1da177e4
LT
507 st->in_hit,
508 st->in_slow_tot,
509 st->in_slow_mc,
510 st->in_no_route,
511 st->in_brd,
512 st->in_martian_dst,
513 st->in_martian_src,
514
515 st->out_hit,
516 st->out_slow_tot,
e905a9ed 517 st->out_slow_mc,
1da177e4
LT
518
519 st->gc_total,
520 st->gc_ignored,
521 st->gc_goal_miss,
522 st->gc_dst_overflow,
523 st->in_hlist_search,
524 st->out_hlist_search
525 );
526 return 0;
527}
528
f690808e 529static const struct seq_operations rt_cpu_seq_ops = {
1da177e4
LT
530 .start = rt_cpu_seq_start,
531 .next = rt_cpu_seq_next,
532 .stop = rt_cpu_seq_stop,
533 .show = rt_cpu_seq_show,
534};
535
536
537static int rt_cpu_seq_open(struct inode *inode, struct file *file)
538{
539 return seq_open(file, &rt_cpu_seq_ops);
540}
541
9a32144e 542static const struct file_operations rt_cpu_seq_fops = {
1da177e4
LT
543 .owner = THIS_MODULE,
544 .open = rt_cpu_seq_open,
545 .read = seq_read,
546 .llseek = seq_lseek,
547 .release = seq_release,
548};
549
c7066f70 550#ifdef CONFIG_IP_ROUTE_CLASSID
a661c419 551static int rt_acct_proc_show(struct seq_file *m, void *v)
78c686e9 552{
a661c419
AD
553 struct ip_rt_acct *dst, *src;
554 unsigned int i, j;
555
556 dst = kcalloc(256, sizeof(struct ip_rt_acct), GFP_KERNEL);
557 if (!dst)
558 return -ENOMEM;
559
560 for_each_possible_cpu(i) {
561 src = (struct ip_rt_acct *)per_cpu_ptr(ip_rt_acct, i);
562 for (j = 0; j < 256; j++) {
563 dst[j].o_bytes += src[j].o_bytes;
564 dst[j].o_packets += src[j].o_packets;
565 dst[j].i_bytes += src[j].i_bytes;
566 dst[j].i_packets += src[j].i_packets;
567 }
78c686e9
PE
568 }
569
a661c419
AD
570 seq_write(m, dst, 256 * sizeof(struct ip_rt_acct));
571 kfree(dst);
572 return 0;
573}
78c686e9 574
a661c419
AD
575static int rt_acct_proc_open(struct inode *inode, struct file *file)
576{
577 return single_open(file, rt_acct_proc_show, NULL);
78c686e9 578}
a661c419
AD
579
580static const struct file_operations rt_acct_proc_fops = {
581 .owner = THIS_MODULE,
582 .open = rt_acct_proc_open,
583 .read = seq_read,
584 .llseek = seq_lseek,
585 .release = single_release,
586};
78c686e9 587#endif
107f1634 588
73b38711 589static int __net_init ip_rt_do_proc_init(struct net *net)
107f1634
PE
590{
591 struct proc_dir_entry *pde;
592
593 pde = proc_net_fops_create(net, "rt_cache", S_IRUGO,
594 &rt_cache_seq_fops);
595 if (!pde)
596 goto err1;
597
77020720
WC
598 pde = proc_create("rt_cache", S_IRUGO,
599 net->proc_net_stat, &rt_cpu_seq_fops);
107f1634
PE
600 if (!pde)
601 goto err2;
602
c7066f70 603#ifdef CONFIG_IP_ROUTE_CLASSID
a661c419 604 pde = proc_create("rt_acct", 0, net->proc_net, &rt_acct_proc_fops);
107f1634
PE
605 if (!pde)
606 goto err3;
607#endif
608 return 0;
609
c7066f70 610#ifdef CONFIG_IP_ROUTE_CLASSID
107f1634
PE
611err3:
612 remove_proc_entry("rt_cache", net->proc_net_stat);
613#endif
614err2:
615 remove_proc_entry("rt_cache", net->proc_net);
616err1:
617 return -ENOMEM;
618}
73b38711
DL
619
620static void __net_exit ip_rt_do_proc_exit(struct net *net)
621{
622 remove_proc_entry("rt_cache", net->proc_net_stat);
623 remove_proc_entry("rt_cache", net->proc_net);
c7066f70 624#ifdef CONFIG_IP_ROUTE_CLASSID
73b38711 625 remove_proc_entry("rt_acct", net->proc_net);
0a931acf 626#endif
73b38711
DL
627}
628
629static struct pernet_operations ip_rt_proc_ops __net_initdata = {
630 .init = ip_rt_do_proc_init,
631 .exit = ip_rt_do_proc_exit,
632};
633
634static int __init ip_rt_proc_init(void)
635{
636 return register_pernet_subsys(&ip_rt_proc_ops);
637}
638
107f1634 639#else
73b38711 640static inline int ip_rt_proc_init(void)
107f1634
PE
641{
642 return 0;
643}
1da177e4 644#endif /* CONFIG_PROC_FS */
e905a9ed 645
5969f71d 646static inline void rt_free(struct rtable *rt)
1da177e4 647{
d8d1f30b 648 call_rcu_bh(&rt->dst.rcu_head, dst_rcu_free);
1da177e4
LT
649}
650
5969f71d 651static inline void rt_drop(struct rtable *rt)
1da177e4 652{
1da177e4 653 ip_rt_put(rt);
d8d1f30b 654 call_rcu_bh(&rt->dst.rcu_head, dst_rcu_free);
1da177e4
LT
655}
656
5969f71d 657static inline int rt_fast_clean(struct rtable *rth)
1da177e4
LT
658{
 659 /* Kill broadcast/multicast entries very aggressively, if they
 660 collide in the hash table with more useful entries */
661 return (rth->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST)) &&
c7537967 662 rt_is_input_route(rth) && rth->dst.rt_next;
1da177e4
LT
663}
664
5969f71d 665static inline int rt_valuable(struct rtable *rth)
1da177e4
LT
666{
667 return (rth->rt_flags & (RTCF_REDIRECTED | RTCF_NOTIFY)) ||
2c8cec5c 668 (rth->peer && rth->peer->pmtu_expires);
1da177e4
LT
669}
670
671static int rt_may_expire(struct rtable *rth, unsigned long tmo1, unsigned long tmo2)
672{
673 unsigned long age;
674 int ret = 0;
675
d8d1f30b 676 if (atomic_read(&rth->dst.__refcnt))
1da177e4
LT
677 goto out;
678
d8d1f30b 679 age = jiffies - rth->dst.lastuse;
1da177e4
LT
680 if ((age <= tmo1 && !rt_fast_clean(rth)) ||
681 (age <= tmo2 && rt_valuable(rth)))
682 goto out;
683 ret = 1;
684out: return ret;
685}
686
687/* Bits of score are:
688 * 31: very valuable
689 * 30: not quite useless
690 * 29..0: usage counter
691 */
692static inline u32 rt_score(struct rtable *rt)
693{
d8d1f30b 694 u32 score = jiffies - rt->dst.lastuse;
1da177e4
LT
695
696 score = ~score & ~(3<<30);
697
698 if (rt_valuable(rt))
699 score |= (1<<31);
700
c7537967 701 if (rt_is_output_route(rt) ||
1da177e4
LT
702 !(rt->rt_flags & (RTCF_BROADCAST|RTCF_MULTICAST|RTCF_LOCAL)))
703 score |= (1<<30);
704
705 return score;
706}
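/* Added worked example: rt_intern_hash() below treats the entry with the
 * *lowest* score in a bucket as the eviction candidate, so the bit layout
 * above orders entries roughly as
 *
 *	valuable (bit 31)  >  output/forwarded unicast (bit 30)  >  recently used
 *
 * e.g. for two plain input routes differing only in idle time, the one idle
 * longer has a smaller ~(jiffies - lastuse) counter, hence a lower score, and
 * is evicted first.
 */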
707
1080d709
NH
708static inline bool rt_caching(const struct net *net)
709{
710 return net->ipv4.current_rt_cache_rebuild_count <=
711 net->ipv4.sysctl_rt_cache_rebuild_count;
712}
713
714static inline bool compare_hash_inputs(const struct flowi *fl1,
715 const struct flowi *fl2)
716{
5811662b
CG
717 return ((((__force u32)fl1->fl4_dst ^ (__force u32)fl2->fl4_dst) |
718 ((__force u32)fl1->fl4_src ^ (__force u32)fl2->fl4_src) |
1080d709
NH
719 (fl1->iif ^ fl2->iif)) == 0);
720}
721
1da177e4
LT
722static inline int compare_keys(struct flowi *fl1, struct flowi *fl2)
723{
5811662b
CG
724 return (((__force u32)fl1->fl4_dst ^ (__force u32)fl2->fl4_dst) |
725 ((__force u32)fl1->fl4_src ^ (__force u32)fl2->fl4_src) |
47dcf0cb 726 (fl1->mark ^ fl2->mark) |
5811662b 727 (*(u16 *)&fl1->fl4_tos ^ *(u16 *)&fl2->fl4_tos) |
8238b218
DM
728 (fl1->oif ^ fl2->oif) |
729 (fl1->iif ^ fl2->iif)) == 0;
1da177e4
LT
730}
731
b5921910
DL
732static inline int compare_netns(struct rtable *rt1, struct rtable *rt2)
733{
d8d1f30b 734 return net_eq(dev_net(rt1->dst.dev), dev_net(rt2->dst.dev));
b5921910
DL
735}
736
e84f84f2
DL
737static inline int rt_is_expired(struct rtable *rth)
738{
d8d1f30b 739 return rth->rt_genid != rt_genid(dev_net(rth->dst.dev));
e84f84f2
DL
740}
741
beb659bd
ED
742/*
743 * Perform a full scan of hash table and free all entries.
744 * Can be called by a softirq or a process.
 745 * In the latter case, we want to reschedule if necessary
746 */
6561a3b1 747static void rt_do_flush(struct net *net, int process_context)
beb659bd
ED
748{
749 unsigned int i;
750 struct rtable *rth, *next;
751
752 for (i = 0; i <= rt_hash_mask; i++) {
6561a3b1
DM
753 struct rtable __rcu **pprev;
754 struct rtable *list;
755
beb659bd
ED
756 if (process_context && need_resched())
757 cond_resched();
1c31720a 758 rth = rcu_dereference_raw(rt_hash_table[i].chain);
beb659bd
ED
759 if (!rth)
760 continue;
761
762 spin_lock_bh(rt_hash_lock_addr(i));
32cb5b4e 763
6561a3b1
DM
764 list = NULL;
765 pprev = &rt_hash_table[i].chain;
766 rth = rcu_dereference_protected(*pprev,
1c31720a 767 lockdep_is_held(rt_hash_lock_addr(i)));
32cb5b4e 768
6561a3b1
DM
769 while (rth) {
770 next = rcu_dereference_protected(rth->dst.rt_next,
1c31720a 771 lockdep_is_held(rt_hash_lock_addr(i)));
6561a3b1
DM
772
773 if (!net ||
774 net_eq(dev_net(rth->dst.dev), net)) {
775 rcu_assign_pointer(*pprev, next);
776 rcu_assign_pointer(rth->dst.rt_next, list);
777 list = rth;
32cb5b4e 778 } else {
6561a3b1 779 pprev = &rth->dst.rt_next;
32cb5b4e 780 }
6561a3b1 781 rth = next;
32cb5b4e 782 }
6561a3b1 783
beb659bd
ED
784 spin_unlock_bh(rt_hash_lock_addr(i));
785
6561a3b1
DM
786 for (; list; list = next) {
787 next = rcu_dereference_protected(list->dst.rt_next, 1);
788 rt_free(list);
beb659bd
ED
789 }
790 }
791}
792
1080d709
NH
793/*
794 * While freeing expired entries, we compute average chain length
795 * and standard deviation, using fixed-point arithmetic.
 796 * This gives an estimate of rt_chain_length_max:
 797 * rt_chain_length_max = max(elasticity, AVG + 4*SD)
 798 * We use 3 bits for the fractional part, and 29 (or 61) for the magnitude.
799 */
800
801#define FRACT_BITS 3
802#define ONE (1UL << FRACT_BITS)
803
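/* Added worked example of the fixed-point format above: with FRACT_BITS = 3,
 * ONE == 8 represents 1.0, so an average chain length of 2.5 entries would be
 * stored as 2.5 * 8 == 20, and "20 >> FRACT_BITS" recovers the integer part, 2.
 * slow_chain_length() below follows the same convention: each entry with
 * distinct hash inputs contributes ONE, and the final ">> FRACT_BITS" turns
 * the sum back into a plain entry count.
 */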
98376387
ED
804/*
805 * Given a hash chain and an item in this hash chain,
806 * find if a previous entry has the same hash_inputs
807 * (but differs on tos, mark or oif)
808 * Returns 0 if an alias is found.
809 * Returns ONE if rth has no alias before itself.
810 */
811static int has_noalias(const struct rtable *head, const struct rtable *rth)
812{
813 const struct rtable *aux = head;
814
815 while (aux != rth) {
816 if (compare_hash_inputs(&aux->fl, &rth->fl))
817 return 0;
1c31720a 818 aux = rcu_dereference_protected(aux->dst.rt_next, 1);
98376387
ED
819 }
820 return ONE;
821}
822
29e75252
ED
823/*
 824 * Perturbation of rt_genid by a small quantity [1..256].
 825 * Using 8 bits of shuffling ensures we can call rt_cache_invalidate()
 826 * many times (2^24) without reusing a recent rt_genid value.
 827 * The Jenkins hash is strong enough that little changes of rt_genid are OK.
1da177e4 828 */
86c657f6 829static void rt_cache_invalidate(struct net *net)
1da177e4 830{
29e75252 831 unsigned char shuffle;
1da177e4 832
29e75252 833 get_random_bytes(&shuffle, sizeof(shuffle));
e84f84f2 834 atomic_add(shuffle + 1U, &net->ipv4.rt_genid);
1da177e4
LT
835}
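/* Added note (hedged): invalidation is lazy.  Bumping rt_genid frees nothing
 * by itself; cached entries become stale because rt_is_expired() compares the
 * genid recorded in each entry with the current per-namespace value, roughly:
 *
 *	rt_cache_invalidate(net);		// rt_genid changes
 *	...
 *	if (rt_is_expired(rth))			// old entries now fail this test
 *		rt_free(rth);			// and are unlinked when next seen
 */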
836
29e75252
ED
837/*
838 * delay < 0 : invalidate cache (fast : entries will be deleted later)
839 * delay >= 0 : invalidate & flush cache (can be long)
840 */
76e6ebfb 841void rt_cache_flush(struct net *net, int delay)
1da177e4 842{
86c657f6 843 rt_cache_invalidate(net);
29e75252 844 if (delay >= 0)
6561a3b1 845 rt_do_flush(net, !in_softirq());
1da177e4
LT
846}
847
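/* Added usage note (hedged): callers pick the delay to trade latency for work,
 * matching the "delay < 0" / "delay >= 0" description above, e.g.
 *
 *	rt_cache_flush(net, -1);	// fast: only bump rt_genid, reap lazily
 *	rt_cache_flush(net, 0);		// also walk the whole hash table now
 */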
a5ee1551 848/* Flush previous cache invalidated entries from the cache */
6561a3b1 849void rt_cache_flush_batch(struct net *net)
a5ee1551 850{
6561a3b1 851 rt_do_flush(net, !in_softirq());
a5ee1551
EB
852}
853
1080d709
NH
854static void rt_emergency_hash_rebuild(struct net *net)
855{
3ee94372 856 if (net_ratelimit())
1080d709 857 printk(KERN_WARNING "Route hash chain too long!\n");
3ee94372 858 rt_cache_invalidate(net);
1080d709
NH
859}
860
1da177e4
LT
861/*
862 Short description of GC goals.
863
 864 We want to build an algorithm which keeps the routing cache
 865 at some equilibrium point, where the number of aged-off entries
 866 is kept approximately equal to newly generated ones.
 867
 868 The current expiration strength is the variable "expire".
 869 We try to adjust it dynamically, so that when the network
 870 is idle, expire is large enough to keep plenty of warm entries,
 871 and when load increases, it shrinks to limit the cache size.
872 */
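/* Added illustration of the feedback loop described above (hedged): "expire"
 * is the only state carried between calls to rt_garbage_collect(), and it
 * roughly behaves as
 *
 *	goal missed:	expire >>= 1;			// age entries out faster
 *	goal met:	expire += ip_rt_gc_min_interval;
 *			// reset to ip_rt_gc_timeout when it overshoots or the
 *			// cache has dropped back below gc_thresh
 *
 * so sustained pressure shortens the effective lifetime of entries, and a
 * quiet cache drifts back towards RT_GC_TIMEOUT.
 */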
873
569d3645 874static int rt_garbage_collect(struct dst_ops *ops)
1da177e4
LT
875{
876 static unsigned long expire = RT_GC_TIMEOUT;
877 static unsigned long last_gc;
878 static int rover;
879 static int equilibrium;
1c31720a
ED
880 struct rtable *rth;
881 struct rtable __rcu **rthp;
1da177e4
LT
882 unsigned long now = jiffies;
883 int goal;
fc66f95c 884 int entries = dst_entries_get_fast(&ipv4_dst_ops);
1da177e4
LT
885
886 /*
887 * Garbage collection is pretty expensive,
888 * do not make it too frequently.
889 */
890
891 RT_CACHE_STAT_INC(gc_total);
892
893 if (now - last_gc < ip_rt_gc_min_interval &&
fc66f95c 894 entries < ip_rt_max_size) {
1da177e4
LT
895 RT_CACHE_STAT_INC(gc_ignored);
896 goto out;
897 }
898
fc66f95c 899 entries = dst_entries_get_slow(&ipv4_dst_ops);
1da177e4 900 /* Calculate number of entries, which we want to expire now. */
fc66f95c 901 goal = entries - (ip_rt_gc_elasticity << rt_hash_log);
1da177e4
LT
902 if (goal <= 0) {
903 if (equilibrium < ipv4_dst_ops.gc_thresh)
904 equilibrium = ipv4_dst_ops.gc_thresh;
fc66f95c 905 goal = entries - equilibrium;
1da177e4 906 if (goal > 0) {
b790cedd 907 equilibrium += min_t(unsigned int, goal >> 1, rt_hash_mask + 1);
fc66f95c 908 goal = entries - equilibrium;
1da177e4
LT
909 }
910 } else {
911 /* We are in dangerous area. Try to reduce cache really
912 * aggressively.
913 */
b790cedd 914 goal = max_t(unsigned int, goal >> 1, rt_hash_mask + 1);
fc66f95c 915 equilibrium = entries - goal;
1da177e4
LT
916 }
917
918 if (now - last_gc >= ip_rt_gc_min_interval)
919 last_gc = now;
920
921 if (goal <= 0) {
922 equilibrium += goal;
923 goto work_done;
924 }
925
926 do {
927 int i, k;
928
929 for (i = rt_hash_mask, k = rover; i >= 0; i--) {
930 unsigned long tmo = expire;
931
932 k = (k + 1) & rt_hash_mask;
933 rthp = &rt_hash_table[k].chain;
22c047cc 934 spin_lock_bh(rt_hash_lock_addr(k));
1c31720a
ED
935 while ((rth = rcu_dereference_protected(*rthp,
936 lockdep_is_held(rt_hash_lock_addr(k)))) != NULL) {
e84f84f2 937 if (!rt_is_expired(rth) &&
29e75252 938 !rt_may_expire(rth, tmo, expire)) {
1da177e4 939 tmo >>= 1;
d8d1f30b 940 rthp = &rth->dst.rt_next;
1da177e4
LT
941 continue;
942 }
d8d1f30b 943 *rthp = rth->dst.rt_next;
1da177e4
LT
944 rt_free(rth);
945 goal--;
1da177e4 946 }
22c047cc 947 spin_unlock_bh(rt_hash_lock_addr(k));
1da177e4
LT
948 if (goal <= 0)
949 break;
950 }
951 rover = k;
952
953 if (goal <= 0)
954 goto work_done;
955
 956 /* Goal is not achieved. We stop the process if:
 957
 958 - expire is reduced to zero; otherwise, expire is halved.
 959 - the table is not full.
 960 - we are called from interrupt context.
 961 - the jiffies check is just a fallback/debug loop breaker.
 962 We will not spin here for a long time in any case.
963 */
964
965 RT_CACHE_STAT_INC(gc_goal_miss);
966
967 if (expire == 0)
968 break;
969
970 expire >>= 1;
971#if RT_CACHE_DEBUG >= 2
972 printk(KERN_DEBUG "expire>> %u %d %d %d\n", expire,
fc66f95c 973 dst_entries_get_fast(&ipv4_dst_ops), goal, i);
1da177e4
LT
974#endif
975
fc66f95c 976 if (dst_entries_get_fast(&ipv4_dst_ops) < ip_rt_max_size)
1da177e4
LT
977 goto out;
978 } while (!in_softirq() && time_before_eq(jiffies, now));
979
fc66f95c
ED
980 if (dst_entries_get_fast(&ipv4_dst_ops) < ip_rt_max_size)
981 goto out;
982 if (dst_entries_get_slow(&ipv4_dst_ops) < ip_rt_max_size)
1da177e4
LT
983 goto out;
984 if (net_ratelimit())
985 printk(KERN_WARNING "dst cache overflow\n");
986 RT_CACHE_STAT_INC(gc_dst_overflow);
987 return 1;
988
989work_done:
990 expire += ip_rt_gc_min_interval;
991 if (expire > ip_rt_gc_timeout ||
fc66f95c
ED
992 dst_entries_get_fast(&ipv4_dst_ops) < ipv4_dst_ops.gc_thresh ||
993 dst_entries_get_slow(&ipv4_dst_ops) < ipv4_dst_ops.gc_thresh)
1da177e4
LT
994 expire = ip_rt_gc_timeout;
995#if RT_CACHE_DEBUG >= 2
996 printk(KERN_DEBUG "expire++ %u %d %d %d\n", expire,
fc66f95c 997 dst_entries_get_fast(&ipv4_dst_ops), goal, rover);
1da177e4
LT
998#endif
999out: return 0;
1000}
1001
98376387
ED
1002/*
1003 * Returns number of entries in a hash chain that have different hash_inputs
1004 */
1005static int slow_chain_length(const struct rtable *head)
1006{
1007 int length = 0;
1008 const struct rtable *rth = head;
1009
1010 while (rth) {
1011 length += has_noalias(head, rth);
1c31720a 1012 rth = rcu_dereference_protected(rth->dst.rt_next, 1);
98376387
ED
1013 }
1014 return length >> FRACT_BITS;
1015}
1016
511c3f92 1017static int rt_intern_hash(unsigned hash, struct rtable *rt,
6a2bad70 1018 struct rtable **rp, struct sk_buff *skb, int ifindex)
1da177e4 1019{
1c31720a
ED
1020 struct rtable *rth, *cand;
1021 struct rtable __rcu **rthp, **candp;
1da177e4 1022 unsigned long now;
1da177e4
LT
1023 u32 min_score;
1024 int chain_length;
1025 int attempts = !in_softirq();
1026
1027restart:
1028 chain_length = 0;
1029 min_score = ~(u32)0;
1030 cand = NULL;
1031 candp = NULL;
1032 now = jiffies;
1033
d8d1f30b 1034 if (!rt_caching(dev_net(rt->dst.dev))) {
73e42897
NH
1035 /*
1036 * If we're not caching, just tell the caller we
1037 * were successful and don't touch the route. The
 1038 * caller holds the sole reference to the cache entry, and
1039 * it will be released when the caller is done with it.
1040 * If we drop it here, the callers have no way to resolve routes
1041 * when we're not caching. Instead, just point *rp at rt, so
1042 * the caller gets a single use out of the route
b6280b47
NH
1043 * Note that we do rt_free on this new route entry, so that
1044 * once its refcount hits zero, we are still able to reap it
1045 * (Thanks Alexey)
27b75c95
ED
1046 * Note: To avoid expensive rcu stuff for this uncached dst,
1047 * we set DST_NOCACHE so that dst_release() can free dst without
 1048 * waiting for a grace period.
73e42897 1049 */
b6280b47 1050
c7d4426a 1051 rt->dst.flags |= DST_NOCACHE;
c7537967 1052 if (rt->rt_type == RTN_UNICAST || rt_is_output_route(rt)) {
d8d1f30b 1053 int err = arp_bind_neighbour(&rt->dst);
b6280b47
NH
1054 if (err) {
1055 if (net_ratelimit())
1056 printk(KERN_WARNING
1057 "Neighbour table failure & not caching routes.\n");
27b75c95 1058 ip_rt_put(rt);
b6280b47
NH
1059 return err;
1060 }
1061 }
1062
b6280b47 1063 goto skip_hashing;
1080d709
NH
1064 }
1065
1da177e4
LT
1066 rthp = &rt_hash_table[hash].chain;
1067
22c047cc 1068 spin_lock_bh(rt_hash_lock_addr(hash));
1c31720a
ED
1069 while ((rth = rcu_dereference_protected(*rthp,
1070 lockdep_is_held(rt_hash_lock_addr(hash)))) != NULL) {
e84f84f2 1071 if (rt_is_expired(rth)) {
d8d1f30b 1072 *rthp = rth->dst.rt_next;
29e75252
ED
1073 rt_free(rth);
1074 continue;
1075 }
b5921910 1076 if (compare_keys(&rth->fl, &rt->fl) && compare_netns(rth, rt)) {
1da177e4 1077 /* Put it first */
d8d1f30b 1078 *rthp = rth->dst.rt_next;
1da177e4
LT
1079 /*
1080 * Since lookup is lockfree, the deletion
1081 * must be visible to another weakly ordered CPU before
1082 * the insertion at the start of the hash chain.
1083 */
d8d1f30b 1084 rcu_assign_pointer(rth->dst.rt_next,
1da177e4
LT
1085 rt_hash_table[hash].chain);
1086 /*
1087 * Since lookup is lockfree, the update writes
1088 * must be ordered for consistency on SMP.
1089 */
1090 rcu_assign_pointer(rt_hash_table[hash].chain, rth);
1091
d8d1f30b 1092 dst_use(&rth->dst, now);
22c047cc 1093 spin_unlock_bh(rt_hash_lock_addr(hash));
1da177e4
LT
1094
1095 rt_drop(rt);
511c3f92
ED
1096 if (rp)
1097 *rp = rth;
1098 else
d8d1f30b 1099 skb_dst_set(skb, &rth->dst);
1da177e4
LT
1100 return 0;
1101 }
1102
d8d1f30b 1103 if (!atomic_read(&rth->dst.__refcnt)) {
1da177e4
LT
1104 u32 score = rt_score(rth);
1105
1106 if (score <= min_score) {
1107 cand = rth;
1108 candp = rthp;
1109 min_score = score;
1110 }
1111 }
1112
1113 chain_length++;
1114
d8d1f30b 1115 rthp = &rth->dst.rt_next;
1da177e4
LT
1116 }
1117
1118 if (cand) {
 1119 /* ip_rt_gc_elasticity used to be the average chain
 1120 * length; when it is exceeded, gc becomes really aggressive.
1121 *
1122 * The second limit is less certain. At the moment it allows
1123 * only 2 entries per bucket. We will see.
1124 */
1125 if (chain_length > ip_rt_gc_elasticity) {
d8d1f30b 1126 *candp = cand->dst.rt_next;
1da177e4
LT
1127 rt_free(cand);
1128 }
1080d709 1129 } else {
98376387
ED
1130 if (chain_length > rt_chain_length_max &&
1131 slow_chain_length(rt_hash_table[hash].chain) > rt_chain_length_max) {
d8d1f30b 1132 struct net *net = dev_net(rt->dst.dev);
1080d709 1133 int num = ++net->ipv4.current_rt_cache_rebuild_count;
b35ecb5d 1134 if (!rt_caching(net)) {
1080d709 1135 printk(KERN_WARNING "%s: %d rebuilds is over limit, route caching disabled\n",
d8d1f30b 1136 rt->dst.dev->name, num);
1080d709 1137 }
b35ecb5d 1138 rt_emergency_hash_rebuild(net);
6a2bad70
PE
1139 spin_unlock_bh(rt_hash_lock_addr(hash));
1140
1141 hash = rt_hash(rt->fl.fl4_dst, rt->fl.fl4_src,
1142 ifindex, rt_genid(net));
1143 goto restart;
1080d709 1144 }
1da177e4
LT
1145 }
1146
1147 /* Try to bind route to arp only if it is output
1148 route or unicast forwarding path.
1149 */
c7537967 1150 if (rt->rt_type == RTN_UNICAST || rt_is_output_route(rt)) {
d8d1f30b 1151 int err = arp_bind_neighbour(&rt->dst);
1da177e4 1152 if (err) {
22c047cc 1153 spin_unlock_bh(rt_hash_lock_addr(hash));
1da177e4
LT
1154
1155 if (err != -ENOBUFS) {
1156 rt_drop(rt);
1157 return err;
1158 }
1159
1160 /* Neighbour tables are full and nothing
1161 can be released. Try to shrink route cache,
1162 it is most likely it holds some neighbour records.
1163 */
1164 if (attempts-- > 0) {
1165 int saved_elasticity = ip_rt_gc_elasticity;
1166 int saved_int = ip_rt_gc_min_interval;
1167 ip_rt_gc_elasticity = 1;
1168 ip_rt_gc_min_interval = 0;
569d3645 1169 rt_garbage_collect(&ipv4_dst_ops);
1da177e4
LT
1170 ip_rt_gc_min_interval = saved_int;
1171 ip_rt_gc_elasticity = saved_elasticity;
1172 goto restart;
1173 }
1174
1175 if (net_ratelimit())
7e1b33e5 1176 printk(KERN_WARNING "ipv4: Neighbour table overflow.\n");
1da177e4
LT
1177 rt_drop(rt);
1178 return -ENOBUFS;
1179 }
1180 }
1181
d8d1f30b 1182 rt->dst.rt_next = rt_hash_table[hash].chain;
1080d709 1183
1da177e4 1184#if RT_CACHE_DEBUG >= 2
d8d1f30b 1185 if (rt->dst.rt_next) {
1da177e4 1186 struct rtable *trt;
b6280b47
NH
1187 printk(KERN_DEBUG "rt_cache @%02x: %pI4",
1188 hash, &rt->rt_dst);
d8d1f30b 1189 for (trt = rt->dst.rt_next; trt; trt = trt->dst.rt_next)
673d57e7 1190 printk(" . %pI4", &trt->rt_dst);
1da177e4
LT
1191 printk("\n");
1192 }
1193#endif
00269b54
ED
1194 /*
1195 * Since lookup is lockfree, we must make sure
 1196 * previous writes to rt are committed to memory
 1197 * before making rt visible to other CPUs.
1198 */
1ddbcb00 1199 rcu_assign_pointer(rt_hash_table[hash].chain, rt);
1080d709 1200
22c047cc 1201 spin_unlock_bh(rt_hash_lock_addr(hash));
73e42897 1202
b6280b47 1203skip_hashing:
511c3f92
ED
1204 if (rp)
1205 *rp = rt;
1206 else
d8d1f30b 1207 skb_dst_set(skb, &rt->dst);
1da177e4
LT
1208 return 0;
1209}
1210
6431cbc2
DM
1211static atomic_t __rt_peer_genid = ATOMIC_INIT(0);
1212
1213static u32 rt_peer_genid(void)
1214{
1215 return atomic_read(&__rt_peer_genid);
1216}
1217
1da177e4
LT
1218void rt_bind_peer(struct rtable *rt, int create)
1219{
1da177e4
LT
1220 struct inet_peer *peer;
1221
b534ecf1 1222 peer = inet_getpeer_v4(rt->rt_dst, create);
1da177e4 1223
49e8ab03 1224 if (peer && cmpxchg(&rt->peer, NULL, peer) != NULL)
1da177e4 1225 inet_putpeer(peer);
6431cbc2
DM
1226 else
1227 rt->rt_peer_genid = rt_peer_genid();
1da177e4
LT
1228}
1229
1230/*
1231 * Peer allocation may fail only in serious out-of-memory conditions. However
 1232 * we can still generate some output.
 1233 * Random ID selection looks a bit dangerous because we have no chance of
 1234 * selecting an ID that is unique within a reasonable period of time.
 1235 * But a broken packet identifier may be better than no packet at all.
1236 */
1237static void ip_select_fb_ident(struct iphdr *iph)
1238{
1239 static DEFINE_SPINLOCK(ip_fb_id_lock);
1240 static u32 ip_fallback_id;
1241 u32 salt;
1242
1243 spin_lock_bh(&ip_fb_id_lock);
e448515c 1244 salt = secure_ip_id((__force __be32)ip_fallback_id ^ iph->daddr);
1da177e4
LT
1245 iph->id = htons(salt & 0xFFFF);
1246 ip_fallback_id = salt;
1247 spin_unlock_bh(&ip_fb_id_lock);
1248}
1249
1250void __ip_select_ident(struct iphdr *iph, struct dst_entry *dst, int more)
1251{
1252 struct rtable *rt = (struct rtable *) dst;
1253
1254 if (rt) {
1255 if (rt->peer == NULL)
1256 rt_bind_peer(rt, 1);
1257
1258 /* If peer is attached to destination, it is never detached,
 1259 so that we do not need to grab a lock to dereference it.
1260 */
1261 if (rt->peer) {
1262 iph->id = htons(inet_getid(rt->peer, more));
1263 return;
1264 }
1265 } else
e905a9ed 1266 printk(KERN_DEBUG "rt_bind_peer(0) @%p\n",
9c2b3328 1267 __builtin_return_address(0));
1da177e4
LT
1268
1269 ip_select_fb_ident(iph);
1270}
4bc2f18b 1271EXPORT_SYMBOL(__ip_select_ident);
1da177e4
LT
1272
1273static void rt_del(unsigned hash, struct rtable *rt)
1274{
1c31720a
ED
1275 struct rtable __rcu **rthp;
1276 struct rtable *aux;
1da177e4 1277
29e75252 1278 rthp = &rt_hash_table[hash].chain;
22c047cc 1279 spin_lock_bh(rt_hash_lock_addr(hash));
1da177e4 1280 ip_rt_put(rt);
1c31720a
ED
1281 while ((aux = rcu_dereference_protected(*rthp,
1282 lockdep_is_held(rt_hash_lock_addr(hash)))) != NULL) {
e84f84f2 1283 if (aux == rt || rt_is_expired(aux)) {
d8d1f30b 1284 *rthp = aux->dst.rt_next;
29e75252
ED
1285 rt_free(aux);
1286 continue;
1da177e4 1287 }
d8d1f30b 1288 rthp = &aux->dst.rt_next;
29e75252 1289 }
22c047cc 1290 spin_unlock_bh(rt_hash_lock_addr(hash));
1da177e4
LT
1291}
1292
ed7865a4 1293/* called in rcu_read_lock() section */
f7655229
AV
1294void ip_rt_redirect(__be32 old_gw, __be32 daddr, __be32 new_gw,
1295 __be32 saddr, struct net_device *dev)
1da177e4 1296{
ed7865a4 1297 struct in_device *in_dev = __in_dev_get_rcu(dev);
f39925db 1298 struct inet_peer *peer;
317805b8 1299 struct net *net;
1da177e4 1300
1da177e4
LT
1301 if (!in_dev)
1302 return;
1303
c346dca1 1304 net = dev_net(dev);
9d4fb27d
JP
1305 if (new_gw == old_gw || !IN_DEV_RX_REDIRECTS(in_dev) ||
1306 ipv4_is_multicast(new_gw) || ipv4_is_lbcast(new_gw) ||
1307 ipv4_is_zeronet(new_gw))
1da177e4
LT
1308 goto reject_redirect;
1309
1310 if (!IN_DEV_SHARED_MEDIA(in_dev)) {
1311 if (!inet_addr_onlink(in_dev, new_gw, old_gw))
1312 goto reject_redirect;
1313 if (IN_DEV_SEC_REDIRECTS(in_dev) && ip_fib_check_default(new_gw, dev))
1314 goto reject_redirect;
1315 } else {
317805b8 1316 if (inet_addr_type(net, new_gw) != RTN_UNICAST)
1da177e4
LT
1317 goto reject_redirect;
1318 }
1319
f39925db
DM
1320 peer = inet_getpeer_v4(daddr, 1);
1321 if (peer) {
1322 peer->redirect_learned.a4 = new_gw;
e905a9ed 1323
f39925db 1324 inet_putpeer(peer);
1da177e4 1325
f39925db 1326 atomic_inc(&__rt_peer_genid);
1da177e4 1327 }
1da177e4
LT
1328 return;
1329
1330reject_redirect:
1331#ifdef CONFIG_IP_ROUTE_VERBOSE
1332 if (IN_DEV_LOG_MARTIANS(in_dev) && net_ratelimit())
673d57e7
HH
1333 printk(KERN_INFO "Redirect from %pI4 on %s about %pI4 ignored.\n"
1334 " Advised path = %pI4 -> %pI4\n",
1335 &old_gw, dev->name, &new_gw,
1336 &saddr, &daddr);
1da177e4 1337#endif
ed7865a4 1338 ;
1da177e4
LT
1339}
1340
1341static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst)
1342{
ee6b9673 1343 struct rtable *rt = (struct rtable *)dst;
1da177e4
LT
1344 struct dst_entry *ret = dst;
1345
1346 if (rt) {
d11a4dc1 1347 if (dst->obsolete > 0) {
1da177e4
LT
1348 ip_rt_put(rt);
1349 ret = NULL;
2c8cec5c 1350 } else if (rt->rt_flags & RTCF_REDIRECTED) {
8c7bc840 1351 unsigned hash = rt_hash(rt->fl.fl4_dst, rt->fl.fl4_src,
b00180de 1352 rt->fl.oif,
e84f84f2 1353 rt_genid(dev_net(dst->dev)));
1da177e4 1354#if RT_CACHE_DEBUG >= 1
673d57e7
HH
1355 printk(KERN_DEBUG "ipv4_negative_advice: redirect to %pI4/%02x dropped\n",
1356 &rt->rt_dst, rt->fl.fl4_tos);
1da177e4
LT
1357#endif
1358 rt_del(hash, rt);
1359 ret = NULL;
2c8cec5c
DM
1360 } else if (rt->peer &&
1361 rt->peer->pmtu_expires &&
1362 time_after_eq(jiffies, rt->peer->pmtu_expires)) {
1363 unsigned long orig = rt->peer->pmtu_expires;
1364
1365 if (cmpxchg(&rt->peer->pmtu_expires, orig, 0) == orig)
1366 dst_metric_set(dst, RTAX_MTU,
1367 rt->peer->pmtu_orig);
1da177e4
LT
1368 }
1369 }
1370 return ret;
1371}
1372
1373/*
1374 * Algorithm:
1375 * 1. The first ip_rt_redirect_number redirects are sent
1376 * with exponential backoff, then we stop sending them at all,
1377 * assuming that the host ignores our redirects.
1378 * 2. If we did not see packets requiring redirects
1379 * during ip_rt_redirect_silence, we assume that the host
 1380 * forgot the redirected route, and we start sending redirects again.
1381 *
1382 * This algorithm is much cheaper and more intelligent than dumb load limiting
1383 * in icmp.c.
1384 *
1385 * NOTE. Do not forget to inhibit load limiting for redirects (redundant)
1386 * and "frag. need" (breaks PMTU discovery) in icmp.c.
1387 */
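/* Added numeric sketch (hedged, using the default tunables defined near the
 * top of this file): ip_rt_redirect_load is HZ/50 (~20ms), so after the first
 * redirect the gaps enforced between redirects to one peer roughly double:
 *
 *	40ms, 80ms, 160ms, ...	(ip_rt_redirect_load << rate_tokens)
 *
 * until rate_tokens reaches ip_rt_redirect_number (9); after that nothing is
 * sent until the peer generates no redirect-worthy packets for
 * ip_rt_redirect_silence (~20 seconds), which resets rate_tokens to 0.
 */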
1388
1389void ip_rt_send_redirect(struct sk_buff *skb)
1390{
511c3f92 1391 struct rtable *rt = skb_rtable(skb);
30038fc6 1392 struct in_device *in_dev;
92d86829 1393 struct inet_peer *peer;
30038fc6 1394 int log_martians;
1da177e4 1395
30038fc6 1396 rcu_read_lock();
d8d1f30b 1397 in_dev = __in_dev_get_rcu(rt->dst.dev);
30038fc6
ED
1398 if (!in_dev || !IN_DEV_TX_REDIRECTS(in_dev)) {
1399 rcu_read_unlock();
1da177e4 1400 return;
30038fc6
ED
1401 }
1402 log_martians = IN_DEV_LOG_MARTIANS(in_dev);
1403 rcu_read_unlock();
1da177e4 1404
92d86829
DM
1405 if (!rt->peer)
1406 rt_bind_peer(rt, 1);
1407 peer = rt->peer;
1408 if (!peer) {
1409 icmp_send(skb, ICMP_REDIRECT, ICMP_REDIR_HOST, rt->rt_gateway);
1410 return;
1411 }
1412
1da177e4
LT
1413 /* No redirected packets during ip_rt_redirect_silence;
1414 * reset the algorithm.
1415 */
92d86829
DM
1416 if (time_after(jiffies, peer->rate_last + ip_rt_redirect_silence))
1417 peer->rate_tokens = 0;
1da177e4
LT
1418
1419 /* Too many ignored redirects; do not send anything
d8d1f30b 1420 * set dst.rate_last to the last seen redirected packet.
1da177e4 1421 */
92d86829
DM
1422 if (peer->rate_tokens >= ip_rt_redirect_number) {
1423 peer->rate_last = jiffies;
30038fc6 1424 return;
1da177e4
LT
1425 }
1426
1427 /* Check for load limit; set rate_last to the latest sent
1428 * redirect.
1429 */
92d86829 1430 if (peer->rate_tokens == 0 ||
14fb8a76 1431 time_after(jiffies,
92d86829
DM
1432 (peer->rate_last +
1433 (ip_rt_redirect_load << peer->rate_tokens)))) {
1da177e4 1434 icmp_send(skb, ICMP_REDIRECT, ICMP_REDIR_HOST, rt->rt_gateway);
92d86829
DM
1435 peer->rate_last = jiffies;
1436 ++peer->rate_tokens;
1da177e4 1437#ifdef CONFIG_IP_ROUTE_VERBOSE
30038fc6 1438 if (log_martians &&
92d86829 1439 peer->rate_tokens == ip_rt_redirect_number &&
1da177e4 1440 net_ratelimit())
673d57e7
HH
1441 printk(KERN_WARNING "host %pI4/if%d ignores redirects for %pI4 to %pI4.\n",
1442 &rt->rt_src, rt->rt_iif,
1443 &rt->rt_dst, &rt->rt_gateway);
1da177e4
LT
1444#endif
1445 }
1da177e4
LT
1446}
1447
1448static int ip_error(struct sk_buff *skb)
1449{
511c3f92 1450 struct rtable *rt = skb_rtable(skb);
92d86829 1451 struct inet_peer *peer;
1da177e4 1452 unsigned long now;
92d86829 1453 bool send;
1da177e4
LT
1454 int code;
1455
d8d1f30b 1456 switch (rt->dst.error) {
1da177e4
LT
1457 case EINVAL:
1458 default:
1459 goto out;
1460 case EHOSTUNREACH:
1461 code = ICMP_HOST_UNREACH;
1462 break;
1463 case ENETUNREACH:
1464 code = ICMP_NET_UNREACH;
d8d1f30b 1465 IP_INC_STATS_BH(dev_net(rt->dst.dev),
7c73a6fa 1466 IPSTATS_MIB_INNOROUTES);
1da177e4
LT
1467 break;
1468 case EACCES:
1469 code = ICMP_PKT_FILTERED;
1470 break;
1471 }
1472
92d86829
DM
1473 if (!rt->peer)
1474 rt_bind_peer(rt, 1);
1475 peer = rt->peer;
1476
1477 send = true;
1478 if (peer) {
1479 now = jiffies;
1480 peer->rate_tokens += now - peer->rate_last;
1481 if (peer->rate_tokens > ip_rt_error_burst)
1482 peer->rate_tokens = ip_rt_error_burst;
1483 peer->rate_last = now;
1484 if (peer->rate_tokens >= ip_rt_error_cost)
1485 peer->rate_tokens -= ip_rt_error_cost;
1486 else
1487 send = false;
1da177e4 1488 }
92d86829
DM
1489 if (send)
1490 icmp_send(skb, ICMP_DEST_UNREACH, code, 0);
1da177e4
LT
1491
1492out: kfree_skb(skb);
1493 return 0;
e905a9ed 1494}
1da177e4
LT
1495
1496/*
1497 * The last two values are not from the RFC but
1498 * are needed for AMPRnet AX.25 paths.
1499 */
1500
9b5b5cff 1501static const unsigned short mtu_plateau[] =
1da177e4
LT
1502{32000, 17914, 8166, 4352, 2002, 1492, 576, 296, 216, 128 };
1503
5969f71d 1504static inline unsigned short guess_mtu(unsigned short old_mtu)
1da177e4
LT
1505{
1506 int i;
e905a9ed 1507
1da177e4
LT
1508 for (i = 0; i < ARRAY_SIZE(mtu_plateau); i++)
1509 if (old_mtu > mtu_plateau[i])
1510 return mtu_plateau[i];
1511 return 68;
1512}
1513
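/* Added worked example: when the ICMP "fragmentation needed" message carries
 * no usable next-hop MTU, ip_rt_frag_needed() below falls back to guessing
 * from the old packet length, e.g.
 *
 *	guess_mtu(1500) == 1492		// largest plateau strictly below 1500
 *	guess_mtu(600)  == 576
 *	guess_mtu(100)  == 68		// nothing below the smallest plateau
 */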
b5921910 1514unsigned short ip_rt_frag_needed(struct net *net, struct iphdr *iph,
0010e465
TT
1515 unsigned short new_mtu,
1516 struct net_device *dev)
1da177e4 1517{
1da177e4 1518 unsigned short old_mtu = ntohs(iph->tot_len);
1da177e4 1519 unsigned short est_mtu = 0;
2c8cec5c 1520 struct inet_peer *peer;
1da177e4 1521
2c8cec5c
DM
1522 peer = inet_getpeer_v4(iph->daddr, 1);
1523 if (peer) {
1524 unsigned short mtu = new_mtu;
1da177e4 1525
2c8cec5c
DM
1526 if (new_mtu < 68 || new_mtu >= old_mtu) {
1527 /* BSD 4.2 derived systems incorrectly adjust
1528 * tot_len by the IP header length, and report
1529 * a zero MTU in the ICMP message.
1530 */
1531 if (mtu == 0 &&
1532 old_mtu >= 68 + (iph->ihl << 2))
1533 old_mtu -= iph->ihl << 2;
1534 mtu = guess_mtu(old_mtu);
1535 }
0010e465 1536
2c8cec5c
DM
1537 if (mtu < ip_rt_min_pmtu)
1538 mtu = ip_rt_min_pmtu;
1539 if (!peer->pmtu_expires || mtu < peer->pmtu_learned) {
1540 est_mtu = mtu;
1541 peer->pmtu_learned = mtu;
1542 peer->pmtu_expires = jiffies + ip_rt_mtu_expires;
1543 }
1da177e4 1544
2c8cec5c 1545 inet_putpeer(peer);
1da177e4 1546
2c8cec5c 1547 atomic_inc(&__rt_peer_genid);
1da177e4
LT
1548 }
1549 return est_mtu ? : new_mtu;
1550}
1551
2c8cec5c
DM
1552static void check_peer_pmtu(struct dst_entry *dst, struct inet_peer *peer)
1553{
1554 unsigned long expires = peer->pmtu_expires;
1555
1556 if (time_before(expires, jiffies)) {
1557 u32 orig_dst_mtu = dst_mtu(dst);
1558 if (peer->pmtu_learned < orig_dst_mtu) {
1559 if (!peer->pmtu_orig)
1560 peer->pmtu_orig = dst_metric_raw(dst, RTAX_MTU);
1561 dst_metric_set(dst, RTAX_MTU, peer->pmtu_learned);
1562 }
1563 } else if (cmpxchg(&peer->pmtu_expires, expires, 0) == expires)
1564 dst_metric_set(dst, RTAX_MTU, peer->pmtu_orig);
1565}
1566
1da177e4
LT
1567static void ip_rt_update_pmtu(struct dst_entry *dst, u32 mtu)
1568{
2c8cec5c
DM
1569 struct rtable *rt = (struct rtable *) dst;
1570 struct inet_peer *peer;
1571
1572 dst_confirm(dst);
1573
1574 if (!rt->peer)
1575 rt_bind_peer(rt, 1);
1576 peer = rt->peer;
1577 if (peer) {
1578 if (mtu < ip_rt_min_pmtu)
1da177e4 1579 mtu = ip_rt_min_pmtu;
2c8cec5c
DM
1580 if (!peer->pmtu_expires || mtu < peer->pmtu_learned) {
1581 peer->pmtu_learned = mtu;
1582 peer->pmtu_expires = jiffies + ip_rt_mtu_expires;
1583
1584 atomic_inc(&__rt_peer_genid);
1585 rt->rt_peer_genid = rt_peer_genid();
1586
1587 check_peer_pmtu(dst, peer);
1da177e4 1588 }
2c8cec5c 1589 inet_putpeer(peer);
1da177e4
LT
1590 }
1591}
1592
f39925db
DM
1593static int check_peer_redir(struct dst_entry *dst, struct inet_peer *peer)
1594{
1595 struct rtable *rt = (struct rtable *) dst;
1596 __be32 orig_gw = rt->rt_gateway;
1597
1598 dst_confirm(&rt->dst);
1599
1600 neigh_release(rt->dst.neighbour);
1601 rt->dst.neighbour = NULL;
1602
1603 rt->rt_gateway = peer->redirect_learned.a4;
1604 if (arp_bind_neighbour(&rt->dst) ||
1605 !(rt->dst.neighbour->nud_state & NUD_VALID)) {
1606 if (rt->dst.neighbour)
1607 neigh_event_send(rt->dst.neighbour, NULL);
1608 rt->rt_gateway = orig_gw;
1609 return -EAGAIN;
1610 } else {
1611 rt->rt_flags |= RTCF_REDIRECTED;
1612 call_netevent_notifiers(NETEVENT_NEIGH_UPDATE,
1613 rt->dst.neighbour);
1614 }
1615 return 0;
1616}
1617
1da177e4
LT
1618static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie)
1619{
6431cbc2
DM
1620 struct rtable *rt = (struct rtable *) dst;
1621
1622 if (rt_is_expired(rt))
d11a4dc1 1623 return NULL;
6431cbc2 1624 if (rt->rt_peer_genid != rt_peer_genid()) {
2c8cec5c
DM
1625 struct inet_peer *peer;
1626
6431cbc2
DM
1627 if (!rt->peer)
1628 rt_bind_peer(rt, 0);
1629
2c8cec5c
DM
1630 peer = rt->peer;
1631 if (peer && peer->pmtu_expires)
1632 check_peer_pmtu(dst, peer);
1633
f39925db
DM
1634 if (peer && peer->redirect_learned.a4 &&
1635 peer->redirect_learned.a4 != rt->rt_gateway) {
1636 if (check_peer_redir(dst, peer))
1637 return NULL;
1638 }
1639
6431cbc2
DM
1640 rt->rt_peer_genid = rt_peer_genid();
1641 }
d11a4dc1 1642 return dst;
1da177e4
LT
1643}
1644
1645static void ipv4_dst_destroy(struct dst_entry *dst)
1646{
1647 struct rtable *rt = (struct rtable *) dst;
1648 struct inet_peer *peer = rt->peer;
1da177e4 1649
62fa8a84
DM
1650 if (rt->fi) {
1651 fib_info_put(rt->fi);
1652 rt->fi = NULL;
1653 }
1da177e4
LT
1654 if (peer) {
1655 rt->peer = NULL;
1656 inet_putpeer(peer);
1657 }
1da177e4
LT
1658}
1659
1da177e4
LT
1660
1661static void ipv4_link_failure(struct sk_buff *skb)
1662{
1663 struct rtable *rt;
1664
1665 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_UNREACH, 0);
1666
511c3f92 1667 rt = skb_rtable(skb);
2c8cec5c
DM
1668 if (rt &&
1669 rt->peer &&
1670 rt->peer->pmtu_expires) {
1671 unsigned long orig = rt->peer->pmtu_expires;
1672
1673 if (cmpxchg(&rt->peer->pmtu_expires, orig, 0) == orig)
1674 dst_metric_set(&rt->dst, RTAX_MTU, rt->peer->pmtu_orig);
1675 }
1da177e4
LT
1676}
1677
1678static int ip_rt_bug(struct sk_buff *skb)
1679{
673d57e7
HH
1680 printk(KERN_DEBUG "ip_rt_bug: %pI4 -> %pI4, %s\n",
1681 &ip_hdr(skb)->saddr, &ip_hdr(skb)->daddr,
1da177e4
LT
1682 skb->dev ? skb->dev->name : "?");
1683 kfree_skb(skb);
1684 return 0;
1685}
1686
1687/*
1688 We do not cache source address of outgoing interface,
1689 because it is used only by IP RR, TS and SRR options,
 1690 so it stays out of the fast path.
 1691
 1692 BTW remember: "addr" is allowed to be unaligned
1693 in IP options!
1694 */
1695
1696void ip_rt_get_source(u8 *addr, struct rtable *rt)
1697{
a61ced5d 1698 __be32 src;
1da177e4
LT
1699 struct fib_result res;
1700
c7537967 1701 if (rt_is_output_route(rt))
1da177e4 1702 src = rt->rt_src;
ebc0ffae
ED
1703 else {
1704 rcu_read_lock();
1705 if (fib_lookup(dev_net(rt->dst.dev), &rt->fl, &res) == 0)
1706 src = FIB_RES_PREFSRC(res);
1707 else
1708 src = inet_select_addr(rt->dst.dev, rt->rt_gateway,
1da177e4 1709 RT_SCOPE_UNIVERSE);
ebc0ffae
ED
1710 rcu_read_unlock();
1711 }
1da177e4
LT
1712 memcpy(addr, &src, 4);
1713}
1714
c7066f70 1715#ifdef CONFIG_IP_ROUTE_CLASSID
1da177e4
LT
1716static void set_class_tag(struct rtable *rt, u32 tag)
1717{
d8d1f30b
CG
1718 if (!(rt->dst.tclassid & 0xFFFF))
1719 rt->dst.tclassid |= tag & 0xFFFF;
1720 if (!(rt->dst.tclassid & 0xFFFF0000))
1721 rt->dst.tclassid |= tag & 0xFFFF0000;
1da177e4
LT
1722}
1723#endif
1724
0dbaee3b
DM
1725static unsigned int ipv4_default_advmss(const struct dst_entry *dst)
1726{
1727 unsigned int advmss = dst_metric_raw(dst, RTAX_ADVMSS);
1728
1729 if (advmss == 0) {
1730 advmss = max_t(unsigned int, dst->dev->mtu - 40,
1731 ip_rt_min_advmss);
1732 if (advmss > 65535 - 40)
1733 advmss = 65535 - 40;
1734 }
1735 return advmss;
1736}
1737
d33e4553
DM
1738static unsigned int ipv4_default_mtu(const struct dst_entry *dst)
1739{
1740 unsigned int mtu = dst->dev->mtu;
1741
1742 if (unlikely(dst_metric_locked(dst, RTAX_MTU))) {
1743 const struct rtable *rt = (const struct rtable *) dst;
1744
1745 if (rt->rt_gateway != rt->rt_dst && mtu > 576)
1746 mtu = 576;
1747 }
1748
1749 if (mtu > IP_MAX_MTU)
1750 mtu = IP_MAX_MTU;
1751
1752 return mtu;
1753}
1754
a4daad6b
DM
1755static void rt_init_metrics(struct rtable *rt, struct fib_info *fi)
1756{
0131ba45
DM
1757 struct inet_peer *peer;
1758 int create = 0;
a4daad6b 1759
0131ba45
DM
1760 /* If a peer entry exists for this destination, we must hook
1761 * it up in order to get at cached metrics.
1762 */
1763 if (rt->fl.flags & FLOWI_FLAG_PRECOW_METRICS)
1764 create = 1;
1765
1766 rt_bind_peer(rt, create);
1767 peer = rt->peer;
1768 if (peer) {
a4daad6b
DM
1769 if (inet_metrics_new(peer))
1770 memcpy(peer->metrics, fi->fib_metrics,
1771 sizeof(u32) * RTAX_MAX);
1772 dst_init_metrics(&rt->dst, peer->metrics, false);
2c8cec5c
DM
1773
1774 if (peer->pmtu_expires)
1775 check_peer_pmtu(&rt->dst, peer);
f39925db
DM
1776 if (peer->redirect_learned.a4 &&
1777 peer->redirect_learned.a4 != rt->rt_gateway) {
1778 rt->rt_gateway = peer->redirect_learned.a4;
1779 rt->rt_flags |= RTCF_REDIRECTED;
1780 }
0131ba45
DM
1781 } else {
1782 if (fi->fib_metrics != (u32 *) dst_default_metrics) {
1783 rt->fi = fi;
1784 atomic_inc(&fi->fib_clntref);
1785 }
1786 dst_init_metrics(&rt->dst, fi->fib_metrics, true);
a4daad6b
DM
1787 }
1788}
1789
982721f3
DM
1790static void rt_set_nexthop(struct rtable *rt, const struct fib_result *res,
1791 struct fib_info *fi, u16 type, u32 itag)
1da177e4 1792{
defb3519 1793 struct dst_entry *dst = &rt->dst;
1da177e4
LT
1794
1795 if (fi) {
1796 if (FIB_RES_GW(*res) &&
1797 FIB_RES_NH(*res).nh_scope == RT_SCOPE_LINK)
1798 rt->rt_gateway = FIB_RES_GW(*res);
a4daad6b 1799 rt_init_metrics(rt, fi);
c7066f70 1800#ifdef CONFIG_IP_ROUTE_CLASSID
defb3519 1801 dst->tclassid = FIB_RES_NH(*res).nh_tclassid;
1da177e4 1802#endif
d33e4553 1803 }
defb3519 1804
defb3519
DM
1805 if (dst_mtu(dst) > IP_MAX_MTU)
1806 dst_metric_set(dst, RTAX_MTU, IP_MAX_MTU);
0dbaee3b 1807 if (dst_metric_raw(dst, RTAX_ADVMSS) > 65535 - 40)
defb3519 1808 dst_metric_set(dst, RTAX_ADVMSS, 65535 - 40);
1da177e4 1809
c7066f70 1810#ifdef CONFIG_IP_ROUTE_CLASSID
1da177e4
LT
1811#ifdef CONFIG_IP_MULTIPLE_TABLES
1812 set_class_tag(rt, fib_rules_tclass(res));
1813#endif
1814 set_class_tag(rt, itag);
1815#endif
982721f3 1816 rt->rt_type = type;
1da177e4
LT
1817}
1818
0c4dcd58
DM
1819static struct rtable *rt_dst_alloc(bool nopolicy, bool noxfrm)
1820{
3c7bd1a1 1821 struct rtable *rt = dst_alloc(&ipv4_dst_ops, 1);
0c4dcd58
DM
1822 if (rt) {
1823 rt->dst.obsolete = -1;
1824
0c4dcd58
DM
1825 rt->dst.flags = DST_HOST |
1826 (nopolicy ? DST_NOPOLICY : 0) |
1827 (noxfrm ? DST_NOXFRM : 0);
1828 }
1829 return rt;
1830}
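/* Hedged usage sketch (mirrors the callers further down in this file):
 *
 *	rth = rt_dst_alloc(IN_DEV_CONF_GET(in_dev, NOPOLICY),
 *			   IN_DEV_CONF_GET(out_dev, NOXFRM));
 *	if (!rth)
 *		return -ENOBUFS;
 *
 * DST_NOPOLICY marks the entry as exempt from IPsec policy checks and
 * DST_NOXFRM skips xfrm transformation lookups on output for traffic that
 * uses this cache entry.
 */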
1831
96d36220 1832/* called in rcu_read_lock() section */
9e12bb22 1833static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr,
1da177e4
LT
1834 u8 tos, struct net_device *dev, int our)
1835{
96d36220 1836 unsigned int hash;
1da177e4 1837 struct rtable *rth;
a61ced5d 1838 __be32 spec_dst;
96d36220 1839 struct in_device *in_dev = __in_dev_get_rcu(dev);
1da177e4 1840 u32 itag = 0;
b5f7e755 1841 int err;
1da177e4
LT
1842
1843 /* Primary sanity checks. */
1844
1845 if (in_dev == NULL)
1846 return -EINVAL;
1847
1e637c74 1848 if (ipv4_is_multicast(saddr) || ipv4_is_lbcast(saddr) ||
f97c1e0c 1849 ipv4_is_loopback(saddr) || skb->protocol != htons(ETH_P_IP))
1da177e4
LT
1850 goto e_inval;
1851
f97c1e0c
JP
1852 if (ipv4_is_zeronet(saddr)) {
1853 if (!ipv4_is_local_multicast(daddr))
1da177e4
LT
1854 goto e_inval;
1855 spec_dst = inet_select_addr(dev, 0, RT_SCOPE_LINK);
b5f7e755
ED
1856 } else {
1857 err = fib_validate_source(saddr, 0, tos, 0, dev, &spec_dst,
1858 &itag, 0);
1859 if (err < 0)
1860 goto e_err;
1861 }
0c4dcd58 1862 rth = rt_dst_alloc(IN_DEV_CONF_GET(in_dev, NOPOLICY), false);
1da177e4
LT
1863 if (!rth)
1864 goto e_nobufs;
1865
d8d1f30b 1866 rth->dst.output = ip_rt_bug;
1da177e4 1867
1da177e4
LT
1868 rth->fl.fl4_dst = daddr;
1869 rth->rt_dst = daddr;
1870 rth->fl.fl4_tos = tos;
47dcf0cb 1871 rth->fl.mark = skb->mark;
1da177e4
LT
1872 rth->fl.fl4_src = saddr;
1873 rth->rt_src = saddr;
c7066f70 1874#ifdef CONFIG_IP_ROUTE_CLASSID
d8d1f30b 1875 rth->dst.tclassid = itag;
1da177e4
LT
1876#endif
1877 rth->rt_iif =
1878 rth->fl.iif = dev->ifindex;
d8d1f30b
CG
1879 rth->dst.dev = init_net.loopback_dev;
1880 dev_hold(rth->dst.dev);
1da177e4
LT
1881 rth->fl.oif = 0;
1882 rth->rt_gateway = daddr;
1883 rth->rt_spec_dst= spec_dst;
e84f84f2 1884 rth->rt_genid = rt_genid(dev_net(dev));
1da177e4 1885 rth->rt_flags = RTCF_MULTICAST;
29e75252 1886 rth->rt_type = RTN_MULTICAST;
1da177e4 1887 if (our) {
d8d1f30b 1888 rth->dst.input= ip_local_deliver;
1da177e4
LT
1889 rth->rt_flags |= RTCF_LOCAL;
1890 }
1891
1892#ifdef CONFIG_IP_MROUTE
f97c1e0c 1893 if (!ipv4_is_local_multicast(daddr) && IN_DEV_MFORWARD(in_dev))
d8d1f30b 1894 rth->dst.input = ip_mr_input;
1da177e4
LT
1895#endif
1896 RT_CACHE_STAT_INC(in_slow_mc);
1897
e84f84f2 1898 hash = rt_hash(daddr, saddr, dev->ifindex, rt_genid(dev_net(dev)));
6a2bad70 1899 return rt_intern_hash(hash, rth, NULL, skb, dev->ifindex);
1da177e4
LT
1900
1901e_nobufs:
1da177e4 1902 return -ENOBUFS;
1da177e4 1903e_inval:
96d36220 1904 return -EINVAL;
b5f7e755 1905e_err:
b5f7e755 1906 return err;
1da177e4
LT
1907}
1908
1909
1910static void ip_handle_martian_source(struct net_device *dev,
1911 struct in_device *in_dev,
1912 struct sk_buff *skb,
9e12bb22
AV
1913 __be32 daddr,
1914 __be32 saddr)
1da177e4
LT
1915{
1916 RT_CACHE_STAT_INC(in_martian_src);
1917#ifdef CONFIG_IP_ROUTE_VERBOSE
1918 if (IN_DEV_LOG_MARTIANS(in_dev) && net_ratelimit()) {
1919 /*
1920 * RFC1812 recommendation: if the source is martian,
1921 * the only hint is the MAC header.
1922 */
673d57e7
HH
1923 printk(KERN_WARNING "martian source %pI4 from %pI4, on dev %s\n",
1924 &daddr, &saddr, dev->name);
98e399f8 1925 if (dev->hard_header_len && skb_mac_header_was_set(skb)) {
1da177e4 1926 int i;
98e399f8 1927 const unsigned char *p = skb_mac_header(skb);
1da177e4
LT
1928 printk(KERN_WARNING "ll header: ");
1929 for (i = 0; i < dev->hard_header_len; i++, p++) {
1930 printk("%02x", *p);
1931 if (i < (dev->hard_header_len - 1))
1932 printk(":");
1933 }
1934 printk("\n");
1935 }
1936 }
1937#endif
1938}
1939
47360228 1940/* called in rcu_read_lock() section */
5969f71d 1941static int __mkroute_input(struct sk_buff *skb,
982721f3 1942 const struct fib_result *res,
5969f71d
SH
1943 struct in_device *in_dev,
1944 __be32 daddr, __be32 saddr, u32 tos,
1945 struct rtable **result)
1da177e4 1946{
1da177e4
LT
1947 struct rtable *rth;
1948 int err;
1949 struct in_device *out_dev;
47360228 1950 unsigned int flags = 0;
d9c9df8c
AV
1951 __be32 spec_dst;
1952 u32 itag;
1da177e4
LT
1953
1954 /* get a working reference to the output device */
47360228 1955 out_dev = __in_dev_get_rcu(FIB_RES_DEV(*res));
1da177e4
LT
1956 if (out_dev == NULL) {
1957 if (net_ratelimit())
1958 printk(KERN_CRIT "Bug in ip_route_input" \
1959 "_slow(). Please, report\n");
1960 return -EINVAL;
1961 }
1962
1963
e905a9ed 1964 err = fib_validate_source(saddr, daddr, tos, FIB_RES_OIF(*res),
b0c110ca 1965 in_dev->dev, &spec_dst, &itag, skb->mark);
1da177e4 1966 if (err < 0) {
e905a9ed 1967 ip_handle_martian_source(in_dev->dev, in_dev, skb, daddr,
1da177e4 1968 saddr);
e905a9ed 1969
1da177e4
LT
1970 goto cleanup;
1971 }
1972
1973 if (err)
1974 flags |= RTCF_DIRECTSRC;
1975
51b77cae 1976 if (out_dev == in_dev && err &&
1da177e4
LT
1977 (IN_DEV_SHARED_MEDIA(out_dev) ||
1978 inet_addr_onlink(out_dev, saddr, FIB_RES_GW(*res))))
1979 flags |= RTCF_DOREDIRECT;
1980
1981 if (skb->protocol != htons(ETH_P_IP)) {
1982 /* Not IP (i.e. ARP). Do not create a route if it is
1983 * invalid for proxy ARP. DNAT routes are always valid.
65324144
JDB
1984 *
1985 * The proxy ARP feature has been extended to allow ARP
1986 * replies back on the same interface, to support
1987 * Private VLAN switch technologies. See arp.c.
1da177e4 1988 */
65324144
JDB
1989 if (out_dev == in_dev &&
1990 IN_DEV_PROXY_ARP_PVLAN(in_dev) == 0) {
1da177e4
LT
1991 err = -EINVAL;
1992 goto cleanup;
1993 }
1994 }
1995
0c4dcd58
DM
1996 rth = rt_dst_alloc(IN_DEV_CONF_GET(in_dev, NOPOLICY),
1997 IN_DEV_CONF_GET(out_dev, NOXFRM));
1da177e4
LT
1998 if (!rth) {
1999 err = -ENOBUFS;
2000 goto cleanup;
2001 }
2002
1da177e4
LT
2003 rth->fl.fl4_dst = daddr;
2004 rth->rt_dst = daddr;
2005 rth->fl.fl4_tos = tos;
47dcf0cb 2006 rth->fl.mark = skb->mark;
1da177e4
LT
2007 rth->fl.fl4_src = saddr;
2008 rth->rt_src = saddr;
2009 rth->rt_gateway = daddr;
2010 rth->rt_iif =
2011 rth->fl.iif = in_dev->dev->ifindex;
d8d1f30b
CG
2012 rth->dst.dev = (out_dev)->dev;
2013 dev_hold(rth->dst.dev);
1da177e4
LT
2014 rth->fl.oif = 0;
2015 rth->rt_spec_dst= spec_dst;
2016
d8d1f30b
CG
2017 rth->dst.input = ip_forward;
2018 rth->dst.output = ip_output;
2019 rth->rt_genid = rt_genid(dev_net(rth->dst.dev));
1da177e4 2020
982721f3 2021 rt_set_nexthop(rth, res, res->fi, res->type, itag);
1da177e4
LT
2022
2023 rth->rt_flags = flags;
2024
2025 *result = rth;
2026 err = 0;
2027 cleanup:
1da177e4 2028 return err;
e905a9ed 2029}
1da177e4 2030
5969f71d
SH
2031static int ip_mkroute_input(struct sk_buff *skb,
2032 struct fib_result *res,
2033 const struct flowi *fl,
2034 struct in_device *in_dev,
2035 __be32 daddr, __be32 saddr, u32 tos)
1da177e4 2036{
7abaa27c 2037 struct rtable* rth = NULL;
1da177e4
LT
2038 int err;
2039 unsigned hash;
2040
2041#ifdef CONFIG_IP_ROUTE_MULTIPATH
2042 if (res->fi && res->fi->fib_nhs > 1 && fl->oif == 0)
2043 fib_select_multipath(fl, res);
2044#endif
2045
2046 /* create a routing cache entry */
2047 err = __mkroute_input(skb, res, in_dev, daddr, saddr, tos, &rth);
2048 if (err)
2049 return err;
1da177e4
LT
2050
2051 /* put it into the cache */
e84f84f2 2052 hash = rt_hash(daddr, saddr, fl->iif,
d8d1f30b 2053 rt_genid(dev_net(rth->dst.dev)));
6a2bad70 2054 return rt_intern_hash(hash, rth, NULL, skb, fl->iif);
1da177e4
LT
2055}
2056
1da177e4
LT
2057/*
2058 * NOTE. We drop all packets that have local source
2059 * addresses, because every properly looped back packet
2060 * must have the correct destination already attached by the output routine.
2061 *
2062 * Such an approach solves two big problems:
2063 * 1. Non-simplex devices are handled properly.
2064 * 2. IP spoofing attempts are filtered with a 100% guarantee.
ebc0ffae 2065 * called with rcu_read_lock()
1da177e4
LT
2066 */
2067
9e12bb22 2068static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr,
1da177e4
LT
2069 u8 tos, struct net_device *dev)
2070{
2071 struct fib_result res;
96d36220 2072 struct in_device *in_dev = __in_dev_get_rcu(dev);
5811662b
CG
2073 struct flowi fl = { .fl4_dst = daddr,
2074 .fl4_src = saddr,
2075 .fl4_tos = tos,
2076 .fl4_scope = RT_SCOPE_UNIVERSE,
47dcf0cb 2077 .mark = skb->mark,
1da177e4
LT
2078 .iif = dev->ifindex };
2079 unsigned flags = 0;
2080 u32 itag = 0;
2081 struct rtable * rth;
2082 unsigned hash;
9e12bb22 2083 __be32 spec_dst;
1da177e4 2084 int err = -EINVAL;
c346dca1 2085 struct net * net = dev_net(dev);
1da177e4
LT
2086
2087 /* IP on this device is disabled. */
2088
2089 if (!in_dev)
2090 goto out;
2091
2092 /* Check for the most weird martians, which cannot be detected
2093 by fib_lookup.
2094 */
2095
1e637c74 2096 if (ipv4_is_multicast(saddr) || ipv4_is_lbcast(saddr) ||
f97c1e0c 2097 ipv4_is_loopback(saddr))
1da177e4
LT
2098 goto martian_source;
2099
27a954bd 2100 if (ipv4_is_lbcast(daddr) || (saddr == 0 && daddr == 0))
1da177e4
LT
2101 goto brd_input;
2102
2103 /* Accept zero addresses only to limited broadcast;
2104 * I am not even sure whether to fix this or not. Waiting for complaints :-)
2105 */
f97c1e0c 2106 if (ipv4_is_zeronet(saddr))
1da177e4
LT
2107 goto martian_source;
2108
27a954bd 2109 if (ipv4_is_zeronet(daddr) || ipv4_is_loopback(daddr))
1da177e4
LT
2110 goto martian_destination;
2111
2112 /*
2113 * Now we are ready to route packet.
2114 */
ebc0ffae
ED
2115 err = fib_lookup(net, &fl, &res);
2116 if (err != 0) {
1da177e4 2117 if (!IN_DEV_FORWARD(in_dev))
2c2910a4 2118 goto e_hostunreach;
1da177e4
LT
2119 goto no_route;
2120 }
1da177e4
LT
2121
2122 RT_CACHE_STAT_INC(in_slow_tot);
2123
2124 if (res.type == RTN_BROADCAST)
2125 goto brd_input;
2126
2127 if (res.type == RTN_LOCAL) {
b5f7e755 2128 err = fib_validate_source(saddr, daddr, tos,
ebc0ffae
ED
2129 net->loopback_dev->ifindex,
2130 dev, &spec_dst, &itag, skb->mark);
b5f7e755
ED
2131 if (err < 0)
2132 goto martian_source_keep_err;
2133 if (err)
1da177e4
LT
2134 flags |= RTCF_DIRECTSRC;
2135 spec_dst = daddr;
2136 goto local_input;
2137 }
2138
2139 if (!IN_DEV_FORWARD(in_dev))
2c2910a4 2140 goto e_hostunreach;
1da177e4
LT
2141 if (res.type != RTN_UNICAST)
2142 goto martian_destination;
2143
2144 err = ip_mkroute_input(skb, &res, &fl, in_dev, daddr, saddr, tos);
1da177e4
LT
2145out: return err;
2146
2147brd_input:
2148 if (skb->protocol != htons(ETH_P_IP))
2149 goto e_inval;
2150
f97c1e0c 2151 if (ipv4_is_zeronet(saddr))
1da177e4
LT
2152 spec_dst = inet_select_addr(dev, 0, RT_SCOPE_LINK);
2153 else {
2154 err = fib_validate_source(saddr, 0, tos, 0, dev, &spec_dst,
b0c110ca 2155 &itag, skb->mark);
1da177e4 2156 if (err < 0)
b5f7e755 2157 goto martian_source_keep_err;
1da177e4
LT
2158 if (err)
2159 flags |= RTCF_DIRECTSRC;
2160 }
2161 flags |= RTCF_BROADCAST;
2162 res.type = RTN_BROADCAST;
2163 RT_CACHE_STAT_INC(in_brd);
2164
2165local_input:
0c4dcd58 2166 rth = rt_dst_alloc(IN_DEV_CONF_GET(in_dev, NOPOLICY), false);
1da177e4
LT
2167 if (!rth)
2168 goto e_nobufs;
2169
d8d1f30b 2170 rth->dst.output= ip_rt_bug;
e84f84f2 2171 rth->rt_genid = rt_genid(net);
1da177e4 2172
1da177e4
LT
2173 rth->fl.fl4_dst = daddr;
2174 rth->rt_dst = daddr;
2175 rth->fl.fl4_tos = tos;
47dcf0cb 2176 rth->fl.mark = skb->mark;
1da177e4
LT
2177 rth->fl.fl4_src = saddr;
2178 rth->rt_src = saddr;
c7066f70 2179#ifdef CONFIG_IP_ROUTE_CLASSID
d8d1f30b 2180 rth->dst.tclassid = itag;
1da177e4
LT
2181#endif
2182 rth->rt_iif =
2183 rth->fl.iif = dev->ifindex;
d8d1f30b
CG
2184 rth->dst.dev = net->loopback_dev;
2185 dev_hold(rth->dst.dev);
1da177e4
LT
2186 rth->rt_gateway = daddr;
2187 rth->rt_spec_dst= spec_dst;
d8d1f30b 2188 rth->dst.input= ip_local_deliver;
1da177e4
LT
2189 rth->rt_flags = flags|RTCF_LOCAL;
2190 if (res.type == RTN_UNREACHABLE) {
d8d1f30b
CG
2191 rth->dst.input= ip_error;
2192 rth->dst.error= -err;
1da177e4
LT
2193 rth->rt_flags &= ~RTCF_LOCAL;
2194 }
2195 rth->rt_type = res.type;
e84f84f2 2196 hash = rt_hash(daddr, saddr, fl.iif, rt_genid(net));
6a2bad70 2197 err = rt_intern_hash(hash, rth, NULL, skb, fl.iif);
ebc0ffae 2198 goto out;
1da177e4
LT
2199
2200no_route:
2201 RT_CACHE_STAT_INC(in_no_route);
2202 spec_dst = inet_select_addr(dev, 0, RT_SCOPE_UNIVERSE);
2203 res.type = RTN_UNREACHABLE;
7f53878d
MC
2204 if (err == -ESRCH)
2205 err = -ENETUNREACH;
1da177e4
LT
2206 goto local_input;
2207
2208 /*
2209 * Do not cache martian addresses: they should be logged (RFC1812)
2210 */
2211martian_destination:
2212 RT_CACHE_STAT_INC(in_martian_dst);
2213#ifdef CONFIG_IP_ROUTE_VERBOSE
2214 if (IN_DEV_LOG_MARTIANS(in_dev) && net_ratelimit())
673d57e7
HH
2215 printk(KERN_WARNING "martian destination %pI4 from %pI4, dev %s\n",
2216 &daddr, &saddr, dev->name);
1da177e4 2217#endif
2c2910a4
DE
2218
2219e_hostunreach:
e905a9ed 2220 err = -EHOSTUNREACH;
ebc0ffae 2221 goto out;
2c2910a4 2222
1da177e4
LT
2223e_inval:
2224 err = -EINVAL;
ebc0ffae 2225 goto out;
1da177e4
LT
2226
2227e_nobufs:
2228 err = -ENOBUFS;
ebc0ffae 2229 goto out;
1da177e4
LT
2230
2231martian_source:
b5f7e755
ED
2232 err = -EINVAL;
2233martian_source_keep_err:
1da177e4 2234 ip_handle_martian_source(dev, in_dev, skb, daddr, saddr);
ebc0ffae 2235 goto out;
1da177e4
LT
2236}
2237
407eadd9
ED
2238int ip_route_input_common(struct sk_buff *skb, __be32 daddr, __be32 saddr,
2239 u8 tos, struct net_device *dev, bool noref)
1da177e4
LT
2240{
2241 struct rtable * rth;
2242 unsigned hash;
2243 int iif = dev->ifindex;
b5921910 2244 struct net *net;
96d36220 2245 int res;
1da177e4 2246
c346dca1 2247 net = dev_net(dev);
1080d709 2248
96d36220
ED
2249 rcu_read_lock();
2250
1080d709
NH
2251 if (!rt_caching(net))
2252 goto skip_cache;
2253
1da177e4 2254 tos &= IPTOS_RT_MASK;
e84f84f2 2255 hash = rt_hash(daddr, saddr, iif, rt_genid(net));
1da177e4 2256
1da177e4 2257 for (rth = rcu_dereference(rt_hash_table[hash].chain); rth;
d8d1f30b 2258 rth = rcu_dereference(rth->dst.rt_next)) {
0eae88f3
ED
2259 if ((((__force u32)rth->fl.fl4_dst ^ (__force u32)daddr) |
2260 ((__force u32)rth->fl.fl4_src ^ (__force u32)saddr) |
c0b8c32b
SH
2261 (rth->fl.iif ^ iif) |
2262 rth->fl.oif |
2263 (rth->fl.fl4_tos ^ tos)) == 0 &&
47dcf0cb 2264 rth->fl.mark == skb->mark &&
d8d1f30b 2265 net_eq(dev_net(rth->dst.dev), net) &&
e84f84f2 2266 !rt_is_expired(rth)) {
407eadd9 2267 if (noref) {
d8d1f30b
CG
2268 dst_use_noref(&rth->dst, jiffies);
2269 skb_dst_set_noref(skb, &rth->dst);
407eadd9 2270 } else {
d8d1f30b
CG
2271 dst_use(&rth->dst, jiffies);
2272 skb_dst_set(skb, &rth->dst);
407eadd9 2273 }
1da177e4
LT
2274 RT_CACHE_STAT_INC(in_hit);
2275 rcu_read_unlock();
1da177e4
LT
2276 return 0;
2277 }
2278 RT_CACHE_STAT_INC(in_hlist_search);
2279 }
1da177e4 2280
1080d709 2281skip_cache:
1da177e4
LT
2282 /* Multicast recognition logic is moved from route cache to here.
2283 The problem was that too many Ethernet cards have broken/missing
2284 hardware multicast filters :-( As result the host on multicasting
2285 network acquires a lot of useless route cache entries, sort of
2286 SDR messages from all the world. Now we try to get rid of them.
2287 Really, provided software IP multicast filter is organized
2288 reasonably (at least, hashed), it does not result in a slowdown
2289 comparing with route cache reject entries.
2290 Note, that multicast routers are not affected, because
2291 route cache entry is created eventually.
2292 */
f97c1e0c 2293 if (ipv4_is_multicast(daddr)) {
96d36220 2294 struct in_device *in_dev = __in_dev_get_rcu(dev);
1da177e4 2295
96d36220 2296 if (in_dev) {
1da177e4 2297 int our = ip_check_mc(in_dev, daddr, saddr,
96d36220 2298 ip_hdr(skb)->protocol);
1da177e4
LT
2299 if (our
2300#ifdef CONFIG_IP_MROUTE
9d4fb27d
JP
2301 ||
2302 (!ipv4_is_local_multicast(daddr) &&
2303 IN_DEV_MFORWARD(in_dev))
1da177e4 2304#endif
9d4fb27d 2305 ) {
96d36220
ED
2306 int res = ip_route_input_mc(skb, daddr, saddr,
2307 tos, dev, our);
1da177e4 2308 rcu_read_unlock();
96d36220 2309 return res;
1da177e4
LT
2310 }
2311 }
2312 rcu_read_unlock();
2313 return -EINVAL;
2314 }
96d36220
ED
2315 res = ip_route_input_slow(skb, daddr, saddr, tos, dev);
2316 rcu_read_unlock();
2317 return res;
1da177e4 2318}
407eadd9 2319EXPORT_SYMBOL(ip_route_input_common);
1da177e4 2320
ebc0ffae 2321/* called with rcu_read_lock() */
982721f3 2322static struct rtable *__mkroute_output(const struct fib_result *res,
5ada5527
DM
2323 const struct flowi *fl,
2324 const struct flowi *oldflp,
2325 struct net_device *dev_out,
2326 unsigned int flags)
1da177e4 2327{
982721f3 2328 struct fib_info *fi = res->fi;
1da177e4 2329 u32 tos = RT_FL_TOS(oldflp);
5ada5527 2330 struct in_device *in_dev;
982721f3 2331 u16 type = res->type;
5ada5527 2332 struct rtable *rth;
1da177e4 2333
dd28d1a0 2334 if (ipv4_is_loopback(fl->fl4_src) && !(dev_out->flags & IFF_LOOPBACK))
5ada5527 2335 return ERR_PTR(-EINVAL);
1da177e4 2336
27a954bd 2337 if (ipv4_is_lbcast(fl->fl4_dst))
982721f3 2338 type = RTN_BROADCAST;
f97c1e0c 2339 else if (ipv4_is_multicast(fl->fl4_dst))
982721f3 2340 type = RTN_MULTICAST;
27a954bd 2341 else if (ipv4_is_zeronet(fl->fl4_dst))
5ada5527 2342 return ERR_PTR(-EINVAL);
1da177e4
LT
2343
2344 if (dev_out->flags & IFF_LOOPBACK)
2345 flags |= RTCF_LOCAL;
2346
dd28d1a0 2347 in_dev = __in_dev_get_rcu(dev_out);
ebc0ffae 2348 if (!in_dev)
5ada5527 2349 return ERR_PTR(-EINVAL);
ebc0ffae 2350
982721f3 2351 if (type == RTN_BROADCAST) {
1da177e4 2352 flags |= RTCF_BROADCAST | RTCF_LOCAL;
982721f3
DM
2353 fi = NULL;
2354 } else if (type == RTN_MULTICAST) {
dd28d1a0 2355 flags |= RTCF_MULTICAST | RTCF_LOCAL;
e905a9ed 2356 if (!ip_check_mc(in_dev, oldflp->fl4_dst, oldflp->fl4_src,
1da177e4
LT
2357 oldflp->proto))
2358 flags &= ~RTCF_LOCAL;
2359 /* If a multicast route does not exist, use
dd28d1a0
ED
2360 * the default one, but do not gateway in this case.
2361 * Yes, it is a hack.
1da177e4 2362 */
982721f3
DM
2363 if (fi && res->prefixlen < 4)
2364 fi = NULL;
1da177e4
LT
2365 }
2366
0c4dcd58
DM
2367 rth = rt_dst_alloc(IN_DEV_CONF_GET(in_dev, NOPOLICY),
2368 IN_DEV_CONF_GET(in_dev, NOXFRM));
8391d07b 2369 if (!rth)
5ada5527 2370 return ERR_PTR(-ENOBUFS);
8391d07b 2371
1da177e4
LT
2372 rth->fl.fl4_dst = oldflp->fl4_dst;
2373 rth->fl.fl4_tos = tos;
2374 rth->fl.fl4_src = oldflp->fl4_src;
2375 rth->fl.oif = oldflp->oif;
47dcf0cb 2376 rth->fl.mark = oldflp->mark;
1da177e4
LT
2377 rth->rt_dst = fl->fl4_dst;
2378 rth->rt_src = fl->fl4_src;
2379 rth->rt_iif = oldflp->oif ? : dev_out->ifindex;
e905a9ed 2380 /* get references to the devices that are to be held by the routing
1da177e4 2381 cache entry */
d8d1f30b 2382 rth->dst.dev = dev_out;
1da177e4 2383 dev_hold(dev_out);
1da177e4
LT
2384 rth->rt_gateway = fl->fl4_dst;
2385 rth->rt_spec_dst= fl->fl4_src;
2386
d8d1f30b 2387 rth->dst.output=ip_output;
e84f84f2 2388 rth->rt_genid = rt_genid(dev_net(dev_out));
1da177e4
LT
2389
2390 RT_CACHE_STAT_INC(out_slow_tot);
2391
2392 if (flags & RTCF_LOCAL) {
d8d1f30b 2393 rth->dst.input = ip_local_deliver;
1da177e4
LT
2394 rth->rt_spec_dst = fl->fl4_dst;
2395 }
2396 if (flags & (RTCF_BROADCAST | RTCF_MULTICAST)) {
2397 rth->rt_spec_dst = fl->fl4_src;
e905a9ed 2398 if (flags & RTCF_LOCAL &&
1da177e4 2399 !(dev_out->flags & IFF_LOOPBACK)) {
d8d1f30b 2400 rth->dst.output = ip_mc_output;
1da177e4
LT
2401 RT_CACHE_STAT_INC(out_slow_mc);
2402 }
2403#ifdef CONFIG_IP_MROUTE
982721f3 2404 if (type == RTN_MULTICAST) {
1da177e4 2405 if (IN_DEV_MFORWARD(in_dev) &&
f97c1e0c 2406 !ipv4_is_local_multicast(oldflp->fl4_dst)) {
d8d1f30b
CG
2407 rth->dst.input = ip_mr_input;
2408 rth->dst.output = ip_mc_output;
1da177e4
LT
2409 }
2410 }
2411#endif
2412 }
2413
982721f3 2414 rt_set_nexthop(rth, res, fi, type, 0);
1da177e4
LT
2415
2416 rth->rt_flags = flags;
5ada5527 2417 return rth;
1da177e4
LT
2418}
2419
1da177e4
LT
2420/*
2421 * Major route resolver routine.
0197aa38 2422 * called with rcu_read_lock();
1da177e4
LT
2423 */
2424
b40afd0e
DL
2425static int ip_route_output_slow(struct net *net, struct rtable **rp,
2426 const struct flowi *oldflp)
1da177e4
LT
2427{
2428 u32 tos = RT_FL_TOS(oldflp);
5811662b
CG
2429 struct flowi fl = { .fl4_dst = oldflp->fl4_dst,
2430 .fl4_src = oldflp->fl4_src,
2431 .fl4_tos = tos & IPTOS_RT_MASK,
2432 .fl4_scope = ((tos & RTO_ONLINK) ?
2433 RT_SCOPE_LINK : RT_SCOPE_UNIVERSE),
47dcf0cb 2434 .mark = oldflp->mark,
b40afd0e 2435 .iif = net->loopback_dev->ifindex,
1da177e4
LT
2436 .oif = oldflp->oif };
2437 struct fib_result res;
0197aa38 2438 unsigned int flags = 0;
1da177e4 2439 struct net_device *dev_out = NULL;
5ada5527 2440 struct rtable *rth;
1da177e4
LT
2441 int err;
2442
2443
2444 res.fi = NULL;
2445#ifdef CONFIG_IP_MULTIPLE_TABLES
2446 res.r = NULL;
2447#endif
2448
010c2708 2449 rcu_read_lock();
1da177e4
LT
2450 if (oldflp->fl4_src) {
2451 err = -EINVAL;
f97c1e0c 2452 if (ipv4_is_multicast(oldflp->fl4_src) ||
1e637c74 2453 ipv4_is_lbcast(oldflp->fl4_src) ||
f97c1e0c 2454 ipv4_is_zeronet(oldflp->fl4_src))
1da177e4
LT
2455 goto out;
2456
1da177e4
LT
2457 /* I removed the check for oif == dev_out->oif here.
2458 It was wrong for two reasons:
1ab35276
DL
2459 1. ip_dev_find(net, saddr) can return the wrong iface if saddr
2460 is assigned to multiple interfaces.
1da177e4
LT
2461 2. Moreover, we are allowed to send packets with saddr
2462 of another iface. --ANK
2463 */
2464
9d4fb27d
JP
2465 if (oldflp->oif == 0 &&
2466 (ipv4_is_multicast(oldflp->fl4_dst) ||
27a954bd 2467 ipv4_is_lbcast(oldflp->fl4_dst))) {
a210d01a 2468 /* It is equivalent to inet_addr_type(saddr) == RTN_LOCAL */
0197aa38 2469 dev_out = __ip_dev_find(net, oldflp->fl4_src, false);
a210d01a
JA
2470 if (dev_out == NULL)
2471 goto out;
2472
1da177e4
LT
2473 /* Special hack: the user can direct multicasts
2474 and limited broadcast via the necessary interface
2475 without fiddling with IP_MULTICAST_IF or IP_PKTINFO.
2476 This hack is not just for fun, it allows
2477 vic, vat and friends to work.
2478 They bind the socket to loopback, set ttl to zero
2479 and expect that it will work.
2480 From the viewpoint of the routing cache they are broken,
2481 because we are not allowed to build a multicast path
2482 with a loopback source addr (look, the routing cache
2483 cannot know that ttl is zero, so the packet
2484 will not leave this host and the route is valid).
2485 Luckily, this hack is a good workaround.
2486 */
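/* Hedged userspace illustration (assumption, not kernel code; the address
 * is hypothetical): this path is what makes the following work for an
 * application that binds to a local source address and then sends to a
 * multicast or limited-broadcast destination with no oif set:
 *
 *	struct sockaddr_in local = { .sin_family = AF_INET };
 *	local.sin_addr.s_addr = inet_addr("192.168.1.10");
 *	bind(fd, (struct sockaddr *)&local, sizeof(local));
 *	sendto(fd, buf, len, 0, (struct sockaddr *)&dst, sizeof(dst));
 *
 * __ip_dev_find() above maps the bound source address to its interface,
 * and that interface becomes fl.oif just below.
 */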
2487
2488 fl.oif = dev_out->ifindex;
2489 goto make_route;
2490 }
a210d01a
JA
2491
2492 if (!(oldflp->flags & FLOWI_FLAG_ANYSRC)) {
2493 /* It is equivalent to inet_addr_type(saddr) == RTN_LOCAL */
0197aa38 2494 if (!__ip_dev_find(net, oldflp->fl4_src, false))
a210d01a 2495 goto out;
a210d01a 2496 }
1da177e4
LT
2497 }
2498
2499
2500 if (oldflp->oif) {
0197aa38 2501 dev_out = dev_get_by_index_rcu(net, oldflp->oif);
1da177e4
LT
2502 err = -ENODEV;
2503 if (dev_out == NULL)
2504 goto out;
e5ed6399
HX
2505
2506 /* RACE: Check return value of inet_select_addr instead. */
fc75fc83
ED
2507 if (!(dev_out->flags & IFF_UP) || !__in_dev_get_rcu(dev_out)) {
2508 err = -ENETUNREACH;
2509 goto out;
2510 }
f97c1e0c 2511 if (ipv4_is_local_multicast(oldflp->fl4_dst) ||
27a954bd 2512 ipv4_is_lbcast(oldflp->fl4_dst)) {
1da177e4
LT
2513 if (!fl.fl4_src)
2514 fl.fl4_src = inet_select_addr(dev_out, 0,
2515 RT_SCOPE_LINK);
2516 goto make_route;
2517 }
2518 if (!fl.fl4_src) {
f97c1e0c 2519 if (ipv4_is_multicast(oldflp->fl4_dst))
1da177e4
LT
2520 fl.fl4_src = inet_select_addr(dev_out, 0,
2521 fl.fl4_scope);
2522 else if (!oldflp->fl4_dst)
2523 fl.fl4_src = inet_select_addr(dev_out, 0,
2524 RT_SCOPE_HOST);
2525 }
2526 }
2527
2528 if (!fl.fl4_dst) {
2529 fl.fl4_dst = fl.fl4_src;
2530 if (!fl.fl4_dst)
2531 fl.fl4_dst = fl.fl4_src = htonl(INADDR_LOOPBACK);
b40afd0e 2532 dev_out = net->loopback_dev;
b40afd0e 2533 fl.oif = net->loopback_dev->ifindex;
1da177e4
LT
2534 res.type = RTN_LOCAL;
2535 flags |= RTCF_LOCAL;
2536 goto make_route;
2537 }
2538
b40afd0e 2539 if (fib_lookup(net, &fl, &res)) {
1da177e4
LT
2540 res.fi = NULL;
2541 if (oldflp->oif) {
2542 /* Apparently, the routing tables are wrong. Assume
2543 that the destination is on-link.
2544
2545 WHY? DW.
2546 Because we are allowed to send to an iface
2547 even if it has NO routes and NO assigned
2548 addresses. When oif is specified, routing
2549 tables are looked up with only one purpose:
2550 to check whether the destination is gatewayed, rather than
2551 direct. Moreover, if MSG_DONTROUTE is set,
2552 we send the packet, ignoring both routing tables
2553 and ifaddr state. --ANK
2554
2555
2556 We could do this even if oif is unknown,
2557 likely IPv6, but we do not.
2558 */
2559
2560 if (fl.fl4_src == 0)
2561 fl.fl4_src = inet_select_addr(dev_out, 0,
2562 RT_SCOPE_LINK);
2563 res.type = RTN_UNICAST;
2564 goto make_route;
2565 }
1da177e4
LT
2566 err = -ENETUNREACH;
2567 goto out;
2568 }
1da177e4
LT
2569
2570 if (res.type == RTN_LOCAL) {
9fc3bbb4
JS
2571 if (!fl.fl4_src) {
2572 if (res.fi->fib_prefsrc)
2573 fl.fl4_src = res.fi->fib_prefsrc;
2574 else
2575 fl.fl4_src = fl.fl4_dst;
2576 }
b40afd0e 2577 dev_out = net->loopback_dev;
1da177e4 2578 fl.oif = dev_out->ifindex;
1da177e4
LT
2579 res.fi = NULL;
2580 flags |= RTCF_LOCAL;
2581 goto make_route;
2582 }
2583
2584#ifdef CONFIG_IP_ROUTE_MULTIPATH
2585 if (res.fi->fib_nhs > 1 && fl.oif == 0)
2586 fib_select_multipath(&fl, &res);
2587 else
2588#endif
2589 if (!res.prefixlen && res.type == RTN_UNICAST && !fl.oif)
0c838ff1 2590 fib_select_default(&res);
1da177e4
LT
2591
2592 if (!fl.fl4_src)
2593 fl.fl4_src = FIB_RES_PREFSRC(res);
2594
1da177e4 2595 dev_out = FIB_RES_DEV(res);
1da177e4
LT
2596 fl.oif = dev_out->ifindex;
2597
2598
2599make_route:
5ada5527
DM
2600 rth = __mkroute_output(&res, &fl, oldflp, dev_out, flags);
2601 if (IS_ERR(rth))
2602 err = PTR_ERR(rth);
2603 else {
2604 unsigned int hash;
2605
2606 hash = rt_hash(oldflp->fl4_dst, oldflp->fl4_src, oldflp->oif,
2607 rt_genid(dev_net(dev_out)));
2608 err = rt_intern_hash(hash, rth, rp, NULL, oldflp->oif);
2609 }
1da177e4 2610
010c2708
DM
2611out:
2612 rcu_read_unlock();
2613 return err;
1da177e4
LT
2614}
2615
611c183e
DL
2616int __ip_route_output_key(struct net *net, struct rtable **rp,
2617 const struct flowi *flp)
1da177e4 2618{
1da177e4 2619 struct rtable *rth;
010c2708 2620 unsigned int hash;
1da177e4 2621
1080d709
NH
2622 if (!rt_caching(net))
2623 goto slow_output;
2624
e84f84f2 2625 hash = rt_hash(flp->fl4_dst, flp->fl4_src, flp->oif, rt_genid(net));
1da177e4
LT
2626
2627 rcu_read_lock_bh();
a898def2 2628 for (rth = rcu_dereference_bh(rt_hash_table[hash].chain); rth;
d8d1f30b 2629 rth = rcu_dereference_bh(rth->dst.rt_next)) {
1da177e4
LT
2630 if (rth->fl.fl4_dst == flp->fl4_dst &&
2631 rth->fl.fl4_src == flp->fl4_src &&
c7537967 2632 rt_is_output_route(rth) &&
1da177e4 2633 rth->fl.oif == flp->oif &&
47dcf0cb 2634 rth->fl.mark == flp->mark &&
1da177e4 2635 !((rth->fl.fl4_tos ^ flp->fl4_tos) &
b5921910 2636 (IPTOS_RT_MASK | RTO_ONLINK)) &&
d8d1f30b 2637 net_eq(dev_net(rth->dst.dev), net) &&
e84f84f2 2638 !rt_is_expired(rth)) {
d8d1f30b 2639 dst_use(&rth->dst, jiffies);
1da177e4
LT
2640 RT_CACHE_STAT_INC(out_hit);
2641 rcu_read_unlock_bh();
2642 *rp = rth;
2643 return 0;
2644 }
2645 RT_CACHE_STAT_INC(out_hlist_search);
2646 }
2647 rcu_read_unlock_bh();
2648
1080d709 2649slow_output:
010c2708 2650 return ip_route_output_slow(net, rp, flp);
1da177e4 2651}
d8c97a94
ACM
2652EXPORT_SYMBOL_GPL(__ip_route_output_key);
2653
ae2688d5
JW
2654static struct dst_entry *ipv4_blackhole_dst_check(struct dst_entry *dst, u32 cookie)
2655{
2656 return NULL;
2657}
2658
ec831ea7
RD
2659static unsigned int ipv4_blackhole_default_mtu(const struct dst_entry *dst)
2660{
2661 return 0;
2662}
2663
14e50e57
DM
2664static void ipv4_rt_blackhole_update_pmtu(struct dst_entry *dst, u32 mtu)
2665{
2666}
2667
2668static struct dst_ops ipv4_dst_blackhole_ops = {
2669 .family = AF_INET,
09640e63 2670 .protocol = cpu_to_be16(ETH_P_IP),
14e50e57 2671 .destroy = ipv4_dst_destroy,
ae2688d5 2672 .check = ipv4_blackhole_dst_check,
ec831ea7 2673 .default_mtu = ipv4_blackhole_default_mtu,
214f45c9 2674 .default_advmss = ipv4_default_advmss,
14e50e57 2675 .update_pmtu = ipv4_rt_blackhole_update_pmtu,
14e50e57
DM
2676};
2677
2774c131 2678struct dst_entry *ipv4_blackhole_route(struct net *net, struct dst_entry *dst_orig)
14e50e57 2679{
2774c131
DM
2680 struct rtable *rt = dst_alloc(&ipv4_dst_blackhole_ops, 1);
2681 struct rtable *ort = (struct rtable *) dst_orig;
14e50e57
DM
2682
2683 if (rt) {
d8d1f30b 2684 struct dst_entry *new = &rt->dst;
14e50e57 2685
14e50e57 2686 new->__use = 1;
352e512c
HX
2687 new->input = dst_discard;
2688 new->output = dst_discard;
defb3519 2689 dst_copy_metrics(new, &ort->dst);
14e50e57 2690
d8d1f30b 2691 new->dev = ort->dst.dev;
14e50e57
DM
2692 if (new->dev)
2693 dev_hold(new->dev);
2694
2695 rt->fl = ort->fl;
2696
e84f84f2 2697 rt->rt_genid = rt_genid(net);
14e50e57
DM
2698 rt->rt_flags = ort->rt_flags;
2699 rt->rt_type = ort->rt_type;
2700 rt->rt_dst = ort->rt_dst;
2701 rt->rt_src = ort->rt_src;
2702 rt->rt_iif = ort->rt_iif;
2703 rt->rt_gateway = ort->rt_gateway;
2704 rt->rt_spec_dst = ort->rt_spec_dst;
2705 rt->peer = ort->peer;
2706 if (rt->peer)
2707 atomic_inc(&rt->peer->refcnt);
62fa8a84
DM
2708 rt->fi = ort->fi;
2709 if (rt->fi)
2710 atomic_inc(&rt->fi->fib_clntref);
14e50e57
DM
2711
2712 dst_free(new);
2713 }
2714
2774c131
DM
2715 dst_release(dst_orig);
2716
2717 return rt ? &rt->dst : ERR_PTR(-ENOMEM);
14e50e57
DM
2718}
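/* Hedged usage note (assumption, not stated in this file): callers such as
 * the xfrm code can substitute this blackhole copy for a real route when
 * packets must be silently discarded - both input and output are
 * dst_discard - while the metrics and flow key of the original dst remain
 * visible to the owning socket.
 */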
2719
f1b050bf 2720int ip_route_output_flow(struct net *net, struct rtable **rp, struct flowi *flp,
273447b3 2721 struct sock *sk)
1da177e4
LT
2722{
2723 int err;
2724
f1b050bf 2725 if ((err = __ip_route_output_key(net, rp, flp)) != 0)
1da177e4
LT
2726 return err;
2727
2728 if (flp->proto) {
2729 if (!flp->fl4_src)
2730 flp->fl4_src = (*rp)->rt_src;
2731 if (!flp->fl4_dst)
2732 flp->fl4_dst = (*rp)->rt_dst;
452edd59
DM
2733 *rp = (struct rtable *) xfrm_lookup(net, &(*rp)->dst, flp, sk, 0);
2734 if (IS_ERR(*rp)) {
2735 err = PTR_ERR(*rp);
2736 *rp = NULL;
2737 return err;
2738 }
1da177e4
LT
2739 }
2740
2741 return 0;
2742}
d8c97a94
ACM
2743EXPORT_SYMBOL_GPL(ip_route_output_flow);
2744
f206351a 2745int ip_route_output_key(struct net *net, struct rtable **rp, struct flowi *flp)
1da177e4 2746{
273447b3 2747 return ip_route_output_flow(net, rp, flp, NULL);
1da177e4 2748}
4bc2f18b 2749EXPORT_SYMBOL(ip_route_output_key);
1da177e4 2750
4feb88e5
BT
2751static int rt_fill_info(struct net *net,
2752 struct sk_buff *skb, u32 pid, u32 seq, int event,
b6544c0b 2753 int nowait, unsigned int flags)
1da177e4 2754{
511c3f92 2755 struct rtable *rt = skb_rtable(skb);
1da177e4 2756 struct rtmsg *r;
be403ea1 2757 struct nlmsghdr *nlh;
e3703b3d
TG
2758 long expires;
2759 u32 id = 0, ts = 0, tsage = 0, error;
be403ea1
TG
2760
2761 nlh = nlmsg_put(skb, pid, seq, event, sizeof(*r), flags);
2762 if (nlh == NULL)
26932566 2763 return -EMSGSIZE;
be403ea1
TG
2764
2765 r = nlmsg_data(nlh);
1da177e4
LT
2766 r->rtm_family = AF_INET;
2767 r->rtm_dst_len = 32;
2768 r->rtm_src_len = 0;
2769 r->rtm_tos = rt->fl.fl4_tos;
2770 r->rtm_table = RT_TABLE_MAIN;
be403ea1 2771 NLA_PUT_U32(skb, RTA_TABLE, RT_TABLE_MAIN);
1da177e4
LT
2772 r->rtm_type = rt->rt_type;
2773 r->rtm_scope = RT_SCOPE_UNIVERSE;
2774 r->rtm_protocol = RTPROT_UNSPEC;
2775 r->rtm_flags = (rt->rt_flags & ~0xFFFF) | RTM_F_CLONED;
2776 if (rt->rt_flags & RTCF_NOTIFY)
2777 r->rtm_flags |= RTM_F_NOTIFY;
be403ea1 2778
17fb2c64 2779 NLA_PUT_BE32(skb, RTA_DST, rt->rt_dst);
be403ea1 2780
1da177e4
LT
2781 if (rt->fl.fl4_src) {
2782 r->rtm_src_len = 32;
17fb2c64 2783 NLA_PUT_BE32(skb, RTA_SRC, rt->fl.fl4_src);
1da177e4 2784 }
d8d1f30b
CG
2785 if (rt->dst.dev)
2786 NLA_PUT_U32(skb, RTA_OIF, rt->dst.dev->ifindex);
c7066f70 2787#ifdef CONFIG_IP_ROUTE_CLASSID
d8d1f30b
CG
2788 if (rt->dst.tclassid)
2789 NLA_PUT_U32(skb, RTA_FLOW, rt->dst.tclassid);
1da177e4 2790#endif
c7537967 2791 if (rt_is_input_route(rt))
17fb2c64 2792 NLA_PUT_BE32(skb, RTA_PREFSRC, rt->rt_spec_dst);
1da177e4 2793 else if (rt->rt_src != rt->fl.fl4_src)
17fb2c64 2794 NLA_PUT_BE32(skb, RTA_PREFSRC, rt->rt_src);
be403ea1 2795
1da177e4 2796 if (rt->rt_dst != rt->rt_gateway)
17fb2c64 2797 NLA_PUT_BE32(skb, RTA_GATEWAY, rt->rt_gateway);
be403ea1 2798
defb3519 2799 if (rtnetlink_put_metrics(skb, dst_metrics_ptr(&rt->dst)) < 0)
be403ea1
TG
2800 goto nla_put_failure;
2801
963bfeee
ED
2802 if (rt->fl.mark)
2803 NLA_PUT_BE32(skb, RTA_MARK, rt->fl.mark);
2804
d8d1f30b 2805 error = rt->dst.error;
2c8cec5c
DM
2806 expires = (rt->peer && rt->peer->pmtu_expires) ?
2807 rt->peer->pmtu_expires - jiffies : 0;
1da177e4 2808 if (rt->peer) {
317fe0e6 2809 inet_peer_refcheck(rt->peer);
2c1409a0 2810 id = atomic_read(&rt->peer->ip_id_count) & 0xffff;
1da177e4 2811 if (rt->peer->tcp_ts_stamp) {
e3703b3d 2812 ts = rt->peer->tcp_ts;
9d729f72 2813 tsage = get_seconds() - rt->peer->tcp_ts_stamp;
1da177e4
LT
2814 }
2815 }
be403ea1 2816
c7537967 2817 if (rt_is_input_route(rt)) {
1da177e4 2818#ifdef CONFIG_IP_MROUTE
e448515c 2819 __be32 dst = rt->rt_dst;
1da177e4 2820
f97c1e0c 2821 if (ipv4_is_multicast(dst) && !ipv4_is_local_multicast(dst) &&
4feb88e5
BT
2822 IPV4_DEVCONF_ALL(net, MC_FORWARDING)) {
2823 int err = ipmr_get_route(net, skb, r, nowait);
1da177e4
LT
2824 if (err <= 0) {
2825 if (!nowait) {
2826 if (err == 0)
2827 return 0;
be403ea1 2828 goto nla_put_failure;
1da177e4
LT
2829 } else {
2830 if (err == -EMSGSIZE)
be403ea1 2831 goto nla_put_failure;
e3703b3d 2832 error = err;
1da177e4
LT
2833 }
2834 }
2835 } else
2836#endif
be403ea1 2837 NLA_PUT_U32(skb, RTA_IIF, rt->fl.iif);
1da177e4
LT
2838 }
2839
d8d1f30b 2840 if (rtnl_put_cacheinfo(skb, &rt->dst, id, ts, tsage,
e3703b3d
TG
2841 expires, error) < 0)
2842 goto nla_put_failure;
be403ea1
TG
2843
2844 return nlmsg_end(skb, nlh);
1da177e4 2845
be403ea1 2846nla_put_failure:
26932566
PM
2847 nlmsg_cancel(skb, nlh);
2848 return -EMSGSIZE;
1da177e4
LT
2849}
2850
63f3444f 2851static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr* nlh, void *arg)
1da177e4 2852{
3b1e0a65 2853 struct net *net = sock_net(in_skb->sk);
d889ce3b
TG
2854 struct rtmsg *rtm;
2855 struct nlattr *tb[RTA_MAX+1];
1da177e4 2856 struct rtable *rt = NULL;
9e12bb22
AV
2857 __be32 dst = 0;
2858 __be32 src = 0;
2859 u32 iif;
d889ce3b 2860 int err;
963bfeee 2861 int mark;
1da177e4
LT
2862 struct sk_buff *skb;
2863
d889ce3b
TG
2864 err = nlmsg_parse(nlh, sizeof(*rtm), tb, RTA_MAX, rtm_ipv4_policy);
2865 if (err < 0)
2866 goto errout;
2867
2868 rtm = nlmsg_data(nlh);
2869
1da177e4 2870 skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
d889ce3b
TG
2871 if (skb == NULL) {
2872 err = -ENOBUFS;
2873 goto errout;
2874 }
1da177e4
LT
2875
2876 /* Reserve room for dummy headers, this skb can pass
2877 through a good chunk of the routing engine.
2878 */
459a98ed 2879 skb_reset_mac_header(skb);
c1d2bbe1 2880 skb_reset_network_header(skb);
d2c962b8
SH
2881
2882 /* Bugfix: need to give ip_route_input enough of an IP header to not gag. */
eddc9ec5 2883 ip_hdr(skb)->protocol = IPPROTO_ICMP;
1da177e4
LT
2884 skb_reserve(skb, MAX_HEADER + sizeof(struct iphdr));
2885
17fb2c64
AV
2886 src = tb[RTA_SRC] ? nla_get_be32(tb[RTA_SRC]) : 0;
2887 dst = tb[RTA_DST] ? nla_get_be32(tb[RTA_DST]) : 0;
d889ce3b 2888 iif = tb[RTA_IIF] ? nla_get_u32(tb[RTA_IIF]) : 0;
963bfeee 2889 mark = tb[RTA_MARK] ? nla_get_u32(tb[RTA_MARK]) : 0;
1da177e4
LT
2890
2891 if (iif) {
d889ce3b
TG
2892 struct net_device *dev;
2893
1937504d 2894 dev = __dev_get_by_index(net, iif);
d889ce3b
TG
2895 if (dev == NULL) {
2896 err = -ENODEV;
2897 goto errout_free;
2898 }
2899
1da177e4
LT
2900 skb->protocol = htons(ETH_P_IP);
2901 skb->dev = dev;
963bfeee 2902 skb->mark = mark;
1da177e4
LT
2903 local_bh_disable();
2904 err = ip_route_input(skb, dst, src, rtm->rtm_tos, dev);
2905 local_bh_enable();
d889ce3b 2906
511c3f92 2907 rt = skb_rtable(skb);
d8d1f30b
CG
2908 if (err == 0 && rt->dst.error)
2909 err = -rt->dst.error;
1da177e4 2910 } else {
d889ce3b 2911 struct flowi fl = {
5811662b
CG
2912 .fl4_dst = dst,
2913 .fl4_src = src,
2914 .fl4_tos = rtm->rtm_tos,
d889ce3b 2915 .oif = tb[RTA_OIF] ? nla_get_u32(tb[RTA_OIF]) : 0,
963bfeee 2916 .mark = mark,
d889ce3b 2917 };
1937504d 2918 err = ip_route_output_key(net, &rt, &fl);
1da177e4 2919 }
d889ce3b 2920
1da177e4 2921 if (err)
d889ce3b 2922 goto errout_free;
1da177e4 2923
d8d1f30b 2924 skb_dst_set(skb, &rt->dst);
1da177e4
LT
2925 if (rtm->rtm_flags & RTM_F_NOTIFY)
2926 rt->rt_flags |= RTCF_NOTIFY;
2927
4feb88e5 2928 err = rt_fill_info(net, skb, NETLINK_CB(in_skb).pid, nlh->nlmsg_seq,
1937504d 2929 RTM_NEWROUTE, 0, 0);
d889ce3b
TG
2930 if (err <= 0)
2931 goto errout_free;
1da177e4 2932
1937504d 2933 err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).pid);
d889ce3b 2934errout:
2942e900 2935 return err;
1da177e4 2936
d889ce3b 2937errout_free:
1da177e4 2938 kfree_skb(skb);
d889ce3b 2939 goto errout;
1da177e4
LT
2940}
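/* Hedged usage note: this handler answers RTM_GETROUTE requests over a
 * NETLINK_ROUTE socket - for example what iproute2 sends for
 * "ip route get 8.8.8.8" - and the reply is assembled by rt_fill_info()
 * above.  It is hooked up via rtnl_register() in ip_rt_init() below.
 */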
2941
2942int ip_rt_dump(struct sk_buff *skb, struct netlink_callback *cb)
2943{
2944 struct rtable *rt;
2945 int h, s_h;
2946 int idx, s_idx;
1937504d
DL
2947 struct net *net;
2948
3b1e0a65 2949 net = sock_net(skb->sk);
1da177e4
LT
2950
2951 s_h = cb->args[0];
d8c92830
ED
2952 if (s_h < 0)
2953 s_h = 0;
1da177e4 2954 s_idx = idx = cb->args[1];
a6272665
ED
2955 for (h = s_h; h <= rt_hash_mask; h++, s_idx = 0) {
2956 if (!rt_hash_table[h].chain)
2957 continue;
1da177e4 2958 rcu_read_lock_bh();
a898def2 2959 for (rt = rcu_dereference_bh(rt_hash_table[h].chain), idx = 0; rt;
d8d1f30b
CG
2960 rt = rcu_dereference_bh(rt->dst.rt_next), idx++) {
2961 if (!net_eq(dev_net(rt->dst.dev), net) || idx < s_idx)
1da177e4 2962 continue;
e84f84f2 2963 if (rt_is_expired(rt))
29e75252 2964 continue;
d8d1f30b 2965 skb_dst_set_noref(skb, &rt->dst);
4feb88e5 2966 if (rt_fill_info(net, skb, NETLINK_CB(cb->skb).pid,
e905a9ed 2967 cb->nlh->nlmsg_seq, RTM_NEWROUTE,
b6544c0b 2968 1, NLM_F_MULTI) <= 0) {
adf30907 2969 skb_dst_drop(skb);
1da177e4
LT
2970 rcu_read_unlock_bh();
2971 goto done;
2972 }
adf30907 2973 skb_dst_drop(skb);
1da177e4
LT
2974 }
2975 rcu_read_unlock_bh();
2976 }
2977
2978done:
2979 cb->args[0] = h;
2980 cb->args[1] = idx;
2981 return skb->len;
2982}
2983
2984void ip_rt_multicast_event(struct in_device *in_dev)
2985{
76e6ebfb 2986 rt_cache_flush(dev_net(in_dev->dev), 0);
1da177e4
LT
2987}
2988
2989#ifdef CONFIG_SYSCTL
81c684d1 2990static int ipv4_sysctl_rtcache_flush(ctl_table *__ctl, int write,
8d65af78 2991 void __user *buffer,
1da177e4
LT
2992 size_t *lenp, loff_t *ppos)
2993{
2994 if (write) {
639e104f 2995 int flush_delay;
81c684d1 2996 ctl_table ctl;
39a23e75 2997 struct net *net;
639e104f 2998
81c684d1
DL
2999 memcpy(&ctl, __ctl, sizeof(ctl));
3000 ctl.data = &flush_delay;
8d65af78 3001 proc_dointvec(&ctl, write, buffer, lenp, ppos);
639e104f 3002
81c684d1 3003 net = (struct net *)__ctl->extra1;
39a23e75 3004 rt_cache_flush(net, flush_delay);
1da177e4 3005 return 0;
e905a9ed 3006 }
1da177e4
LT
3007
3008 return -EINVAL;
3009}
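/* Hedged usage note (illustration): this handler backs the write-only
 * /proc/sys/net/ipv4/route/flush file registered below, so e.g.
 *
 *	echo 0 > /proc/sys/net/ipv4/route/flush
 *
 * (or "sysctl -w net.ipv4.route.flush=0") invokes rt_cache_flush() for the
 * writer's network namespace, with the written integer used as the delay.
 */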
3010
eeb61f71 3011static ctl_table ipv4_route_table[] = {
1da177e4 3012 {
1da177e4
LT
3013 .procname = "gc_thresh",
3014 .data = &ipv4_dst_ops.gc_thresh,
3015 .maxlen = sizeof(int),
3016 .mode = 0644,
6d9f239a 3017 .proc_handler = proc_dointvec,
1da177e4
LT
3018 },
3019 {
1da177e4
LT
3020 .procname = "max_size",
3021 .data = &ip_rt_max_size,
3022 .maxlen = sizeof(int),
3023 .mode = 0644,
6d9f239a 3024 .proc_handler = proc_dointvec,
1da177e4
LT
3025 },
3026 {
3027 /* Deprecated. Use gc_min_interval_ms */
e905a9ed 3028
1da177e4
LT
3029 .procname = "gc_min_interval",
3030 .data = &ip_rt_gc_min_interval,
3031 .maxlen = sizeof(int),
3032 .mode = 0644,
6d9f239a 3033 .proc_handler = proc_dointvec_jiffies,
1da177e4
LT
3034 },
3035 {
1da177e4
LT
3036 .procname = "gc_min_interval_ms",
3037 .data = &ip_rt_gc_min_interval,
3038 .maxlen = sizeof(int),
3039 .mode = 0644,
6d9f239a 3040 .proc_handler = proc_dointvec_ms_jiffies,
1da177e4
LT
3041 },
3042 {
1da177e4
LT
3043 .procname = "gc_timeout",
3044 .data = &ip_rt_gc_timeout,
3045 .maxlen = sizeof(int),
3046 .mode = 0644,
6d9f239a 3047 .proc_handler = proc_dointvec_jiffies,
1da177e4
LT
3048 },
3049 {
1da177e4
LT
3050 .procname = "gc_interval",
3051 .data = &ip_rt_gc_interval,
3052 .maxlen = sizeof(int),
3053 .mode = 0644,
6d9f239a 3054 .proc_handler = proc_dointvec_jiffies,
1da177e4
LT
3055 },
3056 {
1da177e4
LT
3057 .procname = "redirect_load",
3058 .data = &ip_rt_redirect_load,
3059 .maxlen = sizeof(int),
3060 .mode = 0644,
6d9f239a 3061 .proc_handler = proc_dointvec,
1da177e4
LT
3062 },
3063 {
1da177e4
LT
3064 .procname = "redirect_number",
3065 .data = &ip_rt_redirect_number,
3066 .maxlen = sizeof(int),
3067 .mode = 0644,
6d9f239a 3068 .proc_handler = proc_dointvec,
1da177e4
LT
3069 },
3070 {
1da177e4
LT
3071 .procname = "redirect_silence",
3072 .data = &ip_rt_redirect_silence,
3073 .maxlen = sizeof(int),
3074 .mode = 0644,
6d9f239a 3075 .proc_handler = proc_dointvec,
1da177e4
LT
3076 },
3077 {
1da177e4
LT
3078 .procname = "error_cost",
3079 .data = &ip_rt_error_cost,
3080 .maxlen = sizeof(int),
3081 .mode = 0644,
6d9f239a 3082 .proc_handler = proc_dointvec,
1da177e4
LT
3083 },
3084 {
1da177e4
LT
3085 .procname = "error_burst",
3086 .data = &ip_rt_error_burst,
3087 .maxlen = sizeof(int),
3088 .mode = 0644,
6d9f239a 3089 .proc_handler = proc_dointvec,
1da177e4
LT
3090 },
3091 {
1da177e4
LT
3092 .procname = "gc_elasticity",
3093 .data = &ip_rt_gc_elasticity,
3094 .maxlen = sizeof(int),
3095 .mode = 0644,
6d9f239a 3096 .proc_handler = proc_dointvec,
1da177e4
LT
3097 },
3098 {
1da177e4
LT
3099 .procname = "mtu_expires",
3100 .data = &ip_rt_mtu_expires,
3101 .maxlen = sizeof(int),
3102 .mode = 0644,
6d9f239a 3103 .proc_handler = proc_dointvec_jiffies,
1da177e4
LT
3104 },
3105 {
1da177e4
LT
3106 .procname = "min_pmtu",
3107 .data = &ip_rt_min_pmtu,
3108 .maxlen = sizeof(int),
3109 .mode = 0644,
6d9f239a 3110 .proc_handler = proc_dointvec,
1da177e4
LT
3111 },
3112 {
1da177e4
LT
3113 .procname = "min_adv_mss",
3114 .data = &ip_rt_min_advmss,
3115 .maxlen = sizeof(int),
3116 .mode = 0644,
6d9f239a 3117 .proc_handler = proc_dointvec,
1da177e4 3118 },
f8572d8f 3119 { }
1da177e4 3120};
39a23e75 3121
2f4520d3
AV
3122static struct ctl_table empty[1];
3123
3124static struct ctl_table ipv4_skeleton[] =
3125{
f8572d8f 3126 { .procname = "route",
d994af0d 3127 .mode = 0555, .child = ipv4_route_table},
f8572d8f 3128 { .procname = "neigh",
d994af0d 3129 .mode = 0555, .child = empty},
2f4520d3
AV
3130 { }
3131};
3132
3133static __net_initdata struct ctl_path ipv4_path[] = {
f8572d8f
EB
3134 { .procname = "net", },
3135 { .procname = "ipv4", },
39a23e75
DL
3136 { },
3137};
3138
39a23e75
DL
3139static struct ctl_table ipv4_route_flush_table[] = {
3140 {
39a23e75
DL
3141 .procname = "flush",
3142 .maxlen = sizeof(int),
3143 .mode = 0200,
6d9f239a 3144 .proc_handler = ipv4_sysctl_rtcache_flush,
39a23e75 3145 },
f8572d8f 3146 { },
39a23e75
DL
3147};
3148
2f4520d3 3149static __net_initdata struct ctl_path ipv4_route_path[] = {
f8572d8f
EB
3150 { .procname = "net", },
3151 { .procname = "ipv4", },
3152 { .procname = "route", },
2f4520d3
AV
3153 { },
3154};
3155
39a23e75
DL
3156static __net_init int sysctl_route_net_init(struct net *net)
3157{
3158 struct ctl_table *tbl;
3159
3160 tbl = ipv4_route_flush_table;
09ad9bc7 3161 if (!net_eq(net, &init_net)) {
39a23e75
DL
3162 tbl = kmemdup(tbl, sizeof(ipv4_route_flush_table), GFP_KERNEL);
3163 if (tbl == NULL)
3164 goto err_dup;
3165 }
3166 tbl[0].extra1 = net;
3167
3168 net->ipv4.route_hdr =
3169 register_net_sysctl_table(net, ipv4_route_path, tbl);
3170 if (net->ipv4.route_hdr == NULL)
3171 goto err_reg;
3172 return 0;
3173
3174err_reg:
3175 if (tbl != ipv4_route_flush_table)
3176 kfree(tbl);
3177err_dup:
3178 return -ENOMEM;
3179}
3180
3181static __net_exit void sysctl_route_net_exit(struct net *net)
3182{
3183 struct ctl_table *tbl;
3184
3185 tbl = net->ipv4.route_hdr->ctl_table_arg;
3186 unregister_net_sysctl_table(net->ipv4.route_hdr);
3187 BUG_ON(tbl == ipv4_route_flush_table);
3188 kfree(tbl);
3189}
3190
3191static __net_initdata struct pernet_operations sysctl_route_ops = {
3192 .init = sysctl_route_net_init,
3193 .exit = sysctl_route_net_exit,
3194};
1da177e4
LT
3195#endif
3196
3ee94372 3197static __net_init int rt_genid_init(struct net *net)
9f5e97e5 3198{
3ee94372
NH
3199 get_random_bytes(&net->ipv4.rt_genid,
3200 sizeof(net->ipv4.rt_genid));
9f5e97e5
DL
3201 return 0;
3202}
3203
3ee94372
NH
3204static __net_initdata struct pernet_operations rt_genid_ops = {
3205 .init = rt_genid_init,
9f5e97e5
DL
3206};
3207
3208
c7066f70 3209#ifdef CONFIG_IP_ROUTE_CLASSID
7d720c3e 3210struct ip_rt_acct __percpu *ip_rt_acct __read_mostly;
c7066f70 3211#endif /* CONFIG_IP_ROUTE_CLASSID */
1da177e4
LT
3212
3213static __initdata unsigned long rhash_entries;
3214static int __init set_rhash_entries(char *str)
3215{
3216 if (!str)
3217 return 0;
3218 rhash_entries = simple_strtoul(str, &str, 0);
3219 return 1;
3220}
3221__setup("rhash_entries=", set_rhash_entries);
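/* Hedged usage note: "rhash_entries=" is a kernel boot parameter, e.g.
 *
 *	linux ... rhash_entries=262144
 *
 * which overrides the route cache hash size that alloc_large_system_hash()
 * in ip_rt_init() below would otherwise derive from available memory.
 */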
3222
3223int __init ip_rt_init(void)
3224{
424c4b70 3225 int rc = 0;
1da177e4 3226
c7066f70 3227#ifdef CONFIG_IP_ROUTE_CLASSID
0dcec8c2 3228 ip_rt_acct = __alloc_percpu(256 * sizeof(struct ip_rt_acct), __alignof__(struct ip_rt_acct));
1da177e4
LT
3229 if (!ip_rt_acct)
3230 panic("IP: failed to allocate ip_rt_acct\n");
1da177e4
LT
3231#endif
3232
e5d679f3
AD
3233 ipv4_dst_ops.kmem_cachep =
3234 kmem_cache_create("ip_dst_cache", sizeof(struct rtable), 0,
20c2df83 3235 SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
1da177e4 3236
14e50e57
DM
3237 ipv4_dst_blackhole_ops.kmem_cachep = ipv4_dst_ops.kmem_cachep;
3238
fc66f95c
ED
3239 if (dst_entries_init(&ipv4_dst_ops) < 0)
3240 panic("IP: failed to allocate ipv4_dst_ops counter\n");
3241
3242 if (dst_entries_init(&ipv4_dst_blackhole_ops) < 0)
3243 panic("IP: failed to allocate ipv4_dst_blackhole_ops counter\n");
3244
424c4b70
ED
3245 rt_hash_table = (struct rt_hash_bucket *)
3246 alloc_large_system_hash("IP route cache",
3247 sizeof(struct rt_hash_bucket),
3248 rhash_entries,
4481374c 3249 (totalram_pages >= 128 * 1024) ?
18955cfc 3250 15 : 17,
8d1502de 3251 0,
424c4b70
ED
3252 &rt_hash_log,
3253 &rt_hash_mask,
c9503e0f 3254 rhash_entries ? 0 : 512 * 1024);
22c047cc
ED
3255 memset(rt_hash_table, 0, (rt_hash_mask + 1) * sizeof(struct rt_hash_bucket));
3256 rt_hash_lock_init();
1da177e4
LT
3257
3258 ipv4_dst_ops.gc_thresh = (rt_hash_mask + 1);
3259 ip_rt_max_size = (rt_hash_mask + 1) * 16;
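	/* Hedged worked example: if alloc_large_system_hash() above settled
	 * on 2^20 buckets, rt_hash_mask + 1 == 1048576, giving a GC
	 * threshold of about one million cached routes and an
	 * ip_rt_max_size of about sixteen million.  The real figures depend
	 * on totalram_pages and any rhash_entries= override, so these
	 * numbers are purely illustrative.
	 */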
3260
1da177e4
LT
3261 devinet_init();
3262 ip_fib_init();
3263
73b38711 3264 if (ip_rt_proc_init())
107f1634 3265 printk(KERN_ERR "Unable to create route proc files\n");
1da177e4
LT
3266#ifdef CONFIG_XFRM
3267 xfrm_init();
a33bc5c1 3268 xfrm4_init(ip_rt_max_size);
1da177e4 3269#endif
63f3444f
TG
3270 rtnl_register(PF_INET, RTM_GETROUTE, inet_rtm_getroute, NULL);
3271
39a23e75
DL
3272#ifdef CONFIG_SYSCTL
3273 register_pernet_subsys(&sysctl_route_ops);
3274#endif
3ee94372 3275 register_pernet_subsys(&rt_genid_ops);
1da177e4
LT
3276 return rc;
3277}
3278
a1bc6eb4 3279#ifdef CONFIG_SYSCTL
eeb61f71
AV
3280/*
3281 * We really need to sanitize the damn ipv4 init order, then all
3282 * this nonsense will go away.
3283 */
3284void __init ip_static_sysctl_init(void)
3285{
2f4520d3 3286 register_sysctl_paths(ipv4_path, ipv4_skeleton);
eeb61f71 3287}
a1bc6eb4 3288#endif