/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		ROUTE - implementation of the IP router.
 *
 * Authors:	Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *		Linus Torvalds, <Linus.Torvalds@helsinki.fi>
 *		Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 * Fixes:
 *		Alan Cox	:	Verify area fixes.
 *		Alan Cox	:	cli() protects routing changes
 *		Rui Oliveira	:	ICMP routing table updates
 *		(rco@di.uminho.pt)	Routing table insertion and update
 *		Linus Torvalds	:	Rewrote bits to be sensible
 *		Alan Cox	:	Added BSD route gw semantics
 *		Alan Cox	:	Super /proc >4K
 *		Alan Cox	:	MTU in route table
 *		Alan Cox	:	MSS actually. Also added the window
 *					clamper.
 *		Sam Lantinga	:	Fixed route matching in rt_del()
 *		Alan Cox	:	Routing cache support.
 *		Alan Cox	:	Removed compatibility cruft.
 *		Alan Cox	:	RTF_REJECT support.
 *		Alan Cox	:	TCP irtt support.
 *		Jonathan Naylor	:	Added Metric support.
 *	Miquel van Smoorenburg	:	BSD API fixes.
 *	Miquel van Smoorenburg	:	Metrics.
 *		Alan Cox	:	Use __u32 properly
 *		Alan Cox	:	Aligned routing errors more closely with BSD;
 *					our system is still very different.
 *		Alan Cox	:	Faster /proc handling
 *	Alexey Kuznetsov	:	Massive rework to support tree based routing,
 *					routing caches and better behaviour.
 *		Olaf Erb	:	irtt wasn't being copied right.
 *		Bjorn Ekwall	:	Kerneld route support.
 *		Alan Cox	:	Multicast fixed (I hope)
 *		Pavel Krauz	:	Limited broadcast fixed
 *		Mike McLagan	:	Routing by source
 *	Alexey Kuznetsov	:	End of old history. Split to fib.c and
 *					route.c and rewritten from scratch.
 *		Andi Kleen	:	Load-limit warning messages.
 *	Vitaly E. Lavrov	:	Transparent proxy revived after year coma.
 *	Vitaly E. Lavrov	:	Race condition in ip_route_input_slow.
 *	Tobias Ringstrom	:	Uninitialized res.type in ip_route_output_slow.
 *	Vladimir V. Ivanov	:	IP rule info (flowid) is really useful.
 *		Marc Boucher	:	routing by fwmark
 *	Robert Olsson		:	Added rt_cache statistics
 *	Arnaldo C. Melo		:	Convert proc stuff to seq_file
 *	Eric Dumazet		:	hashed spinlocks and rt_check_expire() fixes.
 *	Ilia Sotnikov		:	Ignore TOS on PMTUD and Redirect
 *	Ilia Sotnikov		:	Removed TOS from hash calculations
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 */
#define pr_fmt(fmt) "IPv4: " fmt

#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/bitops.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/errno.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/proc_fs.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/inetdevice.h>
#include <linux/igmp.h>
#include <linux/pkt_sched.h>
#include <linux/mroute.h>
#include <linux/netfilter_ipv4.h>
#include <linux/random.h>
#include <linux/rcupdate.h>
#include <linux/times.h>
#include <linux/slab.h>
#include <linux/jhash.h>
#include <net/dst.h>
#include <net/dst_metadata.h>
#include <net/net_namespace.h>
#include <net/protocol.h>
#include <net/ip.h>
#include <net/route.h>
#include <net/inetpeer.h>
#include <net/sock.h>
#include <net/ip_fib.h>
#include <net/arp.h>
#include <net/tcp.h>
#include <net/icmp.h>
#include <net/xfrm.h>
#include <net/lwtunnel.h>
#include <net/netevent.h>
#include <net/rtnetlink.h>
#ifdef CONFIG_SYSCTL
#include <linux/sysctl.h>
#endif
#include <net/secure_seq.h>
#include <net/ip_tunnels.h>
#include <net/l3mdev.h>

#include "fib_lookup.h"
#define RT_FL_TOS(oldflp4) \
	((oldflp4)->flowi4_tos & (IPTOS_RT_MASK | RTO_ONLINK))

#define RT_GC_TIMEOUT (300*HZ)

static int ip_rt_max_size;
static int ip_rt_redirect_number __read_mostly	= 9;
static int ip_rt_redirect_load __read_mostly	= HZ / 50;
static int ip_rt_redirect_silence __read_mostly	= ((HZ / 50) << (9 + 1));
static int ip_rt_error_cost __read_mostly	= HZ;
static int ip_rt_error_burst __read_mostly	= 5 * HZ;
static int ip_rt_mtu_expires __read_mostly	= 10 * 60 * HZ;
static u32 ip_rt_min_pmtu __read_mostly		= 512 + 20 + 20;
static int ip_rt_min_advmss __read_mostly	= 256;

static int ip_rt_gc_timeout __read_mostly	= RT_GC_TIMEOUT;
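/* The defaults above are exposed as tunables under
 * /proc/sys/net/ipv4/route/ via a sysctl table registered elsewhere in
 * this file.
 */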
/*
 *	Interface to generic destination cache.
 */

static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie);
static unsigned int	 ipv4_default_advmss(const struct dst_entry *dst);
static unsigned int	 ipv4_mtu(const struct dst_entry *dst);
static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst);
static void		 ipv4_link_failure(struct sk_buff *skb);
static void		 ip_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
					   struct sk_buff *skb, u32 mtu);
static void		 ip_do_redirect(struct dst_entry *dst, struct sock *sk,
					struct sk_buff *skb);
static void		 ipv4_dst_destroy(struct dst_entry *dst);

static u32 *ipv4_cow_metrics(struct dst_entry *dst, unsigned long old)
{
	WARN_ON(1);
	return NULL;
}

static struct neighbour *ipv4_neigh_lookup(const struct dst_entry *dst,
					   struct sk_buff *skb,
					   const void *daddr);
static void		 ipv4_confirm_neigh(const struct dst_entry *dst, const void *daddr);
static struct dst_ops ipv4_dst_ops = {
	.family =		AF_INET,
	.check =		ipv4_dst_check,
	.default_advmss =	ipv4_default_advmss,
	.mtu =			ipv4_mtu,
	.cow_metrics =		ipv4_cow_metrics,
	.destroy =		ipv4_dst_destroy,
	.negative_advice =	ipv4_negative_advice,
	.link_failure =		ipv4_link_failure,
	.update_pmtu =		ip_rt_update_pmtu,
	.redirect =		ip_do_redirect,
	.local_out =		__ip_local_out,
	.neigh_lookup =		ipv4_neigh_lookup,
	.confirm_neigh =	ipv4_confirm_neigh,
};
#define ECN_OR_COST(class)	TC_PRIO_##class

const __u8 ip_tos2prio[16] = {
	TC_PRIO_BESTEFFORT,
	ECN_OR_COST(BESTEFFORT),
	TC_PRIO_BESTEFFORT,
	ECN_OR_COST(BESTEFFORT),
	TC_PRIO_BULK,
	ECN_OR_COST(BULK),
	TC_PRIO_BULK,
	ECN_OR_COST(BULK),
	TC_PRIO_INTERACTIVE,
	ECN_OR_COST(INTERACTIVE),
	TC_PRIO_INTERACTIVE,
	ECN_OR_COST(INTERACTIVE),
	TC_PRIO_INTERACTIVE_BULK,
	ECN_OR_COST(INTERACTIVE_BULK),
	TC_PRIO_INTERACTIVE_BULK,
	ECN_OR_COST(INTERACTIVE_BULK)
};
EXPORT_SYMBOL(ip_tos2prio);

static DEFINE_PER_CPU(struct rt_cache_stat, rt_cache_stat);
#define RT_CACHE_STAT_INC(field) raw_cpu_inc(rt_cache_stat.field)
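/* Per-cpu route statistics, dumped through /proc/net/stat/rt_cache by
 * rt_cpu_seq_show() below; raw_cpu_inc() keeps the increment lock-free on
 * the packet fast path.
 */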
#ifdef CONFIG_PROC_FS
static void *rt_cache_seq_start(struct seq_file *seq, loff_t *pos)
{
	if (*pos)
		return NULL;
	return SEQ_START_TOKEN;
}

static void *rt_cache_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	++*pos;
	return NULL;
}

static void rt_cache_seq_stop(struct seq_file *seq, void *v)
{
}

static int rt_cache_seq_show(struct seq_file *seq, void *v)
{
	if (v == SEQ_START_TOKEN)
		seq_printf(seq, "%-127s\n",
			   "Iface\tDestination\tGateway \tFlags\t\tRefCnt\tUse\t"
			   "Metric\tSource\t\tMTU\tWindow\tIRTT\tTOS\tHHRef\t"
			   "HHUptod\tSpecDst");
	return 0;
}

static const struct seq_operations rt_cache_seq_ops = {
	.start	= rt_cache_seq_start,
	.next	= rt_cache_seq_next,
	.stop	= rt_cache_seq_stop,
	.show	= rt_cache_seq_show,
};

static int rt_cache_seq_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &rt_cache_seq_ops);
}

static const struct file_operations rt_cache_seq_fops = {
	.open	 = rt_cache_seq_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = seq_release,
};

static void *rt_cpu_seq_start(struct seq_file *seq, loff_t *pos)
{
	int cpu;

	if (*pos == 0)
		return SEQ_START_TOKEN;

	for (cpu = *pos-1; cpu < nr_cpu_ids; ++cpu) {
		if (!cpu_possible(cpu))
			continue;
		*pos = cpu+1;
		return &per_cpu(rt_cache_stat, cpu);
	}
	return NULL;
}

static void *rt_cpu_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	int cpu;

	for (cpu = *pos; cpu < nr_cpu_ids; ++cpu) {
		if (!cpu_possible(cpu))
			continue;
		*pos = cpu+1;
		return &per_cpu(rt_cache_stat, cpu);
	}
	return NULL;
}

static void rt_cpu_seq_stop(struct seq_file *seq, void *v)
{
}

static int rt_cpu_seq_show(struct seq_file *seq, void *v)
{
	struct rt_cache_stat *st = v;

	if (v == SEQ_START_TOKEN) {
		seq_printf(seq, "entries  in_hit in_slow_tot in_slow_mc in_no_route in_brd in_martian_dst in_martian_src  out_hit out_slow_tot out_slow_mc  gc_total gc_ignored gc_goal_miss gc_dst_overflow in_hlist_search out_hlist_search\n");
		return 0;
	}

	seq_printf(seq, "%08x  %08x %08x %08x %08x %08x %08x %08x "
		   " %08x %08x %08x %08x %08x %08x %08x %08x %08x \n",
		   dst_entries_get_slow(&ipv4_dst_ops),
		   0, /* st->in_hit */
		   st->in_slow_tot,
		   st->in_slow_mc,
		   st->in_no_route,
		   st->in_brd,
		   st->in_martian_dst,
		   st->in_martian_src,
		   0, /* st->out_hit */
		   st->out_slow_tot,
		   st->out_slow_mc,
		   0, /* st->gc_total */
		   0, /* st->gc_ignored */
		   0, /* st->gc_goal_miss */
		   0, /* st->gc_dst_overflow */
		   0, /* st->in_hlist_search */
		   0  /* st->out_hlist_search */
		);
	return 0;
}

static const struct seq_operations rt_cpu_seq_ops = {
	.start	= rt_cpu_seq_start,
	.next	= rt_cpu_seq_next,
	.stop	= rt_cpu_seq_stop,
	.show	= rt_cpu_seq_show,
};

static int rt_cpu_seq_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &rt_cpu_seq_ops);
}

static const struct file_operations rt_cpu_seq_fops = {
	.open	 = rt_cpu_seq_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = seq_release,
};
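/* /proc/net/rt_cache prints only a header line these days: the IPv4 routing
 * cache itself was removed in Linux 3.6, so there are no per-route entries
 * left to dump, but the file is kept for userspace compatibility.
 */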
#ifdef CONFIG_IP_ROUTE_CLASSID
static int rt_acct_proc_show(struct seq_file *m, void *v)
{
	struct ip_rt_acct *dst, *src;
	unsigned int i, j;

	dst = kcalloc(256, sizeof(struct ip_rt_acct), GFP_KERNEL);
	if (!dst)
		return -ENOMEM;

	for_each_possible_cpu(i) {
		src = (struct ip_rt_acct *)per_cpu_ptr(ip_rt_acct, i);
		for (j = 0; j < 256; j++) {
			dst[j].o_bytes   += src[j].o_bytes;
			dst[j].o_packets += src[j].o_packets;
			dst[j].i_bytes   += src[j].i_bytes;
			dst[j].i_packets += src[j].i_packets;
		}
	}

	seq_write(m, dst, 256 * sizeof(struct ip_rt_acct));
	kfree(dst);
	return 0;
}
#endif

static int __net_init ip_rt_do_proc_init(struct net *net)
{
	struct proc_dir_entry *pde;

	pde = proc_create("rt_cache", 0444, net->proc_net,
			  &rt_cache_seq_fops);
	if (!pde)
		goto err1;

	pde = proc_create("rt_cache", 0444,
			  net->proc_net_stat, &rt_cpu_seq_fops);
	if (!pde)
		goto err2;

#ifdef CONFIG_IP_ROUTE_CLASSID
	pde = proc_create_single("rt_acct", 0, net->proc_net,
				 rt_acct_proc_show);
	if (!pde)
		goto err3;
#endif
	return 0;

#ifdef CONFIG_IP_ROUTE_CLASSID
err3:
	remove_proc_entry("rt_cache", net->proc_net_stat);
#endif
err2:
	remove_proc_entry("rt_cache", net->proc_net);
err1:
	return -ENOMEM;
}

static void __net_exit ip_rt_do_proc_exit(struct net *net)
{
	remove_proc_entry("rt_cache", net->proc_net_stat);
	remove_proc_entry("rt_cache", net->proc_net);
#ifdef CONFIG_IP_ROUTE_CLASSID
	remove_proc_entry("rt_acct", net->proc_net);
#endif
}

static struct pernet_operations ip_rt_proc_ops __net_initdata = {
	.init = ip_rt_do_proc_init,
	.exit = ip_rt_do_proc_exit,
};

static int __init ip_rt_proc_init(void)
{
	return register_pernet_subsys(&ip_rt_proc_ops);
}
#else
static inline int ip_rt_proc_init(void)
{
	return 0;
}
#endif /* CONFIG_PROC_FS */
static inline bool rt_is_expired(const struct rtable *rth)
{
	return rth->rt_genid != rt_genid_ipv4(dev_net(rth->dst.dev));
}

void rt_cache_flush(struct net *net)
{
	rt_genid_bump_ipv4(net);
}

static struct neighbour *ipv4_neigh_lookup(const struct dst_entry *dst,
					   struct sk_buff *skb,
					   const void *daddr)
{
	struct net_device *dev = dst->dev;
	const __be32 *pkey = daddr;
	const struct rtable *rt;
	struct neighbour *n;

	rt = (const struct rtable *) dst;
	if (rt->rt_gateway)
		pkey = (const __be32 *) &rt->rt_gateway;
	else if (skb)
		pkey = &ip_hdr(skb)->daddr;

	n = __ipv4_neigh_lookup(dev, *(__force u32 *)pkey);
	if (n)
		return n;
	return neigh_create(&arp_tbl, pkey, dev);
}

static void ipv4_confirm_neigh(const struct dst_entry *dst, const void *daddr)
{
	struct net_device *dev = dst->dev;
	const __be32 *pkey = daddr;
	const struct rtable *rt;

	rt = (const struct rtable *)dst;
	if (rt->rt_gateway)
		pkey = (const __be32 *)&rt->rt_gateway;
	else if (!daddr ||
		 (rt->rt_flags &
		  (RTCF_MULTICAST | RTCF_BROADCAST | RTCF_LOCAL)))
		return;

	__ipv4_confirm_neigh(dev, *(__force u32 *)pkey);
}
#define IP_IDENTS_SZ 2048u

static atomic_t *ip_idents __read_mostly;
static u32 *ip_tstamps __read_mostly;

/* In order to protect privacy, we add a perturbation to identifiers
 * if one generator is seldom used. This makes it hard for an attacker
 * to infer how many packets were sent between two points in time.
 */
u32 ip_idents_reserve(u32 hash, int segs)
{
	u32 *p_tstamp = ip_tstamps + hash % IP_IDENTS_SZ;
	atomic_t *p_id = ip_idents + hash % IP_IDENTS_SZ;
	u32 old = READ_ONCE(*p_tstamp);
	u32 now = (u32)jiffies;
	u32 new, delta = 0;

	if (old != now && cmpxchg(p_tstamp, old, now) == old)
		delta = prandom_u32_max(now - old);

	/* Do not use atomic_add_return() as it makes UBSAN unhappy */
	do {
		old = (u32)atomic_read(p_id);
		new = old + delta + segs;
	} while (atomic_cmpxchg(p_id, old, new) != old);

	return new - segs;
}
EXPORT_SYMBOL(ip_idents_reserve);

void __ip_select_ident(struct net *net, struct iphdr *iph, int segs)
{
	static u32 ip_idents_hashrnd __read_mostly;
	u32 hash, id;

	net_get_random_once(&ip_idents_hashrnd, sizeof(ip_idents_hashrnd));

	hash = jhash_3words((__force u32)iph->daddr,
			    (__force u32)iph->saddr,
			    iph->protocol ^ net_hash_mix(net),
			    ip_idents_hashrnd);
	id = ip_idents_reserve(hash, segs);
	iph->id = htons(id);
}
EXPORT_SYMBOL(__ip_select_ident);
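/* Example: a GSO super-packet that will be split into "segs" segments
 * reserves "segs" consecutive IDs from the generator selected by the
 * (daddr, saddr, protocol) hash, so every resulting on-wire packet still
 * carries a distinct IP ID.
 */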
static void __build_flow_key(const struct net *net, struct flowi4 *fl4,
			     const struct sock *sk,
			     const struct iphdr *iph,
			     int oif, u8 tos,
			     u8 prot, u32 mark, int flow_flags)
{
	if (sk) {
		const struct inet_sock *inet = inet_sk(sk);

		oif = sk->sk_bound_dev_if;
		mark = sk->sk_mark;
		tos = RT_CONN_FLAGS(sk);
		prot = inet->hdrincl ? IPPROTO_RAW : sk->sk_protocol;
	}
	flowi4_init_output(fl4, oif, mark, tos,
			   RT_SCOPE_UNIVERSE, prot,
			   flow_flags,
			   iph->daddr, iph->saddr, 0, 0,
			   sock_net_uid(net, sk));
}

static void build_skb_flow_key(struct flowi4 *fl4, const struct sk_buff *skb,
			       const struct sock *sk)
{
	const struct net *net = dev_net(skb->dev);
	const struct iphdr *iph = ip_hdr(skb);
	int oif = skb->dev->ifindex;
	u8 tos = RT_TOS(iph->tos);
	u8 prot = iph->protocol;
	u32 mark = skb->mark;

	__build_flow_key(net, fl4, sk, iph, oif, tos, prot, mark, 0);
}

static void build_sk_flow_key(struct flowi4 *fl4, const struct sock *sk)
{
	const struct inet_sock *inet = inet_sk(sk);
	const struct ip_options_rcu *inet_opt;
	__be32 daddr = inet->inet_daddr;

	rcu_read_lock();
	inet_opt = rcu_dereference(inet->inet_opt);
	if (inet_opt && inet_opt->opt.srr)
		daddr = inet_opt->opt.faddr;
	flowi4_init_output(fl4, sk->sk_bound_dev_if, sk->sk_mark,
			   RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE,
			   inet->hdrincl ? IPPROTO_RAW : sk->sk_protocol,
			   inet_sk_flowi_flags(sk),
			   daddr, inet->inet_saddr, 0, 0, sk->sk_uid);
	rcu_read_unlock();
}

static void ip_rt_build_flow_key(struct flowi4 *fl4, const struct sock *sk,
				 const struct sk_buff *skb)
{
	if (skb)
		build_skb_flow_key(fl4, skb, sk);
	else
		build_sk_flow_key(fl4, sk);
}
static DEFINE_SPINLOCK(fnhe_lock);

static void fnhe_flush_routes(struct fib_nh_exception *fnhe)
{
	struct rtable *rt;

	rt = rcu_dereference(fnhe->fnhe_rth_input);
	if (rt) {
		RCU_INIT_POINTER(fnhe->fnhe_rth_input, NULL);
		dst_dev_put(&rt->dst);
		dst_release(&rt->dst);
	}
	rt = rcu_dereference(fnhe->fnhe_rth_output);
	if (rt) {
		RCU_INIT_POINTER(fnhe->fnhe_rth_output, NULL);
		dst_dev_put(&rt->dst);
		dst_release(&rt->dst);
	}
}

static struct fib_nh_exception *fnhe_oldest(struct fnhe_hash_bucket *hash)
{
	struct fib_nh_exception *fnhe, *oldest;

	oldest = rcu_dereference(hash->chain);
	for (fnhe = rcu_dereference(oldest->fnhe_next); fnhe;
	     fnhe = rcu_dereference(fnhe->fnhe_next)) {
		if (time_before(fnhe->fnhe_stamp, oldest->fnhe_stamp))
			oldest = fnhe;
	}
	fnhe_flush_routes(oldest);
	return oldest;
}

static inline u32 fnhe_hashfun(__be32 daddr)
{
	static u32 fnhe_hashrnd __read_mostly;
	u32 hval;

	net_get_random_once(&fnhe_hashrnd, sizeof(fnhe_hashrnd));
	hval = jhash_1word((__force u32) daddr, fnhe_hashrnd);
	return hash_32(hval, FNHE_HASH_SHIFT);
}

static void fill_route_from_fnhe(struct rtable *rt, struct fib_nh_exception *fnhe)
{
	rt->rt_pmtu = fnhe->fnhe_pmtu;
	rt->rt_mtu_locked = fnhe->fnhe_mtu_locked;
	rt->dst.expires = fnhe->fnhe_expires;

	if (fnhe->fnhe_gw) {
		rt->rt_flags |= RTCF_REDIRECTED;
		rt->rt_gateway = fnhe->fnhe_gw;
		rt->rt_uses_gateway = 1;
	}
}
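/* A fib_nh_exception ("fnhe") records per-destination state learned from
 * ICMP - a redirect gateway or a path MTU - and lives in a small per-nexthop
 * hash table keyed by destination address. update_or_create_fnhe() below
 * updates an existing entry or allocates a new one, recycling the oldest
 * entry when a chain grows too deep.
 */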
static void update_or_create_fnhe(struct fib_nh *nh, __be32 daddr, __be32 gw,
				  u32 pmtu, bool lock, unsigned long expires)
{
	struct fnhe_hash_bucket *hash;
	struct fib_nh_exception *fnhe;
	struct rtable *rt;
	u32 genid, hval;
	unsigned int i;
	int depth;

	genid = fnhe_genid(dev_net(nh->nh_dev));
	hval = fnhe_hashfun(daddr);

	spin_lock_bh(&fnhe_lock);

	hash = rcu_dereference(nh->nh_exceptions);
	if (!hash) {
		hash = kcalloc(FNHE_HASH_SIZE, sizeof(*hash), GFP_ATOMIC);
		if (!hash)
			goto out_unlock;
		rcu_assign_pointer(nh->nh_exceptions, hash);
	}

	hash += hval;

	depth = 0;
	for (fnhe = rcu_dereference(hash->chain); fnhe;
	     fnhe = rcu_dereference(fnhe->fnhe_next)) {
		if (fnhe->fnhe_daddr == daddr)
			break;
		depth++;
	}

	if (fnhe) {
		if (fnhe->fnhe_genid != genid)
			fnhe->fnhe_genid = genid;
		if (gw)
			fnhe->fnhe_gw = gw;
		if (pmtu) {
			fnhe->fnhe_pmtu = pmtu;
			fnhe->fnhe_mtu_locked = lock;
		}
		fnhe->fnhe_expires = max(1UL, expires);
		/* Update all cached dsts too */
		rt = rcu_dereference(fnhe->fnhe_rth_input);
		if (rt)
			fill_route_from_fnhe(rt, fnhe);
		rt = rcu_dereference(fnhe->fnhe_rth_output);
		if (rt)
			fill_route_from_fnhe(rt, fnhe);
	} else {
		if (depth > FNHE_RECLAIM_DEPTH)
			fnhe = fnhe_oldest(hash);
		else {
			fnhe = kzalloc(sizeof(*fnhe), GFP_ATOMIC);
			if (!fnhe)
				goto out_unlock;

			fnhe->fnhe_next = hash->chain;
			rcu_assign_pointer(hash->chain, fnhe);
		}
		fnhe->fnhe_genid = genid;
		fnhe->fnhe_daddr = daddr;
		fnhe->fnhe_gw = gw;
		fnhe->fnhe_pmtu = pmtu;
		fnhe->fnhe_mtu_locked = lock;
		fnhe->fnhe_expires = max(1UL, expires);

		/* Exception created; mark the cached routes for the nexthop
		 * stale, so anyone caching it rechecks if this exception
		 * applies to them.
		 */
		rt = rcu_dereference(nh->nh_rth_input);
		if (rt)
			rt->dst.obsolete = DST_OBSOLETE_KILL;

		for_each_possible_cpu(i) {
			struct rtable __rcu **prt;

			prt = per_cpu_ptr(nh->nh_pcpu_rth_output, i);
			rt = rcu_dereference(*prt);
			if (rt)
				rt->dst.obsolete = DST_OBSOLETE_KILL;
		}
	}

	fnhe->fnhe_stamp = jiffies;

out_unlock:
	spin_unlock_bh(&fnhe_lock);
}
static void __ip_do_redirect(struct rtable *rt, struct sk_buff *skb, struct flowi4 *fl4,
			     bool kill_route)
{
	__be32 new_gw = icmp_hdr(skb)->un.gateway;
	__be32 old_gw = ip_hdr(skb)->saddr;
	struct net_device *dev = skb->dev;
	struct in_device *in_dev;
	struct fib_result res;
	struct neighbour *n;
	struct net *net;

	switch (icmp_hdr(skb)->code & 7) {
	case ICMP_REDIR_NET:
	case ICMP_REDIR_NETTOS:
	case ICMP_REDIR_HOST:
	case ICMP_REDIR_HOSTTOS:
		break;

	default:
		return;
	}

	if (rt->rt_gateway != old_gw)
		return;

	in_dev = __in_dev_get_rcu(dev);
	if (!in_dev)
		return;

	net = dev_net(dev);
	if (new_gw == old_gw || !IN_DEV_RX_REDIRECTS(in_dev) ||
	    ipv4_is_multicast(new_gw) || ipv4_is_lbcast(new_gw) ||
	    ipv4_is_zeronet(new_gw))
		goto reject_redirect;

	if (!IN_DEV_SHARED_MEDIA(in_dev)) {
		if (!inet_addr_onlink(in_dev, new_gw, old_gw))
			goto reject_redirect;
		if (IN_DEV_SEC_REDIRECTS(in_dev) && ip_fib_check_default(new_gw, dev))
			goto reject_redirect;
	} else {
		if (inet_addr_type(net, new_gw) != RTN_UNICAST)
			goto reject_redirect;
	}

	n = __ipv4_neigh_lookup(rt->dst.dev, new_gw);
	if (!n)
		n = neigh_create(&arp_tbl, &new_gw, rt->dst.dev);
	if (!IS_ERR(n)) {
		if (!(n->nud_state & NUD_VALID)) {
			neigh_event_send(n, NULL);
		} else {
			if (fib_lookup(net, fl4, &res, 0) == 0) {
				struct fib_nh *nh = &FIB_RES_NH(res);

				update_or_create_fnhe(nh, fl4->daddr, new_gw,
						      0, false,
						      jiffies + ip_rt_gc_timeout);
			}
			if (kill_route)
				rt->dst.obsolete = DST_OBSOLETE_KILL;
			call_netevent_notifiers(NETEVENT_NEIGH_UPDATE, n);
		}
		neigh_release(n);
	}
	return;

reject_redirect:
#ifdef CONFIG_IP_ROUTE_VERBOSE
	if (IN_DEV_LOG_MARTIANS(in_dev)) {
		const struct iphdr *iph = (const struct iphdr *) skb->data;
		__be32 daddr = iph->daddr;
		__be32 saddr = iph->saddr;

		net_info_ratelimited("Redirect from %pI4 on %s about %pI4 ignored\n"
				     "  Advised path = %pI4 -> %pI4\n",
				     &old_gw, dev->name, &new_gw,
				     &saddr, &daddr);
	}
#endif
	;
}
static void ip_do_redirect(struct dst_entry *dst, struct sock *sk, struct sk_buff *skb)
{
	struct rtable *rt;
	struct flowi4 fl4;
	const struct iphdr *iph = (const struct iphdr *) skb->data;
	struct net *net = dev_net(skb->dev);
	int oif = skb->dev->ifindex;
	u8 tos = RT_TOS(iph->tos);
	u8 prot = iph->protocol;
	u32 mark = skb->mark;

	rt = (struct rtable *) dst;

	__build_flow_key(net, &fl4, sk, iph, oif, tos, prot, mark, 0);
	__ip_do_redirect(rt, skb, &fl4, true);
}

static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst)
{
	struct rtable *rt = (struct rtable *)dst;
	struct dst_entry *ret = dst;

	if (rt) {
		if (dst->obsolete > 0) {
			ip_rt_put(rt);
			ret = NULL;
		} else if ((rt->rt_flags & RTCF_REDIRECTED) ||
			   rt->dst.expires) {
			ip_rt_put(rt);
			ret = NULL;
		}
	}
	return ret;
}
/*
 * Algorithm:
 *	1. The first ip_rt_redirect_number redirects are sent
 *	   with exponential backoff, then we stop sending them at all,
 *	   assuming that the host ignores our redirects.
 *	2. If we did not see packets requiring redirects
 *	   during ip_rt_redirect_silence, we assume that the host
 *	   forgot the redirected route and start to send redirects again.
 *
 * This algorithm is much cheaper and more intelligent than dumb load limiting
 * in icmp.c.
 *
 * NOTE. Do not forget to inhibit load limiting for redirects (redundant)
 * and "frag. need" (breaks PMTU discovery) in icmp.c.
 */
void ip_rt_send_redirect(struct sk_buff *skb)
{
	struct rtable *rt = skb_rtable(skb);
	struct in_device *in_dev;
	struct inet_peer *peer;
	struct net *net;
	int log_martians;
	int vif;

	rcu_read_lock();
	in_dev = __in_dev_get_rcu(rt->dst.dev);
	if (!in_dev || !IN_DEV_TX_REDIRECTS(in_dev)) {
		rcu_read_unlock();
		return;
	}
	log_martians = IN_DEV_LOG_MARTIANS(in_dev);
	vif = l3mdev_master_ifindex_rcu(rt->dst.dev);
	rcu_read_unlock();

	net = dev_net(rt->dst.dev);
	peer = inet_getpeer_v4(net->ipv4.peers, ip_hdr(skb)->saddr, vif, 1);
	if (!peer) {
		icmp_send(skb, ICMP_REDIRECT, ICMP_REDIR_HOST,
			  rt_nexthop(rt, ip_hdr(skb)->daddr));
		return;
	}

	/* No redirected packets during ip_rt_redirect_silence;
	 * reset the algorithm.
	 */
	if (time_after(jiffies, peer->rate_last + ip_rt_redirect_silence)) {
		peer->rate_tokens = 0;
		peer->n_redirects = 0;
	}

	/* Too many ignored redirects; do not send anything
	 * set dst.rate_last to the last seen redirected packet.
	 */
	if (peer->n_redirects >= ip_rt_redirect_number) {
		peer->rate_last = jiffies;
		goto out_put_peer;
	}

	/* Check for load limit; set rate_last to the latest sent
	 * redirect.
	 */
	if (peer->rate_tokens == 0 ||
	    time_after(jiffies,
		       (peer->rate_last +
			(ip_rt_redirect_load << peer->rate_tokens)))) {
		__be32 gw = rt_nexthop(rt, ip_hdr(skb)->daddr);

		icmp_send(skb, ICMP_REDIRECT, ICMP_REDIR_HOST, gw);
		peer->rate_last = jiffies;
		++peer->rate_tokens;
		++peer->n_redirects;
#ifdef CONFIG_IP_ROUTE_VERBOSE
		if (log_martians &&
		    peer->rate_tokens == ip_rt_redirect_number)
			net_warn_ratelimited("host %pI4/if%d ignores redirects for %pI4 to %pI4\n",
					     &ip_hdr(skb)->saddr, inet_iif(skb),
					     &ip_hdr(skb)->daddr, &gw);
#endif
	}
out_put_peer:
	inet_putpeer(peer);
}
static int ip_error(struct sk_buff *skb)
{
	struct rtable *rt = skb_rtable(skb);
	struct net_device *dev = skb->dev;
	struct in_device *in_dev;
	struct inet_peer *peer;
	unsigned long now;
	struct net *net;
	bool send;
	int code;

	if (netif_is_l3_master(skb->dev)) {
		dev = __dev_get_by_index(dev_net(skb->dev), IPCB(skb)->iif);
		if (!dev)
			goto out;
	}

	in_dev = __in_dev_get_rcu(dev);

	/* IP on this device is disabled. */
	if (!in_dev)
		goto out;

	net = dev_net(rt->dst.dev);
	if (!IN_DEV_FORWARD(in_dev)) {
		switch (rt->dst.error) {
		case EHOSTUNREACH:
			__IP_INC_STATS(net, IPSTATS_MIB_INADDRERRORS);
			break;

		case ENETUNREACH:
			__IP_INC_STATS(net, IPSTATS_MIB_INNOROUTES);
			break;
		}
		goto out;
	}

	switch (rt->dst.error) {
	case EINVAL:
	default:
		goto out;
	case EHOSTUNREACH:
		code = ICMP_HOST_UNREACH;
		break;
	case ENETUNREACH:
		code = ICMP_NET_UNREACH;
		__IP_INC_STATS(net, IPSTATS_MIB_INNOROUTES);
		break;
	case EACCES:
		code = ICMP_PKT_FILTERED;
		break;
	}

	peer = inet_getpeer_v4(net->ipv4.peers, ip_hdr(skb)->saddr,
			       l3mdev_master_ifindex(skb->dev), 1);

	send = true;
	if (peer) {
		now = jiffies;
		peer->rate_tokens += now - peer->rate_last;
		if (peer->rate_tokens > ip_rt_error_burst)
			peer->rate_tokens = ip_rt_error_burst;
		peer->rate_last = now;
		if (peer->rate_tokens >= ip_rt_error_cost)
			peer->rate_tokens -= ip_rt_error_cost;
		else
			send = false;
		inet_putpeer(peer);
	}
	if (send)
		icmp_send(skb, ICMP_DEST_UNREACH, code, 0);

out:	kfree_skb(skb);
	return 0;
}
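/* Path MTU discovery: a learned MTU is stored as a nexthop exception via
 * update_or_create_fnhe() and expires after ip_rt_mtu_expires (10 minutes
 * by default); values below ip_rt_min_pmtu are clamped and locked.
 */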
static void __ip_rt_update_pmtu(struct rtable *rt, struct flowi4 *fl4, u32 mtu)
{
	struct dst_entry *dst = &rt->dst;
	u32 old_mtu = ipv4_mtu(dst);
	struct fib_result res;
	bool lock = false;

	if (ip_mtu_locked(dst))
		return;

	if (old_mtu < mtu)
		return;

	if (mtu < ip_rt_min_pmtu) {
		lock = true;
		mtu = min(old_mtu, ip_rt_min_pmtu);
	}

	if (rt->rt_pmtu == mtu && !lock &&
	    time_before(jiffies, dst->expires - ip_rt_mtu_expires / 2))
		return;

	rcu_read_lock();
	if (fib_lookup(dev_net(dst->dev), fl4, &res, 0) == 0) {
		struct fib_nh *nh = &FIB_RES_NH(res);

		update_or_create_fnhe(nh, fl4->daddr, 0, mtu, lock,
				      jiffies + ip_rt_mtu_expires);
	}
	rcu_read_unlock();
}
static void ip_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
			      struct sk_buff *skb, u32 mtu)
{
	struct rtable *rt = (struct rtable *) dst;
	struct flowi4 fl4;

	ip_rt_build_flow_key(&fl4, sk, skb);
	__ip_rt_update_pmtu(rt, &fl4, mtu);
}

void ipv4_update_pmtu(struct sk_buff *skb, struct net *net, u32 mtu,
		      int oif, u8 protocol)
{
	const struct iphdr *iph = (const struct iphdr *) skb->data;
	struct flowi4 fl4;
	struct rtable *rt;
	u32 mark = IP4_REPLY_MARK(net, skb->mark);

	__build_flow_key(net, &fl4, NULL, iph, oif,
			 RT_TOS(iph->tos), protocol, mark, 0);
	rt = __ip_route_output_key(net, &fl4);
	if (!IS_ERR(rt)) {
		__ip_rt_update_pmtu(rt, &fl4, mtu);
		ip_rt_put(rt);
	}
}
EXPORT_SYMBOL_GPL(ipv4_update_pmtu);
static void __ipv4_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, u32 mtu)
{
	const struct iphdr *iph = (const struct iphdr *) skb->data;
	struct flowi4 fl4;
	struct rtable *rt;

	__build_flow_key(sock_net(sk), &fl4, sk, iph, 0, 0, 0, 0, 0);

	if (!fl4.flowi4_mark)
		fl4.flowi4_mark = IP4_REPLY_MARK(sock_net(sk), skb->mark);

	rt = __ip_route_output_key(sock_net(sk), &fl4);
	if (!IS_ERR(rt)) {
		__ip_rt_update_pmtu(rt, &fl4, mtu);
		ip_rt_put(rt);
	}
}

void ipv4_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, u32 mtu)
{
	const struct iphdr *iph = (const struct iphdr *) skb->data;
	struct flowi4 fl4;
	struct rtable *rt;
	struct dst_entry *odst = NULL;
	bool new = false;
	struct net *net = sock_net(sk);

	bh_lock_sock(sk);

	if (!ip_sk_accept_pmtu(sk))
		goto out;

	odst = sk_dst_get(sk);

	if (sock_owned_by_user(sk) || !odst) {
		__ipv4_sk_update_pmtu(skb, sk, mtu);
		goto out;
	}

	__build_flow_key(net, &fl4, sk, iph, 0, 0, 0, 0, 0);

	rt = (struct rtable *)odst;
	if (odst->obsolete && !odst->ops->check(odst, 0)) {
		rt = ip_route_output_flow(sock_net(sk), &fl4, sk);
		if (IS_ERR(rt))
			goto out;

		new = true;
	}

	__ip_rt_update_pmtu((struct rtable *) xfrm_dst_path(&rt->dst), &fl4, mtu);

	if (!dst_check(&rt->dst, 0)) {
		if (new)
			dst_release(&rt->dst);

		rt = ip_route_output_flow(sock_net(sk), &fl4, sk);
		if (IS_ERR(rt))
			goto out;

		new = true;
	}

	if (new)
		sk_dst_set(sk, &rt->dst);

out:
	bh_unlock_sock(sk);
	dst_release(odst);
}
EXPORT_SYMBOL_GPL(ipv4_sk_update_pmtu);
void ipv4_redirect(struct sk_buff *skb, struct net *net,
		   int oif, u8 protocol)
{
	const struct iphdr *iph = (const struct iphdr *) skb->data;
	struct flowi4 fl4;
	struct rtable *rt;

	__build_flow_key(net, &fl4, NULL, iph, oif,
			 RT_TOS(iph->tos), protocol, 0, 0);
	rt = __ip_route_output_key(net, &fl4);
	if (!IS_ERR(rt)) {
		__ip_do_redirect(rt, skb, &fl4, false);
		ip_rt_put(rt);
	}
}
EXPORT_SYMBOL_GPL(ipv4_redirect);

void ipv4_sk_redirect(struct sk_buff *skb, struct sock *sk)
{
	const struct iphdr *iph = (const struct iphdr *) skb->data;
	struct flowi4 fl4;
	struct rtable *rt;
	struct net *net = sock_net(sk);

	__build_flow_key(net, &fl4, sk, iph, 0, 0, 0, 0, 0);
	rt = __ip_route_output_key(net, &fl4);
	if (!IS_ERR(rt)) {
		__ip_do_redirect(rt, skb, &fl4, false);
		ip_rt_put(rt);
	}
}
EXPORT_SYMBOL_GPL(ipv4_sk_redirect);
static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie)
{
	struct rtable *rt = (struct rtable *) dst;

	/* All IPV4 dsts are created with ->obsolete set to the value
	 * DST_OBSOLETE_FORCE_CHK which forces validation calls down
	 * into this function always.
	 *
	 * When a PMTU/redirect information update invalidates a route,
	 * this is indicated by setting obsolete to DST_OBSOLETE_KILL or
	 * DST_OBSOLETE_DEAD by dst_free().
	 */
	if (dst->obsolete != DST_OBSOLETE_FORCE_CHK || rt_is_expired(rt))
		return NULL;
	return dst;
}

static void ipv4_link_failure(struct sk_buff *skb)
{
	struct rtable *rt;

	icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_UNREACH, 0);

	rt = skb_rtable(skb);
	if (rt)
		dst_set_expires(&rt->dst, 0);
}

static int ip_rt_bug(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	pr_debug("%s: %pI4 -> %pI4, %s\n",
		 __func__, &ip_hdr(skb)->saddr, &ip_hdr(skb)->daddr,
		 skb->dev ? skb->dev->name : "?");
	kfree_skb(skb);
	WARN_ON(1);
	return 0;
}
/*
 * We do not cache the source address of the outgoing interface,
 * because it is used only by IP RR, TS and SRR options,
 * so that it is out of the fast path.
 *
 * BTW remember: "addr" is allowed to be not aligned
 * in IP options!
 */

void ip_rt_get_source(u8 *addr, struct sk_buff *skb, struct rtable *rt)
{
	__be32 src;

	if (rt_is_output_route(rt))
		src = ip_hdr(skb)->saddr;
	else {
		struct fib_result res;
		struct iphdr *iph = ip_hdr(skb);
		struct flowi4 fl4 = {
			.daddr = iph->daddr,
			.saddr = iph->saddr,
			.flowi4_tos = RT_TOS(iph->tos),
			.flowi4_oif = rt->dst.dev->ifindex,
			.flowi4_iif = skb->dev->ifindex,
			.flowi4_mark = skb->mark,
		};

		rcu_read_lock();
		if (fib_lookup(dev_net(rt->dst.dev), &fl4, &res, 0) == 0)
			src = FIB_RES_PREFSRC(dev_net(rt->dst.dev), res);
		else
			src = inet_select_addr(rt->dst.dev,
					       rt_nexthop(rt, iph->daddr),
					       RT_SCOPE_UNIVERSE);
		rcu_read_unlock();
	}
	memcpy(addr, &src, 4);
}
#ifdef CONFIG_IP_ROUTE_CLASSID
static void set_class_tag(struct rtable *rt, u32 tag)
{
	if (!(rt->dst.tclassid & 0xFFFF))
		rt->dst.tclassid |= tag & 0xFFFF;
	if (!(rt->dst.tclassid & 0xFFFF0000))
		rt->dst.tclassid |= tag & 0xFFFF0000;
}
#endif
static unsigned int ipv4_default_advmss(const struct dst_entry *dst)
{
	unsigned int header_size = sizeof(struct tcphdr) + sizeof(struct iphdr);
	unsigned int advmss = max_t(unsigned int, ipv4_mtu(dst) - header_size,
				    ip_rt_min_advmss);

	return min(advmss, IPV4_MAX_PMTU - header_size);
}

static unsigned int ipv4_mtu(const struct dst_entry *dst)
{
	const struct rtable *rt = (const struct rtable *) dst;
	unsigned int mtu = rt->rt_pmtu;

	if (!mtu || time_after_eq(jiffies, rt->dst.expires))
		mtu = dst_metric_raw(dst, RTAX_MTU);

	if (mtu)
		goto out;

	mtu = READ_ONCE(dst->dev->mtu);

	if (unlikely(ip_mtu_locked(dst))) {
		if (rt->rt_uses_gateway && mtu > 576)
			mtu = 576;
	}

out:
	mtu = min_t(unsigned int, mtu, IP_MAX_MTU);

	return mtu - lwtunnel_headroom(dst->lwtstate, mtu);
}
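/* Nexthop exceptions are unlinked lazily: find_exception() below calls
 * ip_del_fnhe() when it encounters an entry whose fnhe_expires has passed.
 */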
static void ip_del_fnhe(struct fib_nh *nh, __be32 daddr)
{
	struct fnhe_hash_bucket *hash;
	struct fib_nh_exception *fnhe, __rcu **fnhe_p;
	u32 hval = fnhe_hashfun(daddr);

	spin_lock_bh(&fnhe_lock);

	hash = rcu_dereference_protected(nh->nh_exceptions,
					 lockdep_is_held(&fnhe_lock));
	hash += hval;

	fnhe_p = &hash->chain;
	fnhe = rcu_dereference_protected(*fnhe_p, lockdep_is_held(&fnhe_lock));
	while (fnhe) {
		if (fnhe->fnhe_daddr == daddr) {
			rcu_assign_pointer(*fnhe_p, rcu_dereference_protected(
				fnhe->fnhe_next, lockdep_is_held(&fnhe_lock)));
			/* set fnhe_daddr to 0 to ensure it won't bind with
			 * new dsts in rt_bind_exception().
			 */
			fnhe->fnhe_daddr = 0;
			fnhe_flush_routes(fnhe);
			kfree_rcu(fnhe, rcu);
			break;
		}
		fnhe_p = &fnhe->fnhe_next;
		fnhe = rcu_dereference_protected(fnhe->fnhe_next,
						 lockdep_is_held(&fnhe_lock));
	}

	spin_unlock_bh(&fnhe_lock);
}
static struct fib_nh_exception *find_exception(struct fib_nh *nh, __be32 daddr)
{
	struct fnhe_hash_bucket *hash = rcu_dereference(nh->nh_exceptions);
	struct fib_nh_exception *fnhe;
	u32 hval;

	if (!hash)
		return NULL;

	hval = fnhe_hashfun(daddr);

	for (fnhe = rcu_dereference(hash[hval].chain); fnhe;
	     fnhe = rcu_dereference(fnhe->fnhe_next)) {
		if (fnhe->fnhe_daddr == daddr) {
			if (fnhe->fnhe_expires &&
			    time_after(jiffies, fnhe->fnhe_expires)) {
				ip_del_fnhe(nh, daddr);
				break;
			}
			return fnhe;
		}
	}
	return NULL;
}
/* MTU selection:
 * 1. mtu on route is locked - use it
 * 2. mtu from nexthop exception
 * 3. mtu from egress device
 */
u32 ip_mtu_from_fib_result(struct fib_result *res, __be32 daddr)
{
	struct fib_info *fi = res->fi;
	struct fib_nh *nh = &fi->fib_nh[res->nh_sel];
	struct net_device *dev = nh->nh_dev;
	u32 mtu = 0;

	if (dev_net(dev)->ipv4.sysctl_ip_fwd_use_pmtu ||
	    fi->fib_metrics->metrics[RTAX_LOCK - 1] & (1 << RTAX_MTU))
		mtu = fi->fib_mtu;

	if (likely(!mtu)) {
		struct fib_nh_exception *fnhe;

		fnhe = find_exception(nh, daddr);
		if (fnhe && !time_after_eq(jiffies, fnhe->fnhe_expires))
			mtu = fnhe->fnhe_pmtu;
	}

	if (likely(!mtu))
		mtu = min(READ_ONCE(dev->mtu), IP_MAX_MTU);

	return mtu - lwtunnel_headroom(nh->nh_lwtstate, mtu);
}
static bool rt_bind_exception(struct rtable *rt, struct fib_nh_exception *fnhe,
			      __be32 daddr, const bool do_cache)
{
	bool ret = false;

	spin_lock_bh(&fnhe_lock);

	if (daddr == fnhe->fnhe_daddr) {
		struct rtable __rcu **porig;
		struct rtable *orig;
		int genid = fnhe_genid(dev_net(rt->dst.dev));

		if (rt_is_input_route(rt))
			porig = &fnhe->fnhe_rth_input;
		else
			porig = &fnhe->fnhe_rth_output;
		orig = rcu_dereference(*porig);

		if (fnhe->fnhe_genid != genid) {
			fnhe->fnhe_genid = genid;
			fnhe->fnhe_gw = 0;
			fnhe->fnhe_pmtu = 0;
			fnhe->fnhe_expires = 0;
			fnhe->fnhe_mtu_locked = false;
			fnhe_flush_routes(fnhe);
			orig = NULL;
		}
		fill_route_from_fnhe(rt, fnhe);
		if (!rt->rt_gateway)
			rt->rt_gateway = daddr;

		if (do_cache) {
			dst_hold(&rt->dst);
			rcu_assign_pointer(*porig, rt);
			if (orig) {
				dst_dev_put(&orig->dst);
				dst_release(&orig->dst);
			}
			ret = true;
		}

		fnhe->fnhe_stamp = jiffies;
	}
	spin_unlock_bh(&fnhe_lock);

	return ret;
}
static bool rt_cache_route(struct fib_nh *nh, struct rtable *rt)
{
	struct rtable *orig, *prev, **p;
	bool ret = true;

	if (rt_is_input_route(rt)) {
		p = (struct rtable **)&nh->nh_rth_input;
	} else {
		p = (struct rtable **)raw_cpu_ptr(nh->nh_pcpu_rth_output);
	}
	orig = *p;

	/* hold dst before doing cmpxchg() to avoid race condition
	 * on this dst
	 */
	dst_hold(&rt->dst);
	prev = cmpxchg(p, orig, rt);
	if (prev == orig) {
		if (orig) {
			dst_dev_put(&orig->dst);
			dst_release(&orig->dst);
		}
	} else {
		dst_release(&rt->dst);
		ret = false;
	}

	return ret;
}
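/* Routes that cannot be cached on the nexthop (for example when caching is
 * disabled, or when the cmpxchg() in rt_cache_route() loses a race) are
 * tracked on a per-cpu "uncached" list so rt_flush_dev() can re-point them
 * at the loopback device when their device disappears.
 */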
struct uncached_list {
	spinlock_t		lock;
	struct list_head	head;
};

static DEFINE_PER_CPU_ALIGNED(struct uncached_list, rt_uncached_list);

void rt_add_uncached_list(struct rtable *rt)
{
	struct uncached_list *ul = raw_cpu_ptr(&rt_uncached_list);

	rt->rt_uncached_list = ul;

	spin_lock_bh(&ul->lock);
	list_add_tail(&rt->rt_uncached, &ul->head);
	spin_unlock_bh(&ul->lock);
}

void rt_del_uncached_list(struct rtable *rt)
{
	if (!list_empty(&rt->rt_uncached)) {
		struct uncached_list *ul = rt->rt_uncached_list;

		spin_lock_bh(&ul->lock);
		list_del(&rt->rt_uncached);
		spin_unlock_bh(&ul->lock);
	}
}
static void ipv4_dst_destroy(struct dst_entry *dst)
{
	struct rtable *rt = (struct rtable *)dst;

	ip_dst_metrics_put(dst);
	rt_del_uncached_list(rt);
}

void rt_flush_dev(struct net_device *dev)
{
	struct net *net = dev_net(dev);
	struct rtable *rt;
	int cpu;

	for_each_possible_cpu(cpu) {
		struct uncached_list *ul = &per_cpu(rt_uncached_list, cpu);

		spin_lock_bh(&ul->lock);
		list_for_each_entry(rt, &ul->head, rt_uncached) {
			if (rt->dst.dev != dev)
				continue;
			rt->dst.dev = net->loopback_dev;
			dev_hold(rt->dst.dev);
			dev_put(dev);
		}
		spin_unlock_bh(&ul->lock);
	}
}
static bool rt_cache_valid(const struct rtable *rt)
{
	return	rt &&
		rt->dst.obsolete == DST_OBSOLETE_FORCE_CHK &&
		!rt_is_expired(rt);
}
static void rt_set_nexthop(struct rtable *rt, __be32 daddr,
			   const struct fib_result *res,
			   struct fib_nh_exception *fnhe,
			   struct fib_info *fi, u16 type, u32 itag,
			   const bool do_cache)
{
	bool cached = false;

	if (fi) {
		struct fib_nh *nh = &FIB_RES_NH(*res);

		if (nh->nh_gw && nh->nh_scope == RT_SCOPE_LINK) {
			rt->rt_gateway = nh->nh_gw;
			rt->rt_uses_gateway = 1;
		}
		ip_dst_init_metrics(&rt->dst, fi->fib_metrics);

#ifdef CONFIG_IP_ROUTE_CLASSID
		rt->dst.tclassid = nh->nh_tclassid;
#endif
		rt->dst.lwtstate = lwtstate_get(nh->nh_lwtstate);
		if (unlikely(fnhe))
			cached = rt_bind_exception(rt, fnhe, daddr, do_cache);
		else if (do_cache)
			cached = rt_cache_route(nh, rt);
		if (unlikely(!cached)) {
			/* Routes we intend to cache in nexthop exception or
			 * FIB nexthop have the DST_NOCACHE bit clear.
			 * However, if we are unsuccessful at storing this
			 * route into the cache we really need to set it.
			 */
			if (!rt->rt_gateway)
				rt->rt_gateway = daddr;
			rt_add_uncached_list(rt);
		}
	} else
		rt_add_uncached_list(rt);

#ifdef CONFIG_IP_ROUTE_CLASSID
#ifdef CONFIG_IP_MULTIPLE_TABLES
	set_class_tag(rt, res->tclassid);
#endif
	set_class_tag(rt, itag);
#endif
}
struct rtable *rt_dst_alloc(struct net_device *dev,
			    unsigned int flags, u16 type,
			    bool nopolicy, bool noxfrm, bool will_cache)
{
	struct rtable *rt;

	rt = dst_alloc(&ipv4_dst_ops, dev, 1, DST_OBSOLETE_FORCE_CHK,
		       (will_cache ? 0 : DST_HOST) |
		       (nopolicy ? DST_NOPOLICY : 0) |
		       (noxfrm ? DST_NOXFRM : 0));

	if (rt) {
		rt->rt_genid = rt_genid_ipv4(dev_net(dev));
		rt->rt_flags = flags;
		rt->rt_type = type;
		rt->rt_is_input = 0;
		rt->rt_iif = 0;
		rt->rt_pmtu = 0;
		rt->rt_mtu_locked = 0;
		rt->rt_gateway = 0;
		rt->rt_uses_gateway = 0;
		INIT_LIST_HEAD(&rt->rt_uncached);

		rt->dst.output = ip_output;
		if (flags & RTCF_LOCAL)
			rt->dst.input = ip_local_deliver;
	}

	return rt;
}
EXPORT_SYMBOL(rt_dst_alloc);
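/* rt_dst_alloc() installs the common-case handlers (ip_output, plus
 * ip_local_deliver for RTCF_LOCAL routes); callers override dst.input and
 * dst.output for forwarding, multicast, error and blackhole routes.
 */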
/* called in rcu_read_lock() section */
int ip_mc_validate_source(struct sk_buff *skb, __be32 daddr, __be32 saddr,
			  u8 tos, struct net_device *dev,
			  struct in_device *in_dev, u32 *itag)
{
	int err;

	/* Primary sanity checks. */
	if (!in_dev)
		return -EINVAL;

	if (ipv4_is_multicast(saddr) || ipv4_is_lbcast(saddr) ||
	    skb->protocol != htons(ETH_P_IP))
		return -EINVAL;

	if (ipv4_is_loopback(saddr) && !IN_DEV_ROUTE_LOCALNET(in_dev))
		return -EINVAL;

	if (ipv4_is_zeronet(saddr)) {
		if (!ipv4_is_local_multicast(daddr) &&
		    ip_hdr(skb)->protocol != IPPROTO_IGMP)
			return -EINVAL;
	} else {
		err = fib_validate_source(skb, saddr, 0, tos, 0, dev,
					  in_dev, itag);
		if (err < 0)
			return err;
	}
	return 0;
}
/* called in rcu_read_lock() section */
static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr,
			     u8 tos, struct net_device *dev, int our)
{
	struct in_device *in_dev = __in_dev_get_rcu(dev);
	unsigned int flags = RTCF_MULTICAST;
	struct rtable *rth;
	u32 itag = 0;
	int err;

	err = ip_mc_validate_source(skb, daddr, saddr, tos, dev, in_dev, &itag);
	if (err)
		return err;

	if (our)
		flags |= RTCF_LOCAL;

	rth = rt_dst_alloc(dev_net(dev)->loopback_dev, flags, RTN_MULTICAST,
			   IN_DEV_CONF_GET(in_dev, NOPOLICY), false, false);
	if (!rth)
		return -ENOBUFS;

#ifdef CONFIG_IP_ROUTE_CLASSID
	rth->dst.tclassid = itag;
#endif
	rth->dst.output = ip_rt_bug;
	rth->rt_is_input = 1;

#ifdef CONFIG_IP_MROUTE
	if (!ipv4_is_local_multicast(daddr) && IN_DEV_MFORWARD(in_dev))
		rth->dst.input = ip_mr_input;
#endif
	RT_CACHE_STAT_INC(in_slow_mc);

	skb_dst_set(skb, &rth->dst);
	return 0;
}
static void ip_handle_martian_source(struct net_device *dev,
				     struct in_device *in_dev,
				     struct sk_buff *skb,
				     __be32 daddr,
				     __be32 saddr)
{
	RT_CACHE_STAT_INC(in_martian_src);
#ifdef CONFIG_IP_ROUTE_VERBOSE
	if (IN_DEV_LOG_MARTIANS(in_dev) && net_ratelimit()) {
		/*
		 *	RFC1812 recommendation, if source is martian,
		 *	the only hint is MAC header.
		 */
		pr_warn("martian source %pI4 from %pI4, on dev %s\n",
			&daddr, &saddr, dev->name);
		if (dev->hard_header_len && skb_mac_header_was_set(skb)) {
			print_hex_dump(KERN_WARNING, "ll header: ",
				       DUMP_PREFIX_OFFSET, 16, 1,
				       skb_mac_header(skb),
				       dev->hard_header_len, false);
		}
	}
#endif
}
/* called in rcu_read_lock() section */
static int __mkroute_input(struct sk_buff *skb,
			   const struct fib_result *res,
			   struct in_device *in_dev,
			   __be32 daddr, __be32 saddr, u32 tos)
{
	struct fib_nh_exception *fnhe;
	struct rtable *rth;
	int err;
	struct in_device *out_dev;
	bool do_cache;
	u32 itag = 0;

	/* get a working reference to the output device */
	out_dev = __in_dev_get_rcu(FIB_RES_DEV(*res));
	if (!out_dev) {
		net_crit_ratelimited("Bug in ip_route_input_slow(). Please report.\n");
		return -EINVAL;
	}

	err = fib_validate_source(skb, saddr, daddr, tos, FIB_RES_OIF(*res),
				  in_dev->dev, in_dev, &itag);
	if (err < 0) {
		ip_handle_martian_source(in_dev->dev, in_dev, skb, daddr,
					 saddr);

		goto cleanup;
	}

	do_cache = res->fi && !itag;
	if (out_dev == in_dev && err && IN_DEV_TX_REDIRECTS(out_dev) &&
	    skb->protocol == htons(ETH_P_IP) &&
	    (IN_DEV_SHARED_MEDIA(out_dev) ||
	     inet_addr_onlink(out_dev, saddr, FIB_RES_GW(*res))))
		IPCB(skb)->flags |= IPSKB_DOREDIRECT;

	if (skb->protocol != htons(ETH_P_IP)) {
		/* Not IP (i.e. ARP). Do not create route, if it is
		 * invalid for proxy arp. DNAT routes are always valid.
		 *
		 * Proxy arp feature have been extended to allow, ARP
		 * replies back to the same interface, to support
		 * Private VLAN switch technologies. See arp.c.
		 */
		if (out_dev == in_dev &&
		    IN_DEV_PROXY_ARP_PVLAN(in_dev) == 0) {
			err = -EINVAL;
			goto cleanup;
		}
	}

	fnhe = find_exception(&FIB_RES_NH(*res), daddr);
	if (do_cache) {
		if (fnhe)
			rth = rcu_dereference(fnhe->fnhe_rth_input);
		else
			rth = rcu_dereference(FIB_RES_NH(*res).nh_rth_input);
		if (rt_cache_valid(rth)) {
			skb_dst_set_noref(skb, &rth->dst);
			goto out;
		}
	}

	rth = rt_dst_alloc(out_dev->dev, 0, res->type,
			   IN_DEV_CONF_GET(in_dev, NOPOLICY),
			   IN_DEV_CONF_GET(out_dev, NOXFRM), do_cache);
	if (!rth) {
		err = -ENOBUFS;
		goto cleanup;
	}

	rth->rt_is_input = 1;
	RT_CACHE_STAT_INC(in_slow_tot);

	rth->dst.input = ip_forward;

	rt_set_nexthop(rth, daddr, res, fnhe, res->fi, res->type, itag,
		       do_cache);
	lwtunnel_set_redirect(&rth->dst);
	skb_dst_set(skb, &rth->dst);
out:
	err = 0;
cleanup:
	return err;
}
#ifdef CONFIG_IP_ROUTE_MULTIPATH
/* To make ICMP packets follow the right flow, the multipath hash is
 * calculated from the inner IP addresses.
 */
static void ip_multipath_l3_keys(const struct sk_buff *skb,
				 struct flow_keys *hash_keys)
{
	const struct iphdr *outer_iph = ip_hdr(skb);
	const struct iphdr *key_iph = outer_iph;
	const struct iphdr *inner_iph;
	const struct icmphdr *icmph;
	struct iphdr _inner_iph;
	struct icmphdr _icmph;

	if (likely(outer_iph->protocol != IPPROTO_ICMP))
		goto out;

	if (unlikely((outer_iph->frag_off & htons(IP_OFFSET)) != 0))
		goto out;

	icmph = skb_header_pointer(skb, outer_iph->ihl * 4, sizeof(_icmph),
				   &_icmph);
	if (!icmph)
		goto out;

	if (icmph->type != ICMP_DEST_UNREACH &&
	    icmph->type != ICMP_REDIRECT &&
	    icmph->type != ICMP_TIME_EXCEEDED &&
	    icmph->type != ICMP_PARAMETERPROB)
		goto out;

	inner_iph = skb_header_pointer(skb,
				       outer_iph->ihl * 4 + sizeof(_icmph),
				       sizeof(_inner_iph), &_inner_iph);
	if (!inner_iph)
		goto out;

	key_iph = inner_iph;
out:
	hash_keys->addrs.v4addrs.src = key_iph->saddr;
	hash_keys->addrs.v4addrs.dst = key_iph->daddr;
}
/* if skb is set it will be used and fl4 can be NULL */
int fib_multipath_hash(const struct net *net, const struct flowi4 *fl4,
		       const struct sk_buff *skb, struct flow_keys *flkeys)
{
	u32 multipath_hash = fl4 ? fl4->flowi4_multipath_hash : 0;
	struct flow_keys hash_keys;
	u32 mhash;

	switch (net->ipv4.sysctl_fib_multipath_hash_policy) {
	case 0:
		memset(&hash_keys, 0, sizeof(hash_keys));
		hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
		if (skb) {
			ip_multipath_l3_keys(skb, &hash_keys);
		} else {
			hash_keys.addrs.v4addrs.src = fl4->saddr;
			hash_keys.addrs.v4addrs.dst = fl4->daddr;
		}
		break;
	case 1:
		/* skb is currently provided only when forwarding */
		if (skb) {
			unsigned int flag = FLOW_DISSECTOR_F_STOP_AT_ENCAP;
			struct flow_keys keys;

			/* short-circuit if we already have L4 hash present */
			if (skb->l4_hash)
				return skb_get_hash_raw(skb) >> 1;

			memset(&hash_keys, 0, sizeof(hash_keys));

			if (!flkeys) {
				skb_flow_dissect_flow_keys(skb, &keys, flag);
				flkeys = &keys;
			}

			hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
			hash_keys.addrs.v4addrs.src = flkeys->addrs.v4addrs.src;
			hash_keys.addrs.v4addrs.dst = flkeys->addrs.v4addrs.dst;
			hash_keys.ports.src = flkeys->ports.src;
			hash_keys.ports.dst = flkeys->ports.dst;
			hash_keys.basic.ip_proto = flkeys->basic.ip_proto;
		} else {
			memset(&hash_keys, 0, sizeof(hash_keys));
			hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
			hash_keys.addrs.v4addrs.src = fl4->saddr;
			hash_keys.addrs.v4addrs.dst = fl4->daddr;
			hash_keys.ports.src = fl4->fl4_sport;
			hash_keys.ports.dst = fl4->fl4_dport;
			hash_keys.basic.ip_proto = fl4->flowi4_proto;
		}
		break;
	}
	mhash = flow_hash_from_keys(&hash_keys);

	if (multipath_hash)
		mhash = jhash_2words(mhash, multipath_hash, 0);

	return mhash >> 1;
}
#endif /* CONFIG_IP_ROUTE_MULTIPATH */
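/* Multipath nexthop selection: net.ipv4.fib_multipath_hash_policy selects
 * hashing on the L3 addresses (0) or the L4 five-tuple (1); the resulting
 * hash is handed to fib_select_multipath() by ip_mkroute_input() below.
 */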
static int ip_mkroute_input(struct sk_buff *skb,
			    struct fib_result *res,
			    struct in_device *in_dev,
			    __be32 daddr, __be32 saddr, u32 tos,
			    struct flow_keys *hkeys)
{
#ifdef CONFIG_IP_ROUTE_MULTIPATH
	if (res->fi && res->fi->fib_nhs > 1) {
		int h = fib_multipath_hash(res->fi->fib_net, NULL, skb, hkeys);

		fib_select_multipath(res, h);
	}
#endif

	/* create a routing cache entry */
	return __mkroute_input(skb, res, in_dev, daddr, saddr, tos);
}
/*
 *	NOTE. We drop all packets that have a local source address,
 *	because every properly looped-back packet must have the correct
 *	destination already attached by the output routine.
 *
 *	Such an approach solves two big problems:
 *	1. Non-simplex devices are handled properly.
 *	2. IP spoofing attempts are filtered with 100% guarantee.
 *
 *	called with rcu_read_lock()
 */
static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr,
			       u8 tos, struct net_device *dev,
			       struct fib_result *res)
{
	struct in_device *in_dev = __in_dev_get_rcu(dev);
	struct flow_keys *flkeys = NULL, _flkeys;
	struct net *net = dev_net(dev);
	struct ip_tunnel_info *tun_info;
	int err = -EINVAL;
	unsigned int flags = 0;
	u32 itag = 0;
	struct rtable *rth;
	struct flowi4 fl4;
	bool do_cache;

	/* IP on this device is disabled. */

	if (!in_dev)
		goto out;

	/* Check for the most weird martians, which can be not detected
	 * by fib_lookup.
	 */

	tun_info = skb_tunnel_info(skb);
	if (tun_info && !(tun_info->mode & IP_TUNNEL_INFO_TX))
		fl4.flowi4_tun_key.tun_id = tun_info->key.tun_id;
	else
		fl4.flowi4_tun_key.tun_id = 0;
	skb_dst_drop(skb);

	if (ipv4_is_multicast(saddr) || ipv4_is_lbcast(saddr))
		goto martian_source;

	res->fi = NULL;
	res->table = NULL;
	if (ipv4_is_lbcast(daddr) || (saddr == 0 && daddr == 0))
		goto brd_input;

	/* Accept zero addresses only to limited broadcast;
	 * I even do not know whether to fix it or not. Waiting for complaints :-)
	 */
	if (ipv4_is_zeronet(saddr))
		goto martian_source;

	if (ipv4_is_zeronet(daddr))
		goto martian_destination;

	/* Following code try to avoid calling IN_DEV_NET_ROUTE_LOCALNET(),
	 * and call it once if daddr or/and saddr are loopback addresses
	 */
	if (ipv4_is_loopback(daddr)) {
		if (!IN_DEV_NET_ROUTE_LOCALNET(in_dev, net))
			goto martian_destination;
	} else if (ipv4_is_loopback(saddr)) {
		if (!IN_DEV_NET_ROUTE_LOCALNET(in_dev, net))
			goto martian_source;
	}

	/*
	 *	Now we are ready to route packet.
	 */
	fl4.flowi4_oif = 0;
	fl4.flowi4_iif = dev->ifindex;
	fl4.flowi4_mark = skb->mark;
	fl4.flowi4_tos = tos;
	fl4.flowi4_scope = RT_SCOPE_UNIVERSE;
	fl4.flowi4_flags = 0;
	fl4.daddr = daddr;
	fl4.saddr = saddr;
	fl4.flowi4_uid = sock_net_uid(net, NULL);

	if (fib4_rules_early_flow_dissect(net, skb, &fl4, &_flkeys)) {
		flkeys = &_flkeys;
	} else {
		fl4.flowi4_proto = 0;
		fl4.fl4_sport = 0;
		fl4.fl4_dport = 0;
	}

	err = fib_lookup(net, &fl4, res, 0);
	if (err != 0) {
		if (!IN_DEV_FORWARD(in_dev))
			err = -EHOSTUNREACH;
		goto no_route;
	}

	if (res->type == RTN_BROADCAST) {
		if (IN_DEV_BFORWARD(in_dev))
			goto make_route;
		goto brd_input;
	}

	if (res->type == RTN_LOCAL) {
		err = fib_validate_source(skb, saddr, daddr, tos,
					  0, dev, in_dev, &itag);
		if (err < 0)
			goto martian_source;
		goto local_input;
	}

	if (!IN_DEV_FORWARD(in_dev)) {
		err = -EHOSTUNREACH;
		goto no_route;
	}
	if (res->type != RTN_UNICAST)
		goto martian_destination;

make_route:
	err = ip_mkroute_input(skb, res, in_dev, daddr, saddr, tos, flkeys);
out:	return err;

brd_input:
	if (skb->protocol != htons(ETH_P_IP))
		goto e_inval;

	if (!ipv4_is_zeronet(saddr)) {
		err = fib_validate_source(skb, saddr, 0, tos, 0, dev,
					  in_dev, &itag);
		if (err < 0)
			goto martian_source;
	}
	flags |= RTCF_BROADCAST;
	res->type = RTN_BROADCAST;
	RT_CACHE_STAT_INC(in_brd);

local_input:
	do_cache = false;
	if (res->fi) {
		if (!itag) {
			rth = rcu_dereference(FIB_RES_NH(*res).nh_rth_input);
			if (rt_cache_valid(rth)) {
				skb_dst_set_noref(skb, &rth->dst);
				err = 0;
				goto out;
			}
			do_cache = true;
		}
	}

	rth = rt_dst_alloc(l3mdev_master_dev_rcu(dev) ? : net->loopback_dev,
			   flags | RTCF_LOCAL, res->type,
			   IN_DEV_CONF_GET(in_dev, NOPOLICY), false, do_cache);
	if (!rth)
		goto e_nobufs;

	rth->dst.output = ip_rt_bug;
#ifdef CONFIG_IP_ROUTE_CLASSID
	rth->dst.tclassid = itag;
#endif
	rth->rt_is_input = 1;

	RT_CACHE_STAT_INC(in_slow_tot);
	if (res->type == RTN_UNREACHABLE) {
		rth->dst.input = ip_error;
		rth->dst.error = -err;
		rth->rt_flags &= ~RTCF_LOCAL;
	}

	if (do_cache) {
		struct fib_nh *nh = &FIB_RES_NH(*res);

		rth->dst.lwtstate = lwtstate_get(nh->nh_lwtstate);
		if (lwtunnel_input_redirect(rth->dst.lwtstate)) {
			WARN_ON(rth->dst.input == lwtunnel_input);
			rth->dst.lwtstate->orig_input = rth->dst.input;
			rth->dst.input = lwtunnel_input;
		}

		if (unlikely(!rt_cache_route(nh, rth)))
			rt_add_uncached_list(rth);
	}
	skb_dst_set(skb, &rth->dst);
	err = 0;
	goto out;

no_route:
	RT_CACHE_STAT_INC(in_no_route);
	res->type = RTN_UNREACHABLE;
	res->fi = NULL;
	res->table = NULL;
	goto local_input;

	/*
	 *	Do not cache martian addresses: they should be logged (RFC1812)
	 */
martian_destination:
	RT_CACHE_STAT_INC(in_martian_dst);
#ifdef CONFIG_IP_ROUTE_VERBOSE
	if (IN_DEV_LOG_MARTIANS(in_dev))
		net_warn_ratelimited("martian destination %pI4 from %pI4, dev %s\n",
				     &daddr, &saddr, dev->name);
#endif

e_inval:
	err = -EINVAL;
	goto out;

e_nobufs:
	err = -ENOBUFS;
	goto out;

martian_source:
	ip_handle_martian_source(dev, in_dev, skb, daddr, saddr);
	goto out;
}
int ip_route_input_noref(struct sk_buff *skb, __be32 daddr, __be32 saddr,
			 u8 tos, struct net_device *dev)
{
	struct fib_result res;
	int err;

	tos &= IPTOS_RT_MASK;
	rcu_read_lock();
	err = ip_route_input_rcu(skb, daddr, saddr, tos, dev, &res);
	rcu_read_unlock();

	return err;
}
EXPORT_SYMBOL(ip_route_input_noref);
/* called with rcu_read_lock held */
int ip_route_input_rcu(struct sk_buff *skb, __be32 daddr, __be32 saddr,
		       u8 tos, struct net_device *dev, struct fib_result *res)
{
	/* Multicast recognition logic is moved from route cache to here.
	 * The problem was that too many Ethernet cards have broken/missing
	 * hardware multicast filters :-( As a result the host on multicasting
	 * network acquires a lot of useless route cache entries, sort of
	 * SDR messages from all the world. Now we try to get rid of them.
	 * Really, provided software IP multicast filter is organized
	 * reasonably (at least, hashed), it does not result in a slowdown
	 * compared with route cache reject entries.
	 * Note, that multicast routers are not affected, because
	 * route cache entry is created eventually.
	 */
	if (ipv4_is_multicast(daddr)) {
		struct in_device *in_dev = __in_dev_get_rcu(dev);
		int our = 0;
		int err = -EINVAL;

		if (!in_dev)
			return err;
		our = ip_check_mc_rcu(in_dev, daddr, saddr,
				      ip_hdr(skb)->protocol);

		/* check l3 master if no match yet */
		if (!our && netif_is_l3_slave(dev)) {
			struct in_device *l3_in_dev;

			l3_in_dev = __in_dev_get_rcu(skb->dev);
			if (l3_in_dev)
				our = ip_check_mc_rcu(l3_in_dev, daddr, saddr,
						      ip_hdr(skb)->protocol);
		}

		if (our
#ifdef CONFIG_IP_MROUTE
			||
		    (!ipv4_is_local_multicast(daddr) &&
		     IN_DEV_MFORWARD(in_dev))
#endif
		   ) {
			err = ip_route_input_mc(skb, daddr, saddr,
						tos, dev, our);
		}
		return err;
	}

	return ip_route_input_slow(skb, daddr, saddr, tos, dev, res);
}
/* called with rcu_read_lock() */
static struct rtable *__mkroute_output(const struct fib_result *res,
				       const struct flowi4 *fl4, int orig_oif,
				       struct net_device *dev_out,
				       unsigned int flags)
{
	struct fib_info *fi = res->fi;
	struct fib_nh_exception *fnhe;
	struct in_device *in_dev;
	u16 type = res->type;
	struct rtable *rth;
	bool do_cache;

	in_dev = __in_dev_get_rcu(dev_out);
	if (!in_dev)
		return ERR_PTR(-EINVAL);

	if (likely(!IN_DEV_ROUTE_LOCALNET(in_dev)))
		if (ipv4_is_loopback(fl4->saddr) &&
		    !(dev_out->flags & IFF_LOOPBACK) &&
		    !netif_is_l3_master(dev_out))
			return ERR_PTR(-EINVAL);

	if (ipv4_is_lbcast(fl4->daddr))
		type = RTN_BROADCAST;
	else if (ipv4_is_multicast(fl4->daddr))
		type = RTN_MULTICAST;
	else if (ipv4_is_zeronet(fl4->daddr))
		return ERR_PTR(-EINVAL);

	if (dev_out->flags & IFF_LOOPBACK)
		flags |= RTCF_LOCAL;

	do_cache = true;
	if (type == RTN_BROADCAST) {
		flags |= RTCF_BROADCAST | RTCF_LOCAL;
		fi = NULL;
	} else if (type == RTN_MULTICAST) {
		flags |= RTCF_MULTICAST | RTCF_LOCAL;
		if (!ip_check_mc_rcu(in_dev, fl4->daddr, fl4->saddr,
				     fl4->flowi4_proto))
			flags &= ~RTCF_LOCAL;
		else
			do_cache = false;
		/* If multicast route do not exist use
		 * default one, but do not gateway in this case.
		 * Yes, it is hack.
		 */
		if (fi && res->prefixlen < 4)
			fi = NULL;
	} else if ((type == RTN_LOCAL) && (orig_oif != 0) &&
		   (orig_oif != dev_out->ifindex)) {
		/* For local routes that require a particular output interface
		 * we do not want to cache the result. Caching the result
		 * causes incorrect behaviour when there are multiple source
		 * addresses on the interface, the end result being that if the
		 * intended recipient is waiting on that interface for the
		 * packet he won't receive it because it will be delivered on
		 * the loopback interface and the IP_PKTINFO ipi_ifindex will
		 * be set to the loopback interface as well.
		 */
		do_cache = false;
	}

	fnhe = NULL;
	do_cache &= fi != NULL;
	if (fi) {
		struct rtable __rcu **prth;
		struct fib_nh *nh = &FIB_RES_NH(*res);

		fnhe = find_exception(nh, fl4->daddr);
		if (!do_cache)
			goto add;
		if (fnhe) {
			prth = &fnhe->fnhe_rth_output;
		} else {
			if (unlikely(fl4->flowi4_flags &
				     FLOWI_FLAG_KNOWN_NH &&
				     !(nh->nh_gw &&
				       nh->nh_scope == RT_SCOPE_LINK))) {
				do_cache = false;
				goto add;
			}
			prth = raw_cpu_ptr(nh->nh_pcpu_rth_output);
		}
		rth = rcu_dereference(*prth);
		if (rt_cache_valid(rth) && dst_hold_safe(&rth->dst))
			return rth;
	}

add:
	rth = rt_dst_alloc(dev_out, flags, type,
			   IN_DEV_CONF_GET(in_dev, NOPOLICY),
			   IN_DEV_CONF_GET(in_dev, NOXFRM),
			   do_cache);
	if (!rth)
		return ERR_PTR(-ENOBUFS);

	rth->rt_iif = orig_oif;

	RT_CACHE_STAT_INC(out_slow_tot);

	if (flags & (RTCF_BROADCAST | RTCF_MULTICAST)) {
		if (flags & RTCF_LOCAL &&
		    !(dev_out->flags & IFF_LOOPBACK)) {
			rth->dst.output = ip_mc_output;
			RT_CACHE_STAT_INC(out_slow_mc);
		}
#ifdef CONFIG_IP_MROUTE
		if (type == RTN_MULTICAST) {
			if (IN_DEV_MFORWARD(in_dev) &&
			    !ipv4_is_local_multicast(fl4->daddr)) {
				rth->dst.input = ip_mr_input;
				rth->dst.output = ip_mc_output;
			}
		}
#endif
	}

	rt_set_nexthop(rth, fl4->daddr, res, fnhe, fi, type, 0, do_cache);
	lwtunnel_set_redirect(&rth->dst);

	return rth;
}
/*
 * Major route resolver routine.
 */

struct rtable *ip_route_output_key_hash(struct net *net, struct flowi4 *fl4,
					const struct sk_buff *skb)
{
	__u8 tos = RT_FL_TOS(fl4);
	struct fib_result res = {
		.type		= RTN_UNSPEC,
		.fi		= NULL,
		.table		= NULL,
		.tclassid	= 0,
	};
	struct rtable *rth;

	fl4->flowi4_iif = LOOPBACK_IFINDEX;
	fl4->flowi4_tos = tos & IPTOS_RT_MASK;
	fl4->flowi4_scope = ((tos & RTO_ONLINK) ?
			 RT_SCOPE_LINK : RT_SCOPE_UNIVERSE);

	rcu_read_lock();
	rth = ip_route_output_key_hash_rcu(net, fl4, &res, skb);
	rcu_read_unlock();

	return rth;
}
EXPORT_SYMBOL_GPL(ip_route_output_key_hash);
struct rtable *ip_route_output_key_hash_rcu(struct net *net, struct flowi4 *fl4,
					    struct fib_result *res,
					    const struct sk_buff *skb)
{
	struct net_device *dev_out = NULL;
	int orig_oif = fl4->flowi4_oif;
	unsigned int flags = 0;
	struct rtable *rth;
	int err = -ENETUNREACH;

	if (fl4->saddr) {
		rth = ERR_PTR(-EINVAL);
		if (ipv4_is_multicast(fl4->saddr) ||
		    ipv4_is_lbcast(fl4->saddr) ||
		    ipv4_is_zeronet(fl4->saddr))
			goto out;

		/* I removed check for oif == dev_out->oif here.
		 * It was wrong for two reasons:
		 * 1. ip_dev_find(net, saddr) can return wrong iface, if saddr
		 *    is assigned to multiple interfaces.
		 * 2. Moreover, we are allowed to send packets with saddr
		 *    of another iface. --ANK
		 */

		if (fl4->flowi4_oif == 0 &&
		    (ipv4_is_multicast(fl4->daddr) ||
		     ipv4_is_lbcast(fl4->daddr))) {
			/* It is equivalent to inet_addr_type(saddr) == RTN_LOCAL */
			dev_out = __ip_dev_find(net, fl4->saddr, false);
			if (!dev_out)
				goto out;

			/* Special hack: user can direct multicasts
			 * and limited broadcast via necessary interface
			 * without fiddling with IP_MULTICAST_IF or IP_PKTINFO.
			 * This hack is not just for fun, it allows
			 * vic, vat and friends to work.
			 * They bind socket to loopback, set ttl to zero
			 * and expect that it will work.
			 * From the viewpoint of routing cache they are broken,
			 * because we are not allowed to build multicast path
			 * with loopback source addr (look, routing cache
			 * cannot know, that ttl is zero, so that packet
			 * will not leave this host and route is valid).
			 * Luckily, this hack is good workaround.
			 */

			fl4->flowi4_oif = dev_out->ifindex;
			goto make_route;
		}

		if (!(fl4->flowi4_flags & FLOWI_FLAG_ANYSRC)) {
			/* It is equivalent to inet_addr_type(saddr) == RTN_LOCAL */
			if (!__ip_dev_find(net, fl4->saddr, false))
				goto out;
		}
	}

	if (fl4->flowi4_oif) {
		dev_out = dev_get_by_index_rcu(net, fl4->flowi4_oif);
		rth = ERR_PTR(-ENODEV);
		if (!dev_out)
			goto out;

		/* RACE: Check return value of inet_select_addr instead. */
		if (!(dev_out->flags & IFF_UP) || !__in_dev_get_rcu(dev_out)) {
			rth = ERR_PTR(-ENETUNREACH);
			goto out;
		}
		if (ipv4_is_local_multicast(fl4->daddr) ||
		    ipv4_is_lbcast(fl4->daddr) ||
		    fl4->flowi4_proto == IPPROTO_IGMP) {
			if (!fl4->saddr)
				fl4->saddr = inet_select_addr(dev_out, 0,
							      RT_SCOPE_LINK);
			goto make_route;
		}
		if (!fl4->saddr) {
			if (ipv4_is_multicast(fl4->daddr))
				fl4->saddr = inet_select_addr(dev_out, 0,
							      fl4->flowi4_scope);
			else if (!fl4->daddr)
				fl4->saddr = inet_select_addr(dev_out, 0,
							      RT_SCOPE_HOST);
		}
	}

	if (!fl4->daddr) {
		fl4->daddr = fl4->saddr;
		if (!fl4->daddr)
			fl4->daddr = fl4->saddr = htonl(INADDR_LOOPBACK);
		dev_out = net->loopback_dev;
		fl4->flowi4_oif = LOOPBACK_IFINDEX;
		res->type = RTN_LOCAL;
		flags |= RTCF_LOCAL;
		goto make_route;
	}

	err = fib_lookup(net, fl4, res, 0);
	if (err) {
		res->fi = NULL;
		res->table = NULL;
		if (fl4->flowi4_oif &&
		    (ipv4_is_multicast(fl4->daddr) ||
		     !netif_index_is_l3_master(net, fl4->flowi4_oif))) {
			/* Apparently, routing tables are wrong. Assume,
			 * that the destination is on link.
			 *
			 * WHY? DW.
			 * Because we are allowed to send to iface
			 * even if it has NO routes and NO assigned
			 * addresses. When oif is specified, routing
			 * tables are looked up with only one purpose:
			 * to catch if destination is gatewayed, rather than
			 * direct. Moreover, if MSG_DONTROUTE is set,
			 * we send packet, ignoring both routing tables
			 * and ifaddr state. --ANK
			 *
			 * We could make it even if oif is unknown,
			 * likely IPv6, but we do not.
			 */

			if (fl4->saddr == 0)
				fl4->saddr = inet_select_addr(dev_out, 0,
							      RT_SCOPE_LINK);
			res->type = RTN_UNICAST;
			goto make_route;
		}
		rth = ERR_PTR(err);
		goto out;
	}

	if (res->type == RTN_LOCAL) {
		if (!fl4->saddr) {
			if (res->fi->fib_prefsrc)
				fl4->saddr = res->fi->fib_prefsrc;
			else
				fl4->saddr = fl4->daddr;
		}

		/* L3 master device is the loopback for that domain */
		dev_out = l3mdev_master_dev_rcu(FIB_RES_DEV(*res)) ? :
			net->loopback_dev;

		/* make sure orig_oif points to fib result device even
		 * though packet rx/tx happens over loopback or l3mdev
		 */
		orig_oif = FIB_RES_OIF(*res);

		fl4->flowi4_oif = dev_out->ifindex;
		flags |= RTCF_LOCAL;
		goto make_route;
	}

	fib_select_path(net, res, fl4, skb);

	dev_out = FIB_RES_DEV(*res);
	fl4->flowi4_oif = dev_out->ifindex;

make_route:
	rth = __mkroute_output(res, fl4, orig_oif, dev_out, flags);

out:
	return rth;
}
static struct dst_entry *ipv4_blackhole_dst_check(struct dst_entry *dst, u32 cookie)
{
	return NULL;
}

static unsigned int ipv4_blackhole_mtu(const struct dst_entry *dst)
{
	unsigned int mtu = dst_metric_raw(dst, RTAX_MTU);

	return mtu ? : dst->dev->mtu;
}

static void ipv4_rt_blackhole_update_pmtu(struct dst_entry *dst, struct sock *sk,
					  struct sk_buff *skb, u32 mtu)
{
}

static void ipv4_rt_blackhole_redirect(struct dst_entry *dst, struct sock *sk,
				       struct sk_buff *skb)
{
}

static u32 *ipv4_rt_blackhole_cow_metrics(struct dst_entry *dst,
					  unsigned long old)
{
	return NULL;
}

static struct dst_ops ipv4_dst_blackhole_ops = {
	.family		= AF_INET,
	.check		= ipv4_blackhole_dst_check,
	.mtu		= ipv4_blackhole_mtu,
	.default_advmss	= ipv4_default_advmss,
	.update_pmtu	= ipv4_rt_blackhole_update_pmtu,
	.redirect	= ipv4_rt_blackhole_redirect,
	.cow_metrics	= ipv4_rt_blackhole_cow_metrics,
	.neigh_lookup	= ipv4_neigh_lookup,
};
2550 struct dst_entry *ipv4_blackhole_route(struct net *net, struct dst_entry *dst_orig)
2552 struct rtable *ort = (struct rtable *) dst_orig;
2555 rt = dst_alloc(&ipv4_dst_blackhole_ops, NULL, 1, DST_OBSOLETE_DEAD, 0);
2557 struct dst_entry *new = &rt->dst;
2560 new->input = dst_discard;
2561 new->output = dst_discard_out;
2563 new->dev = net->loopback_dev;
2567 rt->rt_is_input = ort->rt_is_input;
2568 rt->rt_iif = ort->rt_iif;
2569 rt->rt_pmtu = ort->rt_pmtu;
2570 rt->rt_mtu_locked = ort->rt_mtu_locked;
2572 rt->rt_genid = rt_genid_ipv4(net);
2573 rt->rt_flags = ort->rt_flags;
2574 rt->rt_type = ort->rt_type;
2575 rt->rt_gateway = ort->rt_gateway;
2576 rt->rt_uses_gateway = ort->rt_uses_gateway;
2578 INIT_LIST_HEAD(&rt->rt_uncached);
2581 dst_release(dst_orig);
2583 return rt ? &rt->dst : ERR_PTR(-ENOMEM);
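/* Usage sketch (illustrative; the real call sites live in the XFRM code):
 * when policy resolution cannot complete yet, a caller can swap a dst for
 * its blackhole clone so that queued senders fail harmlessly:
 *
 *	dst = ipv4_blackhole_route(net, dst_orig);
 *	if (IS_ERR(dst))
 *		return dst;
 *
 * Packets routed via the clone are dropped by dst_discard() and
 * dst_discard_out(), and PMTU or redirect events against it are ignored
 * by the blackhole dst_ops above.
 */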
struct rtable *ip_route_output_flow(struct net *net, struct flowi4 *flp4,
				    const struct sock *sk)
{
	struct rtable *rt = __ip_route_output_key(net, flp4);

	if (IS_ERR(rt))
		return rt;

	if (flp4->flowi4_proto)
		rt = (struct rtable *)xfrm_lookup_route(net, &rt->dst,
							flowi4_to_flowi(flp4),
							sk, 0);

	return rt;
}
EXPORT_SYMBOL_GPL(ip_route_output_flow);
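/* Usage sketch (simplified from callers such as udp_sendmsg(); the local
 * variable names here are illustrative): a sender builds a flow key and
 * resolves it through this function:
 *
 *	struct flowi4 fl4;
 *	struct rtable *rt;
 *
 *	flowi4_init_output(&fl4, oif, sk->sk_mark, RT_TOS(tos),
 *			   RT_SCOPE_UNIVERSE, sk->sk_protocol,
 *			   inet_sk_flowi_flags(sk),
 *			   daddr, saddr, dport, sport, sk->sk_uid);
 *	rt = ip_route_output_flow(net, &fl4, sk);
 *	if (IS_ERR(rt))
 *		return PTR_ERR(rt);
 *	...
 *	ip_rt_put(rt);
 *
 * Because flowi4_proto is set, the result may additionally be transformed
 * by xfrm_lookup_route() as seen above.
 */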
/* called with rcu_read_lock held */
static int rt_fill_info(struct net *net, __be32 dst, __be32 src,
			struct rtable *rt, u32 table_id, struct flowi4 *fl4,
			struct sk_buff *skb, u32 portid, u32 seq)
{
	struct rtmsg *r;
	struct nlmsghdr *nlh;
	unsigned long expires = 0;
	u32 error;
	u32 metrics[RTAX_MAX];

	nlh = nlmsg_put(skb, portid, seq, RTM_NEWROUTE, sizeof(*r), 0);
	if (!nlh)
		return -EMSGSIZE;

	r = nlmsg_data(nlh);
	r->rtm_family = AF_INET;
	r->rtm_dst_len = 32;
	r->rtm_src_len = 0;
	r->rtm_tos = fl4->flowi4_tos;
	r->rtm_table = table_id < 256 ? table_id : RT_TABLE_COMPAT;
	if (nla_put_u32(skb, RTA_TABLE, table_id))
		goto nla_put_failure;
	r->rtm_type = rt->rt_type;
	r->rtm_scope = RT_SCOPE_UNIVERSE;
	r->rtm_protocol = RTPROT_UNSPEC;
	r->rtm_flags = (rt->rt_flags & ~0xFFFF) | RTM_F_CLONED;
	if (rt->rt_flags & RTCF_NOTIFY)
		r->rtm_flags |= RTM_F_NOTIFY;
	if (IPCB(skb)->flags & IPSKB_DOREDIRECT)
		r->rtm_flags |= RTCF_DOREDIRECT;

	if (nla_put_in_addr(skb, RTA_DST, dst))
		goto nla_put_failure;
	if (src) {
		r->rtm_src_len = 32;
		if (nla_put_in_addr(skb, RTA_SRC, src))
			goto nla_put_failure;
	}
	if (rt->dst.dev &&
	    nla_put_u32(skb, RTA_OIF, rt->dst.dev->ifindex))
		goto nla_put_failure;
#ifdef CONFIG_IP_ROUTE_CLASSID
	if (rt->dst.tclassid &&
	    nla_put_u32(skb, RTA_FLOW, rt->dst.tclassid))
		goto nla_put_failure;
#endif
	if (!rt_is_input_route(rt) &&
	    fl4->saddr != src) {
		if (nla_put_in_addr(skb, RTA_PREFSRC, fl4->saddr))
			goto nla_put_failure;
	}
	if (rt->rt_uses_gateway &&
	    nla_put_in_addr(skb, RTA_GATEWAY, rt->rt_gateway))
		goto nla_put_failure;

	expires = rt->dst.expires;
	if (expires) {
		unsigned long now = jiffies;

		if (time_before(now, expires))
			expires -= now;
		else
			expires = 0;
	}

	memcpy(metrics, dst_metrics_ptr(&rt->dst), sizeof(metrics));
	if (rt->rt_pmtu && expires)
		metrics[RTAX_MTU - 1] = rt->rt_pmtu;
	if (rt->rt_mtu_locked && expires)
		metrics[RTAX_LOCK - 1] |= BIT(RTAX_MTU);
	if (rtnetlink_put_metrics(skb, metrics) < 0)
		goto nla_put_failure;

	if (fl4->flowi4_mark &&
	    nla_put_u32(skb, RTA_MARK, fl4->flowi4_mark))
		goto nla_put_failure;

	if (!uid_eq(fl4->flowi4_uid, INVALID_UID) &&
	    nla_put_u32(skb, RTA_UID,
			from_kuid_munged(current_user_ns(), fl4->flowi4_uid)))
		goto nla_put_failure;

	error = rt->dst.error;

	if (rt_is_input_route(rt)) {
#ifdef CONFIG_IP_MROUTE
		if (ipv4_is_multicast(dst) && !ipv4_is_local_multicast(dst) &&
		    IPV4_DEVCONF_ALL(net, MC_FORWARDING)) {
			int err = ipmr_get_route(net, skb,
						 fl4->saddr, fl4->daddr,
						 r, portid);

			if (err <= 0) {
				if (err == 0)
					return 0;
				goto nla_put_failure;
			}
		} else
#endif
			if (nla_put_u32(skb, RTA_IIF, fl4->flowi4_iif))
				goto nla_put_failure;
	}

	if (rtnl_put_cacheinfo(skb, &rt->dst, 0, expires, error) < 0)
		goto nla_put_failure;

	nlmsg_end(skb, nlh);
	return 0;

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}
static struct sk_buff *inet_rtm_getroute_build_skb(__be32 src, __be32 dst,
						   u8 ip_proto, __be16 sport,
						   __be16 dport)
{
	struct sk_buff *skb;
	struct iphdr *iph;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return NULL;

	/* Reserve room for dummy headers; this skb can pass
	 * through a good chunk of the routing engine.
	 */
	skb_reset_mac_header(skb);
	skb_reset_network_header(skb);
	skb->protocol = htons(ETH_P_IP);
	iph = skb_put(skb, sizeof(struct iphdr));
	iph->protocol = ip_proto;
	iph->saddr = src;
	iph->daddr = dst;
	iph->version = 0x4;
	iph->frag_off = htons(IP_DF);
	iph->ihl = 0x5;
	skb_set_transport_header(skb, skb->len);

	switch (iph->protocol) {
	case IPPROTO_UDP: {
		struct udphdr *udph;

		udph = skb_put_zero(skb, sizeof(struct udphdr));
		udph->source = sport;
		udph->dest = dport;
		/* len is __be16, so it must be stored in network order */
		udph->len = htons(sizeof(struct udphdr));
		udph->check = 0;
		break;
	}
	case IPPROTO_TCP: {
		struct tcphdr *tcph;

		tcph = skb_put_zero(skb, sizeof(struct tcphdr));
		tcph->source = sport;
		tcph->dest = dport;
		tcph->doff = sizeof(struct tcphdr) / 4;
		tcph->rst = 1;
		tcph->check = ~tcp_v4_check(sizeof(struct tcphdr),
					    src, dst, 0);
		break;
	}
	case IPPROTO_ICMP: {
		struct icmphdr *icmph;

		icmph = skb_put_zero(skb, sizeof(struct icmphdr));
		icmph->type = ICMP_ECHO;
		icmph->code = 0;
	}
	}

	return skb;
}
static int inet_rtm_valid_getroute_req(struct sk_buff *skb,
				       const struct nlmsghdr *nlh,
				       struct nlattr **tb,
				       struct netlink_ext_ack *extack)
{
	struct rtmsg *rtm;
	int i, err;

	if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*rtm))) {
		NL_SET_ERR_MSG(extack,
			       "ipv4: Invalid header for route get request");
		return -EINVAL;
	}

	if (!netlink_strict_get_check(skb))
		return nlmsg_parse(nlh, sizeof(*rtm), tb, RTA_MAX,
				   rtm_ipv4_policy, extack);

	rtm = nlmsg_data(nlh);
	if ((rtm->rtm_src_len && rtm->rtm_src_len != 32) ||
	    (rtm->rtm_dst_len && rtm->rtm_dst_len != 32) ||
	    rtm->rtm_table || rtm->rtm_protocol ||
	    rtm->rtm_scope || rtm->rtm_type) {
		NL_SET_ERR_MSG(extack, "ipv4: Invalid values in header for route get request");
		return -EINVAL;
	}
	if (rtm->rtm_flags & ~(RTM_F_NOTIFY |
			       RTM_F_LOOKUP_TABLE |
			       RTM_F_FIB_MATCH)) {
		NL_SET_ERR_MSG(extack, "ipv4: Unsupported rtm_flags for route get request");
		return -EINVAL;
	}

	err = nlmsg_parse_strict(nlh, sizeof(*rtm), tb, RTA_MAX,
				 rtm_ipv4_policy, extack);
	if (err)
		return err;

	if ((tb[RTA_SRC] && !rtm->rtm_src_len) ||
	    (tb[RTA_DST] && !rtm->rtm_dst_len)) {
		NL_SET_ERR_MSG(extack, "ipv4: rtm_src_len and rtm_dst_len must be 32 for IPv4");
		return -EINVAL;
	}

	/* only the attributes consumed by the handler below are accepted */
	for (i = 0; i <= RTA_MAX; i++) {
		if (!tb[i])
			continue;
		switch (i) {
		case RTA_IIF: case RTA_OIF:
		case RTA_SRC: case RTA_DST:
		case RTA_IP_PROTO:
		case RTA_SPORT: case RTA_DPORT:
		case RTA_MARK: case RTA_UID:
			break;
		default:
			NL_SET_ERR_MSG(extack, "ipv4: Unsupported attribute in route get request");
			return -EINVAL;
		}
	}

	return 0;
}
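/* Example (hedged): with strict checking enabled, a GET request that sets
 * rtm_table, or that carries an attribute outside the list above (say,
 * RTA_PRIORITY), fails with -EINVAL plus the matching extack message,
 * whereas the legacy nlmsg_parse() path would have silently ignored it.
 */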
static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh,
			     struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(in_skb->sk);
	struct nlattr *tb[RTA_MAX+1];
	u32 table_id = RT_TABLE_MAIN;
	__be16 sport = 0, dport = 0;
	struct fib_result res = {};
	u8 ip_proto = IPPROTO_UDP;
	struct rtable *rt = NULL;
	struct sk_buff *skb;
	struct rtmsg *rtm;
	struct flowi4 fl4 = {};
	__be32 dst = 0;
	__be32 src = 0;
	kuid_t uid;
	u32 iif;
	int err;
	int mark;

	err = inet_rtm_valid_getroute_req(in_skb, nlh, tb, extack);
	if (err < 0)
		return err;

	rtm = nlmsg_data(nlh);
	src = tb[RTA_SRC] ? nla_get_in_addr(tb[RTA_SRC]) : 0;
	dst = tb[RTA_DST] ? nla_get_in_addr(tb[RTA_DST]) : 0;
	iif = tb[RTA_IIF] ? nla_get_u32(tb[RTA_IIF]) : 0;
	mark = tb[RTA_MARK] ? nla_get_u32(tb[RTA_MARK]) : 0;
	if (tb[RTA_UID])
		uid = make_kuid(current_user_ns(), nla_get_u32(tb[RTA_UID]));
	else
		uid = (iif ? INVALID_UID : current_uid());

	if (tb[RTA_IP_PROTO]) {
		err = rtm_getroute_parse_ip_proto(tb[RTA_IP_PROTO],
						  &ip_proto, AF_INET, extack);
		if (err)
			return err;
	}

	if (tb[RTA_SPORT])
		sport = nla_get_be16(tb[RTA_SPORT]);

	if (tb[RTA_DPORT])
		dport = nla_get_be16(tb[RTA_DPORT]);

	skb = inet_rtm_getroute_build_skb(src, dst, ip_proto, sport, dport);
	if (!skb)
		return -ENOBUFS;

	fl4.daddr = dst;
	fl4.saddr = src;
	fl4.flowi4_tos = rtm->rtm_tos;
	fl4.flowi4_oif = tb[RTA_OIF] ? nla_get_u32(tb[RTA_OIF]) : 0;
	fl4.flowi4_mark = mark;
	fl4.flowi4_uid = uid;
	if (sport)
		fl4.fl4_sport = sport;
	if (dport)
		fl4.fl4_dport = dport;
	fl4.flowi4_proto = ip_proto;

	rcu_read_lock();

	if (iif) {
		struct net_device *dev;

		dev = dev_get_by_index_rcu(net, iif);
		if (!dev) {
			err = -ENODEV;
			goto errout_rcu;
		}

		fl4.flowi4_iif = iif; /* for rt_fill_info */
		skb->dev = dev;
		skb->mark = mark;
		err = ip_route_input_rcu(skb, dst, src, rtm->rtm_tos,
					 dev, &res);

		rt = skb_rtable(skb);
		if (err == 0 && rt->dst.error)
			err = -rt->dst.error;
	} else {
		fl4.flowi4_iif = LOOPBACK_IFINDEX;
		skb->dev = net->loopback_dev;
		rt = ip_route_output_key_hash_rcu(net, &fl4, &res, skb);
		err = 0;
		if (IS_ERR(rt))
			err = PTR_ERR(rt);
		else
			skb_dst_set(skb, &rt->dst);
	}

	if (err)
		goto errout_rcu;

	if (rtm->rtm_flags & RTM_F_NOTIFY)
		rt->rt_flags |= RTCF_NOTIFY;

	if (rtm->rtm_flags & RTM_F_LOOKUP_TABLE)
		table_id = res.table ? res.table->tb_id : 0;

	/* reset skb for netlink reply msg */
	skb_trim(skb, 0);
	skb_reset_network_header(skb);
	skb_reset_transport_header(skb);
	skb_reset_mac_header(skb);

	if (rtm->rtm_flags & RTM_F_FIB_MATCH) {
		if (!res.fi) {
			err = fib_props[res.type].error;
			if (!err) /* a lookup can succeed with no route */
				err = -EHOSTUNREACH;
			goto errout_rcu;
		}
		err = fib_dump_info(skb, NETLINK_CB(in_skb).portid,
				    nlh->nlmsg_seq, RTM_NEWROUTE, table_id,
				    rt->rt_type, res.prefix, res.prefixlen,
				    fl4.flowi4_tos, res.fi, 0);
	} else {
		err = rt_fill_info(net, dst, src, rt, table_id, &fl4, skb,
				   NETLINK_CB(in_skb).portid, nlh->nlmsg_seq);
	}
	if (err < 0)
		goto errout_rcu;

	rcu_read_unlock();

	err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid);

errout_free:
	return err;

errout_rcu:
	rcu_read_unlock();
	kfree_skb(skb);
	goto errout_free;
}
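/* Userspace view (a hedged sketch, not kernel code): "ip route get" sends
 * an RTM_GETROUTE request that lands in the doit handler above. A minimal
 * raw-netlink request could look roughly like this:
 *
 *	struct {
 *		struct nlmsghdr nlh;
 *		struct rtmsg rtm;
 *		struct rtattr rta;
 *		struct in_addr dst;
 *	} req = {
 *		.nlh.nlmsg_len = sizeof(req),
 *		.nlh.nlmsg_type = RTM_GETROUTE,
 *		.nlh.nlmsg_flags = NLM_F_REQUEST,
 *		.rtm.rtm_family = AF_INET,
 *		.rtm.rtm_dst_len = 32,
 *		.rta.rta_type = RTA_DST,
 *		.rta.rta_len = RTA_LENGTH(sizeof(struct in_addr)),
 *	};
 *
 * sent over a NETLINK_ROUTE socket; the reply is the RTM_NEWROUTE message
 * built by rt_fill_info() (or by fib_dump_info() when RTM_F_FIB_MATCH is
 * requested).
 */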
void ip_rt_multicast_event(struct in_device *in_dev)
{
	rt_cache_flush(dev_net(in_dev->dev));
}
#ifdef CONFIG_SYSCTL
static int ip_rt_gc_interval __read_mostly = 60 * HZ;
static int ip_rt_gc_min_interval __read_mostly = HZ / 2;
static int ip_rt_gc_elasticity __read_mostly = 8;
static int ip_min_valid_pmtu __read_mostly = IPV4_MIN_MTU;

static int ipv4_sysctl_rtcache_flush(struct ctl_table *__ctl, int write,
				     void __user *buffer,
				     size_t *lenp, loff_t *ppos)
{
	struct net *net = (struct net *)__ctl->extra1;

	if (write) {
		rt_cache_flush(net);
		fnhe_genid_bump(net);
		return 0;
	}

	return -EINVAL;
}
static struct ctl_table ipv4_route_table[] = {
	{
		.procname = "gc_thresh",
		.data = &ipv4_dst_ops.gc_thresh,
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = proc_dointvec,
	},
	{
		.procname = "max_size",
		.data = &ip_rt_max_size,
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = proc_dointvec,
	},
	{
		/* Deprecated. Use gc_min_interval_ms */

		.procname = "gc_min_interval",
		.data = &ip_rt_gc_min_interval,
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = proc_dointvec_jiffies,
	},
	{
		.procname = "gc_min_interval_ms",
		.data = &ip_rt_gc_min_interval,
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = proc_dointvec_ms_jiffies,
	},
	{
		.procname = "gc_timeout",
		.data = &ip_rt_gc_timeout,
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = proc_dointvec_jiffies,
	},
	{
		.procname = "gc_interval",
		.data = &ip_rt_gc_interval,
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = proc_dointvec_jiffies,
	},
	{
		.procname = "redirect_load",
		.data = &ip_rt_redirect_load,
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = proc_dointvec,
	},
	{
		.procname = "redirect_number",
		.data = &ip_rt_redirect_number,
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = proc_dointvec,
	},
	{
		.procname = "redirect_silence",
		.data = &ip_rt_redirect_silence,
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = proc_dointvec,
	},
	{
		.procname = "error_cost",
		.data = &ip_rt_error_cost,
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = proc_dointvec,
	},
	{
		.procname = "error_burst",
		.data = &ip_rt_error_burst,
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = proc_dointvec,
	},
	{
		.procname = "gc_elasticity",
		.data = &ip_rt_gc_elasticity,
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = proc_dointvec,
	},
	{
		.procname = "mtu_expires",
		.data = &ip_rt_mtu_expires,
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = proc_dointvec_jiffies,
	},
	{
		.procname = "min_pmtu",
		.data = &ip_rt_min_pmtu,
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = proc_dointvec_minmax,
		.extra1 = &ip_min_valid_pmtu,
	},
	{
		.procname = "min_adv_mss",
		.data = &ip_rt_min_advmss,
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = proc_dointvec,
	},
	{ }
};

static struct ctl_table ipv4_route_flush_table[] = {
	{
		.procname = "flush",
		.maxlen = sizeof(int),
		.mode = 0200,
		.proc_handler = ipv4_sysctl_rtcache_flush,
	},
	{ },
};
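/* Example (matching the handler above): the per-netns "flush" entry is
 * write-only (mode 0200); writing any value, e.g.
 *
 *	echo 1 > /proc/sys/net/ipv4/route/flush
 *
 * calls rt_cache_flush() and bumps the fnhe genid, while reads are
 * rejected with -EINVAL.
 */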
static __net_init int sysctl_route_net_init(struct net *net)
{
	struct ctl_table *tbl;

	tbl = ipv4_route_flush_table;
	if (!net_eq(net, &init_net)) {
		tbl = kmemdup(tbl, sizeof(ipv4_route_flush_table), GFP_KERNEL);
		if (!tbl)
			goto err_dup;

		/* Don't export sysctls to unprivileged users */
		if (net->user_ns != &init_user_ns)
			tbl[0].procname = NULL;
	}
	tbl[0].extra1 = net;

	net->ipv4.route_hdr = register_net_sysctl(net, "net/ipv4/route", tbl);
	if (!net->ipv4.route_hdr)
		goto err_reg;
	return 0;

err_reg:
	if (tbl != ipv4_route_flush_table)
		kfree(tbl);
err_dup:
	return -ENOMEM;
}

static __net_exit void sysctl_route_net_exit(struct net *net)
{
	struct ctl_table *tbl;

	tbl = net->ipv4.route_hdr->ctl_table_arg;
	unregister_net_sysctl_table(net->ipv4.route_hdr);
	BUG_ON(tbl == ipv4_route_flush_table);
	kfree(tbl);
}
static __net_initdata struct pernet_operations sysctl_route_ops = {
	.init = sysctl_route_net_init,
	.exit = sysctl_route_net_exit,
};
#endif

static __net_init int rt_genid_init(struct net *net)
{
	atomic_set(&net->ipv4.rt_genid, 0);
	atomic_set(&net->fnhe_genid, 0);
	atomic_set(&net->ipv4.dev_addr_genid, get_random_int());
	return 0;
}

static __net_initdata struct pernet_operations rt_genid_ops = {
	.init = rt_genid_init,
};

static int __net_init ipv4_inetpeer_init(struct net *net)
{
	struct inet_peer_base *bp = kmalloc(sizeof(*bp), GFP_KERNEL);

	if (!bp)
		return -ENOMEM;
	inet_peer_base_init(bp);
	net->ipv4.peers = bp;
	return 0;
}

static void __net_exit ipv4_inetpeer_exit(struct net *net)
{
	struct inet_peer_base *bp = net->ipv4.peers;

	net->ipv4.peers = NULL;
	inetpeer_invalidate_tree(bp);
	kfree(bp);
}

static __net_initdata struct pernet_operations ipv4_inetpeer_ops = {
	.init = ipv4_inetpeer_init,
	.exit = ipv4_inetpeer_exit,
};

#ifdef CONFIG_IP_ROUTE_CLASSID
struct ip_rt_acct __percpu *ip_rt_acct __read_mostly;
#endif /* CONFIG_IP_ROUTE_CLASSID */
int __init ip_rt_init(void)
{
	int cpu;

	ip_idents = kmalloc_array(IP_IDENTS_SZ, sizeof(*ip_idents),
				  GFP_KERNEL);
	if (!ip_idents)
		panic("IP: failed to allocate ip_idents\n");

	prandom_bytes(ip_idents, IP_IDENTS_SZ * sizeof(*ip_idents));

	ip_tstamps = kcalloc(IP_IDENTS_SZ, sizeof(*ip_tstamps), GFP_KERNEL);
	if (!ip_tstamps)
		panic("IP: failed to allocate ip_tstamps\n");

	for_each_possible_cpu(cpu) {
		struct uncached_list *ul = &per_cpu(rt_uncached_list, cpu);

		INIT_LIST_HEAD(&ul->head);
		spin_lock_init(&ul->lock);
	}
#ifdef CONFIG_IP_ROUTE_CLASSID
	ip_rt_acct = __alloc_percpu(256 * sizeof(struct ip_rt_acct),
				    __alignof__(struct ip_rt_acct));
	if (!ip_rt_acct)
		panic("IP: failed to allocate ip_rt_acct\n");
#endif

	ipv4_dst_ops.kmem_cachep =
		kmem_cache_create("ip_dst_cache", sizeof(struct rtable), 0,
				  SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);

	ipv4_dst_blackhole_ops.kmem_cachep = ipv4_dst_ops.kmem_cachep;

	if (dst_entries_init(&ipv4_dst_ops) < 0)
		panic("IP: failed to allocate ipv4_dst_ops counter\n");

	if (dst_entries_init(&ipv4_dst_blackhole_ops) < 0)
		panic("IP: failed to allocate ipv4_dst_blackhole_ops counter\n");

	ipv4_dst_ops.gc_thresh = ~0;
	ip_rt_max_size = INT_MAX;

	devinet_init();
	ip_fib_init();

	if (ip_rt_proc_init())
		pr_err("Unable to create route proc files\n");
#ifdef CONFIG_IP_MROUTE
	ip_mr_init();
#endif
	rtnl_register(PF_INET, RTM_GETROUTE, inet_rtm_getroute, NULL,
		      RTNL_FLAG_DOIT_UNLOCKED);

#ifdef CONFIG_SYSCTL
	register_pernet_subsys(&sysctl_route_ops);
#endif
	register_pernet_subsys(&rt_genid_ops);
	register_pernet_subsys(&ipv4_inetpeer_ops);
	return 0;
}
#ifdef CONFIG_SYSCTL
/*
 * We really need to sanitize the damn ipv4 init order, then all
 * this nonsense will go away.
 */
void __init ip_static_sysctl_init(void)
{
	register_net_sysctl(&init_net, "net/ipv4/route", ipv4_route_table);
}
#endif