 * INET         An implementation of the TCP/IP protocol suite for the LINUX
 *              operating system. INET is implemented using the BSD Socket
 *              interface as the means of communication with the user level.
 *
 *              ROUTE - implementation of the IP router.
 *
 * Authors:     Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *              Alan Cox, <gw4pts@gw4pts.ampr.org>
 *              Linus Torvalds, <Linus.Torvalds@helsinki.fi>
 *              Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 * Fixes:
 *              Alan Cox        :       Verify area fixes.
 *              Alan Cox        :       cli() protects routing changes
 *              Rui Oliveira    :       ICMP routing table updates
 *              (rco@di.uminho.pt)      Routing table insertion and update
 *              Linus Torvalds  :       Rewrote bits to be sensible
 *              Alan Cox        :       Added BSD route gw semantics
 *              Alan Cox        :       Super /proc >4K
 *              Alan Cox        :       MTU in route table
 *              Alan Cox        :       MSS actually. Also added the window
 *                                      clamper.
 *              Sam Lantinga    :       Fixed route matching in rt_del()
 *              Alan Cox        :       Routing cache support.
 *              Alan Cox        :       Removed compatibility cruft.
 *              Alan Cox        :       RTF_REJECT support.
 *              Alan Cox        :       TCP irtt support.
 *              Jonathan Naylor :       Added Metric support.
 *              Miquel van Smoorenburg  :       BSD API fixes.
 *              Miquel van Smoorenburg  :       Metrics.
 *              Alan Cox        :       Use __u32 properly
 *              Alan Cox        :       Aligned routing errors more closely with BSD;
 *                                      our system is still very different.
 *              Alan Cox        :       Faster /proc handling
 *              Alexey Kuznetsov        :       Massive rework to support tree based routing,
 *                                      routing caches and better behaviour.
 *              Olaf Erb        :       irtt wasn't being copied right.
 *              Bjorn Ekwall    :       Kerneld route support.
 *              Alan Cox        :       Multicast fixed (I hope)
 *              Pavel Krauz     :       Limited broadcast fixed
 *              Mike McLagan    :       Routing by source
 *              Alexey Kuznetsov        :       End of old history. Split to fib.c and
 *                                      route.c and rewritten from scratch.
 *              Andi Kleen      :       Load-limit warning messages.
 *              Vitaly E. Lavrov        :       Transparent proxy revived after year coma.
 *              Vitaly E. Lavrov        :       Race condition in ip_route_input_slow.
 *              Tobias Ringstrom        :       Uninitialized res.type in ip_route_output_slow.
 *              Vladimir V. Ivanov      :       IP rule info (flowid) is really useful.
 *              Marc Boucher    :       routing by fwmark
 *              Robert Olsson   :       Added rt_cache statistics
 *              Arnaldo C. Melo :       Convert proc stuff to seq_file
 *              Eric Dumazet    :       hashed spinlocks and rt_check_expire() fixes.
 *              Ilia Sotnikov   :       Ignore TOS on PMTUD and Redirect
 *              Ilia Sotnikov   :       Removed TOS from hash calculations
 *
 *              This program is free software; you can redistribute it and/or
 *              modify it under the terms of the GNU General Public License
 *              as published by the Free Software Foundation; either version
 *              2 of the License, or (at your option) any later version.
 */
#define pr_fmt(fmt) "IPv4: " fmt

#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/bitops.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/errno.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/proc_fs.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/inetdevice.h>
#include <linux/igmp.h>
#include <linux/pkt_sched.h>
#include <linux/mroute.h>
#include <linux/netfilter_ipv4.h>
#include <linux/random.h>
#include <linux/rcupdate.h>
#include <linux/times.h>
#include <linux/slab.h>
#include <linux/jhash.h>
#include <net/dst_metadata.h>
#include <net/net_namespace.h>
#include <net/protocol.h>
#include <net/route.h>
#include <net/inetpeer.h>
#include <net/sock.h>
#include <net/ip_fib.h>
#include <net/icmp.h>
#include <net/xfrm.h>
#include <net/lwtunnel.h>
#include <net/netevent.h>
#include <net/rtnetlink.h>
#include <linux/sysctl.h>
#include <net/secure_seq.h>
#include <net/ip_tunnels.h>
#include <net/l3mdev.h>

#include "fib_lookup.h"
#define RT_FL_TOS(oldflp4) \
    ((oldflp4)->flowi4_tos & (IPTOS_RT_MASK | RTO_ONLINK))

#define RT_GC_TIMEOUT (300*HZ)
static int ip_rt_max_size;
static int ip_rt_redirect_number __read_mostly = 9;
static int ip_rt_redirect_load __read_mostly = HZ / 50;
static int ip_rt_redirect_silence __read_mostly = ((HZ / 50) << (9 + 1));
static int ip_rt_error_cost __read_mostly = HZ;
static int ip_rt_error_burst __read_mostly = 5 * HZ;
static int ip_rt_mtu_expires __read_mostly = 10 * 60 * HZ;
static u32 ip_rt_min_pmtu __read_mostly = 512 + 20 + 20;
static int ip_rt_min_advmss __read_mostly = 256;

static int ip_rt_gc_timeout __read_mostly = RT_GC_TIMEOUT;
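/*
 * Worked example (annotation, not in the original source): with HZ=1000 the
 * defaults above give ip_rt_redirect_load = HZ/50 = 20 jiffies (20ms) and
 * ip_rt_redirect_silence = (HZ/50) << 10 = 20480 jiffies, i.e. roughly 20
 * seconds of silence before ip_rt_send_redirect() below resets its
 * exponential back-off. ip_rt_min_pmtu = 512 + 20 + 20 = 552 is a minimum
 * 512-byte payload plus 20-byte IP and TCP headers.
 */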
/*
 * Interface to generic destination cache.
 */
static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie);
static unsigned int      ipv4_default_advmss(const struct dst_entry *dst);
static unsigned int      ipv4_mtu(const struct dst_entry *dst);
static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst);
static void              ipv4_link_failure(struct sk_buff *skb);
static void              ip_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
                                           struct sk_buff *skb, u32 mtu);
static void              ip_do_redirect(struct dst_entry *dst, struct sock *sk,
                                        struct sk_buff *skb);
static void              ipv4_dst_destroy(struct dst_entry *dst);

static u32 *ipv4_cow_metrics(struct dst_entry *dst, unsigned long old)
{
    WARN_ON(1);
    return NULL;
}

static struct neighbour *ipv4_neigh_lookup(const struct dst_entry *dst,
                                           struct sk_buff *skb,
                                           const void *daddr);
static void              ipv4_confirm_neigh(const struct dst_entry *dst, const void *daddr);
static struct dst_ops ipv4_dst_ops = {
    .family          = AF_INET,
    .check           = ipv4_dst_check,
    .default_advmss  = ipv4_default_advmss,
    .mtu             = ipv4_mtu,
    .cow_metrics     = ipv4_cow_metrics,
    .destroy         = ipv4_dst_destroy,
    .negative_advice = ipv4_negative_advice,
    .link_failure    = ipv4_link_failure,
    .update_pmtu     = ip_rt_update_pmtu,
    .redirect        = ip_do_redirect,
    .local_out       = __ip_local_out,
    .neigh_lookup    = ipv4_neigh_lookup,
    .confirm_neigh   = ipv4_confirm_neigh,
};
#define ECN_OR_COST(class)	TC_PRIO_##class

const __u8 ip_tos2prio[16] = {
    TC_PRIO_BESTEFFORT,
    ECN_OR_COST(BESTEFFORT),
    TC_PRIO_BESTEFFORT,
    ECN_OR_COST(BESTEFFORT),
    TC_PRIO_BULK,
    ECN_OR_COST(BULK),
    TC_PRIO_BULK,
    ECN_OR_COST(BULK),
    TC_PRIO_INTERACTIVE,
    ECN_OR_COST(INTERACTIVE),
    TC_PRIO_INTERACTIVE,
    ECN_OR_COST(INTERACTIVE),
    TC_PRIO_INTERACTIVE_BULK,
    ECN_OR_COST(INTERACTIVE_BULK),
    TC_PRIO_INTERACTIVE_BULK,
    ECN_OR_COST(INTERACTIVE_BULK)
};
EXPORT_SYMBOL(ip_tos2prio);
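/*
 * Illustrative usage (annotation, not in the original source): the table is
 * indexed by the four TOS bits shifted right by one; rt_tos2priority() in
 * include/net/route.h wraps exactly this lookup:
 *
 *	static inline char rt_tos2priority(u8 tos)
 *	{
 *		return ip_tos2prio[IPTOS_TOS(tos) >> 1];
 *	}
 *
 * so e.g. IPTOS_LOWDELAY (0x10) maps to TC_PRIO_INTERACTIVE.
 */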
static DEFINE_PER_CPU(struct rt_cache_stat, rt_cache_stat);
#define RT_CACHE_STAT_INC(field) raw_cpu_inc(rt_cache_stat.field)
#ifdef CONFIG_PROC_FS
static void *rt_cache_seq_start(struct seq_file *seq, loff_t *pos)
    return SEQ_START_TOKEN;

static void *rt_cache_seq_next(struct seq_file *seq, void *v, loff_t *pos)

static void rt_cache_seq_stop(struct seq_file *seq, void *v)

static int rt_cache_seq_show(struct seq_file *seq, void *v)
    if (v == SEQ_START_TOKEN)
        seq_printf(seq, "%-127s\n",
                   "Iface\tDestination\tGateway \tFlags\t\tRefCnt\tUse\t"
                   "Metric\tSource\t\tMTU\tWindow\tIRTT\tTOS\tHHRef\t"

static const struct seq_operations rt_cache_seq_ops = {
    .start = rt_cache_seq_start,
    .next  = rt_cache_seq_next,
    .stop  = rt_cache_seq_stop,
    .show  = rt_cache_seq_show,
};

static int rt_cache_seq_open(struct inode *inode, struct file *file)
    return seq_open(file, &rt_cache_seq_ops);

static const struct file_operations rt_cache_seq_fops = {
    .open    = rt_cache_seq_open,
    .release = seq_release,
};
static void *rt_cpu_seq_start(struct seq_file *seq, loff_t *pos)
        return SEQ_START_TOKEN;

    for (cpu = *pos-1; cpu < nr_cpu_ids; ++cpu) {
        if (!cpu_possible(cpu))
        return &per_cpu(rt_cache_stat, cpu);

static void *rt_cpu_seq_next(struct seq_file *seq, void *v, loff_t *pos)
    for (cpu = *pos; cpu < nr_cpu_ids; ++cpu) {
        if (!cpu_possible(cpu))
        return &per_cpu(rt_cache_stat, cpu);

static void rt_cpu_seq_stop(struct seq_file *seq, void *v)

static int rt_cpu_seq_show(struct seq_file *seq, void *v)
    struct rt_cache_stat *st = v;

    if (v == SEQ_START_TOKEN) {
        seq_printf(seq, "entries in_hit in_slow_tot in_slow_mc in_no_route in_brd in_martian_dst in_martian_src out_hit out_slow_tot out_slow_mc gc_total gc_ignored gc_goal_miss gc_dst_overflow in_hlist_search out_hlist_search\n");

    seq_printf(seq,"%08x %08x %08x %08x %08x %08x %08x %08x "
               " %08x %08x %08x %08x %08x %08x %08x %08x %08x \n",
               dst_entries_get_slow(&ipv4_dst_ops),
               0, /* st->gc_total */
               0, /* st->gc_ignored */
               0, /* st->gc_goal_miss */
               0, /* st->gc_dst_overflow */
               0, /* st->in_hlist_search */
               0  /* st->out_hlist_search */

static const struct seq_operations rt_cpu_seq_ops = {
    .start = rt_cpu_seq_start,
    .next  = rt_cpu_seq_next,
    .stop  = rt_cpu_seq_stop,
    .show  = rt_cpu_seq_show,
};

static int rt_cpu_seq_open(struct inode *inode, struct file *file)
    return seq_open(file, &rt_cpu_seq_ops);

static const struct file_operations rt_cpu_seq_fops = {
    .open    = rt_cpu_seq_open,
    .release = seq_release,
};
#ifdef CONFIG_IP_ROUTE_CLASSID
static int rt_acct_proc_show(struct seq_file *m, void *v)
    struct ip_rt_acct *dst, *src;

    dst = kcalloc(256, sizeof(struct ip_rt_acct), GFP_KERNEL);

    for_each_possible_cpu(i) {
        src = (struct ip_rt_acct *)per_cpu_ptr(ip_rt_acct, i);
        for (j = 0; j < 256; j++) {
            dst[j].o_bytes   += src[j].o_bytes;
            dst[j].o_packets += src[j].o_packets;
            dst[j].i_bytes   += src[j].i_bytes;
            dst[j].i_packets += src[j].i_packets;

    seq_write(m, dst, 256 * sizeof(struct ip_rt_acct));

static int rt_acct_proc_open(struct inode *inode, struct file *file)
    return single_open(file, rt_acct_proc_show, NULL);

static const struct file_operations rt_acct_proc_fops = {
    .open    = rt_acct_proc_open,
    .release = single_release,
};
#endif
static int __net_init ip_rt_do_proc_init(struct net *net)
{
    struct proc_dir_entry *pde;

    pde = proc_create("rt_cache", 0444, net->proc_net,
                      &rt_cache_seq_fops);

    pde = proc_create("rt_cache", 0444,
                      net->proc_net_stat, &rt_cpu_seq_fops);

#ifdef CONFIG_IP_ROUTE_CLASSID
    pde = proc_create("rt_acct", 0, net->proc_net, &rt_acct_proc_fops);
#endif

#ifdef CONFIG_IP_ROUTE_CLASSID
err3:
    remove_proc_entry("rt_cache", net->proc_net_stat);
#endif
err2:
    remove_proc_entry("rt_cache", net->proc_net);

static void __net_exit ip_rt_do_proc_exit(struct net *net)
{
    remove_proc_entry("rt_cache", net->proc_net_stat);
    remove_proc_entry("rt_cache", net->proc_net);
#ifdef CONFIG_IP_ROUTE_CLASSID
    remove_proc_entry("rt_acct", net->proc_net);
#endif
}

static struct pernet_operations ip_rt_proc_ops __net_initdata = {
    .init = ip_rt_do_proc_init,
    .exit = ip_rt_do_proc_exit,
};

static int __init ip_rt_proc_init(void)
{
    return register_pernet_subsys(&ip_rt_proc_ops);
}

#else
static inline int ip_rt_proc_init(void)
{
    return 0;
}
#endif /* CONFIG_PROC_FS */
static inline bool rt_is_expired(const struct rtable *rth)
    return rth->rt_genid != rt_genid_ipv4(dev_net(rth->dst.dev));

void rt_cache_flush(struct net *net)
    rt_genid_bump_ipv4(net);
static struct neighbour *ipv4_neigh_lookup(const struct dst_entry *dst,
                                           struct sk_buff *skb,
                                           const void *daddr)
{
    struct net_device *dev = dst->dev;
    const __be32 *pkey = daddr;
    const struct rtable *rt;
    struct neighbour *n;

    rt = (const struct rtable *) dst;
    if (rt->rt_gateway)
        pkey = (const __be32 *) &rt->rt_gateway;
    else if (skb)
        pkey = &ip_hdr(skb)->daddr;

    n = __ipv4_neigh_lookup(dev, *(__force u32 *)pkey);
    if (n)
        return n;
    return neigh_create(&arp_tbl, pkey, dev);
}

static void ipv4_confirm_neigh(const struct dst_entry *dst, const void *daddr)
{
    struct net_device *dev = dst->dev;
    const __be32 *pkey = daddr;
    const struct rtable *rt;

    rt = (const struct rtable *)dst;
    if (rt->rt_gateway)
        pkey = (const __be32 *)&rt->rt_gateway;
    else if (!daddr ||
             (rt->rt_flags &
              (RTCF_MULTICAST | RTCF_BROADCAST | RTCF_LOCAL)))
        return;

    __ipv4_confirm_neigh(dev, *(__force u32 *)pkey);
}
#define IP_IDENTS_SZ 2048u

static atomic_t *ip_idents __read_mostly;
static u32 *ip_tstamps __read_mostly;
/* In order to protect privacy, we add a perturbation to identifiers
 * if one generator is seldom used. This makes it hard for an attacker
 * to infer how many packets were sent between two points in time.
 */
u32 ip_idents_reserve(u32 hash, int segs)
{
    u32 *p_tstamp = ip_tstamps + hash % IP_IDENTS_SZ;
    atomic_t *p_id = ip_idents + hash % IP_IDENTS_SZ;
    u32 old = READ_ONCE(*p_tstamp);
    u32 now = (u32)jiffies;
    u32 new, delta = 0;

    if (old != now && cmpxchg(p_tstamp, old, now) == old)
        delta = prandom_u32_max(now - old);

    /* Do not use atomic_add_return() as it makes UBSAN unhappy */
    do {
        old = (u32)atomic_read(p_id);
        new = old + delta + segs;
    } while (atomic_cmpxchg(p_id, old, new) != old);

    return new - segs;
}
EXPORT_SYMBOL(ip_idents_reserve);
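/*
 * Sketch of the effect (annotation, not in the original source): if a given
 * hash bucket was last used 5 seconds ago (now - old == 5 * HZ), the next
 * reserved ID jumps ahead by a uniformly random delta in [0, 5*HZ) plus the
 * segment count, so an off-path observer sampling IP IDs cannot count how
 * many packets the host actually sent in the interval.
 */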
void __ip_select_ident(struct net *net, struct iphdr *iph, int segs)
{
    static u32 ip_idents_hashrnd __read_mostly;
    u32 hash, id;

    net_get_random_once(&ip_idents_hashrnd, sizeof(ip_idents_hashrnd));

    hash = jhash_3words((__force u32)iph->daddr,
                        (__force u32)iph->saddr,
                        iph->protocol ^ net_hash_mix(net),
                        ip_idents_hashrnd);
    id = ip_idents_reserve(hash, segs);
    iph->id = htons(id);
}
EXPORT_SYMBOL(__ip_select_ident);
static void __build_flow_key(const struct net *net, struct flowi4 *fl4,
                             const struct sock *sk,
                             const struct iphdr *iph,
                             int oif, u8 tos,
                             u8 prot, u32 mark, int flow_flags)
{
    if (sk) {
        const struct inet_sock *inet = inet_sk(sk);

        oif = sk->sk_bound_dev_if;
        mark = sk->sk_mark;
        tos = RT_CONN_FLAGS(sk);
        prot = inet->hdrincl ? IPPROTO_RAW : sk->sk_protocol;
    }
    flowi4_init_output(fl4, oif, mark, tos,
                       RT_SCOPE_UNIVERSE, prot,
                       flow_flags,
                       iph->daddr, iph->saddr, 0, 0,
                       sock_net_uid(net, sk));
}
static void build_skb_flow_key(struct flowi4 *fl4, const struct sk_buff *skb,
                               const struct sock *sk)
{
    const struct net *net = dev_net(skb->dev);
    const struct iphdr *iph = ip_hdr(skb);
    int oif = skb->dev->ifindex;
    u8 tos = RT_TOS(iph->tos);
    u8 prot = iph->protocol;
    u32 mark = skb->mark;

    __build_flow_key(net, fl4, sk, iph, oif, tos, prot, mark, 0);
}
static void build_sk_flow_key(struct flowi4 *fl4, const struct sock *sk)
{
    const struct inet_sock *inet = inet_sk(sk);
    const struct ip_options_rcu *inet_opt;
    __be32 daddr = inet->inet_daddr;

    rcu_read_lock();
    inet_opt = rcu_dereference(inet->inet_opt);
    if (inet_opt && inet_opt->opt.srr)
        daddr = inet_opt->opt.faddr;
    flowi4_init_output(fl4, sk->sk_bound_dev_if, sk->sk_mark,
                       RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE,
                       inet->hdrincl ? IPPROTO_RAW : sk->sk_protocol,
                       inet_sk_flowi_flags(sk),
                       daddr, inet->inet_saddr, 0, 0, sk->sk_uid);
    rcu_read_unlock();
}
static void ip_rt_build_flow_key(struct flowi4 *fl4, const struct sock *sk,
                                 const struct sk_buff *skb)
{
    if (skb)
        build_skb_flow_key(fl4, skb, sk);
    else
        build_sk_flow_key(fl4, sk);
}
static DEFINE_SPINLOCK(fnhe_lock);

static void fnhe_flush_routes(struct fib_nh_exception *fnhe)
{
    struct rtable *rt;

    rt = rcu_dereference(fnhe->fnhe_rth_input);
    if (rt) {
        RCU_INIT_POINTER(fnhe->fnhe_rth_input, NULL);
        dst_dev_put(&rt->dst);
        dst_release(&rt->dst);
    }
    rt = rcu_dereference(fnhe->fnhe_rth_output);
    if (rt) {
        RCU_INIT_POINTER(fnhe->fnhe_rth_output, NULL);
        dst_dev_put(&rt->dst);
        dst_release(&rt->dst);
    }
}
static struct fib_nh_exception *fnhe_oldest(struct fnhe_hash_bucket *hash)
{
    struct fib_nh_exception *fnhe, *oldest;

    oldest = rcu_dereference(hash->chain);
    for (fnhe = rcu_dereference(oldest->fnhe_next); fnhe;
         fnhe = rcu_dereference(fnhe->fnhe_next)) {
        if (time_before(fnhe->fnhe_stamp, oldest->fnhe_stamp))
            oldest = fnhe;
    }
    fnhe_flush_routes(oldest);
    return oldest;
}
static inline u32 fnhe_hashfun(__be32 daddr)
{
    static u32 fnhe_hashrnd __read_mostly;
    u32 hval;

    net_get_random_once(&fnhe_hashrnd, sizeof(fnhe_hashrnd));
    hval = jhash_1word((__force u32) daddr, fnhe_hashrnd);
    return hash_32(hval, FNHE_HASH_SHIFT);
}
static void fill_route_from_fnhe(struct rtable *rt, struct fib_nh_exception *fnhe)
{
    rt->rt_pmtu = fnhe->fnhe_pmtu;
    rt->rt_mtu_locked = fnhe->fnhe_mtu_locked;
    rt->dst.expires = fnhe->fnhe_expires;

    if (fnhe->fnhe_gw) {
        rt->rt_flags |= RTCF_REDIRECTED;
        rt->rt_gateway = fnhe->fnhe_gw;
        rt->rt_uses_gateway = 1;
    }
}
static void update_or_create_fnhe(struct fib_nh *nh, __be32 daddr, __be32 gw,
                                  u32 pmtu, bool lock, unsigned long expires)
{
    struct fnhe_hash_bucket *hash;
    struct fib_nh_exception *fnhe;
    struct rtable *rt;
    u32 genid, hval;
    unsigned int i;

    genid = fnhe_genid(dev_net(nh->nh_dev));
    hval = fnhe_hashfun(daddr);

    spin_lock_bh(&fnhe_lock);

    hash = rcu_dereference(nh->nh_exceptions);
    if (!hash) {
        hash = kzalloc(FNHE_HASH_SIZE * sizeof(*hash), GFP_ATOMIC);
        rcu_assign_pointer(nh->nh_exceptions, hash);
    }

    for (fnhe = rcu_dereference(hash->chain); fnhe;
         fnhe = rcu_dereference(fnhe->fnhe_next)) {
        if (fnhe->fnhe_daddr == daddr)
            break;
    }

    if (fnhe) {
        if (fnhe->fnhe_genid != genid)
            fnhe->fnhe_genid = genid;
        fnhe->fnhe_pmtu = pmtu;
        fnhe->fnhe_mtu_locked = lock;
        fnhe->fnhe_expires = max(1UL, expires);
        /* Update all cached dsts too */
        rt = rcu_dereference(fnhe->fnhe_rth_input);
        if (rt)
            fill_route_from_fnhe(rt, fnhe);
        rt = rcu_dereference(fnhe->fnhe_rth_output);
        if (rt)
            fill_route_from_fnhe(rt, fnhe);
    } else {
        if (depth > FNHE_RECLAIM_DEPTH)
            fnhe = fnhe_oldest(hash);
        else {
            fnhe = kzalloc(sizeof(*fnhe), GFP_ATOMIC);

            fnhe->fnhe_next = hash->chain;
            rcu_assign_pointer(hash->chain, fnhe);
        }
        fnhe->fnhe_genid = genid;
        fnhe->fnhe_daddr = daddr;
        fnhe->fnhe_gw = gw;
        fnhe->fnhe_pmtu = pmtu;
        fnhe->fnhe_mtu_locked = lock;
        fnhe->fnhe_expires = expires;

        /* Exception created; mark the cached routes for the nexthop
         * stale, so anyone caching it rechecks if this exception
         * applies to them.
         */
        rt = rcu_dereference(nh->nh_rth_input);
        if (rt)
            rt->dst.obsolete = DST_OBSOLETE_KILL;

        for_each_possible_cpu(i) {
            struct rtable __rcu **prt;
            prt = per_cpu_ptr(nh->nh_pcpu_rth_output, i);
            rt = rcu_dereference(*prt);
            if (rt)
                rt->dst.obsolete = DST_OBSOLETE_KILL;
        }
    }

    fnhe->fnhe_stamp = jiffies;

    spin_unlock_bh(&fnhe_lock);
}
static void __ip_do_redirect(struct rtable *rt, struct sk_buff *skb, struct flowi4 *fl4,
                             bool kill_route)
{
    __be32 new_gw = icmp_hdr(skb)->un.gateway;
    __be32 old_gw = ip_hdr(skb)->saddr;
    struct net_device *dev = skb->dev;
    struct in_device *in_dev;
    struct fib_result res;

    switch (icmp_hdr(skb)->code & 7) {
    case ICMP_REDIR_NET:
    case ICMP_REDIR_NETTOS:
    case ICMP_REDIR_HOST:
    case ICMP_REDIR_HOSTTOS:

    if (rt->rt_gateway != old_gw)
        return;

    in_dev = __in_dev_get_rcu(dev);

    if (new_gw == old_gw || !IN_DEV_RX_REDIRECTS(in_dev) ||
        ipv4_is_multicast(new_gw) || ipv4_is_lbcast(new_gw) ||
        ipv4_is_zeronet(new_gw))
        goto reject_redirect;

    if (!IN_DEV_SHARED_MEDIA(in_dev)) {
        if (!inet_addr_onlink(in_dev, new_gw, old_gw))
            goto reject_redirect;
        if (IN_DEV_SEC_REDIRECTS(in_dev) && ip_fib_check_default(new_gw, dev))
            goto reject_redirect;

    if (inet_addr_type(net, new_gw) != RTN_UNICAST)
        goto reject_redirect;

    n = __ipv4_neigh_lookup(rt->dst.dev, new_gw);
    if (!n)
        n = neigh_create(&arp_tbl, &new_gw, rt->dst.dev);

    if (!(n->nud_state & NUD_VALID)) {
        neigh_event_send(n, NULL);

    if (fib_lookup(net, fl4, &res, 0) == 0) {
        struct fib_nh *nh = &FIB_RES_NH(res);

        update_or_create_fnhe(nh, fl4->daddr, new_gw,
                              jiffies + ip_rt_gc_timeout);

    if (kill_route)
        rt->dst.obsolete = DST_OBSOLETE_KILL;
    call_netevent_notifiers(NETEVENT_NEIGH_UPDATE, n);

reject_redirect:
#ifdef CONFIG_IP_ROUTE_VERBOSE
    if (IN_DEV_LOG_MARTIANS(in_dev)) {
        const struct iphdr *iph = (const struct iphdr *) skb->data;
        __be32 daddr = iph->daddr;
        __be32 saddr = iph->saddr;

        net_info_ratelimited("Redirect from %pI4 on %s about %pI4 ignored\n"
                             "  Advised path = %pI4 -> %pI4\n",
                             &old_gw, dev->name, &new_gw,
static void ip_do_redirect(struct dst_entry *dst, struct sock *sk, struct sk_buff *skb)
{
    struct rtable *rt;
    struct flowi4 fl4;
    const struct iphdr *iph = (const struct iphdr *) skb->data;
    struct net *net = dev_net(skb->dev);
    int oif = skb->dev->ifindex;
    u8 tos = RT_TOS(iph->tos);
    u8 prot = iph->protocol;
    u32 mark = skb->mark;

    rt = (struct rtable *) dst;

    __build_flow_key(net, &fl4, sk, iph, oif, tos, prot, mark, 0);
    __ip_do_redirect(rt, skb, &fl4, true);
}
static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst)
{
    struct rtable *rt = (struct rtable *)dst;
    struct dst_entry *ret = dst;

    if (rt) {
        if (dst->obsolete > 0) {
            ip_rt_put(rt);
            ret = NULL;
        } else if ((rt->rt_flags & RTCF_REDIRECTED) ||
                   rt->dst.expires) {
            ip_rt_put(rt);
            ret = NULL;
        }
    }
    return ret;
}
/*
 * Algorithm:
 *	1. The first ip_rt_redirect_number redirects are sent
 *	   with exponential backoff, then we stop sending them at all,
 *	   assuming that the host ignores our redirects.
 *	2. If we did not see packets requiring redirects
 *	   during ip_rt_redirect_silence, we assume that the host
 *	   forgot the redirected route and start sending redirects again.
 *
 * This algorithm is much cheaper and more intelligent than dumb load limiting
 * in icmp.c.
 *
 * NOTE. Do not forget to inhibit load limiting for redirects (redundant)
 * and "frag. need" (breaks PMTU discovery) in icmp.c.
 */
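/*
 * Worked example (annotation, not in the original source): with the defaults
 * above, the k-th redirect toward a peer is sent only once jiffies passes
 * rate_last + (ip_rt_redirect_load << k), i.e. after 20ms, 40ms, 80ms, ...
 * at HZ=1000; once rate_tokens reaches ip_rt_redirect_number (9), nothing is
 * sent until the ~20s ip_rt_redirect_silence window resets the state.
 */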
void ip_rt_send_redirect(struct sk_buff *skb)
{
    struct rtable *rt = skb_rtable(skb);
    struct in_device *in_dev;
    struct inet_peer *peer;
    struct net *net;
    int log_martians;
    int vif;

    rcu_read_lock();
    in_dev = __in_dev_get_rcu(rt->dst.dev);
    if (!in_dev || !IN_DEV_TX_REDIRECTS(in_dev)) {
        rcu_read_unlock();
        return;
    }
    log_martians = IN_DEV_LOG_MARTIANS(in_dev);
    vif = l3mdev_master_ifindex_rcu(rt->dst.dev);
    rcu_read_unlock();

    net = dev_net(rt->dst.dev);
    peer = inet_getpeer_v4(net->ipv4.peers, ip_hdr(skb)->saddr, vif, 1);
    if (!peer) {
        icmp_send(skb, ICMP_REDIRECT, ICMP_REDIR_HOST,
                  rt_nexthop(rt, ip_hdr(skb)->daddr));
        return;
    }
    /* No redirected packets during ip_rt_redirect_silence;
     * reset the algorithm.
     */
    if (time_after(jiffies, peer->rate_last + ip_rt_redirect_silence))
        peer->rate_tokens = 0;

    /* Too many ignored redirects; do not send anything;
     * set dst.rate_last to the last seen redirected packet.
     */
    if (peer->rate_tokens >= ip_rt_redirect_number) {
        peer->rate_last = jiffies;
        goto out_put_peer;
    }

    /* Check for load limit; set rate_last to the latest sent
     * redirect.
     */
    if (peer->rate_tokens == 0 ||
        time_after(jiffies,
                   (peer->rate_last +
                    (ip_rt_redirect_load << peer->rate_tokens)))) {
        __be32 gw = rt_nexthop(rt, ip_hdr(skb)->daddr);

        icmp_send(skb, ICMP_REDIRECT, ICMP_REDIR_HOST, gw);
        peer->rate_last = jiffies;
#ifdef CONFIG_IP_ROUTE_VERBOSE
        if (log_martians &&
            peer->rate_tokens == ip_rt_redirect_number)
            net_warn_ratelimited("host %pI4/if%d ignores redirects for %pI4 to %pI4\n",
                                 &ip_hdr(skb)->saddr, inet_iif(skb),
                                 &ip_hdr(skb)->daddr, &gw);
#endif
    }
out_put_peer:
    inet_putpeer(peer);
}
static int ip_error(struct sk_buff *skb)
{
    struct rtable *rt = skb_rtable(skb);
    struct net_device *dev = skb->dev;
    struct in_device *in_dev;
    struct inet_peer *peer;
    unsigned long now;
    struct net *net;
    int code;

    if (netif_is_l3_master(skb->dev)) {
        dev = __dev_get_by_index(dev_net(skb->dev), IPCB(skb)->iif);
    }

    in_dev = __in_dev_get_rcu(dev);

    /* IP on this device is disabled. */

    net = dev_net(rt->dst.dev);
    if (!IN_DEV_FORWARD(in_dev)) {
        switch (rt->dst.error) {
        case EHOSTUNREACH:
            __IP_INC_STATS(net, IPSTATS_MIB_INADDRERRORS);
            break;
        case ENETUNREACH:
            __IP_INC_STATS(net, IPSTATS_MIB_INNOROUTES);
            break;
        }
        goto out;
    }

    switch (rt->dst.error) {
    case EHOSTUNREACH:
        code = ICMP_HOST_UNREACH;
        break;
    case ENETUNREACH:
        code = ICMP_NET_UNREACH;
        __IP_INC_STATS(net, IPSTATS_MIB_INNOROUTES);
        break;
    case EACCES:
        code = ICMP_PKT_FILTERED;
        break;
    }

    peer = inet_getpeer_v4(net->ipv4.peers, ip_hdr(skb)->saddr,
                           l3mdev_master_ifindex(skb->dev), 1);

    if (peer) {
        now = jiffies;
        peer->rate_tokens += now - peer->rate_last;
        if (peer->rate_tokens > ip_rt_error_burst)
            peer->rate_tokens = ip_rt_error_burst;
        peer->rate_last = now;
        if (peer->rate_tokens >= ip_rt_error_cost)
            peer->rate_tokens -= ip_rt_error_cost;
    }
    icmp_send(skb, ICMP_DEST_UNREACH, code, 0);

out:    kfree_skb(skb);
    return 0;
}
static void __ip_rt_update_pmtu(struct rtable *rt, struct flowi4 *fl4, u32 mtu)
{
    struct dst_entry *dst = &rt->dst;
    struct fib_result res;
    bool lock = false;

    if (ip_mtu_locked(dst))
        return;

    if (ipv4_mtu(dst) < mtu)
        return;

    if (mtu < ip_rt_min_pmtu) {
        lock = true;
        mtu = ip_rt_min_pmtu;
    }

    if (rt->rt_pmtu == mtu &&
        time_before(jiffies, dst->expires - ip_rt_mtu_expires / 2))
        return;

    if (fib_lookup(dev_net(dst->dev), fl4, &res, 0) == 0) {
        struct fib_nh *nh = &FIB_RES_NH(res);

        update_or_create_fnhe(nh, fl4->daddr, 0, mtu, lock,
                              jiffies + ip_rt_mtu_expires);
    }
}

static void ip_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
                              struct sk_buff *skb, u32 mtu)
{
    struct rtable *rt = (struct rtable *) dst;
    struct flowi4 fl4;

    ip_rt_build_flow_key(&fl4, sk, skb);
    __ip_rt_update_pmtu(rt, &fl4, mtu);
}
void ipv4_update_pmtu(struct sk_buff *skb, struct net *net, u32 mtu,
                      int oif, u32 mark, u8 protocol, int flow_flags)
{
    const struct iphdr *iph = (const struct iphdr *) skb->data;
    struct flowi4 fl4;
    struct rtable *rt;

    if (!mark)
        mark = IP4_REPLY_MARK(net, skb->mark);

    __build_flow_key(net, &fl4, NULL, iph, oif,
                     RT_TOS(iph->tos), protocol, mark, flow_flags);
    rt = __ip_route_output_key(net, &fl4);
    if (!IS_ERR(rt)) {
        __ip_rt_update_pmtu(rt, &fl4, mtu);
        ip_rt_put(rt);
    }
}
EXPORT_SYMBOL_GPL(ipv4_update_pmtu);
static void __ipv4_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, u32 mtu)
{
    const struct iphdr *iph = (const struct iphdr *) skb->data;
    struct flowi4 fl4;
    struct rtable *rt;

    __build_flow_key(sock_net(sk), &fl4, sk, iph, 0, 0, 0, 0, 0);

    if (!fl4.flowi4_mark)
        fl4.flowi4_mark = IP4_REPLY_MARK(sock_net(sk), skb->mark);

    rt = __ip_route_output_key(sock_net(sk), &fl4);
    if (!IS_ERR(rt)) {
        __ip_rt_update_pmtu(rt, &fl4, mtu);
        ip_rt_put(rt);
    }
}
void ipv4_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, u32 mtu)
{
    const struct iphdr *iph = (const struct iphdr *) skb->data;
    struct flowi4 fl4;
    struct rtable *rt;
    struct dst_entry *odst = NULL;
    struct net *net = sock_net(sk);

    if (!ip_sk_accept_pmtu(sk))
        goto out;

    odst = sk_dst_get(sk);

    if (sock_owned_by_user(sk) || !odst) {
        __ipv4_sk_update_pmtu(skb, sk, mtu);
        goto out;
    }

    __build_flow_key(net, &fl4, sk, iph, 0, 0, 0, 0, 0);

    rt = (struct rtable *)odst;
    if (odst->obsolete && !odst->ops->check(odst, 0)) {
        rt = ip_route_output_flow(sock_net(sk), &fl4, sk);

    __ip_rt_update_pmtu((struct rtable *) xfrm_dst_path(&rt->dst), &fl4, mtu);

    if (!dst_check(&rt->dst, 0)) {
        dst_release(&rt->dst);
        rt = ip_route_output_flow(sock_net(sk), &fl4, sk);

    sk_dst_set(sk, &rt->dst);

out:
    dst_release(odst);
}
EXPORT_SYMBOL_GPL(ipv4_sk_update_pmtu);
void ipv4_redirect(struct sk_buff *skb, struct net *net,
                   int oif, u32 mark, u8 protocol, int flow_flags)
{
    const struct iphdr *iph = (const struct iphdr *) skb->data;
    struct flowi4 fl4;
    struct rtable *rt;

    __build_flow_key(net, &fl4, NULL, iph, oif,
                     RT_TOS(iph->tos), protocol, mark, flow_flags);
    rt = __ip_route_output_key(net, &fl4);
    if (!IS_ERR(rt)) {
        __ip_do_redirect(rt, skb, &fl4, false);
        ip_rt_put(rt);
    }
}
EXPORT_SYMBOL_GPL(ipv4_redirect);

void ipv4_sk_redirect(struct sk_buff *skb, struct sock *sk)
{
    const struct iphdr *iph = (const struct iphdr *) skb->data;
    struct flowi4 fl4;
    struct rtable *rt;
    struct net *net = sock_net(sk);

    __build_flow_key(net, &fl4, sk, iph, 0, 0, 0, 0, 0);
    rt = __ip_route_output_key(net, &fl4);
    if (!IS_ERR(rt)) {
        __ip_do_redirect(rt, skb, &fl4, false);
        ip_rt_put(rt);
    }
}
EXPORT_SYMBOL_GPL(ipv4_sk_redirect);
static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie)
{
    struct rtable *rt = (struct rtable *) dst;

    /* All IPV4 dsts are created with ->obsolete set to the value
     * DST_OBSOLETE_FORCE_CHK which forces validation calls down
     * into this function always.
     *
     * When a PMTU/redirect information update invalidates a route,
     * this is indicated by setting obsolete to DST_OBSOLETE_KILL or
     * DST_OBSOLETE_DEAD by dst_free().
     */
    if (dst->obsolete != DST_OBSOLETE_FORCE_CHK || rt_is_expired(rt))
        return NULL;
    return dst;
}
static void ipv4_link_failure(struct sk_buff *skb)
{
    struct rtable *rt;

    icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_UNREACH, 0);

    rt = skb_rtable(skb);
    if (rt)
        dst_set_expires(&rt->dst, 0);
}

static int ip_rt_bug(struct net *net, struct sock *sk, struct sk_buff *skb)
{
    pr_debug("%s: %pI4 -> %pI4, %s\n",
             __func__, &ip_hdr(skb)->saddr, &ip_hdr(skb)->daddr,
             skb->dev ? skb->dev->name : "?");
    kfree_skb(skb);
    WARN_ON(1);
    return 0;
}
/*
 * We do not cache the source address of the outgoing interface,
 * because it is used only by the IP RR, TS and SRR options,
 * so it stays out of the fast path.
 *
 * BTW remember: "addr" is allowed to be unaligned
 * in IP options!
 */
void ip_rt_get_source(u8 *addr, struct sk_buff *skb, struct rtable *rt)
{
    __be32 src;

    if (rt_is_output_route(rt))
        src = ip_hdr(skb)->saddr;
    else {
        struct fib_result res;
        struct flowi4 fl4;
        const struct iphdr *iph = ip_hdr(skb);

        memset(&fl4, 0, sizeof(fl4));
        fl4.daddr = iph->daddr;
        fl4.saddr = iph->saddr;
        fl4.flowi4_tos = RT_TOS(iph->tos);
        fl4.flowi4_oif = rt->dst.dev->ifindex;
        fl4.flowi4_iif = skb->dev->ifindex;
        fl4.flowi4_mark = skb->mark;

        rcu_read_lock();
        if (fib_lookup(dev_net(rt->dst.dev), &fl4, &res, 0) == 0)
            src = FIB_RES_PREFSRC(dev_net(rt->dst.dev), res);
        else
            src = inet_select_addr(rt->dst.dev,
                                   rt_nexthop(rt, iph->daddr),
                                   RT_SCOPE_UNIVERSE);
        rcu_read_unlock();
    }
    memcpy(addr, &src, 4);
}
#ifdef CONFIG_IP_ROUTE_CLASSID
static void set_class_tag(struct rtable *rt, u32 tag)
{
    if (!(rt->dst.tclassid & 0xFFFF))
        rt->dst.tclassid |= tag & 0xFFFF;
    if (!(rt->dst.tclassid & 0xFFFF0000))
        rt->dst.tclassid |= tag & 0xFFFF0000;
}
#endif
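/*
 * Example (annotation, not in the original source): dst.tclassid packs two
 * 16-bit realms. With tag == 0x00020001, realm 1 lands in the low half and
 * realm 2 in the high half, but set_class_tag() only fills in whichever
 * halves are still zero, so an already-set realm is never overwritten.
 */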
static unsigned int ipv4_default_advmss(const struct dst_entry *dst)
{
    unsigned int header_size = sizeof(struct tcphdr) + sizeof(struct iphdr);
    unsigned int advmss = max_t(unsigned int, ipv4_mtu(dst) - header_size,
                                ip_rt_min_advmss);

    return min(advmss, IPV4_MAX_PMTU - header_size);
}
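/*
 * Worked example (annotation, not in the original source): for a route over
 * plain Ethernet with an MTU of 1500 and no explicit RTAX_ADVMSS metric,
 * header_size = 20 + 20 = 40 and the advertised MSS defaults to 1460; a
 * path whose MTU would yield less than ip_rt_min_advmss (256) is floored
 * there, and the result is capped at IPV4_MAX_PMTU - 40 at the top end.
 */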
static unsigned int ipv4_mtu(const struct dst_entry *dst)
{
    const struct rtable *rt = (const struct rtable *) dst;
    unsigned int mtu = rt->rt_pmtu;

    if (!mtu || time_after_eq(jiffies, rt->dst.expires))
        mtu = dst_metric_raw(dst, RTAX_MTU);

    if (mtu)
        return mtu;

    mtu = READ_ONCE(dst->dev->mtu);

    if (unlikely(ip_mtu_locked(dst))) {
        if (rt->rt_uses_gateway && mtu > 576)
            mtu = 576;
    }

    mtu = min_t(unsigned int, mtu, IP_MAX_MTU);

    return mtu - lwtunnel_headroom(dst->lwtstate, mtu);
}
static struct fib_nh_exception *find_exception(struct fib_nh *nh, __be32 daddr)
{
    struct fnhe_hash_bucket *hash = rcu_dereference(nh->nh_exceptions);
    struct fib_nh_exception *fnhe;
    u32 hval;

    if (!hash)
        return NULL;

    hval = fnhe_hashfun(daddr);

    for (fnhe = rcu_dereference(hash[hval].chain); fnhe;
         fnhe = rcu_dereference(fnhe->fnhe_next)) {
        if (fnhe->fnhe_daddr == daddr)
            return fnhe;
    }
    return NULL;
}
static bool rt_bind_exception(struct rtable *rt, struct fib_nh_exception *fnhe,
                              __be32 daddr, const bool do_cache)
{
    bool ret = false;

    spin_lock_bh(&fnhe_lock);

    if (daddr == fnhe->fnhe_daddr) {
        struct rtable __rcu **porig;
        struct rtable *orig;
        int genid = fnhe_genid(dev_net(rt->dst.dev));

        if (rt_is_input_route(rt))
            porig = &fnhe->fnhe_rth_input;
        else
            porig = &fnhe->fnhe_rth_output;
        orig = rcu_dereference(*porig);

        if (fnhe->fnhe_genid != genid) {
            fnhe->fnhe_genid = genid;
            fnhe->fnhe_gw = 0;
            fnhe->fnhe_pmtu = 0;
            fnhe->fnhe_expires = 0;
            fnhe_flush_routes(fnhe);
            orig = NULL;
        }
        fill_route_from_fnhe(rt, fnhe);
        if (!rt->rt_gateway)
            rt->rt_gateway = daddr;

        if (do_cache) {
            rcu_assign_pointer(*porig, rt);
            if (orig) {
                dst_dev_put(&orig->dst);
                dst_release(&orig->dst);
            }
            ret = true;
        }

        fnhe->fnhe_stamp = jiffies;
    }
    spin_unlock_bh(&fnhe_lock);

    return ret;
}
static bool rt_cache_route(struct fib_nh *nh, struct rtable *rt)
{
    struct rtable *orig, *prev, **p;

    if (rt_is_input_route(rt)) {
        p = (struct rtable **)&nh->nh_rth_input;
    } else {
        p = (struct rtable **)raw_cpu_ptr(nh->nh_pcpu_rth_output);
    }

    /* hold dst before doing cmpxchg() to avoid race condition
     * on this dst
     */
    dst_hold(&rt->dst);
    prev = cmpxchg(p, orig, rt);
    if (prev == orig) {
        dst_dev_put(&orig->dst);
        dst_release(&orig->dst);
    } else {
        dst_release(&rt->dst);
    }
}
struct uncached_list {
    spinlock_t       lock;
    struct list_head head;
};

static DEFINE_PER_CPU_ALIGNED(struct uncached_list, rt_uncached_list);

void rt_add_uncached_list(struct rtable *rt)
{
    struct uncached_list *ul = raw_cpu_ptr(&rt_uncached_list);

    rt->rt_uncached_list = ul;

    spin_lock_bh(&ul->lock);
    list_add_tail(&rt->rt_uncached, &ul->head);
    spin_unlock_bh(&ul->lock);
}

void rt_del_uncached_list(struct rtable *rt)
{
    if (!list_empty(&rt->rt_uncached)) {
        struct uncached_list *ul = rt->rt_uncached_list;

        spin_lock_bh(&ul->lock);
        list_del(&rt->rt_uncached);
        spin_unlock_bh(&ul->lock);
    }
}
static void ipv4_dst_destroy(struct dst_entry *dst)
{
    struct dst_metrics *p = (struct dst_metrics *)DST_METRICS_PTR(dst);
    struct rtable *rt = (struct rtable *)dst;

    if (p != &dst_default_metrics && refcount_dec_and_test(&p->refcnt))
        kfree(p);

    rt_del_uncached_list(rt);
}

void rt_flush_dev(struct net_device *dev)
{
    struct net *net = dev_net(dev);
    struct rtable *rt;
    int cpu;

    for_each_possible_cpu(cpu) {
        struct uncached_list *ul = &per_cpu(rt_uncached_list, cpu);

        spin_lock_bh(&ul->lock);
        list_for_each_entry(rt, &ul->head, rt_uncached) {
            if (rt->dst.dev != dev)
                continue;
            rt->dst.dev = net->loopback_dev;
            dev_hold(rt->dst.dev);
        }
        spin_unlock_bh(&ul->lock);
    }
}
static bool rt_cache_valid(const struct rtable *rt)
{
    return rt &&
           rt->dst.obsolete == DST_OBSOLETE_FORCE_CHK &&
           !rt_is_expired(rt);
}
static void rt_set_nexthop(struct rtable *rt, __be32 daddr,
                           const struct fib_result *res,
                           struct fib_nh_exception *fnhe,
                           struct fib_info *fi, u16 type, u32 itag,
                           const bool do_cache)
{
    bool cached = false;

    if (fi) {
        struct fib_nh *nh = &FIB_RES_NH(*res);

        if (nh->nh_gw && nh->nh_scope == RT_SCOPE_LINK) {
            rt->rt_gateway = nh->nh_gw;
            rt->rt_uses_gateway = 1;
        }
        dst_init_metrics(&rt->dst, fi->fib_metrics->metrics, true);
        if (fi->fib_metrics != &dst_default_metrics) {
            rt->dst._metrics |= DST_METRICS_REFCOUNTED;
            refcount_inc(&fi->fib_metrics->refcnt);
        }
#ifdef CONFIG_IP_ROUTE_CLASSID
        rt->dst.tclassid = nh->nh_tclassid;
#endif
        rt->dst.lwtstate = lwtstate_get(nh->nh_lwtstate);
        if (unlikely(fnhe))
            cached = rt_bind_exception(rt, fnhe, daddr, do_cache);
        else if (do_cache)
            cached = rt_cache_route(nh, rt);
        if (unlikely(!cached)) {
            /* Routes we intend to cache in nexthop exception or
             * FIB nexthop have the DST_NOCACHE bit clear.
             * However, if we are unsuccessful at storing this
             * route into the cache we really need to set it.
             */
            if (!rt->rt_gateway)
                rt->rt_gateway = daddr;
            rt_add_uncached_list(rt);
        }
    } else
        rt_add_uncached_list(rt);

#ifdef CONFIG_IP_ROUTE_CLASSID
#ifdef CONFIG_IP_MULTIPLE_TABLES
    set_class_tag(rt, res->tclassid);
#endif
    set_class_tag(rt, itag);
#endif
}
struct rtable *rt_dst_alloc(struct net_device *dev,
                            unsigned int flags, u16 type,
                            bool nopolicy, bool noxfrm, bool will_cache)
{
    struct rtable *rt;

    rt = dst_alloc(&ipv4_dst_ops, dev, 1, DST_OBSOLETE_FORCE_CHK,
                   (will_cache ? 0 : DST_HOST) |
                   (nopolicy ? DST_NOPOLICY : 0) |
                   (noxfrm ? DST_NOXFRM : 0));

    if (rt) {
        rt->rt_genid = rt_genid_ipv4(dev_net(dev));
        rt->rt_flags = flags;
        rt->rt_type = type;
        rt->rt_is_input = 0;
        rt->rt_mtu_locked = 0;
        rt->rt_uses_gateway = 0;
        INIT_LIST_HEAD(&rt->rt_uncached);

        rt->dst.output = ip_output;
        if (flags & RTCF_LOCAL)
            rt->dst.input = ip_local_deliver;
    }

    return rt;
}
EXPORT_SYMBOL(rt_dst_alloc);
/* called in rcu_read_lock() section */
int ip_mc_validate_source(struct sk_buff *skb, __be32 daddr, __be32 saddr,
                          u8 tos, struct net_device *dev,
                          struct in_device *in_dev, u32 *itag)
{
    /* Primary sanity checks. */
    if (!in_dev)
        return -EINVAL;

    if (ipv4_is_multicast(saddr) || ipv4_is_lbcast(saddr) ||
        skb->protocol != htons(ETH_P_IP))
        return -EINVAL;

    if (ipv4_is_loopback(saddr) && !IN_DEV_ROUTE_LOCALNET(in_dev))
        return -EINVAL;

    if (ipv4_is_zeronet(saddr)) {
        if (!ipv4_is_local_multicast(daddr))
            return -EINVAL;
    } else {
        err = fib_validate_source(skb, saddr, 0, tos, 0, dev,
                                  in_dev, itag);
    }
    return 0;
}
/* called in rcu_read_lock() section */
static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr,
                             u8 tos, struct net_device *dev, int our)
{
    struct in_device *in_dev = __in_dev_get_rcu(dev);
    unsigned int flags = RTCF_MULTICAST;
    struct rtable *rth;
    u32 itag = 0;
    int err;

    err = ip_mc_validate_source(skb, daddr, saddr, tos, dev, in_dev, &itag);
    if (err)
        return err;

    if (our)
        flags |= RTCF_LOCAL;

    rth = rt_dst_alloc(dev_net(dev)->loopback_dev, flags, RTN_MULTICAST,
                       IN_DEV_CONF_GET(in_dev, NOPOLICY), false, false);
    if (!rth)
        return -ENOBUFS;

#ifdef CONFIG_IP_ROUTE_CLASSID
    rth->dst.tclassid = itag;
#endif
    rth->dst.output = ip_rt_bug;
    rth->rt_is_input = 1;

#ifdef CONFIG_IP_MROUTE
    if (!ipv4_is_local_multicast(daddr) && IN_DEV_MFORWARD(in_dev))
        rth->dst.input = ip_mr_input;
#endif
    RT_CACHE_STAT_INC(in_slow_mc);

    skb_dst_set(skb, &rth->dst);
    return 0;
}
static void ip_handle_martian_source(struct net_device *dev,
                                     struct in_device *in_dev,
                                     struct sk_buff *skb,
                                     __be32 daddr,
                                     __be32 saddr)
{
    RT_CACHE_STAT_INC(in_martian_src);
#ifdef CONFIG_IP_ROUTE_VERBOSE
    if (IN_DEV_LOG_MARTIANS(in_dev) && net_ratelimit()) {
        /*
         * RFC1812 recommendation: if the source is martian,
         * the only hint is the MAC header.
         */
        pr_warn("martian source %pI4 from %pI4, on dev %s\n",
                &daddr, &saddr, dev->name);
        if (dev->hard_header_len && skb_mac_header_was_set(skb)) {
            print_hex_dump(KERN_WARNING, "ll header: ",
                           DUMP_PREFIX_OFFSET, 16, 1,
                           skb_mac_header(skb),
                           dev->hard_header_len, true);
        }
    }
#endif
}
static void ip_del_fnhe(struct fib_nh *nh, __be32 daddr)
{
    struct fnhe_hash_bucket *hash;
    struct fib_nh_exception *fnhe, __rcu **fnhe_p;
    u32 hval = fnhe_hashfun(daddr);

    spin_lock_bh(&fnhe_lock);

    hash = rcu_dereference_protected(nh->nh_exceptions,
                                     lockdep_is_held(&fnhe_lock));
    hash += hval;

    fnhe_p = &hash->chain;
    fnhe = rcu_dereference_protected(*fnhe_p, lockdep_is_held(&fnhe_lock));
    while (fnhe) {
        if (fnhe->fnhe_daddr == daddr) {
            rcu_assign_pointer(*fnhe_p, rcu_dereference_protected(
                fnhe->fnhe_next, lockdep_is_held(&fnhe_lock)));
            fnhe_flush_routes(fnhe);
            kfree_rcu(fnhe, rcu);
            break;
        }
        fnhe_p = &fnhe->fnhe_next;
        fnhe = rcu_dereference_protected(fnhe->fnhe_next,
                                         lockdep_is_held(&fnhe_lock));
    }

    spin_unlock_bh(&fnhe_lock);
}
/* called in rcu_read_lock() section */
static int __mkroute_input(struct sk_buff *skb,
                           const struct fib_result *res,
                           struct in_device *in_dev,
                           __be32 daddr, __be32 saddr, u32 tos)
{
    struct fib_nh_exception *fnhe;
    struct rtable *rth;
    int err;
    struct in_device *out_dev;
    bool do_cache;
    u32 itag = 0;

    /* get a working reference to the output device */
    out_dev = __in_dev_get_rcu(FIB_RES_DEV(*res));
    if (!out_dev) {
        net_crit_ratelimited("Bug in ip_route_input_slow(). Please report.\n");
        return -EINVAL;
    }

    err = fib_validate_source(skb, saddr, daddr, tos, FIB_RES_OIF(*res),
                              in_dev->dev, in_dev, &itag);
    if (err < 0) {
        ip_handle_martian_source(in_dev->dev, in_dev, skb, daddr,
                                 saddr);
        goto cleanup;
    }

    do_cache = res->fi && !itag;
    if (out_dev == in_dev && err && IN_DEV_TX_REDIRECTS(out_dev) &&
        skb->protocol == htons(ETH_P_IP) &&
        (IN_DEV_SHARED_MEDIA(out_dev) ||
         inet_addr_onlink(out_dev, saddr, FIB_RES_GW(*res))))
        IPCB(skb)->flags |= IPSKB_DOREDIRECT;
    if (skb->protocol != htons(ETH_P_IP)) {
        /* Not IP (i.e. ARP). Do not create a route, if it is
         * invalid for proxy arp. DNAT routes are always valid.
         *
         * The proxy arp feature has been extended to allow ARP
         * replies back to the same interface, to support
         * Private VLAN switch technologies. See arp.c.
         */
        if (out_dev == in_dev &&
            IN_DEV_PROXY_ARP_PVLAN(in_dev) == 0) {
            err = -EINVAL;
            goto cleanup;
        }
    }
    fnhe = find_exception(&FIB_RES_NH(*res), daddr);
    if (fnhe) {
        rth = rcu_dereference(fnhe->fnhe_rth_input);
        if (rth && rth->dst.expires &&
            time_after(jiffies, rth->dst.expires)) {
            ip_del_fnhe(&FIB_RES_NH(*res), daddr);
            fnhe = NULL;
        }
    } else {
        rth = rcu_dereference(FIB_RES_NH(*res).nh_rth_input);
    }

    if (rt_cache_valid(rth)) {
        skb_dst_set_noref(skb, &rth->dst);
        goto out;
    }

    rth = rt_dst_alloc(out_dev->dev, 0, res->type,
                       IN_DEV_CONF_GET(in_dev, NOPOLICY),
                       IN_DEV_CONF_GET(out_dev, NOXFRM), do_cache);

    rth->rt_is_input = 1;
    RT_CACHE_STAT_INC(in_slow_tot);

    rth->dst.input = ip_forward;

    rt_set_nexthop(rth, daddr, res, fnhe, res->fi, res->type, itag,
                   do_cache);
    lwtunnel_set_redirect(&rth->dst);
    skb_dst_set(skb, &rth->dst);
out:
    err = 0;
cleanup:
    return err;
}
#ifdef CONFIG_IP_ROUTE_MULTIPATH
/* To make ICMP packets follow the right flow, the multipath hash is
 * calculated from the inner IP addresses.
 */
static void ip_multipath_l3_keys(const struct sk_buff *skb,
                                 struct flow_keys *hash_keys)
{
    const struct iphdr *outer_iph = ip_hdr(skb);
    const struct iphdr *key_iph = outer_iph;
    const struct iphdr *inner_iph;
    const struct icmphdr *icmph;
    struct iphdr _inner_iph;
    struct icmphdr _icmph;

    if (likely(outer_iph->protocol != IPPROTO_ICMP))
        goto out;

    if (unlikely((outer_iph->frag_off & htons(IP_OFFSET)) != 0))
        goto out;

    icmph = skb_header_pointer(skb, outer_iph->ihl * 4, sizeof(_icmph),
                               &_icmph);
    if (!icmph)
        goto out;

    if (icmph->type != ICMP_DEST_UNREACH &&
        icmph->type != ICMP_REDIRECT &&
        icmph->type != ICMP_TIME_EXCEEDED &&
        icmph->type != ICMP_PARAMETERPROB)
        goto out;

    inner_iph = skb_header_pointer(skb,
                                   outer_iph->ihl * 4 + sizeof(_icmph),
                                   sizeof(_inner_iph), &_inner_iph);
    if (!inner_iph)
        goto out;

    key_iph = inner_iph;
out:
    hash_keys->addrs.v4addrs.src = key_iph->saddr;
    hash_keys->addrs.v4addrs.dst = key_iph->daddr;
}
/* if skb is set it will be used and fl4 can be NULL */
int fib_multipath_hash(const struct net *net, const struct flowi4 *fl4,
                       const struct sk_buff *skb, struct flow_keys *flkeys)
{
    struct flow_keys hash_keys;
    u32 mhash;

    switch (net->ipv4.sysctl_fib_multipath_hash_policy) {
    case 0:
        memset(&hash_keys, 0, sizeof(hash_keys));
        hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
        if (skb) {
            ip_multipath_l3_keys(skb, &hash_keys);
        } else {
            hash_keys.addrs.v4addrs.src = fl4->saddr;
            hash_keys.addrs.v4addrs.dst = fl4->daddr;
        }
        break;
    case 1:
        /* skb is currently provided only when forwarding */
        if (skb) {
            unsigned int flag = FLOW_DISSECTOR_F_STOP_AT_ENCAP;
            struct flow_keys keys;

            /* short-circuit if we already have L4 hash present */
            if (skb->l4_hash)
                return skb_get_hash_raw(skb) >> 1;

            memset(&hash_keys, 0, sizeof(hash_keys));

            if (!flkeys) {
                skb_flow_dissect_flow_keys(skb, &keys, flag);
                flkeys = &keys;
            }

            hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
            hash_keys.addrs.v4addrs.src = flkeys->addrs.v4addrs.src;
            hash_keys.addrs.v4addrs.dst = flkeys->addrs.v4addrs.dst;
            hash_keys.ports.src = flkeys->ports.src;
            hash_keys.ports.dst = flkeys->ports.dst;
            hash_keys.basic.ip_proto = flkeys->basic.ip_proto;
        } else {
            memset(&hash_keys, 0, sizeof(hash_keys));
            hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
            hash_keys.addrs.v4addrs.src = fl4->saddr;
            hash_keys.addrs.v4addrs.dst = fl4->daddr;
            hash_keys.ports.src = fl4->fl4_sport;
            hash_keys.ports.dst = fl4->fl4_dport;
            hash_keys.basic.ip_proto = fl4->flowi4_proto;
        }
        break;
    }
    mhash = flow_hash_from_keys(&hash_keys);

    return mhash >> 1;
}
#endif /* CONFIG_IP_ROUTE_MULTIPATH */
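/*
 * Usage note (annotation, not in the original source): the 31-bit value
 * returned above is compared against each nexthop's precomputed upper bound
 * in fib_select_multipath(), so a given flow is pinned to one nexthop and
 * traffic splits in proportion to the configured nexthop weights without
 * keeping per-flow state.
 */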
static int ip_mkroute_input(struct sk_buff *skb,
                            struct fib_result *res,
                            struct in_device *in_dev,
                            __be32 daddr, __be32 saddr, u32 tos,
                            struct flow_keys *hkeys)
{
#ifdef CONFIG_IP_ROUTE_MULTIPATH
    if (res->fi && res->fi->fib_nhs > 1) {
        int h = fib_multipath_hash(res->fi->fib_net, NULL, skb, hkeys);

        fib_select_multipath(res, h);
    }
#endif

    /* create a routing cache entry */
    return __mkroute_input(skb, res, in_dev, daddr, saddr, tos);
}
/*
 * NOTE. We drop all packets that have a local source
 * address, because every properly looped back packet
 * must already have the correct destination attached by the output routine.
 *
 * This approach solves two big problems:
 * 1. Non-simplex devices are handled properly.
 * 2. IP spoofing attempts are filtered with a 100% guarantee.
 *
 * called with rcu_read_lock()
 */
static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr,
                               u8 tos, struct net_device *dev,
                               struct fib_result *res)
{
    struct in_device *in_dev = __in_dev_get_rcu(dev);
    struct flow_keys *flkeys = NULL, _flkeys;
    struct net *net = dev_net(dev);
    struct ip_tunnel_info *tun_info;
    int err = -EINVAL;
    unsigned int flags = 0;
    u32 itag = 0;
    struct rtable *rth;
    struct flowi4 fl4;
    bool do_cache;

    /* IP on this device is disabled. */
    if (!in_dev)
        goto out;
    /* Check for the most weird martians, which cannot be detected
     * by fib_lookup().
     */
    tun_info = skb_tunnel_info(skb);
    if (tun_info && !(tun_info->mode & IP_TUNNEL_INFO_TX))
        fl4.flowi4_tun_key.tun_id = tun_info->key.tun_id;
    else
        fl4.flowi4_tun_key.tun_id = 0;
    skb_dst_drop(skb);

    if (ipv4_is_multicast(saddr) || ipv4_is_lbcast(saddr))
        goto martian_source;

    if (ipv4_is_lbcast(daddr) || (saddr == 0 && daddr == 0))
        goto brd_input;
    /* Accept zero addresses only to limited broadcast;
     * I even do not know whether to fix this or not. Waiting for complaints :-)
     */
    if (ipv4_is_zeronet(saddr))
        goto martian_source;

    if (ipv4_is_zeronet(daddr))
        goto martian_destination;
    /* The following code tries to avoid calling IN_DEV_NET_ROUTE_LOCALNET(),
     * and calls it at most once if daddr and/or saddr are loopback addresses.
     */
    if (ipv4_is_loopback(daddr)) {
        if (!IN_DEV_NET_ROUTE_LOCALNET(in_dev, net))
            goto martian_destination;
    } else if (ipv4_is_loopback(saddr)) {
        if (!IN_DEV_NET_ROUTE_LOCALNET(in_dev, net))
            goto martian_source;
    }
    /*
     *	Now we are ready to route the packet.
     */
    fl4.flowi4_oif = 0;
    fl4.flowi4_iif = dev->ifindex;
    fl4.flowi4_mark = skb->mark;
    fl4.flowi4_tos = tos;
    fl4.flowi4_scope = RT_SCOPE_UNIVERSE;
    fl4.flowi4_flags = 0;
    fl4.daddr = daddr;
    fl4.saddr = saddr;
    fl4.flowi4_uid = sock_net_uid(net, NULL);

    if (fib4_rules_early_flow_dissect(net, skb, &fl4, &_flkeys))
        flkeys = &_flkeys;

    err = fib_lookup(net, &fl4, res, 0);
    if (err != 0) {
        if (!IN_DEV_FORWARD(in_dev))
            err = -EHOSTUNREACH;
        goto no_route;
    }

    if (res->type == RTN_BROADCAST)
        goto brd_input;

    if (res->type == RTN_LOCAL) {
        err = fib_validate_source(skb, saddr, daddr, tos,
                                  0, dev, in_dev, &itag);
        if (err < 0)
            goto martian_source;
        goto local_input;
    }

    if (!IN_DEV_FORWARD(in_dev)) {
        err = -EHOSTUNREACH;
        goto no_route;
    }
    if (res->type != RTN_UNICAST)
        goto martian_destination;

    err = ip_mkroute_input(skb, res, in_dev, daddr, saddr, tos, flkeys);
brd_input:
    if (skb->protocol != htons(ETH_P_IP))
        goto e_inval;

    if (!ipv4_is_zeronet(saddr)) {
        err = fib_validate_source(skb, saddr, 0, tos, 0, dev,
                                  in_dev, &itag);
        if (err < 0)
            goto martian_source;
    }
    flags |= RTCF_BROADCAST;
    res->type = RTN_BROADCAST;
    RT_CACHE_STAT_INC(in_brd);
local_input:
    do_cache = false;
    if (res->fi && !itag) {
        rth = rcu_dereference(FIB_RES_NH(*res).nh_rth_input);
        if (rt_cache_valid(rth)) {
            skb_dst_set_noref(skb, &rth->dst);
            err = 0;
            goto out;
        }
        do_cache = true;
    }

    rth = rt_dst_alloc(l3mdev_master_dev_rcu(dev) ? : net->loopback_dev,
                       flags | RTCF_LOCAL, res->type,
                       IN_DEV_CONF_GET(in_dev, NOPOLICY), false, do_cache);
    if (!rth)
        goto e_nobufs;

    rth->dst.output = ip_rt_bug;
#ifdef CONFIG_IP_ROUTE_CLASSID
    rth->dst.tclassid = itag;
#endif
    rth->rt_is_input = 1;

    RT_CACHE_STAT_INC(in_slow_tot);
    if (res->type == RTN_UNREACHABLE) {
        rth->dst.input = ip_error;
        rth->dst.error = -err;
        rth->rt_flags &= ~RTCF_LOCAL;
    }

    if (do_cache) {
        struct fib_nh *nh = &FIB_RES_NH(*res);

        rth->dst.lwtstate = lwtstate_get(nh->nh_lwtstate);
        if (lwtunnel_input_redirect(rth->dst.lwtstate)) {
            WARN_ON(rth->dst.input == lwtunnel_input);
            rth->dst.lwtstate->orig_input = rth->dst.input;
            rth->dst.input = lwtunnel_input;
        }

        if (unlikely(!rt_cache_route(nh, rth)))
            rt_add_uncached_list(rth);
    }
    skb_dst_set(skb, &rth->dst);
no_route:
    RT_CACHE_STAT_INC(in_no_route);
    res->type = RTN_UNREACHABLE;
    goto local_input;

    /*
     *	Do not cache martian addresses: they should be logged (RFC1812)
     */
martian_destination:
    RT_CACHE_STAT_INC(in_martian_dst);
#ifdef CONFIG_IP_ROUTE_VERBOSE
    if (IN_DEV_LOG_MARTIANS(in_dev))
        net_warn_ratelimited("martian destination %pI4 from %pI4, dev %s\n",
                             &daddr, &saddr, dev->name);
#endif

e_inval:
    err = -EINVAL;
    goto out;

e_nobufs:
    err = -ENOBUFS;
    goto out;

martian_source:
    ip_handle_martian_source(dev, in_dev, skb, daddr, saddr);
    goto out;
}
int ip_route_input_noref(struct sk_buff *skb, __be32 daddr, __be32 saddr,
                         u8 tos, struct net_device *dev)
{
    struct fib_result res;
    int err;

    tos &= IPTOS_RT_MASK;
    rcu_read_lock();
    err = ip_route_input_rcu(skb, daddr, saddr, tos, dev, &res);
    rcu_read_unlock();

    return err;
}
EXPORT_SYMBOL(ip_route_input_noref);
/* called with rcu_read_lock held */
int ip_route_input_rcu(struct sk_buff *skb, __be32 daddr, __be32 saddr,
                       u8 tos, struct net_device *dev, struct fib_result *res)
{
    /* Multicast recognition logic was moved from the route cache to here.
       The problem was that too many Ethernet cards have broken/missing
       hardware multicast filters :-( As a result, a host on a multicast
       network acquires a lot of useless route cache entries, e.g. for
       SDR messages from all over the world. Now we try to get rid of them.
       Really, provided the software IP multicast filter is organized
       reasonably (at least, hashed), this does not result in a slowdown
       compared with route cache reject entries.
       Note that multicast routers are not affected, because a
       route cache entry is created eventually.
     */
    if (ipv4_is_multicast(daddr)) {
        struct in_device *in_dev = __in_dev_get_rcu(dev);
        int our = 0;
        int err = -EINVAL;

        if (in_dev)
            our = ip_check_mc_rcu(in_dev, daddr, saddr,
                                  ip_hdr(skb)->protocol);

        /* check l3 master if no match yet */
        if ((!in_dev || !our) && netif_is_l3_slave(dev)) {
            struct in_device *l3_in_dev;

            l3_in_dev = __in_dev_get_rcu(skb->dev);
            if (l3_in_dev)
                our = ip_check_mc_rcu(l3_in_dev, daddr, saddr,
                                      ip_hdr(skb)->protocol);
        }

        if (our
#ifdef CONFIG_IP_MROUTE
            ||
            (!ipv4_is_local_multicast(daddr) &&
             IN_DEV_MFORWARD(in_dev))
#endif
            ) {
            err = ip_route_input_mc(skb, daddr, saddr,
                                    tos, dev, our);
        }
        return err;
    }

    return ip_route_input_slow(skb, daddr, saddr, tos, dev, res);
}
/* called with rcu_read_lock() */
static struct rtable *__mkroute_output(const struct fib_result *res,
                                       const struct flowi4 *fl4, int orig_oif,
                                       struct net_device *dev_out,
                                       unsigned int flags)
{
    struct fib_info *fi = res->fi;
    struct fib_nh_exception *fnhe;
    struct in_device *in_dev;
    u16 type = res->type;
    struct rtable *rth;
    bool do_cache;

    in_dev = __in_dev_get_rcu(dev_out);
    if (!in_dev)
        return ERR_PTR(-EINVAL);

    if (likely(!IN_DEV_ROUTE_LOCALNET(in_dev)))
        if (ipv4_is_loopback(fl4->saddr) &&
            !(dev_out->flags & IFF_LOOPBACK) &&
            !netif_is_l3_master(dev_out))
            return ERR_PTR(-EINVAL);

    if (ipv4_is_lbcast(fl4->daddr))
        type = RTN_BROADCAST;
    else if (ipv4_is_multicast(fl4->daddr))
        type = RTN_MULTICAST;
    else if (ipv4_is_zeronet(fl4->daddr))
        return ERR_PTR(-EINVAL);

    if (dev_out->flags & IFF_LOOPBACK)
        flags |= RTCF_LOCAL;

    do_cache = true;
    if (type == RTN_BROADCAST) {
        flags |= RTCF_BROADCAST | RTCF_LOCAL;
        fi = NULL;
    } else if (type == RTN_MULTICAST) {
        flags |= RTCF_MULTICAST | RTCF_LOCAL;
        if (!ip_check_mc_rcu(in_dev, fl4->daddr, fl4->saddr,
                             fl4->flowi4_proto))
            flags &= ~RTCF_LOCAL;
        else
            do_cache = false;
        /* If the multicast route does not exist, use
         * the default one, but do not gateway in this case.
         * Yes, it is a hack.
         */
        if (fi && res->prefixlen < 4)
            fi = NULL;
    } else if ((type == RTN_LOCAL) && (orig_oif != 0) &&
               (orig_oif != dev_out->ifindex)) {
        /* For local routes that require a particular output interface
         * we do not want to cache the result.  Caching the result
         * causes incorrect behaviour when there are multiple source
         * addresses on the interface, the end result being that if the
         * intended recipient is waiting on that interface for the
         * packet it won't receive it because it will be delivered on
         * the loopback interface and the IP_PKTINFO ipi_ifindex will
         * be set to the loopback interface as well.
         */
        do_cache = false;
    }
    fnhe = NULL;
    do_cache &= fi != NULL;
    if (fi) {
        struct rtable __rcu **prth;
        struct fib_nh *nh = &FIB_RES_NH(*res);

        fnhe = find_exception(nh, fl4->daddr);
        if (fnhe) {
            prth = &fnhe->fnhe_rth_output;
            rth = rcu_dereference(*prth);
            if (rth && rth->dst.expires &&
                time_after(jiffies, rth->dst.expires)) {
                ip_del_fnhe(nh, fl4->daddr);
                fnhe = NULL;
            } else {
                goto rt_cache;
            }
        }

        if (unlikely(fl4->flowi4_flags &
                     FLOWI_FLAG_KNOWN_NH &&
                     !(nh->nh_gw &&
                       nh->nh_scope == RT_SCOPE_LINK))) {
            do_cache = false;
            goto add;
        }
        prth = raw_cpu_ptr(nh->nh_pcpu_rth_output);
        rth = rcu_dereference(*prth);

rt_cache:
        if (rt_cache_valid(rth) && dst_hold_safe(&rth->dst))
            return rth;
    }

add:
    rth = rt_dst_alloc(dev_out, flags, type,
                       IN_DEV_CONF_GET(in_dev, NOPOLICY),
                       IN_DEV_CONF_GET(in_dev, NOXFRM),
                       do_cache);
    if (!rth)
        return ERR_PTR(-ENOBUFS);

    rth->rt_iif = orig_oif;

    RT_CACHE_STAT_INC(out_slow_tot);
    if (flags & (RTCF_BROADCAST | RTCF_MULTICAST)) {
        if (flags & RTCF_LOCAL &&
            !(dev_out->flags & IFF_LOOPBACK)) {
            rth->dst.output = ip_mc_output;
            RT_CACHE_STAT_INC(out_slow_mc);
        }
#ifdef CONFIG_IP_MROUTE
        if (type == RTN_MULTICAST) {
            if (IN_DEV_MFORWARD(in_dev) &&
                !ipv4_is_local_multicast(fl4->daddr)) {
                rth->dst.input = ip_mr_input;
                rth->dst.output = ip_mc_output;
            }
        }
#endif
    }

    rt_set_nexthop(rth, fl4->daddr, res, fnhe, fi, type, 0, do_cache);
    lwtunnel_set_redirect(&rth->dst);

    return rth;
}
/*
 * Major route resolver routine.
 */

struct rtable *ip_route_output_key_hash(struct net *net, struct flowi4 *fl4,
                                        const struct sk_buff *skb)
{
    __u8 tos = RT_FL_TOS(fl4);
    struct fib_result res = {
        .type  = RTN_UNSPEC,
        .fi    = NULL,
        .table = NULL,
    };
    struct rtable *rth;

    fl4->flowi4_iif = LOOPBACK_IFINDEX;
    fl4->flowi4_tos = tos & IPTOS_RT_MASK;
    fl4->flowi4_scope = ((tos & RTO_ONLINK) ?
                         RT_SCOPE_LINK : RT_SCOPE_UNIVERSE);

    rcu_read_lock();
    rth = ip_route_output_key_hash_rcu(net, fl4, &res, skb);
    rcu_read_unlock();

    return rth;
}
EXPORT_SYMBOL_GPL(ip_route_output_key_hash);
struct rtable *ip_route_output_key_hash_rcu(struct net *net, struct flowi4 *fl4,
                                            struct fib_result *res,
                                            const struct sk_buff *skb)
{
    struct net_device *dev_out = NULL;
    int orig_oif = fl4->flowi4_oif;
    unsigned int flags = 0;
    struct rtable *rth;
    int err = -ENETUNREACH;

    if (fl4->saddr) {
        rth = ERR_PTR(-EINVAL);
        if (ipv4_is_multicast(fl4->saddr) ||
            ipv4_is_lbcast(fl4->saddr) ||
            ipv4_is_zeronet(fl4->saddr))
            goto out;
    }
    /* I removed the check for oif == dev_out->oif here.
       It was wrong for two reasons:
       1. ip_dev_find(net, saddr) can return the wrong iface, if saddr
          is assigned to multiple interfaces.
       2. Moreover, we are allowed to send packets with saddr
          of another iface. --ANK
     */
    if (fl4->flowi4_oif == 0 &&
        (ipv4_is_multicast(fl4->daddr) ||
         ipv4_is_lbcast(fl4->daddr))) {
        /* It is equivalent to inet_addr_type(saddr) == RTN_LOCAL */
        dev_out = __ip_dev_find(net, fl4->saddr, false);
        if (!dev_out)
            goto out;
        /* Special hack: the user can direct multicasts
           and limited broadcast via the necessary interface
           without fiddling with IP_MULTICAST_IF or IP_PKTINFO.
           This hack is not just for fun, it allows
           vic, vat and friends to work.
           They bind the socket to loopback, set ttl to zero
           and expect that it will work.
           From the viewpoint of the routing cache they are broken,
           because we are not allowed to build a multicast path
           with a loopback source addr (look, the routing cache
           cannot know that ttl is zero, so the packet
           will not leave this host and the route is valid).
           Luckily, this hack is a good workaround.
         */
        fl4->flowi4_oif = dev_out->ifindex;
        goto make_route;
    }

    if (!(fl4->flowi4_flags & FLOWI_FLAG_ANYSRC)) {
        /* It is equivalent to inet_addr_type(saddr) == RTN_LOCAL */
        if (!__ip_dev_find(net, fl4->saddr, false))
            goto out;
    }

    if (fl4->flowi4_oif) {
        dev_out = dev_get_by_index_rcu(net, fl4->flowi4_oif);
        rth = ERR_PTR(-ENODEV);
        if (!dev_out)
            goto out;

        /* RACE: Check return value of inet_select_addr instead. */
        if (!(dev_out->flags & IFF_UP) || !__in_dev_get_rcu(dev_out)) {
            rth = ERR_PTR(-ENETUNREACH);
            goto out;
        }
        if (ipv4_is_local_multicast(fl4->daddr) ||
            ipv4_is_lbcast(fl4->daddr) ||
            fl4->flowi4_proto == IPPROTO_IGMP) {
            if (!fl4->saddr)
                fl4->saddr = inet_select_addr(dev_out, 0,
                                              RT_SCOPE_LINK);
            goto make_route;
        }
        if (!fl4->saddr) {
            if (ipv4_is_multicast(fl4->daddr))
                fl4->saddr = inet_select_addr(dev_out, 0,
                                              fl4->flowi4_scope);
            else if (!fl4->daddr)
                fl4->saddr = inet_select_addr(dev_out, 0,
                                              RT_SCOPE_HOST);
        }
    }

    if (!fl4->daddr) {
        fl4->daddr = fl4->saddr;
        if (!fl4->daddr)
            fl4->daddr = fl4->saddr = htonl(INADDR_LOOPBACK);
        dev_out = net->loopback_dev;
        fl4->flowi4_oif = LOOPBACK_IFINDEX;
        res->type = RTN_LOCAL;
        flags |= RTCF_LOCAL;
        goto make_route;
    }
    err = fib_lookup(net, fl4, res, 0);
    if (err) {
        res->fi = NULL;
        res->table = NULL;
        if (fl4->flowi4_oif &&
            (ipv4_is_multicast(fl4->daddr) ||
             !netif_index_is_l3_master(net, fl4->flowi4_oif))) {
            /* Apparently, the routing tables are wrong. Assume
               that the destination is on-link.

               Why? Because we are allowed to send to an iface
               even if it has NO routes and NO assigned
               addresses. When oif is specified, the routing
               tables are looked up with only one purpose:
               to catch whether the destination is gatewayed rather than
               direct. Moreover, if MSG_DONTROUTE is set,
               we send the packet, ignoring both the routing tables
               and ifaddr state. --ANK

               We could do this even if oif is unknown,
               likely as IPv6 does, but we do not.
             */
            if (fl4->saddr == 0)
                fl4->saddr = inet_select_addr(dev_out, 0,
                                              RT_SCOPE_LINK);
            res->type = RTN_UNICAST;
            goto make_route;
        }
        rth = ERR_PTR(err);
        goto out;
    }

    if (res->type == RTN_LOCAL) {
        if (!fl4->saddr) {
            if (res->fi->fib_prefsrc)
                fl4->saddr = res->fi->fib_prefsrc;
            else
                fl4->saddr = fl4->daddr;
        }

        /* L3 master device is the loopback for that domain */
        dev_out = l3mdev_master_dev_rcu(FIB_RES_DEV(*res)) ? :
            net->loopback_dev;

        /* make sure orig_oif points to fib result device even
         * though packet rx/tx happens over loopback or l3mdev
         */
        orig_oif = FIB_RES_OIF(*res);

        fl4->flowi4_oif = dev_out->ifindex;
        flags |= RTCF_LOCAL;
        goto make_route;
    }

    fib_select_path(net, res, fl4, skb);

    dev_out = FIB_RES_DEV(*res);
    fl4->flowi4_oif = dev_out->ifindex;

make_route:
    rth = __mkroute_output(res, fl4, orig_oif, dev_out, flags);

out:
    return rth;
}
static struct dst_entry *ipv4_blackhole_dst_check(struct dst_entry *dst, u32 cookie)
{
    return NULL;
}

static unsigned int ipv4_blackhole_mtu(const struct dst_entry *dst)
{
    unsigned int mtu = dst_metric_raw(dst, RTAX_MTU);

    return mtu ? : dst->dev->mtu;
}

static void ipv4_rt_blackhole_update_pmtu(struct dst_entry *dst, struct sock *sk,
                                          struct sk_buff *skb, u32 mtu)
{
}

static void ipv4_rt_blackhole_redirect(struct dst_entry *dst, struct sock *sk,
                                       struct sk_buff *skb)
{
}

static u32 *ipv4_rt_blackhole_cow_metrics(struct dst_entry *dst,
                                          unsigned long old)
{
    return NULL;
}

static struct dst_ops ipv4_dst_blackhole_ops = {
    .family         = AF_INET,
    .check          = ipv4_blackhole_dst_check,
    .mtu            = ipv4_blackhole_mtu,
    .default_advmss = ipv4_default_advmss,
    .update_pmtu    = ipv4_rt_blackhole_update_pmtu,
    .redirect       = ipv4_rt_blackhole_redirect,
    .cow_metrics    = ipv4_rt_blackhole_cow_metrics,
    .neigh_lookup   = ipv4_neigh_lookup,
};
2529 struct dst_entry *ipv4_blackhole_route(struct net *net, struct dst_entry *dst_orig)
2531 struct rtable *ort = (struct rtable *) dst_orig;
2534 rt = dst_alloc(&ipv4_dst_blackhole_ops, NULL, 1, DST_OBSOLETE_DEAD, 0);
2536 struct dst_entry *new = &rt->dst;
2539 new->input = dst_discard;
2540 new->output = dst_discard_out;
2542 new->dev = net->loopback_dev;
2546 rt->rt_is_input = ort->rt_is_input;
2547 rt->rt_iif = ort->rt_iif;
2548 rt->rt_pmtu = ort->rt_pmtu;
2549 rt->rt_mtu_locked = ort->rt_mtu_locked;
2551 rt->rt_genid = rt_genid_ipv4(net);
2552 rt->rt_flags = ort->rt_flags;
2553 rt->rt_type = ort->rt_type;
2554 rt->rt_gateway = ort->rt_gateway;
2555 rt->rt_uses_gateway = ort->rt_uses_gateway;
2557 INIT_LIST_HEAD(&rt->rt_uncached);
2560 dst_release(dst_orig);
2562 return rt ? &rt->dst : ERR_PTR(-ENOMEM);
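/* Minimal usage sketch for ip_route_output_flow() (hypothetical caller,
 * error handling trimmed to the essentials):
 *
 *	struct flowi4 fl4 = {
 *		.daddr = daddr,
 *		.flowi4_proto = IPPROTO_UDP,
 *	};
 *	struct rtable *rt = ip_route_output_flow(net, &fl4, sk);
 *
 *	if (IS_ERR(rt))
 *		return PTR_ERR(rt);
 *	...
 *	ip_rt_put(rt);
 *
 * A non-zero flowi4_proto lets xfrm_lookup_route() rewrite the result.
 */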
2565 struct rtable *ip_route_output_flow(struct net *net, struct flowi4 *flp4,
2566 const struct sock *sk)
2568 struct rtable *rt = __ip_route_output_key(net, flp4);
2573 if (flp4->flowi4_proto)
2574 rt = (struct rtable *)xfrm_lookup_route(net, &rt->dst,
2575 flowi4_to_flowi(flp4),
2580 EXPORT_SYMBOL_GPL(ip_route_output_flow);
2582 /* called with rcu_read_lock held */
2583 static int rt_fill_info(struct net *net, __be32 dst, __be32 src, u32 table_id,
2584 struct flowi4 *fl4, struct sk_buff *skb, u32 portid,
2587 struct rtable *rt = skb_rtable(skb);
2589 struct nlmsghdr *nlh;
2590 unsigned long expires = 0;
2592 u32 metrics[RTAX_MAX];
2594 nlh = nlmsg_put(skb, portid, seq, RTM_NEWROUTE, sizeof(*r), 0);
2598 r = nlmsg_data(nlh);
2599 r->rtm_family = AF_INET;
2600 r->rtm_dst_len = 32;
2602 r->rtm_tos = fl4->flowi4_tos;
2603 r->rtm_table = table_id < 256 ? table_id : RT_TABLE_COMPAT;
2604 if (nla_put_u32(skb, RTA_TABLE, table_id))
2605 goto nla_put_failure;
2606 r->rtm_type = rt->rt_type;
2607 r->rtm_scope = RT_SCOPE_UNIVERSE;
2608 r->rtm_protocol = RTPROT_UNSPEC;
2609 r->rtm_flags = (rt->rt_flags & ~0xFFFF) | RTM_F_CLONED;
2610 if (rt->rt_flags & RTCF_NOTIFY)
2611 r->rtm_flags |= RTM_F_NOTIFY;
2612 if (IPCB(skb)->flags & IPSKB_DOREDIRECT)
2613 r->rtm_flags |= RTCF_DOREDIRECT;
2615 if (nla_put_in_addr(skb, RTA_DST, dst))
2616 goto nla_put_failure;
2618 r->rtm_src_len = 32;
2619 if (nla_put_in_addr(skb, RTA_SRC, src))
2620 goto nla_put_failure;
2623 nla_put_u32(skb, RTA_OIF, rt->dst.dev->ifindex))
2624 goto nla_put_failure;
2625 #ifdef CONFIG_IP_ROUTE_CLASSID
2626 if (rt->dst.tclassid &&
2627 nla_put_u32(skb, RTA_FLOW, rt->dst.tclassid))
2628 goto nla_put_failure;
2630 if (!rt_is_input_route(rt) &&
2631 fl4->saddr != src) {
2632 if (nla_put_in_addr(skb, RTA_PREFSRC, fl4->saddr))
2633 goto nla_put_failure;
2635 if (rt->rt_uses_gateway &&
2636 nla_put_in_addr(skb, RTA_GATEWAY, rt->rt_gateway))
2637 goto nla_put_failure;
2639 expires = rt->dst.expires;
2641 unsigned long now = jiffies;
2643 if (time_before(now, expires))
2649 memcpy(metrics, dst_metrics_ptr(&rt->dst), sizeof(metrics));
2650 if (rt->rt_pmtu && expires)
2651 metrics[RTAX_MTU - 1] = rt->rt_pmtu;
2652 if (rt->rt_mtu_locked && expires)
2653 metrics[RTAX_LOCK - 1] |= BIT(RTAX_MTU);
2654 if (rtnetlink_put_metrics(skb, metrics) < 0)
2655 goto nla_put_failure;
2657 if (fl4->flowi4_mark &&
2658 nla_put_u32(skb, RTA_MARK, fl4->flowi4_mark))
2659 goto nla_put_failure;
2661 if (!uid_eq(fl4->flowi4_uid, INVALID_UID) &&
2662 nla_put_u32(skb, RTA_UID,
2663 from_kuid_munged(current_user_ns(), fl4->flowi4_uid)))
2664 goto nla_put_failure;
2666 error = rt->dst.error;
2668 if (rt_is_input_route(rt)) {
2669 #ifdef CONFIG_IP_MROUTE
2670 if (ipv4_is_multicast(dst) && !ipv4_is_local_multicast(dst) &&
2671 IPV4_DEVCONF_ALL(net, MC_FORWARDING)) {
2672 int err = ipmr_get_route(net, skb,
2673 fl4->saddr, fl4->daddr,
2679 goto nla_put_failure;
2683 if (nla_put_u32(skb, RTA_IIF, skb->dev->ifindex))
2684 goto nla_put_failure;
2687 if (rtnl_put_cacheinfo(skb, &rt->dst, 0, expires, error) < 0)
2688 goto nla_put_failure;
2690 nlmsg_end(skb, nlh);
2694 nlmsg_cancel(skb, nlh);
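/* inet_rtm_getroute() below services RTM_GETROUTE, the request behind
 * "ip route get", e.g.:
 *
 *	$ ip route get 192.0.2.1
 *
 * It performs the lookup against a dummy skb and uses rt_fill_info()
 * above (or fib_dump_info() for RTM_F_FIB_MATCH) to answer the caller.
 */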
2698 static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh,
2699 struct netlink_ext_ack *extack)
2701 struct net *net = sock_net(in_skb->sk);
2703 struct nlattr *tb[RTA_MAX+1];
2704 struct fib_result res = {};
2705 struct rtable *rt = NULL;
2712 struct sk_buff *skb;
2713 u32 table_id = RT_TABLE_MAIN;
2716 err = nlmsg_parse(nlh, sizeof(*rtm), tb, RTA_MAX, rtm_ipv4_policy,
2721 rtm = nlmsg_data(nlh);
2723 skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
2729 /* Reserve room for dummy headers; this skb can pass
2730 through a good chunk of the routing engine.
2732 skb_reset_mac_header(skb);
2733 skb_reset_network_header(skb);
2735 src = tb[RTA_SRC] ? nla_get_in_addr(tb[RTA_SRC]) : 0;
2736 dst = tb[RTA_DST] ? nla_get_in_addr(tb[RTA_DST]) : 0;
2737 iif = tb[RTA_IIF] ? nla_get_u32(tb[RTA_IIF]) : 0;
2738 mark = tb[RTA_MARK] ? nla_get_u32(tb[RTA_MARK]) : 0;
2740 uid = make_kuid(current_user_ns(), nla_get_u32(tb[RTA_UID]));
2742 uid = (iif ? INVALID_UID : current_uid());
2744 /* Bugfix: need to give ip_route_input() enough of an IP header to not gag. */
2747 ip_hdr(skb)->protocol = IPPROTO_UDP;
2748 ip_hdr(skb)->saddr = src;
2749 ip_hdr(skb)->daddr = dst;
2751 skb_reserve(skb, MAX_HEADER + sizeof(struct iphdr));
2753 memset(&fl4, 0, sizeof(fl4));
2756 fl4.flowi4_tos = rtm->rtm_tos;
2757 fl4.flowi4_oif = tb[RTA_OIF] ? nla_get_u32(tb[RTA_OIF]) : 0;
2758 fl4.flowi4_mark = mark;
2759 fl4.flowi4_uid = uid;
2764 struct net_device *dev;
2766 dev = dev_get_by_index_rcu(net, iif);
2772 skb->protocol = htons(ETH_P_IP);
2775 err = ip_route_input_rcu(skb, dst, src, rtm->rtm_tos,
2778 rt = skb_rtable(skb);
2779 if (err == 0 && rt->dst.error)
2780 err = -rt->dst.error;
2782 fl4.flowi4_iif = LOOPBACK_IFINDEX;
2783 rt = ip_route_output_key_hash_rcu(net, &fl4, &res, skb);
2788 skb_dst_set(skb, &rt->dst);
2794 if (rtm->rtm_flags & RTM_F_NOTIFY)
2795 rt->rt_flags |= RTCF_NOTIFY;
2797 if (rtm->rtm_flags & RTM_F_LOOKUP_TABLE)
2798 table_id = res.table ? res.table->tb_id : 0;
2800 if (rtm->rtm_flags & RTM_F_FIB_MATCH) {
2802 err = fib_props[res.type].error;
2804 err = -EHOSTUNREACH;
2807 err = fib_dump_info(skb, NETLINK_CB(in_skb).portid,
2808 nlh->nlmsg_seq, RTM_NEWROUTE, table_id,
2809 rt->rt_type, res.prefix, res.prefixlen,
2810 fl4.flowi4_tos, res.fi, 0);
2812 err = rt_fill_info(net, dst, src, table_id, &fl4, skb,
2813 NETLINK_CB(in_skb).portid, nlh->nlmsg_seq);
2820 err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid);
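/* Multicast configuration on a device changed; cached routes may now be
 * stale, so flush the route cache for the device's netns.
 */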
2830 void ip_rt_multicast_event(struct in_device *in_dev)
2832 rt_cache_flush(dev_net(in_dev->dev));
2835 #ifdef CONFIG_SYSCTL
2836 static int ip_rt_gc_interval __read_mostly = 60 * HZ;
2837 static int ip_rt_gc_min_interval __read_mostly = HZ / 2;
2838 static int ip_rt_gc_elasticity __read_mostly = 8;
2839 static int ip_min_valid_pmtu __read_mostly = IPV4_MIN_MTU;
2841 static int ipv4_sysctl_rtcache_flush(struct ctl_table *__ctl, int write,
2842 void __user *buffer,
2843 size_t *lenp, loff_t *ppos)
2845 struct net *net = (struct net *)__ctl->extra1;
2848 rt_cache_flush(net);
2849 fnhe_genid_bump(net);
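/* Writing any value to the flush sysctl runs the handler above, e.g.:
 *
 *	# echo 1 > /proc/sys/net/ipv4/route/flush
 *
 * which invalidates cached routes and bumps the fnhe generation id for
 * this netns.
 */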
2856 static struct ctl_table ipv4_route_table[] = {
2858 .procname = "gc_thresh",
2859 .data = &ipv4_dst_ops.gc_thresh,
2860 .maxlen = sizeof(int),
2862 .proc_handler = proc_dointvec,
2865 .procname = "max_size",
2866 .data = &ip_rt_max_size,
2867 .maxlen = sizeof(int),
2869 .proc_handler = proc_dointvec,
2872 /* Deprecated. Use gc_min_interval_ms */
2874 .procname = "gc_min_interval",
2875 .data = &ip_rt_gc_min_interval,
2876 .maxlen = sizeof(int),
2878 .proc_handler = proc_dointvec_jiffies,
2881 .procname = "gc_min_interval_ms",
2882 .data = &ip_rt_gc_min_interval,
2883 .maxlen = sizeof(int),
2885 .proc_handler = proc_dointvec_ms_jiffies,
2888 .procname = "gc_timeout",
2889 .data = &ip_rt_gc_timeout,
2890 .maxlen = sizeof(int),
2892 .proc_handler = proc_dointvec_jiffies,
2895 .procname = "gc_interval",
2896 .data = &ip_rt_gc_interval,
2897 .maxlen = sizeof(int),
2899 .proc_handler = proc_dointvec_jiffies,
2902 .procname = "redirect_load",
2903 .data = &ip_rt_redirect_load,
2904 .maxlen = sizeof(int),
2906 .proc_handler = proc_dointvec,
2909 .procname = "redirect_number",
2910 .data = &ip_rt_redirect_number,
2911 .maxlen = sizeof(int),
2913 .proc_handler = proc_dointvec,
2916 .procname = "redirect_silence",
2917 .data = &ip_rt_redirect_silence,
2918 .maxlen = sizeof(int),
2920 .proc_handler = proc_dointvec,
2923 .procname = "error_cost",
2924 .data = &ip_rt_error_cost,
2925 .maxlen = sizeof(int),
2927 .proc_handler = proc_dointvec,
2930 .procname = "error_burst",
2931 .data = &ip_rt_error_burst,
2932 .maxlen = sizeof(int),
2934 .proc_handler = proc_dointvec,
2937 .procname = "gc_elasticity",
2938 .data = &ip_rt_gc_elasticity,
2939 .maxlen = sizeof(int),
2941 .proc_handler = proc_dointvec,
2944 .procname = "mtu_expires",
2945 .data = &ip_rt_mtu_expires,
2946 .maxlen = sizeof(int),
2948 .proc_handler = proc_dointvec_jiffies,
2951 .procname = "min_pmtu",
2952 .data = &ip_rt_min_pmtu,
2953 .maxlen = sizeof(int),
2955 .proc_handler = proc_dointvec_minmax,
2956 .extra1 = &ip_min_valid_pmtu,
2959 .procname = "min_adv_mss",
2960 .data = &ip_rt_min_advmss,
2961 .maxlen = sizeof(int),
2963 .proc_handler = proc_dointvec,
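/* The table above is exposed under /proc/sys/net/ipv4/route/; for
 * example (values purely illustrative):
 *
 *	# sysctl -w net.ipv4.route.mtu_expires=600
 *	# cat /proc/sys/net/ipv4/route/min_pmtu
 */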
2968 static struct ctl_table ipv4_route_flush_table[] = {
2970 .procname = "flush",
2971 .maxlen = sizeof(int),
2973 .proc_handler = ipv4_sysctl_rtcache_flush,
2978 static __net_init int sysctl_route_net_init(struct net *net)
2980 struct ctl_table *tbl;
2982 tbl = ipv4_route_flush_table;
2983 if (!net_eq(net, &init_net)) {
2984 tbl = kmemdup(tbl, sizeof(ipv4_route_flush_table), GFP_KERNEL);
2988 /* Don't export sysctls to unprivileged users */
2989 if (net->user_ns != &init_user_ns)
2990 tbl[0].procname = NULL;
2992 tbl[0].extra1 = net;
2994 net->ipv4.route_hdr = register_net_sysctl(net, "net/ipv4/route", tbl);
2995 if (!net->ipv4.route_hdr)
3000 if (tbl != ipv4_route_flush_table)
3006 static __net_exit void sysctl_route_net_exit(struct net *net)
3008 struct ctl_table *tbl;
3010 tbl = net->ipv4.route_hdr->ctl_table_arg;
3011 unregister_net_sysctl_table(net->ipv4.route_hdr);
3012 BUG_ON(tbl == ipv4_route_flush_table);
3016 static __net_initdata struct pernet_operations sysctl_route_ops = {
3017 .init = sysctl_route_net_init,
3018 .exit = sysctl_route_net_exit,
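/* Per-netns generation counters: bumping rt_genid invalidates every
 * cached ipv4 dst in the namespace, and fnhe_genid does the same for
 * cached next-hop exceptions; dev_addr_genid is seeded randomly so its
 * initial value differs across namespaces.
 */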
3022 static __net_init int rt_genid_init(struct net *net)
3024 atomic_set(&net->ipv4.rt_genid, 0);
3025 atomic_set(&net->fnhe_genid, 0);
3026 atomic_set(&net->ipv4.dev_addr_genid, get_random_int());
3030 static __net_initdata struct pernet_operations rt_genid_ops = {
3031 .init = rt_genid_init,
3034 static int __net_init ipv4_inetpeer_init(struct net *net)
3036 struct inet_peer_base *bp = kmalloc(sizeof(*bp), GFP_KERNEL);
3040 inet_peer_base_init(bp);
3041 net->ipv4.peers = bp;
3045 static void __net_exit ipv4_inetpeer_exit(struct net *net)
3047 struct inet_peer_base *bp = net->ipv4.peers;
3049 net->ipv4.peers = NULL;
3050 inetpeer_invalidate_tree(bp);
3054 static __net_initdata struct pernet_operations ipv4_inetpeer_ops = {
3055 .init = ipv4_inetpeer_init,
3056 .exit = ipv4_inetpeer_exit,
3059 #ifdef CONFIG_IP_ROUTE_CLASSID
3060 struct ip_rt_acct __percpu *ip_rt_acct __read_mostly;
3061 #endif /* CONFIG_IP_ROUTE_CLASSID */
3063 int __init ip_rt_init(void)
3067 ip_idents = kmalloc(IP_IDENTS_SZ * sizeof(*ip_idents), GFP_KERNEL);
3069 panic("IP: failed to allocate ip_idents\n");
3071 prandom_bytes(ip_idents, IP_IDENTS_SZ * sizeof(*ip_idents));
3073 ip_tstamps = kcalloc(IP_IDENTS_SZ, sizeof(*ip_tstamps), GFP_KERNEL);
3075 panic("IP: failed to allocate ip_tstamps\n");
3077 for_each_possible_cpu(cpu) {
3078 struct uncached_list *ul = &per_cpu(rt_uncached_list, cpu);
3080 INIT_LIST_HEAD(&ul->head);
3081 spin_lock_init(&ul->lock);
3083 #ifdef CONFIG_IP_ROUTE_CLASSID
3084 ip_rt_acct = __alloc_percpu(256 * sizeof(struct ip_rt_acct), __alignof__(struct ip_rt_acct));
3086 panic("IP: failed to allocate ip_rt_acct\n");
3089 ipv4_dst_ops.kmem_cachep =
3090 kmem_cache_create("ip_dst_cache", sizeof(struct rtable), 0,
3091 SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
3093 ipv4_dst_blackhole_ops.kmem_cachep = ipv4_dst_ops.kmem_cachep;
3095 if (dst_entries_init(&ipv4_dst_ops) < 0)
3096 panic("IP: failed to allocate ipv4_dst_ops counter\n");
3098 if (dst_entries_init(&ipv4_dst_blackhole_ops) < 0)
3099 panic("IP: failed to allocate ipv4_dst_blackhole_ops counter\n");
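/* With the routing cache gone there is nothing left to garbage-collect,
 * so the two assignments below effectively disable dst GC pressure.
 */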
3101 ipv4_dst_ops.gc_thresh = ~0;
3102 ip_rt_max_size = INT_MAX;
3107 if (ip_rt_proc_init())
3108 pr_err("Unable to create route proc files\n");
3113 rtnl_register(PF_INET, RTM_GETROUTE, inet_rtm_getroute, NULL,
3114 RTNL_FLAG_DOIT_UNLOCKED);
3116 #ifdef CONFIG_SYSCTL
3117 register_pernet_subsys(&sysctl_route_ops);
3119 register_pernet_subsys(&rt_genid_ops);
3120 register_pernet_subsys(&ipv4_inetpeer_ops);
3124 #ifdef CONFIG_SYSCTL
3126 * We really need to sanitize the damn ipv4 init order; then all
3127 * this nonsense will go away.
3129 void __init ip_static_sysctl_init(void)
3131 register_net_sysctl(&init_net, "net/ipv4/route", ipv4_route_table);