/*
 * Copyright (c) 2013 Nicira, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/capability.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/in.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/if_arp.h>
#include <linux/mroute.h>
#include <linux/init.h>
#include <linux/in6.h>
#include <linux/inetdevice.h>
#include <linux/igmp.h>
#include <linux/netfilter_ipv4.h>
#include <linux/etherdevice.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/rculist.h>
#include <linux/err.h>

#include <net/sock.h>
#include <net/ip.h>
#include <net/icmp.h>
#include <net/protocol.h>
#include <net/ip_tunnels.h>
#include <net/arp.h>
#include <net/checksum.h>
#include <net/dsfield.h>
#include <net/inet_ecn.h>
#include <net/xfrm.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <net/rtnetlink.h>

#if IS_ENABLED(CONFIG_IPV6)
#include <net/ipv6.h>
#include <net/ip6_fib.h>
#include <net/ip6_route.h>
#endif
static unsigned int ip_tunnel_hash(__be32 key, __be32 remote)
{
	return hash_32((__force u32)key ^ (__force u32)remote,
			 IP_TNL_HASH_BITS);
}
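
/* Each tunnel keeps a per-cpu cache of its output route.  The helpers
 * below swap the cached dst_entry pointer with xchg(), so a writer
 * replacing the cache and a reader on another CPU never observe a
 * half-updated pointer; the reference on the old entry is dropped
 * only after the swap.
 */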
static void __tunnel_dst_set(struct ip_tunnel_dst *idst,
			     struct dst_entry *dst)
{
	struct dst_entry *old_dst;

	dst_clone(dst);
	old_dst = xchg((__force struct dst_entry **)&idst->dst, dst);
	dst_release(old_dst);
}
static void tunnel_dst_set(struct ip_tunnel *t, struct dst_entry *dst)
{
	__tunnel_dst_set(this_cpu_ptr(t->dst_cache), dst);
}
static void tunnel_dst_reset(struct ip_tunnel *t)
{
	tunnel_dst_set(t, NULL);
}
void ip_tunnel_dst_reset_all(struct ip_tunnel *t)
{
	int i;

	for_each_possible_cpu(i)
		__tunnel_dst_set(per_cpu_ptr(t->dst_cache, i), NULL);
}
EXPORT_SYMBOL(ip_tunnel_dst_reset_all);
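
/* Fetch the cached route for the current CPU.  The dst is read under
 * RCU and used only if atomic_inc_not_zero() can take a reference,
 * which guards against a concurrent final dst_release(); a cached
 * entry that has gone obsolete and fails its ->check() is dropped and
 * the cache reset, so the caller falls back to a fresh route lookup.
 */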
static struct rtable *tunnel_rtable_get(struct ip_tunnel *t, u32 cookie)
{
	struct dst_entry *dst;

	rcu_read_lock();
	dst = rcu_dereference(this_cpu_ptr(t->dst_cache)->dst);
	if (dst && !atomic_inc_not_zero(&dst->__refcnt))
		dst = NULL;
	if (dst) {
		if (dst->obsolete && dst->ops->check(dst, cookie) == NULL) {
			tunnel_dst_reset(t);
			dst_release(dst);
			dst = NULL;
		}
	}
	rcu_read_unlock();
	return (struct rtable *)dst;
}
static bool ip_tunnel_key_match(const struct ip_tunnel_parm *p,
				__be16 flags, __be32 key)
{
	if (p->i_flags & TUNNEL_KEY) {
		if (flags & TUNNEL_KEY)
			return key == p->i_key;
		else
			/* key expected, none present */
			return false;
	} else
		return !(flags & TUNNEL_KEY);
}
/* Fallback tunnel: no source, no destination, no key, no options

   Tunnel hash table:
   We require an exact key match, i.e. if a key is present in the packet
   it will match only a tunnel with the same key; if it is not present,
   it will match only a keyless tunnel.

   All keyless packets, if they do not match a configured keyless tunnel,
   will match the fallback tunnel.
   Given src, dst and key, find the appropriate tunnel for input.
*/
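
/* As a hypothetical example: with one tunnel configured as
 * <saddr=10.0.0.1, daddr=10.0.0.2, key=42> and another keyless tunnel
 * as <daddr=10.0.0.2>, a packet arriving from 10.0.0.2 with key 42 is
 * taken by the exact-match pass below; the same packet without a key
 * skips it and matches the keyless tunnel in the remote-only pass,
 * and a packet matching neither lands on the fallback device, if up.
 */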
struct ip_tunnel *ip_tunnel_lookup(struct ip_tunnel_net *itn,
				   int link, __be16 flags,
				   __be32 remote, __be32 local,
				   __be32 key)
{
	unsigned int hash;
	struct ip_tunnel *t, *cand = NULL;
	struct hlist_head *head;

	hash = ip_tunnel_hash(key, remote);
	head = &itn->tunnels[hash];

	hlist_for_each_entry_rcu(t, head, hash_node) {
		if (local != t->parms.iph.saddr ||
		    remote != t->parms.iph.daddr ||
		    !(t->dev->flags & IFF_UP))
			continue;

		if (!ip_tunnel_key_match(&t->parms, flags, key))
			continue;

		if (t->parms.link == link)
			return t;
		else
			cand = t;
	}

	hlist_for_each_entry_rcu(t, head, hash_node) {
		if (remote != t->parms.iph.daddr ||
		    !(t->dev->flags & IFF_UP))
			continue;

		if (!ip_tunnel_key_match(&t->parms, flags, key))
			continue;

		if (t->parms.link == link)
			return t;
		else if (!cand)
			cand = t;
	}

	hash = ip_tunnel_hash(key, 0);
	head = &itn->tunnels[hash];

	hlist_for_each_entry_rcu(t, head, hash_node) {
		if ((local != t->parms.iph.saddr &&
		     (local != t->parms.iph.daddr ||
		      !ipv4_is_multicast(local))) ||
		    !(t->dev->flags & IFF_UP))
			continue;

		if (!ip_tunnel_key_match(&t->parms, flags, key))
			continue;

		if (t->parms.link == link)
			return t;
		else if (!cand)
			cand = t;
	}

	if (flags & TUNNEL_NO_KEY)
		goto skip_key_lookup;

	hlist_for_each_entry_rcu(t, head, hash_node) {
		if (t->parms.i_key != key ||
		    !(t->dev->flags & IFF_UP))
			continue;

		if (t->parms.link == link)
			return t;
		else if (!cand)
			cand = t;
	}

skip_key_lookup:
	if (cand)
		return cand;

	if (itn->fb_tunnel_dev && itn->fb_tunnel_dev->flags & IFF_UP)
		return netdev_priv(itn->fb_tunnel_dev);

	return NULL;
}
EXPORT_SYMBOL_GPL(ip_tunnel_lookup);
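
/* ip_bucket() must mirror the hashing used by ip_tunnel_lookup(): a
 * unicast daddr hashes together with the key, while wildcard and
 * multicast destinations hash with remote == 0.  VTI devices without
 * TUNNEL_KEY reuse i_key for other purposes, so they are hashed as
 * keyless.
 */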
static struct hlist_head *ip_bucket(struct ip_tunnel_net *itn,
				    struct ip_tunnel_parm *parms)
{
	unsigned int h;
	__be32 remote;
	__be32 i_key = parms->i_key;

	if (parms->iph.daddr && !ipv4_is_multicast(parms->iph.daddr))
		remote = parms->iph.daddr;
	else
		remote = 0;

	if (!(parms->i_flags & TUNNEL_KEY) && (parms->i_flags & VTI_ISVTI))
		i_key = 0;

	h = ip_tunnel_hash(i_key, remote);
	return &itn->tunnels[h];
}
static void ip_tunnel_add(struct ip_tunnel_net *itn, struct ip_tunnel *t)
{
	struct hlist_head *head = ip_bucket(itn, &t->parms);

	hlist_add_head_rcu(&t->hash_node, head);
}
static void ip_tunnel_del(struct ip_tunnel *t)
{
	hlist_del_init_rcu(&t->hash_node);
}
static struct ip_tunnel *ip_tunnel_find(struct ip_tunnel_net *itn,
					struct ip_tunnel_parm *parms,
					int type)
{
	__be32 remote = parms->iph.daddr;
	__be32 local = parms->iph.saddr;
	__be32 key = parms->i_key;
	__be16 flags = parms->i_flags;
	int link = parms->link;
	struct ip_tunnel *t = NULL;
	struct hlist_head *head = ip_bucket(itn, parms);

	hlist_for_each_entry_rcu(t, head, hash_node) {
		if (local == t->parms.iph.saddr &&
		    remote == t->parms.iph.daddr &&
		    link == t->parms.link &&
		    type == t->dev->type &&
		    ip_tunnel_key_match(&t->parms, flags, key))
			break;
	}
	return t;
}
static struct net_device *__ip_tunnel_create(struct net *net,
					     const struct rtnl_link_ops *ops,
					     struct ip_tunnel_parm *parms)
{
	int err;
	struct ip_tunnel *tunnel;
	struct net_device *dev;
	char name[IFNAMSIZ];

	if (parms->name[0])
		strlcpy(name, parms->name, IFNAMSIZ);
	else {
		if (strlen(ops->kind) > (IFNAMSIZ - 3)) {
			err = -E2BIG;
			goto failed;
		}
		strlcpy(name, ops->kind, IFNAMSIZ);
		strncat(name, "%d", 2);
	}

	ASSERT_RTNL();
	dev = alloc_netdev(ops->priv_size, name, ops->setup);
	if (!dev) {
		err = -ENOMEM;
		goto failed;
	}
	dev_net_set(dev, net);

	dev->rtnl_link_ops = ops;

	tunnel = netdev_priv(dev);
	tunnel->parms = *parms;
	tunnel->net = net;

	err = register_netdevice(dev);
	if (err)
		goto failed_free;

	return dev;

failed_free:
	free_netdev(dev);
failed:
	return ERR_PTR(err);
}
static inline void init_tunnel_flow(struct flowi4 *fl4,
				    int proto,
				    __be32 daddr, __be32 saddr,
				    __be32 key, __u8 tos, int oif)
{
	memset(fl4, 0, sizeof(*fl4));
	fl4->flowi4_oif = oif;
	fl4->daddr = daddr;
	fl4->saddr = saddr;
	fl4->flowi4_tos = tos;
	fl4->flowi4_proto = proto;
	fl4->fl4_gre_key = key;
}
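
/* Guess the underlay device for the tunnel's remote endpoint via a
 * route lookup on the configured outer addresses, then derive the
 * tunnel MTU and needed_headroom from that device.  When no route is
 * available, fall back to the explicitly bound link, and otherwise to
 * conservative defaults (LL_MAX_HEADER, ETH_DATA_LEN).
 */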
static int ip_tunnel_bind_dev(struct net_device *dev)
{
	struct net_device *tdev = NULL;
	struct ip_tunnel *tunnel = netdev_priv(dev);
	const struct iphdr *iph;
	int hlen = LL_MAX_HEADER;
	int mtu = ETH_DATA_LEN;
	int t_hlen = tunnel->hlen + sizeof(struct iphdr);

	iph = &tunnel->parms.iph;

	/* Guess output device to choose reasonable mtu and needed_headroom */
	if (iph->daddr) {
		struct flowi4 fl4;
		struct rtable *rt;

		init_tunnel_flow(&fl4, iph->protocol, iph->daddr,
				 iph->saddr, tunnel->parms.o_key,
				 RT_TOS(iph->tos), tunnel->parms.link);
		rt = ip_route_output_key(tunnel->net, &fl4);

		if (!IS_ERR(rt)) {
			tdev = rt->dst.dev;
			tunnel_dst_set(tunnel, &rt->dst);
			ip_rt_put(rt);
		}
		if (dev->type != ARPHRD_ETHER)
			dev->flags |= IFF_POINTOPOINT;
	}

	if (!tdev && tunnel->parms.link)
		tdev = __dev_get_by_index(tunnel->net, tunnel->parms.link);

	if (tdev) {
		hlen = tdev->hard_header_len + tdev->needed_headroom;
		mtu = tdev->mtu;
	}
	dev->iflink = tunnel->parms.link;

	dev->needed_headroom = t_hlen + hlen;
	mtu -= (dev->hard_header_len + t_hlen);

	if (mtu < 68)
		mtu = 68;

	return mtu;
}
static struct ip_tunnel *ip_tunnel_create(struct net *net,
					  struct ip_tunnel_net *itn,
					  struct ip_tunnel_parm *parms)
{
	struct ip_tunnel *nt;
	struct net_device *dev;

	BUG_ON(!itn->fb_tunnel_dev);
	dev = __ip_tunnel_create(net, itn->fb_tunnel_dev->rtnl_link_ops, parms);
	if (IS_ERR(dev))
		return ERR_CAST(dev);

	dev->mtu = ip_tunnel_bind_dev(dev);

	nt = netdev_priv(dev);
	ip_tunnel_add(itn, nt);
	return nt;
}
int ip_tunnel_rcv(struct ip_tunnel *tunnel, struct sk_buff *skb,
		  const struct tnl_ptk_info *tpi, bool log_ecn_error)
{
	struct pcpu_sw_netstats *tstats;
	const struct iphdr *iph = ip_hdr(skb);
	int err;

#ifdef CONFIG_NET_IPGRE_BROADCAST
	if (ipv4_is_multicast(iph->daddr)) {
		tunnel->dev->stats.multicast++;
		skb->pkt_type = PACKET_BROADCAST;
	}
#endif

	if ((!(tpi->flags&TUNNEL_CSUM) &&  (tunnel->parms.i_flags&TUNNEL_CSUM)) ||
	     ((tpi->flags&TUNNEL_CSUM) && !(tunnel->parms.i_flags&TUNNEL_CSUM))) {
		tunnel->dev->stats.rx_crc_errors++;
		tunnel->dev->stats.rx_errors++;
		goto drop;
	}

	if (tunnel->parms.i_flags&TUNNEL_SEQ) {
		if (!(tpi->flags&TUNNEL_SEQ) ||
		    (tunnel->i_seqno && (s32)(ntohl(tpi->seq) - tunnel->i_seqno) < 0)) {
			tunnel->dev->stats.rx_fifo_errors++;
			tunnel->dev->stats.rx_errors++;
			goto drop;
		}
		tunnel->i_seqno = ntohl(tpi->seq) + 1;
	}

	skb_reset_network_header(skb);

	err = IP_ECN_decapsulate(iph, skb);
	if (unlikely(err)) {
		if (log_ecn_error)
			net_info_ratelimited("non-ECT from %pI4 with TOS=%#x\n",
					&iph->saddr, iph->tos);
		if (err > 1) {
			++tunnel->dev->stats.rx_frame_errors;
			++tunnel->dev->stats.rx_errors;
			goto drop;
		}
	}

	tstats = this_cpu_ptr(tunnel->dev->tstats);
	u64_stats_update_begin(&tstats->syncp);
	tstats->rx_packets++;
	tstats->rx_bytes += skb->len;
	u64_stats_update_end(&tstats->syncp);

	skb_scrub_packet(skb, !net_eq(tunnel->net, dev_net(tunnel->dev)));

	if (tunnel->dev->type == ARPHRD_ETHER) {
		skb->protocol = eth_type_trans(skb, tunnel->dev);
		skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
	} else {
		skb->dev = tunnel->dev;
	}

	gro_cells_receive(&tunnel->gro_cells, skb);
	return 0;

drop:
	kfree_skb(skb);
	return 0;
}
EXPORT_SYMBOL_GPL(ip_tunnel_rcv);
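
/* Check the packet against the path MTU of the outer route and, when
 * it does not fit, push the reduced MTU back toward the sender:
 * update the inner dst's PMTU, send ICMP_DEST_UNREACH/FRAG_NEEDED for
 * IPv4 with DF set, or ICMPV6_PKT_TOOBIG for IPv6.  A nonzero return
 * tells the caller to drop the packet.
 */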
static int tnl_update_pmtu(struct net_device *dev, struct sk_buff *skb,
			    struct rtable *rt, __be16 df)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	int pkt_size = skb->len - tunnel->hlen - dev->hard_header_len;
	int mtu;

	if (df)
		mtu = dst_mtu(&rt->dst) - dev->hard_header_len
					- sizeof(struct iphdr) - tunnel->hlen;
	else
		mtu = skb_dst(skb) ? dst_mtu(skb_dst(skb)) : dev->mtu;

	if (skb_dst(skb))
		skb_dst(skb)->ops->update_pmtu(skb_dst(skb), NULL, skb, mtu);

	if (skb->protocol == htons(ETH_P_IP)) {
		if (!skb_is_gso(skb) &&
		    (df & htons(IP_DF)) && mtu < pkt_size) {
			memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
			icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu));
			return -E2BIG;
		}
	}
#if IS_ENABLED(CONFIG_IPV6)
	else if (skb->protocol == htons(ETH_P_IPV6)) {
		struct rt6_info *rt6 = (struct rt6_info *)skb_dst(skb);

		if (rt6 && mtu < dst_mtu(skb_dst(skb)) &&
			   mtu >= IPV6_MIN_MTU) {
			if ((tunnel->parms.iph.daddr &&
			    !ipv4_is_multicast(tunnel->parms.iph.daddr)) ||
			    rt6->rt6i_dst.plen == 128) {
				rt6->rt6i_flags |= RTF_MODIFIED;
				dst_metric_set(skb_dst(skb), RTAX_MTU, mtu);
			}
		}

		if (!skb_is_gso(skb) && mtu >= IPV6_MIN_MTU &&
					mtu < pkt_size) {
			icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
			return -E2BIG;
		}
	}
#endif
	return 0;
}
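
/* Transmit path shared by the IPv4 tunnel drivers built on this
 * library (e.g. GRE and ipip): resolve the outer destination,
 * possibly from the inner headers for NBMA tunnels, pick the cached
 * or a freshly looked-up route, enforce PMTU, derive the outer
 * tos/ttl/df from the tunnel parameters or the inner packet, then
 * hand the skb to iptunnel_xmit() for encapsulation.
 */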
void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
		    const struct iphdr *tnl_params, const u8 protocol)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	const struct iphdr *inner_iph;
	struct flowi4 fl4;
	u8     tos, ttl;
	__be16 df;
	struct rtable *rt;		/* Route to the other host */
	unsigned int max_headroom;	/* The extra header space needed */
	__be32 dst;
	int err;
	bool connected;

	inner_iph = (const struct iphdr *)skb_inner_network_header(skb);
	connected = (tunnel->parms.iph.daddr != 0);

	dst = tnl_params->daddr;
	if (dst == 0) {
		/* NBMA tunnel */

		if (skb_dst(skb) == NULL) {
			dev->stats.tx_fifo_errors++;
			goto tx_error;
		}

		if (skb->protocol == htons(ETH_P_IP)) {
			rt = skb_rtable(skb);
			dst = rt_nexthop(rt, inner_iph->daddr);
		}
#if IS_ENABLED(CONFIG_IPV6)
		else if (skb->protocol == htons(ETH_P_IPV6)) {
			const struct in6_addr *addr6;
			struct neighbour *neigh;
			bool do_tx_error_icmp;
			int addr_type;

			neigh = dst_neigh_lookup(skb_dst(skb),
						 &ipv6_hdr(skb)->daddr);
			if (neigh == NULL)
				goto tx_error;

			addr6 = (const struct in6_addr *)&neigh->primary_key;
			addr_type = ipv6_addr_type(addr6);

			if (addr_type == IPV6_ADDR_ANY) {
				addr6 = &ipv6_hdr(skb)->daddr;
				addr_type = ipv6_addr_type(addr6);
			}

			if ((addr_type & IPV6_ADDR_COMPATv4) == 0)
				do_tx_error_icmp = true;
			else {
				do_tx_error_icmp = false;
				dst = addr6->s6_addr32[3];
			}
			neigh_release(neigh);
			if (do_tx_error_icmp)
				goto tx_error_icmp;
		}
#endif
		else
			goto tx_error;

		connected = false;
	}

	tos = tnl_params->tos;
	if (tos & 0x1) {
		tos &= ~0x1;
		if (skb->protocol == htons(ETH_P_IP)) {
			tos = inner_iph->tos;
			connected = false;
		} else if (skb->protocol == htons(ETH_P_IPV6)) {
			tos = ipv6_get_dsfield((const struct ipv6hdr *)inner_iph);
			connected = false;
		}
	}

	init_tunnel_flow(&fl4, protocol, dst, tnl_params->saddr,
			 tunnel->parms.o_key, RT_TOS(tos), tunnel->parms.link);

	rt = connected ? tunnel_rtable_get(tunnel, 0) : NULL;

	if (!rt) {
		rt = ip_route_output_key(tunnel->net, &fl4);

		if (IS_ERR(rt)) {
			dev->stats.tx_carrier_errors++;
			goto tx_error;
		}
		if (connected)
			tunnel_dst_set(tunnel, &rt->dst);
	}

	if (rt->dst.dev == dev) {
		ip_rt_put(rt);
		dev->stats.collisions++;
		goto tx_error;
	}

	if (tnl_update_pmtu(dev, skb, rt, tnl_params->frag_off)) {
		ip_rt_put(rt);
		goto tx_error;
	}

	if (tunnel->err_count > 0) {
		if (time_before(jiffies,
				tunnel->err_time + IPTUNNEL_ERR_TIMEO)) {
			tunnel->err_count--;

			memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
			dst_link_failure(skb);
		} else
			tunnel->err_count = 0;
	}

	tos = ip_tunnel_ecn_encap(tos, inner_iph, skb);
	ttl = tnl_params->ttl;
	if (ttl == 0) {
		if (skb->protocol == htons(ETH_P_IP))
			ttl = inner_iph->ttl;
#if IS_ENABLED(CONFIG_IPV6)
		else if (skb->protocol == htons(ETH_P_IPV6))
			ttl = ((const struct ipv6hdr *)inner_iph)->hop_limit;
#endif
		else
			ttl = ip4_dst_hoplimit(&rt->dst);
	}

	df = tnl_params->frag_off;
	if (skb->protocol == htons(ETH_P_IP))
		df |= (inner_iph->frag_off&htons(IP_DF));

	max_headroom = LL_RESERVED_SPACE(rt->dst.dev) + sizeof(struct iphdr)
			+ rt->dst.header_len;
	if (max_headroom > dev->needed_headroom)
		dev->needed_headroom = max_headroom;

	if (skb_cow_head(skb, dev->needed_headroom)) {
		ip_rt_put(rt);
		dev->stats.tx_dropped++;
		kfree_skb(skb);
		return;
	}

	err = iptunnel_xmit(skb->sk, rt, skb, fl4.saddr, fl4.daddr, protocol,
			    tos, ttl, df, !net_eq(tunnel->net, dev_net(dev)));
	iptunnel_xmit_stats(err, &dev->stats, dev->tstats);

	return;

#if IS_ENABLED(CONFIG_IPV6)
tx_error_icmp:
	dst_link_failure(skb);
#endif
tx_error:
	dev->stats.tx_errors++;
	kfree_skb(skb);
}
EXPORT_SYMBOL_GPL(ip_tunnel_xmit);
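
/* Apply new parameters to an existing tunnel.  The tunnel is unhashed
 * and rehashed around the endpoint/key update because its hash bucket
 * depends on daddr and i_key; if the bound link changed, rebind to
 * recompute MTU and headroom, and flush the per-cpu route cache.
 */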
static void ip_tunnel_update(struct ip_tunnel_net *itn,
			     struct ip_tunnel *t,
			     struct net_device *dev,
			     struct ip_tunnel_parm *p,
			     bool set_mtu)
{
	ip_tunnel_del(t);
	t->parms.iph.saddr = p->iph.saddr;
	t->parms.iph.daddr = p->iph.daddr;
	t->parms.i_key = p->i_key;
	t->parms.o_key = p->o_key;
	if (dev->type != ARPHRD_ETHER) {
		memcpy(dev->dev_addr, &p->iph.saddr, 4);
		memcpy(dev->broadcast, &p->iph.daddr, 4);
	}
	ip_tunnel_add(itn, t);

	t->parms.iph.ttl = p->iph.ttl;
	t->parms.iph.tos = p->iph.tos;
	t->parms.iph.frag_off = p->iph.frag_off;

	if (t->parms.link != p->link) {
		int mtu;

		t->parms.link = p->link;
		mtu = ip_tunnel_bind_dev(dev);
		if (set_mtu)
			dev->mtu = mtu;
	}
	ip_tunnel_dst_reset_all(t);
	netdev_state_change(dev);
}
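
/* Legacy ioctl-based control interface.  On the fallback device, the
 * SIOC{GET,ADD,CHG,DEL}TUNNEL commands address tunnels by their
 * parameters; on an ordinary tunnel device they act on that device
 * directly.  The modifying commands require CAP_NET_ADMIN in the
 * tunnel's user namespace.
 */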
int ip_tunnel_ioctl(struct net_device *dev, struct ip_tunnel_parm *p, int cmd)
{
	int err = 0;
	struct ip_tunnel *t = netdev_priv(dev);
	struct net *net = t->net;
	struct ip_tunnel_net *itn = net_generic(net, t->ip_tnl_net_id);

	BUG_ON(!itn->fb_tunnel_dev);
	switch (cmd) {
	case SIOCGETTUNNEL:
		if (dev == itn->fb_tunnel_dev) {
			t = ip_tunnel_find(itn, p, itn->fb_tunnel_dev->type);
			if (t == NULL)
				t = netdev_priv(dev);
		}
		memcpy(p, &t->parms, sizeof(*p));
		break;
	case SIOCADDTUNNEL:
	case SIOCCHGTUNNEL:
		err = -EPERM;
		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
			goto done;
		if (p->iph.ttl)
			p->iph.frag_off |= htons(IP_DF);
		if (!(p->i_flags & VTI_ISVTI)) {
			if (!(p->i_flags & TUNNEL_KEY))
				p->i_key = 0;
			if (!(p->o_flags & TUNNEL_KEY))
				p->o_key = 0;
		}

		t = ip_tunnel_find(itn, p, itn->fb_tunnel_dev->type);

		if (!t && (cmd == SIOCADDTUNNEL)) {
			t = ip_tunnel_create(net, itn, p);
			err = PTR_ERR_OR_ZERO(t);
			break;
		}
		if (dev != itn->fb_tunnel_dev && cmd == SIOCCHGTUNNEL) {
			if (t != NULL) {
				if (t->dev != dev) {
					err = -EEXIST;
					break;
				}
			} else {
				unsigned int nflags = 0;

				if (ipv4_is_multicast(p->iph.daddr))
					nflags = IFF_BROADCAST;
				else if (p->iph.daddr)
					nflags = IFF_POINTOPOINT;

				if ((dev->flags^nflags)&(IFF_POINTOPOINT|IFF_BROADCAST)) {
					err = -EINVAL;
					break;
				}

				t = netdev_priv(dev);
			}
		}

		if (t) {
			err = 0;
			ip_tunnel_update(itn, t, dev, p, true);
		} else
			err = -ENOENT;
		break;
	case SIOCDELTUNNEL:
		err = -EPERM;
		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
			goto done;

		if (dev == itn->fb_tunnel_dev) {
			err = -ENOENT;
			t = ip_tunnel_find(itn, p, itn->fb_tunnel_dev->type);
			if (t == NULL)
				goto done;
			err = -EPERM;
			if (t == netdev_priv(itn->fb_tunnel_dev))
				goto done;
			dev = t->dev;
		}
		unregister_netdevice(dev);
		err = 0;
		break;
	default:
		err = -EINVAL;
	}

done:
	return err;
}
EXPORT_SYMBOL_GPL(ip_tunnel_ioctl);
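
/* The MTU must leave room for the outer IP header and the tunnel
 * header within the maximum IP packet size: 0xFFF8 is the largest
 * total length that is still a multiple of 8 (fragment offsets are in
 * 8-byte units), and 68 is the historical IPv4 minimum MTU.
 */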
int ip_tunnel_change_mtu(struct net_device *dev, int new_mtu)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	int t_hlen = tunnel->hlen + sizeof(struct iphdr);

	if (new_mtu < 68 ||
	    new_mtu > 0xFFF8 - dev->hard_header_len - t_hlen)
		return -EINVAL;
	dev->mtu = new_mtu;
	return 0;
}
EXPORT_SYMBOL_GPL(ip_tunnel_change_mtu);
static void ip_tunnel_dev_free(struct net_device *dev)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);

	gro_cells_destroy(&tunnel->gro_cells);
	free_percpu(tunnel->dst_cache);
	free_percpu(dev->tstats);
	free_netdev(dev);
}
void ip_tunnel_dellink(struct net_device *dev, struct list_head *head)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	struct ip_tunnel_net *itn;

	itn = net_generic(tunnel->net, tunnel->ip_tnl_net_id);

	if (itn->fb_tunnel_dev != dev) {
		ip_tunnel_del(netdev_priv(dev));
		unregister_netdevice_queue(dev, head);
	}
}
EXPORT_SYMBOL_GPL(ip_tunnel_dellink);
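
/* Per-netns initialization for a tunnel type: set up the hash table
 * and, when rtnl_link_ops are supplied, create the fallback device
 * (e.g. "gre0" or "tunl0").  The fallback device is marked
 * NETIF_F_NETNS_LOCAL because there must be exactly one per namespace,
 * and moving it to another namespace would break that invariant.
 */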
int ip_tunnel_init_net(struct net *net, int ip_tnl_net_id,
		       struct rtnl_link_ops *ops, char *devname)
{
	struct ip_tunnel_net *itn = net_generic(net, ip_tnl_net_id);
	struct ip_tunnel_parm parms;
	unsigned int i;

	for (i = 0; i < IP_TNL_HASH_SIZE; i++)
		INIT_HLIST_HEAD(&itn->tunnels[i]);

	if (!ops) {
		itn->fb_tunnel_dev = NULL;
		return 0;
	}

	memset(&parms, 0, sizeof(parms));
	if (devname)
		strlcpy(parms.name, devname, IFNAMSIZ);

	rtnl_lock();
	itn->fb_tunnel_dev = __ip_tunnel_create(net, ops, &parms);
	/* FB netdevice is special: we have one, and only one per netns.
	 * Allowing to move it to another netns is clearly unsafe.
	 */
	if (!IS_ERR(itn->fb_tunnel_dev)) {
		itn->fb_tunnel_dev->features |= NETIF_F_NETNS_LOCAL;
		itn->fb_tunnel_dev->mtu = ip_tunnel_bind_dev(itn->fb_tunnel_dev);
		ip_tunnel_add(itn, netdev_priv(itn->fb_tunnel_dev));
	}
	rtnl_unlock();

	return PTR_ERR_OR_ZERO(itn->fb_tunnel_dev);
}
EXPORT_SYMBOL_GPL(ip_tunnel_init_net);
static void ip_tunnel_destroy(struct ip_tunnel_net *itn, struct list_head *head,
			      struct rtnl_link_ops *ops)
{
	struct net *net = dev_net(itn->fb_tunnel_dev);
	struct net_device *dev, *aux;
	int h;

	for_each_netdev_safe(net, dev, aux)
		if (dev->rtnl_link_ops == ops)
			unregister_netdevice_queue(dev, head);

	for (h = 0; h < IP_TNL_HASH_SIZE; h++) {
		struct ip_tunnel *t;
		struct hlist_node *n;
		struct hlist_head *thead = &itn->tunnels[h];

		hlist_for_each_entry_safe(t, n, thead, hash_node)
			/* If dev is in the same netns, it has already
			 * been added to the list by the previous loop.
			 */
			if (!net_eq(dev_net(t->dev), net))
				unregister_netdevice_queue(t->dev, head);
	}
}
void ip_tunnel_delete_net(struct ip_tunnel_net *itn, struct rtnl_link_ops *ops)
{
	LIST_HEAD(list);

	rtnl_lock();
	ip_tunnel_destroy(itn, &list, ops);
	unregister_netdevice_many(&list);
	rtnl_unlock();
}
EXPORT_SYMBOL_GPL(ip_tunnel_delete_net);
int ip_tunnel_newlink(struct net_device *dev, struct nlattr *tb[],
		      struct ip_tunnel_parm *p)
{
	struct ip_tunnel *nt;
	struct net *net = dev_net(dev);
	struct ip_tunnel_net *itn;
	int mtu;
	int err;

	nt = netdev_priv(dev);
	itn = net_generic(net, nt->ip_tnl_net_id);

	if (ip_tunnel_find(itn, p, dev->type))
		return -EEXIST;

	nt->net = net;
	nt->parms = *p;
	err = register_netdevice(dev);
	if (err)
		goto out;

	if (dev->type == ARPHRD_ETHER && !tb[IFLA_ADDRESS])
		eth_hw_addr_random(dev);

	mtu = ip_tunnel_bind_dev(dev);
	if (!tb[IFLA_MTU])
		dev->mtu = mtu;

	ip_tunnel_add(itn, nt);

out:
	return err;
}
EXPORT_SYMBOL_GPL(ip_tunnel_newlink);
int ip_tunnel_changelink(struct net_device *dev, struct nlattr *tb[],
			 struct ip_tunnel_parm *p)
{
	struct ip_tunnel *t;
	struct ip_tunnel *tunnel = netdev_priv(dev);
	struct net *net = tunnel->net;
	struct ip_tunnel_net *itn = net_generic(net, tunnel->ip_tnl_net_id);

	if (dev == itn->fb_tunnel_dev)
		return -EINVAL;

	t = ip_tunnel_find(itn, p, dev->type);

	if (t) {
		if (t->dev != dev)
			return -EEXIST;
	} else {
		t = tunnel;

		if (dev->type != ARPHRD_ETHER) {
			unsigned int nflags = 0;

			if (ipv4_is_multicast(p->iph.daddr))
				nflags = IFF_BROADCAST;
			else if (p->iph.daddr)
				nflags = IFF_POINTOPOINT;

			if ((dev->flags ^ nflags) &
			    (IFF_POINTOPOINT | IFF_BROADCAST))
				return -EINVAL;
		}
	}

	ip_tunnel_update(itn, t, dev, p, !tb[IFLA_MTU]);
	return 0;
}
EXPORT_SYMBOL_GPL(ip_tunnel_changelink);
int ip_tunnel_init(struct net_device *dev)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	struct iphdr *iph = &tunnel->parms.iph;
	int err;

	dev->destructor	= ip_tunnel_dev_free;
	dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
	if (!dev->tstats)
		return -ENOMEM;

	tunnel->dst_cache = alloc_percpu(struct ip_tunnel_dst);
	if (!tunnel->dst_cache) {
		free_percpu(dev->tstats);
		return -ENOMEM;
	}

	err = gro_cells_init(&tunnel->gro_cells, dev);
	if (err) {
		free_percpu(tunnel->dst_cache);
		free_percpu(dev->tstats);
		return err;
	}

	tunnel->dev = dev;
	tunnel->net = dev_net(dev);
	strcpy(tunnel->parms.name, dev->name);
	iph->version		= 4;
	iph->ihl		= 5;

	return 0;
}
EXPORT_SYMBOL_GPL(ip_tunnel_init);
void ip_tunnel_uninit(struct net_device *dev)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	struct net *net = tunnel->net;
	struct ip_tunnel_net *itn;

	itn = net_generic(net, tunnel->ip_tnl_net_id);
	/* fb_tunnel_dev will be unregistered in the net-exit call. */
	if (itn->fb_tunnel_dev != dev)
		ip_tunnel_del(netdev_priv(dev));

	ip_tunnel_dst_reset_all(tunnel);
}
EXPORT_SYMBOL_GPL(ip_tunnel_uninit);
/* Do the least required initialization; the rest is done in the tunnel_init call */
void ip_tunnel_setup(struct net_device *dev, int net_id)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	tunnel->ip_tnl_net_id = net_id;
}
EXPORT_SYMBOL_GPL(ip_tunnel_setup);
MODULE_LICENSE("GPL");