2 * Linux NET3: GRE over IP protocol decoder.
4 * Authors: Alexey Kuznetsov (kuznet@ms2.inr.ac.ru)
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
13 #include <linux/capability.h>
14 #include <linux/module.h>
15 #include <linux/types.h>
16 #include <linux/kernel.h>
17 #include <linux/slab.h>
18 #include <asm/uaccess.h>
19 #include <linux/skbuff.h>
20 #include <linux/netdevice.h>
22 #include <linux/tcp.h>
23 #include <linux/udp.h>
24 #include <linux/if_arp.h>
25 #include <linux/mroute.h>
26 #include <linux/init.h>
27 #include <linux/in6.h>
28 #include <linux/inetdevice.h>
29 #include <linux/igmp.h>
30 #include <linux/netfilter_ipv4.h>
31 #include <linux/etherdevice.h>
32 #include <linux/if_ether.h>
37 #include <net/protocol.h>
40 #include <net/checksum.h>
41 #include <net/dsfield.h>
42 #include <net/inet_ecn.h>
44 #include <net/net_namespace.h>
45 #include <net/netns/generic.h>
46 #include <net/rtnetlink.h>
49 #if IS_ENABLED(CONFIG_IPV6)
51 #include <net/ip6_fib.h>
52 #include <net/ip6_route.h>
59 1. The most important issue is detecting local dead loops.
60 They would cause complete host lockup in transmit, which
61 would be "resolved" by stack overflow or, if queueing is enabled,
62 with infinite looping in net_bh.
64	We cannot track such dead loops during route installation;
65	it is an infeasible task. The most general solution would be
66	to keep an skb->encapsulation counter (a sort of local ttl)
67	and silently drop the packet when it expires. It is a good
68	solution, but it requires maintaining a new field in ALL
69	skbs, even if no tunneling is used.
71	Current solution: xmit_recursion breaks dead loops. This is a per-cpu
72	counter, since once we enter the first ndo_xmit(), cpu migration is
73	forbidden. We force an exit if this counter reaches RECURSION_LIMIT.
75	2. Networking dead loops would not kill routers, but would really
76	kill the network. The IP hop limit plays the role of "t->recursion" in this case,
77	if we copy it from the packet being encapsulated to the upper header.
78	It is a very good solution, but it introduces two problems:
80	- Routing protocols that use packets with ttl=1 (OSPF, RIP2)
81	do not work over tunnels.
82	- traceroute does not work. I planned to relay ICMP from the tunnel,
83	so that this problem would be solved and traceroute output
84	would be even more informative. This idea turned out to be wrong:
85	only Linux complies with rfc1812 now (yes, guys, Linux is the only
86	true router now :-)); all other routers (at least in my neighbourhood)
87	return only 8 bytes of payload. That is the end of it.
89	Hence, if we want OSPF to work or traceroute to say something reasonable,
90	we have to search for another solution.
92	One of them is to parse the packet, trying to detect inner encapsulation
93	made by our own node. It is difficult or even impossible, especially
94	taking fragmentation into account. In short, ttl is not a solution at all.
96	Current solution: The solution was UNEXPECTEDLY SIMPLE.
97	We force the DF flag on tunnels with a preconfigured hop limit,
98	and that is ALL. :-) (An illustrative note follows at the end of this comment.)
99	Well, it does not remove the problem completely, but the exponential
100	growth of network traffic is changed to linear growth (branches that
101	exceed the pmtu are pruned) and the tunnel mtu
102	rapidly degrades to a value < 68, where looping stops.
103	Yes, it is not good if there is a router in the loop
104	which does not force DF, even when the encapsulated packets have DF set.
105	But it is not our problem! Nobody could accuse us; we did
106	all that we could. Even if it is your gated that injected the
107	fatal route into the network, even if it was you who configured the
108	fatal static route: you are innocent. :-)
111	3. Really, ipv4/ipip.c, ipv4/ip_gre.c and ipv6/sit.c contain
112	practically identical code. It would be good to glue them
113	together, but it is not obvious how to make them modular.
114	sit is an integral part of IPv6, while ipip and gre are naturally modular.
115	We could extract the common parts (hash table, ioctl, etc.)
116	into a separate module (ip_tunnel.c).
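
	Illustrative note on forcing DF (a sketch of the rule described above,
	not a new mechanism): a tunnel configured with an explicit ttl always
	gets the DF bit set on its outer header; the configuration path below
	does, in essence,

		if (p.iph.ttl)
			p.iph.frag_off |= htons(IP_DF);

	so a loop is throttled by path MTU discovery instead of growing
	without bound.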
121 static struct rtnl_link_ops ipgre_link_ops __read_mostly;
122 static int ipgre_tunnel_init(struct net_device *dev);
123 static void ipgre_tunnel_setup(struct net_device *dev);
124 static int ipgre_tunnel_bind_dev(struct net_device *dev);
126 /* Fallback tunnel: no source, no destination, no key, no options */
130 static int ipgre_net_id __read_mostly;
132 struct ip_tunnel __rcu *tunnels[4][HASH_SIZE];
134 struct net_device *fb_tunnel_dev;
137 /* Tunnel hash table */
147	We require an exact key match, i.e. if a key is present in the packet
148	it will match only a tunnel with the same key; if it is not present,
149	it will match only a keyless tunnel.
151	All keyless packets, if they do not match a configured keyless tunnel,
152	will match the fallback tunnel.
155 #define HASH(addr) (((__force u32)addr^((__force u32)addr>>4))&0xF)
157 #define tunnels_r_l tunnels[3]
158 #define tunnels_r tunnels[2]
159 #define tunnels_l tunnels[1]
160 #define tunnels_wc tunnels[0]
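
/*
 * A rough outline of bucket selection: tunnels live in four priority
 * classes (keyed by remote and local address, by remote only, by local
 * only, and wildcard), and within each class the 4-bit chain index is
 * derived from HASH() of the key, xor'ed with HASH() of the remote
 * address when one is configured, so every class has 16 short chains.
 */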
162 * Locking : hash tables are protected by RCU and RTNL
165 #define for_each_ip_tunnel_rcu(start) \
166 for (t = rcu_dereference(start); t; t = rcu_dereference(t->next))
168 /* often-modified stats are per-cpu, the others are shared (netdev->stats) */
170 unsigned long rx_packets;
171 unsigned long rx_bytes;
172 unsigned long tx_packets;
173 unsigned long tx_bytes;
174 } __attribute__((aligned(4*sizeof(unsigned long))));
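
/*
 * Keeping these counters per cpu avoids cross-CPU contention on the hot
 * receive/transmit paths; ipgre_get_stats() below folds them into the
 * shared netdev stats on demand.
 */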
176 static struct net_device_stats *ipgre_get_stats(struct net_device *dev)
178 struct pcpu_tstats sum = { 0 };
181 for_each_possible_cpu(i) {
182 const struct pcpu_tstats *tstats = per_cpu_ptr(dev->tstats, i);
184 sum.rx_packets += tstats->rx_packets;
185 sum.rx_bytes += tstats->rx_bytes;
186 sum.tx_packets += tstats->tx_packets;
187 sum.tx_bytes += tstats->tx_bytes;
189 dev->stats.rx_packets = sum.rx_packets;
190 dev->stats.rx_bytes = sum.rx_bytes;
191 dev->stats.tx_packets = sum.tx_packets;
192 dev->stats.tx_bytes = sum.tx_bytes;
196 /* Given src, dst and key, find the appropriate tunnel for an incoming packet. */
198 static struct ip_tunnel * ipgre_tunnel_lookup(struct net_device *dev,
199 __be32 remote, __be32 local,
200 __be32 key, __be16 gre_proto)
202 struct net *net = dev_net(dev);
203 int link = dev->ifindex;
204 unsigned int h0 = HASH(remote);
205 unsigned int h1 = HASH(key);
206 struct ip_tunnel *t, *cand = NULL;
207 struct ipgre_net *ign = net_generic(net, ipgre_net_id);
208 int dev_type = (gre_proto == htons(ETH_P_TEB)) ?
209 ARPHRD_ETHER : ARPHRD_IPGRE;
210 int score, cand_score = 4;
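
	/*
	 * Candidate scoring (a sketch of the loops below): an entry whose
	 * link and device type both match the incoming packet is an exact
	 * match and wins immediately; otherwise the lowest-scoring
	 * candidate seen across the four hash chains is remembered and
	 * used, before finally falling back to the per-netns fallback
	 * device.
	 */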
212 for_each_ip_tunnel_rcu(ign->tunnels_r_l[h0 ^ h1]) {
213 if (local != t->parms.iph.saddr ||
214 remote != t->parms.iph.daddr ||
215 key != t->parms.i_key ||
216 !(t->dev->flags & IFF_UP))
219 if (t->dev->type != ARPHRD_IPGRE &&
220 t->dev->type != dev_type)
224 if (t->parms.link != link)
226 if (t->dev->type != dev_type)
231 if (score < cand_score) {
237 for_each_ip_tunnel_rcu(ign->tunnels_r[h0 ^ h1]) {
238 if (remote != t->parms.iph.daddr ||
239 key != t->parms.i_key ||
240 !(t->dev->flags & IFF_UP))
243 if (t->dev->type != ARPHRD_IPGRE &&
244 t->dev->type != dev_type)
248 if (t->parms.link != link)
250 if (t->dev->type != dev_type)
255 if (score < cand_score) {
261 for_each_ip_tunnel_rcu(ign->tunnels_l[h1]) {
262 if ((local != t->parms.iph.saddr &&
263 (local != t->parms.iph.daddr ||
264 !ipv4_is_multicast(local))) ||
265 key != t->parms.i_key ||
266 !(t->dev->flags & IFF_UP))
269 if (t->dev->type != ARPHRD_IPGRE &&
270 t->dev->type != dev_type)
274 if (t->parms.link != link)
276 if (t->dev->type != dev_type)
281 if (score < cand_score) {
287 for_each_ip_tunnel_rcu(ign->tunnels_wc[h1]) {
288 if (t->parms.i_key != key ||
289 !(t->dev->flags & IFF_UP))
292 if (t->dev->type != ARPHRD_IPGRE &&
293 t->dev->type != dev_type)
297 if (t->parms.link != link)
299 if (t->dev->type != dev_type)
304 if (score < cand_score) {
313 dev = ign->fb_tunnel_dev;
314 if (dev->flags & IFF_UP)
315 return netdev_priv(dev);
320 static struct ip_tunnel __rcu **__ipgre_bucket(struct ipgre_net *ign,
321 struct ip_tunnel_parm *parms)
323 __be32 remote = parms->iph.daddr;
324 __be32 local = parms->iph.saddr;
325 __be32 key = parms->i_key;
326 unsigned int h = HASH(key);
331 if (remote && !ipv4_is_multicast(remote)) {
336 return &ign->tunnels[prio][h];
339 static inline struct ip_tunnel __rcu **ipgre_bucket(struct ipgre_net *ign,
342 return __ipgre_bucket(ign, &t->parms);
345 static void ipgre_tunnel_link(struct ipgre_net *ign, struct ip_tunnel *t)
347 struct ip_tunnel __rcu **tp = ipgre_bucket(ign, t);
349 rcu_assign_pointer(t->next, rtnl_dereference(*tp));
350 rcu_assign_pointer(*tp, t);
353 static void ipgre_tunnel_unlink(struct ipgre_net *ign, struct ip_tunnel *t)
355 struct ip_tunnel __rcu **tp;
356 struct ip_tunnel *iter;
358 for (tp = ipgre_bucket(ign, t);
359 (iter = rtnl_dereference(*tp)) != NULL;
362 rcu_assign_pointer(*tp, t->next);
368 static struct ip_tunnel *ipgre_tunnel_find(struct net *net,
369 struct ip_tunnel_parm *parms,
372 __be32 remote = parms->iph.daddr;
373 __be32 local = parms->iph.saddr;
374 __be32 key = parms->i_key;
375 int link = parms->link;
377 struct ip_tunnel __rcu **tp;
378 struct ipgre_net *ign = net_generic(net, ipgre_net_id);
380 for (tp = __ipgre_bucket(ign, parms);
381 (t = rtnl_dereference(*tp)) != NULL;
383 if (local == t->parms.iph.saddr &&
384 remote == t->parms.iph.daddr &&
385 key == t->parms.i_key &&
386 link == t->parms.link &&
387 type == t->dev->type)
393 static struct ip_tunnel *ipgre_tunnel_locate(struct net *net,
394 struct ip_tunnel_parm *parms, int create)
396 struct ip_tunnel *t, *nt;
397 struct net_device *dev;
399 struct ipgre_net *ign = net_generic(net, ipgre_net_id);
401 t = ipgre_tunnel_find(net, parms, ARPHRD_IPGRE);
406 strlcpy(name, parms->name, IFNAMSIZ);
408 strcpy(name, "gre%d");
410 dev = alloc_netdev(sizeof(*t), name, ipgre_tunnel_setup);
414 dev_net_set(dev, net);
416 nt = netdev_priv(dev);
418 dev->rtnl_link_ops = &ipgre_link_ops;
420 dev->mtu = ipgre_tunnel_bind_dev(dev);
422 if (register_netdevice(dev) < 0)
425 /* Can use a lockless transmit, unless we generate output sequences */
426 if (!(nt->parms.o_flags & GRE_SEQ))
427 dev->features |= NETIF_F_LLTX;
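	/* (a sequence-generating tunnel updates o_seqno for every packet it
	 * sends, so its transmit path has to stay serialized by the tx lock)
	 */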
430 ipgre_tunnel_link(ign, nt);
438 static void ipgre_tunnel_uninit(struct net_device *dev)
440 struct net *net = dev_net(dev);
441 struct ipgre_net *ign = net_generic(net, ipgre_net_id);
443 ipgre_tunnel_unlink(ign, netdev_priv(dev));
448 static void ipgre_err(struct sk_buff *skb, u32 info)
451 /* All the routers (except for Linux) return only
452	8 bytes of packet payload. It means that precise relaying of
453	ICMP in the real Internet is absolutely infeasible.
455	Moreover, Cisco "wise men" put the GRE key in the third word
456	of the GRE header. This makes it impossible to maintain even soft state for keyed
457	GRE tunnels with the checksum enabled. Tell them "thank you".
459	Well, I wonder: rfc1812 was written by a Cisco employee, so
460	why the hell do these idiots break the standards established
464 const struct iphdr *iph = (const struct iphdr *)skb->data;
465 __be16 *p = (__be16*)(skb->data+(iph->ihl<<2));
466 int grehlen = (iph->ihl<<2) + 4;
467 const int type = icmp_hdr(skb)->type;
468 const int code = icmp_hdr(skb)->code;
473 if (flags&(GRE_CSUM|GRE_KEY|GRE_SEQ|GRE_ROUTING|GRE_VERSION)) {
474 if (flags&(GRE_VERSION|GRE_ROUTING))
483	/* If only 8 bytes were returned, a keyed message will be dropped here */
484 if (skb_headlen(skb) < grehlen)
489 case ICMP_PARAMETERPROB:
492 case ICMP_DEST_UNREACH:
495 case ICMP_PORT_UNREACH:
496 /* Impossible event. */
498 case ICMP_FRAG_NEEDED:
499 /* Soft state for pmtu is maintained by IP core. */
502 /* All others are translated to HOST_UNREACH.
503 rfc2003 contains "deep thoughts" about NET_UNREACH,
504 I believe they are just ether pollution. --ANK
509 case ICMP_TIME_EXCEEDED:
510 if (code != ICMP_EXC_TTL)
516 t = ipgre_tunnel_lookup(skb->dev, iph->daddr, iph->saddr,
518 *(((__be32 *)p) + (grehlen / 4) - 1) : 0,
520 if (t == NULL || t->parms.iph.daddr == 0 ||
521 ipv4_is_multicast(t->parms.iph.daddr))
524 if (t->parms.iph.ttl == 0 && type == ICMP_TIME_EXCEEDED)
527 if (time_before(jiffies, t->err_time + IPTUNNEL_ERR_TIMEO))
531 t->err_time = jiffies;
536 static inline void ipgre_ecn_decapsulate(const struct iphdr *iph, struct sk_buff *skb)
538 if (INET_ECN_is_ce(iph->tos)) {
539 if (skb->protocol == htons(ETH_P_IP)) {
540 IP_ECN_set_ce(ip_hdr(skb));
541 } else if (skb->protocol == htons(ETH_P_IPV6)) {
542 IP6_ECN_set_ce(ipv6_hdr(skb));
548 ipgre_ecn_encapsulate(u8 tos, const struct iphdr *old_iph, struct sk_buff *skb)
551 if (skb->protocol == htons(ETH_P_IP))
552 inner = old_iph->tos;
553 else if (skb->protocol == htons(ETH_P_IPV6))
554 inner = ipv6_get_dsfield((const struct ipv6hdr *)old_iph);
555 return INET_ECN_encapsulate(tos, inner);
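
/*
 * For reference, an illustrative layout of the GRE header parsed by
 * ipgre_rcv() below (RFC 2784/2890); each optional field is present
 * only when its flag bit is set:
 *
 *	 0                 2                 4
 *	+-----------------+-----------------+
 *	| flags / version | protocol type   |
 *	+-----------------+-----------------+
 *	| checksum (opt)  | reserved (opt)  |
 *	+-----------------+-----------------+
 *	|           key (optional)          |
 *	+-----------------------------------+
 *	|    sequence number (optional)     |
 *	+-----------------------------------+
 */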
558 static int ipgre_rcv(struct sk_buff *skb)
560 const struct iphdr *iph;
566 struct ip_tunnel *tunnel;
570 if (!pskb_may_pull(skb, 16))
577 if (flags&(GRE_CSUM|GRE_KEY|GRE_ROUTING|GRE_SEQ|GRE_VERSION)) {
578 /* - Version must be 0.
579 - We do not support routing headers.
581 if (flags&(GRE_VERSION|GRE_ROUTING))
584 if (flags&GRE_CSUM) {
585 switch (skb->ip_summed) {
586 case CHECKSUM_COMPLETE:
587 csum = csum_fold(skb->csum);
593 csum = __skb_checksum_complete(skb);
594 skb->ip_summed = CHECKSUM_COMPLETE;
599 key = *(__be32*)(h + offset);
603 seqno = ntohl(*(__be32*)(h + offset));
608 gre_proto = *(__be16 *)(h + 2);
611 if ((tunnel = ipgre_tunnel_lookup(skb->dev,
612 iph->saddr, iph->daddr, key,
614 struct pcpu_tstats *tstats;
618 skb->protocol = gre_proto;
619 /* WCCP version 1 and 2 protocol decoding.
620 * - Change protocol to IP
621	 * - When dealing with WCCPv2, skip the extra 4 bytes in the GRE header
623 if (flags == 0 && gre_proto == htons(ETH_P_WCCP)) {
624 skb->protocol = htons(ETH_P_IP);
625 if ((*(h + offset) & 0xF0) != 0x40)
629 skb->mac_header = skb->network_header;
630 __pskb_pull(skb, offset);
631 skb_postpull_rcsum(skb, skb_transport_header(skb), offset);
632 skb->pkt_type = PACKET_HOST;
633 #ifdef CONFIG_NET_IPGRE_BROADCAST
634 if (ipv4_is_multicast(iph->daddr)) {
635 /* Looped back packet, drop it! */
636 if (rt_is_output_route(skb_rtable(skb)))
638 tunnel->dev->stats.multicast++;
639 skb->pkt_type = PACKET_BROADCAST;
643 if (((flags&GRE_CSUM) && csum) ||
644 (!(flags&GRE_CSUM) && tunnel->parms.i_flags&GRE_CSUM)) {
645 tunnel->dev->stats.rx_crc_errors++;
646 tunnel->dev->stats.rx_errors++;
649 if (tunnel->parms.i_flags&GRE_SEQ) {
650 if (!(flags&GRE_SEQ) ||
651 (tunnel->i_seqno && (s32)(seqno - tunnel->i_seqno) < 0)) {
652 tunnel->dev->stats.rx_fifo_errors++;
653 tunnel->dev->stats.rx_errors++;
656 tunnel->i_seqno = seqno + 1;
659 /* Warning: All skb pointers will be invalidated! */
660 if (tunnel->dev->type == ARPHRD_ETHER) {
661 if (!pskb_may_pull(skb, ETH_HLEN)) {
662 tunnel->dev->stats.rx_length_errors++;
663 tunnel->dev->stats.rx_errors++;
668 skb->protocol = eth_type_trans(skb, tunnel->dev);
669 skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
672 tstats = this_cpu_ptr(tunnel->dev->tstats);
673 tstats->rx_packets++;
674 tstats->rx_bytes += skb->len;
676 __skb_tunnel_rx(skb, tunnel->dev);
678 skb_reset_network_header(skb);
679 ipgre_ecn_decapsulate(iph, skb);
686 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
695 static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
697 struct ip_tunnel *tunnel = netdev_priv(dev);
698 struct pcpu_tstats *tstats;
699 const struct iphdr *old_iph = ip_hdr(skb);
700 const struct iphdr *tiph;
704 struct rtable *rt; /* Route to the other host */
705 struct net_device *tdev; /* Device to other host */
706 struct iphdr *iph; /* Our new IP header */
707 unsigned int max_headroom; /* The extra header space needed */
712 if (dev->type == ARPHRD_ETHER)
713 IPCB(skb)->flags = 0;
715 if (dev->header_ops && dev->type == ARPHRD_IPGRE) {
717 tiph = (const struct iphdr *)skb->data;
719 gre_hlen = tunnel->hlen;
720 tiph = &tunnel->parms.iph;
723 if ((dst = tiph->daddr) == 0) {
726 if (skb_dst(skb) == NULL) {
727 dev->stats.tx_fifo_errors++;
731 if (skb->protocol == htons(ETH_P_IP)) {
732 rt = skb_rtable(skb);
733 dst = rt->rt_gateway;
735 #if IS_ENABLED(CONFIG_IPV6)
736 else if (skb->protocol == htons(ETH_P_IPV6)) {
737 const struct in6_addr *addr6;
738 struct neighbour *neigh;
739 bool do_tx_error_icmp;
742 neigh = dst_neigh_lookup(skb_dst(skb), &ipv6_hdr(skb)->daddr);
746 addr6 = (const struct in6_addr *)&neigh->primary_key;
747 addr_type = ipv6_addr_type(addr6);
749 if (addr_type == IPV6_ADDR_ANY) {
750 addr6 = &ipv6_hdr(skb)->daddr;
751 addr_type = ipv6_addr_type(addr6);
754 if ((addr_type & IPV6_ADDR_COMPATv4) == 0)
755 do_tx_error_icmp = true;
757 do_tx_error_icmp = false;
758 dst = addr6->s6_addr32[3];
760 neigh_release(neigh);
761 if (do_tx_error_icmp)
772 if (skb->protocol == htons(ETH_P_IP))
774 else if (skb->protocol == htons(ETH_P_IPV6))
775 tos = ipv6_get_dsfield((const struct ipv6hdr *)old_iph);
778 rt = ip_route_output_gre(dev_net(dev), &fl4, dst, tiph->saddr,
779 tunnel->parms.o_key, RT_TOS(tos),
782 dev->stats.tx_carrier_errors++;
789 dev->stats.collisions++;
795 mtu = dst_mtu(&rt->dst) - dev->hard_header_len - tunnel->hlen;
797 mtu = skb_dst(skb) ? dst_mtu(skb_dst(skb)) : dev->mtu;
800 skb_dst(skb)->ops->update_pmtu(skb_dst(skb), mtu);
802 if (skb->protocol == htons(ETH_P_IP)) {
803 df |= (old_iph->frag_off&htons(IP_DF));
805 if ((old_iph->frag_off&htons(IP_DF)) &&
806 mtu < ntohs(old_iph->tot_len)) {
807 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu));
812 #if IS_ENABLED(CONFIG_IPV6)
813 else if (skb->protocol == htons(ETH_P_IPV6)) {
814 struct rt6_info *rt6 = (struct rt6_info *)skb_dst(skb);
816 if (rt6 && mtu < dst_mtu(skb_dst(skb)) && mtu >= IPV6_MIN_MTU) {
817 if ((tunnel->parms.iph.daddr &&
818 !ipv4_is_multicast(tunnel->parms.iph.daddr)) ||
819 rt6->rt6i_dst.plen == 128) {
820 rt6->rt6i_flags |= RTF_MODIFIED;
821 dst_metric_set(skb_dst(skb), RTAX_MTU, mtu);
825 if (mtu >= IPV6_MIN_MTU && mtu < skb->len - tunnel->hlen + gre_hlen) {
826 icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
833 if (tunnel->err_count > 0) {
834 if (time_before(jiffies,
835 tunnel->err_time + IPTUNNEL_ERR_TIMEO)) {
838 dst_link_failure(skb);
840 tunnel->err_count = 0;
843 max_headroom = LL_RESERVED_SPACE(tdev) + gre_hlen + rt->dst.header_len;
845 if (skb_headroom(skb) < max_headroom || skb_shared(skb)||
846 (skb_cloned(skb) && !skb_clone_writable(skb, 0))) {
847 struct sk_buff *new_skb = skb_realloc_headroom(skb, max_headroom);
848 if (max_headroom > dev->needed_headroom)
849 dev->needed_headroom = max_headroom;
852 dev->stats.tx_dropped++;
857 skb_set_owner_w(new_skb, skb->sk);
860 old_iph = ip_hdr(skb);
863 skb_reset_transport_header(skb);
864 skb_push(skb, gre_hlen);
865 skb_reset_network_header(skb);
866 memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
867 IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED |
870 skb_dst_set(skb, &rt->dst);
873	 *	Push down and install the outer IP and GRE header.
878 iph->ihl = sizeof(struct iphdr) >> 2;
880 iph->protocol = IPPROTO_GRE;
881 iph->tos = ipgre_ecn_encapsulate(tos, old_iph, skb);
882 iph->daddr = fl4.daddr;
883 iph->saddr = fl4.saddr;
885 if ((iph->ttl = tiph->ttl) == 0) {
886 if (skb->protocol == htons(ETH_P_IP))
887 iph->ttl = old_iph->ttl;
888 #if IS_ENABLED(CONFIG_IPV6)
889 else if (skb->protocol == htons(ETH_P_IPV6))
890 iph->ttl = ((const struct ipv6hdr *)old_iph)->hop_limit;
893 iph->ttl = ip4_dst_hoplimit(&rt->dst);
896 ((__be16 *)(iph + 1))[0] = tunnel->parms.o_flags;
897 ((__be16 *)(iph + 1))[1] = (dev->type == ARPHRD_ETHER) ?
898 htons(ETH_P_TEB) : skb->protocol;
900 if (tunnel->parms.o_flags&(GRE_KEY|GRE_CSUM|GRE_SEQ)) {
901 __be32 *ptr = (__be32*)(((u8*)iph) + tunnel->hlen - 4);
903 if (tunnel->parms.o_flags&GRE_SEQ) {
905 *ptr = htonl(tunnel->o_seqno);
908 if (tunnel->parms.o_flags&GRE_KEY) {
909 *ptr = tunnel->parms.o_key;
912 if (tunnel->parms.o_flags&GRE_CSUM) {
914 *(__sum16*)ptr = ip_compute_csum((void*)(iph+1), skb->len - sizeof(struct iphdr));
919 tstats = this_cpu_ptr(dev->tstats);
920 __IPTUNNEL_XMIT(tstats, &dev->stats);
923 #if IS_ENABLED(CONFIG_IPV6)
925 dst_link_failure(skb);
928 dev->stats.tx_errors++;
933 static int ipgre_tunnel_bind_dev(struct net_device *dev)
935 struct net_device *tdev = NULL;
936 struct ip_tunnel *tunnel;
937 const struct iphdr *iph;
938 int hlen = LL_MAX_HEADER;
939 int mtu = ETH_DATA_LEN;
940 int addend = sizeof(struct iphdr) + 4;
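
	/*
	 * Base encapsulation overhead: the outer IPv4 header plus the
	 * 4-byte mandatory GRE header; each output option enabled below
	 * (checksum with its reserved word, key, sequence number) adds
	 * another 4 bytes.
	 */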
942 tunnel = netdev_priv(dev);
943 iph = &tunnel->parms.iph;
945 /* Guess output device to choose reasonable mtu and needed_headroom */
951 rt = ip_route_output_gre(dev_net(dev), &fl4,
952 iph->daddr, iph->saddr,
961 if (dev->type != ARPHRD_ETHER)
962 dev->flags |= IFF_POINTOPOINT;
965 if (!tdev && tunnel->parms.link)
966 tdev = __dev_get_by_index(dev_net(dev), tunnel->parms.link);
969 hlen = tdev->hard_header_len + tdev->needed_headroom;
972 dev->iflink = tunnel->parms.link;
974 /* Precalculate GRE options length */
975 if (tunnel->parms.o_flags&(GRE_CSUM|GRE_KEY|GRE_SEQ)) {
976 if (tunnel->parms.o_flags&GRE_CSUM)
978 if (tunnel->parms.o_flags&GRE_KEY)
980 if (tunnel->parms.o_flags&GRE_SEQ)
983 dev->needed_headroom = addend + hlen;
984 mtu -= dev->hard_header_len + addend;
989 tunnel->hlen = addend;
995 ipgre_tunnel_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd)
998 struct ip_tunnel_parm p;
1000 struct net *net = dev_net(dev);
1001 struct ipgre_net *ign = net_generic(net, ipgre_net_id);
1006 if (dev == ign->fb_tunnel_dev) {
1007 if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p))) {
1011 t = ipgre_tunnel_locate(net, &p, 0);
1014 t = netdev_priv(dev);
1015 memcpy(&p, &t->parms, sizeof(p));
1016 if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p)))
1023 if (!capable(CAP_NET_ADMIN))
1027 if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))
1031 if (p.iph.version != 4 || p.iph.protocol != IPPROTO_GRE ||
1032 p.iph.ihl != 5 || (p.iph.frag_off&htons(~IP_DF)) ||
1033 ((p.i_flags|p.o_flags)&(GRE_VERSION|GRE_ROUTING)))
1036 p.iph.frag_off |= htons(IP_DF);
1038 if (!(p.i_flags&GRE_KEY))
1040 if (!(p.o_flags&GRE_KEY))
1043 t = ipgre_tunnel_locate(net, &p, cmd == SIOCADDTUNNEL);
1045 if (dev != ign->fb_tunnel_dev && cmd == SIOCCHGTUNNEL) {
1047 if (t->dev != dev) {
1052 unsigned int nflags = 0;
1054 t = netdev_priv(dev);
1056 if (ipv4_is_multicast(p.iph.daddr))
1057 nflags = IFF_BROADCAST;
1058 else if (p.iph.daddr)
1059 nflags = IFF_POINTOPOINT;
1061 if ((dev->flags^nflags)&(IFF_POINTOPOINT|IFF_BROADCAST)) {
1065 ipgre_tunnel_unlink(ign, t);
1067 t->parms.iph.saddr = p.iph.saddr;
1068 t->parms.iph.daddr = p.iph.daddr;
1069 t->parms.i_key = p.i_key;
1070 t->parms.o_key = p.o_key;
1071 memcpy(dev->dev_addr, &p.iph.saddr, 4);
1072 memcpy(dev->broadcast, &p.iph.daddr, 4);
1073 ipgre_tunnel_link(ign, t);
1074 netdev_state_change(dev);
1080 if (cmd == SIOCCHGTUNNEL) {
1081 t->parms.iph.ttl = p.iph.ttl;
1082 t->parms.iph.tos = p.iph.tos;
1083 t->parms.iph.frag_off = p.iph.frag_off;
1084 if (t->parms.link != p.link) {
1085 t->parms.link = p.link;
1086 dev->mtu = ipgre_tunnel_bind_dev(dev);
1087 netdev_state_change(dev);
1090 if (copy_to_user(ifr->ifr_ifru.ifru_data, &t->parms, sizeof(p)))
1093 err = (cmd == SIOCADDTUNNEL ? -ENOBUFS : -ENOENT);
1098 if (!capable(CAP_NET_ADMIN))
1101 if (dev == ign->fb_tunnel_dev) {
1103 if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))
1106 if ((t = ipgre_tunnel_locate(net, &p, 0)) == NULL)
1109 if (t == netdev_priv(ign->fb_tunnel_dev))
1113 unregister_netdevice(dev);
1125 static int ipgre_tunnel_change_mtu(struct net_device *dev, int new_mtu)
1127 struct ip_tunnel *tunnel = netdev_priv(dev);
1129 new_mtu > 0xFFF8 - dev->hard_header_len - tunnel->hlen)
1135 /* Nice toy. Unfortunately, useless in real life :-)
1136	It allows one to construct a virtual multiprotocol broadcast "LAN"
1137	over the Internet, provided multicast routing is tuned.
1140	I have no idea whether this bicycle was invented before me,
1141	so I had to set ARPHRD_IPGRE to a random value.
1142	I have an impression that Cisco could do something similar,
1143	but this feature is apparently missing in IOS<=11.2(8).
1145 I set up 10.66.66/24 and fec0:6666:6666::0/96 as virtual networks
1146 with broadcast 224.66.66.66. If you have access to mbone, play with me :-)
1148 ping -t 255 224.66.66.66
1150 If nobody answers, mbone does not work.
1152 ip tunnel add Universe mode gre remote 224.66.66.66 local <Your_real_addr> ttl 255
1153 ip addr add 10.66.66.<somewhat>/24 dev Universe
1154 ifconfig Universe up
1155 ifconfig Universe add fe80::<Your_real_addr>/10
1156 ifconfig Universe add fec0:6666:6666::<Your_real_addr>/96
1159 ftp fec0:6666:6666::193.233.7.65
1164 static int ipgre_header(struct sk_buff *skb, struct net_device *dev,
1165 unsigned short type,
1166 const void *daddr, const void *saddr, unsigned int len)
1168 struct ip_tunnel *t = netdev_priv(dev);
1169 struct iphdr *iph = (struct iphdr *)skb_push(skb, t->hlen);
1170 __be16 *p = (__be16*)(iph+1);
1172 memcpy(iph, &t->parms.iph, sizeof(struct iphdr));
1173 p[0] = t->parms.o_flags;
1177 * Set the source hardware address.
1181 memcpy(&iph->saddr, saddr, 4);
1183 memcpy(&iph->daddr, daddr, 4);
1190 static int ipgre_header_parse(const struct sk_buff *skb, unsigned char *haddr)
1192 const struct iphdr *iph = (const struct iphdr *) skb_mac_header(skb);
1193 memcpy(haddr, &iph->saddr, 4);
1197 static const struct header_ops ipgre_header_ops = {
1198 .create = ipgre_header,
1199 .parse = ipgre_header_parse,
1202 #ifdef CONFIG_NET_IPGRE_BROADCAST
1203 static int ipgre_open(struct net_device *dev)
1205 struct ip_tunnel *t = netdev_priv(dev);
1207 if (ipv4_is_multicast(t->parms.iph.daddr)) {
1211 rt = ip_route_output_gre(dev_net(dev), &fl4,
1215 RT_TOS(t->parms.iph.tos),
1218 return -EADDRNOTAVAIL;
1221 if (__in_dev_get_rtnl(dev) == NULL)
1222 return -EADDRNOTAVAIL;
1223 t->mlink = dev->ifindex;
1224 ip_mc_inc_group(__in_dev_get_rtnl(dev), t->parms.iph.daddr);
1229 static int ipgre_close(struct net_device *dev)
1231 struct ip_tunnel *t = netdev_priv(dev);
1233 if (ipv4_is_multicast(t->parms.iph.daddr) && t->mlink) {
1234 struct in_device *in_dev;
1235 in_dev = inetdev_by_index(dev_net(dev), t->mlink);
1237 ip_mc_dec_group(in_dev, t->parms.iph.daddr);
1244 static const struct net_device_ops ipgre_netdev_ops = {
1245 .ndo_init = ipgre_tunnel_init,
1246 .ndo_uninit = ipgre_tunnel_uninit,
1247 #ifdef CONFIG_NET_IPGRE_BROADCAST
1248 .ndo_open = ipgre_open,
1249 .ndo_stop = ipgre_close,
1251 .ndo_start_xmit = ipgre_tunnel_xmit,
1252 .ndo_do_ioctl = ipgre_tunnel_ioctl,
1253 .ndo_change_mtu = ipgre_tunnel_change_mtu,
1254 .ndo_get_stats = ipgre_get_stats,
1257 static void ipgre_dev_free(struct net_device *dev)
1259 free_percpu(dev->tstats);
1263 static void ipgre_tunnel_setup(struct net_device *dev)
1265 dev->netdev_ops = &ipgre_netdev_ops;
1266 dev->destructor = ipgre_dev_free;
1268 dev->type = ARPHRD_IPGRE;
1269 dev->needed_headroom = LL_MAX_HEADER + sizeof(struct iphdr) + 4;
1270 dev->mtu = ETH_DATA_LEN - sizeof(struct iphdr) - 4;
1271 dev->flags = IFF_NOARP;
1274 dev->features |= NETIF_F_NETNS_LOCAL;
1275 dev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
1278 static int ipgre_tunnel_init(struct net_device *dev)
1280 struct ip_tunnel *tunnel;
1283 tunnel = netdev_priv(dev);
1284 iph = &tunnel->parms.iph;
1287 strcpy(tunnel->parms.name, dev->name);
1289 memcpy(dev->dev_addr, &tunnel->parms.iph.saddr, 4);
1290 memcpy(dev->broadcast, &tunnel->parms.iph.daddr, 4);
1293 #ifdef CONFIG_NET_IPGRE_BROADCAST
1294 if (ipv4_is_multicast(iph->daddr)) {
1297 dev->flags = IFF_BROADCAST;
1298 dev->header_ops = &ipgre_header_ops;
1302 dev->header_ops = &ipgre_header_ops;
1304 dev->tstats = alloc_percpu(struct pcpu_tstats);
1311 static void ipgre_fb_tunnel_init(struct net_device *dev)
1313 struct ip_tunnel *tunnel = netdev_priv(dev);
1314 struct iphdr *iph = &tunnel->parms.iph;
1317 strcpy(tunnel->parms.name, dev->name);
1320 iph->protocol = IPPROTO_GRE;
1322 tunnel->hlen = sizeof(struct iphdr) + 4;
1328 static const struct gre_protocol ipgre_protocol = {
1329 .handler = ipgre_rcv,
1330 .err_handler = ipgre_err,
1333 static void ipgre_destroy_tunnels(struct ipgre_net *ign, struct list_head *head)
1337 for (prio = 0; prio < 4; prio++) {
1339 for (h = 0; h < HASH_SIZE; h++) {
1340 struct ip_tunnel *t;
1342 t = rtnl_dereference(ign->tunnels[prio][h]);
1345 unregister_netdevice_queue(t->dev, head);
1346 t = rtnl_dereference(t->next);
1352 static int __net_init ipgre_init_net(struct net *net)
1354 struct ipgre_net *ign = net_generic(net, ipgre_net_id);
1357 ign->fb_tunnel_dev = alloc_netdev(sizeof(struct ip_tunnel), "gre0",
1358 ipgre_tunnel_setup);
1359 if (!ign->fb_tunnel_dev) {
1363 dev_net_set(ign->fb_tunnel_dev, net);
1365 ipgre_fb_tunnel_init(ign->fb_tunnel_dev);
1366 ign->fb_tunnel_dev->rtnl_link_ops = &ipgre_link_ops;
1368 if ((err = register_netdev(ign->fb_tunnel_dev)))
1371 rcu_assign_pointer(ign->tunnels_wc[0],
1372 netdev_priv(ign->fb_tunnel_dev));
1376 ipgre_dev_free(ign->fb_tunnel_dev);
1381 static void __net_exit ipgre_exit_net(struct net *net)
1383 struct ipgre_net *ign;
1386 ign = net_generic(net, ipgre_net_id);
1388 ipgre_destroy_tunnels(ign, &list);
1389 unregister_netdevice_many(&list);
1393 static struct pernet_operations ipgre_net_ops = {
1394 .init = ipgre_init_net,
1395 .exit = ipgre_exit_net,
1396 .id = &ipgre_net_id,
1397 .size = sizeof(struct ipgre_net),
1400 static int ipgre_tunnel_validate(struct nlattr *tb[], struct nlattr *data[])
1408 if (data[IFLA_GRE_IFLAGS])
1409 flags |= nla_get_be16(data[IFLA_GRE_IFLAGS]);
1410 if (data[IFLA_GRE_OFLAGS])
1411 flags |= nla_get_be16(data[IFLA_GRE_OFLAGS]);
1412 if (flags & (GRE_VERSION|GRE_ROUTING))
1418 static int ipgre_tap_validate(struct nlattr *tb[], struct nlattr *data[])
1422 if (tb[IFLA_ADDRESS]) {
1423 if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
1425 if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
1426 return -EADDRNOTAVAIL;
1432 if (data[IFLA_GRE_REMOTE]) {
1433 memcpy(&daddr, nla_data(data[IFLA_GRE_REMOTE]), 4);
1439 return ipgre_tunnel_validate(tb, data);
1442 static void ipgre_netlink_parms(struct nlattr *data[],
1443 struct ip_tunnel_parm *parms)
1445 memset(parms, 0, sizeof(*parms));
1447 parms->iph.protocol = IPPROTO_GRE;
1452 if (data[IFLA_GRE_LINK])
1453 parms->link = nla_get_u32(data[IFLA_GRE_LINK]);
1455 if (data[IFLA_GRE_IFLAGS])
1456 parms->i_flags = nla_get_be16(data[IFLA_GRE_IFLAGS]);
1458 if (data[IFLA_GRE_OFLAGS])
1459 parms->o_flags = nla_get_be16(data[IFLA_GRE_OFLAGS]);
1461 if (data[IFLA_GRE_IKEY])
1462 parms->i_key = nla_get_be32(data[IFLA_GRE_IKEY]);
1464 if (data[IFLA_GRE_OKEY])
1465 parms->o_key = nla_get_be32(data[IFLA_GRE_OKEY]);
1467 if (data[IFLA_GRE_LOCAL])
1468 parms->iph.saddr = nla_get_be32(data[IFLA_GRE_LOCAL]);
1470 if (data[IFLA_GRE_REMOTE])
1471 parms->iph.daddr = nla_get_be32(data[IFLA_GRE_REMOTE]);
1473 if (data[IFLA_GRE_TTL])
1474 parms->iph.ttl = nla_get_u8(data[IFLA_GRE_TTL]);
1476 if (data[IFLA_GRE_TOS])
1477 parms->iph.tos = nla_get_u8(data[IFLA_GRE_TOS]);
1479 if (!data[IFLA_GRE_PMTUDISC] || nla_get_u8(data[IFLA_GRE_PMTUDISC]))
1480 parms->iph.frag_off = htons(IP_DF);
1483 static int ipgre_tap_init(struct net_device *dev)
1485 struct ip_tunnel *tunnel;
1487 tunnel = netdev_priv(dev);
1490 strcpy(tunnel->parms.name, dev->name);
1492 ipgre_tunnel_bind_dev(dev);
1494 dev->tstats = alloc_percpu(struct pcpu_tstats);
1501 static const struct net_device_ops ipgre_tap_netdev_ops = {
1502 .ndo_init = ipgre_tap_init,
1503 .ndo_uninit = ipgre_tunnel_uninit,
1504 .ndo_start_xmit = ipgre_tunnel_xmit,
1505 .ndo_set_mac_address = eth_mac_addr,
1506 .ndo_validate_addr = eth_validate_addr,
1507 .ndo_change_mtu = ipgre_tunnel_change_mtu,
1508 .ndo_get_stats = ipgre_get_stats,
1511 static void ipgre_tap_setup(struct net_device *dev)
1516 dev->netdev_ops = &ipgre_tap_netdev_ops;
1517 dev->destructor = ipgre_dev_free;
1520 dev->features |= NETIF_F_NETNS_LOCAL;
1523 static int ipgre_newlink(struct net *src_net, struct net_device *dev, struct nlattr *tb[],
1524 struct nlattr *data[])
1526 struct ip_tunnel *nt;
1527 struct net *net = dev_net(dev);
1528 struct ipgre_net *ign = net_generic(net, ipgre_net_id);
1532 nt = netdev_priv(dev);
1533 ipgre_netlink_parms(data, &nt->parms);
1535 if (ipgre_tunnel_find(net, &nt->parms, dev->type))
1538 if (dev->type == ARPHRD_ETHER && !tb[IFLA_ADDRESS])
1539 eth_hw_addr_random(dev);
1541 mtu = ipgre_tunnel_bind_dev(dev);
1545 /* Can use a lockless transmit, unless we generate output sequences */
1546 if (!(nt->parms.o_flags & GRE_SEQ))
1547 dev->features |= NETIF_F_LLTX;
1549 err = register_netdevice(dev);
1554 ipgre_tunnel_link(ign, nt);
1560 static int ipgre_changelink(struct net_device *dev, struct nlattr *tb[],
1561 struct nlattr *data[])
1563 struct ip_tunnel *t, *nt;
1564 struct net *net = dev_net(dev);
1565 struct ipgre_net *ign = net_generic(net, ipgre_net_id);
1566 struct ip_tunnel_parm p;
1569 if (dev == ign->fb_tunnel_dev)
1572 nt = netdev_priv(dev);
1573 ipgre_netlink_parms(data, &p);
1575 t = ipgre_tunnel_locate(net, &p, 0);
1583 if (dev->type != ARPHRD_ETHER) {
1584 unsigned int nflags = 0;
1586 if (ipv4_is_multicast(p.iph.daddr))
1587 nflags = IFF_BROADCAST;
1588 else if (p.iph.daddr)
1589 nflags = IFF_POINTOPOINT;
1591 if ((dev->flags ^ nflags) &
1592 (IFF_POINTOPOINT | IFF_BROADCAST))
1596 ipgre_tunnel_unlink(ign, t);
1597 t->parms.iph.saddr = p.iph.saddr;
1598 t->parms.iph.daddr = p.iph.daddr;
1599 t->parms.i_key = p.i_key;
1600 if (dev->type != ARPHRD_ETHER) {
1601 memcpy(dev->dev_addr, &p.iph.saddr, 4);
1602 memcpy(dev->broadcast, &p.iph.daddr, 4);
1604 ipgre_tunnel_link(ign, t);
1605 netdev_state_change(dev);
1608 t->parms.o_key = p.o_key;
1609 t->parms.iph.ttl = p.iph.ttl;
1610 t->parms.iph.tos = p.iph.tos;
1611 t->parms.iph.frag_off = p.iph.frag_off;
1613 if (t->parms.link != p.link) {
1614 t->parms.link = p.link;
1615 mtu = ipgre_tunnel_bind_dev(dev);
1618 netdev_state_change(dev);
1624 static size_t ipgre_get_size(const struct net_device *dev)
1629 /* IFLA_GRE_IFLAGS */
1631 /* IFLA_GRE_OFLAGS */
1637 /* IFLA_GRE_LOCAL */
1639 /* IFLA_GRE_REMOTE */
1645 /* IFLA_GRE_PMTUDISC */
1650 static int ipgre_fill_info(struct sk_buff *skb, const struct net_device *dev)
1652 struct ip_tunnel *t = netdev_priv(dev);
1653 struct ip_tunnel_parm *p = &t->parms;
1655 NLA_PUT_U32(skb, IFLA_GRE_LINK, p->link);
1656 NLA_PUT_BE16(skb, IFLA_GRE_IFLAGS, p->i_flags);
1657 NLA_PUT_BE16(skb, IFLA_GRE_OFLAGS, p->o_flags);
1658 NLA_PUT_BE32(skb, IFLA_GRE_IKEY, p->i_key);
1659 NLA_PUT_BE32(skb, IFLA_GRE_OKEY, p->o_key);
1660 NLA_PUT_BE32(skb, IFLA_GRE_LOCAL, p->iph.saddr);
1661 NLA_PUT_BE32(skb, IFLA_GRE_REMOTE, p->iph.daddr);
1662 NLA_PUT_U8(skb, IFLA_GRE_TTL, p->iph.ttl);
1663 NLA_PUT_U8(skb, IFLA_GRE_TOS, p->iph.tos);
1664 NLA_PUT_U8(skb, IFLA_GRE_PMTUDISC, !!(p->iph.frag_off & htons(IP_DF)));
1672 static const struct nla_policy ipgre_policy[IFLA_GRE_MAX + 1] = {
1673 [IFLA_GRE_LINK] = { .type = NLA_U32 },
1674 [IFLA_GRE_IFLAGS] = { .type = NLA_U16 },
1675 [IFLA_GRE_OFLAGS] = { .type = NLA_U16 },
1676 [IFLA_GRE_IKEY] = { .type = NLA_U32 },
1677 [IFLA_GRE_OKEY] = { .type = NLA_U32 },
1678 [IFLA_GRE_LOCAL] = { .len = FIELD_SIZEOF(struct iphdr, saddr) },
1679 [IFLA_GRE_REMOTE] = { .len = FIELD_SIZEOF(struct iphdr, daddr) },
1680 [IFLA_GRE_TTL] = { .type = NLA_U8 },
1681 [IFLA_GRE_TOS] = { .type = NLA_U8 },
1682 [IFLA_GRE_PMTUDISC] = { .type = NLA_U8 },
1685 static struct rtnl_link_ops ipgre_link_ops __read_mostly = {
1687 .maxtype = IFLA_GRE_MAX,
1688 .policy = ipgre_policy,
1689 .priv_size = sizeof(struct ip_tunnel),
1690 .setup = ipgre_tunnel_setup,
1691 .validate = ipgre_tunnel_validate,
1692 .newlink = ipgre_newlink,
1693 .changelink = ipgre_changelink,
1694 .get_size = ipgre_get_size,
1695 .fill_info = ipgre_fill_info,
1698 static struct rtnl_link_ops ipgre_tap_ops __read_mostly = {
1700 .maxtype = IFLA_GRE_MAX,
1701 .policy = ipgre_policy,
1702 .priv_size = sizeof(struct ip_tunnel),
1703 .setup = ipgre_tap_setup,
1704 .validate = ipgre_tap_validate,
1705 .newlink = ipgre_newlink,
1706 .changelink = ipgre_changelink,
1707 .get_size = ipgre_get_size,
1708 .fill_info = ipgre_fill_info,
1712 *	And now the module code and kernel interface.
1715 static int __init ipgre_init(void)
1719 printk(KERN_INFO "GRE over IPv4 tunneling driver\n");
1721 err = register_pernet_device(&ipgre_net_ops);
1725 err = gre_add_protocol(&ipgre_protocol, GREPROTO_CISCO);
1727 printk(KERN_INFO "ipgre init: can't add protocol\n");
1728 goto add_proto_failed;
1731 err = rtnl_link_register(&ipgre_link_ops);
1733 goto rtnl_link_failed;
1735 err = rtnl_link_register(&ipgre_tap_ops);
1737 goto tap_ops_failed;
1743 rtnl_link_unregister(&ipgre_link_ops);
1745 gre_del_protocol(&ipgre_protocol, GREPROTO_CISCO);
1747 unregister_pernet_device(&ipgre_net_ops);
1751 static void __exit ipgre_fini(void)
1753 rtnl_link_unregister(&ipgre_tap_ops);
1754 rtnl_link_unregister(&ipgre_link_ops);
1755 if (gre_del_protocol(&ipgre_protocol, GREPROTO_CISCO) < 0)
1756 printk(KERN_INFO "ipgre close: can't remove protocol\n");
1757 unregister_pernet_device(&ipgre_net_ops);
1760 module_init(ipgre_init);
1761 module_exit(ipgre_fini);
1762 MODULE_LICENSE("GPL");
1763 MODULE_ALIAS_RTNL_LINK("gre");
1764 MODULE_ALIAS_RTNL_LINK("gretap");
1765 MODULE_ALIAS_NETDEV("gre0");
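
/*
 * Illustrative userspace configuration through the rtnl_link_ops
 * registered above (interface names, addresses and key are examples):
 *
 *	ip link add gre1 type gre local 198.51.100.2 remote 192.0.2.1 ttl 64 key 42
 *	ip link add gretap1 type gretap local 198.51.100.2 remote 192.0.2.1
 */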