/*
 * VXLAN: Virtual eXtensible Local Area Network
 *
 * Copyright (c) 2012-2013 Vyatta Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/skbuff.h>
#include <linux/rculist.h>
#include <linux/netdevice.h>
#include <linux/udp.h>
#include <linux/igmp.h>
#include <linux/etherdevice.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/hash.h>
#include <linux/ethtool.h>
#include <net/ndisc.h>
#include <net/ip_tunnels.h>
#include <net/rtnetlink.h>
#include <net/route.h>
#include <net/dsfield.h>
#include <net/inet_ecn.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <net/vxlan.h>
#include <net/protocol.h>
#if IS_ENABLED(CONFIG_IPV6)
#include <net/addrconf.h>
#include <net/ip6_tunnel.h>
#include <net/ip6_checksum.h>
#endif

#define VXLAN_VERSION "0.1"

#define PORT_HASH_BITS 8
#define PORT_HASH_SIZE (1<<PORT_HASH_BITS)
#define VNI_HASH_BITS 10
#define VNI_HASH_SIZE (1<<VNI_HASH_BITS)
#define FDB_HASH_BITS 8
#define FDB_HASH_SIZE (1<<FDB_HASH_BITS)
#define FDB_AGE_DEFAULT 300 /* 5 min */
#define FDB_AGE_INTERVAL (10 * HZ) /* rescan interval */

#define VXLAN_N_VID (1u << 24)
#define VXLAN_VID_MASK (VXLAN_N_VID - 1)
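/* The VNI is a 24-bit field: VXLAN_N_VID == 16777216 possible IDs,
 * VXLAN_VID_MASK == 0x00ffffff.
 */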
#define VXLAN_HLEN (sizeof(struct udphdr) + sizeof(struct vxlanhdr))

#define VXLAN_FLAGS 0x08000000 /* struct vxlanhdr.vx_flags required value. */
/* VXLAN protocol header */
struct vxlanhdr {
        __be32 vx_flags;
        __be32 vx_vni;
};
/* UDP port for VXLAN traffic.
 * The IANA assigned port is 4789, but the Linux default is 8472
 * for compatibility with early adopters.
 */
static unsigned short vxlan_port __read_mostly = 8472;
module_param_named(udp_port, vxlan_port, ushort, 0444);
MODULE_PARM_DESC(udp_port, "Destination UDP port");
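/* Illustrative usage (not part of this file): the default can be
 * overridden at module load time, e.g. "modprobe vxlan udp_port=4789"
 * to use the IANA-assigned port; mode 0444 makes the parameter
 * read-only once the module is loaded.
 */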
static bool log_ecn_error = true;
module_param(log_ecn_error, bool, 0644);
MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");

static int vxlan_net_id;

static const u8 all_zeros_mac[ETH_ALEN];
/* per-network namespace private data for this module */
struct vxlan_net {
        struct list_head  vxlan_list;
        struct hlist_head sock_list[PORT_HASH_SIZE];
        spinlock_t        sock_lock;
};

union vxlan_addr {
        struct sockaddr_in  sin;
        struct sockaddr_in6 sin6;
        struct sockaddr     sa;
};

struct vxlan_rdst {
        union vxlan_addr remote_ip;
        __be16           remote_port;
        u32              remote_vni;
        u32              remote_ifindex;
        struct list_head list;
        struct rcu_head  rcu;
};

/* Forwarding table entry */
struct vxlan_fdb {
        struct hlist_node hlist;   /* linked list of entries */
        struct rcu_head   rcu;
        unsigned long     updated; /* jiffies */
        unsigned long     used;
        struct list_head  remotes;
        u16               state;   /* see ndm_state */
        u8                flags;   /* see ndm_flags */
        u8                eth_addr[ETH_ALEN];
};

/* Pseudo network device */
struct vxlan_dev {
        struct hlist_node hlist;       /* vni hash table */
        struct list_head  next;        /* vxlan's per namespace list */
        struct vxlan_sock *vn_sock;    /* listening socket */
        struct net_device *dev;
        struct net        *net;        /* netns for packet i/o */
        struct vxlan_rdst default_dst; /* default destination */
        union vxlan_addr  saddr;       /* source address */
        __be16            dst_port;
        __u16             port_min;    /* source port range */
        __u16             port_max;
        __u8              tos;         /* TOS override */
        __u8              ttl;
        u32               flags;       /* VXLAN_F_* in vxlan.h */

        struct work_struct sock_work;
        struct work_struct igmp_join;
        struct work_struct igmp_leave;

        unsigned long     age_interval;
        struct timer_list age_timer;
        spinlock_t        hash_lock;
        unsigned int      addrcnt;
        unsigned int      addrmax;

        struct hlist_head fdb_head[FDB_HASH_SIZE];
};
/* salt for hash table */
static u32 vxlan_salt __read_mostly;
static struct workqueue_struct *vxlan_wq;

static void vxlan_sock_work(struct work_struct *work);

#if IS_ENABLED(CONFIG_IPV6)
bool vxlan_addr_equal(const union vxlan_addr *a, const union vxlan_addr *b)
if (a->sa.sa_family != b->sa.sa_family)
if (a->sa.sa_family == AF_INET6)
return ipv6_addr_equal(&a->sin6.sin6_addr, &b->sin6.sin6_addr);
return a->sin.sin_addr.s_addr == b->sin.sin_addr.s_addr;

static inline bool vxlan_addr_any(const union vxlan_addr *ipa)
if (ipa->sa.sa_family == AF_INET6)
return ipv6_addr_any(&ipa->sin6.sin6_addr);
return ipa->sin.sin_addr.s_addr == htonl(INADDR_ANY);

static inline bool vxlan_addr_multicast(const union vxlan_addr *ipa)
if (ipa->sa.sa_family == AF_INET6)
return ipv6_addr_is_multicast(&ipa->sin6.sin6_addr);
return IN_MULTICAST(ntohl(ipa->sin.sin_addr.s_addr));

static int vxlan_nla_get_addr(union vxlan_addr *ip, struct nlattr *nla)
if (nla_len(nla) >= sizeof(struct in6_addr)) {
nla_memcpy(&ip->sin6.sin6_addr, nla, sizeof(struct in6_addr));
ip->sa.sa_family = AF_INET6;
} else if (nla_len(nla) >= sizeof(__be32)) {
ip->sin.sin_addr.s_addr = nla_get_be32(nla);
ip->sa.sa_family = AF_INET;
return -EAFNOSUPPORT;

static int vxlan_nla_put_addr(struct sk_buff *skb, int attr,
const union vxlan_addr *ip)
if (ip->sa.sa_family == AF_INET6)
return nla_put(skb, attr, sizeof(struct in6_addr), &ip->sin6.sin6_addr);
return nla_put_be32(skb, attr, ip->sin.sin_addr.s_addr);

#else /* !CONFIG_IPV6 */

bool vxlan_addr_equal(const union vxlan_addr *a, const union vxlan_addr *b)
return a->sin.sin_addr.s_addr == b->sin.sin_addr.s_addr;

static inline bool vxlan_addr_any(const union vxlan_addr *ipa)
return ipa->sin.sin_addr.s_addr == htonl(INADDR_ANY);

static inline bool vxlan_addr_multicast(const union vxlan_addr *ipa)
return IN_MULTICAST(ntohl(ipa->sin.sin_addr.s_addr));

static int vxlan_nla_get_addr(union vxlan_addr *ip, struct nlattr *nla)
if (nla_len(nla) >= sizeof(struct in6_addr)) {
return -EAFNOSUPPORT;
} else if (nla_len(nla) >= sizeof(__be32)) {
ip->sin.sin_addr.s_addr = nla_get_be32(nla);
ip->sa.sa_family = AF_INET;
return -EAFNOSUPPORT;

static int vxlan_nla_put_addr(struct sk_buff *skb, int attr,
const union vxlan_addr *ip)
return nla_put_be32(skb, attr, ip->sin.sin_addr.s_addr);
#endif

/* Virtual Network hash table head */
static inline struct hlist_head *vni_head(struct vxlan_sock *vs, u32 id)
return &vs->vni_list[hash_32(id, VNI_HASH_BITS)];

/* Socket hash table head */
static inline struct hlist_head *vs_head(struct net *net, __be16 port)
struct vxlan_net *vn = net_generic(net, vxlan_net_id);
return &vn->sock_list[hash_32(ntohs(port), PORT_HASH_BITS)];

/* First remote destination for a forwarding entry.
 * Guaranteed to be non-NULL because remotes are never deleted.
 */
static inline struct vxlan_rdst *first_remote_rcu(struct vxlan_fdb *fdb)
return list_entry_rcu(fdb->remotes.next, struct vxlan_rdst, list);

static inline struct vxlan_rdst *first_remote_rtnl(struct vxlan_fdb *fdb)
return list_first_entry(&fdb->remotes, struct vxlan_rdst, list);

/* Find VXLAN socket based on network namespace and UDP port */
static struct vxlan_sock *vxlan_find_sock(struct net *net, __be16 port)
struct vxlan_sock *vs;
hlist_for_each_entry_rcu(vs, vs_head(net, port), hlist) {
if (inet_sk(vs->sock->sk)->inet_sport == port)

static struct vxlan_dev *vxlan_vs_find_vni(struct vxlan_sock *vs, u32 id)
struct vxlan_dev *vxlan;
hlist_for_each_entry_rcu(vxlan, vni_head(vs, id), hlist) {
if (vxlan->default_dst.remote_vni == id)

/* Look up VNI in a per net namespace table */
static struct vxlan_dev *vxlan_find_vni(struct net *net, u32 id, __be16 port)
struct vxlan_sock *vs;
vs = vxlan_find_sock(net, port);
return vxlan_vs_find_vni(vs, id);

/* Fill in neighbour message in skbuff. */
static int vxlan_fdb_info(struct sk_buff *skb, struct vxlan_dev *vxlan,
const struct vxlan_fdb *fdb,
u32 portid, u32 seq, int type, unsigned int flags,
const struct vxlan_rdst *rdst)
unsigned long now = jiffies;
struct nda_cacheinfo ci;
struct nlmsghdr *nlh;
bool send_ip, send_eth;

nlh = nlmsg_put(skb, portid, seq, type, sizeof(*ndm), flags);
ndm = nlmsg_data(nlh);
memset(ndm, 0, sizeof(*ndm));

send_eth = send_ip = true;

if (type == RTM_GETNEIGH) {
ndm->ndm_family = AF_INET;
send_ip = !vxlan_addr_any(&rdst->remote_ip);
send_eth = !is_zero_ether_addr(fdb->eth_addr);
ndm->ndm_family = AF_BRIDGE;
ndm->ndm_state = fdb->state;
ndm->ndm_ifindex = vxlan->dev->ifindex;
ndm->ndm_flags = fdb->flags;
ndm->ndm_type = RTN_UNICAST;

if (send_eth && nla_put(skb, NDA_LLADDR, ETH_ALEN, &fdb->eth_addr))
goto nla_put_failure;
if (send_ip && vxlan_nla_put_addr(skb, NDA_DST, &rdst->remote_ip))
goto nla_put_failure;
if (rdst->remote_port && rdst->remote_port != vxlan->dst_port &&
nla_put_be16(skb, NDA_PORT, rdst->remote_port))
goto nla_put_failure;
if (rdst->remote_vni != vxlan->default_dst.remote_vni &&
nla_put_u32(skb, NDA_VNI, rdst->remote_vni))
goto nla_put_failure;
if (rdst->remote_ifindex &&
nla_put_u32(skb, NDA_IFINDEX, rdst->remote_ifindex))
goto nla_put_failure;

ci.ndm_used = jiffies_to_clock_t(now - fdb->used);
ci.ndm_confirmed = 0;
ci.ndm_updated = jiffies_to_clock_t(now - fdb->updated);

if (nla_put(skb, NDA_CACHEINFO, sizeof(ci), &ci))
goto nla_put_failure;

return nlmsg_end(skb, nlh);

nlmsg_cancel(skb, nlh);

static inline size_t vxlan_nlmsg_size(void)
return NLMSG_ALIGN(sizeof(struct ndmsg))
+ nla_total_size(ETH_ALEN) /* NDA_LLADDR */
+ nla_total_size(sizeof(struct in6_addr)) /* NDA_DST */
+ nla_total_size(sizeof(__be16)) /* NDA_PORT */
+ nla_total_size(sizeof(__be32)) /* NDA_VNI */
+ nla_total_size(sizeof(__u32)) /* NDA_IFINDEX */
+ nla_total_size(sizeof(struct nda_cacheinfo));

static void vxlan_fdb_notify(struct vxlan_dev *vxlan, struct vxlan_fdb *fdb,
struct vxlan_rdst *rd, int type)
struct net *net = dev_net(vxlan->dev);

skb = nlmsg_new(vxlan_nlmsg_size(), GFP_ATOMIC);

err = vxlan_fdb_info(skb, vxlan, fdb, 0, 0, type, 0, rd);
/* -EMSGSIZE implies BUG in vxlan_nlmsg_size() */
WARN_ON(err == -EMSGSIZE);

rtnl_notify(skb, net, 0, RTNLGRP_NEIGH, NULL, GFP_ATOMIC);

rtnl_set_sk_err(net, RTNLGRP_NEIGH, err);

static void vxlan_ip_miss(struct net_device *dev, union vxlan_addr *ipa)
struct vxlan_dev *vxlan = netdev_priv(dev);
struct vxlan_fdb f = {
struct vxlan_rdst remote = {
.remote_ip = *ipa, /* goes to NDA_DST */
.remote_vni = VXLAN_N_VID,

vxlan_fdb_notify(vxlan, &f, &remote, RTM_GETNEIGH);

static void vxlan_fdb_miss(struct vxlan_dev *vxlan, const u8 eth_addr[ETH_ALEN])
struct vxlan_fdb f = {
struct vxlan_rdst remote = { };

memcpy(f.eth_addr, eth_addr, ETH_ALEN);

vxlan_fdb_notify(vxlan, &f, &remote, RTM_GETNEIGH);

/* Hash Ethernet address */
static u32 eth_hash(const unsigned char *addr)
u64 value = get_unaligned((u64 *)addr);
/* only want 6 bytes */
#ifdef __BIG_ENDIAN
value >>= 16;
#else
value <<= 16;
#endif
return hash_64(value, FDB_HASH_BITS);
/* Hash chain to use given mac address */
static inline struct hlist_head *vxlan_fdb_head(struct vxlan_dev *vxlan,
const u8 *mac)
return &vxlan->fdb_head[eth_hash(mac)];

/* Look up Ethernet address in forwarding table */
static struct vxlan_fdb *__vxlan_find_mac(struct vxlan_dev *vxlan,
const u8 *mac)
struct hlist_head *head = vxlan_fdb_head(vxlan, mac);

hlist_for_each_entry_rcu(f, head, hlist) {
if (ether_addr_equal(mac, f->eth_addr))

static struct vxlan_fdb *vxlan_find_mac(struct vxlan_dev *vxlan,
const u8 *mac)
f = __vxlan_find_mac(vxlan, mac);

/* caller should hold vxlan->hash_lock */
static struct vxlan_rdst *vxlan_fdb_find_rdst(struct vxlan_fdb *f,
union vxlan_addr *ip, __be16 port,
__u32 vni, __u32 ifindex)
struct vxlan_rdst *rd;

list_for_each_entry(rd, &f->remotes, list) {
if (vxlan_addr_equal(&rd->remote_ip, ip) &&
rd->remote_port == port &&
rd->remote_vni == vni &&
rd->remote_ifindex == ifindex)

/* Replace destination of unicast mac */
static int vxlan_fdb_replace(struct vxlan_fdb *f,
union vxlan_addr *ip, __be16 port, __u32 vni, __u32 ifindex)
struct vxlan_rdst *rd;

rd = vxlan_fdb_find_rdst(f, ip, port, vni, ifindex);

rd = list_first_entry_or_null(&f->remotes, struct vxlan_rdst, list);

rd->remote_port = port;
rd->remote_vni = vni;
rd->remote_ifindex = ifindex;

/* Add/update destinations for multicast */
static int vxlan_fdb_append(struct vxlan_fdb *f,
union vxlan_addr *ip, __be16 port, __u32 vni,
__u32 ifindex, struct vxlan_rdst **rdp)
struct vxlan_rdst *rd;

rd = vxlan_fdb_find_rdst(f, ip, port, vni, ifindex);

rd = kmalloc(sizeof(*rd), GFP_ATOMIC);

rd->remote_port = port;
rd->remote_vni = vni;
rd->remote_ifindex = ifindex;

list_add_tail_rcu(&rd->list, &f->remotes);

static struct sk_buff **vxlan_gro_receive(struct sk_buff **head, struct sk_buff *skb)
struct sk_buff *p, **pp = NULL;
struct vxlanhdr *vh, *vh2;
struct ethhdr *eh, *eh2;
unsigned int hlen, off_vx, off_eth;
const struct packet_offload *ptype;

off_vx = skb_gro_offset(skb);
hlen = off_vx + sizeof(*vh);
vh = skb_gro_header_fast(skb, off_vx);
if (skb_gro_header_hard(skb, hlen)) {
vh = skb_gro_header_slow(skb, hlen, off_vx);

skb_gro_pull(skb, sizeof(struct vxlanhdr)); /* pull vxlan header */
skb_gro_postpull_rcsum(skb, vh, sizeof(struct vxlanhdr));

off_eth = skb_gro_offset(skb);
hlen = off_eth + sizeof(*eh);
eh = skb_gro_header_fast(skb, off_eth);
if (skb_gro_header_hard(skb, hlen)) {
eh = skb_gro_header_slow(skb, hlen, off_eth);

for (p = *head; p; p = p->next) {
if (!NAPI_GRO_CB(p)->same_flow)

vh2 = (struct vxlanhdr *)(p->data + off_vx);
eh2 = (struct ethhdr *)(p->data + off_eth);
if (vh->vx_vni != vh2->vx_vni || compare_ether_header(eh, eh2)) {
NAPI_GRO_CB(p)->same_flow = 0;

ptype = gro_find_receive_by_type(type);

skb_gro_pull(skb, sizeof(*eh)); /* pull inner eth header */
skb_gro_postpull_rcsum(skb, eh, sizeof(*eh));
pp = ptype->callbacks.gro_receive(head, skb);

NAPI_GRO_CB(skb)->flush |= flush;

static int vxlan_gro_complete(struct sk_buff *skb, int nhoff)
struct packet_offload *ptype;
int vxlan_len = sizeof(struct vxlanhdr) + sizeof(struct ethhdr);

eh = (struct ethhdr *)(skb->data + nhoff + sizeof(struct vxlanhdr));

ptype = gro_find_complete_by_type(type);

err = ptype->callbacks.gro_complete(skb, nhoff + vxlan_len);

/* Notify netdevs that UDP port started listening */
static void vxlan_notify_add_rx_port(struct vxlan_sock *vs)
struct net_device *dev;
struct sock *sk = vs->sock->sk;
struct net *net = sock_net(sk);
sa_family_t sa_family = sk->sk_family;
__be16 port = inet_sk(sk)->inet_sport;

if (sa_family == AF_INET) {
err = udp_add_offload(&vs->udp_offloads);
pr_warn("vxlan: udp_add_offload failed with status %d\n", err);

for_each_netdev_rcu(net, dev) {
if (dev->netdev_ops->ndo_add_vxlan_port)
dev->netdev_ops->ndo_add_vxlan_port(dev, sa_family,
/* Notify netdevs that UDP port is no longer listening */
static void vxlan_notify_del_rx_port(struct vxlan_sock *vs)
struct net_device *dev;
struct sock *sk = vs->sock->sk;
struct net *net = sock_net(sk);
sa_family_t sa_family = sk->sk_family;
__be16 port = inet_sk(sk)->inet_sport;

for_each_netdev_rcu(net, dev) {
if (dev->netdev_ops->ndo_del_vxlan_port)
dev->netdev_ops->ndo_del_vxlan_port(dev, sa_family,

if (sa_family == AF_INET)
udp_del_offload(&vs->udp_offloads);

/* Add new entry to forwarding table -- assumes lock held */
static int vxlan_fdb_create(struct vxlan_dev *vxlan,
const u8 *mac, union vxlan_addr *ip,
__u16 state, __u16 flags,
__be16 port, __u32 vni, __u32 ifindex,
__u8 ndm_flags)
struct vxlan_rdst *rd = NULL;

f = __vxlan_find_mac(vxlan, mac);
if (flags & NLM_F_EXCL) {
netdev_dbg(vxlan->dev,
"lost race to create %pM\n", mac);
if (f->state != state) {
f->updated = jiffies;
if (f->flags != ndm_flags) {
f->flags = ndm_flags;
f->updated = jiffies;
if ((flags & NLM_F_REPLACE)) {
/* Only change unicasts */
if (!(is_multicast_ether_addr(f->eth_addr) ||
is_zero_ether_addr(f->eth_addr))) {
int rc = vxlan_fdb_replace(f, ip, port, vni,
if ((flags & NLM_F_APPEND) &&
(is_multicast_ether_addr(f->eth_addr) ||
is_zero_ether_addr(f->eth_addr))) {
int rc = vxlan_fdb_append(f, ip, port, vni, ifindex,

if (!(flags & NLM_F_CREATE))

if (vxlan->addrmax && vxlan->addrcnt >= vxlan->addrmax)

/* Disallow replace to add a multicast entry */
if ((flags & NLM_F_REPLACE) &&
(is_multicast_ether_addr(mac) || is_zero_ether_addr(mac)))

netdev_dbg(vxlan->dev, "add %pM -> %pIS\n", mac, ip);
f = kmalloc(sizeof(*f), GFP_ATOMIC);

f->flags = ndm_flags;
f->updated = f->used = jiffies;
INIT_LIST_HEAD(&f->remotes);
memcpy(f->eth_addr, mac, ETH_ALEN);

vxlan_fdb_append(f, ip, port, vni, ifindex, &rd);

hlist_add_head_rcu(&f->hlist,
vxlan_fdb_head(vxlan, mac));

rd = first_remote_rtnl(f);
vxlan_fdb_notify(vxlan, f, rd, RTM_NEWNEIGH);

static void vxlan_fdb_free(struct rcu_head *head)
struct vxlan_fdb *f = container_of(head, struct vxlan_fdb, rcu);
struct vxlan_rdst *rd, *nd;

list_for_each_entry_safe(rd, nd, &f->remotes, list)

static void vxlan_fdb_destroy(struct vxlan_dev *vxlan, struct vxlan_fdb *f)
netdev_dbg(vxlan->dev,
"delete %pM\n", f->eth_addr);

vxlan_fdb_notify(vxlan, f, first_remote_rtnl(f), RTM_DELNEIGH);

hlist_del_rcu(&f->hlist);
call_rcu(&f->rcu, vxlan_fdb_free);

static int vxlan_fdb_parse(struct nlattr *tb[], struct vxlan_dev *vxlan,
union vxlan_addr *ip, __be16 *port, u32 *vni, u32 *ifindex)
struct net *net = dev_net(vxlan->dev);

err = vxlan_nla_get_addr(ip, tb[NDA_DST]);

union vxlan_addr *remote = &vxlan->default_dst.remote_ip;
if (remote->sa.sa_family == AF_INET) {
ip->sin.sin_addr.s_addr = htonl(INADDR_ANY);
ip->sa.sa_family = AF_INET;
#if IS_ENABLED(CONFIG_IPV6)
ip->sin6.sin6_addr = in6addr_any;
ip->sa.sa_family = AF_INET6;

if (nla_len(tb[NDA_PORT]) != sizeof(__be16))
*port = nla_get_be16(tb[NDA_PORT]);
*port = vxlan->dst_port;

if (nla_len(tb[NDA_VNI]) != sizeof(u32))
*vni = nla_get_u32(tb[NDA_VNI]);
*vni = vxlan->default_dst.remote_vni;

if (tb[NDA_IFINDEX]) {
struct net_device *tdev;

if (nla_len(tb[NDA_IFINDEX]) != sizeof(u32))
*ifindex = nla_get_u32(tb[NDA_IFINDEX]);
tdev = __dev_get_by_index(net, *ifindex);
return -EADDRNOTAVAIL;

/* Add static entry (via netlink) */
static int vxlan_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
struct net_device *dev,
const unsigned char *addr, u16 flags)
struct vxlan_dev *vxlan = netdev_priv(dev);
/* struct net *net = dev_net(vxlan->dev); */

if (!(ndm->ndm_state & (NUD_PERMANENT|NUD_REACHABLE))) {
pr_info("RTM_NEWNEIGH with invalid state %#x\n",

if (tb[NDA_DST] == NULL)

err = vxlan_fdb_parse(tb, vxlan, &ip, &port, &vni, &ifindex);

if (vxlan->default_dst.remote_ip.sa.sa_family != ip.sa.sa_family)
return -EAFNOSUPPORT;

spin_lock_bh(&vxlan->hash_lock);
err = vxlan_fdb_create(vxlan, addr, &ip, ndm->ndm_state, flags,
port, vni, ifindex, ndm->ndm_flags);
spin_unlock_bh(&vxlan->hash_lock);

/* Delete entry (via netlink) */
static int vxlan_fdb_delete(struct ndmsg *ndm, struct nlattr *tb[],
struct net_device *dev,
const unsigned char *addr)
struct vxlan_dev *vxlan = netdev_priv(dev);
struct vxlan_rdst *rd = NULL;

err = vxlan_fdb_parse(tb, vxlan, &ip, &port, &vni, &ifindex);

spin_lock_bh(&vxlan->hash_lock);
f = vxlan_find_mac(vxlan, addr);

if (!vxlan_addr_any(&ip)) {
rd = vxlan_fdb_find_rdst(f, &ip, port, vni, ifindex);

/* remove a destination if it's not the only one on the list,
 * otherwise destroy the fdb entry
 */
if (rd && !list_is_singular(&f->remotes)) {
list_del_rcu(&rd->list);
vxlan_fdb_notify(vxlan, f, rd, RTM_DELNEIGH);

vxlan_fdb_destroy(vxlan, f);

spin_unlock_bh(&vxlan->hash_lock);

/* Dump forwarding table */
static int vxlan_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb,
struct net_device *dev, int idx)
struct vxlan_dev *vxlan = netdev_priv(dev);

for (h = 0; h < FDB_HASH_SIZE; ++h) {
hlist_for_each_entry_rcu(f, &vxlan->fdb_head[h], hlist) {
struct vxlan_rdst *rd;

if (idx < cb->args[0])

list_for_each_entry_rcu(rd, &f->remotes, list) {
err = vxlan_fdb_info(skb, vxlan, f,
NETLINK_CB(cb->skb).portid,

/* Watch incoming packets to learn mapping between Ethernet address
 * and Tunnel endpoint.
 * Return true if packet is bogus and should be dropped.
 */
static bool vxlan_snoop(struct net_device *dev,
union vxlan_addr *src_ip, const u8 *src_mac)
struct vxlan_dev *vxlan = netdev_priv(dev);

f = vxlan_find_mac(vxlan, src_mac);
struct vxlan_rdst *rdst = first_remote_rcu(f);

if (likely(vxlan_addr_equal(&rdst->remote_ip, src_ip)))

/* Don't migrate static entries, drop packets */
if (f->state & NUD_NOARP)

"%pM migrated from %pIS to %pIS\n",
src_mac, &rdst->remote_ip, src_ip);
rdst->remote_ip = *src_ip;
f->updated = jiffies;
vxlan_fdb_notify(vxlan, f, rdst, RTM_NEWNEIGH);

/* learned new entry */
spin_lock(&vxlan->hash_lock);

/* close off race between vxlan_flush and incoming packets */
if (netif_running(dev))
vxlan_fdb_create(vxlan, src_mac, src_ip,
NLM_F_EXCL|NLM_F_CREATE,
vxlan->default_dst.remote_vni,
spin_unlock(&vxlan->hash_lock);

/* See if multicast group is already in use by other ID */
static bool vxlan_group_used(struct vxlan_net *vn, struct vxlan_dev *dev)
struct vxlan_dev *vxlan;

/* The vxlan_sock is only used by dev, leaving group has
 * no effect on other vxlan devices.
 */
if (atomic_read(&dev->vn_sock->refcnt) == 1)

list_for_each_entry(vxlan, &vn->vxlan_list, next) {
if (!netif_running(vxlan->dev) || vxlan == dev)

if (vxlan->vn_sock != dev->vn_sock)

if (!vxlan_addr_equal(&vxlan->default_dst.remote_ip,
&dev->default_dst.remote_ip))

if (vxlan->default_dst.remote_ifindex !=
dev->default_dst.remote_ifindex)

static void vxlan_sock_hold(struct vxlan_sock *vs)
atomic_inc(&vs->refcnt);

void vxlan_sock_release(struct vxlan_sock *vs)
struct sock *sk = vs->sock->sk;
struct net *net = sock_net(sk);
struct vxlan_net *vn = net_generic(net, vxlan_net_id);

if (!atomic_dec_and_test(&vs->refcnt))

spin_lock(&vn->sock_lock);
hlist_del_rcu(&vs->hlist);
rcu_assign_sk_user_data(vs->sock->sk, NULL);
vxlan_notify_del_rx_port(vs);
spin_unlock(&vn->sock_lock);

queue_work(vxlan_wq, &vs->del_work);
EXPORT_SYMBOL_GPL(vxlan_sock_release);
/* Callback to update multicast group membership when first VNI on
 * multicast address is brought up.
 * Done as workqueue because ip_mc_join_group acquires RTNL.
 */
static void vxlan_igmp_join(struct work_struct *work)
struct vxlan_dev *vxlan = container_of(work, struct vxlan_dev, igmp_join);
struct vxlan_sock *vs = vxlan->vn_sock;
struct sock *sk = vs->sock->sk;
union vxlan_addr *ip = &vxlan->default_dst.remote_ip;
int ifindex = vxlan->default_dst.remote_ifindex;

if (ip->sa.sa_family == AF_INET) {
struct ip_mreqn mreq = {
.imr_multiaddr.s_addr = ip->sin.sin_addr.s_addr,
.imr_ifindex = ifindex,

ip_mc_join_group(sk, &mreq);
#if IS_ENABLED(CONFIG_IPV6)
ipv6_stub->ipv6_sock_mc_join(sk, ifindex,
&ip->sin6.sin6_addr);

vxlan_sock_release(vs);
dev_put(vxlan->dev);

/* Inverse of vxlan_igmp_join when last VNI is brought down */
static void vxlan_igmp_leave(struct work_struct *work)
struct vxlan_dev *vxlan = container_of(work, struct vxlan_dev, igmp_leave);
struct vxlan_sock *vs = vxlan->vn_sock;
struct sock *sk = vs->sock->sk;
union vxlan_addr *ip = &vxlan->default_dst.remote_ip;
int ifindex = vxlan->default_dst.remote_ifindex;

if (ip->sa.sa_family == AF_INET) {
struct ip_mreqn mreq = {
.imr_multiaddr.s_addr = ip->sin.sin_addr.s_addr,
.imr_ifindex = ifindex,

ip_mc_leave_group(sk, &mreq);
#if IS_ENABLED(CONFIG_IPV6)
ipv6_stub->ipv6_sock_mc_drop(sk, ifindex,
&ip->sin6.sin6_addr);

vxlan_sock_release(vs);
dev_put(vxlan->dev);

/* Callback from net/ipv4/udp.c to receive packets */
static int vxlan_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
struct vxlan_sock *vs;
struct vxlanhdr *vxh;
/* Need VXLAN and inner Ethernet header to be present */
if (!pskb_may_pull(skb, VXLAN_HLEN))

/* Return packets with reserved bits set */
vxh = (struct vxlanhdr *)(udp_hdr(skb) + 1);
if (vxh->vx_flags != htonl(VXLAN_FLAGS) ||
(vxh->vx_vni & htonl(0xff))) {
netdev_dbg(skb->dev, "invalid vxlan flags=%#x vni=%#x\n",
ntohl(vxh->vx_flags), ntohl(vxh->vx_vni));

if (iptunnel_pull_header(skb, VXLAN_HLEN, htons(ETH_P_TEB)))

vs = rcu_dereference_sk_user_data(sk);

skb_pop_rcv_encapsulation(skb);

vs->rcv(vs, skb, vxh->vx_vni);

/* Consume bad packet */

/* Return non vxlan pkt */

static void vxlan_rcv(struct vxlan_sock *vs,
struct sk_buff *skb, __be32 vx_vni)
struct iphdr *oip = NULL;
struct ipv6hdr *oip6 = NULL;
struct vxlan_dev *vxlan;
struct pcpu_sw_netstats *stats;
union vxlan_addr saddr;
union vxlan_addr *remote_ip;

vni = ntohl(vx_vni) >> 8;
/* Is this VNI defined? */
vxlan = vxlan_vs_find_vni(vs, vni);

remote_ip = &vxlan->default_dst.remote_ip;
skb_reset_mac_header(skb);
skb_scrub_packet(skb, !net_eq(vxlan->net, dev_net(vxlan->dev)));
skb->protocol = eth_type_trans(skb, vxlan->dev);
skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);

/* Ignore packet loops (and multicast echo) */
if (ether_addr_equal(eth_hdr(skb)->h_source, vxlan->dev->dev_addr))

/* Re-examine inner Ethernet packet */
if (remote_ip->sa.sa_family == AF_INET) {
saddr.sin.sin_addr.s_addr = oip->saddr;
saddr.sa.sa_family = AF_INET;
#if IS_ENABLED(CONFIG_IPV6)
oip6 = ipv6_hdr(skb);
saddr.sin6.sin6_addr = oip6->saddr;
saddr.sa.sa_family = AF_INET6;

if ((vxlan->flags & VXLAN_F_LEARN) &&
vxlan_snoop(skb->dev, &saddr, eth_hdr(skb)->h_source))

skb_reset_network_header(skb);

err = IP6_ECN_decapsulate(oip6, skb);
err = IP_ECN_decapsulate(oip, skb);

if (unlikely(err)) {
if (log_ecn_error) {
net_info_ratelimited("non-ECT from %pI6\n",
net_info_ratelimited("non-ECT from %pI4 with TOS=%#x\n",
&oip->saddr, oip->tos);

++vxlan->dev->stats.rx_frame_errors;
++vxlan->dev->stats.rx_errors;

stats = this_cpu_ptr(vxlan->dev->tstats);
u64_stats_update_begin(&stats->syncp);
stats->rx_packets++;
stats->rx_bytes += skb->len;
u64_stats_update_end(&stats->syncp);

/* Consume bad packet */

static int arp_reduce(struct net_device *dev, struct sk_buff *skb)
struct vxlan_dev *vxlan = netdev_priv(dev);
struct arphdr *parp;
struct neighbour *n;

if (dev->flags & IFF_NOARP)

if (!pskb_may_pull(skb, arp_hdr_len(dev))) {
dev->stats.tx_dropped++;

parp = arp_hdr(skb);

if ((parp->ar_hrd != htons(ARPHRD_ETHER) &&
parp->ar_hrd != htons(ARPHRD_IEEE802)) ||
parp->ar_pro != htons(ETH_P_IP) ||
parp->ar_op != htons(ARPOP_REQUEST) ||
parp->ar_hln != dev->addr_len ||

arpptr = (u8 *)parp + sizeof(struct arphdr);
arpptr += dev->addr_len; /* sha */
memcpy(&sip, arpptr, sizeof(sip));
arpptr += sizeof(sip);
arpptr += dev->addr_len; /* tha */
memcpy(&tip, arpptr, sizeof(tip));

if (ipv4_is_loopback(tip) ||
ipv4_is_multicast(tip))

n = neigh_lookup(&arp_tbl, &tip, dev);
struct vxlan_fdb *f;
struct sk_buff *reply;

if (!(n->nud_state & NUD_CONNECTED)) {

f = vxlan_find_mac(vxlan, n->ha);
if (f && vxlan_addr_any(&(first_remote_rcu(f)->remote_ip))) {
/* bridge-local neighbor */

reply = arp_create(ARPOP_REPLY, ETH_P_ARP, sip, dev, tip, sha,

skb_reset_mac_header(reply);
__skb_pull(reply, skb_network_offset(reply));
reply->ip_summed = CHECKSUM_UNNECESSARY;
reply->pkt_type = PACKET_HOST;

if (netif_rx_ni(reply) == NET_RX_DROP)
dev->stats.rx_dropped++;
} else if (vxlan->flags & VXLAN_F_L3MISS) {
union vxlan_addr ipa = {
.sin.sin_addr.s_addr = tip,
.sa.sa_family = AF_INET,

vxlan_ip_miss(dev, &ipa);

return NETDEV_TX_OK;

#if IS_ENABLED(CONFIG_IPV6)
static struct sk_buff *vxlan_na_create(struct sk_buff *request,
struct neighbour *n, bool isrouter)
struct net_device *dev = request->dev;
struct sk_buff *reply;
struct nd_msg *ns, *na;
struct ipv6hdr *pip6;
int na_olen = 8; /* opt hdr + ETH_ALEN for target */

len = LL_RESERVED_SPACE(dev) + sizeof(struct ipv6hdr) +
sizeof(*na) + na_olen + dev->needed_tailroom;
reply = alloc_skb(len, GFP_ATOMIC);

reply->protocol = htons(ETH_P_IPV6);

skb_reserve(reply, LL_RESERVED_SPACE(request->dev));
skb_push(reply, sizeof(struct ethhdr));
skb_set_mac_header(reply, 0);

ns = (struct nd_msg *)skb_transport_header(request);

daddr = eth_hdr(request)->h_source;
ns_olen = request->len - skb_transport_offset(request) - sizeof(*ns);
for (i = 0; i < ns_olen-1; i += (ns->opt[i+1]<<3)) {
if (ns->opt[i] == ND_OPT_SOURCE_LL_ADDR) {
daddr = ns->opt + i + sizeof(struct nd_opt_hdr);

/* Ethernet header */
ether_addr_copy(eth_hdr(reply)->h_dest, daddr);
ether_addr_copy(eth_hdr(reply)->h_source, n->ha);
eth_hdr(reply)->h_proto = htons(ETH_P_IPV6);
reply->protocol = htons(ETH_P_IPV6);

skb_pull(reply, sizeof(struct ethhdr));
skb_set_network_header(reply, 0);
skb_put(reply, sizeof(struct ipv6hdr));

pip6 = ipv6_hdr(reply);
memset(pip6, 0, sizeof(struct ipv6hdr));
pip6->priority = ipv6_hdr(request)->priority;
pip6->nexthdr = IPPROTO_ICMPV6;
pip6->hop_limit = 255;
pip6->daddr = ipv6_hdr(request)->saddr;
pip6->saddr = *(struct in6_addr *)n->primary_key;

skb_pull(reply, sizeof(struct ipv6hdr));
skb_set_transport_header(reply, 0);

na = (struct nd_msg *)skb_put(reply, sizeof(*na) + na_olen);

/* Neighbor Advertisement */
memset(na, 0, sizeof(*na)+na_olen);
na->icmph.icmp6_type = NDISC_NEIGHBOUR_ADVERTISEMENT;
na->icmph.icmp6_router = isrouter;
na->icmph.icmp6_override = 1;
na->icmph.icmp6_solicited = 1;
na->target = ns->target;
ether_addr_copy(&na->opt[2], n->ha);
na->opt[0] = ND_OPT_TARGET_LL_ADDR;
na->opt[1] = na_olen >> 3;

na->icmph.icmp6_cksum = csum_ipv6_magic(&pip6->saddr,
&pip6->daddr, sizeof(*na)+na_olen, IPPROTO_ICMPV6,
csum_partial(na, sizeof(*na)+na_olen, 0));

pip6->payload_len = htons(sizeof(*na)+na_olen);

skb_push(reply, sizeof(struct ipv6hdr));

reply->ip_summed = CHECKSUM_UNNECESSARY;

static int neigh_reduce(struct net_device *dev, struct sk_buff *skb)
struct vxlan_dev *vxlan = netdev_priv(dev);
const struct ipv6hdr *iphdr;
const struct in6_addr *saddr, *daddr;
struct neighbour *n;
struct inet6_dev *in6_dev;

in6_dev = __in6_dev_get(dev);

if (!pskb_may_pull(skb, skb->len))

iphdr = ipv6_hdr(skb);
saddr = &iphdr->saddr;
daddr = &iphdr->daddr;

msg = (struct nd_msg *)skb_transport_header(skb);
if (msg->icmph.icmp6_code != 0 ||
msg->icmph.icmp6_type != NDISC_NEIGHBOUR_SOLICITATION)

if (ipv6_addr_loopback(daddr) ||
ipv6_addr_is_multicast(&msg->target))

n = neigh_lookup(ipv6_stub->nd_tbl, &msg->target, dev);
struct vxlan_fdb *f;
struct sk_buff *reply;

if (!(n->nud_state & NUD_CONNECTED)) {

f = vxlan_find_mac(vxlan, n->ha);
if (f && vxlan_addr_any(&(first_remote_rcu(f)->remote_ip))) {
/* bridge-local neighbor */

reply = vxlan_na_create(skb, n,
!!(f ? f->flags & NTF_ROUTER : 0));

if (netif_rx_ni(reply) == NET_RX_DROP)
dev->stats.rx_dropped++;

} else if (vxlan->flags & VXLAN_F_L3MISS) {
union vxlan_addr ipa = {
.sin6.sin6_addr = msg->target,
.sa.sa_family = AF_INET6,

vxlan_ip_miss(dev, &ipa);

return NETDEV_TX_OK;
#endif

static bool route_shortcircuit(struct net_device *dev, struct sk_buff *skb)
struct vxlan_dev *vxlan = netdev_priv(dev);
struct neighbour *n;

if (is_multicast_ether_addr(eth_hdr(skb)->h_dest))

switch (ntohs(eth_hdr(skb)->h_proto)) {
if (!pskb_may_pull(skb, sizeof(struct iphdr)))
n = neigh_lookup(&arp_tbl, &pip->daddr, dev);
if (!n && (vxlan->flags & VXLAN_F_L3MISS)) {
union vxlan_addr ipa = {
.sin.sin_addr.s_addr = pip->daddr,
.sa.sa_family = AF_INET,

vxlan_ip_miss(dev, &ipa);
#if IS_ENABLED(CONFIG_IPV6)
struct ipv6hdr *pip6;

if (!pskb_may_pull(skb, sizeof(struct ipv6hdr)))
pip6 = ipv6_hdr(skb);
n = neigh_lookup(ipv6_stub->nd_tbl, &pip6->daddr, dev);
if (!n && (vxlan->flags & VXLAN_F_L3MISS)) {
union vxlan_addr ipa = {
.sin6.sin6_addr = pip6->daddr,
.sa.sa_family = AF_INET6,

vxlan_ip_miss(dev, &ipa);

diff = !ether_addr_equal(eth_hdr(skb)->h_dest, n->ha);
memcpy(eth_hdr(skb)->h_source, eth_hdr(skb)->h_dest,
memcpy(eth_hdr(skb)->h_dest, n->ha, dev->addr_len);

/* Compute source port for outgoing packet
 * first choice to use L4 flow hash since it will spread
 * better and maybe available from hardware
 * secondary choice is to use jhash on the Ethernet header
 */
__be16 vxlan_src_port(__u16 port_min, __u16 port_max, struct sk_buff *skb)
unsigned int range = (port_max - port_min) + 1;

hash = skb_get_hash(skb);
hash = jhash(skb->data, 2 * ETH_ALEN,
(__force u32) skb->protocol);

return htons((((u64) hash * range) >> 32) + port_min);
EXPORT_SYMBOL_GPL(vxlan_src_port);
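/* Worked example (hypothetical values): with a local port range of
 * 32768..61000, range == 28233. A flow hash of 0x80000000 scales as
 * ((u64)0x80000000 * 28233) >> 32 == 14116, so the source port is
 * 32768 + 14116 == 46884. The multiply-and-shift maps the 32-bit hash
 * uniformly onto [port_min, port_max] without a modulo.
 */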
static inline struct sk_buff *vxlan_handle_offloads(struct sk_buff *skb,
bool udp_csum)
int type = udp_csum ? SKB_GSO_UDP_TUNNEL_CSUM : SKB_GSO_UDP_TUNNEL;
return iptunnel_handle_offloads(skb, udp_csum, type);

#if IS_ENABLED(CONFIG_IPV6)
static int vxlan6_xmit_skb(struct vxlan_sock *vs,
struct dst_entry *dst, struct sk_buff *skb,
struct net_device *dev, struct in6_addr *saddr,
struct in6_addr *daddr, __u8 prio, __u8 ttl,
__be16 src_port, __be16 dst_port, __be32 vni,
bool xnet)
struct ipv6hdr *ip6h;
struct vxlanhdr *vxh;

skb = vxlan_handle_offloads(skb, !udp_get_no_check6_tx(vs->sock->sk));

skb_scrub_packet(skb, xnet);

min_headroom = LL_RESERVED_SPACE(dst->dev) + dst->header_len
+ VXLAN_HLEN + sizeof(struct ipv6hdr)
+ (vlan_tx_tag_present(skb) ? VLAN_HLEN : 0);

/* Need space for new headers (invalidates iph ptr) */
err = skb_cow_head(skb, min_headroom);

if (vlan_tx_tag_present(skb)) {
if (WARN_ON(!__vlan_put_tag(skb,
vlan_tx_tag_get(skb))))

vxh = (struct vxlanhdr *) __skb_push(skb, sizeof(*vxh));
vxh->vx_flags = htonl(VXLAN_FLAGS);

__skb_push(skb, sizeof(*uh));
skb_reset_transport_header(skb);

uh->dest = dst_port;
uh->source = src_port;

uh->len = htons(skb->len);

memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED |
skb_dst_set(skb, dst);

udp6_set_csum(udp_get_no_check6_tx(vs->sock->sk), skb,
saddr, daddr, skb->len);

__skb_push(skb, sizeof(*ip6h));
skb_reset_network_header(skb);
ip6h = ipv6_hdr(skb);

ip6h->priority = prio;
ip6h->flow_lbl[0] = 0;
ip6h->flow_lbl[1] = 0;
ip6h->flow_lbl[2] = 0;
ip6h->payload_len = htons(skb->len);
ip6h->nexthdr = IPPROTO_UDP;
ip6h->hop_limit = ttl;
ip6h->daddr = *daddr;
ip6h->saddr = *saddr;

ip6tunnel_xmit(skb, dev);
#endif

int vxlan_xmit_skb(struct vxlan_sock *vs,
struct rtable *rt, struct sk_buff *skb,
__be32 src, __be32 dst, __u8 tos, __u8 ttl, __be16 df,
__be16 src_port, __be16 dst_port, __be32 vni, bool xnet)
struct vxlanhdr *vxh;

skb = vxlan_handle_offloads(skb, !vs->sock->sk->sk_no_check_tx);

min_headroom = LL_RESERVED_SPACE(rt->dst.dev) + rt->dst.header_len
+ VXLAN_HLEN + sizeof(struct iphdr)
+ (vlan_tx_tag_present(skb) ? VLAN_HLEN : 0);

/* Need space for new headers (invalidates iph ptr) */
err = skb_cow_head(skb, min_headroom);

if (vlan_tx_tag_present(skb)) {
if (WARN_ON(!__vlan_put_tag(skb,
vlan_tx_tag_get(skb))))

vxh = (struct vxlanhdr *) __skb_push(skb, sizeof(*vxh));
vxh->vx_flags = htonl(VXLAN_FLAGS);

__skb_push(skb, sizeof(*uh));
skb_reset_transport_header(skb);

uh->dest = dst_port;
uh->source = src_port;

uh->len = htons(skb->len);

udp_set_csum(vs->sock->sk->sk_no_check_tx, skb,
src, dst, skb->len);

return iptunnel_xmit(vs->sock->sk, rt, skb, src, dst, IPPROTO_UDP,
tos, ttl, df, xnet);
EXPORT_SYMBOL_GPL(vxlan_xmit_skb);
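/* Sketch of the resulting frame, outermost header first:
 *   outer IPv4 - src/dst from the caller's route lookup
 *   outer UDP  - source port from vxlan_src_port(), dest = dst_port
 *   vxlanhdr   - vx_flags = htonl(VXLAN_FLAGS), VNI in the upper
 *                24 bits of vx_vni
 *   inner Ethernet frame as handed down by the stack
 */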
/* Bypass encapsulation if the destination is local */
static void vxlan_encap_bypass(struct sk_buff *skb, struct vxlan_dev *src_vxlan,
struct vxlan_dev *dst_vxlan)
struct pcpu_sw_netstats *tx_stats, *rx_stats;
union vxlan_addr loopback;
union vxlan_addr *remote_ip = &dst_vxlan->default_dst.remote_ip;

tx_stats = this_cpu_ptr(src_vxlan->dev->tstats);
rx_stats = this_cpu_ptr(dst_vxlan->dev->tstats);
skb->pkt_type = PACKET_HOST;
skb->encapsulation = 0;
skb->dev = dst_vxlan->dev;
__skb_pull(skb, skb_network_offset(skb));

if (remote_ip->sa.sa_family == AF_INET) {
loopback.sin.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
loopback.sa.sa_family = AF_INET;
#if IS_ENABLED(CONFIG_IPV6)
loopback.sin6.sin6_addr = in6addr_loopback;
loopback.sa.sa_family = AF_INET6;

if (dst_vxlan->flags & VXLAN_F_LEARN)
vxlan_snoop(skb->dev, &loopback, eth_hdr(skb)->h_source);

u64_stats_update_begin(&tx_stats->syncp);
tx_stats->tx_packets++;
tx_stats->tx_bytes += skb->len;
u64_stats_update_end(&tx_stats->syncp);

if (netif_rx(skb) == NET_RX_SUCCESS) {
u64_stats_update_begin(&rx_stats->syncp);
rx_stats->rx_packets++;
rx_stats->rx_bytes += skb->len;
u64_stats_update_end(&rx_stats->syncp);
skb->dev->stats.rx_dropped++;

static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
struct vxlan_rdst *rdst, bool did_rsc)
struct vxlan_dev *vxlan = netdev_priv(dev);
struct rtable *rt = NULL;
const struct iphdr *old_iph;
union vxlan_addr *dst;
__be16 src_port = 0, dst_port;

dst_port = rdst->remote_port ? rdst->remote_port : vxlan->dst_port;
vni = rdst->remote_vni;
dst = &rdst->remote_ip;

if (vxlan_addr_any(dst)) {
/* short-circuited back to local bridge */
vxlan_encap_bypass(skb, vxlan, vxlan);

old_iph = ip_hdr(skb);

if (!ttl && vxlan_addr_multicast(dst))

tos = ip_tunnel_get_dsfield(old_iph, skb);

src_port = vxlan_src_port(vxlan->port_min, vxlan->port_max, skb);

if (dst->sa.sa_family == AF_INET) {
memset(&fl4, 0, sizeof(fl4));
fl4.flowi4_oif = rdst->remote_ifindex;
fl4.flowi4_tos = RT_TOS(tos);
fl4.daddr = dst->sin.sin_addr.s_addr;
fl4.saddr = vxlan->saddr.sin.sin_addr.s_addr;

rt = ip_route_output_key(vxlan->net, &fl4);
netdev_dbg(dev, "no route to %pI4\n",
&dst->sin.sin_addr.s_addr);
dev->stats.tx_carrier_errors++;

if (rt->dst.dev == dev) {
netdev_dbg(dev, "circular route to %pI4\n",
&dst->sin.sin_addr.s_addr);
dev->stats.collisions++;

/* Bypass encapsulation if the destination is local */
if (rt->rt_flags & RTCF_LOCAL &&
!(rt->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))) {
struct vxlan_dev *dst_vxlan;

dst_vxlan = vxlan_find_vni(vxlan->net, vni, dst_port);

vxlan_encap_bypass(skb, vxlan, dst_vxlan);

tos = ip_tunnel_ecn_encap(tos, old_iph, skb);
ttl = ttl ? : ip4_dst_hoplimit(&rt->dst);

err = vxlan_xmit_skb(vxlan->vn_sock, rt, skb,
fl4.saddr, dst->sin.sin_addr.s_addr,
tos, ttl, df, src_port, dst_port,
!net_eq(vxlan->net, dev_net(vxlan->dev)));

iptunnel_xmit_stats(err, &dev->stats, dev->tstats);
#if IS_ENABLED(CONFIG_IPV6)
struct sock *sk = vxlan->vn_sock->sock->sk;
struct dst_entry *ndst;

memset(&fl6, 0, sizeof(fl6));
fl6.flowi6_oif = rdst->remote_ifindex;
fl6.daddr = dst->sin6.sin6_addr;
fl6.saddr = vxlan->saddr.sin6.sin6_addr;
fl6.flowi6_proto = IPPROTO_UDP;

if (ipv6_stub->ipv6_dst_lookup(sk, &ndst, &fl6)) {
netdev_dbg(dev, "no route to %pI6\n",
&dst->sin6.sin6_addr);
dev->stats.tx_carrier_errors++;

if (ndst->dev == dev) {
netdev_dbg(dev, "circular route to %pI6\n",
&dst->sin6.sin6_addr);
dev->stats.collisions++;

/* Bypass encapsulation if the destination is local */
flags = ((struct rt6_info *)ndst)->rt6i_flags;
if (flags & RTF_LOCAL &&
!(flags & (RTCF_BROADCAST | RTCF_MULTICAST))) {
struct vxlan_dev *dst_vxlan;

dst_vxlan = vxlan_find_vni(vxlan->net, vni, dst_port);

vxlan_encap_bypass(skb, vxlan, dst_vxlan);

ttl = ttl ? : ip6_dst_hoplimit(ndst);

err = vxlan6_xmit_skb(vxlan->vn_sock, ndst, skb,
dev, &fl6.saddr, &fl6.daddr, 0, ttl,
src_port, dst_port, htonl(vni << 8),
!net_eq(vxlan->net, dev_net(vxlan->dev)));

dev->stats.tx_dropped++;

dev->stats.tx_errors++;
/* Transmit local packets over VXLAN
 * Outer IP header inherits ECN and DF from inner header.
 * Outer UDP destination is the VXLAN assigned port.
 * source port is based on hash of flow
 */
static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev)
struct vxlan_dev *vxlan = netdev_priv(dev);
bool did_rsc = false;
struct vxlan_rdst *rdst, *fdst = NULL;
struct vxlan_fdb *f;

skb_reset_mac_header(skb);

if ((vxlan->flags & VXLAN_F_PROXY)) {
if (ntohs(eth->h_proto) == ETH_P_ARP)
return arp_reduce(dev, skb);
#if IS_ENABLED(CONFIG_IPV6)
else if (ntohs(eth->h_proto) == ETH_P_IPV6 &&
skb->len >= sizeof(struct ipv6hdr) + sizeof(struct nd_msg) &&
ipv6_hdr(skb)->nexthdr == IPPROTO_ICMPV6) {
msg = (struct nd_msg *)skb_transport_header(skb);
if (msg->icmph.icmp6_code == 0 &&
msg->icmph.icmp6_type == NDISC_NEIGHBOUR_SOLICITATION)
return neigh_reduce(dev, skb);

f = vxlan_find_mac(vxlan, eth->h_dest);

if (f && (f->flags & NTF_ROUTER) && (vxlan->flags & VXLAN_F_RSC) &&
(ntohs(eth->h_proto) == ETH_P_IP ||
ntohs(eth->h_proto) == ETH_P_IPV6)) {
did_rsc = route_shortcircuit(dev, skb);
f = vxlan_find_mac(vxlan, eth->h_dest);

f = vxlan_find_mac(vxlan, all_zeros_mac);
if ((vxlan->flags & VXLAN_F_L2MISS) &&
!is_multicast_ether_addr(eth->h_dest))
vxlan_fdb_miss(vxlan, eth->h_dest);

dev->stats.tx_dropped++;
return NETDEV_TX_OK;

list_for_each_entry_rcu(rdst, &f->remotes, list) {
struct sk_buff *skb1;

skb1 = skb_clone(skb, GFP_ATOMIC);
vxlan_xmit_one(skb1, dev, rdst, did_rsc);

vxlan_xmit_one(skb, dev, fdst, did_rsc);
return NETDEV_TX_OK;

/* Walk the forwarding table and purge stale entries */
static void vxlan_cleanup(unsigned long arg)
struct vxlan_dev *vxlan = (struct vxlan_dev *) arg;
unsigned long next_timer = jiffies + FDB_AGE_INTERVAL;

if (!netif_running(vxlan->dev))

spin_lock_bh(&vxlan->hash_lock);
for (h = 0; h < FDB_HASH_SIZE; ++h) {
struct hlist_node *p, *n;

hlist_for_each_safe(p, n, &vxlan->fdb_head[h]) {
= container_of(p, struct vxlan_fdb, hlist);
unsigned long timeout;

if (f->state & NUD_PERMANENT)

timeout = f->used + vxlan->age_interval * HZ;
if (time_before_eq(timeout, jiffies)) {
netdev_dbg(vxlan->dev,
"garbage collect %pM\n",
f->state = NUD_STALE;
vxlan_fdb_destroy(vxlan, f);
} else if (time_before(timeout, next_timer))
next_timer = timeout;
spin_unlock_bh(&vxlan->hash_lock);

mod_timer(&vxlan->age_timer, next_timer);

static void vxlan_vs_add_dev(struct vxlan_sock *vs, struct vxlan_dev *vxlan)
__u32 vni = vxlan->default_dst.remote_vni;

vxlan->vn_sock = vs;
hlist_add_head_rcu(&vxlan->hlist, vni_head(vs, vni));

/* Setup stats when device is created */
static int vxlan_init(struct net_device *dev)
struct vxlan_dev *vxlan = netdev_priv(dev);
struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id);
struct vxlan_sock *vs;

dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);

spin_lock(&vn->sock_lock);
vs = vxlan_find_sock(vxlan->net, vxlan->dst_port);
/* If we have a socket with same port already, reuse it */
atomic_inc(&vs->refcnt);
vxlan_vs_add_dev(vs, vxlan);
/* otherwise make new socket outside of RTNL */
queue_work(vxlan_wq, &vxlan->sock_work);
spin_unlock(&vn->sock_lock);

static void vxlan_fdb_delete_default(struct vxlan_dev *vxlan)
struct vxlan_fdb *f;

spin_lock_bh(&vxlan->hash_lock);
f = __vxlan_find_mac(vxlan, all_zeros_mac);
vxlan_fdb_destroy(vxlan, f);
spin_unlock_bh(&vxlan->hash_lock);

static void vxlan_uninit(struct net_device *dev)
struct vxlan_dev *vxlan = netdev_priv(dev);
struct vxlan_sock *vs = vxlan->vn_sock;

vxlan_fdb_delete_default(vxlan);

vxlan_sock_release(vs);
free_percpu(dev->tstats);

/* Start ageing timer and join group when device is brought up */
static int vxlan_open(struct net_device *dev)
struct vxlan_dev *vxlan = netdev_priv(dev);
struct vxlan_sock *vs = vxlan->vn_sock;

/* socket hasn't been created */

if (vxlan_addr_multicast(&vxlan->default_dst.remote_ip)) {
vxlan_sock_hold(vs);
queue_work(vxlan_wq, &vxlan->igmp_join);

if (vxlan->age_interval)
mod_timer(&vxlan->age_timer, jiffies + FDB_AGE_INTERVAL);

/* Purge the forwarding table */
static void vxlan_flush(struct vxlan_dev *vxlan)
spin_lock_bh(&vxlan->hash_lock);
for (h = 0; h < FDB_HASH_SIZE; ++h) {
struct hlist_node *p, *n;

hlist_for_each_safe(p, n, &vxlan->fdb_head[h]) {
= container_of(p, struct vxlan_fdb, hlist);
/* the all_zeros_mac entry is deleted at vxlan_uninit */
if (!is_zero_ether_addr(f->eth_addr))
vxlan_fdb_destroy(vxlan, f);
spin_unlock_bh(&vxlan->hash_lock);

/* Cleanup timer and forwarding table on shutdown */
static int vxlan_stop(struct net_device *dev)
struct vxlan_dev *vxlan = netdev_priv(dev);
struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id);
struct vxlan_sock *vs = vxlan->vn_sock;

if (vs && vxlan_addr_multicast(&vxlan->default_dst.remote_ip) &&
!vxlan_group_used(vn, vxlan)) {
vxlan_sock_hold(vs);
queue_work(vxlan_wq, &vxlan->igmp_leave);

del_timer_sync(&vxlan->age_timer);

/* Stub, nothing needs to be done. */
static void vxlan_set_multicast_list(struct net_device *dev)

static int vxlan_change_mtu(struct net_device *dev, int new_mtu)
struct vxlan_dev *vxlan = netdev_priv(dev);
struct vxlan_rdst *dst = &vxlan->default_dst;
struct net_device *lowerdev;

lowerdev = __dev_get_by_index(vxlan->net, dst->remote_ifindex);
if (lowerdev == NULL)
return eth_change_mtu(dev, new_mtu);

if (dst->remote_ip.sa.sa_family == AF_INET6)
max_mtu = lowerdev->mtu - VXLAN6_HEADROOM;
max_mtu = lowerdev->mtu - VXLAN_HEADROOM;

if (new_mtu < 68 || new_mtu > max_mtu)

static const struct net_device_ops vxlan_netdev_ops = {
.ndo_init = vxlan_init,
.ndo_uninit = vxlan_uninit,
.ndo_open = vxlan_open,
.ndo_stop = vxlan_stop,
.ndo_start_xmit = vxlan_xmit,
.ndo_get_stats64 = ip_tunnel_get_stats64,
.ndo_set_rx_mode = vxlan_set_multicast_list,
.ndo_change_mtu = vxlan_change_mtu,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr,
.ndo_fdb_add = vxlan_fdb_add,
.ndo_fdb_del = vxlan_fdb_delete,
.ndo_fdb_dump = vxlan_fdb_dump,
};
/* Info for udev that this is a virtual tunnel endpoint */
static struct device_type vxlan_type = {
.name = "vxlan",
};
/* Calls the ndo_add_vxlan_port of the caller in order to
 * supply the listening VXLAN UDP ports. Callers are expected
 * to implement the ndo_add_vxlan_port.
 */
void vxlan_get_rx_port(struct net_device *dev)
struct vxlan_sock *vs;
struct net *net = dev_net(dev);
struct vxlan_net *vn = net_generic(net, vxlan_net_id);
sa_family_t sa_family;

spin_lock(&vn->sock_lock);
for (i = 0; i < PORT_HASH_SIZE; ++i) {
hlist_for_each_entry_rcu(vs, &vn->sock_list[i], hlist) {
port = inet_sk(vs->sock->sk)->inet_sport;
sa_family = vs->sock->sk->sk_family;
dev->netdev_ops->ndo_add_vxlan_port(dev, sa_family,
spin_unlock(&vn->sock_lock);
EXPORT_SYMBOL_GPL(vxlan_get_rx_port);
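/* Sketch of a typical consumer (illustrative, not from this file): a
 * NIC driver that implements ndo_add_vxlan_port() calls
 * vxlan_get_rx_port(netdev) from its ndo_open() handler so that VXLAN
 * UDP ports opened before the NIC came up are replayed into the
 * driver's offload tables.
 */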
/* Initialize the device structure. */
static void vxlan_setup(struct net_device *dev)
struct vxlan_dev *vxlan = netdev_priv(dev);

eth_hw_addr_random(dev);

if (vxlan->default_dst.remote_ip.sa.sa_family == AF_INET6)
dev->needed_headroom = ETH_HLEN + VXLAN6_HEADROOM;
dev->needed_headroom = ETH_HLEN + VXLAN_HEADROOM;

dev->netdev_ops = &vxlan_netdev_ops;
dev->destructor = free_netdev;
SET_NETDEV_DEVTYPE(dev, &vxlan_type);

dev->tx_queue_len = 0;
dev->features |= NETIF_F_LLTX;
dev->features |= NETIF_F_SG | NETIF_F_HW_CSUM;
dev->features |= NETIF_F_RXCSUM;
dev->features |= NETIF_F_GSO_SOFTWARE;

dev->vlan_features = dev->features;
dev->features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX;
dev->hw_features |= NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_RXCSUM;
dev->hw_features |= NETIF_F_GSO_SOFTWARE;
dev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX;
dev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;

INIT_LIST_HEAD(&vxlan->next);
spin_lock_init(&vxlan->hash_lock);
INIT_WORK(&vxlan->igmp_join, vxlan_igmp_join);
INIT_WORK(&vxlan->igmp_leave, vxlan_igmp_leave);
INIT_WORK(&vxlan->sock_work, vxlan_sock_work);

init_timer_deferrable(&vxlan->age_timer);
vxlan->age_timer.function = vxlan_cleanup;
vxlan->age_timer.data = (unsigned long) vxlan;

inet_get_local_port_range(dev_net(dev), &low, &high);
vxlan->port_min = low;
vxlan->port_max = high;
vxlan->dst_port = htons(vxlan_port);

for (h = 0; h < FDB_HASH_SIZE; ++h)
INIT_HLIST_HEAD(&vxlan->fdb_head[h]);

static const struct nla_policy vxlan_policy[IFLA_VXLAN_MAX + 1] = {
[IFLA_VXLAN_ID] = { .type = NLA_U32 },
[IFLA_VXLAN_GROUP] = { .len = FIELD_SIZEOF(struct iphdr, daddr) },
[IFLA_VXLAN_GROUP6] = { .len = sizeof(struct in6_addr) },
[IFLA_VXLAN_LINK] = { .type = NLA_U32 },
[IFLA_VXLAN_LOCAL] = { .len = FIELD_SIZEOF(struct iphdr, saddr) },
[IFLA_VXLAN_LOCAL6] = { .len = sizeof(struct in6_addr) },
[IFLA_VXLAN_TOS] = { .type = NLA_U8 },
[IFLA_VXLAN_TTL] = { .type = NLA_U8 },
[IFLA_VXLAN_LEARNING] = { .type = NLA_U8 },
[IFLA_VXLAN_AGEING] = { .type = NLA_U32 },
[IFLA_VXLAN_LIMIT] = { .type = NLA_U32 },
[IFLA_VXLAN_PORT_RANGE] = { .len = sizeof(struct ifla_vxlan_port_range) },
[IFLA_VXLAN_PROXY] = { .type = NLA_U8 },
[IFLA_VXLAN_RSC] = { .type = NLA_U8 },
[IFLA_VXLAN_L2MISS] = { .type = NLA_U8 },
[IFLA_VXLAN_L3MISS] = { .type = NLA_U8 },
[IFLA_VXLAN_PORT] = { .type = NLA_U16 },
};

static int vxlan_validate(struct nlattr *tb[], struct nlattr *data[])
if (tb[IFLA_ADDRESS]) {
if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN) {
pr_debug("invalid link address (not ethernet)\n");
if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS]))) {
pr_debug("invalid all zero ethernet address\n");
return -EADDRNOTAVAIL;

if (data[IFLA_VXLAN_ID]) {
__u32 id = nla_get_u32(data[IFLA_VXLAN_ID]);
if (id >= VXLAN_N_VID)
2329 if (data[IFLA_VXLAN_PORT_RANGE]) {
2330 const struct ifla_vxlan_port_range *p
2331 = nla_data(data[IFLA_VXLAN_PORT_RANGE]);
2333 if (ntohs(p->high) < ntohs(p->low)) {
2334 pr_debug("port range %u .. %u not valid\n",
2335 ntohs(p->low), ntohs(p->high));
2343 static void vxlan_get_drvinfo(struct net_device *netdev,
2344 struct ethtool_drvinfo *drvinfo)
2346 strlcpy(drvinfo->version, VXLAN_VERSION, sizeof(drvinfo->version));
2347 strlcpy(drvinfo->driver, "vxlan", sizeof(drvinfo->driver));
2350 static const struct ethtool_ops vxlan_ethtool_ops = {
2351 .get_drvinfo = vxlan_get_drvinfo,
2352 .get_link = ethtool_op_get_link,
2355 static void vxlan_del_work(struct work_struct *work)
2357 struct vxlan_sock *vs = container_of(work, struct vxlan_sock, del_work);
2359 sk_release_kernel(vs->sock->sk);
2363 #if IS_ENABLED(CONFIG_IPV6)
2364 /* Create UDP socket for encapsulation receive. AF_INET6 socket
2365 * could be used for both IPv4 and IPv6 communications, but
2366 * users may set bindv6only=1.
2368 static struct socket *create_v6_sock(struct net *net, __be16 port, u32 flags)
2371 struct socket *sock;
2372 struct sockaddr_in6 vxlan_addr = {
2373 .sin6_family = AF_INET6,
2378 rc = sock_create_kern(AF_INET6, SOCK_DGRAM, IPPROTO_UDP, &sock);
2380 pr_debug("UDPv6 socket create failed\n");
2384 /* Put in proper namespace */
2386 sk_change_net(sk, net);
2388 kernel_setsockopt(sock, SOL_IPV6, IPV6_V6ONLY,
2389 (char *)&val, sizeof(val));
2390 rc = kernel_bind(sock, (struct sockaddr *)&vxlan_addr,
2391 sizeof(struct sockaddr_in6));
2393 pr_debug("bind for UDPv6 socket %pI6:%u (%d)\n",
2394 &vxlan_addr.sin6_addr, ntohs(vxlan_addr.sin6_port), rc);
2395 sk_release_kernel(sk);
2398 /* At this point, IPv6 module should have been loaded in
2399 * sock_create_kern().
2403 /* Disable multicast loopback */
2404 inet_sk(sk)->mc_loop = 0;
2406 if (flags & VXLAN_F_UDP_ZERO_CSUM6_TX)
2407 udp_set_no_check6_tx(sk, true);
2409 if (flags & VXLAN_F_UDP_ZERO_CSUM6_RX)
2410 udp_set_no_check6_rx(sk, true);
2417 static struct socket *create_v6_sock(struct net *net, __be16 port, u32 flags)
2419 return ERR_PTR(-EPFNOSUPPORT);
2423 static struct socket *create_v4_sock(struct net *net, __be16 port, u32 flags)
2426 struct socket *sock;
2427 struct sockaddr_in vxlan_addr = {
2428 .sin_family = AF_INET,
2429 .sin_addr.s_addr = htonl(INADDR_ANY),
2434 /* Create UDP socket for encapsulation receive. */
2435 rc = sock_create_kern(AF_INET, SOCK_DGRAM, IPPROTO_UDP, &sock);
2437 pr_debug("UDP socket create failed\n");
2441 /* Put in proper namespace */
2443 sk_change_net(sk, net);
2445 rc = kernel_bind(sock, (struct sockaddr *) &vxlan_addr,
2446 sizeof(vxlan_addr));
2448 pr_debug("bind for UDP socket %pI4:%u (%d)\n",
2449 &vxlan_addr.sin_addr, ntohs(vxlan_addr.sin_port), rc);
2450 sk_release_kernel(sk);
2454 /* Disable multicast loopback */
2455 inet_sk(sk)->mc_loop = 0;
2457 if (!(flags & VXLAN_F_UDP_CSUM))
2458 sock->sk->sk_no_check_tx = 1;
/* Create new listen socket if needed */
static struct vxlan_sock *vxlan_socket_create(struct net *net, __be16 port,
					      vxlan_rcv_t *rcv, void *data,
					      u32 flags)
{
	struct vxlan_net *vn = net_generic(net, vxlan_net_id);
	struct vxlan_sock *vs;
	struct socket *sock;
	struct sock *sk;
	unsigned int h;
	bool ipv6 = !!(flags & VXLAN_F_IPV6);

	vs = kzalloc(sizeof(*vs), GFP_KERNEL);
	if (!vs)
		return ERR_PTR(-ENOMEM);

	for (h = 0; h < VNI_HASH_SIZE; ++h)
		INIT_HLIST_HEAD(&vs->vni_list[h]);

	INIT_WORK(&vs->del_work, vxlan_del_work);

	if (ipv6)
		sock = create_v6_sock(net, port, flags);
	else
		sock = create_v4_sock(net, port, flags);
	if (IS_ERR(sock)) {
		kfree(vs);
		return ERR_CAST(sock);
	}

	vs->sock = sock;
	sk = sock->sk;
	atomic_set(&vs->refcnt, 1);
	vs->rcv = rcv;
	vs->data = data;
	rcu_assign_sk_user_data(vs->sock->sk, vs);

	/* Initialize the vxlan udp offloads structure */
	vs->udp_offloads.port = port;
	vs->udp_offloads.callbacks.gro_receive = vxlan_gro_receive;
	vs->udp_offloads.callbacks.gro_complete = vxlan_gro_complete;

	spin_lock(&vn->sock_lock);
	hlist_add_head_rcu(&vs->hlist, vs_head(net, port));
	vxlan_notify_add_rx_port(vs);
	spin_unlock(&vn->sock_lock);

	/* Mark socket as an encapsulation socket. */
	udp_sk(sk)->encap_type = 1;
	udp_sk(sk)->encap_rcv = vxlan_udp_encap_recv;
#if IS_ENABLED(CONFIG_IPV6)
	if (ipv6)
		ipv6_stub->udpv6_encap_enable();
	else
#endif
		udp_encap_enable();

	return vs;
}
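/* A note on the encap_rcv contract, per the convention documented in the
 * UDP receive path (net/ipv4/udp.c): the callback returns
 *
 *	=0 if the skb was consumed (or discarded) by the encap handler,
 *	>0 if the skb should be passed on to UDP as a normal datagram
 *	   (e.g. it was not a VXLAN frame),
 *	<0 if the skb should be resubmitted as protocol -N.
 */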
struct vxlan_sock *vxlan_sock_add(struct net *net, __be16 port,
				  vxlan_rcv_t *rcv, void *data,
				  bool no_share, u32 flags)
{
	struct vxlan_net *vn = net_generic(net, vxlan_net_id);
	struct vxlan_sock *vs;

	vs = vxlan_socket_create(net, port, rcv, data, flags);
	if (!IS_ERR(vs))
		return vs;

	if (no_share)	/* Return error if sharing is not allowed. */
		return vs;

	spin_lock(&vn->sock_lock);
	vs = vxlan_find_sock(net, port);
	if (vs) {
		if (vs->rcv == rcv)
			atomic_inc(&vs->refcnt);
		else
			vs = ERR_PTR(-EBUSY);
	}
	spin_unlock(&vn->sock_lock);

	if (!vs)
		vs = ERR_PTR(-EINVAL);

	return vs;
}
EXPORT_SYMBOL_GPL(vxlan_sock_add);
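/* Usage sketch for the exported helper above (a hypothetical caller, e.g.
 * another encapsulation module sharing the VXLAN UDP port; my_rcv and
 * my_priv are assumed names, not part of this file):
 *
 *	struct vxlan_sock *vs;
 *
 *	vs = vxlan_sock_add(net, htons(4789), my_rcv, my_priv,
 *			    false, 0);
 *	if (IS_ERR(vs))
 *		return PTR_ERR(vs);
 *	...
 *	vxlan_sock_release(vs);		// drop the reference taken above
 */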
/* Scheduled at device creation to bind to a socket */
static void vxlan_sock_work(struct work_struct *work)
{
	struct vxlan_dev *vxlan = container_of(work, struct vxlan_dev, sock_work);
	struct net *net = vxlan->net;
	struct vxlan_net *vn = net_generic(net, vxlan_net_id);
	__be16 port = vxlan->dst_port;
	struct vxlan_sock *nvs;

	nvs = vxlan_sock_add(net, port, vxlan_rcv, NULL, false, vxlan->flags);
	spin_lock(&vn->sock_lock);
	if (!IS_ERR(nvs))
		vxlan_vs_add_dev(nvs, vxlan);
	spin_unlock(&vn->sock_lock);

	dev_put(vxlan->dev);	/* balances the dev_hold() taken when queued */
}
static int vxlan_newlink(struct net *net, struct net_device *dev,
			 struct nlattr *tb[], struct nlattr *data[])
{
	struct vxlan_net *vn = net_generic(net, vxlan_net_id);
	struct vxlan_dev *vxlan = netdev_priv(dev);
	struct vxlan_rdst *dst = &vxlan->default_dst;
	__u32 vni;
	int err;
	bool use_ipv6 = false;

	if (!data[IFLA_VXLAN_ID])
		return -EINVAL;

	vxlan->net = dev_net(dev);

	vni = nla_get_u32(data[IFLA_VXLAN_ID]);
	dst->remote_vni = vni;

	/* Unless IPv6 is explicitly requested, assume IPv4 */
	dst->remote_ip.sa.sa_family = AF_INET;
	if (data[IFLA_VXLAN_GROUP]) {
		dst->remote_ip.sin.sin_addr.s_addr = nla_get_be32(data[IFLA_VXLAN_GROUP]);
	} else if (data[IFLA_VXLAN_GROUP6]) {
		if (!IS_ENABLED(CONFIG_IPV6))
			return -EPFNOSUPPORT;

		nla_memcpy(&dst->remote_ip.sin6.sin6_addr, data[IFLA_VXLAN_GROUP6],
			   sizeof(struct in6_addr));
		dst->remote_ip.sa.sa_family = AF_INET6;
		use_ipv6 = true;
	}

	if (data[IFLA_VXLAN_LOCAL]) {
		vxlan->saddr.sin.sin_addr.s_addr = nla_get_be32(data[IFLA_VXLAN_LOCAL]);
		vxlan->saddr.sa.sa_family = AF_INET;
	} else if (data[IFLA_VXLAN_LOCAL6]) {
		if (!IS_ENABLED(CONFIG_IPV6))
			return -EPFNOSUPPORT;

		/* TODO: respect scope id */
		nla_memcpy(&vxlan->saddr.sin6.sin6_addr, data[IFLA_VXLAN_LOCAL6],
			   sizeof(struct in6_addr));
		vxlan->saddr.sa.sa_family = AF_INET6;
		use_ipv6 = true;
	}

	if (data[IFLA_VXLAN_LINK] &&
	    (dst->remote_ifindex = nla_get_u32(data[IFLA_VXLAN_LINK]))) {
		struct net_device *lowerdev
			 = __dev_get_by_index(net, dst->remote_ifindex);

		if (!lowerdev) {
			pr_info("ifindex %d does not exist\n", dst->remote_ifindex);
			return -ENODEV;
		}

#if IS_ENABLED(CONFIG_IPV6)
		if (use_ipv6) {
			struct inet6_dev *idev = __in6_dev_get(lowerdev);

			if (idev && idev->cnf.disable_ipv6) {
				pr_info("IPv6 is disabled via sysctl\n");
				return -EPERM;
			}
			vxlan->flags |= VXLAN_F_IPV6;
		}
#endif

		if (!tb[IFLA_MTU])
			dev->mtu = lowerdev->mtu - (use_ipv6 ? VXLAN6_HEADROOM : VXLAN_HEADROOM);

		dev->needed_headroom = lowerdev->hard_header_len +
				       (use_ipv6 ? VXLAN6_HEADROOM : VXLAN_HEADROOM);
	} else if (use_ipv6)
		vxlan->flags |= VXLAN_F_IPV6;
	if (data[IFLA_VXLAN_TOS])
		vxlan->tos = nla_get_u8(data[IFLA_VXLAN_TOS]);

	if (data[IFLA_VXLAN_TTL])
		vxlan->ttl = nla_get_u8(data[IFLA_VXLAN_TTL]);

	if (!data[IFLA_VXLAN_LEARNING] || nla_get_u8(data[IFLA_VXLAN_LEARNING]))
		vxlan->flags |= VXLAN_F_LEARN;

	if (data[IFLA_VXLAN_AGEING])
		vxlan->age_interval = nla_get_u32(data[IFLA_VXLAN_AGEING]);
	else
		vxlan->age_interval = FDB_AGE_DEFAULT;

	if (data[IFLA_VXLAN_PROXY] && nla_get_u8(data[IFLA_VXLAN_PROXY]))
		vxlan->flags |= VXLAN_F_PROXY;

	if (data[IFLA_VXLAN_RSC] && nla_get_u8(data[IFLA_VXLAN_RSC]))
		vxlan->flags |= VXLAN_F_RSC;

	if (data[IFLA_VXLAN_L2MISS] && nla_get_u8(data[IFLA_VXLAN_L2MISS]))
		vxlan->flags |= VXLAN_F_L2MISS;

	if (data[IFLA_VXLAN_L3MISS] && nla_get_u8(data[IFLA_VXLAN_L3MISS]))
		vxlan->flags |= VXLAN_F_L3MISS;

	if (data[IFLA_VXLAN_LIMIT])
		vxlan->addrmax = nla_get_u32(data[IFLA_VXLAN_LIMIT]);

	if (data[IFLA_VXLAN_PORT_RANGE]) {
		const struct ifla_vxlan_port_range *p
			= nla_data(data[IFLA_VXLAN_PORT_RANGE]);

		vxlan->port_min = ntohs(p->low);
		vxlan->port_max = ntohs(p->high);
	}

	if (data[IFLA_VXLAN_PORT])
		vxlan->dst_port = nla_get_be16(data[IFLA_VXLAN_PORT]);

	if (data[IFLA_VXLAN_UDP_CSUM] && nla_get_u8(data[IFLA_VXLAN_UDP_CSUM]))
		vxlan->flags |= VXLAN_F_UDP_CSUM;

	if (data[IFLA_VXLAN_UDP_ZERO_CSUM6_TX] &&
	    nla_get_u8(data[IFLA_VXLAN_UDP_ZERO_CSUM6_TX]))
		vxlan->flags |= VXLAN_F_UDP_ZERO_CSUM6_TX;

	if (data[IFLA_VXLAN_UDP_ZERO_CSUM6_RX] &&
	    nla_get_u8(data[IFLA_VXLAN_UDP_ZERO_CSUM6_RX]))
		vxlan->flags |= VXLAN_F_UDP_ZERO_CSUM6_RX;
	if (vxlan_find_vni(net, vni, vxlan->dst_port)) {
		pr_info("duplicate VNI %u\n", vni);
		return -EEXIST;
	}

	dev->ethtool_ops = &vxlan_ethtool_ops;

	/* create an fdb entry for a valid default destination */
	if (!vxlan_addr_any(&vxlan->default_dst.remote_ip)) {
		err = vxlan_fdb_create(vxlan, all_zeros_mac,
				       &vxlan->default_dst.remote_ip,
				       NUD_REACHABLE|NUD_PERMANENT,
				       NLM_F_EXCL|NLM_F_CREATE,
				       vxlan->dst_port,
				       vxlan->default_dst.remote_vni,
				       vxlan->default_dst.remote_ifindex,
				       NTF_SELF);
		if (err)
			return err;
	}

	err = register_netdevice(dev);
	if (err) {
		vxlan_fdb_delete_default(vxlan);
		return err;
	}

	list_add(&vxlan->next, &vn->vxlan_list);

	return 0;
}
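/* For reference, an iproute2 invocation that exercises the attributes
 * parsed in vxlan_newlink() above (illustrative):
 *
 *	ip link add vxlan0 type vxlan id 42 group 239.1.1.1 \
 *		dev eth0 dstport 4789 ttl 10
 *
 * which maps to IFLA_VXLAN_ID, IFLA_VXLAN_GROUP, IFLA_VXLAN_LINK,
 * IFLA_VXLAN_PORT and IFLA_VXLAN_TTL respectively.
 */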
static void vxlan_dellink(struct net_device *dev, struct list_head *head)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id);

	spin_lock(&vn->sock_lock);
	if (!hlist_unhashed(&vxlan->hlist))
		hlist_del_rcu(&vxlan->hlist);
	spin_unlock(&vn->sock_lock);

	list_del(&vxlan->next);
	unregister_netdevice_queue(dev, head);
}
static size_t vxlan_get_size(const struct net_device *dev)
{
	return nla_total_size(sizeof(__u32)) +	/* IFLA_VXLAN_ID */
		nla_total_size(sizeof(struct in6_addr)) + /* IFLA_VXLAN_GROUP{6} */
		nla_total_size(sizeof(__u32)) +	/* IFLA_VXLAN_LINK */
		nla_total_size(sizeof(struct in6_addr)) + /* IFLA_VXLAN_LOCAL{6} */
		nla_total_size(sizeof(__u8)) +	/* IFLA_VXLAN_TTL */
		nla_total_size(sizeof(__u8)) +	/* IFLA_VXLAN_TOS */
		nla_total_size(sizeof(__u8)) +	/* IFLA_VXLAN_LEARNING */
		nla_total_size(sizeof(__u8)) +	/* IFLA_VXLAN_PROXY */
		nla_total_size(sizeof(__u8)) +	/* IFLA_VXLAN_RSC */
		nla_total_size(sizeof(__u8)) +	/* IFLA_VXLAN_L2MISS */
		nla_total_size(sizeof(__u8)) +	/* IFLA_VXLAN_L3MISS */
		nla_total_size(sizeof(__u32)) +	/* IFLA_VXLAN_AGEING */
		nla_total_size(sizeof(__u32)) +	/* IFLA_VXLAN_LIMIT */
		nla_total_size(sizeof(struct ifla_vxlan_port_range)) + /* IFLA_VXLAN_PORT_RANGE */
		nla_total_size(sizeof(__be16)) + /* IFLA_VXLAN_PORT */
		nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_UDP_CSUM */
		nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_UDP_ZERO_CSUM6_TX */
		nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_UDP_ZERO_CSUM6_RX */
		0;
}
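/* A worked example of the sizing above: nla_total_size(payload) is
 * NLA_ALIGN(NLA_HDRLEN + payload), with NLA_HDRLEN == 4 and 4-byte
 * alignment, so e.g.
 *
 *	nla_total_size(sizeof(__u8))  == NLA_ALIGN(4 + 1) == 8
 *	nla_total_size(sizeof(__u32)) == NLA_ALIGN(4 + 4) == 8
 *
 * which is why each attribute reserves more than its raw payload size.
 */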
static int vxlan_fill_info(struct sk_buff *skb, const struct net_device *dev)
{
	const struct vxlan_dev *vxlan = netdev_priv(dev);
	const struct vxlan_rdst *dst = &vxlan->default_dst;
	struct ifla_vxlan_port_range ports = {
		.low = htons(vxlan->port_min),
		.high = htons(vxlan->port_max),
	};

	if (nla_put_u32(skb, IFLA_VXLAN_ID, dst->remote_vni))
		goto nla_put_failure;

	if (!vxlan_addr_any(&dst->remote_ip)) {
		if (dst->remote_ip.sa.sa_family == AF_INET) {
			if (nla_put_be32(skb, IFLA_VXLAN_GROUP,
					 dst->remote_ip.sin.sin_addr.s_addr))
				goto nla_put_failure;
#if IS_ENABLED(CONFIG_IPV6)
		} else {
			if (nla_put(skb, IFLA_VXLAN_GROUP6, sizeof(struct in6_addr),
				    &dst->remote_ip.sin6.sin6_addr))
				goto nla_put_failure;
#endif
		}
	}

	if (dst->remote_ifindex && nla_put_u32(skb, IFLA_VXLAN_LINK, dst->remote_ifindex))
		goto nla_put_failure;

	if (!vxlan_addr_any(&vxlan->saddr)) {
		if (vxlan->saddr.sa.sa_family == AF_INET) {
			if (nla_put_be32(skb, IFLA_VXLAN_LOCAL,
					 vxlan->saddr.sin.sin_addr.s_addr))
				goto nla_put_failure;
#if IS_ENABLED(CONFIG_IPV6)
		} else {
			if (nla_put(skb, IFLA_VXLAN_LOCAL6, sizeof(struct in6_addr),
				    &vxlan->saddr.sin6.sin6_addr))
				goto nla_put_failure;
#endif
		}
	}

	if (nla_put_u8(skb, IFLA_VXLAN_TTL, vxlan->ttl) ||
	    nla_put_u8(skb, IFLA_VXLAN_TOS, vxlan->tos) ||
	    nla_put_u8(skb, IFLA_VXLAN_LEARNING,
		       !!(vxlan->flags & VXLAN_F_LEARN)) ||
	    nla_put_u8(skb, IFLA_VXLAN_PROXY,
		       !!(vxlan->flags & VXLAN_F_PROXY)) ||
	    nla_put_u8(skb, IFLA_VXLAN_RSC, !!(vxlan->flags & VXLAN_F_RSC)) ||
	    nla_put_u8(skb, IFLA_VXLAN_L2MISS,
		       !!(vxlan->flags & VXLAN_F_L2MISS)) ||
	    nla_put_u8(skb, IFLA_VXLAN_L3MISS,
		       !!(vxlan->flags & VXLAN_F_L3MISS)) ||
	    nla_put_u32(skb, IFLA_VXLAN_AGEING, vxlan->age_interval) ||
	    nla_put_u32(skb, IFLA_VXLAN_LIMIT, vxlan->addrmax) ||
	    nla_put_be16(skb, IFLA_VXLAN_PORT, vxlan->dst_port) ||
	    nla_put_u8(skb, IFLA_VXLAN_UDP_CSUM,
		       !!(vxlan->flags & VXLAN_F_UDP_CSUM)) ||
	    nla_put_u8(skb, IFLA_VXLAN_UDP_ZERO_CSUM6_TX,
		       !!(vxlan->flags & VXLAN_F_UDP_ZERO_CSUM6_TX)) ||
	    nla_put_u8(skb, IFLA_VXLAN_UDP_ZERO_CSUM6_RX,
		       !!(vxlan->flags & VXLAN_F_UDP_ZERO_CSUM6_RX)))
		goto nla_put_failure;

	if (nla_put(skb, IFLA_VXLAN_PORT_RANGE, sizeof(ports), &ports))
		goto nla_put_failure;

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}
static struct rtnl_link_ops vxlan_link_ops __read_mostly = {
	.kind		= "vxlan",
	.maxtype	= IFLA_VXLAN_MAX,
	.policy		= vxlan_policy,
	.priv_size	= sizeof(struct vxlan_dev),
	.setup		= vxlan_setup,
	.validate	= vxlan_validate,
	.newlink	= vxlan_newlink,
	.dellink	= vxlan_dellink,
	.get_size	= vxlan_get_size,
	.fill_info	= vxlan_fill_info,
};
static void vxlan_handle_lowerdev_unregister(struct vxlan_net *vn,
					     struct net_device *dev)
{
	struct vxlan_dev *vxlan, *next;
	LIST_HEAD(list_kill);

	list_for_each_entry_safe(vxlan, next, &vn->vxlan_list, next) {
		struct vxlan_rdst *dst = &vxlan->default_dst;

		/* In case we created vxlan device with carrier
		 * and we lose the carrier due to module unload
		 * we also need to remove vxlan device. In other
		 * cases, it's not necessary and remote_ifindex
		 * is 0 here, so no matches.
		 */
		if (dst->remote_ifindex == dev->ifindex)
			vxlan_dellink(vxlan->dev, &list_kill);
	}

	unregister_netdevice_many(&list_kill);
}
static int vxlan_lowerdev_event(struct notifier_block *unused,
				unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct vxlan_net *vn = net_generic(dev_net(dev), vxlan_net_id);

	if (event == NETDEV_UNREGISTER)
		vxlan_handle_lowerdev_unregister(vn, dev);

	return NOTIFY_DONE;
}

static struct notifier_block vxlan_notifier_block __read_mostly = {
	.notifier_call = vxlan_lowerdev_event,
};
static __net_init int vxlan_init_net(struct net *net)
{
	struct vxlan_net *vn = net_generic(net, vxlan_net_id);
	unsigned int h;

	INIT_LIST_HEAD(&vn->vxlan_list);
	spin_lock_init(&vn->sock_lock);

	for (h = 0; h < PORT_HASH_SIZE; ++h)
		INIT_HLIST_HEAD(&vn->sock_list[h]);

	return 0;
}
static void __net_exit vxlan_exit_net(struct net *net)
{
	struct vxlan_net *vn = net_generic(net, vxlan_net_id);
	struct vxlan_dev *vxlan, *next;
	struct net_device *dev, *aux;
	LIST_HEAD(list);

	rtnl_lock();
	for_each_netdev_safe(net, dev, aux)
		if (dev->rtnl_link_ops == &vxlan_link_ops)
			unregister_netdevice_queue(dev, &list);

	list_for_each_entry_safe(vxlan, next, &vn->vxlan_list, next) {
		/* If vxlan->dev is in the same netns, it has already been added
		 * to the list by the previous loop.
		 */
		if (!net_eq(dev_net(vxlan->dev), net))
			unregister_netdevice_queue(vxlan->dev, &list);
	}

	unregister_netdevice_many(&list);
	rtnl_unlock();
}
static struct pernet_operations vxlan_net_ops = {
	.init = vxlan_init_net,
	.exit = vxlan_exit_net,
	.id   = &vxlan_net_id,
	.size = sizeof(struct vxlan_net),
};
static int __init vxlan_init_module(void)
{
	int rc;

	vxlan_wq = alloc_workqueue("vxlan", 0, 0);
	if (!vxlan_wq)
		return -ENOMEM;

	get_random_bytes(&vxlan_salt, sizeof(vxlan_salt));

	rc = register_pernet_subsys(&vxlan_net_ops);
	if (rc)
		goto out1;

	rc = register_netdevice_notifier(&vxlan_notifier_block);
	if (rc)
		goto out2;

	rc = rtnl_link_register(&vxlan_link_ops);
	if (rc)
		goto out3;

	return 0;
out3:
	unregister_netdevice_notifier(&vxlan_notifier_block);
out2:
	unregister_pernet_subsys(&vxlan_net_ops);
out1:
	destroy_workqueue(vxlan_wq);
	return rc;
}
late_initcall(vxlan_init_module);
static void __exit vxlan_cleanup_module(void)
{
	rtnl_link_unregister(&vxlan_link_ops);
	unregister_netdevice_notifier(&vxlan_notifier_block);
	destroy_workqueue(vxlan_wq);
	unregister_pernet_subsys(&vxlan_net_ops);
	/* rcu_barrier() is called by netns */
}
module_exit(vxlan_cleanup_module);
MODULE_LICENSE("GPL");
MODULE_VERSION(VXLAN_VERSION);
MODULE_AUTHOR("Stephen Hemminger <stephen@networkplumber.org>");
MODULE_DESCRIPTION("Driver for VXLAN encapsulated traffic");
MODULE_ALIAS_RTNL_LINK("vxlan");
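/* Note: MODULE_ALIAS_RTNL_LINK("vxlan") expands to
 * MODULE_ALIAS("rtnl-link-vxlan"), so a request such as
 *
 *	ip link add vxlan0 type vxlan id 42
 *
 * can auto-load this module: rtnl_newlink() calls
 * request_module("rtnl-link-%s", kind) when the link kind is unknown.
 */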