2 * VXLAN: Virtual eXtensible Local Area Network
4 * Copyright (c) 2012-2013 Vyatta Inc.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
11 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
13 #include <linux/kernel.h>
14 #include <linux/types.h>
15 #include <linux/module.h>
16 #include <linux/errno.h>
17 #include <linux/slab.h>
18 #include <linux/skbuff.h>
19 #include <linux/rculist.h>
20 #include <linux/netdevice.h>
23 #include <linux/udp.h>
24 #include <linux/igmp.h>
25 #include <linux/etherdevice.h>
26 #include <linux/if_ether.h>
27 #include <linux/if_vlan.h>
28 #include <linux/hash.h>
29 #include <linux/ethtool.h>
31 #include <net/ndisc.h>
33 #include <net/ip_tunnels.h>
36 #include <net/udp_tunnel.h>
37 #include <net/rtnetlink.h>
38 #include <net/route.h>
39 #include <net/dsfield.h>
40 #include <net/inet_ecn.h>
41 #include <net/net_namespace.h>
42 #include <net/netns/generic.h>
43 #include <net/vxlan.h>
44 #include <net/protocol.h>
45 #include <net/udp_tunnel.h>
46 #if IS_ENABLED(CONFIG_IPV6)
48 #include <net/addrconf.h>
49 #include <net/ip6_tunnel.h>
50 #include <net/ip6_checksum.h>
53 #define VXLAN_VERSION "0.1"
55 #define PORT_HASH_BITS 8
56 #define PORT_HASH_SIZE (1<<PORT_HASH_BITS)
57 #define VNI_HASH_BITS 10
58 #define VNI_HASH_SIZE (1<<VNI_HASH_BITS)
59 #define FDB_HASH_BITS 8
60 #define FDB_HASH_SIZE (1<<FDB_HASH_BITS)
61 #define FDB_AGE_DEFAULT 300 /* 5 min */
62 #define FDB_AGE_INTERVAL (10 * HZ) /* rescan interval */
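/* Learned FDB entries that stay unused for age_interval seconds
 * (FDB_AGE_DEFAULT unless overridden via IFLA_VXLAN_AGEING) are
 * garbage-collected by vxlan_cleanup(), which rescans the table every
 * FDB_AGE_INTERVAL jiffies while the device is up.
 */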
64 #define VXLAN_N_VID (1u << 24)
65 #define VXLAN_VID_MASK (VXLAN_N_VID - 1)
66 #define VXLAN_HLEN (sizeof(struct udphdr) + sizeof(struct vxlanhdr))
68 #define VXLAN_FLAGS 0x08000000 /* struct vxlanhdr.vx_flags required value. */
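/* Layout used throughout this file: vx_flags must carry only the
 * "valid VNI" bit, i.e. htonl(VXLAN_FLAGS), and the 24-bit VNI sits in
 * the upper bytes of vx_vni. Receive therefore extracts it as
 * ntohl(vx_vni) >> 8, transmit builds it as htonl(vni << 8), and a set
 * low byte in vx_vni or any other flag bit is rejected as a
 * reserved-bits violation in vxlan_udp_encap_recv().
 */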
70 /* VXLAN protocol header */
76 /* UDP port for VXLAN traffic.
77 * The IANA assigned port is 4789, but the Linux default is 8472
78 * for compatibility with early adopters.
80 static unsigned short vxlan_port __read_mostly = 8472;
81 module_param_named(udp_port, vxlan_port, ushort, 0444);
82 MODULE_PARM_DESC(udp_port, "Destination UDP port");
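/* Illustrative usage (not part of this file): the compile-time default
 * can be overridden at module load time, e.g.
 *
 *   modprobe vxlan udp_port=4789
 *
 * and a per-device port can be chosen from userspace with iproute2, e.g.
 *
 *   ip link add vxlan0 type vxlan id 42 group 239.1.1.1 dev eth0 dstport 4789
 *
 * which reaches this driver as IFLA_VXLAN_PORT / vxlan->dst_port below.
 */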
84 static bool log_ecn_error = true;
85 module_param(log_ecn_error, bool, 0644);
86 MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");
88 static int vxlan_net_id;
90 static const u8 all_zeros_mac[ETH_ALEN];
92 /* per-network namespace private data for this module */
94 struct list_head vxlan_list;
95 struct hlist_head sock_list[PORT_HASH_SIZE];
100 struct sockaddr_in sin;
101 struct sockaddr_in6 sin6;
106 union vxlan_addr remote_ip;
110 struct list_head list;
114 /* Forwarding table entry */
116 struct hlist_node hlist; /* linked list of entries */
118 unsigned long updated; /* jiffies */
120 struct list_head remotes;
121 u16 state; /* see ndm_state */
122 u8 flags; /* see ndm_flags */
123 u8 eth_addr[ETH_ALEN];
126 /* Pseudo network device */
128 struct hlist_node hlist; /* vni hash table */
129 struct list_head next; /* vxlan's per namespace list */
130 struct vxlan_sock *vn_sock; /* listening socket */
131 struct net_device *dev;
132 struct net *net; /* netns for packet i/o */
133 struct vxlan_rdst default_dst; /* default destination */
134 union vxlan_addr saddr; /* source address */
136 __u16 port_min; /* source port range */
138 __u8 tos; /* TOS override */
140 u32 flags; /* VXLAN_F_* in vxlan.h */
142 struct work_struct sock_work;
143 struct work_struct igmp_join;
144 struct work_struct igmp_leave;
146 unsigned long age_interval;
147 struct timer_list age_timer;
148 spinlock_t hash_lock;
149 unsigned int addrcnt;
150 unsigned int addrmax;
152 struct hlist_head fdb_head[FDB_HASH_SIZE];
155 /* salt for hash table */
156 static u32 vxlan_salt __read_mostly;
157 static struct workqueue_struct *vxlan_wq;
159 static void vxlan_sock_work(struct work_struct *work);
161 #if IS_ENABLED(CONFIG_IPV6)
163 bool vxlan_addr_equal(const union vxlan_addr *a, const union vxlan_addr *b)
165 if (a->sa.sa_family != b->sa.sa_family)
167 if (a->sa.sa_family == AF_INET6)
168 return ipv6_addr_equal(&a->sin6.sin6_addr, &b->sin6.sin6_addr);
170 return a->sin.sin_addr.s_addr == b->sin.sin_addr.s_addr;
173 static inline bool vxlan_addr_any(const union vxlan_addr *ipa)
175 if (ipa->sa.sa_family == AF_INET6)
176 return ipv6_addr_any(&ipa->sin6.sin6_addr);
178 return ipa->sin.sin_addr.s_addr == htonl(INADDR_ANY);
181 static inline bool vxlan_addr_multicast(const union vxlan_addr *ipa)
183 if (ipa->sa.sa_family == AF_INET6)
184 return ipv6_addr_is_multicast(&ipa->sin6.sin6_addr);
186 return IN_MULTICAST(ntohl(ipa->sin.sin_addr.s_addr));
189 static int vxlan_nla_get_addr(union vxlan_addr *ip, struct nlattr *nla)
191 if (nla_len(nla) >= sizeof(struct in6_addr)) {
192 nla_memcpy(&ip->sin6.sin6_addr, nla, sizeof(struct in6_addr));
193 ip->sa.sa_family = AF_INET6;
195 } else if (nla_len(nla) >= sizeof(__be32)) {
196 ip->sin.sin_addr.s_addr = nla_get_be32(nla);
197 ip->sa.sa_family = AF_INET;
200 return -EAFNOSUPPORT;
204 static int vxlan_nla_put_addr(struct sk_buff *skb, int attr,
205 const union vxlan_addr *ip)
207 if (ip->sa.sa_family == AF_INET6)
208 return nla_put(skb, attr, sizeof(struct in6_addr), &ip->sin6.sin6_addr);
210 return nla_put_be32(skb, attr, ip->sin.sin_addr.s_addr);
213 #else /* !CONFIG_IPV6 */
216 bool vxlan_addr_equal(const union vxlan_addr *a, const union vxlan_addr *b)
218 return a->sin.sin_addr.s_addr == b->sin.sin_addr.s_addr;
221 static inline bool vxlan_addr_any(const union vxlan_addr *ipa)
223 return ipa->sin.sin_addr.s_addr == htonl(INADDR_ANY);
226 static inline bool vxlan_addr_multicast(const union vxlan_addr *ipa)
228 return IN_MULTICAST(ntohl(ipa->sin.sin_addr.s_addr));
231 static int vxlan_nla_get_addr(union vxlan_addr *ip, struct nlattr *nla)
233 if (nla_len(nla) >= sizeof(struct in6_addr)) {
234 return -EAFNOSUPPORT;
235 } else if (nla_len(nla) >= sizeof(__be32)) {
236 ip->sin.sin_addr.s_addr = nla_get_be32(nla);
237 ip->sa.sa_family = AF_INET;
240 return -EAFNOSUPPORT;
244 static int vxlan_nla_put_addr(struct sk_buff *skb, int attr,
245 const union vxlan_addr *ip)
247 return nla_put_be32(skb, attr, ip->sin.sin_addr.s_addr);
251 /* Virtual Network hash table head */
252 static inline struct hlist_head *vni_head(struct vxlan_sock *vs, u32 id)
254 return &vs->vni_list[hash_32(id, VNI_HASH_BITS)];
257 /* Socket hash table head */
258 static inline struct hlist_head *vs_head(struct net *net, __be16 port)
260 struct vxlan_net *vn = net_generic(net, vxlan_net_id);
262 return &vn->sock_list[hash_32(ntohs(port), PORT_HASH_BITS)];
265 /* First remote destination for a forwarding entry.
266 * Guaranteed to be non-NULL because remotes are never deleted.
268 static inline struct vxlan_rdst *first_remote_rcu(struct vxlan_fdb *fdb)
270 return list_entry_rcu(fdb->remotes.next, struct vxlan_rdst, list);
273 static inline struct vxlan_rdst *first_remote_rtnl(struct vxlan_fdb *fdb)
275 return list_first_entry(&fdb->remotes, struct vxlan_rdst, list);
278 /* Find VXLAN socket based on network namespace and UDP port */
279 static struct vxlan_sock *vxlan_find_sock(struct net *net, __be16 port)
281 struct vxlan_sock *vs;
283 hlist_for_each_entry_rcu(vs, vs_head(net, port), hlist) {
284 if (inet_sk(vs->sock->sk)->inet_sport == port)
290 static struct vxlan_dev *vxlan_vs_find_vni(struct vxlan_sock *vs, u32 id)
292 struct vxlan_dev *vxlan;
294 hlist_for_each_entry_rcu(vxlan, vni_head(vs, id), hlist) {
295 if (vxlan->default_dst.remote_vni == id)
302 /* Look up VNI in a per net namespace table */
303 static struct vxlan_dev *vxlan_find_vni(struct net *net, u32 id, __be16 port)
305 struct vxlan_sock *vs;
307 vs = vxlan_find_sock(net, port);
311 return vxlan_vs_find_vni(vs, id);
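/* Receive-side lookup is thus two-level: vxlan_find_sock() selects the
 * vxlan_sock hashed by UDP destination port within the namespace, and
 * vxlan_vs_find_vni() then picks the vxlan_dev whose default remote VNI
 * matches the VNI carried in the packet.
 */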
314 /* Fill in neighbour message in skbuff. */
315 static int vxlan_fdb_info(struct sk_buff *skb, struct vxlan_dev *vxlan,
316 const struct vxlan_fdb *fdb,
317 u32 portid, u32 seq, int type, unsigned int flags,
318 const struct vxlan_rdst *rdst)
320 unsigned long now = jiffies;
321 struct nda_cacheinfo ci;
322 struct nlmsghdr *nlh;
324 bool send_ip, send_eth;
326 nlh = nlmsg_put(skb, portid, seq, type, sizeof(*ndm), flags);
330 ndm = nlmsg_data(nlh);
331 memset(ndm, 0, sizeof(*ndm));
333 send_eth = send_ip = true;
335 if (type == RTM_GETNEIGH) {
336 ndm->ndm_family = AF_INET;
337 send_ip = !vxlan_addr_any(&rdst->remote_ip);
338 send_eth = !is_zero_ether_addr(fdb->eth_addr);
340 ndm->ndm_family = AF_BRIDGE;
341 ndm->ndm_state = fdb->state;
342 ndm->ndm_ifindex = vxlan->dev->ifindex;
343 ndm->ndm_flags = fdb->flags;
344 ndm->ndm_type = RTN_UNICAST;
346 if (send_eth && nla_put(skb, NDA_LLADDR, ETH_ALEN, &fdb->eth_addr))
347 goto nla_put_failure;
349 if (send_ip && vxlan_nla_put_addr(skb, NDA_DST, &rdst->remote_ip))
350 goto nla_put_failure;
352 if (rdst->remote_port && rdst->remote_port != vxlan->dst_port &&
353 nla_put_be16(skb, NDA_PORT, rdst->remote_port))
354 goto nla_put_failure;
355 if (rdst->remote_vni != vxlan->default_dst.remote_vni &&
356 nla_put_u32(skb, NDA_VNI, rdst->remote_vni))
357 goto nla_put_failure;
358 if (rdst->remote_ifindex &&
359 nla_put_u32(skb, NDA_IFINDEX, rdst->remote_ifindex))
360 goto nla_put_failure;
362 ci.ndm_used = jiffies_to_clock_t(now - fdb->used);
363 ci.ndm_confirmed = 0;
364 ci.ndm_updated = jiffies_to_clock_t(now - fdb->updated);
367 if (nla_put(skb, NDA_CACHEINFO, sizeof(ci), &ci))
368 goto nla_put_failure;
370 return nlmsg_end(skb, nlh);
373 nlmsg_cancel(skb, nlh);
377 static inline size_t vxlan_nlmsg_size(void)
379 return NLMSG_ALIGN(sizeof(struct ndmsg))
380 + nla_total_size(ETH_ALEN) /* NDA_LLADDR */
381 + nla_total_size(sizeof(struct in6_addr)) /* NDA_DST */
382 + nla_total_size(sizeof(__be16)) /* NDA_PORT */
383 + nla_total_size(sizeof(__be32)) /* NDA_VNI */
384 + nla_total_size(sizeof(__u32)) /* NDA_IFINDEX */
385 + nla_total_size(sizeof(struct nda_cacheinfo));
388 static void vxlan_fdb_notify(struct vxlan_dev *vxlan, struct vxlan_fdb *fdb,
389 struct vxlan_rdst *rd, int type)
391 struct net *net = dev_net(vxlan->dev);
395 skb = nlmsg_new(vxlan_nlmsg_size(), GFP_ATOMIC);
399 err = vxlan_fdb_info(skb, vxlan, fdb, 0, 0, type, 0, rd);
401 /* -EMSGSIZE implies BUG in vxlan_nlmsg_size() */
402 WARN_ON(err == -EMSGSIZE);
407 rtnl_notify(skb, net, 0, RTNLGRP_NEIGH, NULL, GFP_ATOMIC);
411 rtnl_set_sk_err(net, RTNLGRP_NEIGH, err);
414 static void vxlan_ip_miss(struct net_device *dev, union vxlan_addr *ipa)
416 struct vxlan_dev *vxlan = netdev_priv(dev);
417 struct vxlan_fdb f = {
420 struct vxlan_rdst remote = {
421 .remote_ip = *ipa, /* goes to NDA_DST */
422 .remote_vni = VXLAN_N_VID,
425 vxlan_fdb_notify(vxlan, &f, &remote, RTM_GETNEIGH);
428 static void vxlan_fdb_miss(struct vxlan_dev *vxlan, const u8 eth_addr[ETH_ALEN])
430 struct vxlan_fdb f = {
433 struct vxlan_rdst remote = { };
435 memcpy(f.eth_addr, eth_addr, ETH_ALEN);
437 vxlan_fdb_notify(vxlan, &f, &remote, RTM_GETNEIGH);
440 /* Hash Ethernet address */
441 static u32 eth_hash(const unsigned char *addr)
443 u64 value = get_unaligned((u64 *)addr);
445 /* only want 6 bytes */
451 return hash_64(value, FDB_HASH_BITS);
454 /* Hash chain to use for a given MAC address */

455 static inline struct hlist_head *vxlan_fdb_head(struct vxlan_dev *vxlan,
458 return &vxlan->fdb_head[eth_hash(mac)];
461 /* Look up Ethernet address in forwarding table */
462 static struct vxlan_fdb *__vxlan_find_mac(struct vxlan_dev *vxlan,
465 struct hlist_head *head = vxlan_fdb_head(vxlan, mac);
468 hlist_for_each_entry_rcu(f, head, hlist) {
469 if (ether_addr_equal(mac, f->eth_addr))
476 static struct vxlan_fdb *vxlan_find_mac(struct vxlan_dev *vxlan,
481 f = __vxlan_find_mac(vxlan, mac);
488 /* caller should hold vxlan->hash_lock */
489 static struct vxlan_rdst *vxlan_fdb_find_rdst(struct vxlan_fdb *f,
490 union vxlan_addr *ip, __be16 port,
491 __u32 vni, __u32 ifindex)
493 struct vxlan_rdst *rd;
495 list_for_each_entry(rd, &f->remotes, list) {
496 if (vxlan_addr_equal(&rd->remote_ip, ip) &&
497 rd->remote_port == port &&
498 rd->remote_vni == vni &&
499 rd->remote_ifindex == ifindex)
506 /* Replace destination of unicast mac */
507 static int vxlan_fdb_replace(struct vxlan_fdb *f,
508 union vxlan_addr *ip, __be16 port, __u32 vni, __u32 ifindex)
510 struct vxlan_rdst *rd;
512 rd = vxlan_fdb_find_rdst(f, ip, port, vni, ifindex);
516 rd = list_first_entry_or_null(&f->remotes, struct vxlan_rdst, list);
520 rd->remote_port = port;
521 rd->remote_vni = vni;
522 rd->remote_ifindex = ifindex;
526 /* Add/update destinations for multicast */
527 static int vxlan_fdb_append(struct vxlan_fdb *f,
528 union vxlan_addr *ip, __be16 port, __u32 vni,
529 __u32 ifindex, struct vxlan_rdst **rdp)
531 struct vxlan_rdst *rd;
533 rd = vxlan_fdb_find_rdst(f, ip, port, vni, ifindex);
537 rd = kmalloc(sizeof(*rd), GFP_ATOMIC);
541 rd->remote_port = port;
542 rd->remote_vni = vni;
543 rd->remote_ifindex = ifindex;
545 list_add_tail_rcu(&rd->list, &f->remotes);
551 static struct sk_buff **vxlan_gro_receive(struct sk_buff **head, struct sk_buff *skb)
553 struct sk_buff *p, **pp = NULL;
554 struct vxlanhdr *vh, *vh2;
555 struct ethhdr *eh, *eh2;
556 unsigned int hlen, off_vx, off_eth;
557 const struct packet_offload *ptype;
561 off_vx = skb_gro_offset(skb);
562 hlen = off_vx + sizeof(*vh);
563 vh = skb_gro_header_fast(skb, off_vx);
564 if (skb_gro_header_hard(skb, hlen)) {
565 vh = skb_gro_header_slow(skb, hlen, off_vx);
569 skb_gro_pull(skb, sizeof(struct vxlanhdr)); /* pull vxlan header */
570 skb_gro_postpull_rcsum(skb, vh, sizeof(struct vxlanhdr));
572 off_eth = skb_gro_offset(skb);
573 hlen = off_eth + sizeof(*eh);
574 eh = skb_gro_header_fast(skb, off_eth);
575 if (skb_gro_header_hard(skb, hlen)) {
576 eh = skb_gro_header_slow(skb, hlen, off_eth);
583 for (p = *head; p; p = p->next) {
584 if (!NAPI_GRO_CB(p)->same_flow)
587 vh2 = (struct vxlanhdr *)(p->data + off_vx);
588 eh2 = (struct ethhdr *)(p->data + off_eth);
589 if (vh->vx_vni != vh2->vx_vni || compare_ether_header(eh, eh2)) {
590 NAPI_GRO_CB(p)->same_flow = 0;
598 ptype = gro_find_receive_by_type(type);
604 skb_gro_pull(skb, sizeof(*eh)); /* pull inner eth header */
605 skb_gro_postpull_rcsum(skb, eh, sizeof(*eh));
606 pp = ptype->callbacks.gro_receive(head, skb);
611 NAPI_GRO_CB(skb)->flush |= flush;
616 static int vxlan_gro_complete(struct sk_buff *skb, int nhoff)
619 struct packet_offload *ptype;
621 int vxlan_len = sizeof(struct vxlanhdr) + sizeof(struct ethhdr);
624 eh = (struct ethhdr *)(skb->data + nhoff + sizeof(struct vxlanhdr));
628 ptype = gro_find_complete_by_type(type);
630 err = ptype->callbacks.gro_complete(skb, nhoff + vxlan_len);
636 /* Notify netdevs that UDP port started listening */
637 static void vxlan_notify_add_rx_port(struct vxlan_sock *vs)
639 struct net_device *dev;
640 struct sock *sk = vs->sock->sk;
641 struct net *net = sock_net(sk);
642 sa_family_t sa_family = sk->sk_family;
643 __be16 port = inet_sk(sk)->inet_sport;
646 if (sa_family == AF_INET) {
647 err = udp_add_offload(&vs->udp_offloads);
649 pr_warn("vxlan: udp_add_offload failed with status %d\n", err);
653 for_each_netdev_rcu(net, dev) {
654 if (dev->netdev_ops->ndo_add_vxlan_port)
655 dev->netdev_ops->ndo_add_vxlan_port(dev, sa_family,
661 /* Notify netdevs that UDP port is no longer listening */
662 static void vxlan_notify_del_rx_port(struct vxlan_sock *vs)
664 struct net_device *dev;
665 struct sock *sk = vs->sock->sk;
666 struct net *net = sock_net(sk);
667 sa_family_t sa_family = sk->sk_family;
668 __be16 port = inet_sk(sk)->inet_sport;
671 for_each_netdev_rcu(net, dev) {
672 if (dev->netdev_ops->ndo_del_vxlan_port)
673 dev->netdev_ops->ndo_del_vxlan_port(dev, sa_family,
678 if (sa_family == AF_INET)
679 udp_del_offload(&vs->udp_offloads);
682 /* Add new entry to forwarding table -- assumes lock held */
683 static int vxlan_fdb_create(struct vxlan_dev *vxlan,
684 const u8 *mac, union vxlan_addr *ip,
685 __u16 state, __u16 flags,
686 __be16 port, __u32 vni, __u32 ifindex,
689 struct vxlan_rdst *rd = NULL;
693 f = __vxlan_find_mac(vxlan, mac);
695 if (flags & NLM_F_EXCL) {
696 netdev_dbg(vxlan->dev,
697 "lost race to create %pM\n", mac);
700 if (f->state != state) {
702 f->updated = jiffies;
705 if (f->flags != ndm_flags) {
706 f->flags = ndm_flags;
707 f->updated = jiffies;
710 if ((flags & NLM_F_REPLACE)) {
711 /* Only change unicasts */
712 if (!(is_multicast_ether_addr(f->eth_addr) ||
713 is_zero_ether_addr(f->eth_addr))) {
714 int rc = vxlan_fdb_replace(f, ip, port, vni,
723 if ((flags & NLM_F_APPEND) &&
724 (is_multicast_ether_addr(f->eth_addr) ||
725 is_zero_ether_addr(f->eth_addr))) {
726 int rc = vxlan_fdb_append(f, ip, port, vni, ifindex,
734 if (!(flags & NLM_F_CREATE))
737 if (vxlan->addrmax && vxlan->addrcnt >= vxlan->addrmax)
740 /* Disallow replace to add a multicast entry */
741 if ((flags & NLM_F_REPLACE) &&
742 (is_multicast_ether_addr(mac) || is_zero_ether_addr(mac)))
745 netdev_dbg(vxlan->dev, "add %pM -> %pIS\n", mac, ip);
746 f = kmalloc(sizeof(*f), GFP_ATOMIC);
752 f->flags = ndm_flags;
753 f->updated = f->used = jiffies;
754 INIT_LIST_HEAD(&f->remotes);
755 memcpy(f->eth_addr, mac, ETH_ALEN);
757 vxlan_fdb_append(f, ip, port, vni, ifindex, &rd);
760 hlist_add_head_rcu(&f->hlist,
761 vxlan_fdb_head(vxlan, mac));
766 rd = first_remote_rtnl(f);
767 vxlan_fdb_notify(vxlan, f, rd, RTM_NEWNEIGH);
773 static void vxlan_fdb_free(struct rcu_head *head)
775 struct vxlan_fdb *f = container_of(head, struct vxlan_fdb, rcu);
776 struct vxlan_rdst *rd, *nd;
778 list_for_each_entry_safe(rd, nd, &f->remotes, list)
783 static void vxlan_fdb_destroy(struct vxlan_dev *vxlan, struct vxlan_fdb *f)
785 netdev_dbg(vxlan->dev,
786 "delete %pM\n", f->eth_addr);
789 vxlan_fdb_notify(vxlan, f, first_remote_rtnl(f), RTM_DELNEIGH);
791 hlist_del_rcu(&f->hlist);
792 call_rcu(&f->rcu, vxlan_fdb_free);
795 static int vxlan_fdb_parse(struct nlattr *tb[], struct vxlan_dev *vxlan,
796 union vxlan_addr *ip, __be16 *port, u32 *vni, u32 *ifindex)
798 struct net *net = dev_net(vxlan->dev);
802 err = vxlan_nla_get_addr(ip, tb[NDA_DST]);
806 union vxlan_addr *remote = &vxlan->default_dst.remote_ip;
807 if (remote->sa.sa_family == AF_INET) {
808 ip->sin.sin_addr.s_addr = htonl(INADDR_ANY);
809 ip->sa.sa_family = AF_INET;
810 #if IS_ENABLED(CONFIG_IPV6)
812 ip->sin6.sin6_addr = in6addr_any;
813 ip->sa.sa_family = AF_INET6;
819 if (nla_len(tb[NDA_PORT]) != sizeof(__be16))
821 *port = nla_get_be16(tb[NDA_PORT]);
823 *port = vxlan->dst_port;
827 if (nla_len(tb[NDA_VNI]) != sizeof(u32))
829 *vni = nla_get_u32(tb[NDA_VNI]);
831 *vni = vxlan->default_dst.remote_vni;
834 if (tb[NDA_IFINDEX]) {
835 struct net_device *tdev;
837 if (nla_len(tb[NDA_IFINDEX]) != sizeof(u32))
839 *ifindex = nla_get_u32(tb[NDA_IFINDEX]);
840 tdev = __dev_get_by_index(net, *ifindex);
842 return -EADDRNOTAVAIL;
850 /* Add static entry (via netlink) */
851 static int vxlan_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
852 struct net_device *dev,
853 const unsigned char *addr, u16 flags)
855 struct vxlan_dev *vxlan = netdev_priv(dev);
856 /* struct net *net = dev_net(vxlan->dev); */
862 if (!(ndm->ndm_state & (NUD_PERMANENT|NUD_REACHABLE))) {
863 pr_info("RTM_NEWNEIGH with invalid state %#x\n",
868 if (tb[NDA_DST] == NULL)
871 err = vxlan_fdb_parse(tb, vxlan, &ip, &port, &vni, &ifindex);
875 if (vxlan->default_dst.remote_ip.sa.sa_family != ip.sa.sa_family)
876 return -EAFNOSUPPORT;
878 spin_lock_bh(&vxlan->hash_lock);
879 err = vxlan_fdb_create(vxlan, addr, &ip, ndm->ndm_state, flags,
880 port, vni, ifindex, ndm->ndm_flags);
881 spin_unlock_bh(&vxlan->hash_lock);
886 /* Delete entry (via netlink) */
887 static int vxlan_fdb_delete(struct ndmsg *ndm, struct nlattr *tb[],
888 struct net_device *dev,
889 const unsigned char *addr)
891 struct vxlan_dev *vxlan = netdev_priv(dev);
893 struct vxlan_rdst *rd = NULL;
899 err = vxlan_fdb_parse(tb, vxlan, &ip, &port, &vni, &ifindex);
905 spin_lock_bh(&vxlan->hash_lock);
906 f = vxlan_find_mac(vxlan, addr);
910 if (!vxlan_addr_any(&ip)) {
911 rd = vxlan_fdb_find_rdst(f, &ip, port, vni, ifindex);
918 /* remove a destination if it's not the only one on the list,
919 * otherwise destroy the fdb entry
921 if (rd && !list_is_singular(&f->remotes)) {
922 list_del_rcu(&rd->list);
923 vxlan_fdb_notify(vxlan, f, rd, RTM_DELNEIGH);
928 vxlan_fdb_destroy(vxlan, f);
931 spin_unlock_bh(&vxlan->hash_lock);
936 /* Dump forwarding table */
937 static int vxlan_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb,
938 struct net_device *dev,
939 struct net_device *filter_dev, int idx)
941 struct vxlan_dev *vxlan = netdev_priv(dev);
944 for (h = 0; h < FDB_HASH_SIZE; ++h) {
948 hlist_for_each_entry_rcu(f, &vxlan->fdb_head[h], hlist) {
949 struct vxlan_rdst *rd;
951 if (idx < cb->args[0])
954 list_for_each_entry_rcu(rd, &f->remotes, list) {
955 err = vxlan_fdb_info(skb, vxlan, f,
956 NETLINK_CB(cb->skb).portid,
971 /* Watch incoming packets to learn mapping between Ethernet address
972 * and Tunnel endpoint.
973 * Return true if packet is bogus and should be dropped.
975 static bool vxlan_snoop(struct net_device *dev,
976 union vxlan_addr *src_ip, const u8 *src_mac)
978 struct vxlan_dev *vxlan = netdev_priv(dev);
981 f = vxlan_find_mac(vxlan, src_mac);
983 struct vxlan_rdst *rdst = first_remote_rcu(f);
985 if (likely(vxlan_addr_equal(&rdst->remote_ip, src_ip)))
988 /* Don't migrate static entries, drop packets */
989 if (f->state & NUD_NOARP)
994 "%pM migrated from %pIS to %pIS\n",
995 src_mac, &rdst->remote_ip, src_ip);
997 rdst->remote_ip = *src_ip;
998 f->updated = jiffies;
999 vxlan_fdb_notify(vxlan, f, rdst, RTM_NEWNEIGH);
1001 /* learned new entry */
1002 spin_lock(&vxlan->hash_lock);
1004 /* close off race between vxlan_flush and incoming packets */
1005 if (netif_running(dev))
1006 vxlan_fdb_create(vxlan, src_mac, src_ip,
1008 NLM_F_EXCL|NLM_F_CREATE,
1010 vxlan->default_dst.remote_vni,
1012 spin_unlock(&vxlan->hash_lock);
1018 /* See if multicast group is already in use by other ID */
1019 static bool vxlan_group_used(struct vxlan_net *vn, struct vxlan_dev *dev)
1021 struct vxlan_dev *vxlan;
1023 /* The vxlan_sock is only used by dev; leaving the group has
1024 * no effect on other vxlan devices.
1026 if (atomic_read(&dev->vn_sock->refcnt) == 1)
1029 list_for_each_entry(vxlan, &vn->vxlan_list, next) {
1030 if (!netif_running(vxlan->dev) || vxlan == dev)
1033 if (vxlan->vn_sock != dev->vn_sock)
1036 if (!vxlan_addr_equal(&vxlan->default_dst.remote_ip,
1037 &dev->default_dst.remote_ip))
1040 if (vxlan->default_dst.remote_ifindex !=
1041 dev->default_dst.remote_ifindex)
1050 static void vxlan_sock_hold(struct vxlan_sock *vs)
1052 atomic_inc(&vs->refcnt);
1055 void vxlan_sock_release(struct vxlan_sock *vs)
1057 struct sock *sk = vs->sock->sk;
1058 struct net *net = sock_net(sk);
1059 struct vxlan_net *vn = net_generic(net, vxlan_net_id);
1061 if (!atomic_dec_and_test(&vs->refcnt))
1064 spin_lock(&vn->sock_lock);
1065 hlist_del_rcu(&vs->hlist);
1066 vxlan_notify_del_rx_port(vs);
1067 spin_unlock(&vn->sock_lock);
1069 queue_work(vxlan_wq, &vs->del_work);
1071 EXPORT_SYMBOL_GPL(vxlan_sock_release);
1073 /* Callback to update multicast group membership when first VNI on
1074 * multicast address is brought up.
1075 * Done as workqueue because ip_mc_join_group acquires RTNL.
1077 static void vxlan_igmp_join(struct work_struct *work)
1079 struct vxlan_dev *vxlan = container_of(work, struct vxlan_dev, igmp_join);
1080 struct vxlan_sock *vs = vxlan->vn_sock;
1081 struct sock *sk = vs->sock->sk;
1082 union vxlan_addr *ip = &vxlan->default_dst.remote_ip;
1083 int ifindex = vxlan->default_dst.remote_ifindex;
1086 if (ip->sa.sa_family == AF_INET) {
1087 struct ip_mreqn mreq = {
1088 .imr_multiaddr.s_addr = ip->sin.sin_addr.s_addr,
1089 .imr_ifindex = ifindex,
1092 ip_mc_join_group(sk, &mreq);
1093 #if IS_ENABLED(CONFIG_IPV6)
1095 ipv6_stub->ipv6_sock_mc_join(sk, ifindex,
1096 &ip->sin6.sin6_addr);
1101 vxlan_sock_release(vs);
1102 dev_put(vxlan->dev);
1105 /* Inverse of vxlan_igmp_join when last VNI is brought down */
1106 static void vxlan_igmp_leave(struct work_struct *work)
1108 struct vxlan_dev *vxlan = container_of(work, struct vxlan_dev, igmp_leave);
1109 struct vxlan_sock *vs = vxlan->vn_sock;
1110 struct sock *sk = vs->sock->sk;
1111 union vxlan_addr *ip = &vxlan->default_dst.remote_ip;
1112 int ifindex = vxlan->default_dst.remote_ifindex;
1115 if (ip->sa.sa_family == AF_INET) {
1116 struct ip_mreqn mreq = {
1117 .imr_multiaddr.s_addr = ip->sin.sin_addr.s_addr,
1118 .imr_ifindex = ifindex,
1121 ip_mc_leave_group(sk, &mreq);
1122 #if IS_ENABLED(CONFIG_IPV6)
1124 ipv6_stub->ipv6_sock_mc_drop(sk, ifindex,
1125 &ip->sin6.sin6_addr);
1131 vxlan_sock_release(vs);
1132 dev_put(vxlan->dev);
1135 /* Callback from net/ipv4/udp.c to receive packets */
1136 static int vxlan_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
1138 struct vxlan_sock *vs;
1139 struct vxlanhdr *vxh;
1141 /* Need VXLAN and inner Ethernet header to be present */
1142 if (!pskb_may_pull(skb, VXLAN_HLEN))
1145 /* Return packets with reserved bits set */
1146 vxh = (struct vxlanhdr *)(udp_hdr(skb) + 1);
1147 if (vxh->vx_flags != htonl(VXLAN_FLAGS) ||
1148 (vxh->vx_vni & htonl(0xff))) {
1149 netdev_dbg(skb->dev, "invalid vxlan flags=%#x vni=%#x\n",
1150 ntohl(vxh->vx_flags), ntohl(vxh->vx_vni));
1154 if (iptunnel_pull_header(skb, VXLAN_HLEN, htons(ETH_P_TEB)))
1157 vs = rcu_dereference_sk_user_data(sk);
1161 vs->rcv(vs, skb, vxh->vx_vni);
1165 /* Consume bad packet */
1170 /* Return non vxlan pkt */
1174 static void vxlan_rcv(struct vxlan_sock *vs,
1175 struct sk_buff *skb, __be32 vx_vni)
1177 struct iphdr *oip = NULL;
1178 struct ipv6hdr *oip6 = NULL;
1179 struct vxlan_dev *vxlan;
1180 struct pcpu_sw_netstats *stats;
1181 union vxlan_addr saddr;
1184 union vxlan_addr *remote_ip;
1186 vni = ntohl(vx_vni) >> 8;
1187 /* Is this VNI defined? */
1188 vxlan = vxlan_vs_find_vni(vs, vni);
1192 remote_ip = &vxlan->default_dst.remote_ip;
1193 skb_reset_mac_header(skb);
1194 skb_scrub_packet(skb, !net_eq(vxlan->net, dev_net(vxlan->dev)));
1195 skb->protocol = eth_type_trans(skb, vxlan->dev);
1196 skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
1198 /* Ignore packet loops (and multicast echo) */
1199 if (ether_addr_equal(eth_hdr(skb)->h_source, vxlan->dev->dev_addr))
1202 /* Re-examine inner Ethernet packet */
1203 if (remote_ip->sa.sa_family == AF_INET) {
1205 saddr.sin.sin_addr.s_addr = oip->saddr;
1206 saddr.sa.sa_family = AF_INET;
1207 #if IS_ENABLED(CONFIG_IPV6)
1209 oip6 = ipv6_hdr(skb);
1210 saddr.sin6.sin6_addr = oip6->saddr;
1211 saddr.sa.sa_family = AF_INET6;
1215 if ((vxlan->flags & VXLAN_F_LEARN) &&
1216 vxlan_snoop(skb->dev, &saddr, eth_hdr(skb)->h_source))
1219 skb_reset_network_header(skb);
1222 err = IP6_ECN_decapsulate(oip6, skb);
1224 err = IP_ECN_decapsulate(oip, skb);
1226 if (unlikely(err)) {
1227 if (log_ecn_error) {
1229 net_info_ratelimited("non-ECT from %pI6\n",
1232 net_info_ratelimited("non-ECT from %pI4 with TOS=%#x\n",
1233 &oip->saddr, oip->tos);
1236 ++vxlan->dev->stats.rx_frame_errors;
1237 ++vxlan->dev->stats.rx_errors;
1242 stats = this_cpu_ptr(vxlan->dev->tstats);
1243 u64_stats_update_begin(&stats->syncp);
1244 stats->rx_packets++;
1245 stats->rx_bytes += skb->len;
1246 u64_stats_update_end(&stats->syncp);
1252 /* Consume bad packet */
1256 static int arp_reduce(struct net_device *dev, struct sk_buff *skb)
1258 struct vxlan_dev *vxlan = netdev_priv(dev);
1259 struct arphdr *parp;
1262 struct neighbour *n;
1264 if (dev->flags & IFF_NOARP)
1267 if (!pskb_may_pull(skb, arp_hdr_len(dev))) {
1268 dev->stats.tx_dropped++;
1271 parp = arp_hdr(skb);
1273 if ((parp->ar_hrd != htons(ARPHRD_ETHER) &&
1274 parp->ar_hrd != htons(ARPHRD_IEEE802)) ||
1275 parp->ar_pro != htons(ETH_P_IP) ||
1276 parp->ar_op != htons(ARPOP_REQUEST) ||
1277 parp->ar_hln != dev->addr_len ||
1280 arpptr = (u8 *)parp + sizeof(struct arphdr);
1282 arpptr += dev->addr_len; /* sha */
1283 memcpy(&sip, arpptr, sizeof(sip));
1284 arpptr += sizeof(sip);
1285 arpptr += dev->addr_len; /* tha */
1286 memcpy(&tip, arpptr, sizeof(tip));
1288 if (ipv4_is_loopback(tip) ||
1289 ipv4_is_multicast(tip))
1292 n = neigh_lookup(&arp_tbl, &tip, dev);
1295 struct vxlan_fdb *f;
1296 struct sk_buff *reply;
1298 if (!(n->nud_state & NUD_CONNECTED)) {
1303 f = vxlan_find_mac(vxlan, n->ha);
1304 if (f && vxlan_addr_any(&(first_remote_rcu(f)->remote_ip))) {
1305 /* bridge-local neighbor */
1310 reply = arp_create(ARPOP_REPLY, ETH_P_ARP, sip, dev, tip, sha,
1318 skb_reset_mac_header(reply);
1319 __skb_pull(reply, skb_network_offset(reply));
1320 reply->ip_summed = CHECKSUM_UNNECESSARY;
1321 reply->pkt_type = PACKET_HOST;
1323 if (netif_rx_ni(reply) == NET_RX_DROP)
1324 dev->stats.rx_dropped++;
1325 } else if (vxlan->flags & VXLAN_F_L3MISS) {
1326 union vxlan_addr ipa = {
1327 .sin.sin_addr.s_addr = tip,
1328 .sin.sin_family = AF_INET,
1331 vxlan_ip_miss(dev, &ipa);
1335 return NETDEV_TX_OK;
1338 #if IS_ENABLED(CONFIG_IPV6)
1339 static struct sk_buff *vxlan_na_create(struct sk_buff *request,
1340 struct neighbour *n, bool isrouter)
1342 struct net_device *dev = request->dev;
1343 struct sk_buff *reply;
1344 struct nd_msg *ns, *na;
1345 struct ipv6hdr *pip6;
1347 int na_olen = 8; /* opt hdr + ETH_ALEN for target */
1354 len = LL_RESERVED_SPACE(dev) + sizeof(struct ipv6hdr) +
1355 sizeof(*na) + na_olen + dev->needed_tailroom;
1356 reply = alloc_skb(len, GFP_ATOMIC);
1360 reply->protocol = htons(ETH_P_IPV6);
1362 skb_reserve(reply, LL_RESERVED_SPACE(request->dev));
1363 skb_push(reply, sizeof(struct ethhdr));
1364 skb_set_mac_header(reply, 0);
1366 ns = (struct nd_msg *)skb_transport_header(request);
1368 daddr = eth_hdr(request)->h_source;
1369 ns_olen = request->len - skb_transport_offset(request) - sizeof(*ns);
1370 for (i = 0; i < ns_olen-1; i += (ns->opt[i+1]<<3)) {
1371 if (ns->opt[i] == ND_OPT_SOURCE_LL_ADDR) {
1372 daddr = ns->opt + i + sizeof(struct nd_opt_hdr);
1377 /* Ethernet header */
1378 ether_addr_copy(eth_hdr(reply)->h_dest, daddr);
1379 ether_addr_copy(eth_hdr(reply)->h_source, n->ha);
1380 eth_hdr(reply)->h_proto = htons(ETH_P_IPV6);
1381 reply->protocol = htons(ETH_P_IPV6);
1383 skb_pull(reply, sizeof(struct ethhdr));
1384 skb_set_network_header(reply, 0);
1385 skb_put(reply, sizeof(struct ipv6hdr));
1389 pip6 = ipv6_hdr(reply);
1390 memset(pip6, 0, sizeof(struct ipv6hdr));
1392 pip6->priority = ipv6_hdr(request)->priority;
1393 pip6->nexthdr = IPPROTO_ICMPV6;
1394 pip6->hop_limit = 255;
1395 pip6->daddr = ipv6_hdr(request)->saddr;
1396 pip6->saddr = *(struct in6_addr *)n->primary_key;
1398 skb_pull(reply, sizeof(struct ipv6hdr));
1399 skb_set_transport_header(reply, 0);
1401 na = (struct nd_msg *)skb_put(reply, sizeof(*na) + na_olen);
1403 /* Neighbor Advertisement */
1404 memset(na, 0, sizeof(*na)+na_olen);
1405 na->icmph.icmp6_type = NDISC_NEIGHBOUR_ADVERTISEMENT;
1406 na->icmph.icmp6_router = isrouter;
1407 na->icmph.icmp6_override = 1;
1408 na->icmph.icmp6_solicited = 1;
1409 na->target = ns->target;
1410 ether_addr_copy(&na->opt[2], n->ha);
1411 na->opt[0] = ND_OPT_TARGET_LL_ADDR;
1412 na->opt[1] = na_olen >> 3;
1414 na->icmph.icmp6_cksum = csum_ipv6_magic(&pip6->saddr,
1415 &pip6->daddr, sizeof(*na)+na_olen, IPPROTO_ICMPV6,
1416 csum_partial(na, sizeof(*na)+na_olen, 0));
1418 pip6->payload_len = htons(sizeof(*na)+na_olen);
1420 skb_push(reply, sizeof(struct ipv6hdr));
1422 reply->ip_summed = CHECKSUM_UNNECESSARY;
1427 static int neigh_reduce(struct net_device *dev, struct sk_buff *skb)
1429 struct vxlan_dev *vxlan = netdev_priv(dev);
1431 const struct ipv6hdr *iphdr;
1432 const struct in6_addr *saddr, *daddr;
1433 struct neighbour *n;
1434 struct inet6_dev *in6_dev;
1436 in6_dev = __in6_dev_get(dev);
1440 iphdr = ipv6_hdr(skb);
1441 saddr = &iphdr->saddr;
1442 daddr = &iphdr->daddr;
1444 msg = (struct nd_msg *)skb_transport_header(skb);
1445 if (msg->icmph.icmp6_code != 0 ||
1446 msg->icmph.icmp6_type != NDISC_NEIGHBOUR_SOLICITATION)
1449 if (ipv6_addr_loopback(daddr) ||
1450 ipv6_addr_is_multicast(&msg->target))
1453 n = neigh_lookup(ipv6_stub->nd_tbl, &msg->target, dev);
1456 struct vxlan_fdb *f;
1457 struct sk_buff *reply;
1459 if (!(n->nud_state & NUD_CONNECTED)) {
1464 f = vxlan_find_mac(vxlan, n->ha);
1465 if (f && vxlan_addr_any(&(first_remote_rcu(f)->remote_ip))) {
1466 /* bridge-local neighbor */
1471 reply = vxlan_na_create(skb, n,
1472 !!(f ? f->flags & NTF_ROUTER : 0));
1479 if (netif_rx_ni(reply) == NET_RX_DROP)
1480 dev->stats.rx_dropped++;
1482 } else if (vxlan->flags & VXLAN_F_L3MISS) {
1483 union vxlan_addr ipa = {
1484 .sin6.sin6_addr = msg->target,
1485 .sin6.sin6_family = AF_INET6,
1488 vxlan_ip_miss(dev, &ipa);
1493 return NETDEV_TX_OK;
1497 static bool route_shortcircuit(struct net_device *dev, struct sk_buff *skb)
1499 struct vxlan_dev *vxlan = netdev_priv(dev);
1500 struct neighbour *n;
1502 if (is_multicast_ether_addr(eth_hdr(skb)->h_dest))
1506 switch (ntohs(eth_hdr(skb)->h_proto)) {
1511 if (!pskb_may_pull(skb, sizeof(struct iphdr)))
1514 n = neigh_lookup(&arp_tbl, &pip->daddr, dev);
1515 if (!n && (vxlan->flags & VXLAN_F_L3MISS)) {
1516 union vxlan_addr ipa = {
1517 .sin.sin_addr.s_addr = pip->daddr,
1518 .sin.sin_family = AF_INET,
1521 vxlan_ip_miss(dev, &ipa);
1527 #if IS_ENABLED(CONFIG_IPV6)
1530 struct ipv6hdr *pip6;
1532 if (!pskb_may_pull(skb, sizeof(struct ipv6hdr)))
1534 pip6 = ipv6_hdr(skb);
1535 n = neigh_lookup(ipv6_stub->nd_tbl, &pip6->daddr, dev);
1536 if (!n && (vxlan->flags & VXLAN_F_L3MISS)) {
1537 union vxlan_addr ipa = {
1538 .sin6.sin6_addr = pip6->daddr,
1539 .sin6.sin6_family = AF_INET6,
1542 vxlan_ip_miss(dev, &ipa);
1556 diff = !ether_addr_equal(eth_hdr(skb)->h_dest, n->ha);
1558 memcpy(eth_hdr(skb)->h_source, eth_hdr(skb)->h_dest,
1560 memcpy(eth_hdr(skb)->h_dest, n->ha, dev->addr_len);
1569 #if IS_ENABLED(CONFIG_IPV6)
1570 static int vxlan6_xmit_skb(struct vxlan_sock *vs,
1571 struct dst_entry *dst, struct sk_buff *skb,
1572 struct net_device *dev, struct in6_addr *saddr,
1573 struct in6_addr *daddr, __u8 prio, __u8 ttl,
1574 __be16 src_port, __be16 dst_port, __be32 vni,
1577 struct vxlanhdr *vxh;
1580 bool udp_sum = !udp_get_no_check6_tx(vs->sock->sk);
1582 skb = udp_tunnel_handle_offloads(skb, udp_sum);
1586 skb_scrub_packet(skb, xnet);
1588 min_headroom = LL_RESERVED_SPACE(dst->dev) + dst->header_len
1589 + VXLAN_HLEN + sizeof(struct ipv6hdr)
1590 + (vlan_tx_tag_present(skb) ? VLAN_HLEN : 0);
1592 /* Need space for new headers (invalidates iph ptr) */
1593 err = skb_cow_head(skb, min_headroom);
1597 if (vlan_tx_tag_present(skb)) {
1598 if (WARN_ON(!__vlan_put_tag(skb,
1600 vlan_tx_tag_get(skb))))
1606 vxh = (struct vxlanhdr *) __skb_push(skb, sizeof(*vxh));
1607 vxh->vx_flags = htonl(VXLAN_FLAGS);
1610 skb_set_inner_protocol(skb, htons(ETH_P_TEB));
1612 udp_tunnel6_xmit_skb(vs->sock, dst, skb, dev, saddr, daddr, prio,
1613 ttl, src_port, dst_port);
1618 int vxlan_xmit_skb(struct vxlan_sock *vs,
1619 struct rtable *rt, struct sk_buff *skb,
1620 __be32 src, __be32 dst, __u8 tos, __u8 ttl, __be16 df,
1621 __be16 src_port, __be16 dst_port, __be32 vni, bool xnet)
1623 struct vxlanhdr *vxh;
1626 bool udp_sum = !vs->sock->sk->sk_no_check_tx;
1628 skb = udp_tunnel_handle_offloads(skb, udp_sum);
1632 min_headroom = LL_RESERVED_SPACE(rt->dst.dev) + rt->dst.header_len
1633 + VXLAN_HLEN + sizeof(struct iphdr)
1634 + (vlan_tx_tag_present(skb) ? VLAN_HLEN : 0);
1636 /* Need space for new headers (invalidates iph ptr) */
1637 err = skb_cow_head(skb, min_headroom);
1641 if (vlan_tx_tag_present(skb)) {
1642 if (WARN_ON(!__vlan_put_tag(skb,
1644 vlan_tx_tag_get(skb))))
1650 vxh = (struct vxlanhdr *) __skb_push(skb, sizeof(*vxh));
1651 vxh->vx_flags = htonl(VXLAN_FLAGS);
1654 skb_set_inner_protocol(skb, htons(ETH_P_TEB));
1656 return udp_tunnel_xmit_skb(vs->sock, rt, skb, src, dst, tos,
1657 ttl, df, src_port, dst_port, xnet);
1659 EXPORT_SYMBOL_GPL(vxlan_xmit_skb);
1661 /* Bypass encapsulation if the destination is local */
1662 static void vxlan_encap_bypass(struct sk_buff *skb, struct vxlan_dev *src_vxlan,
1663 struct vxlan_dev *dst_vxlan)
1665 struct pcpu_sw_netstats *tx_stats, *rx_stats;
1666 union vxlan_addr loopback;
1667 union vxlan_addr *remote_ip = &dst_vxlan->default_dst.remote_ip;
1668 struct net_device *dev = skb->dev;
1671 tx_stats = this_cpu_ptr(src_vxlan->dev->tstats);
1672 rx_stats = this_cpu_ptr(dst_vxlan->dev->tstats);
1673 skb->pkt_type = PACKET_HOST;
1674 skb->encapsulation = 0;
1675 skb->dev = dst_vxlan->dev;
1676 __skb_pull(skb, skb_network_offset(skb));
1678 if (remote_ip->sa.sa_family == AF_INET) {
1679 loopback.sin.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
1680 loopback.sa.sa_family = AF_INET;
1681 #if IS_ENABLED(CONFIG_IPV6)
1683 loopback.sin6.sin6_addr = in6addr_loopback;
1684 loopback.sa.sa_family = AF_INET6;
1688 if (dst_vxlan->flags & VXLAN_F_LEARN)
1689 vxlan_snoop(skb->dev, &loopback, eth_hdr(skb)->h_source);
1691 u64_stats_update_begin(&tx_stats->syncp);
1692 tx_stats->tx_packets++;
1693 tx_stats->tx_bytes += len;
1694 u64_stats_update_end(&tx_stats->syncp);
1696 if (netif_rx(skb) == NET_RX_SUCCESS) {
1697 u64_stats_update_begin(&rx_stats->syncp);
1698 rx_stats->rx_packets++;
1699 rx_stats->rx_bytes += len;
1700 u64_stats_update_end(&rx_stats->syncp);
1702 dev->stats.rx_dropped++;
1706 static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
1707 struct vxlan_rdst *rdst, bool did_rsc)
1709 struct vxlan_dev *vxlan = netdev_priv(dev);
1710 struct rtable *rt = NULL;
1711 const struct iphdr *old_iph;
1713 union vxlan_addr *dst;
1714 __be16 src_port = 0, dst_port;
1720 dst_port = rdst->remote_port ? rdst->remote_port : vxlan->dst_port;
1721 vni = rdst->remote_vni;
1722 dst = &rdst->remote_ip;
1724 if (vxlan_addr_any(dst)) {
1726 /* short-circuited back to local bridge */
1727 vxlan_encap_bypass(skb, vxlan, vxlan);
1733 old_iph = ip_hdr(skb);
1736 if (!ttl && vxlan_addr_multicast(dst))
1741 tos = ip_tunnel_get_dsfield(old_iph, skb);
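/* The outer UDP source port comes from a flow hash over the inner
 * headers (udp_flow_src_port()), constrained to the
 * [port_min, port_max] range configured via IFLA_VXLAN_PORT_RANGE, so
 * that ECMP and RSS can spread distinct inner flows across paths and
 * queues.
 */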
1743 src_port = udp_flow_src_port(dev_net(dev), skb, vxlan->port_min,
1744 vxlan->port_max, true);
1746 if (dst->sa.sa_family == AF_INET) {
1747 memset(&fl4, 0, sizeof(fl4));
1748 fl4.flowi4_oif = rdst->remote_ifindex;
1749 fl4.flowi4_tos = RT_TOS(tos);
1750 fl4.daddr = dst->sin.sin_addr.s_addr;
1751 fl4.saddr = vxlan->saddr.sin.sin_addr.s_addr;
1753 rt = ip_route_output_key(vxlan->net, &fl4);
1755 netdev_dbg(dev, "no route to %pI4\n",
1756 &dst->sin.sin_addr.s_addr);
1757 dev->stats.tx_carrier_errors++;
1761 if (rt->dst.dev == dev) {
1762 netdev_dbg(dev, "circular route to %pI4\n",
1763 &dst->sin.sin_addr.s_addr);
1764 dev->stats.collisions++;
1768 /* Bypass encapsulation if the destination is local */
1769 if (rt->rt_flags & RTCF_LOCAL &&
1770 !(rt->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))) {
1771 struct vxlan_dev *dst_vxlan;
1774 dst_vxlan = vxlan_find_vni(vxlan->net, vni, dst_port);
1777 vxlan_encap_bypass(skb, vxlan, dst_vxlan);
1781 tos = ip_tunnel_ecn_encap(tos, old_iph, skb);
1782 ttl = ttl ? : ip4_dst_hoplimit(&rt->dst);
1784 err = vxlan_xmit_skb(vxlan->vn_sock, rt, skb,
1785 fl4.saddr, dst->sin.sin_addr.s_addr,
1786 tos, ttl, df, src_port, dst_port,
1788 !net_eq(vxlan->net, dev_net(vxlan->dev)));
1792 iptunnel_xmit_stats(err, &dev->stats, dev->tstats);
1793 #if IS_ENABLED(CONFIG_IPV6)
1795 struct sock *sk = vxlan->vn_sock->sock->sk;
1796 struct dst_entry *ndst;
1800 memset(&fl6, 0, sizeof(fl6));
1801 fl6.flowi6_oif = rdst->remote_ifindex;
1802 fl6.daddr = dst->sin6.sin6_addr;
1803 fl6.saddr = vxlan->saddr.sin6.sin6_addr;
1804 fl6.flowi6_proto = IPPROTO_UDP;
1806 if (ipv6_stub->ipv6_dst_lookup(sk, &ndst, &fl6)) {
1807 netdev_dbg(dev, "no route to %pI6\n",
1808 &dst->sin6.sin6_addr);
1809 dev->stats.tx_carrier_errors++;
1813 if (ndst->dev == dev) {
1814 netdev_dbg(dev, "circular route to %pI6\n",
1815 &dst->sin6.sin6_addr);
1817 dev->stats.collisions++;
1821 /* Bypass encapsulation if the destination is local */
1822 flags = ((struct rt6_info *)ndst)->rt6i_flags;
1823 if (flags & RTF_LOCAL &&
1824 !(flags & (RTCF_BROADCAST | RTCF_MULTICAST))) {
1825 struct vxlan_dev *dst_vxlan;
1828 dst_vxlan = vxlan_find_vni(vxlan->net, vni, dst_port);
1831 vxlan_encap_bypass(skb, vxlan, dst_vxlan);
1835 ttl = ttl ? : ip6_dst_hoplimit(ndst);
1837 err = vxlan6_xmit_skb(vxlan->vn_sock, ndst, skb,
1838 dev, &fl6.saddr, &fl6.daddr, 0, ttl,
1839 src_port, dst_port, htonl(vni << 8),
1840 !net_eq(vxlan->net, dev_net(vxlan->dev)));
1847 dev->stats.tx_dropped++;
1853 dev->stats.tx_errors++;
1858 /* Transmit local packets over VXLAN
1860 * Outer IP header inherits ECN and DF from inner header.
1861 * Outer UDP destination is the VXLAN assigned port.
1862 * Outer UDP source port is based on a hash of the flow.
1864 static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev)
1866 struct vxlan_dev *vxlan = netdev_priv(dev);
1868 bool did_rsc = false;
1869 struct vxlan_rdst *rdst, *fdst = NULL;
1870 struct vxlan_fdb *f;
1872 skb_reset_mac_header(skb);
1875 if ((vxlan->flags & VXLAN_F_PROXY)) {
1876 if (ntohs(eth->h_proto) == ETH_P_ARP)
1877 return arp_reduce(dev, skb);
1878 #if IS_ENABLED(CONFIG_IPV6)
1879 else if (ntohs(eth->h_proto) == ETH_P_IPV6 &&
1880 pskb_may_pull(skb, sizeof(struct ipv6hdr)
1881 + sizeof(struct nd_msg)) &&
1882 ipv6_hdr(skb)->nexthdr == IPPROTO_ICMPV6) {
1885 msg = (struct nd_msg *)skb_transport_header(skb);
1886 if (msg->icmph.icmp6_code == 0 &&
1887 msg->icmph.icmp6_type == NDISC_NEIGHBOUR_SOLICITATION)
1888 return neigh_reduce(dev, skb);
1893 f = vxlan_find_mac(vxlan, eth->h_dest);
1896 if (f && (f->flags & NTF_ROUTER) && (vxlan->flags & VXLAN_F_RSC) &&
1897 (ntohs(eth->h_proto) == ETH_P_IP ||
1898 ntohs(eth->h_proto) == ETH_P_IPV6)) {
1899 did_rsc = route_shortcircuit(dev, skb);
1901 f = vxlan_find_mac(vxlan, eth->h_dest);
1905 f = vxlan_find_mac(vxlan, all_zeros_mac);
1907 if ((vxlan->flags & VXLAN_F_L2MISS) &&
1908 !is_multicast_ether_addr(eth->h_dest))
1909 vxlan_fdb_miss(vxlan, eth->h_dest);
1911 dev->stats.tx_dropped++;
1913 return NETDEV_TX_OK;
1917 list_for_each_entry_rcu(rdst, &f->remotes, list) {
1918 struct sk_buff *skb1;
1924 skb1 = skb_clone(skb, GFP_ATOMIC);
1926 vxlan_xmit_one(skb1, dev, rdst, did_rsc);
1930 vxlan_xmit_one(skb, dev, fdst, did_rsc);
1933 return NETDEV_TX_OK;
1936 /* Walk the forwarding table and purge stale entries */
1937 static void vxlan_cleanup(unsigned long arg)
1939 struct vxlan_dev *vxlan = (struct vxlan_dev *) arg;
1940 unsigned long next_timer = jiffies + FDB_AGE_INTERVAL;
1943 if (!netif_running(vxlan->dev))
1946 spin_lock_bh(&vxlan->hash_lock);
1947 for (h = 0; h < FDB_HASH_SIZE; ++h) {
1948 struct hlist_node *p, *n;
1949 hlist_for_each_safe(p, n, &vxlan->fdb_head[h]) {
1951 = container_of(p, struct vxlan_fdb, hlist);
1952 unsigned long timeout;
1954 if (f->state & NUD_PERMANENT)
1957 timeout = f->used + vxlan->age_interval * HZ;
1958 if (time_before_eq(timeout, jiffies)) {
1959 netdev_dbg(vxlan->dev,
1960 "garbage collect %pM\n",
1962 f->state = NUD_STALE;
1963 vxlan_fdb_destroy(vxlan, f);
1964 } else if (time_before(timeout, next_timer))
1965 next_timer = timeout;
1968 spin_unlock_bh(&vxlan->hash_lock);
1970 mod_timer(&vxlan->age_timer, next_timer);
1973 static void vxlan_vs_add_dev(struct vxlan_sock *vs, struct vxlan_dev *vxlan)
1975 __u32 vni = vxlan->default_dst.remote_vni;
1977 vxlan->vn_sock = vs;
1978 hlist_add_head_rcu(&vxlan->hlist, vni_head(vs, vni));
1981 /* Setup stats when device is created */
1982 static int vxlan_init(struct net_device *dev)
1984 struct vxlan_dev *vxlan = netdev_priv(dev);
1985 struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id);
1986 struct vxlan_sock *vs;
1988 dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
1992 spin_lock(&vn->sock_lock);
1993 vs = vxlan_find_sock(vxlan->net, vxlan->dst_port);
1995 /* If we have a socket with same port already, reuse it */
1996 atomic_inc(&vs->refcnt);
1997 vxlan_vs_add_dev(vs, vxlan);
1999 /* otherwise make new socket outside of RTNL */
2001 queue_work(vxlan_wq, &vxlan->sock_work);
2003 spin_unlock(&vn->sock_lock);
2008 static void vxlan_fdb_delete_default(struct vxlan_dev *vxlan)
2010 struct vxlan_fdb *f;
2012 spin_lock_bh(&vxlan->hash_lock);
2013 f = __vxlan_find_mac(vxlan, all_zeros_mac);
2015 vxlan_fdb_destroy(vxlan, f);
2016 spin_unlock_bh(&vxlan->hash_lock);
2019 static void vxlan_uninit(struct net_device *dev)
2021 struct vxlan_dev *vxlan = netdev_priv(dev);
2022 struct vxlan_sock *vs = vxlan->vn_sock;
2024 vxlan_fdb_delete_default(vxlan);
2027 vxlan_sock_release(vs);
2028 free_percpu(dev->tstats);
2031 /* Start ageing timer and join group when device is brought up */
2032 static int vxlan_open(struct net_device *dev)
2034 struct vxlan_dev *vxlan = netdev_priv(dev);
2035 struct vxlan_sock *vs = vxlan->vn_sock;
2037 /* socket hasn't been created */
2041 if (vxlan_addr_multicast(&vxlan->default_dst.remote_ip)) {
2042 vxlan_sock_hold(vs);
2044 queue_work(vxlan_wq, &vxlan->igmp_join);
2047 if (vxlan->age_interval)
2048 mod_timer(&vxlan->age_timer, jiffies + FDB_AGE_INTERVAL);
2053 /* Purge the forwarding table */
2054 static void vxlan_flush(struct vxlan_dev *vxlan)
2058 spin_lock_bh(&vxlan->hash_lock);
2059 for (h = 0; h < FDB_HASH_SIZE; ++h) {
2060 struct hlist_node *p, *n;
2061 hlist_for_each_safe(p, n, &vxlan->fdb_head[h]) {
2063 = container_of(p, struct vxlan_fdb, hlist);
2064 /* the all_zeros_mac entry is deleted at vxlan_uninit */
2065 if (!is_zero_ether_addr(f->eth_addr))
2066 vxlan_fdb_destroy(vxlan, f);
2069 spin_unlock_bh(&vxlan->hash_lock);
2072 /* Cleanup timer and forwarding table on shutdown */
2073 static int vxlan_stop(struct net_device *dev)
2075 struct vxlan_dev *vxlan = netdev_priv(dev);
2076 struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id);
2077 struct vxlan_sock *vs = vxlan->vn_sock;
2079 if (vs && vxlan_addr_multicast(&vxlan->default_dst.remote_ip) &&
2080 !vxlan_group_used(vn, vxlan)) {
2081 vxlan_sock_hold(vs);
2083 queue_work(vxlan_wq, &vxlan->igmp_leave);
2086 del_timer_sync(&vxlan->age_timer);
2093 /* Stub, nothing needs to be done. */
2094 static void vxlan_set_multicast_list(struct net_device *dev)
2098 static int vxlan_change_mtu(struct net_device *dev, int new_mtu)
2100 struct vxlan_dev *vxlan = netdev_priv(dev);
2101 struct vxlan_rdst *dst = &vxlan->default_dst;
2102 struct net_device *lowerdev;
2105 lowerdev = __dev_get_by_index(vxlan->net, dst->remote_ifindex);
2106 if (lowerdev == NULL)
2107 return eth_change_mtu(dev, new_mtu);
2109 if (dst->remote_ip.sa.sa_family == AF_INET6)
2110 max_mtu = lowerdev->mtu - VXLAN6_HEADROOM;
2112 max_mtu = lowerdev->mtu - VXLAN_HEADROOM;
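/* Assumption (the constants live in include/net/vxlan.h, not this file):
 * VXLAN_HEADROOM/VXLAN6_HEADROOM cover outer IPv4/IPv6 + UDP + VXLAN
 * headers plus the inner Ethernet header (roughly 50 and 70 bytes), so
 * the device MTU is bounded by the lower device's MTU minus that
 * encapsulation overhead.
 */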
2114 if (new_mtu < 68 || new_mtu > max_mtu)
2121 static const struct net_device_ops vxlan_netdev_ops = {
2122 .ndo_init = vxlan_init,
2123 .ndo_uninit = vxlan_uninit,
2124 .ndo_open = vxlan_open,
2125 .ndo_stop = vxlan_stop,
2126 .ndo_start_xmit = vxlan_xmit,
2127 .ndo_get_stats64 = ip_tunnel_get_stats64,
2128 .ndo_set_rx_mode = vxlan_set_multicast_list,
2129 .ndo_change_mtu = vxlan_change_mtu,
2130 .ndo_validate_addr = eth_validate_addr,
2131 .ndo_set_mac_address = eth_mac_addr,
2132 .ndo_fdb_add = vxlan_fdb_add,
2133 .ndo_fdb_del = vxlan_fdb_delete,
2134 .ndo_fdb_dump = vxlan_fdb_dump,
2137 /* Info for udev that this is a virtual tunnel endpoint */
2138 static struct device_type vxlan_type = {
2142 /* Calls the caller's ndo_add_vxlan_port handler for each
2143 * listening VXLAN UDP port. Callers are expected to
2144 * implement ndo_add_vxlan_port.
2146 void vxlan_get_rx_port(struct net_device *dev)
2148 struct vxlan_sock *vs;
2149 struct net *net = dev_net(dev);
2150 struct vxlan_net *vn = net_generic(net, vxlan_net_id);
2151 sa_family_t sa_family;
2155 spin_lock(&vn->sock_lock);
2156 for (i = 0; i < PORT_HASH_SIZE; ++i) {
2157 hlist_for_each_entry_rcu(vs, &vn->sock_list[i], hlist) {
2158 port = inet_sk(vs->sock->sk)->inet_sport;
2159 sa_family = vs->sock->sk->sk_family;
2160 dev->netdev_ops->ndo_add_vxlan_port(dev, sa_family,
2164 spin_unlock(&vn->sock_lock);
2166 EXPORT_SYMBOL_GPL(vxlan_get_rx_port);
2168 /* Initialize the device structure. */
2169 static void vxlan_setup(struct net_device *dev)
2171 struct vxlan_dev *vxlan = netdev_priv(dev);
2174 eth_hw_addr_random(dev);
2176 if (vxlan->default_dst.remote_ip.sa.sa_family == AF_INET6)
2177 dev->needed_headroom = ETH_HLEN + VXLAN6_HEADROOM;
2179 dev->needed_headroom = ETH_HLEN + VXLAN_HEADROOM;
2181 dev->netdev_ops = &vxlan_netdev_ops;
2182 dev->destructor = free_netdev;
2183 SET_NETDEV_DEVTYPE(dev, &vxlan_type);
2185 dev->tx_queue_len = 0;
2186 dev->features |= NETIF_F_LLTX;
2187 dev->features |= NETIF_F_SG | NETIF_F_HW_CSUM;
2188 dev->features |= NETIF_F_RXCSUM;
2189 dev->features |= NETIF_F_GSO_SOFTWARE;
2191 dev->vlan_features = dev->features;
2192 dev->features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX;
2193 dev->hw_features |= NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_RXCSUM;
2194 dev->hw_features |= NETIF_F_GSO_SOFTWARE;
2195 dev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX;
2196 netif_keep_dst(dev);
2197 dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
2199 INIT_LIST_HEAD(&vxlan->next);
2200 spin_lock_init(&vxlan->hash_lock);
2201 INIT_WORK(&vxlan->igmp_join, vxlan_igmp_join);
2202 INIT_WORK(&vxlan->igmp_leave, vxlan_igmp_leave);
2203 INIT_WORK(&vxlan->sock_work, vxlan_sock_work);
2205 init_timer_deferrable(&vxlan->age_timer);
2206 vxlan->age_timer.function = vxlan_cleanup;
2207 vxlan->age_timer.data = (unsigned long) vxlan;
2209 vxlan->dst_port = htons(vxlan_port);
2213 for (h = 0; h < FDB_HASH_SIZE; ++h)
2214 INIT_HLIST_HEAD(&vxlan->fdb_head[h]);
2217 static const struct nla_policy vxlan_policy[IFLA_VXLAN_MAX + 1] = {
2218 [IFLA_VXLAN_ID] = { .type = NLA_U32 },
2219 [IFLA_VXLAN_GROUP] = { .len = FIELD_SIZEOF(struct iphdr, daddr) },
2220 [IFLA_VXLAN_GROUP6] = { .len = sizeof(struct in6_addr) },
2221 [IFLA_VXLAN_LINK] = { .type = NLA_U32 },
2222 [IFLA_VXLAN_LOCAL] = { .len = FIELD_SIZEOF(struct iphdr, saddr) },
2223 [IFLA_VXLAN_LOCAL6] = { .len = sizeof(struct in6_addr) },
2224 [IFLA_VXLAN_TOS] = { .type = NLA_U8 },
2225 [IFLA_VXLAN_TTL] = { .type = NLA_U8 },
2226 [IFLA_VXLAN_LEARNING] = { .type = NLA_U8 },
2227 [IFLA_VXLAN_AGEING] = { .type = NLA_U32 },
2228 [IFLA_VXLAN_LIMIT] = { .type = NLA_U32 },
2229 [IFLA_VXLAN_PORT_RANGE] = { .len = sizeof(struct ifla_vxlan_port_range) },
2230 [IFLA_VXLAN_PROXY] = { .type = NLA_U8 },
2231 [IFLA_VXLAN_RSC] = { .type = NLA_U8 },
2232 [IFLA_VXLAN_L2MISS] = { .type = NLA_U8 },
2233 [IFLA_VXLAN_L3MISS] = { .type = NLA_U8 },
2234 [IFLA_VXLAN_PORT] = { .type = NLA_U16 },
2237 static int vxlan_validate(struct nlattr *tb[], struct nlattr *data[])
2239 if (tb[IFLA_ADDRESS]) {
2240 if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN) {
2241 pr_debug("invalid link address (not ethernet)\n");
2245 if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS]))) {
2246 pr_debug("invalid all zero ethernet address\n");
2247 return -EADDRNOTAVAIL;
2254 if (data[IFLA_VXLAN_ID]) {
2255 __u32 id = nla_get_u32(data[IFLA_VXLAN_ID]);
2256 if (id >= VXLAN_VID_MASK)
2260 if (data[IFLA_VXLAN_PORT_RANGE]) {
2261 const struct ifla_vxlan_port_range *p
2262 = nla_data(data[IFLA_VXLAN_PORT_RANGE]);
2264 if (ntohs(p->high) < ntohs(p->low)) {
2265 pr_debug("port range %u .. %u not valid\n",
2266 ntohs(p->low), ntohs(p->high));
2274 static void vxlan_get_drvinfo(struct net_device *netdev,
2275 struct ethtool_drvinfo *drvinfo)
2277 strlcpy(drvinfo->version, VXLAN_VERSION, sizeof(drvinfo->version));
2278 strlcpy(drvinfo->driver, "vxlan", sizeof(drvinfo->driver));
2281 static const struct ethtool_ops vxlan_ethtool_ops = {
2282 .get_drvinfo = vxlan_get_drvinfo,
2283 .get_link = ethtool_op_get_link,
2286 static void vxlan_del_work(struct work_struct *work)
2288 struct vxlan_sock *vs = container_of(work, struct vxlan_sock, del_work);
2289 udp_tunnel_sock_release(vs->sock);
2293 static struct socket *vxlan_create_sock(struct net *net, bool ipv6,
2294 __be16 port, u32 flags)
2296 struct socket *sock;
2297 struct udp_port_cfg udp_conf;
2300 memset(&udp_conf, 0, sizeof(udp_conf));
2303 udp_conf.family = AF_INET6;
2304 udp_conf.use_udp6_tx_checksums =
2305 !!(flags & VXLAN_F_UDP_ZERO_CSUM6_TX);
2306 udp_conf.use_udp6_rx_checksums =
2307 !!(flags & VXLAN_F_UDP_ZERO_CSUM6_RX);
2309 udp_conf.family = AF_INET;
2310 udp_conf.local_ip.s_addr = INADDR_ANY;
2311 udp_conf.use_udp_checksums =
2312 !!(flags & VXLAN_F_UDP_CSUM);
2315 udp_conf.local_udp_port = port;
2317 /* Open UDP socket */
2318 err = udp_sock_create(net, &udp_conf, &sock);
2320 return ERR_PTR(err);
2325 /* Create new listen socket if needed */
2326 static struct vxlan_sock *vxlan_socket_create(struct net *net, __be16 port,
2327 vxlan_rcv_t *rcv, void *data,
2330 struct vxlan_net *vn = net_generic(net, vxlan_net_id);
2331 struct vxlan_sock *vs;
2332 struct socket *sock;
2334 bool ipv6 = !!(flags & VXLAN_F_IPV6);
2335 struct udp_tunnel_sock_cfg tunnel_cfg;
2337 vs = kzalloc(sizeof(*vs), GFP_KERNEL);
2339 return ERR_PTR(-ENOMEM);
2341 for (h = 0; h < VNI_HASH_SIZE; ++h)
2342 INIT_HLIST_HEAD(&vs->vni_list[h]);
2344 INIT_WORK(&vs->del_work, vxlan_del_work);
2346 sock = vxlan_create_sock(net, ipv6, port, flags);
2349 return ERR_CAST(sock);
2353 atomic_set(&vs->refcnt, 1);
2357 /* Initialize the vxlan udp offloads structure */
2358 vs->udp_offloads.port = port;
2359 vs->udp_offloads.callbacks.gro_receive = vxlan_gro_receive;
2360 vs->udp_offloads.callbacks.gro_complete = vxlan_gro_complete;
2362 spin_lock(&vn->sock_lock);
2363 hlist_add_head_rcu(&vs->hlist, vs_head(net, port));
2364 vxlan_notify_add_rx_port(vs);
2365 spin_unlock(&vn->sock_lock);
2367 /* Mark socket as an encapsulation socket. */
2368 tunnel_cfg.sk_user_data = vs;
2369 tunnel_cfg.encap_type = 1;
2370 tunnel_cfg.encap_rcv = vxlan_udp_encap_recv;
2371 tunnel_cfg.encap_destroy = NULL;
2373 setup_udp_tunnel_sock(net, sock, &tunnel_cfg);
2378 struct vxlan_sock *vxlan_sock_add(struct net *net, __be16 port,
2379 vxlan_rcv_t *rcv, void *data,
2380 bool no_share, u32 flags)
2382 struct vxlan_net *vn = net_generic(net, vxlan_net_id);
2383 struct vxlan_sock *vs;
2385 vs = vxlan_socket_create(net, port, rcv, data, flags);
2389 if (no_share) /* Return error if sharing is not allowed. */
2392 spin_lock(&vn->sock_lock);
2393 vs = vxlan_find_sock(net, port);
2396 atomic_inc(&vs->refcnt);
2398 vs = ERR_PTR(-EBUSY);
2400 spin_unlock(&vn->sock_lock);
2403 vs = ERR_PTR(-EINVAL);
2407 EXPORT_SYMBOL_GPL(vxlan_sock_add);
2409 /* Scheduled at device creation to bind to a socket */
2410 static void vxlan_sock_work(struct work_struct *work)
2412 struct vxlan_dev *vxlan = container_of(work, struct vxlan_dev, sock_work);
2413 struct net *net = vxlan->net;
2414 struct vxlan_net *vn = net_generic(net, vxlan_net_id);
2415 __be16 port = vxlan->dst_port;
2416 struct vxlan_sock *nvs;
2418 nvs = vxlan_sock_add(net, port, vxlan_rcv, NULL, false, vxlan->flags);
2419 spin_lock(&vn->sock_lock);
2421 vxlan_vs_add_dev(nvs, vxlan);
2422 spin_unlock(&vn->sock_lock);
2424 dev_put(vxlan->dev);
static int vxlan_newlink(struct net *net, struct net_device *dev,
			 struct nlattr *tb[], struct nlattr *data[])
{
	struct vxlan_net *vn = net_generic(net, vxlan_net_id);
	struct vxlan_dev *vxlan = netdev_priv(dev);
	struct vxlan_rdst *dst = &vxlan->default_dst;
	__u32 vni;
	int err;
	bool use_ipv6 = false;

	if (!data[IFLA_VXLAN_ID])
		return -EINVAL;

	vxlan->net = dev_net(dev);

	vni = nla_get_u32(data[IFLA_VXLAN_ID]);
	dst->remote_vni = vni;

	/* Unless IPv6 is explicitly requested, assume IPv4 */
	dst->remote_ip.sa.sa_family = AF_INET;
	if (data[IFLA_VXLAN_GROUP]) {
		dst->remote_ip.sin.sin_addr.s_addr = nla_get_be32(data[IFLA_VXLAN_GROUP]);
	} else if (data[IFLA_VXLAN_GROUP6]) {
		if (!IS_ENABLED(CONFIG_IPV6))
			return -EPFNOSUPPORT;

		nla_memcpy(&dst->remote_ip.sin6.sin6_addr, data[IFLA_VXLAN_GROUP6],
			   sizeof(struct in6_addr));
		dst->remote_ip.sa.sa_family = AF_INET6;
		use_ipv6 = true;
	}

	if (data[IFLA_VXLAN_LOCAL]) {
		vxlan->saddr.sin.sin_addr.s_addr = nla_get_be32(data[IFLA_VXLAN_LOCAL]);
		vxlan->saddr.sa.sa_family = AF_INET;
	} else if (data[IFLA_VXLAN_LOCAL6]) {
		if (!IS_ENABLED(CONFIG_IPV6))
			return -EPFNOSUPPORT;

		/* TODO: respect scope id */
		nla_memcpy(&vxlan->saddr.sin6.sin6_addr, data[IFLA_VXLAN_LOCAL6],
			   sizeof(struct in6_addr));
		vxlan->saddr.sa.sa_family = AF_INET6;
		use_ipv6 = true;
	}

	if (data[IFLA_VXLAN_LINK] &&
	    (dst->remote_ifindex = nla_get_u32(data[IFLA_VXLAN_LINK]))) {
		struct net_device *lowerdev
			 = __dev_get_by_index(net, dst->remote_ifindex);

		if (!lowerdev) {
			pr_info("ifindex %d does not exist\n", dst->remote_ifindex);
			return -ENODEV;
		}

#if IS_ENABLED(CONFIG_IPV6)
		if (use_ipv6) {
			struct inet6_dev *idev = __in6_dev_get(lowerdev);
			if (idev && idev->cnf.disable_ipv6) {
				pr_info("IPv6 is disabled via sysctl\n");
				return -EPERM;
			}
			vxlan->flags |= VXLAN_F_IPV6;
		}
#endif

		if (!tb[IFLA_MTU])
			dev->mtu = lowerdev->mtu - (use_ipv6 ? VXLAN6_HEADROOM : VXLAN_HEADROOM);

		dev->needed_headroom = lowerdev->hard_header_len +
				       (use_ipv6 ? VXLAN6_HEADROOM : VXLAN_HEADROOM);
	} else if (use_ipv6)
		vxlan->flags |= VXLAN_F_IPV6;

	if (data[IFLA_VXLAN_TOS])
		vxlan->tos = nla_get_u8(data[IFLA_VXLAN_TOS]);

	if (data[IFLA_VXLAN_TTL])
		vxlan->ttl = nla_get_u8(data[IFLA_VXLAN_TTL]);

	if (!data[IFLA_VXLAN_LEARNING] || nla_get_u8(data[IFLA_VXLAN_LEARNING]))
		vxlan->flags |= VXLAN_F_LEARN;

	if (data[IFLA_VXLAN_AGEING])
		vxlan->age_interval = nla_get_u32(data[IFLA_VXLAN_AGEING]);
	else
		vxlan->age_interval = FDB_AGE_DEFAULT;

	if (data[IFLA_VXLAN_PROXY] && nla_get_u8(data[IFLA_VXLAN_PROXY]))
		vxlan->flags |= VXLAN_F_PROXY;

	if (data[IFLA_VXLAN_RSC] && nla_get_u8(data[IFLA_VXLAN_RSC]))
		vxlan->flags |= VXLAN_F_RSC;

	if (data[IFLA_VXLAN_L2MISS] && nla_get_u8(data[IFLA_VXLAN_L2MISS]))
		vxlan->flags |= VXLAN_F_L2MISS;

	if (data[IFLA_VXLAN_L3MISS] && nla_get_u8(data[IFLA_VXLAN_L3MISS]))
		vxlan->flags |= VXLAN_F_L3MISS;

	if (data[IFLA_VXLAN_LIMIT])
		vxlan->addrmax = nla_get_u32(data[IFLA_VXLAN_LIMIT]);

	if (data[IFLA_VXLAN_PORT_RANGE]) {
		const struct ifla_vxlan_port_range *p
			= nla_data(data[IFLA_VXLAN_PORT_RANGE]);
		vxlan->port_min = ntohs(p->low);
		vxlan->port_max = ntohs(p->high);
	}

	if (data[IFLA_VXLAN_PORT])
		vxlan->dst_port = nla_get_be16(data[IFLA_VXLAN_PORT]);

	if (data[IFLA_VXLAN_UDP_CSUM] && nla_get_u8(data[IFLA_VXLAN_UDP_CSUM]))
		vxlan->flags |= VXLAN_F_UDP_CSUM;

	if (data[IFLA_VXLAN_UDP_ZERO_CSUM6_TX] &&
	    nla_get_u8(data[IFLA_VXLAN_UDP_ZERO_CSUM6_TX]))
		vxlan->flags |= VXLAN_F_UDP_ZERO_CSUM6_TX;

	if (data[IFLA_VXLAN_UDP_ZERO_CSUM6_RX] &&
	    nla_get_u8(data[IFLA_VXLAN_UDP_ZERO_CSUM6_RX]))
		vxlan->flags |= VXLAN_F_UDP_ZERO_CSUM6_RX;

	if (vxlan_find_vni(net, vni, vxlan->dst_port)) {
		pr_info("duplicate VNI %u\n", vni);
		return -EEXIST;
	}

	dev->ethtool_ops = &vxlan_ethtool_ops;

	/* create an fdb entry for a valid default destination */
	if (!vxlan_addr_any(&vxlan->default_dst.remote_ip)) {
		err = vxlan_fdb_create(vxlan, all_zeros_mac,
				       &vxlan->default_dst.remote_ip,
				       NUD_REACHABLE|NUD_PERMANENT,
				       NLM_F_EXCL|NLM_F_CREATE,
				       vxlan->dst_port,
				       vxlan->default_dst.remote_vni,
				       vxlan->default_dst.remote_ifindex,
				       NTF_SELF);
		if (err)
			return err;
	}

	err = register_netdevice(dev);
	if (err) {
		vxlan_fdb_delete_default(vxlan);
		return err;
	}

	list_add(&vxlan->next, &vn->vxlan_list);

	return 0;
}
static void vxlan_dellink(struct net_device *dev, struct list_head *head)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id);

	spin_lock(&vn->sock_lock);
	if (!hlist_unhashed(&vxlan->hlist))
		hlist_del_rcu(&vxlan->hlist);
	spin_unlock(&vn->sock_lock);

	list_del(&vxlan->next);
	unregister_netdevice_queue(dev, head);
}
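/* Upper bound on the netlink attribute space used by vxlan_fill_info();
 * every attribute put there must be accounted for here.
 */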
static size_t vxlan_get_size(const struct net_device *dev)
{
	return nla_total_size(sizeof(__u32)) +	/* IFLA_VXLAN_ID */
		nla_total_size(sizeof(struct in6_addr)) + /* IFLA_VXLAN_GROUP{6} */
		nla_total_size(sizeof(__u32)) +	/* IFLA_VXLAN_LINK */
		nla_total_size(sizeof(struct in6_addr)) + /* IFLA_VXLAN_LOCAL{6} */
		nla_total_size(sizeof(__u8)) +	/* IFLA_VXLAN_TTL */
		nla_total_size(sizeof(__u8)) +	/* IFLA_VXLAN_TOS */
		nla_total_size(sizeof(__u8)) +	/* IFLA_VXLAN_LEARNING */
		nla_total_size(sizeof(__u8)) +	/* IFLA_VXLAN_PROXY */
		nla_total_size(sizeof(__u8)) +	/* IFLA_VXLAN_RSC */
		nla_total_size(sizeof(__u8)) +	/* IFLA_VXLAN_L2MISS */
		nla_total_size(sizeof(__u8)) +	/* IFLA_VXLAN_L3MISS */
		nla_total_size(sizeof(__u32)) +	/* IFLA_VXLAN_AGEING */
		nla_total_size(sizeof(__u32)) +	/* IFLA_VXLAN_LIMIT */
		nla_total_size(sizeof(struct ifla_vxlan_port_range)) + /* IFLA_VXLAN_PORT_RANGE */
		nla_total_size(sizeof(__be16)) + /* IFLA_VXLAN_PORT */
		nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_UDP_CSUM */
		nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_UDP_ZERO_CSUM6_TX */
		nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_UDP_ZERO_CSUM6_RX */
		0;
}
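/* Report the current device configuration back to userspace (e.g. via
 * "ip -d link show"); mirrors the attribute set accepted by vxlan_newlink().
 */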
static int vxlan_fill_info(struct sk_buff *skb, const struct net_device *dev)
{
	const struct vxlan_dev *vxlan = netdev_priv(dev);
	const struct vxlan_rdst *dst = &vxlan->default_dst;
	struct ifla_vxlan_port_range ports = {
		.low =  htons(vxlan->port_min),
		.high = htons(vxlan->port_max),
	};

	if (nla_put_u32(skb, IFLA_VXLAN_ID, dst->remote_vni))
		goto nla_put_failure;

	if (!vxlan_addr_any(&dst->remote_ip)) {
		if (dst->remote_ip.sa.sa_family == AF_INET) {
			if (nla_put_be32(skb, IFLA_VXLAN_GROUP,
					 dst->remote_ip.sin.sin_addr.s_addr))
				goto nla_put_failure;
#if IS_ENABLED(CONFIG_IPV6)
		} else {
			if (nla_put(skb, IFLA_VXLAN_GROUP6, sizeof(struct in6_addr),
				    &dst->remote_ip.sin6.sin6_addr))
				goto nla_put_failure;
#endif
		}
	}

	if (dst->remote_ifindex && nla_put_u32(skb, IFLA_VXLAN_LINK, dst->remote_ifindex))
		goto nla_put_failure;

	if (!vxlan_addr_any(&vxlan->saddr)) {
		if (vxlan->saddr.sa.sa_family == AF_INET) {
			if (nla_put_be32(skb, IFLA_VXLAN_LOCAL,
					 vxlan->saddr.sin.sin_addr.s_addr))
				goto nla_put_failure;
#if IS_ENABLED(CONFIG_IPV6)
		} else {
			if (nla_put(skb, IFLA_VXLAN_LOCAL6, sizeof(struct in6_addr),
				    &vxlan->saddr.sin6.sin6_addr))
				goto nla_put_failure;
#endif
		}
	}

	if (nla_put_u8(skb, IFLA_VXLAN_TTL, vxlan->ttl) ||
	    nla_put_u8(skb, IFLA_VXLAN_TOS, vxlan->tos) ||
	    nla_put_u8(skb, IFLA_VXLAN_LEARNING,
			!!(vxlan->flags & VXLAN_F_LEARN)) ||
	    nla_put_u8(skb, IFLA_VXLAN_PROXY,
			!!(vxlan->flags & VXLAN_F_PROXY)) ||
	    nla_put_u8(skb, IFLA_VXLAN_RSC, !!(vxlan->flags & VXLAN_F_RSC)) ||
	    nla_put_u8(skb, IFLA_VXLAN_L2MISS,
			!!(vxlan->flags & VXLAN_F_L2MISS)) ||
	    nla_put_u8(skb, IFLA_VXLAN_L3MISS,
			!!(vxlan->flags & VXLAN_F_L3MISS)) ||
	    nla_put_u32(skb, IFLA_VXLAN_AGEING, vxlan->age_interval) ||
	    nla_put_u32(skb, IFLA_VXLAN_LIMIT, vxlan->addrmax) ||
	    nla_put_be16(skb, IFLA_VXLAN_PORT, vxlan->dst_port) ||
	    nla_put_u8(skb, IFLA_VXLAN_UDP_CSUM,
			!!(vxlan->flags & VXLAN_F_UDP_CSUM)) ||
	    nla_put_u8(skb, IFLA_VXLAN_UDP_ZERO_CSUM6_TX,
			!!(vxlan->flags & VXLAN_F_UDP_ZERO_CSUM6_TX)) ||
	    nla_put_u8(skb, IFLA_VXLAN_UDP_ZERO_CSUM6_RX,
			!!(vxlan->flags & VXLAN_F_UDP_ZERO_CSUM6_RX)))
		goto nla_put_failure;

	if (nla_put(skb, IFLA_VXLAN_PORT_RANGE, sizeof(ports), &ports))
		goto nla_put_failure;

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}
static struct rtnl_link_ops vxlan_link_ops __read_mostly = {
	.kind		= "vxlan",
	.maxtype	= IFLA_VXLAN_MAX,
	.policy		= vxlan_policy,
	.priv_size	= sizeof(struct vxlan_dev),
	.setup		= vxlan_setup,
	.validate	= vxlan_validate,
	.newlink	= vxlan_newlink,
	.dellink	= vxlan_dellink,
	.get_size	= vxlan_get_size,
	.fill_info	= vxlan_fill_info,
};
static void vxlan_handle_lowerdev_unregister(struct vxlan_net *vn,
					     struct net_device *dev)
{
	struct vxlan_dev *vxlan, *next;
	LIST_HEAD(list_kill);

	list_for_each_entry_safe(vxlan, next, &vn->vxlan_list, next) {
		struct vxlan_rdst *dst = &vxlan->default_dst;

		/* In case we created vxlan device with carrier
		 * and we lose the carrier due to module unload
		 * we also need to remove vxlan device. In other
		 * cases, it's not necessary and remote_ifindex
		 * is 0 here, so no matches.
		 */
		if (dst->remote_ifindex == dev->ifindex)
			vxlan_dellink(vxlan->dev, &list_kill);
	}

	unregister_netdevice_many(&list_kill);
}
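/* netdevice notifier: only NETDEV_UNREGISTER of a lower device is of
 * interest here; all other events fall through to NOTIFY_DONE.
 */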
static int vxlan_lowerdev_event(struct notifier_block *unused,
				unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct vxlan_net *vn = net_generic(dev_net(dev), vxlan_net_id);

	if (event == NETDEV_UNREGISTER)
		vxlan_handle_lowerdev_unregister(vn, dev);

	return NOTIFY_DONE;
}

static struct notifier_block vxlan_notifier_block __read_mostly = {
	.notifier_call = vxlan_lowerdev_event,
};
static __net_init int vxlan_init_net(struct net *net)
{
	struct vxlan_net *vn = net_generic(net, vxlan_net_id);
	unsigned int h;

	INIT_LIST_HEAD(&vn->vxlan_list);
	spin_lock_init(&vn->sock_lock);

	for (h = 0; h < PORT_HASH_SIZE; ++h)
		INIT_HLIST_HEAD(&vn->sock_list[h]);
	return 0;
}
static void __net_exit vxlan_exit_net(struct net *net)
{
	struct vxlan_net *vn = net_generic(net, vxlan_net_id);
	struct vxlan_dev *vxlan, *next;
	struct net_device *dev, *aux;
	LIST_HEAD(list);

	rtnl_lock();
	for_each_netdev_safe(net, dev, aux)
		if (dev->rtnl_link_ops == &vxlan_link_ops)
			unregister_netdevice_queue(dev, &list);

	list_for_each_entry_safe(vxlan, next, &vn->vxlan_list, next) {
		/* If vxlan->dev is in the same netns, it has already been added
		 * to the list by the previous loop.
		 */
		if (!net_eq(dev_net(vxlan->dev), net))
			unregister_netdevice_queue(vxlan->dev, &list);
	}

	unregister_netdevice_many(&list);
	rtnl_unlock();
}
static struct pernet_operations vxlan_net_ops = {
	.init = vxlan_init_net,
	.exit = vxlan_exit_net,
	.id   = &vxlan_net_id,
	.size = sizeof(struct vxlan_net),
};
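/* Module init: allocate the vxlan workqueue, seed the FDB hash salt, then
 * register the per-netns ops, the lower-device notifier and finally the
 * rtnl_link ops; failures unwind in reverse order below.
 */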
static int __init vxlan_init_module(void)
{
	int rc;

	vxlan_wq = alloc_workqueue("vxlan", 0, 0);
	if (!vxlan_wq)
		return -ENOMEM;

	get_random_bytes(&vxlan_salt, sizeof(vxlan_salt));

	rc = register_pernet_subsys(&vxlan_net_ops);
	if (rc)
		goto out1;
	rc = register_netdevice_notifier(&vxlan_notifier_block);
	if (rc)
		goto out2;
	rc = rtnl_link_register(&vxlan_link_ops);
	if (rc)
		goto out3;
	return 0;
out3:
	unregister_netdevice_notifier(&vxlan_notifier_block);
out2:
	unregister_pernet_subsys(&vxlan_net_ops);
out1:
	destroy_workqueue(vxlan_wq);
	return rc;
}
late_initcall(vxlan_init_module);
static void __exit vxlan_cleanup_module(void)
{
	rtnl_link_unregister(&vxlan_link_ops);
	unregister_netdevice_notifier(&vxlan_notifier_block);
	destroy_workqueue(vxlan_wq);
	unregister_pernet_subsys(&vxlan_net_ops);
	/* rcu_barrier() is called by netns */
}
module_exit(vxlan_cleanup_module);
MODULE_LICENSE("GPL");
MODULE_VERSION(VXLAN_VERSION);
MODULE_AUTHOR("Stephen Hemminger <stephen@networkplumber.org>");
MODULE_DESCRIPTION("Driver for VXLAN encapsulated traffic");
MODULE_ALIAS_RTNL_LINK("vxlan");