/*
 * VXLAN: Virtual eXtensible Local Area Network
 *
 * Copyright (c) 2012-2013 Vyatta Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/skbuff.h>
#include <linux/rculist.h>
#include <linux/netdevice.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/udp.h>
#include <linux/igmp.h>
#include <linux/etherdevice.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/hash.h>
#include <linux/ethtool.h>
#include <net/arp.h>
#include <net/ndisc.h>
#include <net/ip.h>
#include <net/ip_tunnels.h>
#include <net/icmp.h>
#include <net/udp.h>
#include <net/rtnetlink.h>
#include <net/route.h>
#include <net/dsfield.h>
#include <net/inet_ecn.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <net/vxlan.h>
#include <net/protocol.h>
#if IS_ENABLED(CONFIG_IPV6)
#include <net/ipv6.h>
#include <net/addrconf.h>
#include <net/ip6_tunnel.h>
#include <net/ip6_checksum.h>
#endif

#define VXLAN_VERSION	"0.1"

#define PORT_HASH_BITS	8
#define PORT_HASH_SIZE	(1<<PORT_HASH_BITS)
#define VNI_HASH_BITS	10
#define VNI_HASH_SIZE	(1<<VNI_HASH_BITS)
#define FDB_HASH_BITS	8
#define FDB_HASH_SIZE	(1<<FDB_HASH_BITS)
#define FDB_AGE_DEFAULT 300 /* 5 min */
#define FDB_AGE_INTERVAL (10 * HZ)	/* rescan interval */

#define VXLAN_N_VID	(1u << 24)
#define VXLAN_VID_MASK	(VXLAN_N_VID - 1)
#define VXLAN_HLEN (sizeof(struct udphdr) + sizeof(struct vxlanhdr))

#define VXLAN_FLAGS 0x08000000	/* struct vxlanhdr.vx_flags required value. */
/* VXLAN protocol header */
struct vxlanhdr {
	__be32 vx_flags;
	__be32 vx_vni;
};
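/* On the wire (RFC 7348) each VXLAN packet is an outer IP/UDP datagram
 * carrying this 8-byte header followed by the inner Ethernet frame:
 *
 *	outer IP | outer UDP | vxlanhdr { vx_flags, vx_vni } | inner frame
 *
 * vx_flags carries the mandatory I flag (0x08000000 in host order, see
 * VXLAN_FLAGS above); vx_vni holds the 24-bit VNI in its upper three
 * bytes, so the receive path recovers it with ntohl(vx_vni) >> 8 and the
 * transmit path builds it with htonl(vni << 8).
 */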
/* UDP port for VXLAN traffic.
 * The IANA assigned port is 4789, but the Linux default is 8472
 * for compatibility with early adopters.
 */
static unsigned short vxlan_port __read_mostly = 8472;
module_param_named(udp_port, vxlan_port, ushort, 0444);
MODULE_PARM_DESC(udp_port, "Destination UDP port");
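/* The default can be overridden at load time, e.g.
 * "modprobe vxlan udp_port=4789" to use the IANA-assigned port instead.
 * The 0444 permission makes the parameter read-only once loaded.
 */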
static bool log_ecn_error = true;
module_param(log_ecn_error, bool, 0644);
MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");

static int vxlan_net_id;

static const u8 all_zeros_mac[ETH_ALEN];
/* per-network namespace private data for this module */
struct vxlan_net {
	struct list_head  vxlan_list;
	struct hlist_head sock_list[PORT_HASH_SIZE];
	spinlock_t	  sock_lock;
};
union vxlan_addr {
	struct sockaddr_in sin;
	struct sockaddr_in6 sin6;
	struct sockaddr sa;
};

struct vxlan_rdst {
	union vxlan_addr remote_ip;
	__be16		 remote_port;
	u32		 remote_vni;
	u32		 remote_ifindex;
	struct list_head list;
	struct rcu_head	 rcu;
};
/* Forwarding table entry */
struct vxlan_fdb {
	struct hlist_node hlist;	/* linked list of entries */
	struct rcu_head	  rcu;
	unsigned long	  updated;	/* jiffies */
	unsigned long	  used;
	struct list_head  remotes;
	u16		  state;	/* see ndm_state */
	u8		  flags;	/* see ndm_flags */
	u8		  eth_addr[ETH_ALEN];
};
/* Pseudo network device */
struct vxlan_dev {
	struct hlist_node hlist;	/* vni hash table */
	struct list_head  next;		/* vxlan's per namespace list */
	struct vxlan_sock *vn_sock;	/* listening socket */
	struct net_device *dev;
	struct vxlan_rdst default_dst;	/* default destination */
	union vxlan_addr  saddr;	/* source address */
	__be16		  dst_port;
	__u16		  port_min;	/* source port range */
	__u16		  port_max;
	__u8		  tos;		/* TOS override */
	__u8		  ttl;
	u32		  flags;	/* VXLAN_F_* below */

	struct work_struct sock_work;
	struct work_struct igmp_join;
	struct work_struct igmp_leave;

	unsigned long	  age_interval;
	struct timer_list age_timer;
	spinlock_t	  hash_lock;
	unsigned int	  addrcnt;
	unsigned int	  addrmax;

	struct hlist_head fdb_head[FDB_HASH_SIZE];
};
#define VXLAN_F_LEARN	0x01
#define VXLAN_F_PROXY	0x02
#define VXLAN_F_RSC	0x04
#define VXLAN_F_L2MISS	0x08
#define VXLAN_F_L3MISS	0x10
#define VXLAN_F_IPV6	0x20 /* internal flag */
/* salt for hash table */
static u32 vxlan_salt __read_mostly;
static struct workqueue_struct *vxlan_wq;

static void vxlan_sock_work(struct work_struct *work);
#if IS_ENABLED(CONFIG_IPV6)
static inline
bool vxlan_addr_equal(const union vxlan_addr *a, const union vxlan_addr *b)
{
	if (a->sa.sa_family != b->sa.sa_family)
		return false;
	if (a->sa.sa_family == AF_INET6)
		return ipv6_addr_equal(&a->sin6.sin6_addr, &b->sin6.sin6_addr);
	else
		return a->sin.sin_addr.s_addr == b->sin.sin_addr.s_addr;
}

static inline bool vxlan_addr_any(const union vxlan_addr *ipa)
{
	if (ipa->sa.sa_family == AF_INET6)
		return ipv6_addr_any(&ipa->sin6.sin6_addr);
	else
		return ipa->sin.sin_addr.s_addr == htonl(INADDR_ANY);
}

static inline bool vxlan_addr_multicast(const union vxlan_addr *ipa)
{
	if (ipa->sa.sa_family == AF_INET6)
		return ipv6_addr_is_multicast(&ipa->sin6.sin6_addr);
	else
		return IN_MULTICAST(ntohl(ipa->sin.sin_addr.s_addr));
}

static int vxlan_nla_get_addr(union vxlan_addr *ip, struct nlattr *nla)
{
	if (nla_len(nla) >= sizeof(struct in6_addr)) {
		nla_memcpy(&ip->sin6.sin6_addr, nla, sizeof(struct in6_addr));
		ip->sa.sa_family = AF_INET6;
		return 0;
	} else if (nla_len(nla) >= sizeof(__be32)) {
		ip->sin.sin_addr.s_addr = nla_get_be32(nla);
		ip->sa.sa_family = AF_INET;
		return 0;
	} else {
		return -EAFNOSUPPORT;
	}
}

static int vxlan_nla_put_addr(struct sk_buff *skb, int attr,
			      const union vxlan_addr *ip)
{
	if (ip->sa.sa_family == AF_INET6)
		return nla_put(skb, attr, sizeof(struct in6_addr), &ip->sin6.sin6_addr);
	else
		return nla_put_be32(skb, attr, ip->sin.sin_addr.s_addr);
}
#else /* !CONFIG_IPV6 */

static inline
bool vxlan_addr_equal(const union vxlan_addr *a, const union vxlan_addr *b)
{
	return a->sin.sin_addr.s_addr == b->sin.sin_addr.s_addr;
}

static inline bool vxlan_addr_any(const union vxlan_addr *ipa)
{
	return ipa->sin.sin_addr.s_addr == htonl(INADDR_ANY);
}

static inline bool vxlan_addr_multicast(const union vxlan_addr *ipa)
{
	return IN_MULTICAST(ntohl(ipa->sin.sin_addr.s_addr));
}

static int vxlan_nla_get_addr(union vxlan_addr *ip, struct nlattr *nla)
{
	if (nla_len(nla) >= sizeof(struct in6_addr)) {
		return -EAFNOSUPPORT;
	} else if (nla_len(nla) >= sizeof(__be32)) {
		ip->sin.sin_addr.s_addr = nla_get_be32(nla);
		ip->sa.sa_family = AF_INET;
		return 0;
	} else {
		return -EAFNOSUPPORT;
	}
}

static int vxlan_nla_put_addr(struct sk_buff *skb, int attr,
			      const union vxlan_addr *ip)
{
	return nla_put_be32(skb, attr, ip->sin.sin_addr.s_addr);
}
#endif
/* Virtual Network hash table head */
static inline struct hlist_head *vni_head(struct vxlan_sock *vs, u32 id)
{
	return &vs->vni_list[hash_32(id, VNI_HASH_BITS)];
}

/* Socket hash table head */
static inline struct hlist_head *vs_head(struct net *net, __be16 port)
{
	struct vxlan_net *vn = net_generic(net, vxlan_net_id);

	return &vn->sock_list[hash_32(ntohs(port), PORT_HASH_BITS)];
}
/* First remote destination for a forwarding entry.
 * Guaranteed to be non-NULL because remotes are never deleted.
 */
static inline struct vxlan_rdst *first_remote_rcu(struct vxlan_fdb *fdb)
{
	return list_entry_rcu(fdb->remotes.next, struct vxlan_rdst, list);
}

static inline struct vxlan_rdst *first_remote_rtnl(struct vxlan_fdb *fdb)
{
	return list_first_entry(&fdb->remotes, struct vxlan_rdst, list);
}
/* Find VXLAN socket based on network namespace and UDP port */
static struct vxlan_sock *vxlan_find_sock(struct net *net, __be16 port)
{
	struct vxlan_sock *vs;

	hlist_for_each_entry_rcu(vs, vs_head(net, port), hlist) {
		if (inet_sk(vs->sock->sk)->inet_sport == port)
			return vs;
	}
	return NULL;
}
static struct vxlan_dev *vxlan_vs_find_vni(struct vxlan_sock *vs, u32 id)
{
	struct vxlan_dev *vxlan;

	hlist_for_each_entry_rcu(vxlan, vni_head(vs, id), hlist) {
		if (vxlan->default_dst.remote_vni == id)
			return vxlan;
	}

	return NULL;
}

/* Look up VNI in a per net namespace table */
static struct vxlan_dev *vxlan_find_vni(struct net *net, u32 id, __be16 port)
{
	struct vxlan_sock *vs;

	vs = vxlan_find_sock(net, port);
	if (!vs)
		return NULL;

	return vxlan_vs_find_vni(vs, id);
}
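/* Receive lookups are two-stage: the UDP destination port selects a
 * vxlan_sock from the per-namespace sock_list hash, then the VNI parsed
 * from the VXLAN header selects the vxlan_dev from that socket's
 * vni_list hash. Both walks run under RCU, so the fast path is lockless.
 */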
/* Fill in neighbour message in skbuff. */
static int vxlan_fdb_info(struct sk_buff *skb, struct vxlan_dev *vxlan,
			  const struct vxlan_fdb *fdb,
			  u32 portid, u32 seq, int type, unsigned int flags,
			  const struct vxlan_rdst *rdst)
{
	unsigned long now = jiffies;
	struct nda_cacheinfo ci;
	struct nlmsghdr *nlh;
	struct ndmsg *ndm;
	bool send_ip, send_eth;

	nlh = nlmsg_put(skb, portid, seq, type, sizeof(*ndm), flags);
	if (nlh == NULL)
		return -EMSGSIZE;

	ndm = nlmsg_data(nlh);
	memset(ndm, 0, sizeof(*ndm));

	send_eth = send_ip = true;

	if (type == RTM_GETNEIGH) {
		ndm->ndm_family	= AF_INET;
		send_ip = !vxlan_addr_any(&rdst->remote_ip);
		send_eth = !is_zero_ether_addr(fdb->eth_addr);
	} else
		ndm->ndm_family	= AF_BRIDGE;
	ndm->ndm_state = fdb->state;
	ndm->ndm_ifindex = vxlan->dev->ifindex;
	ndm->ndm_flags = fdb->flags;
	ndm->ndm_type = NDA_DST;

	if (send_eth && nla_put(skb, NDA_LLADDR, ETH_ALEN, &fdb->eth_addr))
		goto nla_put_failure;

	if (send_ip && vxlan_nla_put_addr(skb, NDA_DST, &rdst->remote_ip))
		goto nla_put_failure;

	if (rdst->remote_port && rdst->remote_port != vxlan->dst_port &&
	    nla_put_be16(skb, NDA_PORT, rdst->remote_port))
		goto nla_put_failure;
	if (rdst->remote_vni != vxlan->default_dst.remote_vni &&
	    nla_put_u32(skb, NDA_VNI, rdst->remote_vni))
		goto nla_put_failure;
	if (rdst->remote_ifindex &&
	    nla_put_u32(skb, NDA_IFINDEX, rdst->remote_ifindex))
		goto nla_put_failure;

	ci.ndm_used	 = jiffies_to_clock_t(now - fdb->used);
	ci.ndm_confirmed = 0;
	ci.ndm_updated	 = jiffies_to_clock_t(now - fdb->updated);
	ci.ndm_refcnt	 = 0;

	if (nla_put(skb, NDA_CACHEINFO, sizeof(ci), &ci))
		goto nla_put_failure;

	return nlmsg_end(skb, nlh);

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}
static inline size_t vxlan_nlmsg_size(void)
{
	return NLMSG_ALIGN(sizeof(struct ndmsg))
		+ nla_total_size(ETH_ALEN) /* NDA_LLADDR */
		+ nla_total_size(sizeof(struct in6_addr)) /* NDA_DST */
		+ nla_total_size(sizeof(__be16)) /* NDA_PORT */
		+ nla_total_size(sizeof(__be32)) /* NDA_VNI */
		+ nla_total_size(sizeof(__u32)) /* NDA_IFINDEX */
		+ nla_total_size(sizeof(struct nda_cacheinfo));
}
static void vxlan_fdb_notify(struct vxlan_dev *vxlan,
			     struct vxlan_fdb *fdb, int type)
{
	struct net *net = dev_net(vxlan->dev);
	struct sk_buff *skb;
	int err = -ENOBUFS;

	skb = nlmsg_new(vxlan_nlmsg_size(), GFP_ATOMIC);
	if (skb == NULL)
		goto errout;

	err = vxlan_fdb_info(skb, vxlan, fdb, 0, 0, type, 0,
			     first_remote_rtnl(fdb));
	if (err < 0) {
		/* -EMSGSIZE implies BUG in vxlan_nlmsg_size() */
		WARN_ON(err == -EMSGSIZE);
		kfree_skb(skb);
		goto errout;
	}

	rtnl_notify(skb, net, 0, RTNLGRP_NEIGH, NULL, GFP_ATOMIC);
	return;
errout:
	if (err < 0)
		rtnl_set_sk_err(net, RTNLGRP_NEIGH, err);
}
static void vxlan_ip_miss(struct net_device *dev, union vxlan_addr *ipa)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	struct vxlan_fdb f = {
		.state = NUD_STALE,
	};
	struct vxlan_rdst remote = {
		.remote_ip = *ipa, /* goes to NDA_DST */
		.remote_vni = VXLAN_N_VID,
	};

	INIT_LIST_HEAD(&f.remotes);
	list_add_rcu(&remote.list, &f.remotes);

	vxlan_fdb_notify(vxlan, &f, RTM_GETNEIGH);
}

static void vxlan_fdb_miss(struct vxlan_dev *vxlan, const u8 eth_addr[ETH_ALEN])
{
	struct vxlan_fdb f = {
		.state = NUD_STALE,
	};

	INIT_LIST_HEAD(&f.remotes);
	memcpy(f.eth_addr, eth_addr, ETH_ALEN);

	vxlan_fdb_notify(vxlan, &f, RTM_GETNEIGH);
}
/* Hash Ethernet address */
static u32 eth_hash(const unsigned char *addr)
{
	u64 value = get_unaligned((u64 *)addr);

	/* only want 6 bytes */
#ifdef __BIG_ENDIAN
	value >>= 16;
#else
	value <<= 16;
#endif
	return hash_64(value, FDB_HASH_BITS);
}

/* Hash chain to use given mac address */
static inline struct hlist_head *vxlan_fdb_head(struct vxlan_dev *vxlan,
						const u8 *mac)
{
	return &vxlan->fdb_head[eth_hash(mac)];
}
/* Look up Ethernet address in forwarding table */
static struct vxlan_fdb *__vxlan_find_mac(struct vxlan_dev *vxlan,
					  const u8 *mac)
{
	struct hlist_head *head = vxlan_fdb_head(vxlan, mac);
	struct vxlan_fdb *f;

	hlist_for_each_entry_rcu(f, head, hlist) {
		if (ether_addr_equal(mac, f->eth_addr))
			return f;
	}

	return NULL;
}

static struct vxlan_fdb *vxlan_find_mac(struct vxlan_dev *vxlan,
					const u8 *mac)
{
	struct vxlan_fdb *f;

	f = __vxlan_find_mac(vxlan, mac);
	if (f)
		f->used = jiffies;

	return f;
}
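/* __vxlan_find_mac() is the raw hash lookup; vxlan_find_mac() additionally
 * refreshes f->used, so the latter is the right call on forwarding paths
 * where a hit should keep the entry from being aged out by vxlan_cleanup().
 */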
/* caller should hold vxlan->hash_lock */
static struct vxlan_rdst *vxlan_fdb_find_rdst(struct vxlan_fdb *f,
					      union vxlan_addr *ip, __be16 port,
					      __u32 vni, __u32 ifindex)
{
	struct vxlan_rdst *rd;

	list_for_each_entry(rd, &f->remotes, list) {
		if (vxlan_addr_equal(&rd->remote_ip, ip) &&
		    rd->remote_port == port &&
		    rd->remote_vni == vni &&
		    rd->remote_ifindex == ifindex)
			return rd;
	}

	return NULL;
}
/* Replace destination of unicast mac */
static int vxlan_fdb_replace(struct vxlan_fdb *f,
			     union vxlan_addr *ip, __be16 port, __u32 vni, __u32 ifindex)
{
	struct vxlan_rdst *rd;

	rd = vxlan_fdb_find_rdst(f, ip, port, vni, ifindex);
	if (rd)
		return 0;

	rd = list_first_entry_or_null(&f->remotes, struct vxlan_rdst, list);
	if (!rd)
		return 0;
	rd->remote_ip = *ip;
	rd->remote_port = port;
	rd->remote_vni = vni;
	rd->remote_ifindex = ifindex;
	return 1;
}
/* Add/update destinations for multicast */
static int vxlan_fdb_append(struct vxlan_fdb *f,
			    union vxlan_addr *ip, __be16 port, __u32 vni, __u32 ifindex)
{
	struct vxlan_rdst *rd;

	rd = vxlan_fdb_find_rdst(f, ip, port, vni, ifindex);
	if (rd)
		return 0;

	rd = kmalloc(sizeof(*rd), GFP_ATOMIC);
	if (rd == NULL)
		return -ENOBUFS;
	rd->remote_ip = *ip;
	rd->remote_port = port;
	rd->remote_vni = vni;
	rd->remote_ifindex = ifindex;

	list_add_tail_rcu(&rd->list, &f->remotes);

	return 1;
}
static struct sk_buff **vxlan_gro_receive(struct sk_buff **head, struct sk_buff *skb)
{
	struct sk_buff *p, **pp = NULL;
	struct vxlanhdr *vh, *vh2;
	struct ethhdr *eh, *eh2;
	unsigned int hlen, off_vx, off_eth;
	const struct packet_offload *ptype;
	__be16 type;
	int flush = 1;

	off_vx = skb_gro_offset(skb);
	hlen = off_vx + sizeof(*vh);
	vh = skb_gro_header_fast(skb, off_vx);
	if (skb_gro_header_hard(skb, hlen)) {
		vh = skb_gro_header_slow(skb, hlen, off_vx);
		if (unlikely(!vh))
			goto out;
	}
	skb_gro_pull(skb, sizeof(struct vxlanhdr)); /* pull vxlan header */

	off_eth = skb_gro_offset(skb);
	hlen = off_eth + sizeof(*eh);
	eh = skb_gro_header_fast(skb, off_eth);
	if (skb_gro_header_hard(skb, hlen)) {
		eh = skb_gro_header_slow(skb, hlen, off_eth);
		if (unlikely(!eh))
			goto out;
	}

	flush = 0;

	for (p = *head; p; p = p->next) {
		if (!NAPI_GRO_CB(p)->same_flow)
			continue;

		vh2 = (struct vxlanhdr *)(p->data + off_vx);
		eh2 = (struct ethhdr *)(p->data + off_eth);
		if (vh->vx_vni != vh2->vx_vni || compare_ether_header(eh, eh2)) {
			NAPI_GRO_CB(p)->same_flow = 0;
			continue;
		}
	}

	type = eh->h_proto;

	rcu_read_lock();
	ptype = gro_find_receive_by_type(type);
	if (ptype == NULL) {
		flush = 1;
		goto out_unlock;
	}

	skb_gro_pull(skb, sizeof(*eh)); /* pull inner eth header */
	pp = ptype->callbacks.gro_receive(head, skb);

out_unlock:
	rcu_read_unlock();
out:
	NAPI_GRO_CB(skb)->flush |= flush;

	return pp;
}
static int vxlan_gro_complete(struct sk_buff *skb, int nhoff)
{
	struct ethhdr *eh;
	struct packet_offload *ptype;
	__be16 type;
	int vxlan_len = sizeof(struct vxlanhdr) + sizeof(struct ethhdr);
	int err = -ENOSYS;

	eh = (struct ethhdr *)(skb->data + nhoff + sizeof(struct vxlanhdr));
	type = eh->h_proto;

	rcu_read_lock();
	ptype = gro_find_complete_by_type(type);
	if (ptype != NULL)
		err = ptype->callbacks.gro_complete(skb, nhoff + vxlan_len);

	rcu_read_unlock();
	return err;
}
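/* The GRO pair above lets UDP-encapsulated VXLAN flows be aggregated before
 * decapsulation: gro_receive matches candidates on both the VNI and the
 * inner Ethernet header and then hands the inner payload to that protocol's
 * own GRO callback; gro_complete fixes up the merged super-packet starting
 * at nhoff + sizeof(vxlanhdr) + sizeof(ethhdr).
 */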
/* Notify netdevs that UDP port started listening */
static void vxlan_notify_add_rx_port(struct vxlan_sock *vs)
{
	struct net_device *dev;
	struct sock *sk = vs->sock->sk;
	struct net *net = sock_net(sk);
	sa_family_t sa_family = sk->sk_family;
	__be16 port = inet_sk(sk)->inet_sport;
	int err;

	if (sa_family == AF_INET) {
		err = udp_add_offload(&vs->udp_offloads);
		if (err)
			pr_warn("vxlan: udp_add_offload failed with status %d\n", err);
	}

	rcu_read_lock();
	for_each_netdev_rcu(net, dev) {
		if (dev->netdev_ops->ndo_add_vxlan_port)
			dev->netdev_ops->ndo_add_vxlan_port(dev, sa_family,
							    port);
	}
	rcu_read_unlock();
}
/* Notify netdevs that UDP port is no longer listening */
static void vxlan_notify_del_rx_port(struct vxlan_sock *vs)
{
	struct net_device *dev;
	struct sock *sk = vs->sock->sk;
	struct net *net = sock_net(sk);
	sa_family_t sa_family = sk->sk_family;
	__be16 port = inet_sk(sk)->inet_sport;

	rcu_read_lock();
	for_each_netdev_rcu(net, dev) {
		if (dev->netdev_ops->ndo_del_vxlan_port)
			dev->netdev_ops->ndo_del_vxlan_port(dev, sa_family,
							    port);
	}
	rcu_read_unlock();

	if (sa_family == AF_INET)
		udp_del_offload(&vs->udp_offloads);
}
/* Add new entry to forwarding table -- assumes lock held */
static int vxlan_fdb_create(struct vxlan_dev *vxlan,
			    const u8 *mac, union vxlan_addr *ip,
			    __u16 state, __u16 flags,
			    __be16 port, __u32 vni, __u32 ifindex,
			    __u8 ndm_flags)
{
	struct vxlan_fdb *f;
	int notify = 0;

	f = __vxlan_find_mac(vxlan, mac);
	if (f) {
		if (flags & NLM_F_EXCL) {
			netdev_dbg(vxlan->dev,
				   "lost race to create %pM\n", mac);
			return -EEXIST;
		}
		if (f->state != state) {
			f->state = state;
			f->updated = jiffies;
			notify = 1;
		}
		if (f->flags != ndm_flags) {
			f->flags = ndm_flags;
			f->updated = jiffies;
			notify = 1;
		}
		if ((flags & NLM_F_REPLACE)) {
			/* Only change unicasts */
			if (!(is_multicast_ether_addr(f->eth_addr) ||
			      is_zero_ether_addr(f->eth_addr))) {
				int rc = vxlan_fdb_replace(f, ip, port, vni,
							   ifindex);
				if (rc < 0)
					return rc;
				notify |= rc;
			} else
				return -EOPNOTSUPP;
		}
		if ((flags & NLM_F_APPEND) &&
		    (is_multicast_ether_addr(f->eth_addr) ||
		     is_zero_ether_addr(f->eth_addr))) {
			int rc = vxlan_fdb_append(f, ip, port, vni, ifindex);
			if (rc < 0)
				return rc;
			notify |= rc;
		}
	} else {
		if (!(flags & NLM_F_CREATE))
			return -ENOENT;

		if (vxlan->addrmax && vxlan->addrcnt >= vxlan->addrmax)
			return -ENOSPC;

		/* Disallow replace to add a multicast entry */
		if ((flags & NLM_F_REPLACE) &&
		    (is_multicast_ether_addr(mac) || is_zero_ether_addr(mac)))
			return -EOPNOTSUPP;

		netdev_dbg(vxlan->dev, "add %pM -> %pIS\n", mac, ip);
		f = kmalloc(sizeof(*f), GFP_ATOMIC);
		if (!f)
			return -ENOMEM;

		notify = 1;
		f->state = state;
		f->flags = ndm_flags;
		f->updated = f->used = jiffies;
		INIT_LIST_HEAD(&f->remotes);
		memcpy(f->eth_addr, mac, ETH_ALEN);

		vxlan_fdb_append(f, ip, port, vni, ifindex);

		++vxlan->addrcnt;
		hlist_add_head_rcu(&f->hlist,
				   vxlan_fdb_head(vxlan, mac));
	}

	if (notify)
		vxlan_fdb_notify(vxlan, f, RTM_NEWNEIGH);

	return 0;
}
static void vxlan_fdb_free(struct rcu_head *head)
{
	struct vxlan_fdb *f = container_of(head, struct vxlan_fdb, rcu);
	struct vxlan_rdst *rd, *nd;

	list_for_each_entry_safe(rd, nd, &f->remotes, list)
		kfree(rd);
	kfree(f);
}

static void vxlan_fdb_destroy(struct vxlan_dev *vxlan, struct vxlan_fdb *f)
{
	netdev_dbg(vxlan->dev,
		   "delete %pM\n", f->eth_addr);

	--vxlan->addrcnt;
	vxlan_fdb_notify(vxlan, f, RTM_DELNEIGH);

	hlist_del_rcu(&f->hlist);
	call_rcu(&f->rcu, vxlan_fdb_free);
}
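/* Destruction only unlinks the entry; the memory (and its remotes list) is
 * reclaimed via call_rcu(), so concurrent RCU readers walking the hash
 * chain or the remotes list never see freed memory.
 */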
static int vxlan_fdb_parse(struct nlattr *tb[], struct vxlan_dev *vxlan,
			   union vxlan_addr *ip, __be16 *port, u32 *vni, u32 *ifindex)
{
	struct net *net = dev_net(vxlan->dev);
	int err;

	if (tb[NDA_DST]) {
		err = vxlan_nla_get_addr(ip, tb[NDA_DST]);
		if (err)
			return err;
	} else {
		union vxlan_addr *remote = &vxlan->default_dst.remote_ip;
		if (remote->sa.sa_family == AF_INET) {
			ip->sin.sin_addr.s_addr = htonl(INADDR_ANY);
			ip->sa.sa_family = AF_INET;
#if IS_ENABLED(CONFIG_IPV6)
		} else {
			ip->sin6.sin6_addr = in6addr_any;
			ip->sa.sa_family = AF_INET6;
#endif
		}
	}

	if (tb[NDA_PORT]) {
		if (nla_len(tb[NDA_PORT]) != sizeof(__be16))
			return -EINVAL;
		*port = nla_get_be16(tb[NDA_PORT]);
	} else {
		*port = vxlan->dst_port;
	}

	if (tb[NDA_VNI]) {
		if (nla_len(tb[NDA_VNI]) != sizeof(u32))
			return -EINVAL;
		*vni = nla_get_u32(tb[NDA_VNI]);
	} else {
		*vni = vxlan->default_dst.remote_vni;
	}

	if (tb[NDA_IFINDEX]) {
		struct net_device *tdev;

		if (nla_len(tb[NDA_IFINDEX]) != sizeof(u32))
			return -EINVAL;
		*ifindex = nla_get_u32(tb[NDA_IFINDEX]);
		tdev = __dev_get_by_index(net, *ifindex);
		if (!tdev)
			return -EADDRNOTAVAIL;
	} else {
		*ifindex = 0;
	}

	return 0;
}
/* Add static entry (via netlink) */
static int vxlan_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
			 struct net_device *dev,
			 const unsigned char *addr, u16 flags)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	/* struct net *net = dev_net(vxlan->dev); */
	union vxlan_addr ip;
	__be16 port;
	u32 vni, ifindex;
	int err;

	if (!(ndm->ndm_state & (NUD_PERMANENT|NUD_REACHABLE))) {
		pr_info("RTM_NEWNEIGH with invalid state %#x\n",
			ndm->ndm_state);
		return -EINVAL;
	}

	if (tb[NDA_DST] == NULL)
		return -EINVAL;

	err = vxlan_fdb_parse(tb, vxlan, &ip, &port, &vni, &ifindex);
	if (err)
		return err;

	spin_lock_bh(&vxlan->hash_lock);
	err = vxlan_fdb_create(vxlan, addr, &ip, ndm->ndm_state, flags,
			       port, vni, ifindex, ndm->ndm_flags);
	spin_unlock_bh(&vxlan->hash_lock);

	return err;
}
/* Delete entry (via netlink) */
static int vxlan_fdb_delete(struct ndmsg *ndm, struct nlattr *tb[],
			    struct net_device *dev,
			    const unsigned char *addr)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	struct vxlan_fdb *f;
	struct vxlan_rdst *rd = NULL;
	union vxlan_addr ip;
	__be16 port;
	u32 vni, ifindex;
	int err;

	err = vxlan_fdb_parse(tb, vxlan, &ip, &port, &vni, &ifindex);
	if (err)
		return err;

	err = -ENOENT;

	spin_lock_bh(&vxlan->hash_lock);
	f = vxlan_find_mac(vxlan, addr);
	if (!f)
		goto out;

	if (!vxlan_addr_any(&ip)) {
		rd = vxlan_fdb_find_rdst(f, &ip, port, vni, ifindex);
		if (!rd)
			goto out;
	}

	err = 0;

	/* remove a destination if it's not the only one on the list,
	 * otherwise destroy the fdb entry
	 */
	if (rd && !list_is_singular(&f->remotes)) {
		list_del_rcu(&rd->list);
		kfree_rcu(rd, rcu);
		goto out;
	}

	vxlan_fdb_destroy(vxlan, f);

out:
	spin_unlock_bh(&vxlan->hash_lock);

	return err;
}
/* Dump forwarding table */
static int vxlan_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb,
			  struct net_device *dev, int idx)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	unsigned int h;

	for (h = 0; h < FDB_HASH_SIZE; ++h) {
		struct vxlan_fdb *f;
		int err;

		hlist_for_each_entry_rcu(f, &vxlan->fdb_head[h], hlist) {
			struct vxlan_rdst *rd;

			if (idx < cb->args[0])
				goto skip;

			list_for_each_entry_rcu(rd, &f->remotes, list) {
				err = vxlan_fdb_info(skb, vxlan, f,
						     NETLINK_CB(cb->skb).portid,
						     cb->nlh->nlmsg_seq,
						     RTM_NEWNEIGH,
						     NLM_F_MULTI, rd);
				if (err < 0)
					goto out;
			}
skip:
			++idx;
		}
	}
out:
	return idx;
}
/* Watch incoming packets to learn mapping between Ethernet address
 * and Tunnel endpoint.
 * Return true if packet is bogus and should be dropped.
 */
static bool vxlan_snoop(struct net_device *dev,
			union vxlan_addr *src_ip, const u8 *src_mac)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	struct vxlan_fdb *f;

	f = vxlan_find_mac(vxlan, src_mac);
	if (likely(f)) {
		struct vxlan_rdst *rdst = first_remote_rcu(f);

		if (likely(vxlan_addr_equal(&rdst->remote_ip, src_ip)))
			return false;

		/* Don't migrate static entries, drop packets */
		if (f->state & NUD_NOARP)
			return true;

		if (net_ratelimit())
			netdev_info(dev,
				    "%pM migrated from %pIS to %pIS\n",
				    src_mac, &rdst->remote_ip, src_ip);

		rdst->remote_ip = *src_ip;
		f->updated = jiffies;
		vxlan_fdb_notify(vxlan, f, RTM_NEWNEIGH);
	} else {
		/* learned new entry */
		spin_lock(&vxlan->hash_lock);

		/* close off race between vxlan_flush and incoming packets */
		if (netif_running(dev))
			vxlan_fdb_create(vxlan, src_mac, src_ip,
					 NUD_REACHABLE,
					 NLM_F_EXCL|NLM_F_CREATE,
					 vxlan->dst_port,
					 vxlan->default_dst.remote_vni,
					 0, NTF_SELF);
		spin_unlock(&vxlan->hash_lock);
	}

	return false;
}
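/* Learning mirrors an Ethernet bridge: the inner source MAC is bound to the
 * outer source IP of the encapsulating packet. A mismatch on an existing
 * dynamic entry is treated as endpoint migration and rewrites the remote IP
 * in place; static (NUD_NOARP) entries are never migrated and such packets
 * are dropped instead.
 */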
/* See if multicast group is already in use by other ID */
static bool vxlan_group_used(struct vxlan_net *vn, struct vxlan_dev *dev)
{
	struct vxlan_dev *vxlan;

	/* The vxlan_sock is only used by dev, leaving group has
	 * no effect on other vxlan devices.
	 */
	if (atomic_read(&dev->vn_sock->refcnt) == 1)
		return false;

	list_for_each_entry(vxlan, &vn->vxlan_list, next) {
		if (!netif_running(vxlan->dev) || vxlan == dev)
			continue;

		if (vxlan->vn_sock != dev->vn_sock)
			continue;

		if (!vxlan_addr_equal(&vxlan->default_dst.remote_ip,
				      &dev->default_dst.remote_ip))
			continue;

		if (vxlan->default_dst.remote_ifindex !=
		    dev->default_dst.remote_ifindex)
			continue;

		return true;
	}

	return false;
}
static void vxlan_sock_hold(struct vxlan_sock *vs)
{
	atomic_inc(&vs->refcnt);
}

void vxlan_sock_release(struct vxlan_sock *vs)
{
	struct sock *sk = vs->sock->sk;
	struct net *net = sock_net(sk);
	struct vxlan_net *vn = net_generic(net, vxlan_net_id);

	if (!atomic_dec_and_test(&vs->refcnt))
		return;

	spin_lock(&vn->sock_lock);
	hlist_del_rcu(&vs->hlist);
	rcu_assign_sk_user_data(vs->sock->sk, NULL);
	vxlan_notify_del_rx_port(vs);
	spin_unlock(&vn->sock_lock);

	queue_work(vxlan_wq, &vs->del_work);
}
EXPORT_SYMBOL_GPL(vxlan_sock_release);
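/* vxlan_sock lifetime is reference-counted: each vxlan_dev bound to the
 * port holds one reference, plus transient holds taken for the IGMP work
 * items. The final put unhashes the socket and defers the actual sk
 * release to the workqueue, since releasing may sleep.
 */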
/* Callback to update multicast group membership when first VNI on
 * multicast address is brought up
 * Done as workqueue because ip_mc_join_group acquires RTNL.
 */
static void vxlan_igmp_join(struct work_struct *work)
{
	struct vxlan_dev *vxlan = container_of(work, struct vxlan_dev, igmp_join);
	struct vxlan_sock *vs = vxlan->vn_sock;
	struct sock *sk = vs->sock->sk;
	union vxlan_addr *ip = &vxlan->default_dst.remote_ip;
	int ifindex = vxlan->default_dst.remote_ifindex;

	lock_sock(sk);
	if (ip->sa.sa_family == AF_INET) {
		struct ip_mreqn mreq = {
			.imr_multiaddr.s_addr	= ip->sin.sin_addr.s_addr,
			.imr_ifindex		= ifindex,
		};

		ip_mc_join_group(sk, &mreq);
#if IS_ENABLED(CONFIG_IPV6)
	} else {
		ipv6_stub->ipv6_sock_mc_join(sk, ifindex,
					     &ip->sin6.sin6_addr);
#endif
	}
	release_sock(sk);

	vxlan_sock_release(vs);
	dev_put(vxlan->dev);
}
/* Inverse of vxlan_igmp_join when last VNI is brought down */
static void vxlan_igmp_leave(struct work_struct *work)
{
	struct vxlan_dev *vxlan = container_of(work, struct vxlan_dev, igmp_leave);
	struct vxlan_sock *vs = vxlan->vn_sock;
	struct sock *sk = vs->sock->sk;
	union vxlan_addr *ip = &vxlan->default_dst.remote_ip;
	int ifindex = vxlan->default_dst.remote_ifindex;

	lock_sock(sk);
	if (ip->sa.sa_family == AF_INET) {
		struct ip_mreqn mreq = {
			.imr_multiaddr.s_addr	= ip->sin.sin_addr.s_addr,
			.imr_ifindex		= ifindex,
		};

		ip_mc_leave_group(sk, &mreq);
#if IS_ENABLED(CONFIG_IPV6)
	} else {
		ipv6_stub->ipv6_sock_mc_drop(sk, ifindex,
					     &ip->sin6.sin6_addr);
#endif
	}
	release_sock(sk);

	vxlan_sock_release(vs);
	dev_put(vxlan->dev);
}
/* Callback from net/ipv4/udp.c to receive packets */
static int vxlan_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
{
	struct vxlan_sock *vs;
	struct vxlanhdr *vxh;
	__be16 port;

	/* Need Vxlan and inner Ethernet header to be present */
	if (!pskb_may_pull(skb, VXLAN_HLEN))
		goto error;

	/* Return packets with reserved bits set */
	vxh = (struct vxlanhdr *)(udp_hdr(skb) + 1);
	if (vxh->vx_flags != htonl(VXLAN_FLAGS) ||
	    (vxh->vx_vni & htonl(0xff))) {
		netdev_dbg(skb->dev, "invalid vxlan flags=%#x vni=%#x\n",
			   ntohl(vxh->vx_flags), ntohl(vxh->vx_vni));
		goto error;
	}

	if (iptunnel_pull_header(skb, VXLAN_HLEN, htons(ETH_P_TEB)))
		goto drop;

	port = inet_sk(sk)->inet_sport;

	vs = rcu_dereference_sk_user_data(sk);
	if (!vs)
		goto drop;

	/* If the NIC driver gave us an encapsulated packet
	 * with the encapsulation mark, the device checksummed it
	 * for us. Otherwise force the upper layers to verify it.
	 */
	if ((skb->ip_summed != CHECKSUM_UNNECESSARY && skb->ip_summed != CHECKSUM_PARTIAL) ||
	    !skb->encapsulation)
		skb->ip_summed = CHECKSUM_NONE;

	skb->encapsulation = 0;

	vs->rcv(vs, skb, vxh->vx_vni);
	return 0;

drop:
	/* Consume bad packet */
	kfree_skb(skb);
	return 0;

error:
	/* Return non vxlan pkt */
	return 1;
}
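/* encap_rcv return convention: 0 means the skb was consumed here (whether
 * delivered or dropped), while 1 hands the packet back to the normal UDP
 * receive path, e.g. for datagrams on this port that are not valid VXLAN.
 */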
static void vxlan_rcv(struct vxlan_sock *vs,
		      struct sk_buff *skb, __be32 vx_vni)
{
	struct iphdr *oip = NULL;
	struct ipv6hdr *oip6 = NULL;
	struct vxlan_dev *vxlan;
	struct pcpu_sw_netstats *stats;
	union vxlan_addr saddr;
	__u32 vni;
	int err = 0;
	union vxlan_addr *remote_ip;

	vni = ntohl(vx_vni) >> 8;
	/* Is this VNI defined? */
	vxlan = vxlan_vs_find_vni(vs, vni);
	if (!vxlan)
		goto drop;

	remote_ip = &vxlan->default_dst.remote_ip;
	skb_reset_mac_header(skb);
	skb->protocol = eth_type_trans(skb, vxlan->dev);

	/* Ignore packet loops (and multicast echo) */
	if (ether_addr_equal(eth_hdr(skb)->h_source, vxlan->dev->dev_addr))
		goto drop;

	/* Re-examine inner Ethernet packet */
	if (remote_ip->sa.sa_family == AF_INET) {
		oip = ip_hdr(skb);
		saddr.sin.sin_addr.s_addr = oip->saddr;
		saddr.sa.sa_family = AF_INET;
#if IS_ENABLED(CONFIG_IPV6)
	} else {
		oip6 = ipv6_hdr(skb);
		saddr.sin6.sin6_addr = oip6->saddr;
		saddr.sa.sa_family = AF_INET6;
#endif
	}

	if ((vxlan->flags & VXLAN_F_LEARN) &&
	    vxlan_snoop(skb->dev, &saddr, eth_hdr(skb)->h_source))
		goto drop;

	skb_reset_network_header(skb);

	if (oip6)
		err = IP6_ECN_decapsulate(oip6, skb);
	if (oip)
		err = IP_ECN_decapsulate(oip, skb);

	if (unlikely(err)) {
		if (log_ecn_error) {
			if (oip6)
				net_info_ratelimited("non-ECT from %pI6\n",
						     &oip6->saddr);
			if (oip)
				net_info_ratelimited("non-ECT from %pI4 with TOS=%#x\n",
						     &oip->saddr, oip->tos);
		}
		if (err > 1) {
			++vxlan->dev->stats.rx_frame_errors;
			++vxlan->dev->stats.rx_errors;
			goto drop;
		}
	}

	stats = this_cpu_ptr(vxlan->dev->tstats);
	u64_stats_update_begin(&stats->syncp);
	stats->rx_packets++;
	stats->rx_bytes += skb->len;
	u64_stats_update_end(&stats->syncp);

	netif_rx(skb);

	return;
drop:
	/* Consume bad packet */
	kfree_skb(skb);
}
static int arp_reduce(struct net_device *dev, struct sk_buff *skb)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	struct arphdr *parp;
	u8 *arpptr, *sha;
	__be32 sip, tip;
	struct neighbour *n;

	if (dev->flags & IFF_NOARP)
		goto out;

	if (!pskb_may_pull(skb, arp_hdr_len(dev))) {
		dev->stats.tx_dropped++;
		goto out;
	}
	parp = arp_hdr(skb);

	if ((parp->ar_hrd != htons(ARPHRD_ETHER) &&
	     parp->ar_hrd != htons(ARPHRD_IEEE802)) ||
	    parp->ar_pro != htons(ETH_P_IP) ||
	    parp->ar_op != htons(ARPOP_REQUEST) ||
	    parp->ar_hln != dev->addr_len ||
	    parp->ar_pln != 4)
		goto out;

	arpptr = (u8 *)parp + sizeof(struct arphdr);
	sha = arpptr;
	arpptr += dev->addr_len;	/* sha */
	memcpy(&sip, arpptr, sizeof(sip));
	arpptr += sizeof(sip);
	arpptr += dev->addr_len;	/* tha */
	memcpy(&tip, arpptr, sizeof(tip));

	if (ipv4_is_loopback(tip) ||
	    ipv4_is_multicast(tip))
		goto out;

	n = neigh_lookup(&arp_tbl, &tip, dev);

	if (n) {
		struct vxlan_fdb *f;
		struct sk_buff *reply;

		if (!(n->nud_state & NUD_CONNECTED)) {
			neigh_release(n);
			goto out;
		}

		f = vxlan_find_mac(vxlan, n->ha);
		if (f && vxlan_addr_any(&(first_remote_rcu(f)->remote_ip))) {
			/* bridge-local neighbor */
			neigh_release(n);
			goto out;
		}

		reply = arp_create(ARPOP_REPLY, ETH_P_ARP, sip, dev, tip, sha,
				   n->ha, sha);

		neigh_release(n);

		if (reply == NULL)
			goto out;

		skb_reset_mac_header(reply);
		__skb_pull(reply, skb_network_offset(reply));
		reply->ip_summed = CHECKSUM_UNNECESSARY;
		reply->pkt_type = PACKET_HOST;

		if (netif_rx_ni(reply) == NET_RX_DROP)
			dev->stats.rx_dropped++;
	} else if (vxlan->flags & VXLAN_F_L3MISS) {
		union vxlan_addr ipa = {
			.sin.sin_addr.s_addr = tip,
			.sa.sa_family = AF_INET,
		};

		vxlan_ip_miss(dev, &ipa);
	}
out:
	consume_skb(skb);
	return NETDEV_TX_OK;
}
#if IS_ENABLED(CONFIG_IPV6)

static struct sk_buff *vxlan_na_create(struct sk_buff *request,
	struct neighbour *n, bool isrouter)
{
	struct net_device *dev = request->dev;
	struct sk_buff *reply;
	struct nd_msg *ns, *na;
	struct ipv6hdr *pip6;
	u8 *daddr;
	int na_olen = 8; /* opt hdr + ETH_ALEN for target */
	int ns_olen;
	int i, len;

	if (dev == NULL)
		return NULL;

	len = LL_RESERVED_SPACE(dev) + sizeof(struct ipv6hdr) +
		sizeof(*na) + na_olen + dev->needed_tailroom;
	reply = alloc_skb(len, GFP_ATOMIC);
	if (reply == NULL)
		return NULL;

	reply->protocol = htons(ETH_P_IPV6);
	reply->dev = dev;
	skb_reserve(reply, LL_RESERVED_SPACE(request->dev));
	skb_push(reply, sizeof(struct ethhdr));
	skb_set_mac_header(reply, 0);

	ns = (struct nd_msg *)skb_transport_header(request);

	daddr = eth_hdr(request)->h_source;
	ns_olen = request->len - skb_transport_offset(request) - sizeof(*ns);
	for (i = 0; i < ns_olen-1; i += (ns->opt[i+1]<<3)) {
		if (ns->opt[i] == ND_OPT_SOURCE_LL_ADDR) {
			daddr = ns->opt + i + sizeof(struct nd_opt_hdr);
			break;
		}
	}

	/* Ethernet header */
	ether_addr_copy(eth_hdr(reply)->h_dest, daddr);
	ether_addr_copy(eth_hdr(reply)->h_source, n->ha);
	eth_hdr(reply)->h_proto = htons(ETH_P_IPV6);
	reply->protocol = htons(ETH_P_IPV6);

	skb_pull(reply, sizeof(struct ethhdr));
	skb_set_network_header(reply, 0);
	skb_put(reply, sizeof(struct ipv6hdr));

	/* IPv6 header */

	pip6 = ipv6_hdr(reply);
	memset(pip6, 0, sizeof(struct ipv6hdr));
	pip6->version = 6;
	pip6->priority = ipv6_hdr(request)->priority;
	pip6->nexthdr = IPPROTO_ICMPV6;
	pip6->hop_limit = 255;
	pip6->daddr = ipv6_hdr(request)->saddr;
	pip6->saddr = *(struct in6_addr *)n->primary_key;

	skb_pull(reply, sizeof(struct ipv6hdr));
	skb_set_transport_header(reply, 0);

	na = (struct nd_msg *)skb_put(reply, sizeof(*na) + na_olen);

	/* Neighbor Advertisement */
	memset(na, 0, sizeof(*na)+na_olen);
	na->icmph.icmp6_type = NDISC_NEIGHBOUR_ADVERTISEMENT;
	na->icmph.icmp6_router = isrouter;
	na->icmph.icmp6_override = 1;
	na->icmph.icmp6_solicited = 1;
	na->target = ns->target;
	ether_addr_copy(&na->opt[2], n->ha);
	na->opt[0] = ND_OPT_TARGET_LL_ADDR;
	na->opt[1] = na_olen >> 3;

	na->icmph.icmp6_cksum = csum_ipv6_magic(&pip6->saddr,
		&pip6->daddr, sizeof(*na)+na_olen, IPPROTO_ICMPV6,
		csum_partial(na, sizeof(*na)+na_olen, 0));

	pip6->payload_len = htons(sizeof(*na)+na_olen);

	skb_push(reply, sizeof(struct ipv6hdr));

	reply->ip_summed = CHECKSUM_UNNECESSARY;

	return reply;
}
static int neigh_reduce(struct net_device *dev, struct sk_buff *skb)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	struct nd_msg *msg;
	const struct ipv6hdr *iphdr;
	const struct in6_addr *saddr, *daddr;
	struct neighbour *n;
	struct inet6_dev *in6_dev;

	in6_dev = __in6_dev_get(dev);
	if (!in6_dev)
		goto out;

	if (!pskb_may_pull(skb, skb->len))
		goto out;

	iphdr = ipv6_hdr(skb);
	saddr = &iphdr->saddr;
	daddr = &iphdr->daddr;

	msg = (struct nd_msg *)skb_transport_header(skb);
	if (msg->icmph.icmp6_code != 0 ||
	    msg->icmph.icmp6_type != NDISC_NEIGHBOUR_SOLICITATION)
		goto out;

	if (ipv6_addr_loopback(daddr) ||
	    ipv6_addr_is_multicast(&msg->target))
		goto out;

	n = neigh_lookup(ipv6_stub->nd_tbl, &msg->target, dev);

	if (n) {
		struct vxlan_fdb *f;
		struct sk_buff *reply;

		if (!(n->nud_state & NUD_CONNECTED)) {
			neigh_release(n);
			goto out;
		}

		f = vxlan_find_mac(vxlan, n->ha);
		if (f && vxlan_addr_any(&(first_remote_rcu(f)->remote_ip))) {
			/* bridge-local neighbor */
			neigh_release(n);
			goto out;
		}

		reply = vxlan_na_create(skb, n,
					!!(f ? f->flags & NTF_ROUTER : 0));

		neigh_release(n);

		if (reply == NULL)
			goto out;

		if (netif_rx_ni(reply) == NET_RX_DROP)
			dev->stats.rx_dropped++;

	} else if (vxlan->flags & VXLAN_F_L3MISS) {
		union vxlan_addr ipa = {
			.sin6.sin6_addr = msg->target,
			.sa.sa_family = AF_INET6,
		};

		vxlan_ip_miss(dev, &ipa);
	}

out:
	consume_skb(skb);
	return NETDEV_TX_OK;
}
#endif
static bool route_shortcircuit(struct net_device *dev, struct sk_buff *skb)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	struct neighbour *n;

	if (is_multicast_ether_addr(eth_hdr(skb)->h_dest))
		return false;

	n = NULL;
	switch (ntohs(eth_hdr(skb)->h_proto)) {
	case ETH_P_IP:
	{
		struct iphdr *pip;

		if (!pskb_may_pull(skb, sizeof(struct iphdr)))
			return false;
		pip = ip_hdr(skb);
		n = neigh_lookup(&arp_tbl, &pip->daddr, dev);
		if (!n && (vxlan->flags & VXLAN_F_L3MISS)) {
			union vxlan_addr ipa = {
				.sin.sin_addr.s_addr = pip->daddr,
				.sa.sa_family = AF_INET,
			};

			vxlan_ip_miss(dev, &ipa);
			return false;
		}

		break;
	}
#if IS_ENABLED(CONFIG_IPV6)
	case ETH_P_IPV6:
	{
		struct ipv6hdr *pip6;

		if (!pskb_may_pull(skb, sizeof(struct ipv6hdr)))
			return false;
		pip6 = ipv6_hdr(skb);
		n = neigh_lookup(ipv6_stub->nd_tbl, &pip6->daddr, dev);
		if (!n && (vxlan->flags & VXLAN_F_L3MISS)) {
			union vxlan_addr ipa = {
				.sin6.sin6_addr = pip6->daddr,
				.sa.sa_family = AF_INET6,
			};

			vxlan_ip_miss(dev, &ipa);
			return false;
		}

		break;
	}
#endif
	default:
		return false;
	}

	if (n) {
		bool diff;

		diff = !ether_addr_equal(eth_hdr(skb)->h_dest, n->ha);
		if (diff) {
			memcpy(eth_hdr(skb)->h_source, eth_hdr(skb)->h_dest,
			       dev->addr_len);
			memcpy(eth_hdr(skb)->h_dest, n->ha, dev->addr_len);
		}
		neigh_release(n);
		return diff;
	}

	return false;
}
/* Compute source port for outgoing packet
 *   first choice to use L4 flow hash since it will spread
 *     better and maybe available from hardware
 *   secondary choice is to use jhash on the Ethernet header
 */
__be16 vxlan_src_port(__u16 port_min, __u16 port_max, struct sk_buff *skb)
{
	unsigned int range = (port_max - port_min) + 1;
	u32 hash;

	hash = skb_get_hash(skb);
	if (!hash)
		hash = jhash(skb->data, 2 * ETH_ALEN,
			     (__force u32) skb->protocol);

	return htons((((u64) hash * range) >> 32) + port_min);
}
EXPORT_SYMBOL_GPL(vxlan_src_port);
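/* The multiply-shift above maps the 32-bit hash uniformly onto the
 * configured range without a modulo. For example, with the default local
 * port range 32768..61000, range = 28233, and a hash of 0x80000000 yields
 * 32768 + 28233/2 = 46884 as the source port. One flow always maps to one
 * port, so ECMP and RSS in the underlay see a stable outer 5-tuple.
 */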
static int handle_offloads(struct sk_buff *skb)
{
	if (skb_is_gso(skb)) {
		int err = skb_unclone(skb, GFP_ATOMIC);
		if (unlikely(err))
			return err;

		skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL;
	} else if (skb->ip_summed != CHECKSUM_PARTIAL)
		skb->ip_summed = CHECKSUM_NONE;

	return 0;
}
#if IS_ENABLED(CONFIG_IPV6)
static int vxlan6_xmit_skb(struct vxlan_sock *vs,
			   struct dst_entry *dst, struct sk_buff *skb,
			   struct net_device *dev, struct in6_addr *saddr,
			   struct in6_addr *daddr, __u8 prio, __u8 ttl,
			   __be16 src_port, __be16 dst_port, __be32 vni)
{
	struct ipv6hdr *ip6h;
	struct vxlanhdr *vxh;
	struct udphdr *uh;
	int min_headroom;
	int err;

	if (!skb->encapsulation) {
		skb_reset_inner_headers(skb);
		skb->encapsulation = 1;
	}

	skb_scrub_packet(skb, false);

	min_headroom = LL_RESERVED_SPACE(dst->dev) + dst->header_len
			+ VXLAN_HLEN + sizeof(struct ipv6hdr)
			+ (vlan_tx_tag_present(skb) ? VLAN_HLEN : 0);

	/* Need space for new headers (invalidates iph ptr) */
	err = skb_cow_head(skb, min_headroom);
	if (unlikely(err))
		return err;

	if (vlan_tx_tag_present(skb)) {
		if (WARN_ON(!__vlan_put_tag(skb,
					    skb->vlan_proto,
					    vlan_tx_tag_get(skb))))
			return -ENOMEM;

		skb->vlan_tci = 0;
	}

	vxh = (struct vxlanhdr *) __skb_push(skb, sizeof(*vxh));
	vxh->vx_flags = htonl(VXLAN_FLAGS);
	vxh->vx_vni = vni;

	__skb_push(skb, sizeof(*uh));
	skb_reset_transport_header(skb);
	uh = udp_hdr(skb);

	uh->dest = dst_port;
	uh->source = src_port;

	uh->len = htons(skb->len);
	uh->check = 0;

	memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
	IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED |
			      IPSKB_REROUTED);
	skb_dst_set(skb, dst);

	if (!skb_is_gso(skb) && !(dst->dev->features & NETIF_F_IPV6_CSUM)) {
		__wsum csum = skb_checksum(skb, 0, skb->len, 0);
		skb->ip_summed = CHECKSUM_UNNECESSARY;
		uh->check = csum_ipv6_magic(saddr, daddr, skb->len,
					    IPPROTO_UDP, csum);
		if (uh->check == 0)
			uh->check = CSUM_MANGLED_0;
	} else {
		skb->ip_summed = CHECKSUM_PARTIAL;
		skb->csum_start = skb_transport_header(skb) - skb->head;
		skb->csum_offset = offsetof(struct udphdr, check);
		uh->check = ~csum_ipv6_magic(saddr, daddr,
					     skb->len, IPPROTO_UDP, 0);
	}

	__skb_push(skb, sizeof(*ip6h));
	skb_reset_network_header(skb);
	ip6h = ipv6_hdr(skb);
	ip6h->version	  = 6;
	ip6h->priority	  = prio;
	ip6h->flow_lbl[0] = 0;
	ip6h->flow_lbl[1] = 0;
	ip6h->flow_lbl[2] = 0;
	ip6h->payload_len = htons(skb->len);
	ip6h->nexthdr	  = IPPROTO_UDP;
	ip6h->hop_limit	  = ttl;
	ip6h->daddr	  = *daddr;
	ip6h->saddr	  = *saddr;

	err = handle_offloads(skb);
	if (err)
		return err;

	ip6tunnel_xmit(skb, dev);
	return 0;
}
#endif
int vxlan_xmit_skb(struct vxlan_sock *vs,
		   struct rtable *rt, struct sk_buff *skb,
		   __be32 src, __be32 dst, __u8 tos, __u8 ttl, __be16 df,
		   __be16 src_port, __be16 dst_port, __be32 vni)
{
	struct vxlanhdr *vxh;
	struct udphdr *uh;
	int min_headroom;
	int err;

	if (!skb->encapsulation) {
		skb_reset_inner_headers(skb);
		skb->encapsulation = 1;
	}

	min_headroom = LL_RESERVED_SPACE(rt->dst.dev) + rt->dst.header_len
			+ VXLAN_HLEN + sizeof(struct iphdr)
			+ (vlan_tx_tag_present(skb) ? VLAN_HLEN : 0);

	/* Need space for new headers (invalidates iph ptr) */
	err = skb_cow_head(skb, min_headroom);
	if (unlikely(err))
		return err;

	if (vlan_tx_tag_present(skb)) {
		if (WARN_ON(!__vlan_put_tag(skb,
					    skb->vlan_proto,
					    vlan_tx_tag_get(skb))))
			return -ENOMEM;

		skb->vlan_tci = 0;
	}

	vxh = (struct vxlanhdr *) __skb_push(skb, sizeof(*vxh));
	vxh->vx_flags = htonl(VXLAN_FLAGS);
	vxh->vx_vni = vni;

	__skb_push(skb, sizeof(*uh));
	skb_reset_transport_header(skb);
	uh = udp_hdr(skb);

	uh->dest = dst_port;
	uh->source = src_port;

	uh->len = htons(skb->len);
	uh->check = 0;

	err = handle_offloads(skb);
	if (err)
		return err;

	return iptunnel_xmit(rt, skb, src, dst, IPPROTO_UDP, tos, ttl, df,
			     false);
}
EXPORT_SYMBOL_GPL(vxlan_xmit_skb);
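/* Both transmit helpers produce the same layering: the VXLAN and UDP
 * headers are pushed here and iptunnel_xmit()/ip6tunnel_xmit() then add
 * the outer IP header. With GSO, handle_offloads() only marks the skb
 * SKB_GSO_UDP_TUNNEL; segmentation of the inner frames happens later,
 * at latest when the lower device lacks tunnel offload support.
 */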
/* Bypass encapsulation if the destination is local */
static void vxlan_encap_bypass(struct sk_buff *skb, struct vxlan_dev *src_vxlan,
			       struct vxlan_dev *dst_vxlan)
{
	struct pcpu_sw_netstats *tx_stats, *rx_stats;
	union vxlan_addr loopback;
	union vxlan_addr *remote_ip = &dst_vxlan->default_dst.remote_ip;

	tx_stats = this_cpu_ptr(src_vxlan->dev->tstats);
	rx_stats = this_cpu_ptr(dst_vxlan->dev->tstats);
	skb->pkt_type = PACKET_HOST;
	skb->encapsulation = 0;
	skb->dev = dst_vxlan->dev;
	__skb_pull(skb, skb_network_offset(skb));

	if (remote_ip->sa.sa_family == AF_INET) {
		loopback.sin.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
		loopback.sa.sa_family = AF_INET;
#if IS_ENABLED(CONFIG_IPV6)
	} else {
		loopback.sin6.sin6_addr = in6addr_loopback;
		loopback.sa.sa_family = AF_INET6;
#endif
	}

	if (dst_vxlan->flags & VXLAN_F_LEARN)
		vxlan_snoop(skb->dev, &loopback, eth_hdr(skb)->h_source);

	u64_stats_update_begin(&tx_stats->syncp);
	tx_stats->tx_packets++;
	tx_stats->tx_bytes += skb->len;
	u64_stats_update_end(&tx_stats->syncp);

	if (netif_rx(skb) == NET_RX_SUCCESS) {
		u64_stats_update_begin(&rx_stats->syncp);
		rx_stats->rx_packets++;
		rx_stats->rx_bytes += skb->len;
		u64_stats_update_end(&rx_stats->syncp);
	} else {
		skb->dev->stats.rx_dropped++;
	}
}
static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
			   struct vxlan_rdst *rdst, bool did_rsc)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	struct rtable *rt = NULL;
	const struct iphdr *old_iph;
	struct flowi4 fl4;
	union vxlan_addr *dst;
	__be16 src_port = 0, dst_port;
	u32 vni;
	__be16 df = 0;
	__u8 tos, ttl;
	int err;

	dst_port = rdst->remote_port ? rdst->remote_port : vxlan->dst_port;
	vni = rdst->remote_vni;
	dst = &rdst->remote_ip;

	if (vxlan_addr_any(dst)) {
		if (did_rsc) {
			/* short-circuited back to local bridge */
			vxlan_encap_bypass(skb, vxlan, vxlan);
			return;
		}
		goto drop;
	}

	old_iph = ip_hdr(skb);

	ttl = vxlan->ttl;
	if (!ttl && vxlan_addr_multicast(dst))
		ttl = 1;

	tos = vxlan->tos;
	if (tos == 1)
		tos = ip_tunnel_get_dsfield(old_iph, skb);

	src_port = vxlan_src_port(vxlan->port_min, vxlan->port_max, skb);

	if (dst->sa.sa_family == AF_INET) {
		memset(&fl4, 0, sizeof(fl4));
		fl4.flowi4_oif = rdst->remote_ifindex;
		fl4.flowi4_tos = RT_TOS(tos);
		fl4.daddr = dst->sin.sin_addr.s_addr;
		fl4.saddr = vxlan->saddr.sin.sin_addr.s_addr;

		rt = ip_route_output_key(dev_net(dev), &fl4);
		if (IS_ERR(rt)) {
			netdev_dbg(dev, "no route to %pI4\n",
				   &dst->sin.sin_addr.s_addr);
			dev->stats.tx_carrier_errors++;
			goto tx_error;
		}

		if (rt->dst.dev == dev) {
			netdev_dbg(dev, "circular route to %pI4\n",
				   &dst->sin.sin_addr.s_addr);
			dev->stats.collisions++;
			goto rt_tx_error;
		}

		/* Bypass encapsulation if the destination is local */
		if (rt->rt_flags & RTCF_LOCAL &&
		    !(rt->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))) {
			struct vxlan_dev *dst_vxlan;

			ip_rt_put(rt);
			dst_vxlan = vxlan_find_vni(dev_net(dev), vni, dst_port);
			if (!dst_vxlan)
				goto tx_error;
			vxlan_encap_bypass(skb, vxlan, dst_vxlan);
			return;
		}

		tos = ip_tunnel_ecn_encap(tos, old_iph, skb);
		ttl = ttl ? : ip4_dst_hoplimit(&rt->dst);

		err = vxlan_xmit_skb(vxlan->vn_sock, rt, skb,
				     fl4.saddr, dst->sin.sin_addr.s_addr,
				     tos, ttl, df, src_port, dst_port,
				     htonl(vni << 8));
		if (err < 0)
			goto rt_tx_error;
		iptunnel_xmit_stats(err, &dev->stats, dev->tstats);
#if IS_ENABLED(CONFIG_IPV6)
	} else {
		struct sock *sk = vxlan->vn_sock->sock->sk;
		struct dst_entry *ndst;
		struct flowi6 fl6;
		u32 flags;

		memset(&fl6, 0, sizeof(fl6));
		fl6.flowi6_oif = rdst->remote_ifindex;
		fl6.daddr = dst->sin6.sin6_addr;
		fl6.saddr = vxlan->saddr.sin6.sin6_addr;
		fl6.flowi6_proto = IPPROTO_UDP;

		if (ipv6_stub->ipv6_dst_lookup(sk, &ndst, &fl6)) {
			netdev_dbg(dev, "no route to %pI6\n",
				   &dst->sin6.sin6_addr);
			dev->stats.tx_carrier_errors++;
			goto tx_error;
		}

		if (ndst->dev == dev) {
			netdev_dbg(dev, "circular route to %pI6\n",
				   &dst->sin6.sin6_addr);
			dst_release(ndst);
			dev->stats.collisions++;
			goto tx_error;
		}

		/* Bypass encapsulation if the destination is local */
		flags = ((struct rt6_info *)ndst)->rt6i_flags;
		if (flags & RTF_LOCAL &&
		    !(flags & (RTCF_BROADCAST | RTCF_MULTICAST))) {
			struct vxlan_dev *dst_vxlan;

			dst_release(ndst);
			dst_vxlan = vxlan_find_vni(dev_net(dev), vni, dst_port);
			if (!dst_vxlan)
				goto tx_error;
			vxlan_encap_bypass(skb, vxlan, dst_vxlan);
			return;
		}

		ttl = ttl ? : ip6_dst_hoplimit(ndst);

		err = vxlan6_xmit_skb(vxlan->vn_sock, ndst, skb,
				      dev, &fl6.saddr, &fl6.daddr, 0, ttl,
				      src_port, dst_port, htonl(vni << 8));
#endif
	}

	return;

drop:
	dev->stats.tx_dropped++;
	goto tx_free;

rt_tx_error:
	ip_rt_put(rt);
tx_error:
	dev->stats.tx_errors++;
tx_free:
	dev_kfree_skb(skb);
}
/* Transmit local packets over Vxlan
 *
 * Outer IP header inherits ECN and DF from inner header.
 * Outer UDP destination is the VXLAN assigned port.
 *           source port is based on hash of flow
 */
static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	struct ethhdr *eth;
	bool did_rsc = false;
	struct vxlan_rdst *rdst, *fdst = NULL;
	struct vxlan_fdb *f;

	skb_reset_mac_header(skb);
	eth = eth_hdr(skb);

	if ((vxlan->flags & VXLAN_F_PROXY)) {
		if (ntohs(eth->h_proto) == ETH_P_ARP)
			return arp_reduce(dev, skb);
#if IS_ENABLED(CONFIG_IPV6)
		else if (ntohs(eth->h_proto) == ETH_P_IPV6 &&
			 skb->len >= sizeof(struct ipv6hdr) + sizeof(struct nd_msg) &&
			 ipv6_hdr(skb)->nexthdr == IPPROTO_ICMPV6) {
			struct nd_msg *msg;

			msg = (struct nd_msg *)skb_transport_header(skb);
			if (msg->icmph.icmp6_code == 0 &&
			    msg->icmph.icmp6_type == NDISC_NEIGHBOUR_SOLICITATION)
				return neigh_reduce(dev, skb);
		}
#endif
	}

	f = vxlan_find_mac(vxlan, eth->h_dest);
	did_rsc = false;

	if (f && (f->flags & NTF_ROUTER) && (vxlan->flags & VXLAN_F_RSC) &&
	    (ntohs(eth->h_proto) == ETH_P_IP ||
	     ntohs(eth->h_proto) == ETH_P_IPV6)) {
		did_rsc = route_shortcircuit(dev, skb);
		if (did_rsc)
			f = vxlan_find_mac(vxlan, eth->h_dest);
	}

	if (f == NULL) {
		f = vxlan_find_mac(vxlan, all_zeros_mac);
		if (f == NULL) {
			if ((vxlan->flags & VXLAN_F_L2MISS) &&
			    !is_multicast_ether_addr(eth->h_dest))
				vxlan_fdb_miss(vxlan, eth->h_dest);

			dev->stats.tx_dropped++;
			kfree_skb(skb);
			return NETDEV_TX_OK;
		}
	}

	list_for_each_entry_rcu(rdst, &f->remotes, list) {
		struct sk_buff *skb1;

		if (!fdst) {
			fdst = rdst;
			continue;
		}
		skb1 = skb_clone(skb, GFP_ATOMIC);
		if (skb1)
			vxlan_xmit_one(skb1, dev, rdst, did_rsc);
	}

	if (fdst)
		vxlan_xmit_one(skb, dev, fdst, did_rsc);
	else
		kfree_skb(skb);
	return NETDEV_TX_OK;
}
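/* An FDB hit may still carry several remotes (e.g. an all-zeros default
 * entry pointing at multiple VTEPs), so the frame is cloned for every
 * destination after the first and the original skb is spent on the first
 * remote. Unknown unicast without a default entry is dropped rather than
 * flooded.
 */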
/* Walk the forwarding table and purge stale entries */
static void vxlan_cleanup(unsigned long arg)
{
	struct vxlan_dev *vxlan = (struct vxlan_dev *) arg;
	unsigned long next_timer = jiffies + FDB_AGE_INTERVAL;
	unsigned int h;

	if (!netif_running(vxlan->dev))
		return;

	spin_lock_bh(&vxlan->hash_lock);
	for (h = 0; h < FDB_HASH_SIZE; ++h) {
		struct hlist_node *p, *n;
		hlist_for_each_safe(p, n, &vxlan->fdb_head[h]) {
			struct vxlan_fdb *f
				= container_of(p, struct vxlan_fdb, hlist);
			unsigned long timeout;

			if (f->state & NUD_PERMANENT)
				continue;

			timeout = f->used + vxlan->age_interval * HZ;
			if (time_before_eq(timeout, jiffies)) {
				netdev_dbg(vxlan->dev,
					   "garbage collect %pM\n",
					   f->eth_addr);
				f->state = NUD_STALE;
				vxlan_fdb_destroy(vxlan, f);
			} else if (time_before(timeout, next_timer))
				next_timer = timeout;
		}
	}
	spin_unlock_bh(&vxlan->hash_lock);

	mod_timer(&vxlan->age_timer, next_timer);
}
static void vxlan_vs_add_dev(struct vxlan_sock *vs, struct vxlan_dev *vxlan)
{
	__u32 vni = vxlan->default_dst.remote_vni;

	vxlan->vn_sock = vs;
	hlist_add_head_rcu(&vxlan->hlist, vni_head(vs, vni));
}
/* Setup stats when device is created */
static int vxlan_init(struct net_device *dev)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	struct vxlan_net *vn = net_generic(dev_net(dev), vxlan_net_id);
	struct vxlan_sock *vs;
	int i;

	dev->tstats = alloc_percpu(struct pcpu_sw_netstats);
	if (!dev->tstats)
		return -ENOMEM;

	for_each_possible_cpu(i) {
		struct pcpu_sw_netstats *vxlan_stats;
		vxlan_stats = per_cpu_ptr(dev->tstats, i);
		u64_stats_init(&vxlan_stats->syncp);
	}

	spin_lock(&vn->sock_lock);
	vs = vxlan_find_sock(dev_net(dev), vxlan->dst_port);
	if (vs) {
		/* If we have a socket with same port already, reuse it */
		atomic_inc(&vs->refcnt);
		vxlan_vs_add_dev(vs, vxlan);
	} else {
		/* otherwise make new socket outside of RTNL */
		dev_hold(dev);
		queue_work(vxlan_wq, &vxlan->sock_work);
	}
	spin_unlock(&vn->sock_lock);

	return 0;
}
static void vxlan_fdb_delete_default(struct vxlan_dev *vxlan)
{
	struct vxlan_fdb *f;

	spin_lock_bh(&vxlan->hash_lock);
	f = __vxlan_find_mac(vxlan, all_zeros_mac);
	if (f)
		vxlan_fdb_destroy(vxlan, f);
	spin_unlock_bh(&vxlan->hash_lock);
}

static void vxlan_uninit(struct net_device *dev)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	struct vxlan_sock *vs = vxlan->vn_sock;

	vxlan_fdb_delete_default(vxlan);

	if (vs)
		vxlan_sock_release(vs);
	free_percpu(dev->tstats);
}
/* Start ageing timer and join group when device is brought up */
static int vxlan_open(struct net_device *dev)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	struct vxlan_sock *vs = vxlan->vn_sock;

	/* socket hasn't been created */
	if (!vs)
		return -ENOTCONN;

	if (vxlan_addr_multicast(&vxlan->default_dst.remote_ip)) {
		vxlan_sock_hold(vs);
		dev_hold(dev);
		queue_work(vxlan_wq, &vxlan->igmp_join);
	}

	if (vxlan->age_interval)
		mod_timer(&vxlan->age_timer, jiffies + FDB_AGE_INTERVAL);

	return 0;
}
/* Purge the forwarding table */
static void vxlan_flush(struct vxlan_dev *vxlan)
{
	unsigned int h;

	spin_lock_bh(&vxlan->hash_lock);
	for (h = 0; h < FDB_HASH_SIZE; ++h) {
		struct hlist_node *p, *n;
		hlist_for_each_safe(p, n, &vxlan->fdb_head[h]) {
			struct vxlan_fdb *f
				= container_of(p, struct vxlan_fdb, hlist);
			/* the all_zeros_mac entry is deleted at vxlan_uninit */
			if (!is_zero_ether_addr(f->eth_addr))
				vxlan_fdb_destroy(vxlan, f);
		}
	}
	spin_unlock_bh(&vxlan->hash_lock);
}
/* Cleanup timer and forwarding table on shutdown */
static int vxlan_stop(struct net_device *dev)
{
	struct vxlan_net *vn = net_generic(dev_net(dev), vxlan_net_id);
	struct vxlan_dev *vxlan = netdev_priv(dev);
	struct vxlan_sock *vs = vxlan->vn_sock;

	if (vs && vxlan_addr_multicast(&vxlan->default_dst.remote_ip) &&
	    !vxlan_group_used(vn, vxlan)) {
		vxlan_sock_hold(vs);
		dev_hold(dev);
		queue_work(vxlan_wq, &vxlan->igmp_leave);
	}

	del_timer_sync(&vxlan->age_timer);

	vxlan_flush(vxlan);

	return 0;
}
/* Stub, nothing needs to be done. */
static void vxlan_set_multicast_list(struct net_device *dev)
{
}
static int vxlan_change_mtu(struct net_device *dev, int new_mtu)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	struct vxlan_rdst *dst = &vxlan->default_dst;
	struct net_device *lowerdev;
	int max_mtu;

	lowerdev = __dev_get_by_index(dev_net(dev), dst->remote_ifindex);
	if (lowerdev == NULL)
		return eth_change_mtu(dev, new_mtu);

	if (dst->remote_ip.sa.sa_family == AF_INET6)
		max_mtu = lowerdev->mtu - VXLAN6_HEADROOM;
	else
		max_mtu = lowerdev->mtu - VXLAN_HEADROOM;

	if (new_mtu < 68 || new_mtu > max_mtu)
		return -EINVAL;

	dev->mtu = new_mtu;
	return 0;
}
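/* The headroom accounts for the full outer stack. Over IPv4:
 * 20 (IP) + 8 (UDP) + 8 (VXLAN) + 14 (inner Ethernet) = 50 bytes
 * (VXLAN_HEADROOM), and 70 bytes with an IPv6 outer header
 * (VXLAN6_HEADROOM), which is why the MTU ceiling differs per family.
 * 68 is the minimum IPv4 MTU that a link must support.
 */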
static const struct net_device_ops vxlan_netdev_ops = {
	.ndo_init		= vxlan_init,
	.ndo_uninit		= vxlan_uninit,
	.ndo_open		= vxlan_open,
	.ndo_stop		= vxlan_stop,
	.ndo_start_xmit		= vxlan_xmit,
	.ndo_get_stats64	= ip_tunnel_get_stats64,
	.ndo_set_rx_mode	= vxlan_set_multicast_list,
	.ndo_change_mtu		= vxlan_change_mtu,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_fdb_add		= vxlan_fdb_add,
	.ndo_fdb_del		= vxlan_fdb_delete,
	.ndo_fdb_dump		= vxlan_fdb_dump,
};
/* Info for udev, that this is a virtual tunnel endpoint */
static struct device_type vxlan_type = {
	.name = "vxlan",
};
/* Calls the ndo_add_vxlan_port of the caller in order to
 * supply the listening VXLAN udp ports. Callers are expected
 * to implement the ndo_add_vxlan_port.
 */
void vxlan_get_rx_port(struct net_device *dev)
{
	struct vxlan_sock *vs;
	struct net *net = dev_net(dev);
	struct vxlan_net *vn = net_generic(net, vxlan_net_id);
	sa_family_t sa_family;
	__be16 port;
	unsigned int i;

	spin_lock(&vn->sock_lock);
	for (i = 0; i < PORT_HASH_SIZE; ++i) {
		hlist_for_each_entry_rcu(vs, &vn->sock_list[i], hlist) {
			port = inet_sk(vs->sock->sk)->inet_sport;
			sa_family = vs->sock->sk->sk_family;
			dev->netdev_ops->ndo_add_vxlan_port(dev, sa_family,
							    port);
		}
	}
	spin_unlock(&vn->sock_lock);
}
EXPORT_SYMBOL_GPL(vxlan_get_rx_port);
/* Initialize the device structure. */
static void vxlan_setup(struct net_device *dev)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	unsigned int h;
	int low, high;

	eth_hw_addr_random(dev);
	ether_setup(dev);
	if (vxlan->default_dst.remote_ip.sa.sa_family == AF_INET6)
		dev->hard_header_len = ETH_HLEN + VXLAN6_HEADROOM;
	else
		dev->hard_header_len = ETH_HLEN + VXLAN_HEADROOM;

	dev->netdev_ops = &vxlan_netdev_ops;
	dev->destructor = free_netdev;
	SET_NETDEV_DEVTYPE(dev, &vxlan_type);

	dev->tx_queue_len = 0;
	dev->features	|= NETIF_F_LLTX;
	dev->features	|= NETIF_F_NETNS_LOCAL;
	dev->features	|= NETIF_F_SG | NETIF_F_HW_CSUM;
	dev->features	|= NETIF_F_RXCSUM;
	dev->features	|= NETIF_F_GSO_SOFTWARE;

	dev->vlan_features = dev->features;
	dev->features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX;
	dev->hw_features |= NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_RXCSUM;
	dev->hw_features |= NETIF_F_GSO_SOFTWARE;
	dev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX;
	dev->priv_flags	&= ~IFF_XMIT_DST_RELEASE;
	dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;

	INIT_LIST_HEAD(&vxlan->next);
	spin_lock_init(&vxlan->hash_lock);
	INIT_WORK(&vxlan->igmp_join, vxlan_igmp_join);
	INIT_WORK(&vxlan->igmp_leave, vxlan_igmp_leave);
	INIT_WORK(&vxlan->sock_work, vxlan_sock_work);

	init_timer_deferrable(&vxlan->age_timer);
	vxlan->age_timer.function = vxlan_cleanup;
	vxlan->age_timer.data = (unsigned long) vxlan;

	inet_get_local_port_range(dev_net(dev), &low, &high);
	vxlan->port_min = low;
	vxlan->port_max = high;
	vxlan->dst_port = htons(vxlan_port);

	vxlan->dev = dev;

	for (h = 0; h < FDB_HASH_SIZE; ++h)
		INIT_HLIST_HEAD(&vxlan->fdb_head[h]);
}
static const struct nla_policy vxlan_policy[IFLA_VXLAN_MAX + 1] = {
	[IFLA_VXLAN_ID]		= { .type = NLA_U32 },
	[IFLA_VXLAN_GROUP]	= { .len = FIELD_SIZEOF(struct iphdr, daddr) },
	[IFLA_VXLAN_GROUP6]	= { .len = sizeof(struct in6_addr) },
	[IFLA_VXLAN_LINK]	= { .type = NLA_U32 },
	[IFLA_VXLAN_LOCAL]	= { .len = FIELD_SIZEOF(struct iphdr, saddr) },
	[IFLA_VXLAN_LOCAL6]	= { .len = sizeof(struct in6_addr) },
	[IFLA_VXLAN_TOS]	= { .type = NLA_U8 },
	[IFLA_VXLAN_TTL]	= { .type = NLA_U8 },
	[IFLA_VXLAN_LEARNING]	= { .type = NLA_U8 },
	[IFLA_VXLAN_AGEING]	= { .type = NLA_U32 },
	[IFLA_VXLAN_LIMIT]	= { .type = NLA_U32 },
	[IFLA_VXLAN_PORT_RANGE] = { .len = sizeof(struct ifla_vxlan_port_range) },
	[IFLA_VXLAN_PROXY]	= { .type = NLA_U8 },
	[IFLA_VXLAN_RSC]	= { .type = NLA_U8 },
	[IFLA_VXLAN_L2MISS]	= { .type = NLA_U8 },
	[IFLA_VXLAN_L3MISS]	= { .type = NLA_U8 },
	[IFLA_VXLAN_PORT]	= { .type = NLA_U16 },
};
static int vxlan_validate(struct nlattr *tb[], struct nlattr *data[])
{
	if (tb[IFLA_ADDRESS]) {
		if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN) {
			pr_debug("invalid link address (not ethernet)\n");
			return -EINVAL;
		}

		if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS]))) {
			pr_debug("invalid all zero ethernet address\n");
			return -EADDRNOTAVAIL;
		}
	}

	if (!data)
		return -EINVAL;

	if (data[IFLA_VXLAN_ID]) {
		__u32 id = nla_get_u32(data[IFLA_VXLAN_ID]);
		if (id >= VXLAN_VID_MASK)
			return -ERANGE;
	}

	if (data[IFLA_VXLAN_PORT_RANGE]) {
		const struct ifla_vxlan_port_range *p
			= nla_data(data[IFLA_VXLAN_PORT_RANGE]);

		if (ntohs(p->high) < ntohs(p->low)) {
			pr_debug("port range %u .. %u not valid\n",
				 ntohs(p->low), ntohs(p->high));
			return -EINVAL;
		}
	}

	return 0;
}
static void vxlan_get_drvinfo(struct net_device *netdev,
			      struct ethtool_drvinfo *drvinfo)
{
	strlcpy(drvinfo->version, VXLAN_VERSION, sizeof(drvinfo->version));
	strlcpy(drvinfo->driver, "vxlan", sizeof(drvinfo->driver));
}

static const struct ethtool_ops vxlan_ethtool_ops = {
	.get_drvinfo	= vxlan_get_drvinfo,
	.get_link	= ethtool_op_get_link,
};
static void vxlan_del_work(struct work_struct *work)
{
	struct vxlan_sock *vs = container_of(work, struct vxlan_sock, del_work);

	sk_release_kernel(vs->sock->sk);
	kfree_rcu(vs, rcu);
}
#if IS_ENABLED(CONFIG_IPV6)
/* Create UDP socket for encapsulation receive. AF_INET6 socket
 * could be used for both IPv4 and IPv6 communications, but
 * users may set bindv6only=1.
 */
static struct socket *create_v6_sock(struct net *net, __be16 port)
{
	struct sock *sk;
	struct socket *sock;
	struct sockaddr_in6 vxlan_addr = {
		.sin6_family = AF_INET6,
		.sin6_port = port,
	};
	int rc, val = 1;

	rc = sock_create_kern(AF_INET6, SOCK_DGRAM, IPPROTO_UDP, &sock);
	if (rc < 0) {
		pr_debug("UDPv6 socket create failed\n");
		return ERR_PTR(rc);
	}

	/* Put in proper namespace */
	sk = sock->sk;
	sk_change_net(sk, net);

	kernel_setsockopt(sock, SOL_IPV6, IPV6_V6ONLY,
			  (char *)&val, sizeof(val));
	rc = kernel_bind(sock, (struct sockaddr *)&vxlan_addr,
			 sizeof(struct sockaddr_in6));
	if (rc < 0) {
		pr_debug("bind for UDPv6 socket %pI6:%u (%d)\n",
			 &vxlan_addr.sin6_addr, ntohs(vxlan_addr.sin6_port), rc);
		sk_release_kernel(sk);
		return ERR_PTR(rc);
	}
	/* At this point, IPv6 module should have been loaded in
	 * sock_create_kern().
	 */
	BUG_ON(!ipv6_stub);

	/* Disable multicast loopback */
	inet_sk(sk)->mc_loop = 0;
	return sock;
}

#else

static struct socket *create_v6_sock(struct net *net, __be16 port)
{
	return ERR_PTR(-EPFNOSUPPORT);
}
#endif
static struct socket *create_v4_sock(struct net *net, __be16 port)
{
	struct sock *sk;
	struct socket *sock;
	struct sockaddr_in vxlan_addr = {
		.sin_family = AF_INET,
		.sin_addr.s_addr = htonl(INADDR_ANY),
		.sin_port = port,
	};
	int rc;

	/* Create UDP socket for encapsulation receive. */
	rc = sock_create_kern(AF_INET, SOCK_DGRAM, IPPROTO_UDP, &sock);
	if (rc < 0) {
		pr_debug("UDP socket create failed\n");
		return ERR_PTR(rc);
	}

	/* Put in proper namespace */
	sk = sock->sk;
	sk_change_net(sk, net);

	rc = kernel_bind(sock, (struct sockaddr *) &vxlan_addr,
			 sizeof(vxlan_addr));
	if (rc < 0) {
		pr_debug("bind for UDP socket %pI4:%u (%d)\n",
			 &vxlan_addr.sin_addr, ntohs(vxlan_addr.sin_port), rc);
		sk_release_kernel(sk);
		return ERR_PTR(rc);
	}

	/* Disable multicast loopback */
	inet_sk(sk)->mc_loop = 0;
	return sock;
}
/* Create new listen socket if needed */
static struct vxlan_sock *vxlan_socket_create(struct net *net, __be16 port,
					      vxlan_rcv_t *rcv, void *data, bool ipv6)
{
	struct vxlan_net *vn = net_generic(net, vxlan_net_id);
	struct vxlan_sock *vs;
	struct socket *sock;
	struct sock *sk;
	unsigned int h;

	vs = kzalloc(sizeof(*vs), GFP_KERNEL);
	if (!vs)
		return ERR_PTR(-ENOMEM);

	for (h = 0; h < VNI_HASH_SIZE; ++h)
		INIT_HLIST_HEAD(&vs->vni_list[h]);

	INIT_WORK(&vs->del_work, vxlan_del_work);

	if (ipv6)
		sock = create_v6_sock(net, port);
	else
		sock = create_v4_sock(net, port);
	if (IS_ERR(sock)) {
		kfree(vs);
		return ERR_CAST(sock);
	}

	vs->sock = sock;
	sk = sock->sk;
	atomic_set(&vs->refcnt, 1);
	vs->rcv = rcv;
	vs->data = data;
	rcu_assign_sk_user_data(vs->sock->sk, vs);

	/* Initialize the vxlan udp offloads structure */
	vs->udp_offloads.port = port;
	vs->udp_offloads.callbacks.gro_receive = vxlan_gro_receive;
	vs->udp_offloads.callbacks.gro_complete = vxlan_gro_complete;

	spin_lock(&vn->sock_lock);
	hlist_add_head_rcu(&vs->hlist, vs_head(net, port));
	vxlan_notify_add_rx_port(vs);
	spin_unlock(&vn->sock_lock);

	/* Mark socket as an encapsulation socket. */
	udp_sk(sk)->encap_type = 1;
	udp_sk(sk)->encap_rcv = vxlan_udp_encap_recv;
#if IS_ENABLED(CONFIG_IPV6)
	if (ipv6)
		ipv6_stub->udpv6_encap_enable();
	else
#endif
		udp_encap_enable();

	return vs;
}
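/* With encap_type/encap_rcv set, every datagram arriving on this UDP
 * socket is handed to vxlan_udp_encap_recv() ahead of normal UDP
 * delivery; by the usual encap_rcv convention, a positive return from
 * that handler passes the packet back to the regular UDP path.
 */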
struct vxlan_sock *vxlan_sock_add(struct net *net, __be16 port,
				  vxlan_rcv_t *rcv, void *data,
				  bool no_share, bool ipv6)
{
	struct vxlan_net *vn = net_generic(net, vxlan_net_id);
	struct vxlan_sock *vs;

	vs = vxlan_socket_create(net, port, rcv, data, ipv6);
	if (!IS_ERR(vs))
		return vs;

	if (no_share)	/* Return error if sharing is not allowed. */
		return vs;

	spin_lock(&vn->sock_lock);
	vs = vxlan_find_sock(net, port);
	if (vs) {
		if (vs->rcv == rcv)
			atomic_inc(&vs->refcnt);
		else
			vs = ERR_PTR(-EBUSY);
	}
	spin_unlock(&vn->sock_lock);

	if (!vs)
		vs = ERR_PTR(-EINVAL);

	return vs;
}
EXPORT_SYMBOL_GPL(vxlan_sock_add);
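/* Exported for other in-kernel users of VXLAN encapsulation (for
 * example the Open vSwitch vxlan vport).  Callers passing the same
 * rcv callback share one listener per UDP port via the refcount; a
 * different rcv on a port already in use yields -EBUSY, and no_share
 * callers get either an unshared socket or an error.
 */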
/* Scheduled at device creation to bind to a socket */
static void vxlan_sock_work(struct work_struct *work)
{
	struct vxlan_dev *vxlan = container_of(work, struct vxlan_dev, sock_work);
	struct net *net = dev_net(vxlan->dev);
	struct vxlan_net *vn = net_generic(net, vxlan_net_id);
	__be16 port = vxlan->dst_port;
	struct vxlan_sock *nvs;

	nvs = vxlan_sock_add(net, port, vxlan_rcv, NULL, false, vxlan->flags & VXLAN_F_IPV6);
	spin_lock(&vn->sock_lock);
	if (!IS_ERR(nvs))
		vxlan_vs_add_dev(nvs, vxlan);
	spin_unlock(&vn->sock_lock);

	dev_put(vxlan->dev);
}
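/* The dev_put() above balances the dev_hold() taken when this work
 * was queued, keeping the net_device alive while the socket is set up
 * outside of RTNL.
 */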
static int vxlan_newlink(struct net *net, struct net_device *dev,
			 struct nlattr *tb[], struct nlattr *data[])
{
	struct vxlan_net *vn = net_generic(net, vxlan_net_id);
	struct vxlan_dev *vxlan = netdev_priv(dev);
	struct vxlan_rdst *dst = &vxlan->default_dst;
	__u32 vni;
	int err;
	bool use_ipv6 = false;

	if (!data[IFLA_VXLAN_ID])
		return -EINVAL;

	vni = nla_get_u32(data[IFLA_VXLAN_ID]);
	dst->remote_vni = vni;

	if (data[IFLA_VXLAN_GROUP]) {
		dst->remote_ip.sin.sin_addr.s_addr = nla_get_be32(data[IFLA_VXLAN_GROUP]);
		dst->remote_ip.sa.sa_family = AF_INET;
	} else if (data[IFLA_VXLAN_GROUP6]) {
		if (!IS_ENABLED(CONFIG_IPV6))
			return -EPFNOSUPPORT;

		nla_memcpy(&dst->remote_ip.sin6.sin6_addr, data[IFLA_VXLAN_GROUP6],
			   sizeof(struct in6_addr));
		dst->remote_ip.sa.sa_family = AF_INET6;
		use_ipv6 = true;
	}

	if (data[IFLA_VXLAN_LOCAL]) {
		vxlan->saddr.sin.sin_addr.s_addr = nla_get_be32(data[IFLA_VXLAN_LOCAL]);
		vxlan->saddr.sa.sa_family = AF_INET;
	} else if (data[IFLA_VXLAN_LOCAL6]) {
		if (!IS_ENABLED(CONFIG_IPV6))
			return -EPFNOSUPPORT;

		/* TODO: respect scope id */
		nla_memcpy(&vxlan->saddr.sin6.sin6_addr, data[IFLA_VXLAN_LOCAL6],
			   sizeof(struct in6_addr));
		vxlan->saddr.sa.sa_family = AF_INET6;
		use_ipv6 = true;
	}

	if (data[IFLA_VXLAN_LINK] &&
	    (dst->remote_ifindex = nla_get_u32(data[IFLA_VXLAN_LINK]))) {
		struct net_device *lowerdev
			 = __dev_get_by_index(net, dst->remote_ifindex);

		if (!lowerdev) {
			pr_info("ifindex %d does not exist\n", dst->remote_ifindex);
			return -ENODEV;
		}

#if IS_ENABLED(CONFIG_IPV6)
		if (use_ipv6) {
			struct inet6_dev *idev = __in6_dev_get(lowerdev);
			if (idev && idev->cnf.disable_ipv6) {
				pr_info("IPv6 is disabled via sysctl\n");
				return -EPERM;
			}
			vxlan->flags |= VXLAN_F_IPV6;
		}
#endif

		if (!tb[IFLA_MTU])
			dev->mtu = lowerdev->mtu - (use_ipv6 ? VXLAN6_HEADROOM : VXLAN_HEADROOM);

		/* update header length based on lower device */
		dev->hard_header_len = lowerdev->hard_header_len +
				       (use_ipv6 ? VXLAN6_HEADROOM : VXLAN_HEADROOM);
	} else if (use_ipv6)
		vxlan->flags |= VXLAN_F_IPV6;

	if (data[IFLA_VXLAN_TOS])
		vxlan->tos = nla_get_u8(data[IFLA_VXLAN_TOS]);

	if (data[IFLA_VXLAN_TTL])
		vxlan->ttl = nla_get_u8(data[IFLA_VXLAN_TTL]);

	if (!data[IFLA_VXLAN_LEARNING] || nla_get_u8(data[IFLA_VXLAN_LEARNING]))
		vxlan->flags |= VXLAN_F_LEARN;

	if (data[IFLA_VXLAN_AGEING])
		vxlan->age_interval = nla_get_u32(data[IFLA_VXLAN_AGEING]);
	else
		vxlan->age_interval = FDB_AGE_DEFAULT;

	if (data[IFLA_VXLAN_PROXY] && nla_get_u8(data[IFLA_VXLAN_PROXY]))
		vxlan->flags |= VXLAN_F_PROXY;

	if (data[IFLA_VXLAN_RSC] && nla_get_u8(data[IFLA_VXLAN_RSC]))
		vxlan->flags |= VXLAN_F_RSC;

	if (data[IFLA_VXLAN_L2MISS] && nla_get_u8(data[IFLA_VXLAN_L2MISS]))
		vxlan->flags |= VXLAN_F_L2MISS;

	if (data[IFLA_VXLAN_L3MISS] && nla_get_u8(data[IFLA_VXLAN_L3MISS]))
		vxlan->flags |= VXLAN_F_L3MISS;

	if (data[IFLA_VXLAN_LIMIT])
		vxlan->addrmax = nla_get_u32(data[IFLA_VXLAN_LIMIT]);

	if (data[IFLA_VXLAN_PORT_RANGE]) {
		const struct ifla_vxlan_port_range *p
			= nla_data(data[IFLA_VXLAN_PORT_RANGE]);
		vxlan->port_min = ntohs(p->low);
		vxlan->port_max = ntohs(p->high);
	}

	if (data[IFLA_VXLAN_PORT])
		vxlan->dst_port = nla_get_be16(data[IFLA_VXLAN_PORT]);

	if (vxlan_find_vni(net, vni, vxlan->dst_port)) {
		pr_info("duplicate VNI %u\n", vni);
		return -EEXIST;
	}

	SET_ETHTOOL_OPS(dev, &vxlan_ethtool_ops);

	/* create an fdb entry for a valid default destination */
	if (!vxlan_addr_any(&vxlan->default_dst.remote_ip)) {
		err = vxlan_fdb_create(vxlan, all_zeros_mac,
				       &vxlan->default_dst.remote_ip,
				       NUD_REACHABLE|NUD_PERMANENT,
				       NLM_F_EXCL|NLM_F_CREATE,
				       vxlan->dst_port,
				       vxlan->default_dst.remote_vni,
				       vxlan->default_dst.remote_ifindex,
				       NTF_SELF);
		if (err)
			return err;
	}

	err = register_netdevice(dev);
	if (err) {
		vxlan_fdb_delete_default(vxlan);
		return err;
	}

	list_add(&vxlan->next, &vn->vxlan_list);

	return 0;
}
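/* An illustrative iproute2 invocation (not part of this file) that
 * exercises this path:
 *
 *   ip link add vxlan0 type vxlan id 42 group 239.1.1.1 \
 *           dev eth0 dstport 4789
 *
 * "id" maps to IFLA_VXLAN_ID, "group" to IFLA_VXLAN_GROUP, "dev" to
 * IFLA_VXLAN_LINK and "dstport" to IFLA_VXLAN_PORT above.
 */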
static void vxlan_dellink(struct net_device *dev, struct list_head *head)
{
	struct vxlan_net *vn = net_generic(dev_net(dev), vxlan_net_id);
	struct vxlan_dev *vxlan = netdev_priv(dev);

	spin_lock(&vn->sock_lock);
	if (!hlist_unhashed(&vxlan->hlist))
		hlist_del_rcu(&vxlan->hlist);
	spin_unlock(&vn->sock_lock);

	list_del(&vxlan->next);
	unregister_netdevice_queue(dev, head);
}
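/* unregister_netdevice_queue() only queues dev on the caller-supplied
 * list; batching lets vxlan_handle_lowerdev_unregister() below remove
 * several devices with a single unregister_netdevice_many() call,
 * sharing the expensive synchronization between them.
 */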
static size_t vxlan_get_size(const struct net_device *dev)
{
	return nla_total_size(sizeof(__u32)) +	/* IFLA_VXLAN_ID */
		nla_total_size(sizeof(struct in6_addr)) + /* IFLA_VXLAN_GROUP{6} */
		nla_total_size(sizeof(__u32)) +	/* IFLA_VXLAN_LINK */
		nla_total_size(sizeof(struct in6_addr)) + /* IFLA_VXLAN_LOCAL{6} */
		nla_total_size(sizeof(__u8)) +	/* IFLA_VXLAN_TTL */
		nla_total_size(sizeof(__u8)) +	/* IFLA_VXLAN_TOS */
		nla_total_size(sizeof(__u8)) +	/* IFLA_VXLAN_LEARNING */
		nla_total_size(sizeof(__u8)) +	/* IFLA_VXLAN_PROXY */
		nla_total_size(sizeof(__u8)) +	/* IFLA_VXLAN_RSC */
		nla_total_size(sizeof(__u8)) +	/* IFLA_VXLAN_L2MISS */
		nla_total_size(sizeof(__u8)) +	/* IFLA_VXLAN_L3MISS */
		nla_total_size(sizeof(__u32)) +	/* IFLA_VXLAN_AGEING */
		nla_total_size(sizeof(__u32)) +	/* IFLA_VXLAN_LIMIT */
		nla_total_size(sizeof(struct ifla_vxlan_port_range)) + /* IFLA_VXLAN_PORT_RANGE */
		nla_total_size(sizeof(__be16)) + /* IFLA_VXLAN_PORT */
		0;
}
static int vxlan_fill_info(struct sk_buff *skb, const struct net_device *dev)
{
	const struct vxlan_dev *vxlan = netdev_priv(dev);
	const struct vxlan_rdst *dst = &vxlan->default_dst;
	struct ifla_vxlan_port_range ports = {
		.low  = htons(vxlan->port_min),
		.high = htons(vxlan->port_max),
	};

	if (nla_put_u32(skb, IFLA_VXLAN_ID, dst->remote_vni))
		goto nla_put_failure;

	if (!vxlan_addr_any(&dst->remote_ip)) {
		if (dst->remote_ip.sa.sa_family == AF_INET) {
			if (nla_put_be32(skb, IFLA_VXLAN_GROUP,
					 dst->remote_ip.sin.sin_addr.s_addr))
				goto nla_put_failure;
#if IS_ENABLED(CONFIG_IPV6)
		} else {
			if (nla_put(skb, IFLA_VXLAN_GROUP6, sizeof(struct in6_addr),
				    &dst->remote_ip.sin6.sin6_addr))
				goto nla_put_failure;
#endif
		}
	}

	if (dst->remote_ifindex && nla_put_u32(skb, IFLA_VXLAN_LINK, dst->remote_ifindex))
		goto nla_put_failure;

	if (!vxlan_addr_any(&vxlan->saddr)) {
		if (vxlan->saddr.sa.sa_family == AF_INET) {
			if (nla_put_be32(skb, IFLA_VXLAN_LOCAL,
					 vxlan->saddr.sin.sin_addr.s_addr))
				goto nla_put_failure;
#if IS_ENABLED(CONFIG_IPV6)
		} else {
			if (nla_put(skb, IFLA_VXLAN_LOCAL6, sizeof(struct in6_addr),
				    &vxlan->saddr.sin6.sin6_addr))
				goto nla_put_failure;
#endif
		}
	}

	if (nla_put_u8(skb, IFLA_VXLAN_TTL, vxlan->ttl) ||
	    nla_put_u8(skb, IFLA_VXLAN_TOS, vxlan->tos) ||
	    nla_put_u8(skb, IFLA_VXLAN_LEARNING,
		       !!(vxlan->flags & VXLAN_F_LEARN)) ||
	    nla_put_u8(skb, IFLA_VXLAN_PROXY,
		       !!(vxlan->flags & VXLAN_F_PROXY)) ||
	    nla_put_u8(skb, IFLA_VXLAN_RSC, !!(vxlan->flags & VXLAN_F_RSC)) ||
	    nla_put_u8(skb, IFLA_VXLAN_L2MISS,
		       !!(vxlan->flags & VXLAN_F_L2MISS)) ||
	    nla_put_u8(skb, IFLA_VXLAN_L3MISS,
		       !!(vxlan->flags & VXLAN_F_L3MISS)) ||
	    nla_put_u32(skb, IFLA_VXLAN_AGEING, vxlan->age_interval) ||
	    nla_put_u32(skb, IFLA_VXLAN_LIMIT, vxlan->addrmax) ||
	    nla_put_be16(skb, IFLA_VXLAN_PORT, vxlan->dst_port))
		goto nla_put_failure;

	if (nla_put(skb, IFLA_VXLAN_PORT_RANGE, sizeof(ports), &ports))
		goto nla_put_failure;

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}
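/* vxlan_get_size() above must account for every attribute emitted
 * here; an undersized estimate would make the nla_put*() calls run
 * out of room and abort the dump with -EMSGSIZE.
 */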
static struct rtnl_link_ops vxlan_link_ops __read_mostly = {
	.kind		= "vxlan",
	.maxtype	= IFLA_VXLAN_MAX,
	.policy		= vxlan_policy,
	.priv_size	= sizeof(struct vxlan_dev),
	.setup		= vxlan_setup,
	.validate	= vxlan_validate,
	.newlink	= vxlan_newlink,
	.dellink	= vxlan_dellink,
	.get_size	= vxlan_get_size,
	.fill_info	= vxlan_fill_info,
};
static void vxlan_handle_lowerdev_unregister(struct vxlan_net *vn,
					     struct net_device *dev)
{
	struct vxlan_dev *vxlan, *next;
	LIST_HEAD(list_kill);

	list_for_each_entry_safe(vxlan, next, &vn->vxlan_list, next) {
		struct vxlan_rdst *dst = &vxlan->default_dst;

		/* In case we created a vxlan device with carrier
		 * and we lose the carrier due to module unload,
		 * we also need to remove the vxlan device. In other
		 * cases it's not necessary: remote_ifindex is 0
		 * here, so nothing matches.
		 */
		if (dst->remote_ifindex == dev->ifindex)
			vxlan_dellink(vxlan->dev, &list_kill);
	}

	unregister_netdevice_many(&list_kill);
}
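/* Runs under RTNL from the netdevice notifier below, which is what
 * makes the unlocked walk of vn->vxlan_list safe: the list is only
 * changed by newlink/dellink, themselves called under RTNL.
 */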
static int vxlan_lowerdev_event(struct notifier_block *unused,
				unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct vxlan_net *vn = net_generic(dev_net(dev), vxlan_net_id);

	if (event == NETDEV_UNREGISTER)
		vxlan_handle_lowerdev_unregister(vn, dev);

	return NOTIFY_DONE;
}

static struct notifier_block vxlan_notifier_block __read_mostly = {
	.notifier_call = vxlan_lowerdev_event,
};
static __net_init int vxlan_init_net(struct net *net)
{
	struct vxlan_net *vn = net_generic(net, vxlan_net_id);
	unsigned int h;

	INIT_LIST_HEAD(&vn->vxlan_list);
	spin_lock_init(&vn->sock_lock);

	for (h = 0; h < PORT_HASH_SIZE; ++h)
		INIT_HLIST_HEAD(&vn->sock_list[h]);

	return 0;
}

static struct pernet_operations vxlan_net_ops = {
	.init = vxlan_init_net,
	.id   = &vxlan_net_id,
	.size = sizeof(struct vxlan_net),
};
static int __init vxlan_init_module(void)
{
	int rc;

	vxlan_wq = alloc_workqueue("vxlan", 0, 0);
	if (!vxlan_wq)
		return -ENOMEM;

	get_random_bytes(&vxlan_salt, sizeof(vxlan_salt));

	rc = register_pernet_subsys(&vxlan_net_ops);
	if (rc)
		goto out1;
	rc = register_netdevice_notifier(&vxlan_notifier_block);
	if (rc)
		goto out2;
	rc = rtnl_link_register(&vxlan_link_ops);
	if (rc)
		goto out3;
	return 0;

out3:
	unregister_netdevice_notifier(&vxlan_notifier_block);
out2:
	unregister_pernet_subsys(&vxlan_net_ops);
out1:
	destroy_workqueue(vxlan_wq);
	return rc;
}
late_initcall(vxlan_init_module);
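/* late_initcall() rather than module_init() is presumably chosen so
 * that, in built-in configurations, protocol infrastructure this
 * driver depends on (e.g. the IPv6 stub) has already initialized;
 * when built as a module the two are equivalent.
 */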
static void __exit vxlan_cleanup_module(void)
{
	rtnl_link_unregister(&vxlan_link_ops);
	unregister_netdevice_notifier(&vxlan_notifier_block);
	destroy_workqueue(vxlan_wq);
	unregister_pernet_subsys(&vxlan_net_ops);
	/* rcu_barrier() is called by netns */
}
module_exit(vxlan_cleanup_module);
MODULE_LICENSE("GPL");
MODULE_VERSION(VXLAN_VERSION);
MODULE_AUTHOR("Stephen Hemminger <stephen@networkplumber.org>");
MODULE_DESCRIPTION("Driver for VXLAN encapsulated traffic");
MODULE_ALIAS_RTNL_LINK("vxlan");