/*
 * VXLAN: Virtual eXtensible Local Area Network
 *
 * Copyright (c) 2012-2013 Vyatta Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/skbuff.h>
#include <linux/rculist.h>
#include <linux/netdevice.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/udp.h>
#include <linux/igmp.h>
#include <linux/etherdevice.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/hash.h>
#include <linux/ethtool.h>
#include <net/arp.h>
#include <net/ndisc.h>
#include <net/ip.h>
#include <net/ip_tunnels.h>
#include <net/icmp.h>
#include <net/udp.h>
#include <net/udp_tunnel.h>
#include <net/rtnetlink.h>
#include <net/route.h>
#include <net/dsfield.h>
#include <net/inet_ecn.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <net/vxlan.h>
#include <net/protocol.h>
#if IS_ENABLED(CONFIG_IPV6)
#include <net/ipv6.h>
#include <net/addrconf.h>
#include <net/ip6_tunnel.h>
#include <net/ip6_checksum.h>
#endif
#define VXLAN_VERSION	"0.1"

#define PORT_HASH_BITS	8
#define PORT_HASH_SIZE	(1<<PORT_HASH_BITS)
#define VNI_HASH_BITS	10
#define VNI_HASH_SIZE	(1<<VNI_HASH_BITS)
#define FDB_HASH_BITS	8
#define FDB_HASH_SIZE	(1<<FDB_HASH_BITS)
#define FDB_AGE_DEFAULT 300 /* 5 min */
#define FDB_AGE_INTERVAL (10 * HZ)	/* rescan interval */
/* UDP port for VXLAN traffic.
 * The IANA assigned port is 4789, but the Linux default is 8472
 * for compatibility with early adopters.
 */
static unsigned short vxlan_port __read_mostly = 8472;
module_param_named(udp_port, vxlan_port, ushort, 0444);
MODULE_PARM_DESC(udp_port, "Destination UDP port");
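/* Illustrative usage (not part of the original source): load the module
 * with the IANA-assigned port via "modprobe vxlan udp_port=4789". The
 * 0444 permissions make the parameter read-only once the module is
 * loaded, so it must be chosen at load time.
 */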
static bool log_ecn_error = true;
module_param(log_ecn_error, bool, 0644);
MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");
static int vxlan_net_id;

static const u8 all_zeros_mac[ETH_ALEN];
/* per-network namespace private data for this module */
struct vxlan_net {
	struct list_head  vxlan_list;
	struct hlist_head sock_list[PORT_HASH_SIZE];
	spinlock_t	  sock_lock;
};

union vxlan_addr {
	struct sockaddr_in sin;
	struct sockaddr_in6 sin6;
	struct sockaddr sa;
};

struct vxlan_rdst {
	union vxlan_addr remote_ip;
	__be16		 remote_port;
	u32		 remote_vni;
	u32		 remote_ifindex;
	struct list_head list;
	struct rcu_head	 rcu;
};

/* Forwarding table entry */
struct vxlan_fdb {
	struct hlist_node hlist;	/* linked list of entries */
	struct rcu_head	  rcu;
	unsigned long	  updated;	/* jiffies */
	unsigned long	  used;
	struct list_head  remotes;
	u16		  state;	/* see ndm_state */
	u8		  flags;	/* see ndm_flags */
	u8		  eth_addr[ETH_ALEN];
};

/* Pseudo network device */
struct vxlan_dev {
	struct hlist_node hlist;	/* vni hash table */
	struct list_head  next;		/* vxlan's per namespace list */
	struct vxlan_sock *vn_sock;	/* listening socket */
	struct net_device *dev;
	struct net	  *net;		/* netns for packet i/o */
	struct vxlan_rdst default_dst;	/* default destination */
	union vxlan_addr  saddr;	/* source address */
	__be16		  dst_port;
	__u16		  port_min;	/* source port range */
	__u16		  port_max;
	__u8		  tos;		/* TOS override */
	__u8		  ttl;
	u32		  flags;	/* VXLAN_F_* in vxlan.h */

	unsigned long	  age_interval;
	struct timer_list age_timer;
	spinlock_t	  hash_lock;
	unsigned int	  addrcnt;
	unsigned int	  addrmax;

	struct hlist_head fdb_head[FDB_HASH_SIZE];
};
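/* A quick sketch of how these structures relate (summary added by the
 * editor, not from the original comments): each net namespace owns one
 * vxlan_net, which hashes the shared vxlan_sock listeners by UDP port
 * (sock_list) and tracks every vxlan_dev (vxlan_list). A vxlan_sock in
 * turn hashes the devices bound to it by VNI, and each vxlan_dev keeps
 * its own Ethernet forwarding table (fdb_head) of vxlan_fdb entries,
 * where every entry carries one or more vxlan_rdst remote destinations.
 */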
/* salt for hash table */
static u32 vxlan_salt __read_mostly;
static struct workqueue_struct *vxlan_wq;
#if IS_ENABLED(CONFIG_IPV6)
static inline
bool vxlan_addr_equal(const union vxlan_addr *a, const union vxlan_addr *b)
{
	if (a->sa.sa_family != b->sa.sa_family)
		return false;
	if (a->sa.sa_family == AF_INET6)
		return ipv6_addr_equal(&a->sin6.sin6_addr, &b->sin6.sin6_addr);
	else
		return a->sin.sin_addr.s_addr == b->sin.sin_addr.s_addr;
}

static inline bool vxlan_addr_any(const union vxlan_addr *ipa)
{
	if (ipa->sa.sa_family == AF_INET6)
		return ipv6_addr_any(&ipa->sin6.sin6_addr);
	else
		return ipa->sin.sin_addr.s_addr == htonl(INADDR_ANY);
}

static inline bool vxlan_addr_multicast(const union vxlan_addr *ipa)
{
	if (ipa->sa.sa_family == AF_INET6)
		return ipv6_addr_is_multicast(&ipa->sin6.sin6_addr);
	else
		return IN_MULTICAST(ntohl(ipa->sin.sin_addr.s_addr));
}

static int vxlan_nla_get_addr(union vxlan_addr *ip, struct nlattr *nla)
{
	if (nla_len(nla) >= sizeof(struct in6_addr)) {
		ip->sin6.sin6_addr = nla_get_in6_addr(nla);
		ip->sa.sa_family = AF_INET6;
		return 0;
	} else if (nla_len(nla) >= sizeof(__be32)) {
		ip->sin.sin_addr.s_addr = nla_get_in_addr(nla);
		ip->sa.sa_family = AF_INET;
		return 0;
	} else {
		return -EAFNOSUPPORT;
	}
}

static int vxlan_nla_put_addr(struct sk_buff *skb, int attr,
			      const union vxlan_addr *ip)
{
	if (ip->sa.sa_family == AF_INET6)
		return nla_put_in6_addr(skb, attr, &ip->sin6.sin6_addr);
	else
		return nla_put_in_addr(skb, attr, ip->sin.sin_addr.s_addr);
}
#else /* !CONFIG_IPV6 */

static inline
bool vxlan_addr_equal(const union vxlan_addr *a, const union vxlan_addr *b)
{
	return a->sin.sin_addr.s_addr == b->sin.sin_addr.s_addr;
}

static inline bool vxlan_addr_any(const union vxlan_addr *ipa)
{
	return ipa->sin.sin_addr.s_addr == htonl(INADDR_ANY);
}

static inline bool vxlan_addr_multicast(const union vxlan_addr *ipa)
{
	return IN_MULTICAST(ntohl(ipa->sin.sin_addr.s_addr));
}

static int vxlan_nla_get_addr(union vxlan_addr *ip, struct nlattr *nla)
{
	if (nla_len(nla) >= sizeof(struct in6_addr)) {
		return -EAFNOSUPPORT;
	} else if (nla_len(nla) >= sizeof(__be32)) {
		ip->sin.sin_addr.s_addr = nla_get_in_addr(nla);
		ip->sa.sa_family = AF_INET;
		return 0;
	} else {
		return -EAFNOSUPPORT;
	}
}

static int vxlan_nla_put_addr(struct sk_buff *skb, int attr,
			      const union vxlan_addr *ip)
{
	return nla_put_in_addr(skb, attr, ip->sin.sin_addr.s_addr);
}
#endif

/* Virtual Network hash table head */
static inline struct hlist_head *vni_head(struct vxlan_sock *vs, u32 id)
{
	return &vs->vni_list[hash_32(id, VNI_HASH_BITS)];
}

/* Socket hash table head */
static inline struct hlist_head *vs_head(struct net *net, __be16 port)
{
	struct vxlan_net *vn = net_generic(net, vxlan_net_id);

	return &vn->sock_list[hash_32(ntohs(port), PORT_HASH_BITS)];
}

/* First remote destination for a forwarding entry.
 * Guaranteed to be non-NULL because remotes are never deleted.
 */
static inline struct vxlan_rdst *first_remote_rcu(struct vxlan_fdb *fdb)
{
	return list_entry_rcu(fdb->remotes.next, struct vxlan_rdst, list);
}

static inline struct vxlan_rdst *first_remote_rtnl(struct vxlan_fdb *fdb)
{
	return list_first_entry(&fdb->remotes, struct vxlan_rdst, list);
}
/* Find VXLAN socket based on network namespace, address family and UDP port
 * and enabled unshareable flags.
 */
static struct vxlan_sock *vxlan_find_sock(struct net *net, sa_family_t family,
					  __be16 port, u32 flags)
{
	struct vxlan_sock *vs;

	flags &= VXLAN_F_RCV_FLAGS;

	hlist_for_each_entry_rcu(vs, vs_head(net, port), hlist) {
		if (inet_sk(vs->sock->sk)->inet_sport == port &&
		    inet_sk(vs->sock->sk)->sk.sk_family == family &&
		    vs->flags == flags)
			return vs;
	}
	return NULL;
}

static struct vxlan_dev *vxlan_vs_find_vni(struct vxlan_sock *vs, u32 id)
{
	struct vxlan_dev *vxlan;

	hlist_for_each_entry_rcu(vxlan, vni_head(vs, id), hlist) {
		if (vxlan->default_dst.remote_vni == id)
			return vxlan;
	}

	return NULL;
}

/* Look up VNI in a per net namespace table */
static struct vxlan_dev *vxlan_find_vni(struct net *net, u32 id,
					sa_family_t family, __be16 port,
					u32 flags)
{
	struct vxlan_sock *vs;

	vs = vxlan_find_sock(net, family, port, flags);
	if (!vs)
		return NULL;

	return vxlan_vs_find_vni(vs, id);
}
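/* Editor's note: receive-side demux is two-staged. vxlan_find_sock()
 * picks the shared socket by namespace, address family, UDP port and
 * the unshareable VXLAN_F_RCV_FLAGS bits; vxlan_vs_find_vni() then
 * selects the device by its 24-bit VNI within that socket's hash table.
 */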
/* Fill in neighbour message in skbuff. */
static int vxlan_fdb_info(struct sk_buff *skb, struct vxlan_dev *vxlan,
			  const struct vxlan_fdb *fdb,
			  u32 portid, u32 seq, int type, unsigned int flags,
			  const struct vxlan_rdst *rdst)
{
	unsigned long now = jiffies;
	struct nda_cacheinfo ci;
	struct nlmsghdr *nlh;
	struct ndmsg *ndm;
	bool send_ip, send_eth;

	nlh = nlmsg_put(skb, portid, seq, type, sizeof(*ndm), flags);
	if (nlh == NULL)
		return -EMSGSIZE;

	ndm = nlmsg_data(nlh);
	memset(ndm, 0, sizeof(*ndm));

	send_eth = send_ip = true;

	if (type == RTM_GETNEIGH) {
		ndm->ndm_family	= AF_INET;
		send_ip = !vxlan_addr_any(&rdst->remote_ip);
		send_eth = !is_zero_ether_addr(fdb->eth_addr);
	} else
		ndm->ndm_family	= AF_BRIDGE;
	ndm->ndm_state = fdb->state;
	ndm->ndm_ifindex = vxlan->dev->ifindex;
	ndm->ndm_flags = fdb->flags;
	ndm->ndm_type = RTN_UNICAST;

	if (!net_eq(dev_net(vxlan->dev), vxlan->net) &&
	    nla_put_s32(skb, NDA_LINK_NETNSID,
			peernet2id(dev_net(vxlan->dev), vxlan->net)))
		goto nla_put_failure;

	if (send_eth && nla_put(skb, NDA_LLADDR, ETH_ALEN, &fdb->eth_addr))
		goto nla_put_failure;

	if (send_ip && vxlan_nla_put_addr(skb, NDA_DST, &rdst->remote_ip))
		goto nla_put_failure;

	if (rdst->remote_port && rdst->remote_port != vxlan->dst_port &&
	    nla_put_be16(skb, NDA_PORT, rdst->remote_port))
		goto nla_put_failure;
	if (rdst->remote_vni != vxlan->default_dst.remote_vni &&
	    nla_put_u32(skb, NDA_VNI, rdst->remote_vni))
		goto nla_put_failure;
	if (rdst->remote_ifindex &&
	    nla_put_u32(skb, NDA_IFINDEX, rdst->remote_ifindex))
		goto nla_put_failure;

	ci.ndm_used	 = jiffies_to_clock_t(now - fdb->used);
	ci.ndm_confirmed = 0;
	ci.ndm_updated	 = jiffies_to_clock_t(now - fdb->updated);
	ci.ndm_refcnt	 = 0;

	if (nla_put(skb, NDA_CACHEINFO, sizeof(ci), &ci))
		goto nla_put_failure;

	nlmsg_end(skb, nlh);
	return 0;

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}
static inline size_t vxlan_nlmsg_size(void)
{
	return NLMSG_ALIGN(sizeof(struct ndmsg))
		+ nla_total_size(ETH_ALEN) /* NDA_LLADDR */
		+ nla_total_size(sizeof(struct in6_addr)) /* NDA_DST */
		+ nla_total_size(sizeof(__be16)) /* NDA_PORT */
		+ nla_total_size(sizeof(__be32)) /* NDA_VNI */
		+ nla_total_size(sizeof(__u32)) /* NDA_IFINDEX */
		+ nla_total_size(sizeof(__s32)) /* NDA_LINK_NETNSID */
		+ nla_total_size(sizeof(struct nda_cacheinfo));
}

static void vxlan_fdb_notify(struct vxlan_dev *vxlan, struct vxlan_fdb *fdb,
			     struct vxlan_rdst *rd, int type)
{
	struct net *net = dev_net(vxlan->dev);
	struct sk_buff *skb;
	int err = -ENOBUFS;

	skb = nlmsg_new(vxlan_nlmsg_size(), GFP_ATOMIC);
	if (skb == NULL)
		goto errout;

	err = vxlan_fdb_info(skb, vxlan, fdb, 0, 0, type, 0, rd);
	if (err < 0) {
		/* -EMSGSIZE implies BUG in vxlan_nlmsg_size() */
		WARN_ON(err == -EMSGSIZE);
		kfree_skb(skb);
		goto errout;
	}

	rtnl_notify(skb, net, 0, RTNLGRP_NEIGH, NULL, GFP_ATOMIC);
	return;

errout:
	if (err < 0)
		rtnl_set_sk_err(net, RTNLGRP_NEIGH, err);
}
static void vxlan_ip_miss(struct net_device *dev, union vxlan_addr *ipa)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	struct vxlan_fdb f = {
		.state = NUD_STALE,
	};
	struct vxlan_rdst remote = {
		.remote_ip = *ipa, /* goes to NDA_DST */
		.remote_vni = VXLAN_N_VID,
	};

	vxlan_fdb_notify(vxlan, &f, &remote, RTM_GETNEIGH);
}

static void vxlan_fdb_miss(struct vxlan_dev *vxlan, const u8 eth_addr[ETH_ALEN])
{
	struct vxlan_fdb f = {
		.state = NUD_STALE,
	};
	struct vxlan_rdst remote = { };

	memcpy(f.eth_addr, eth_addr, ETH_ALEN);

	vxlan_fdb_notify(vxlan, &f, &remote, RTM_GETNEIGH);
}
/* Hash Ethernet address */
static u32 eth_hash(const unsigned char *addr)
{
	u64 value = get_unaligned((u64 *)addr);

	/* only want 6 bytes */
#ifdef __BIG_ENDIAN
	value >>= 16;
#else
	value <<= 16;
#endif
	return hash_64(value, FDB_HASH_BITS);
}

/* Hash chain to use given mac address */
static inline struct hlist_head *vxlan_fdb_head(struct vxlan_dev *vxlan,
						const u8 *mac)
{
	return &vxlan->fdb_head[eth_hash(mac)];
}

/* Look up Ethernet address in forwarding table */
static struct vxlan_fdb *__vxlan_find_mac(struct vxlan_dev *vxlan,
					  const u8 *mac)
{
	struct hlist_head *head = vxlan_fdb_head(vxlan, mac);
	struct vxlan_fdb *f;

	hlist_for_each_entry_rcu(f, head, hlist) {
		if (ether_addr_equal(mac, f->eth_addr))
			return f;
	}

	return NULL;
}

static struct vxlan_fdb *vxlan_find_mac(struct vxlan_dev *vxlan,
					const u8 *mac)
{
	struct vxlan_fdb *f;

	f = __vxlan_find_mac(vxlan, mac);
	if (f)
		f->used = jiffies;

	return f;
}
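/* Editor's note: vxlan_find_mac() (unlike __vxlan_find_mac()) also
 * refreshes f->used, so data-path lookups count as activity for the
 * ageing timer in vxlan_cleanup().
 */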
/* caller should hold vxlan->hash_lock */
static struct vxlan_rdst *vxlan_fdb_find_rdst(struct vxlan_fdb *f,
					      union vxlan_addr *ip, __be16 port,
					      __u32 vni, __u32 ifindex)
{
	struct vxlan_rdst *rd;

	list_for_each_entry(rd, &f->remotes, list) {
		if (vxlan_addr_equal(&rd->remote_ip, ip) &&
		    rd->remote_port == port &&
		    rd->remote_vni == vni &&
		    rd->remote_ifindex == ifindex)
			return rd;
	}

	return NULL;
}

/* Replace destination of unicast mac */
static int vxlan_fdb_replace(struct vxlan_fdb *f,
			     union vxlan_addr *ip, __be16 port, __u32 vni,
			     __u32 ifindex)
{
	struct vxlan_rdst *rd;

	rd = vxlan_fdb_find_rdst(f, ip, port, vni, ifindex);
	if (rd)
		return 0;

	rd = list_first_entry_or_null(&f->remotes, struct vxlan_rdst, list);
	if (!rd)
		return 0;
	rd->remote_ip = *ip;
	rd->remote_port = port;
	rd->remote_vni = vni;
	rd->remote_ifindex = ifindex;
	return 1;
}

/* Add/update destinations for multicast */
static int vxlan_fdb_append(struct vxlan_fdb *f,
			    union vxlan_addr *ip, __be16 port, __u32 vni,
			    __u32 ifindex, struct vxlan_rdst **rdp)
{
	struct vxlan_rdst *rd;

	rd = vxlan_fdb_find_rdst(f, ip, port, vni, ifindex);
	if (rd)
		return 0;

	rd = kmalloc(sizeof(*rd), GFP_ATOMIC);
	if (rd == NULL)
		return -ENOBUFS;
	rd->remote_ip = *ip;
	rd->remote_port = port;
	rd->remote_vni = vni;
	rd->remote_ifindex = ifindex;

	list_add_tail_rcu(&rd->list, &f->remotes);

	*rdp = rd;
	return 1;
}
static struct vxlanhdr *vxlan_gro_remcsum(struct sk_buff *skb,
					  unsigned int off,
					  struct vxlanhdr *vh, size_t hdrlen,
					  u32 data, struct gro_remcsum *grc,
					  bool nopartial)
{
	size_t start, offset, plen;

	if (skb->remcsum_offload)
		return vh;

	if (!NAPI_GRO_CB(skb)->csum_valid)
		return NULL;

	start = (data & VXLAN_RCO_MASK) << VXLAN_RCO_SHIFT;
	offset = start + ((data & VXLAN_RCO_UDP) ?
			  offsetof(struct udphdr, check) :
			  offsetof(struct tcphdr, check));

	plen = hdrlen + offset + sizeof(u16);

	/* Pull checksum that will be written */
	if (skb_gro_header_hard(skb, off + plen)) {
		vh = skb_gro_header_slow(skb, off + plen, off);
		if (!vh)
			return NULL;
	}

	skb_gro_remcsum_process(skb, (void *)vh + hdrlen,
				start, offset, grc, nopartial);

	skb->remcsum_offload = 1;

	return vh;
}
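/* Remote checksum offload (RCO) note, assuming the bit layout defined
 * in net/vxlan.h: the low-order bits of vx_vni carry the checksum start
 * (scaled by VXLAN_RCO_SHIFT) and VXLAN_RCO_UDP selects whether the
 * field to patch is the inner UDP or TCP checksum. Both the GRO helper
 * above and vxlan_remcsum() on the non-GRO path decode that same word.
 */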
static struct sk_buff **vxlan_gro_receive(struct sk_buff **head,
					  struct sk_buff *skb,
					  struct udp_offload *uoff)
{
	struct sk_buff *p, **pp = NULL;
	struct vxlanhdr *vh, *vh2;
	unsigned int hlen, off_vx;
	int flush = 1;
	struct vxlan_sock *vs = container_of(uoff, struct vxlan_sock,
					     udp_offloads);
	u32 flags;
	struct gro_remcsum grc;

	skb_gro_remcsum_init(&grc);

	off_vx = skb_gro_offset(skb);
	hlen = off_vx + sizeof(*vh);
	vh   = skb_gro_header_fast(skb, off_vx);
	if (skb_gro_header_hard(skb, hlen)) {
		vh = skb_gro_header_slow(skb, hlen, off_vx);
		if (unlikely(!vh))
			goto out;
	}

	skb_gro_pull(skb, sizeof(struct vxlanhdr)); /* pull vxlan header */
	skb_gro_postpull_rcsum(skb, vh, sizeof(struct vxlanhdr));

	flags = ntohl(vh->vx_flags);

	if ((flags & VXLAN_HF_RCO) && (vs->flags & VXLAN_F_REMCSUM_RX)) {
		vh = vxlan_gro_remcsum(skb, off_vx, vh, sizeof(struct vxlanhdr),
				       ntohl(vh->vx_vni), &grc,
				       !!(vs->flags &
					  VXLAN_F_REMCSUM_NOPARTIAL));
		if (!vh)
			goto out;
	}

	flush = 0;

	for (p = *head; p; p = p->next) {
		if (!NAPI_GRO_CB(p)->same_flow)
			continue;

		vh2 = (struct vxlanhdr *)(p->data + off_vx);
		if (vh->vx_flags != vh2->vx_flags ||
		    vh->vx_vni != vh2->vx_vni) {
			NAPI_GRO_CB(p)->same_flow = 0;
			continue;
		}
	}

	pp = eth_gro_receive(head, skb);

out:
	skb_gro_remcsum_cleanup(skb, &grc);
	NAPI_GRO_CB(skb)->flush |= flush;

	return pp;
}

static int vxlan_gro_complete(struct sk_buff *skb, int nhoff,
			      struct udp_offload *uoff)
{
	udp_tunnel_gro_complete(skb, nhoff);

	return eth_gro_complete(skb, nhoff + sizeof(struct vxlanhdr));
}
/* Notify netdevs that UDP port started listening */
static void vxlan_notify_add_rx_port(struct vxlan_sock *vs)
{
	struct net_device *dev;
	struct sock *sk = vs->sock->sk;
	struct net *net = sock_net(sk);
	sa_family_t sa_family = sk->sk_family;
	__be16 port = inet_sk(sk)->inet_sport;
	int err;

	if (sa_family == AF_INET) {
		err = udp_add_offload(&vs->udp_offloads);
		if (err)
			pr_warn("vxlan: udp_add_offload failed with status %d\n", err);
	}

	rcu_read_lock();
	for_each_netdev_rcu(net, dev) {
		if (dev->netdev_ops->ndo_add_vxlan_port)
			dev->netdev_ops->ndo_add_vxlan_port(dev, sa_family,
							    port);
	}
	rcu_read_unlock();
}

/* Notify netdevs that UDP port is no more listening */
static void vxlan_notify_del_rx_port(struct vxlan_sock *vs)
{
	struct net_device *dev;
	struct sock *sk = vs->sock->sk;
	struct net *net = sock_net(sk);
	sa_family_t sa_family = sk->sk_family;
	__be16 port = inet_sk(sk)->inet_sport;

	rcu_read_lock();
	for_each_netdev_rcu(net, dev) {
		if (dev->netdev_ops->ndo_del_vxlan_port)
			dev->netdev_ops->ndo_del_vxlan_port(dev, sa_family,
							    port);
	}
	rcu_read_unlock();

	if (sa_family == AF_INET)
		udp_del_offload(&vs->udp_offloads);
}
/* Add new entry to forwarding table -- assumes lock held */
static int vxlan_fdb_create(struct vxlan_dev *vxlan,
			    const u8 *mac, union vxlan_addr *ip,
			    __u16 state, __u16 flags,
			    __be16 port, __u32 vni, __u32 ifindex,
			    __u8 ndm_flags)
{
	struct vxlan_rdst *rd = NULL;
	struct vxlan_fdb *f;
	int notify = 0;

	f = __vxlan_find_mac(vxlan, mac);
	if (f) {
		if (flags & NLM_F_EXCL) {
			netdev_dbg(vxlan->dev,
				   "lost race to create %pM\n", mac);
			return -EEXIST;
		}
		if (f->state != state) {
			f->state = state;
			f->updated = jiffies;
			notify = 1;
		}
		if (f->flags != ndm_flags) {
			f->flags = ndm_flags;
			f->updated = jiffies;
			notify = 1;
		}
		if ((flags & NLM_F_REPLACE)) {
			/* Only change unicasts */
			if (!(is_multicast_ether_addr(f->eth_addr) ||
			      is_zero_ether_addr(f->eth_addr))) {
				int rc = vxlan_fdb_replace(f, ip, port, vni,
							   ifindex);

				if (rc < 0)
					return rc;
				notify |= rc;
			} else
				return -EOPNOTSUPP;
		}
		if ((flags & NLM_F_APPEND) &&
		    (is_multicast_ether_addr(f->eth_addr) ||
		     is_zero_ether_addr(f->eth_addr))) {
			int rc = vxlan_fdb_append(f, ip, port, vni, ifindex,
						  &rd);

			if (rc < 0)
				return rc;
			notify |= rc;
		}
	} else {
		if (!(flags & NLM_F_CREATE))
			return -ENOENT;

		if (vxlan->addrmax && vxlan->addrcnt >= vxlan->addrmax)
			return -ENOSPC;

		/* Disallow replace to add a multicast entry */
		if ((flags & NLM_F_REPLACE) &&
		    (is_multicast_ether_addr(mac) || is_zero_ether_addr(mac)))
			return -EOPNOTSUPP;

		netdev_dbg(vxlan->dev, "add %pM -> %pIS\n", mac, ip);
		f = kmalloc(sizeof(*f), GFP_ATOMIC);
		if (!f)
			return -ENOMEM;

		notify = 1;
		f->state = state;
		f->flags = ndm_flags;
		f->updated = f->used = jiffies;
		INIT_LIST_HEAD(&f->remotes);
		memcpy(f->eth_addr, mac, ETH_ALEN);

		vxlan_fdb_append(f, ip, port, vni, ifindex, &rd);

		++vxlan->addrcnt;
		hlist_add_head_rcu(&f->hlist,
				   vxlan_fdb_head(vxlan, mac));
	}

	if (notify) {
		if (rd == NULL)
			rd = first_remote_rtnl(f);
		vxlan_fdb_notify(vxlan, f, rd, RTM_NEWNEIGH);
	}

	return 0;
}
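/* Editor's note: the semantics above mirror the bridge fdb netlink
 * flags. NLM_F_EXCL fails on an existing entry, NLM_F_REPLACE rewrites
 * the destination of a unicast entry, NLM_F_APPEND adds another remote
 * to a multicast/all-zeros entry, and NLM_F_CREATE is required to
 * instantiate a new one.
 */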
static void vxlan_fdb_free(struct rcu_head *head)
{
	struct vxlan_fdb *f = container_of(head, struct vxlan_fdb, rcu);
	struct vxlan_rdst *rd, *nd;

	list_for_each_entry_safe(rd, nd, &f->remotes, list)
		kfree(rd);
	kfree(f);
}

static void vxlan_fdb_destroy(struct vxlan_dev *vxlan, struct vxlan_fdb *f)
{
	netdev_dbg(vxlan->dev,
		   "delete %pM\n", f->eth_addr);

	--vxlan->addrcnt;
	vxlan_fdb_notify(vxlan, f, first_remote_rtnl(f), RTM_DELNEIGH);

	hlist_del_rcu(&f->hlist);
	call_rcu(&f->rcu, vxlan_fdb_free);
}
static int vxlan_fdb_parse(struct nlattr *tb[], struct vxlan_dev *vxlan,
			   union vxlan_addr *ip, __be16 *port, u32 *vni,
			   u32 *ifindex)
{
	struct net *net = dev_net(vxlan->dev);
	int err;

	if (tb[NDA_DST]) {
		err = vxlan_nla_get_addr(ip, tb[NDA_DST]);
		if (err)
			return err;
	} else {
		union vxlan_addr *remote = &vxlan->default_dst.remote_ip;
		if (remote->sa.sa_family == AF_INET) {
			ip->sin.sin_addr.s_addr = htonl(INADDR_ANY);
			ip->sa.sa_family = AF_INET;
#if IS_ENABLED(CONFIG_IPV6)
		} else {
			ip->sin6.sin6_addr = in6addr_any;
			ip->sa.sa_family = AF_INET6;
#endif
		}
	}

	if (tb[NDA_PORT]) {
		if (nla_len(tb[NDA_PORT]) != sizeof(__be16))
			return -EINVAL;
		*port = nla_get_be16(tb[NDA_PORT]);
	} else {
		*port = vxlan->dst_port;
	}

	if (tb[NDA_VNI]) {
		if (nla_len(tb[NDA_VNI]) != sizeof(u32))
			return -EINVAL;
		*vni = nla_get_u32(tb[NDA_VNI]);
	} else {
		*vni = vxlan->default_dst.remote_vni;
	}

	if (tb[NDA_IFINDEX]) {
		struct net_device *tdev;

		if (nla_len(tb[NDA_IFINDEX]) != sizeof(u32))
			return -EINVAL;
		*ifindex = nla_get_u32(tb[NDA_IFINDEX]);
		tdev = __dev_get_by_index(net, *ifindex);
		if (!tdev)
			return -EADDRNOTAVAIL;
	} else {
		*ifindex = 0;
	}

	return 0;
}
/* Add static entry (via netlink) */
static int vxlan_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
			 struct net_device *dev,
			 const unsigned char *addr, u16 vid, u16 flags)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	union vxlan_addr ip;
	__be16 port;
	u32 vni, ifindex;
	int err;

	if (!(ndm->ndm_state & (NUD_PERMANENT|NUD_REACHABLE))) {
		pr_info("RTM_NEWNEIGH with invalid state %#x\n",
			ndm->ndm_state);
		return -EINVAL;
	}

	if (tb[NDA_DST] == NULL)
		return -EINVAL;

	err = vxlan_fdb_parse(tb, vxlan, &ip, &port, &vni, &ifindex);
	if (err)
		return err;

	if (vxlan->default_dst.remote_ip.sa.sa_family != ip.sa.sa_family)
		return -EAFNOSUPPORT;

	spin_lock_bh(&vxlan->hash_lock);
	err = vxlan_fdb_create(vxlan, addr, &ip, ndm->ndm_state, flags,
			       port, vni, ifindex, ndm->ndm_flags);
	spin_unlock_bh(&vxlan->hash_lock);

	return err;
}
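/* Illustrative iproute2 usage (not from this file): a static entry such
 * as "bridge fdb append 00:11:22:33:44:55 dev vxlan0 dst 10.0.0.2"
 * reaches this handler with NDA_DST carrying the tunnel endpoint
 * address parsed by vxlan_fdb_parse() above.
 */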
/* Delete entry (via netlink) */
static int vxlan_fdb_delete(struct ndmsg *ndm, struct nlattr *tb[],
			    struct net_device *dev,
			    const unsigned char *addr, u16 vid)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	struct vxlan_fdb *f;
	struct vxlan_rdst *rd = NULL;
	union vxlan_addr ip;
	__be16 port;
	u32 vni, ifindex;
	int err;

	err = vxlan_fdb_parse(tb, vxlan, &ip, &port, &vni, &ifindex);
	if (err)
		return err;

	err = -ENOENT;

	spin_lock_bh(&vxlan->hash_lock);
	f = vxlan_find_mac(vxlan, addr);
	if (!f)
		goto out;

	if (!vxlan_addr_any(&ip)) {
		rd = vxlan_fdb_find_rdst(f, &ip, port, vni, ifindex);
		if (!rd)
			goto out;
	}

	err = 0;

	/* remove a destination if it's not the only one on the list,
	 * otherwise destroy the fdb entry
	 */
	if (rd && !list_is_singular(&f->remotes)) {
		list_del_rcu(&rd->list);
		vxlan_fdb_notify(vxlan, f, rd, RTM_DELNEIGH);
		kfree_rcu(rd, rcu);
		goto out;
	}

	vxlan_fdb_destroy(vxlan, f);

out:
	spin_unlock_bh(&vxlan->hash_lock);

	return err;
}
/* Dump forwarding table */
static int vxlan_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb,
			  struct net_device *dev,
			  struct net_device *filter_dev, int idx)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	unsigned int h;

	for (h = 0; h < FDB_HASH_SIZE; ++h) {
		struct vxlan_fdb *f;
		int err;

		hlist_for_each_entry_rcu(f, &vxlan->fdb_head[h], hlist) {
			struct vxlan_rdst *rd;

			if (idx < cb->args[0])
				goto skip;

			list_for_each_entry_rcu(rd, &f->remotes, list) {
				err = vxlan_fdb_info(skb, vxlan, f,
						     NETLINK_CB(cb->skb).portid,
						     cb->nlh->nlmsg_seq,
						     RTM_NEWNEIGH,
						     NLM_F_MULTI, rd);
				if (err < 0)
					goto out;
			}
skip:
			++idx;
		}
	}
out:
	return idx;
}
/* Watch incoming packets to learn mapping between Ethernet address
 * and Tunnel endpoint.
 * Return true if packet is bogus and should be dropped.
 */
static bool vxlan_snoop(struct net_device *dev,
			union vxlan_addr *src_ip, const u8 *src_mac)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	struct vxlan_fdb *f;

	f = vxlan_find_mac(vxlan, src_mac);
	if (likely(f)) {
		struct vxlan_rdst *rdst = first_remote_rcu(f);

		if (likely(vxlan_addr_equal(&rdst->remote_ip, src_ip)))
			return false;

		/* Don't migrate static entries, drop packets */
		if (f->state & NUD_NOARP)
			return true;

		if (net_ratelimit())
			netdev_info(dev,
				    "%pM migrated from %pIS to %pIS\n",
				    src_mac, &rdst->remote_ip.sa, &src_ip->sa);

		rdst->remote_ip = *src_ip;
		f->updated = jiffies;
		vxlan_fdb_notify(vxlan, f, rdst, RTM_NEWNEIGH);
	} else {
		/* learned new entry */
		spin_lock(&vxlan->hash_lock);

		/* close off race between vxlan_flush and incoming packets */
		if (netif_running(dev))
			vxlan_fdb_create(vxlan, src_mac, src_ip,
					 NUD_REACHABLE,
					 NLM_F_EXCL|NLM_F_CREATE,
					 vxlan->dst_port,
					 vxlan->default_dst.remote_vni,
					 0, NTF_SELF);
		spin_unlock(&vxlan->hash_lock);
	}

	return false;
}
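/* Learning notes (editor's summary of the code above): only the first
 * remote of an existing entry is ever migrated, static (NUD_NOARP)
 * entries cause the packet to be dropped instead of migrated, and each
 * migration or newly learned address is announced to user space via
 * RTM_NEWNEIGH.
 */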
/* See if multicast group is already in use by other ID */
static bool vxlan_group_used(struct vxlan_net *vn, struct vxlan_dev *dev)
{
	struct vxlan_dev *vxlan;

	/* The vxlan_sock is only used by dev, leaving group has
	 * no effect on other vxlan devices.
	 */
	if (atomic_read(&dev->vn_sock->refcnt) == 1)
		return false;

	list_for_each_entry(vxlan, &vn->vxlan_list, next) {
		if (!netif_running(vxlan->dev) || vxlan == dev)
			continue;

		if (vxlan->vn_sock != dev->vn_sock)
			continue;

		if (!vxlan_addr_equal(&vxlan->default_dst.remote_ip,
				      &dev->default_dst.remote_ip))
			continue;

		if (vxlan->default_dst.remote_ifindex !=
		    dev->default_dst.remote_ifindex)
			continue;

		return true;
	}

	return false;
}
void vxlan_sock_release(struct vxlan_sock *vs)
{
	struct sock *sk = vs->sock->sk;
	struct net *net = sock_net(sk);
	struct vxlan_net *vn = net_generic(net, vxlan_net_id);

	if (!atomic_dec_and_test(&vs->refcnt))
		return;

	spin_lock(&vn->sock_lock);
	hlist_del_rcu(&vs->hlist);
	vxlan_notify_del_rx_port(vs);
	spin_unlock(&vn->sock_lock);

	queue_work(vxlan_wq, &vs->del_work);
}
EXPORT_SYMBOL_GPL(vxlan_sock_release);
/* Update multicast group membership when first VNI on
 * multicast address is brought up
 */
static int vxlan_igmp_join(struct vxlan_dev *vxlan)
{
	struct vxlan_sock *vs = vxlan->vn_sock;
	struct sock *sk = vs->sock->sk;
	union vxlan_addr *ip = &vxlan->default_dst.remote_ip;
	int ifindex = vxlan->default_dst.remote_ifindex;
	int ret = -EINVAL;

	lock_sock(sk);
	if (ip->sa.sa_family == AF_INET) {
		struct ip_mreqn mreq = {
			.imr_multiaddr.s_addr	= ip->sin.sin_addr.s_addr,
			.imr_ifindex		= ifindex,
		};

		ret = ip_mc_join_group(sk, &mreq);
#if IS_ENABLED(CONFIG_IPV6)
	} else {
		ret = ipv6_stub->ipv6_sock_mc_join(sk, ifindex,
						   &ip->sin6.sin6_addr);
#endif
	}
	release_sock(sk);

	return ret;
}

/* Inverse of vxlan_igmp_join when last VNI is brought down */
static int vxlan_igmp_leave(struct vxlan_dev *vxlan)
{
	struct vxlan_sock *vs = vxlan->vn_sock;
	struct sock *sk = vs->sock->sk;
	union vxlan_addr *ip = &vxlan->default_dst.remote_ip;
	int ifindex = vxlan->default_dst.remote_ifindex;
	int ret = -EINVAL;

	lock_sock(sk);
	if (ip->sa.sa_family == AF_INET) {
		struct ip_mreqn mreq = {
			.imr_multiaddr.s_addr	= ip->sin.sin_addr.s_addr,
			.imr_ifindex		= ifindex,
		};

		ret = ip_mc_leave_group(sk, &mreq);
#if IS_ENABLED(CONFIG_IPV6)
	} else {
		ret = ipv6_stub->ipv6_sock_mc_drop(sk, ifindex,
						   &ip->sin6.sin6_addr);
#endif
	}
	release_sock(sk);

	return ret;
}
static struct vxlanhdr *vxlan_remcsum(struct sk_buff *skb, struct vxlanhdr *vh,
				      size_t hdrlen, u32 data, bool nopartial)
{
	size_t start, offset, plen;

	start = (data & VXLAN_RCO_MASK) << VXLAN_RCO_SHIFT;
	offset = start + ((data & VXLAN_RCO_UDP) ?
			  offsetof(struct udphdr, check) :
			  offsetof(struct tcphdr, check));

	plen = hdrlen + offset + sizeof(u16);

	if (!pskb_may_pull(skb, plen))
		return NULL;

	vh = (struct vxlanhdr *)(udp_hdr(skb) + 1);

	skb_remcsum_process(skb, (void *)vh + hdrlen, start, offset,
			    nopartial);

	return vh;
}
/* Callback from net/ipv4/udp.c to receive packets */
static int vxlan_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
{
	struct vxlan_sock *vs;
	struct vxlanhdr *vxh;
	u32 flags, vni;
	struct vxlan_metadata md = {0};

	/* Need Vxlan and inner Ethernet header to be present */
	if (!pskb_may_pull(skb, VXLAN_HLEN))
		goto error;

	vxh = (struct vxlanhdr *)(udp_hdr(skb) + 1);
	flags = ntohl(vxh->vx_flags);
	vni = ntohl(vxh->vx_vni);

	if (flags & VXLAN_HF_VNI) {
		flags &= ~VXLAN_HF_VNI;
	} else {
		/* VNI flag always required to be set */
		goto bad_flags;
	}

	if (iptunnel_pull_header(skb, VXLAN_HLEN, htons(ETH_P_TEB)))
		goto drop;
	vxh = (struct vxlanhdr *)(udp_hdr(skb) + 1);

	vs = rcu_dereference_sk_user_data(sk);
	if (!vs)
		goto drop;

	if ((flags & VXLAN_HF_RCO) && (vs->flags & VXLAN_F_REMCSUM_RX)) {
		vxh = vxlan_remcsum(skb, vxh, sizeof(struct vxlanhdr), vni,
				    !!(vs->flags & VXLAN_F_REMCSUM_NOPARTIAL));
		if (!vxh)
			goto drop;

		flags &= ~VXLAN_HF_RCO;
		vni &= VXLAN_VNI_MASK;
	}

	/* For backwards compatibility, only allow reserved fields to be
	 * used by VXLAN extensions if explicitly requested.
	 */
	if ((flags & VXLAN_HF_GBP) && (vs->flags & VXLAN_F_GBP)) {
		struct vxlanhdr_gbp *gbp;

		gbp = (struct vxlanhdr_gbp *)vxh;
		md.gbp = ntohs(gbp->policy_id);

		if (gbp->dont_learn)
			md.gbp |= VXLAN_GBP_DONT_LEARN;

		if (gbp->policy_applied)
			md.gbp |= VXLAN_GBP_POLICY_APPLIED;

		flags &= ~VXLAN_GBP_USED_BITS;
	}

	if (flags || vni & ~VXLAN_VNI_MASK) {
		/* If there are any unprocessed flags remaining treat
		 * this as a malformed packet. This behavior diverges from
		 * VXLAN RFC (RFC7348), which stipulates that bits in
		 * reserved fields are to be ignored. The approach here
		 * maintains compatibility with previous stack code, and
		 * also is more robust and provides a little more security
		 * in adding extensions to VXLAN.
		 */

		goto bad_flags;
	}

	md.vni = vxh->vx_vni;
	vs->rcv(vs, skb, &md);
	return 0;

drop:
	/* Consume bad packet */
	kfree_skb(skb);
	return 0;

bad_flags:
	netdev_dbg(skb->dev, "invalid vxlan flags=%#x vni=%#x\n",
		   ntohl(vxh->vx_flags), ntohl(vxh->vx_vni));

error:
	/* Return non vxlan pkt */
	return 1;
}
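/* Header sanity rules applied above (per RFC 7348 plus this driver's
 * stricter policy): the VNI flag must be set, the RCO and GBP
 * extensions are honoured only when the socket was created with the
 * matching VXLAN_F_* receive flag, and any flag bit left over after
 * that processing makes the packet "bad" rather than being silently
 * ignored.
 */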
static void vxlan_rcv(struct vxlan_sock *vs, struct sk_buff *skb,
		      struct vxlan_metadata *md)
{
	struct iphdr *oip = NULL;
	struct ipv6hdr *oip6 = NULL;
	struct vxlan_dev *vxlan;
	struct pcpu_sw_netstats *stats;
	union vxlan_addr saddr;
	__u32 vni;
	int err = 0;
	union vxlan_addr *remote_ip;

	vni = ntohl(md->vni) >> 8;
	/* Is this VNI defined? */
	vxlan = vxlan_vs_find_vni(vs, vni);
	if (!vxlan)
		goto drop;

	remote_ip = &vxlan->default_dst.remote_ip;
	skb_reset_mac_header(skb);
	skb_scrub_packet(skb, !net_eq(vxlan->net, dev_net(vxlan->dev)));
	skb->protocol = eth_type_trans(skb, vxlan->dev);
	skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);

	/* Ignore packet loops (and multicast echo) */
	if (ether_addr_equal(eth_hdr(skb)->h_source, vxlan->dev->dev_addr))
		goto drop;

	/* Re-examine inner Ethernet packet */
	if (remote_ip->sa.sa_family == AF_INET) {
		oip = ip_hdr(skb);
		saddr.sin.sin_addr.s_addr = oip->saddr;
		saddr.sa.sa_family = AF_INET;
#if IS_ENABLED(CONFIG_IPV6)
	} else {
		oip6 = ipv6_hdr(skb);
		saddr.sin6.sin6_addr = oip6->saddr;
		saddr.sa.sa_family = AF_INET6;
#endif
	}

	if ((vxlan->flags & VXLAN_F_LEARN) &&
	    vxlan_snoop(skb->dev, &saddr, eth_hdr(skb)->h_source))
		goto drop;

	skb_reset_network_header(skb);
	skb->mark = md->gbp;

	if (oip6)
		err = IP6_ECN_decapsulate(oip6, skb);
	if (oip)
		err = IP_ECN_decapsulate(oip, skb);

	if (unlikely(err)) {
		if (log_ecn_error) {
			if (oip6)
				net_info_ratelimited("non-ECT from %pI6\n",
						     &oip6->saddr);
			if (oip)
				net_info_ratelimited("non-ECT from %pI4 with TOS=%#x\n",
						     &oip->saddr, oip->tos);
		}
		if (err > 1) {
			++vxlan->dev->stats.rx_frame_errors;
			++vxlan->dev->stats.rx_errors;
			goto drop;
		}
	}

	stats = this_cpu_ptr(vxlan->dev->tstats);
	u64_stats_update_begin(&stats->syncp);
	stats->rx_packets++;
	stats->rx_bytes += skb->len;
	u64_stats_update_end(&stats->syncp);

	netif_rx(skb);

	return;
drop:
	/* Consume bad packet */
	kfree_skb(skb);
}
static int arp_reduce(struct net_device *dev, struct sk_buff *skb)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	struct arphdr *parp;
	u8 *arpptr, *sha;
	__be32 sip, tip;
	struct neighbour *n;

	if (dev->flags & IFF_NOARP)
		goto out;

	if (!pskb_may_pull(skb, arp_hdr_len(dev))) {
		dev->stats.tx_dropped++;
		goto out;
	}
	parp = arp_hdr(skb);

	if ((parp->ar_hrd != htons(ARPHRD_ETHER) &&
	     parp->ar_hrd != htons(ARPHRD_IEEE802)) ||
	    parp->ar_pro != htons(ETH_P_IP) ||
	    parp->ar_op != htons(ARPOP_REQUEST) ||
	    parp->ar_hln != dev->addr_len ||
	    parp->ar_pln != 4)
		goto out;
	arpptr = (u8 *)parp + sizeof(struct arphdr);
	sha = arpptr;
	arpptr += dev->addr_len;	/* sha */
	memcpy(&sip, arpptr, sizeof(sip));
	arpptr += sizeof(sip);
	arpptr += dev->addr_len;	/* tha */
	memcpy(&tip, arpptr, sizeof(tip));

	if (ipv4_is_loopback(tip) ||
	    ipv4_is_multicast(tip))
		goto out;

	n = neigh_lookup(&arp_tbl, &tip, dev);

	if (n) {
		struct vxlan_fdb *f;
		struct sk_buff	*reply;

		if (!(n->nud_state & NUD_CONNECTED)) {
			neigh_release(n);
			goto out;
		}

		f = vxlan_find_mac(vxlan, n->ha);
		if (f && vxlan_addr_any(&(first_remote_rcu(f)->remote_ip))) {
			/* bridge-local neighbor */
			neigh_release(n);
			goto out;
		}

		reply = arp_create(ARPOP_REPLY, ETH_P_ARP, sip, dev, tip, sha,
				   n->ha, sha);

		neigh_release(n);

		if (reply == NULL)
			goto out;

		skb_reset_mac_header(reply);
		__skb_pull(reply, skb_network_offset(reply));
		reply->ip_summed = CHECKSUM_UNNECESSARY;
		reply->pkt_type = PACKET_HOST;

		if (netif_rx_ni(reply) == NET_RX_DROP)
			dev->stats.rx_dropped++;
	} else if (vxlan->flags & VXLAN_F_L3MISS) {
		union vxlan_addr ipa = {
			.sin.sin_addr.s_addr = tip,
			.sin.sin_family = AF_INET,
		};

		vxlan_ip_miss(dev, &ipa);
	}
out:
	consume_skb(skb);
	return NETDEV_TX_OK;
}
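/* Proxy note (editor's summary): with VXLAN_F_PROXY the device answers
 * ARP requests from its own neighbour table instead of flooding them
 * over the tunnel; an unresolved target optionally raises an L3 miss
 * notification (VXLAN_F_L3MISS) so a user space controller can install
 * the missing mapping.
 */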
#if IS_ENABLED(CONFIG_IPV6)
static struct sk_buff *vxlan_na_create(struct sk_buff *request,
	struct neighbour *n, bool isrouter)
{
	struct net_device *dev = request->dev;
	struct sk_buff *reply;
	struct nd_msg *ns, *na;
	struct ipv6hdr *pip6;
	u8 *daddr;
	int na_olen = 8; /* opt hdr + ETH_ALEN for target */
	int ns_olen;
	int i, len;

	if (dev == NULL)
		return NULL;

	len = LL_RESERVED_SPACE(dev) + sizeof(struct ipv6hdr) +
		sizeof(*na) + na_olen + dev->needed_tailroom;
	reply = alloc_skb(len, GFP_ATOMIC);
	if (reply == NULL)
		return NULL;

	reply->protocol = htons(ETH_P_IPV6);
	reply->dev = dev;
	skb_reserve(reply, LL_RESERVED_SPACE(request->dev));
	skb_push(reply, sizeof(struct ethhdr));
	skb_set_mac_header(reply, 0);

	ns = (struct nd_msg *)skb_transport_header(request);

	daddr = eth_hdr(request)->h_source;
	ns_olen = request->len - skb_transport_offset(request) - sizeof(*ns);
	for (i = 0; i < ns_olen-1; i += (ns->opt[i+1]<<3)) {
		if (ns->opt[i] == ND_OPT_SOURCE_LL_ADDR) {
			daddr = ns->opt + i + sizeof(struct nd_opt_hdr);
			break;
		}
	}

	/* Ethernet header */
	ether_addr_copy(eth_hdr(reply)->h_dest, daddr);
	ether_addr_copy(eth_hdr(reply)->h_source, n->ha);
	eth_hdr(reply)->h_proto = htons(ETH_P_IPV6);
	reply->protocol = htons(ETH_P_IPV6);

	skb_pull(reply, sizeof(struct ethhdr));
	skb_set_network_header(reply, 0);
	skb_put(reply, sizeof(struct ipv6hdr));

	/* IPv6 header */

	pip6 = ipv6_hdr(reply);
	memset(pip6, 0, sizeof(struct ipv6hdr));
	pip6->version = 6;
	pip6->priority = ipv6_hdr(request)->priority;
	pip6->nexthdr = IPPROTO_ICMPV6;
	pip6->hop_limit = 255;
	pip6->daddr = ipv6_hdr(request)->saddr;
	pip6->saddr = *(struct in6_addr *)n->primary_key;

	skb_pull(reply, sizeof(struct ipv6hdr));
	skb_set_transport_header(reply, 0);

	na = (struct nd_msg *)skb_put(reply, sizeof(*na) + na_olen);

	/* Neighbor Advertisement */
	memset(na, 0, sizeof(*na)+na_olen);
	na->icmph.icmp6_type = NDISC_NEIGHBOUR_ADVERTISEMENT;
	na->icmph.icmp6_router = isrouter;
	na->icmph.icmp6_override = 1;
	na->icmph.icmp6_solicited = 1;
	na->target = ns->target;
	ether_addr_copy(&na->opt[2], n->ha);
	na->opt[0] = ND_OPT_TARGET_LL_ADDR;
	na->opt[1] = na_olen >> 3;

	na->icmph.icmp6_cksum = csum_ipv6_magic(&pip6->saddr,
		&pip6->daddr, sizeof(*na)+na_olen, IPPROTO_ICMPV6,
		csum_partial(na, sizeof(*na)+na_olen, 0));

	pip6->payload_len = htons(sizeof(*na)+na_olen);

	skb_push(reply, sizeof(struct ipv6hdr));

	reply->ip_summed = CHECKSUM_UNNECESSARY;

	return reply;
}
static int neigh_reduce(struct net_device *dev, struct sk_buff *skb)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	struct nd_msg *msg;
	const struct ipv6hdr *iphdr;
	const struct in6_addr *saddr, *daddr;
	struct neighbour *n;
	struct inet6_dev *in6_dev;

	in6_dev = __in6_dev_get(dev);
	if (!in6_dev)
		goto out;

	iphdr = ipv6_hdr(skb);
	saddr = &iphdr->saddr;
	daddr = &iphdr->daddr;

	msg = (struct nd_msg *)skb_transport_header(skb);
	if (msg->icmph.icmp6_code != 0 ||
	    msg->icmph.icmp6_type != NDISC_NEIGHBOUR_SOLICITATION)
		goto out;

	if (ipv6_addr_loopback(daddr) ||
	    ipv6_addr_is_multicast(&msg->target))
		goto out;

	n = neigh_lookup(ipv6_stub->nd_tbl, &msg->target, dev);

	if (n) {
		struct vxlan_fdb *f;
		struct sk_buff *reply;

		if (!(n->nud_state & NUD_CONNECTED)) {
			neigh_release(n);
			goto out;
		}

		f = vxlan_find_mac(vxlan, n->ha);
		if (f && vxlan_addr_any(&(first_remote_rcu(f)->remote_ip))) {
			/* bridge-local neighbor */
			neigh_release(n);
			goto out;
		}

		reply = vxlan_na_create(skb, n,
					!!(f ? f->flags & NTF_ROUTER : 0));

		neigh_release(n);

		if (reply == NULL)
			goto out;

		if (netif_rx_ni(reply) == NET_RX_DROP)
			dev->stats.rx_dropped++;

	} else if (vxlan->flags & VXLAN_F_L3MISS) {
		union vxlan_addr ipa = {
			.sin6.sin6_addr = msg->target,
			.sin6.sin6_family = AF_INET6,
		};

		vxlan_ip_miss(dev, &ipa);
	}

out:
	consume_skb(skb);
	return NETDEV_TX_OK;
}
#endif
static bool route_shortcircuit(struct net_device *dev, struct sk_buff *skb)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	struct neighbour *n;

	if (is_multicast_ether_addr(eth_hdr(skb)->h_dest))
		return false;

	n = NULL;
	switch (ntohs(eth_hdr(skb)->h_proto)) {
	case ETH_P_IP:
	{
		struct iphdr *pip;

		if (!pskb_may_pull(skb, sizeof(struct iphdr)))
			return false;
		pip = ip_hdr(skb);
		n = neigh_lookup(&arp_tbl, &pip->daddr, dev);
		if (!n && (vxlan->flags & VXLAN_F_L3MISS)) {
			union vxlan_addr ipa = {
				.sin.sin_addr.s_addr = pip->daddr,
				.sin.sin_family = AF_INET,
			};

			vxlan_ip_miss(dev, &ipa);
			return false;
		}

		break;
	}
#if IS_ENABLED(CONFIG_IPV6)
	case ETH_P_IPV6:
	{
		struct ipv6hdr *pip6;

		if (!pskb_may_pull(skb, sizeof(struct ipv6hdr)))
			return false;
		pip6 = ipv6_hdr(skb);
		n = neigh_lookup(ipv6_stub->nd_tbl, &pip6->daddr, dev);
		if (!n && (vxlan->flags & VXLAN_F_L3MISS)) {
			union vxlan_addr ipa = {
				.sin6.sin6_addr = pip6->daddr,
				.sin6.sin6_family = AF_INET6,
			};

			vxlan_ip_miss(dev, &ipa);
			return false;
		}

		break;
	}
#endif
	default:
		return false;
	}

	if (n) {
		bool diff;

		diff = !ether_addr_equal(eth_hdr(skb)->h_dest, n->ha);
		if (diff) {
			memcpy(eth_hdr(skb)->h_source, eth_hdr(skb)->h_dest,
			       dev->addr_len);
			memcpy(eth_hdr(skb)->h_dest, n->ha, dev->addr_len);
		}
		neigh_release(n);
		return diff;
	}

	return false;
}
static void vxlan_build_gbp_hdr(struct vxlanhdr *vxh, u32 vxflags,
				struct vxlan_metadata *md)
{
	struct vxlanhdr_gbp *gbp;

	if (!md->gbp)
		return;

	gbp = (struct vxlanhdr_gbp *)vxh;
	vxh->vx_flags |= htonl(VXLAN_HF_GBP);

	if (md->gbp & VXLAN_GBP_DONT_LEARN)
		gbp->dont_learn = 1;

	if (md->gbp & VXLAN_GBP_POLICY_APPLIED)
		gbp->policy_applied = 1;

	gbp->policy_id = htons(md->gbp & VXLAN_GBP_ID_MASK);
}
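/* Group Based Policy (GBP) sketch (editor's note): on transmit the
 * policy comes from md->gbp, which vxlan_rcv() seeded from skb->mark on
 * the receive side; the mark thus carries the 16-bit policy ID plus the
 * DONT_LEARN/POLICY_APPLIED bits across the local stack.
 */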
#if IS_ENABLED(CONFIG_IPV6)
static int vxlan6_xmit_skb(struct dst_entry *dst, struct sk_buff *skb,
			   struct net_device *dev, struct in6_addr *saddr,
			   struct in6_addr *daddr, __u8 prio, __u8 ttl,
			   __be16 src_port, __be16 dst_port,
			   struct vxlan_metadata *md, bool xnet, u32 vxflags)
{
	struct vxlanhdr *vxh;
	int min_headroom;
	int err;
	bool udp_sum = !(vxflags & VXLAN_F_UDP_ZERO_CSUM6_TX);
	int type = udp_sum ? SKB_GSO_UDP_TUNNEL_CSUM : SKB_GSO_UDP_TUNNEL;
	u16 hdrlen = sizeof(struct vxlanhdr);

	if ((vxflags & VXLAN_F_REMCSUM_TX) &&
	    skb->ip_summed == CHECKSUM_PARTIAL) {
		int csum_start = skb_checksum_start_offset(skb);

		if (csum_start <= VXLAN_MAX_REMCSUM_START &&
		    !(csum_start & VXLAN_RCO_SHIFT_MASK) &&
		    (skb->csum_offset == offsetof(struct udphdr, check) ||
		     skb->csum_offset == offsetof(struct tcphdr, check))) {
			udp_sum = false;
			type |= SKB_GSO_TUNNEL_REMCSUM;
		}
	}

	skb = iptunnel_handle_offloads(skb, udp_sum, type);
	if (IS_ERR(skb)) {
		err = -EINVAL;
		goto err;
	}

	skb_scrub_packet(skb, xnet);

	min_headroom = LL_RESERVED_SPACE(dst->dev) + dst->header_len
			+ VXLAN_HLEN + sizeof(struct ipv6hdr)
			+ (skb_vlan_tag_present(skb) ? VLAN_HLEN : 0);

	/* Need space for new headers (invalidates iph ptr) */
	err = skb_cow_head(skb, min_headroom);
	if (unlikely(err)) {
		kfree_skb(skb);
		goto err;
	}

	skb = vlan_hwaccel_push_inside(skb);
	if (WARN_ON(!skb)) {
		err = -ENOMEM;
		goto err;
	}

	vxh = (struct vxlanhdr *) __skb_push(skb, sizeof(*vxh));
	vxh->vx_flags = htonl(VXLAN_HF_VNI);
	vxh->vx_vni = md->vni;

	if (type & SKB_GSO_TUNNEL_REMCSUM) {
		u32 data = (skb_checksum_start_offset(skb) - hdrlen) >>
			   VXLAN_RCO_SHIFT;

		if (skb->csum_offset == offsetof(struct udphdr, check))
			data |= VXLAN_RCO_UDP;

		vxh->vx_vni |= htonl(data);
		vxh->vx_flags |= htonl(VXLAN_HF_RCO);

		if (!skb_is_gso(skb)) {
			skb->ip_summed = CHECKSUM_NONE;
			skb->encapsulation = 0;
		}
	}

	if (vxflags & VXLAN_F_GBP)
		vxlan_build_gbp_hdr(vxh, vxflags, md);

	skb_set_inner_protocol(skb, htons(ETH_P_TEB));

	udp_tunnel6_xmit_skb(dst, skb, dev, saddr, daddr, prio,
			     ttl, src_port, dst_port,
			     !!(vxflags & VXLAN_F_UDP_ZERO_CSUM6_TX));
	return 0;
err:
	dst_release(dst);
	return err;
}
#endif
int vxlan_xmit_skb(struct rtable *rt, struct sk_buff *skb,
		   __be32 src, __be32 dst, __u8 tos, __u8 ttl, __be16 df,
		   __be16 src_port, __be16 dst_port,
		   struct vxlan_metadata *md, bool xnet, u32 vxflags)
{
	struct vxlanhdr *vxh;
	int min_headroom;
	int err;
	bool udp_sum = !!(vxflags & VXLAN_F_UDP_CSUM);
	int type = udp_sum ? SKB_GSO_UDP_TUNNEL_CSUM : SKB_GSO_UDP_TUNNEL;
	u16 hdrlen = sizeof(struct vxlanhdr);

	if ((vxflags & VXLAN_F_REMCSUM_TX) &&
	    skb->ip_summed == CHECKSUM_PARTIAL) {
		int csum_start = skb_checksum_start_offset(skb);

		if (csum_start <= VXLAN_MAX_REMCSUM_START &&
		    !(csum_start & VXLAN_RCO_SHIFT_MASK) &&
		    (skb->csum_offset == offsetof(struct udphdr, check) ||
		     skb->csum_offset == offsetof(struct tcphdr, check))) {
			udp_sum = false;
			type |= SKB_GSO_TUNNEL_REMCSUM;
		}
	}

	skb = iptunnel_handle_offloads(skb, udp_sum, type);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	min_headroom = LL_RESERVED_SPACE(rt->dst.dev) + rt->dst.header_len
			+ VXLAN_HLEN + sizeof(struct iphdr)
			+ (skb_vlan_tag_present(skb) ? VLAN_HLEN : 0);

	/* Need space for new headers (invalidates iph ptr) */
	err = skb_cow_head(skb, min_headroom);
	if (unlikely(err)) {
		kfree_skb(skb);
		return err;
	}

	skb = vlan_hwaccel_push_inside(skb);
	if (WARN_ON(!skb))
		return -ENOMEM;

	vxh = (struct vxlanhdr *) __skb_push(skb, sizeof(*vxh));
	vxh->vx_flags = htonl(VXLAN_HF_VNI);
	vxh->vx_vni = md->vni;

	if (type & SKB_GSO_TUNNEL_REMCSUM) {
		u32 data = (skb_checksum_start_offset(skb) - hdrlen) >>
			   VXLAN_RCO_SHIFT;

		if (skb->csum_offset == offsetof(struct udphdr, check))
			data |= VXLAN_RCO_UDP;

		vxh->vx_vni |= htonl(data);
		vxh->vx_flags |= htonl(VXLAN_HF_RCO);

		if (!skb_is_gso(skb)) {
			skb->ip_summed = CHECKSUM_NONE;
			skb->encapsulation = 0;
		}
	}

	if (vxflags & VXLAN_F_GBP)
		vxlan_build_gbp_hdr(vxh, vxflags, md);

	skb_set_inner_protocol(skb, htons(ETH_P_TEB));

	return udp_tunnel_xmit_skb(rt, skb, src, dst, tos,
				   ttl, df, src_port, dst_port, xnet,
				   !(vxflags & VXLAN_F_UDP_CSUM));
}
EXPORT_SYMBOL_GPL(vxlan_xmit_skb);
/* Bypass encapsulation if the destination is local */
static void vxlan_encap_bypass(struct sk_buff *skb, struct vxlan_dev *src_vxlan,
			       struct vxlan_dev *dst_vxlan)
{
	struct pcpu_sw_netstats *tx_stats, *rx_stats;
	union vxlan_addr loopback;
	union vxlan_addr *remote_ip = &dst_vxlan->default_dst.remote_ip;
	struct net_device *dev = skb->dev;
	int len = skb->len;

	tx_stats = this_cpu_ptr(src_vxlan->dev->tstats);
	rx_stats = this_cpu_ptr(dst_vxlan->dev->tstats);
	skb->pkt_type = PACKET_HOST;
	skb->encapsulation = 0;
	skb->dev = dst_vxlan->dev;
	__skb_pull(skb, skb_network_offset(skb));

	if (remote_ip->sa.sa_family == AF_INET) {
		loopback.sin.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
		loopback.sa.sa_family =  AF_INET;
#if IS_ENABLED(CONFIG_IPV6)
	} else {
		loopback.sin6.sin6_addr = in6addr_loopback;
		loopback.sa.sa_family =  AF_INET6;
#endif
	}

	if (dst_vxlan->flags & VXLAN_F_LEARN)
		vxlan_snoop(skb->dev, &loopback, eth_hdr(skb)->h_source);

	u64_stats_update_begin(&tx_stats->syncp);
	tx_stats->tx_packets++;
	tx_stats->tx_bytes += len;
	u64_stats_update_end(&tx_stats->syncp);

	if (netif_rx(skb) == NET_RX_SUCCESS) {
		u64_stats_update_begin(&rx_stats->syncp);
		rx_stats->rx_packets++;
		rx_stats->rx_bytes += len;
		u64_stats_update_end(&rx_stats->syncp);
	} else {
		dev->stats.rx_dropped++;
	}
}
static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
			   struct vxlan_rdst *rdst, bool did_rsc)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	struct rtable *rt = NULL;
	const struct iphdr *old_iph;
	struct flowi4 fl4;
	union vxlan_addr *dst;
	struct vxlan_metadata md;
	__be16 src_port = 0, dst_port;
	u32 vni;
	__be16 df = 0;
	__u8 tos, ttl;
	int err;

	dst_port = rdst->remote_port ? rdst->remote_port : vxlan->dst_port;
	vni = rdst->remote_vni;
	dst = &rdst->remote_ip;

	if (vxlan_addr_any(dst)) {
		if (did_rsc) {
			/* short-circuited back to local bridge */
			vxlan_encap_bypass(skb, vxlan, vxlan);
			return;
		}
		goto drop;
	}

	old_iph = ip_hdr(skb);

	ttl = vxlan->ttl;
	if (!ttl && vxlan_addr_multicast(dst))
		ttl = 1;

	tos = vxlan->tos;
	if (tos == 1)
		tos = ip_tunnel_get_dsfield(old_iph, skb);

	src_port = udp_flow_src_port(dev_net(dev), skb, vxlan->port_min,
				     vxlan->port_max, true);

	if (dst->sa.sa_family == AF_INET) {
		memset(&fl4, 0, sizeof(fl4));
		fl4.flowi4_oif = rdst->remote_ifindex;
		fl4.flowi4_tos = RT_TOS(tos);
		fl4.daddr = dst->sin.sin_addr.s_addr;
		fl4.saddr = vxlan->saddr.sin.sin_addr.s_addr;

		rt = ip_route_output_key(vxlan->net, &fl4);
		if (IS_ERR(rt)) {
			netdev_dbg(dev, "no route to %pI4\n",
				   &dst->sin.sin_addr.s_addr);
			dev->stats.tx_carrier_errors++;
			goto tx_error;
		}

		if (rt->dst.dev == dev) {
			netdev_dbg(dev, "circular route to %pI4\n",
				   &dst->sin.sin_addr.s_addr);
			dev->stats.collisions++;
			goto rt_tx_error;
		}

		/* Bypass encapsulation if the destination is local */
		if (rt->rt_flags & RTCF_LOCAL &&
		    !(rt->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))) {
			struct vxlan_dev *dst_vxlan;

			ip_rt_put(rt);
			dst_vxlan = vxlan_find_vni(vxlan->net, vni,
						   dst->sa.sa_family, dst_port,
						   vxlan->flags);
			if (!dst_vxlan)
				goto tx_error;
			vxlan_encap_bypass(skb, vxlan, dst_vxlan);
			return;
		}

		tos = ip_tunnel_ecn_encap(tos, old_iph, skb);
		ttl = ttl ? : ip4_dst_hoplimit(&rt->dst);
		md.vni = htonl(vni << 8);
		md.gbp = skb->mark;

		err = vxlan_xmit_skb(rt, skb, fl4.saddr,
				     dst->sin.sin_addr.s_addr, tos, ttl, df,
				     src_port, dst_port, &md,
				     !net_eq(vxlan->net, dev_net(vxlan->dev)),
				     vxlan->flags);
		if (err < 0) {
			/* skb is already freed. */
			skb = NULL;
			goto rt_tx_error;
		}

		iptunnel_xmit_stats(err, &dev->stats, dev->tstats);
#if IS_ENABLED(CONFIG_IPV6)
	} else {
		struct sock *sk = vxlan->vn_sock->sock->sk;
		struct dst_entry *ndst;
		struct flowi6 fl6;
		u32 flags;

		memset(&fl6, 0, sizeof(fl6));
		fl6.flowi6_oif = rdst->remote_ifindex;
		fl6.daddr = dst->sin6.sin6_addr;
		fl6.saddr = vxlan->saddr.sin6.sin6_addr;
		fl6.flowi6_proto = IPPROTO_UDP;

		if (ipv6_stub->ipv6_dst_lookup(sk, &ndst, &fl6)) {
			netdev_dbg(dev, "no route to %pI6\n",
				   &dst->sin6.sin6_addr);
			dev->stats.tx_carrier_errors++;
			goto tx_error;
		}

		if (ndst->dev == dev) {
			netdev_dbg(dev, "circular route to %pI6\n",
				   &dst->sin6.sin6_addr);
			dst_release(ndst);
			dev->stats.collisions++;
			goto tx_error;
		}

		/* Bypass encapsulation if the destination is local */
		flags = ((struct rt6_info *)ndst)->rt6i_flags;
		if (flags & RTF_LOCAL &&
		    !(flags & (RTCF_BROADCAST | RTCF_MULTICAST))) {
			struct vxlan_dev *dst_vxlan;

			dst_release(ndst);
			dst_vxlan = vxlan_find_vni(vxlan->net, vni,
						   dst->sa.sa_family, dst_port,
						   vxlan->flags);
			if (!dst_vxlan)
				goto tx_error;
			vxlan_encap_bypass(skb, vxlan, dst_vxlan);
			return;
		}

		ttl = ttl ? : ip6_dst_hoplimit(ndst);
		md.vni = htonl(vni << 8);
		md.gbp = skb->mark;

		err = vxlan6_xmit_skb(ndst, skb, dev, &fl6.saddr, &fl6.daddr,
				      0, ttl, src_port, dst_port, &md,
				      !net_eq(vxlan->net, dev_net(vxlan->dev)),
				      vxlan->flags);
#endif
	}

	return;

drop:
	dev->stats.tx_dropped++;
	goto tx_free;

rt_tx_error:
	ip_rt_put(rt);
tx_error:
	dev->stats.tx_errors++;
tx_free:
	dev_kfree_skb(skb);
}
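/* Transmit-path note (editor's summary): a route that comes back
 * RTCF_LOCAL (or RTF_LOCAL for IPv6) means the remote endpoint is this
 * host, so the packet is handed straight to the destination vxlan_dev
 * via vxlan_encap_bypass() instead of being encapsulated and looped
 * back through the IP stack.
 */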
/* Transmit local packets over Vxlan
 *
 * Outer IP header inherits ECN and DF from inner header.
 * Outer UDP destination is the VXLAN assigned port.
 * Source port is based on a hash of the flow.
 */
static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	struct ethhdr *eth;
	bool did_rsc = false;
	struct vxlan_rdst *rdst, *fdst = NULL;
	struct vxlan_fdb *f;

	skb_reset_mac_header(skb);
	eth = eth_hdr(skb);

	if ((vxlan->flags & VXLAN_F_PROXY)) {
		if (ntohs(eth->h_proto) == ETH_P_ARP)
			return arp_reduce(dev, skb);
#if IS_ENABLED(CONFIG_IPV6)
		else if (ntohs(eth->h_proto) == ETH_P_IPV6 &&
			 pskb_may_pull(skb, sizeof(struct ipv6hdr)
				       + sizeof(struct nd_msg)) &&
			 ipv6_hdr(skb)->nexthdr == IPPROTO_ICMPV6) {
			struct nd_msg *msg;

			msg = (struct nd_msg *)skb_transport_header(skb);
			if (msg->icmph.icmp6_code == 0 &&
			    msg->icmph.icmp6_type == NDISC_NEIGHBOUR_SOLICITATION)
				return neigh_reduce(dev, skb);
		}
		eth = eth_hdr(skb);
#endif
	}

	f = vxlan_find_mac(vxlan, eth->h_dest);
	did_rsc = false;

	if (f && (f->flags & NTF_ROUTER) && (vxlan->flags & VXLAN_F_RSC) &&
	    (ntohs(eth->h_proto) == ETH_P_IP ||
	     ntohs(eth->h_proto) == ETH_P_IPV6)) {
		did_rsc = route_shortcircuit(dev, skb);
		if (did_rsc)
			f = vxlan_find_mac(vxlan, eth->h_dest);
	}

	if (f == NULL) {
		f = vxlan_find_mac(vxlan, all_zeros_mac);
		if (f == NULL) {
			if ((vxlan->flags & VXLAN_F_L2MISS) &&
			    !is_multicast_ether_addr(eth->h_dest))
				vxlan_fdb_miss(vxlan, eth->h_dest);

			dev->stats.tx_dropped++;
			kfree_skb(skb);
			return NETDEV_TX_OK;
		}
	}

	list_for_each_entry_rcu(rdst, &f->remotes, list) {
		struct sk_buff *skb1;

		if (!fdst) {
			fdst = rdst;
			continue;
		}
		skb1 = skb_clone(skb, GFP_ATOMIC);
		if (skb1)
			vxlan_xmit_one(skb1, dev, rdst, did_rsc);
	}

	if (fdst)
		vxlan_xmit_one(skb, dev, fdst, did_rsc);
	else
		kfree_skb(skb);
	return NETDEV_TX_OK;
}
/* Walk the forwarding table and purge stale entries */
static void vxlan_cleanup(unsigned long arg)
{
	struct vxlan_dev *vxlan = (struct vxlan_dev *) arg;
	unsigned long next_timer = jiffies + FDB_AGE_INTERVAL;
	unsigned int h;

	if (!netif_running(vxlan->dev))
		return;

	spin_lock_bh(&vxlan->hash_lock);
	for (h = 0; h < FDB_HASH_SIZE; ++h) {
		struct hlist_node *p, *n;

		hlist_for_each_safe(p, n, &vxlan->fdb_head[h]) {
			struct vxlan_fdb *f
				= container_of(p, struct vxlan_fdb, hlist);
			unsigned long timeout;

			if (f->state & NUD_PERMANENT)
				continue;

			timeout = f->used + vxlan->age_interval * HZ;
			if (time_before_eq(timeout, jiffies)) {
				netdev_dbg(vxlan->dev,
					   "garbage collect %pM\n",
					   f->eth_addr);
				f->state = NUD_STALE;
				vxlan_fdb_destroy(vxlan, f);
			} else if (time_before(timeout, next_timer))
				next_timer = timeout;
		}
	}
	spin_unlock_bh(&vxlan->hash_lock);

	mod_timer(&vxlan->age_timer, next_timer);
}

static void vxlan_vs_add_dev(struct vxlan_sock *vs, struct vxlan_dev *vxlan)
{
	struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id);
	__u32 vni = vxlan->default_dst.remote_vni;

	vxlan->vn_sock = vs;
	spin_lock(&vn->sock_lock);
	hlist_add_head_rcu(&vxlan->hlist, vni_head(vs, vni));
	spin_unlock(&vn->sock_lock);
}
/* Setup stats when device is created */
static int vxlan_init(struct net_device *dev)
{
	dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
	if (!dev->tstats)
		return -ENOMEM;

	return 0;
}

static void vxlan_fdb_delete_default(struct vxlan_dev *vxlan)
{
	struct vxlan_fdb *f;

	spin_lock_bh(&vxlan->hash_lock);
	f = __vxlan_find_mac(vxlan, all_zeros_mac);
	if (f)
		vxlan_fdb_destroy(vxlan, f);
	spin_unlock_bh(&vxlan->hash_lock);
}

static void vxlan_uninit(struct net_device *dev)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);

	vxlan_fdb_delete_default(vxlan);

	free_percpu(dev->tstats);
}
/* Start ageing timer and join group when device is brought up */
static int vxlan_open(struct net_device *dev)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	struct vxlan_sock *vs;
	int ret = 0;

	vs = vxlan_sock_add(vxlan->net, vxlan->dst_port, vxlan_rcv, NULL,
			    false, vxlan->flags);
	if (IS_ERR(vs))
		return PTR_ERR(vs);

	vxlan_vs_add_dev(vs, vxlan);

	if (vxlan_addr_multicast(&vxlan->default_dst.remote_ip)) {
		ret = vxlan_igmp_join(vxlan);
		if (ret) {
			vxlan_sock_release(vs);
			return ret;
		}
	}

	if (vxlan->age_interval)
		mod_timer(&vxlan->age_timer, jiffies + FDB_AGE_INTERVAL);

	return ret;
}

/* Purge the forwarding table */
static void vxlan_flush(struct vxlan_dev *vxlan)
{
	unsigned int h;

	spin_lock_bh(&vxlan->hash_lock);
	for (h = 0; h < FDB_HASH_SIZE; ++h) {
		struct hlist_node *p, *n;

		hlist_for_each_safe(p, n, &vxlan->fdb_head[h]) {
			struct vxlan_fdb *f
				= container_of(p, struct vxlan_fdb, hlist);
			/* the all_zeros_mac entry is deleted at vxlan_uninit */
			if (!is_zero_ether_addr(f->eth_addr))
				vxlan_fdb_destroy(vxlan, f);
		}
	}
	spin_unlock_bh(&vxlan->hash_lock);
}
/* Cleanup timer and forwarding table on shutdown */
static int vxlan_stop(struct net_device *dev)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id);
	struct vxlan_sock *vs = vxlan->vn_sock;
	int ret = 0;

	if (vxlan_addr_multicast(&vxlan->default_dst.remote_ip) &&
	    !vxlan_group_used(vn, vxlan)) {
		ret = vxlan_igmp_leave(vxlan);
		if (ret)
			return ret;
	}

	del_timer_sync(&vxlan->age_timer);

	vxlan_flush(vxlan);
	vxlan_sock_release(vs);

	return ret;
}

/* Stub, nothing needs to be done. */
static void vxlan_set_multicast_list(struct net_device *dev)
{
}

static int vxlan_change_mtu(struct net_device *dev, int new_mtu)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	struct vxlan_rdst *dst = &vxlan->default_dst;
	struct net_device *lowerdev;
	int max_mtu;

	lowerdev = __dev_get_by_index(vxlan->net, dst->remote_ifindex);
	if (lowerdev == NULL)
		return eth_change_mtu(dev, new_mtu);

	if (dst->remote_ip.sa.sa_family == AF_INET6)
		max_mtu = lowerdev->mtu - VXLAN6_HEADROOM;
	else
		max_mtu = lowerdev->mtu - VXLAN_HEADROOM;

	if (new_mtu < 68 || new_mtu > max_mtu)
		return -EINVAL;

	dev->mtu = new_mtu;
	return 0;
}
static const struct net_device_ops vxlan_netdev_ops = {
	.ndo_init		= vxlan_init,
	.ndo_uninit		= vxlan_uninit,
	.ndo_open		= vxlan_open,
	.ndo_stop		= vxlan_stop,
	.ndo_start_xmit		= vxlan_xmit,
	.ndo_get_stats64	= ip_tunnel_get_stats64,
	.ndo_set_rx_mode	= vxlan_set_multicast_list,
	.ndo_change_mtu		= vxlan_change_mtu,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_fdb_add		= vxlan_fdb_add,
	.ndo_fdb_del		= vxlan_fdb_delete,
	.ndo_fdb_dump		= vxlan_fdb_dump,
};

/* Info for udev, that this is a virtual tunnel endpoint */
static struct device_type vxlan_type = {
	.name = "vxlan",
};
/* Calls the ndo_add_vxlan_port of the caller in order to
 * supply the listening VXLAN udp ports. Callers are expected
 * to implement the ndo_add_vxlan_port.
 */
void vxlan_get_rx_port(struct net_device *dev)
{
	struct vxlan_sock *vs;
	struct net *net = dev_net(dev);
	struct vxlan_net *vn = net_generic(net, vxlan_net_id);
	sa_family_t sa_family;
	__be16 port;
	unsigned int i;

	spin_lock(&vn->sock_lock);
	for (i = 0; i < PORT_HASH_SIZE; ++i) {
		hlist_for_each_entry_rcu(vs, &vn->sock_list[i], hlist) {
			port = inet_sk(vs->sock->sk)->inet_sport;
			sa_family = vs->sock->sk->sk_family;
			dev->netdev_ops->ndo_add_vxlan_port(dev, sa_family,
							    port);
		}
	}
	spin_unlock(&vn->sock_lock);
}
EXPORT_SYMBOL_GPL(vxlan_get_rx_port);
/* Initialize the device structure. */
static void vxlan_setup(struct net_device *dev)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	unsigned int h;

	eth_hw_addr_random(dev);
	ether_setup(dev);
	if (vxlan->default_dst.remote_ip.sa.sa_family == AF_INET6)
		dev->needed_headroom = ETH_HLEN + VXLAN6_HEADROOM;
	else
		dev->needed_headroom = ETH_HLEN + VXLAN_HEADROOM;

	dev->netdev_ops = &vxlan_netdev_ops;
	dev->destructor = free_netdev;
	SET_NETDEV_DEVTYPE(dev, &vxlan_type);

	dev->tx_queue_len = 0;
	dev->features	|= NETIF_F_LLTX;
	dev->features	|= NETIF_F_SG | NETIF_F_HW_CSUM;
	dev->features   |= NETIF_F_RXCSUM;
	dev->features   |= NETIF_F_GSO_SOFTWARE;

	dev->vlan_features = dev->features;
	dev->features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX;
	dev->hw_features |= NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_RXCSUM;
	dev->hw_features |= NETIF_F_GSO_SOFTWARE;
	dev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX;
	netif_keep_dst(dev);
	dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;

	INIT_LIST_HEAD(&vxlan->next);
	spin_lock_init(&vxlan->hash_lock);

	init_timer_deferrable(&vxlan->age_timer);
	vxlan->age_timer.function = vxlan_cleanup;
	vxlan->age_timer.data = (unsigned long) vxlan;

	vxlan->dst_port = htons(vxlan_port);

	vxlan->dev = dev;

	for (h = 0; h < FDB_HASH_SIZE; ++h)
		INIT_HLIST_HEAD(&vxlan->fdb_head[h]);
}
static const struct nla_policy vxlan_policy[IFLA_VXLAN_MAX + 1] = {
	[IFLA_VXLAN_ID]		= { .type = NLA_U32 },
	[IFLA_VXLAN_GROUP]	= { .len = FIELD_SIZEOF(struct iphdr, daddr) },
	[IFLA_VXLAN_GROUP6]	= { .len = sizeof(struct in6_addr) },
	[IFLA_VXLAN_LINK]	= { .type = NLA_U32 },
	[IFLA_VXLAN_LOCAL]	= { .len = FIELD_SIZEOF(struct iphdr, saddr) },
	[IFLA_VXLAN_LOCAL6]	= { .len = sizeof(struct in6_addr) },
	[IFLA_VXLAN_TOS]	= { .type = NLA_U8 },
	[IFLA_VXLAN_TTL]	= { .type = NLA_U8 },
	[IFLA_VXLAN_LEARNING]	= { .type = NLA_U8 },
	[IFLA_VXLAN_AGEING]	= { .type = NLA_U32 },
	[IFLA_VXLAN_LIMIT]	= { .type = NLA_U32 },
	[IFLA_VXLAN_PORT_RANGE] = { .len = sizeof(struct ifla_vxlan_port_range) },
	[IFLA_VXLAN_PROXY]	= { .type = NLA_U8 },
	[IFLA_VXLAN_RSC]	= { .type = NLA_U8 },
	[IFLA_VXLAN_L2MISS]	= { .type = NLA_U8 },
	[IFLA_VXLAN_L3MISS]	= { .type = NLA_U8 },
	[IFLA_VXLAN_PORT]	= { .type = NLA_U16 },
	[IFLA_VXLAN_UDP_CSUM]	= { .type = NLA_U8 },
	[IFLA_VXLAN_UDP_ZERO_CSUM6_TX]	= { .type = NLA_U8 },
	[IFLA_VXLAN_UDP_ZERO_CSUM6_RX]	= { .type = NLA_U8 },
	[IFLA_VXLAN_REMCSUM_TX]	= { .type = NLA_U8 },
	[IFLA_VXLAN_REMCSUM_RX]	= { .type = NLA_U8 },
	[IFLA_VXLAN_GBP]	= { .type = NLA_FLAG, },
	[IFLA_VXLAN_REMCSUM_NOPARTIAL]	= { .type = NLA_FLAG },
};
static int vxlan_validate(struct nlattr *tb[], struct nlattr *data[])
{
	if (tb[IFLA_ADDRESS]) {
		if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN) {
			pr_debug("invalid link address (not ethernet)\n");
			return -EINVAL;
		}

		if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS]))) {
			pr_debug("invalid all zero ethernet address\n");
			return -EADDRNOTAVAIL;
		}
	}

	if (!data)
		return -EINVAL;

	if (data[IFLA_VXLAN_ID]) {
		__u32 id = nla_get_u32(data[IFLA_VXLAN_ID]);

		if (id >= VXLAN_VID_MASK)
			return -ERANGE;
	}

	if (data[IFLA_VXLAN_PORT_RANGE]) {
		const struct ifla_vxlan_port_range *p
			= nla_data(data[IFLA_VXLAN_PORT_RANGE]);

		if (ntohs(p->high) < ntohs(p->low)) {
			pr_debug("port range %u .. %u not valid\n",
				 ntohs(p->low), ntohs(p->high));
			return -EINVAL;
		}
	}

	return 0;
}
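/* Illustrative iproute2 usage (not from this file): the attributes
 * validated above map onto e.g. "ip link add vxlan0 type vxlan id 42
 * group 239.1.1.1 dev eth0 dstport 4789", where the VNI ("id") must
 * stay below the 24-bit VXLAN_VID_MASK and a source port range must be
 * ordered low..high.
 */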
static void vxlan_get_drvinfo(struct net_device *netdev,
			      struct ethtool_drvinfo *drvinfo)
{
	strlcpy(drvinfo->version, VXLAN_VERSION, sizeof(drvinfo->version));
	strlcpy(drvinfo->driver, "vxlan", sizeof(drvinfo->driver));
}

static const struct ethtool_ops vxlan_ethtool_ops = {
	.get_drvinfo	= vxlan_get_drvinfo,
	.get_link	= ethtool_op_get_link,
};

static void vxlan_del_work(struct work_struct *work)
{
	struct vxlan_sock *vs = container_of(work, struct vxlan_sock,
					     del_work);

	udp_tunnel_sock_release(vs->sock);
	kfree_rcu(vs, rcu);
}
static struct socket *vxlan_create_sock(struct net *net, bool ipv6,
					__be16 port, u32 flags)
{
	struct socket *sock;
	struct udp_port_cfg udp_conf;
	int err;

	memset(&udp_conf, 0, sizeof(udp_conf));

	if (ipv6) {
		udp_conf.family = AF_INET6;
		udp_conf.use_udp6_rx_checksums =
		    !(flags & VXLAN_F_UDP_ZERO_CSUM6_RX);
	} else {
		udp_conf.family = AF_INET;
	}

	udp_conf.local_udp_port = port;

	/* Open UDP socket */
	err = udp_sock_create(net, &udp_conf, &sock);
	if (err < 0)
		return ERR_PTR(err);

	return sock;
}
/* Create new listen socket if needed */
static struct vxlan_sock *vxlan_socket_create(struct net *net, __be16 port,
					      vxlan_rcv_t *rcv, void *data,
					      u32 flags)
{
	struct vxlan_net *vn = net_generic(net, vxlan_net_id);
	struct vxlan_sock *vs;
	struct socket *sock;
	unsigned int h;
	bool ipv6 = !!(flags & VXLAN_F_IPV6);
	struct udp_tunnel_sock_cfg tunnel_cfg;

	vs = kzalloc(sizeof(*vs), GFP_KERNEL);
	if (!vs)
		return ERR_PTR(-ENOMEM);

	for (h = 0; h < VNI_HASH_SIZE; ++h)
		INIT_HLIST_HEAD(&vs->vni_list[h]);

	INIT_WORK(&vs->del_work, vxlan_del_work);

	sock = vxlan_create_sock(net, ipv6, port, flags);
	if (IS_ERR(sock)) {
		pr_info("Cannot bind port %d, err=%ld\n", ntohs(port),
			PTR_ERR(sock));
		kfree(vs);
		return ERR_CAST(sock);
	}

	vs->sock = sock;
	atomic_set(&vs->refcnt, 1);
	vs->rcv = rcv;
	vs->data = data;
	vs->flags = (flags & VXLAN_F_RCV_FLAGS);

	/* Initialize the vxlan udp offloads structure */
	vs->udp_offloads.port = port;
	vs->udp_offloads.callbacks.gro_receive  = vxlan_gro_receive;
	vs->udp_offloads.callbacks.gro_complete = vxlan_gro_complete;

	spin_lock(&vn->sock_lock);
	hlist_add_head_rcu(&vs->hlist, vs_head(net, port));
	vxlan_notify_add_rx_port(vs);
	spin_unlock(&vn->sock_lock);

	/* Mark socket as an encapsulation socket. */
	tunnel_cfg.sk_user_data = vs;
	tunnel_cfg.encap_type = 1;
	tunnel_cfg.encap_rcv = vxlan_udp_encap_recv;
	tunnel_cfg.encap_destroy = NULL;

	setup_udp_tunnel_sock(net, sock, &tunnel_cfg);

	return vs;
}
struct vxlan_sock *vxlan_sock_add(struct net *net, __be16 port,
				  vxlan_rcv_t *rcv, void *data,
				  bool no_share, u32 flags)
{
	struct vxlan_net *vn = net_generic(net, vxlan_net_id);
	struct vxlan_sock *vs;
	bool ipv6 = flags & VXLAN_F_IPV6;

	if (!no_share) {
		spin_lock(&vn->sock_lock);
		vs = vxlan_find_sock(net, ipv6 ? AF_INET6 : AF_INET, port,
				     flags);
		if (vs && vs->rcv == rcv) {
			if (!atomic_add_unless(&vs->refcnt, 1, 0))
				vs = ERR_PTR(-EBUSY);
			spin_unlock(&vn->sock_lock);
			return vs;
		}
		spin_unlock(&vn->sock_lock);
	}

	return vxlan_socket_create(net, port, rcv, data, flags);
}
EXPORT_SYMBOL_GPL(vxlan_sock_add);
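/* A minimal sketch of an external consumer of this export (hypothetical
 * module "foo"; the callback signature follows vxlan_rcv_t as declared in
 * <net/vxlan.h> of this tree and may differ across kernel versions):
 *
 *	static void foo_vxlan_rcv(struct vxlan_sock *vs, struct sk_buff *skb,
 *				  struct vxlan_metadata *md)
 *	{
 *		// consume the decapsulated inner frame
 *	}
 *
 *	vs = vxlan_sock_add(net, htons(4789), foo_vxlan_rcv, foo_ctx,
 *			    false, 0);
 *	if (IS_ERR(vs))
 *		return PTR_ERR(vs);
 *	// ... use the socket ...
 *	vxlan_sock_release(vs);	// drops the refcount taken above
 */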
static int vxlan_newlink(struct net *src_net, struct net_device *dev,
			 struct nlattr *tb[], struct nlattr *data[])
{
	struct vxlan_net *vn = net_generic(src_net, vxlan_net_id);
	struct vxlan_dev *vxlan = netdev_priv(dev);
	struct vxlan_rdst *dst = &vxlan->default_dst;
	__u32 vni;
	int err;
	bool use_ipv6 = false;

	if (!data[IFLA_VXLAN_ID])
		return -EINVAL;

	vxlan->net = src_net;

	vni = nla_get_u32(data[IFLA_VXLAN_ID]);
	dst->remote_vni = vni;

	/* Unless IPv6 is explicitly requested, assume IPv4 */
	dst->remote_ip.sa.sa_family = AF_INET;
	if (data[IFLA_VXLAN_GROUP]) {
		dst->remote_ip.sin.sin_addr.s_addr = nla_get_in_addr(data[IFLA_VXLAN_GROUP]);
	} else if (data[IFLA_VXLAN_GROUP6]) {
		if (!IS_ENABLED(CONFIG_IPV6))
			return -EPFNOSUPPORT;

		dst->remote_ip.sin6.sin6_addr = nla_get_in6_addr(data[IFLA_VXLAN_GROUP6]);
		dst->remote_ip.sa.sa_family = AF_INET6;
		use_ipv6 = true;
	}

	if (data[IFLA_VXLAN_LOCAL]) {
		vxlan->saddr.sin.sin_addr.s_addr = nla_get_in_addr(data[IFLA_VXLAN_LOCAL]);
		vxlan->saddr.sa.sa_family = AF_INET;
	} else if (data[IFLA_VXLAN_LOCAL6]) {
		if (!IS_ENABLED(CONFIG_IPV6))
			return -EPFNOSUPPORT;

		/* TODO: respect scope id */
		vxlan->saddr.sin6.sin6_addr = nla_get_in6_addr(data[IFLA_VXLAN_LOCAL6]);
		vxlan->saddr.sa.sa_family = AF_INET6;
		use_ipv6 = true;
	}

	if (data[IFLA_VXLAN_LINK] &&
	    (dst->remote_ifindex = nla_get_u32(data[IFLA_VXLAN_LINK]))) {
		struct net_device *lowerdev
			= __dev_get_by_index(src_net, dst->remote_ifindex);

		if (!lowerdev) {
			pr_info("ifindex %d does not exist\n", dst->remote_ifindex);
			return -ENODEV;
		}

#if IS_ENABLED(CONFIG_IPV6)
		if (use_ipv6) {
			struct inet6_dev *idev = __in6_dev_get(lowerdev);

			if (idev && idev->cnf.disable_ipv6) {
				pr_info("IPv6 is disabled via sysctl\n");
				return -EPERM;
			}
			vxlan->flags |= VXLAN_F_IPV6;
		}
#endif

		if (!tb[IFLA_MTU])
			dev->mtu = lowerdev->mtu - (use_ipv6 ? VXLAN6_HEADROOM : VXLAN_HEADROOM);

		dev->needed_headroom = lowerdev->hard_header_len +
				       (use_ipv6 ? VXLAN6_HEADROOM : VXLAN_HEADROOM);
	} else if (use_ipv6) {
		vxlan->flags |= VXLAN_F_IPV6;
	}

	if (data[IFLA_VXLAN_TOS])
		vxlan->tos = nla_get_u8(data[IFLA_VXLAN_TOS]);

	if (data[IFLA_VXLAN_TTL])
		vxlan->ttl = nla_get_u8(data[IFLA_VXLAN_TTL]);

	/* Learning defaults to on unless explicitly turned off */
	if (!data[IFLA_VXLAN_LEARNING] || nla_get_u8(data[IFLA_VXLAN_LEARNING]))
		vxlan->flags |= VXLAN_F_LEARN;

	if (data[IFLA_VXLAN_AGEING])
		vxlan->age_interval = nla_get_u32(data[IFLA_VXLAN_AGEING]);
	else
		vxlan->age_interval = FDB_AGE_DEFAULT;

	if (data[IFLA_VXLAN_PROXY] && nla_get_u8(data[IFLA_VXLAN_PROXY]))
		vxlan->flags |= VXLAN_F_PROXY;

	if (data[IFLA_VXLAN_RSC] && nla_get_u8(data[IFLA_VXLAN_RSC]))
		vxlan->flags |= VXLAN_F_RSC;

	if (data[IFLA_VXLAN_L2MISS] && nla_get_u8(data[IFLA_VXLAN_L2MISS]))
		vxlan->flags |= VXLAN_F_L2MISS;

	if (data[IFLA_VXLAN_L3MISS] && nla_get_u8(data[IFLA_VXLAN_L3MISS]))
		vxlan->flags |= VXLAN_F_L3MISS;

	if (data[IFLA_VXLAN_LIMIT])
		vxlan->addrmax = nla_get_u32(data[IFLA_VXLAN_LIMIT]);

	if (data[IFLA_VXLAN_PORT_RANGE]) {
		const struct ifla_vxlan_port_range *p
			= nla_data(data[IFLA_VXLAN_PORT_RANGE]);
		vxlan->port_min = ntohs(p->low);
		vxlan->port_max = ntohs(p->high);
	}

	if (data[IFLA_VXLAN_PORT])
		vxlan->dst_port = nla_get_be16(data[IFLA_VXLAN_PORT]);

	if (data[IFLA_VXLAN_UDP_CSUM] && nla_get_u8(data[IFLA_VXLAN_UDP_CSUM]))
		vxlan->flags |= VXLAN_F_UDP_CSUM;

	if (data[IFLA_VXLAN_UDP_ZERO_CSUM6_TX] &&
	    nla_get_u8(data[IFLA_VXLAN_UDP_ZERO_CSUM6_TX]))
		vxlan->flags |= VXLAN_F_UDP_ZERO_CSUM6_TX;

	if (data[IFLA_VXLAN_UDP_ZERO_CSUM6_RX] &&
	    nla_get_u8(data[IFLA_VXLAN_UDP_ZERO_CSUM6_RX]))
		vxlan->flags |= VXLAN_F_UDP_ZERO_CSUM6_RX;

	if (data[IFLA_VXLAN_REMCSUM_TX] &&
	    nla_get_u8(data[IFLA_VXLAN_REMCSUM_TX]))
		vxlan->flags |= VXLAN_F_REMCSUM_TX;

	if (data[IFLA_VXLAN_REMCSUM_RX] &&
	    nla_get_u8(data[IFLA_VXLAN_REMCSUM_RX]))
		vxlan->flags |= VXLAN_F_REMCSUM_RX;

	if (data[IFLA_VXLAN_GBP])
		vxlan->flags |= VXLAN_F_GBP;

	if (data[IFLA_VXLAN_REMCSUM_NOPARTIAL])
		vxlan->flags |= VXLAN_F_REMCSUM_NOPARTIAL;

	if (vxlan_find_vni(src_net, vni, use_ipv6 ? AF_INET6 : AF_INET,
			   vxlan->dst_port, vxlan->flags)) {
		pr_info("duplicate VNI %u\n", vni);
		return -EEXIST;
	}

	dev->ethtool_ops = &vxlan_ethtool_ops;

	/* create an fdb entry for a valid default destination */
	if (!vxlan_addr_any(&vxlan->default_dst.remote_ip)) {
		err = vxlan_fdb_create(vxlan, all_zeros_mac,
				       &vxlan->default_dst.remote_ip,
				       NUD_REACHABLE|NUD_PERMANENT,
				       NLM_F_EXCL|NLM_F_CREATE,
				       vxlan->dst_port,
				       vxlan->default_dst.remote_vni,
				       vxlan->default_dst.remote_ifindex,
				       NTF_SELF);
		if (err)
			return err;
	}

	err = register_netdevice(dev);
	if (err) {
		vxlan_fdb_delete_default(vxlan);
		return err;
	}

	list_add(&vxlan->next, &vn->vxlan_list);

	return 0;
}
static void vxlan_dellink(struct net_device *dev, struct list_head *head)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id);

	spin_lock(&vn->sock_lock);
	if (!hlist_unhashed(&vxlan->hlist))
		hlist_del_rcu(&vxlan->hlist);
	spin_unlock(&vn->sock_lock);

	list_del(&vxlan->next);
	unregister_netdevice_queue(dev, head);
}
static size_t vxlan_get_size(const struct net_device *dev)
{
	return nla_total_size(sizeof(__u32)) +	/* IFLA_VXLAN_ID */
		nla_total_size(sizeof(struct in6_addr)) + /* IFLA_VXLAN_GROUP{6} */
		nla_total_size(sizeof(__u32)) +	/* IFLA_VXLAN_LINK */
		nla_total_size(sizeof(struct in6_addr)) + /* IFLA_VXLAN_LOCAL{6} */
		nla_total_size(sizeof(__u8)) +	/* IFLA_VXLAN_TTL */
		nla_total_size(sizeof(__u8)) +	/* IFLA_VXLAN_TOS */
		nla_total_size(sizeof(__u8)) +	/* IFLA_VXLAN_LEARNING */
		nla_total_size(sizeof(__u8)) +	/* IFLA_VXLAN_PROXY */
		nla_total_size(sizeof(__u8)) +	/* IFLA_VXLAN_RSC */
		nla_total_size(sizeof(__u8)) +	/* IFLA_VXLAN_L2MISS */
		nla_total_size(sizeof(__u8)) +	/* IFLA_VXLAN_L3MISS */
		nla_total_size(sizeof(__u32)) +	/* IFLA_VXLAN_AGEING */
		nla_total_size(sizeof(__u32)) +	/* IFLA_VXLAN_LIMIT */
		nla_total_size(sizeof(struct ifla_vxlan_port_range)) + /* IFLA_VXLAN_PORT_RANGE */
		nla_total_size(sizeof(__be16)) + /* IFLA_VXLAN_PORT */
		nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_UDP_CSUM */
		nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_UDP_ZERO_CSUM6_TX */
		nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_UDP_ZERO_CSUM6_RX */
		nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_REMCSUM_TX */
		nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_REMCSUM_RX */
		0;
}
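/* For reference: nla_total_size(payload) accounts for the attribute header
 * plus alignment, e.g. nla_total_size(sizeof(__u32)) = NLA_ALIGN(NLA_HDRLEN
 * + 4) = 8 bytes. The sum above is therefore a worst-case bound on the
 * netlink message size (both GROUP and LOCAL are budgeted at in6_addr size,
 * which also covers their IPv4 forms), not an exact figure.
 */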
static int vxlan_fill_info(struct sk_buff *skb, const struct net_device *dev)
{
	const struct vxlan_dev *vxlan = netdev_priv(dev);
	const struct vxlan_rdst *dst = &vxlan->default_dst;
	struct ifla_vxlan_port_range ports = {
		.low =  htons(vxlan->port_min),
		.high = htons(vxlan->port_max),
	};

	if (nla_put_u32(skb, IFLA_VXLAN_ID, dst->remote_vni))
		goto nla_put_failure;

	if (!vxlan_addr_any(&dst->remote_ip)) {
		if (dst->remote_ip.sa.sa_family == AF_INET) {
			if (nla_put_in_addr(skb, IFLA_VXLAN_GROUP,
					    dst->remote_ip.sin.sin_addr.s_addr))
				goto nla_put_failure;
#if IS_ENABLED(CONFIG_IPV6)
		} else {
			if (nla_put_in6_addr(skb, IFLA_VXLAN_GROUP6,
					     &dst->remote_ip.sin6.sin6_addr))
				goto nla_put_failure;
#endif
		}
	}

	if (dst->remote_ifindex && nla_put_u32(skb, IFLA_VXLAN_LINK, dst->remote_ifindex))
		goto nla_put_failure;

	if (!vxlan_addr_any(&vxlan->saddr)) {
		if (vxlan->saddr.sa.sa_family == AF_INET) {
			if (nla_put_in_addr(skb, IFLA_VXLAN_LOCAL,
					    vxlan->saddr.sin.sin_addr.s_addr))
				goto nla_put_failure;
#if IS_ENABLED(CONFIG_IPV6)
		} else {
			if (nla_put_in6_addr(skb, IFLA_VXLAN_LOCAL6,
					     &vxlan->saddr.sin6.sin6_addr))
				goto nla_put_failure;
#endif
		}
	}

	if (nla_put_u8(skb, IFLA_VXLAN_TTL, vxlan->ttl) ||
	    nla_put_u8(skb, IFLA_VXLAN_TOS, vxlan->tos) ||
	    nla_put_u8(skb, IFLA_VXLAN_LEARNING,
		       !!(vxlan->flags & VXLAN_F_LEARN)) ||
	    nla_put_u8(skb, IFLA_VXLAN_PROXY,
		       !!(vxlan->flags & VXLAN_F_PROXY)) ||
	    nla_put_u8(skb, IFLA_VXLAN_RSC, !!(vxlan->flags & VXLAN_F_RSC)) ||
	    nla_put_u8(skb, IFLA_VXLAN_L2MISS,
		       !!(vxlan->flags & VXLAN_F_L2MISS)) ||
	    nla_put_u8(skb, IFLA_VXLAN_L3MISS,
		       !!(vxlan->flags & VXLAN_F_L3MISS)) ||
	    nla_put_u32(skb, IFLA_VXLAN_AGEING, vxlan->age_interval) ||
	    nla_put_u32(skb, IFLA_VXLAN_LIMIT, vxlan->addrmax) ||
	    nla_put_be16(skb, IFLA_VXLAN_PORT, vxlan->dst_port) ||
	    nla_put_u8(skb, IFLA_VXLAN_UDP_CSUM,
		       !!(vxlan->flags & VXLAN_F_UDP_CSUM)) ||
	    nla_put_u8(skb, IFLA_VXLAN_UDP_ZERO_CSUM6_TX,
		       !!(vxlan->flags & VXLAN_F_UDP_ZERO_CSUM6_TX)) ||
	    nla_put_u8(skb, IFLA_VXLAN_UDP_ZERO_CSUM6_RX,
		       !!(vxlan->flags & VXLAN_F_UDP_ZERO_CSUM6_RX)) ||
	    nla_put_u8(skb, IFLA_VXLAN_REMCSUM_TX,
		       !!(vxlan->flags & VXLAN_F_REMCSUM_TX)) ||
	    nla_put_u8(skb, IFLA_VXLAN_REMCSUM_RX,
		       !!(vxlan->flags & VXLAN_F_REMCSUM_RX)))
		goto nla_put_failure;

	if (nla_put(skb, IFLA_VXLAN_PORT_RANGE, sizeof(ports), &ports))
		goto nla_put_failure;

	if (vxlan->flags & VXLAN_F_GBP &&
	    nla_put_flag(skb, IFLA_VXLAN_GBP))
		goto nla_put_failure;

	if (vxlan->flags & VXLAN_F_REMCSUM_NOPARTIAL &&
	    nla_put_flag(skb, IFLA_VXLAN_REMCSUM_NOPARTIAL))
		goto nla_put_failure;

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}
static struct net *vxlan_get_link_net(const struct net_device *dev)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	return vxlan->net;
}
static struct rtnl_link_ops vxlan_link_ops __read_mostly = {
	.kind		= "vxlan",
	.maxtype	= IFLA_VXLAN_MAX,
	.policy		= vxlan_policy,
	.priv_size	= sizeof(struct vxlan_dev),
	.setup		= vxlan_setup,
	.validate	= vxlan_validate,
	.newlink	= vxlan_newlink,
	.dellink	= vxlan_dellink,
	.get_size	= vxlan_get_size,
	.fill_info	= vxlan_fill_info,
	.get_link_net	= vxlan_get_link_net,
};
static void vxlan_handle_lowerdev_unregister(struct vxlan_net *vn,
					     struct net_device *dev)
{
	struct vxlan_dev *vxlan, *next;
	LIST_HEAD(list_kill);

	list_for_each_entry_safe(vxlan, next, &vn->vxlan_list, next) {
		struct vxlan_rdst *dst = &vxlan->default_dst;

		/* In case we created vxlan device with carrier
		 * and we lose the carrier due to module unload
		 * we also need to remove vxlan device. In other
		 * cases, it's not necessary and remote_ifindex
		 * is 0 here, so no matches.
		 */
		if (dst->remote_ifindex == dev->ifindex)
			vxlan_dellink(vxlan->dev, &list_kill);
	}

	unregister_netdevice_many(&list_kill);
}
static int vxlan_lowerdev_event(struct notifier_block *unused,
				unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct vxlan_net *vn = net_generic(dev_net(dev), vxlan_net_id);

	if (event == NETDEV_UNREGISTER)
		vxlan_handle_lowerdev_unregister(vn, dev);

	return NOTIFY_DONE;
}

static struct notifier_block vxlan_notifier_block __read_mostly = {
	.notifier_call = vxlan_lowerdev_event,
};
static __net_init int vxlan_init_net(struct net *net)
{
	struct vxlan_net *vn = net_generic(net, vxlan_net_id);
	unsigned int h;

	INIT_LIST_HEAD(&vn->vxlan_list);
	spin_lock_init(&vn->sock_lock);

	for (h = 0; h < PORT_HASH_SIZE; ++h)
		INIT_HLIST_HEAD(&vn->sock_list[h]);

	return 0;
}
static void __net_exit vxlan_exit_net(struct net *net)
{
	struct vxlan_net *vn = net_generic(net, vxlan_net_id);
	struct vxlan_dev *vxlan, *next;
	struct net_device *dev, *aux;
	LIST_HEAD(list);

	rtnl_lock();
	for_each_netdev_safe(net, dev, aux)
		if (dev->rtnl_link_ops == &vxlan_link_ops)
			unregister_netdevice_queue(dev, &list);

	list_for_each_entry_safe(vxlan, next, &vn->vxlan_list, next) {
		/* If vxlan->dev is in the same netns, it has already been added
		 * to the list by the previous loop.
		 */
		if (!net_eq(dev_net(vxlan->dev), net))
			/* queue vxlan->dev, not the stale "dev" cursor left
			 * over from the loop above
			 */
			unregister_netdevice_queue(vxlan->dev, &list);
	}

	unregister_netdevice_many(&list);
	rtnl_unlock();
}
static struct pernet_operations vxlan_net_ops = {
	.init = vxlan_init_net,
	.exit = vxlan_exit_net,
	.id   = &vxlan_net_id,
	.size = sizeof(struct vxlan_net),
};
static int __init vxlan_init_module(void)
{
	int rc;

	vxlan_wq = alloc_workqueue("vxlan", 0, 0);
	if (!vxlan_wq)
		return -ENOMEM;

	get_random_bytes(&vxlan_salt, sizeof(vxlan_salt));

	rc = register_pernet_subsys(&vxlan_net_ops);
	if (rc)
		goto out1;

	rc = register_netdevice_notifier(&vxlan_notifier_block);
	if (rc)
		goto out2;

	rc = rtnl_link_register(&vxlan_link_ops);
	if (rc)
		goto out3;

	return 0;
out3:
	unregister_netdevice_notifier(&vxlan_notifier_block);
out2:
	unregister_pernet_subsys(&vxlan_net_ops);
out1:
	destroy_workqueue(vxlan_wq);
	return rc;
}
late_initcall(vxlan_init_module);
static void __exit vxlan_cleanup_module(void)
{
	rtnl_link_unregister(&vxlan_link_ops);
	unregister_netdevice_notifier(&vxlan_notifier_block);
	destroy_workqueue(vxlan_wq);
	unregister_pernet_subsys(&vxlan_net_ops);
	/* rcu_barrier() is called by netns */
}
module_exit(vxlan_cleanup_module);
MODULE_LICENSE("GPL");
MODULE_VERSION(VXLAN_VERSION);
MODULE_AUTHOR("Stephen Hemminger <stephen@networkplumber.org>");
MODULE_DESCRIPTION("Driver for VXLAN encapsulated traffic");
MODULE_ALIAS_RTNL_LINK("vxlan");