/*
 * Copyright (c) 2013 Nicira, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/capability.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/in.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/if_arp.h>
#include <linux/mroute.h>
#include <linux/init.h>
#include <linux/in6.h>
#include <linux/inetdevice.h>
#include <linux/igmp.h>
#include <linux/netfilter_ipv4.h>
#include <linux/etherdevice.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/rculist.h>

#include <net/sock.h>
#include <net/ip.h>
#include <net/icmp.h>
#include <net/protocol.h>
#include <net/ip_tunnels.h>
#include <net/arp.h>
#include <net/checksum.h>
#include <net/dsfield.h>
#include <net/inet_ecn.h>
#include <net/xfrm.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <net/rtnetlink.h>

#if IS_ENABLED(CONFIG_IPV6)
#include <net/ipv6.h>
#include <net/ip6_fib.h>
#include <net/ip6_route.h>
#endif

static unsigned int ip_tunnel_hash(struct ip_tunnel_net *itn,
				   __be32 key, __be32 remote)
{
	return hash_32((__force u32)key ^ (__force u32)remote,
			 IP_TNL_HASH_BITS);
}

/* Often modified stats are per cpu, others are shared (netdev->stats) */
struct rtnl_link_stats64 *ip_tunnel_get_stats64(struct net_device *dev,
						struct rtnl_link_stats64 *tot)
{
	int i;

	for_each_possible_cpu(i) {
		const struct pcpu_tstats *tstats = per_cpu_ptr(dev->tstats, i);
		u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
		unsigned int start;

		do {
			start = u64_stats_fetch_begin_bh(&tstats->syncp);
			rx_packets = tstats->rx_packets;
			tx_packets = tstats->tx_packets;
			rx_bytes = tstats->rx_bytes;
			tx_bytes = tstats->tx_bytes;
		} while (u64_stats_fetch_retry_bh(&tstats->syncp, start));

		tot->rx_packets += rx_packets;
		tot->tx_packets += tx_packets;
		tot->rx_bytes   += rx_bytes;
		tot->tx_bytes   += tx_bytes;
	}

	tot->multicast = dev->stats.multicast;

	tot->rx_crc_errors = dev->stats.rx_crc_errors;
	tot->rx_fifo_errors = dev->stats.rx_fifo_errors;
	tot->rx_length_errors = dev->stats.rx_length_errors;
	tot->rx_frame_errors = dev->stats.rx_frame_errors;
	tot->rx_errors = dev->stats.rx_errors;

	tot->tx_fifo_errors = dev->stats.tx_fifo_errors;
	tot->tx_carrier_errors = dev->stats.tx_carrier_errors;
	tot->tx_dropped = dev->stats.tx_dropped;
	tot->tx_aborted_errors = dev->stats.tx_aborted_errors;
	tot->tx_errors = dev->stats.tx_errors;

	tot->collisions = dev->stats.collisions;

	return tot;
}
EXPORT_SYMBOL_GPL(ip_tunnel_get_stats64);

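/* A tunnel configured with TUNNEL_KEY in i_flags accepts only packets that
 * carry the same key; a keyless tunnel accepts only keyless packets.
 */
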
static bool ip_tunnel_key_match(const struct ip_tunnel_parm *p,
				__be16 flags, __be32 key)
{
	if (p->i_flags & TUNNEL_KEY) {
		if (flags & TUNNEL_KEY)
			return key == p->i_key;
		else
			/* key expected, none present */
			return false;
	} else
		return !(flags & TUNNEL_KEY);
}

/* Fallback tunnel: no source, no destination, no key, no options

   Tunnel hash table:
   We require an exact key match, i.e. if a key is present in the packet
   it will match only a tunnel with the same key; if no key is present,
   it will match only a keyless tunnel.

   All keyless packets that do not match a configured keyless tunnel
   will match the fallback tunnel.
   Given src, dst and key, find the appropriate tunnel for an incoming packet.
*/
struct ip_tunnel *ip_tunnel_lookup(struct ip_tunnel_net *itn,
				   int link, __be16 flags,
				   __be32 remote, __be32 local,
				   __be32 key)
{
	unsigned int hash;
	struct ip_tunnel *t, *cand = NULL;
	struct hlist_head *head;

	hash = ip_tunnel_hash(itn, key, remote);
	head = &itn->tunnels[hash];

	hlist_for_each_entry_rcu(t, head, hash_node) {
		if (local != t->parms.iph.saddr ||
		    remote != t->parms.iph.daddr ||
		    !(t->dev->flags & IFF_UP))
			continue;

		if (!ip_tunnel_key_match(&t->parms, flags, key))
			continue;

		if (t->parms.link == link)
			return t;
		else
			cand = t;
	}

	hlist_for_each_entry_rcu(t, head, hash_node) {
		if (remote != t->parms.iph.daddr ||
		    !(t->dev->flags & IFF_UP))
			continue;

		if (!ip_tunnel_key_match(&t->parms, flags, key))
			continue;

		if (t->parms.link == link)
			return t;
		else if (!cand)
			cand = t;
	}

	hash = ip_tunnel_hash(itn, key, 0);
	head = &itn->tunnels[hash];

	hlist_for_each_entry_rcu(t, head, hash_node) {
		if ((local != t->parms.iph.saddr &&
		     (local != t->parms.iph.daddr ||
		      !ipv4_is_multicast(local))) ||
		    !(t->dev->flags & IFF_UP))
			continue;

		if (!ip_tunnel_key_match(&t->parms, flags, key))
			continue;

		if (t->parms.link == link)
			return t;
		else if (!cand)
			cand = t;
	}

	if (flags & TUNNEL_NO_KEY)
		goto skip_key_lookup;

	hlist_for_each_entry_rcu(t, head, hash_node) {
		if (t->parms.i_key != key ||
		    !(t->dev->flags & IFF_UP))
			continue;

		if (t->parms.link == link)
			return t;
		else if (!cand)
			cand = t;
	}

skip_key_lookup:
	if (cand)
		return cand;

	if (itn->fb_tunnel_dev && itn->fb_tunnel_dev->flags & IFF_UP)
		return netdev_priv(itn->fb_tunnel_dev);

	return NULL;
}
EXPORT_SYMBOL_GPL(ip_tunnel_lookup);

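/* Bucket selection mirrors the lookup above: the hash covers the input key
 * and the remote (daddr) address, except that multicast destinations are
 * hashed as if the remote address were zero.
 */
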
static struct hlist_head *ip_bucket(struct ip_tunnel_net *itn,
				    struct ip_tunnel_parm *parms)
{
	unsigned int h;
	__be32 remote;

	if (parms->iph.daddr && !ipv4_is_multicast(parms->iph.daddr))
		remote = parms->iph.daddr;
	else
		remote = 0;

	h = ip_tunnel_hash(itn, parms->i_key, remote);
	return &itn->tunnels[h];
}

static void ip_tunnel_add(struct ip_tunnel_net *itn, struct ip_tunnel *t)
{
	struct hlist_head *head = ip_bucket(itn, &t->parms);

	hlist_add_head_rcu(&t->hash_node, head);
}

static void ip_tunnel_del(struct ip_tunnel *t)
{
	hlist_del_init_rcu(&t->hash_node);
}

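/* Unlike the receive-path lookup above, ip_tunnel_find() matches a tunnel's
 * configuration exactly (saddr, daddr, key, link and device type) and is
 * used from the control paths that add or change tunnels.
 */
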
static struct ip_tunnel *ip_tunnel_find(struct ip_tunnel_net *itn,
					struct ip_tunnel_parm *parms,
					int type)
{
	__be32 remote = parms->iph.daddr;
	__be32 local = parms->iph.saddr;
	__be32 key = parms->i_key;
	int link = parms->link;
	struct ip_tunnel *t = NULL;
	struct hlist_head *head = ip_bucket(itn, parms);

	hlist_for_each_entry_rcu(t, head, hash_node) {
		if (local == t->parms.iph.saddr &&
		    remote == t->parms.iph.daddr &&
		    key == t->parms.i_key &&
		    link == t->parms.link &&
		    type == t->dev->type)
			break;
	}
	return t;
}

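/* Allocate and register a tunnel netdevice.  The name comes from parms->name
 * when set; otherwise "<kind>%d" is used and the kernel picks the next free
 * index.
 */
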
static struct net_device *__ip_tunnel_create(struct net *net,
					     const struct rtnl_link_ops *ops,
					     struct ip_tunnel_parm *parms)
{
	int err;
	struct ip_tunnel *tunnel;
	struct net_device *dev;
	char name[IFNAMSIZ];

	if (parms->name[0]) {
		strlcpy(name, parms->name, IFNAMSIZ);
	} else {
		if (strlen(ops->kind) > (IFNAMSIZ - 3)) {
			err = -E2BIG;
			goto failed;
		}
		strlcpy(name, ops->kind, IFNAMSIZ);
		strncat(name, "%d", 2);
	}

	ASSERT_RTNL();
	dev = alloc_netdev(ops->priv_size, name, ops->setup);
	if (!dev) {
		err = -ENOMEM;
		goto failed;
	}
	dev_net_set(dev, net);

	dev->rtnl_link_ops = ops;

	tunnel = netdev_priv(dev);
	tunnel->parms = *parms;

	err = register_netdevice(dev);
	if (err)
		goto failed_free;

	return dev;

failed_free:
	free_netdev(dev);
failed:
	return ERR_PTR(err);
}

static inline struct rtable *ip_route_output_tunnel(struct net *net,
						    struct flowi4 *fl4,
						    int proto,
						    __be32 daddr, __be32 saddr,
						    __be32 key, __u8 tos, int oif)
{
	memset(fl4, 0, sizeof(*fl4));
	fl4->flowi4_oif = oif;
	fl4->daddr = daddr;
	fl4->saddr = saddr;
	fl4->flowi4_tos = tos;
	fl4->flowi4_proto = proto;
	fl4->fl4_gre_key = key;
	return ip_route_output_key(net, fl4);
}

static int ip_tunnel_bind_dev(struct net_device *dev)
{
	struct net_device *tdev = NULL;
	struct ip_tunnel *tunnel = netdev_priv(dev);
	const struct iphdr *iph;
	int hlen = LL_MAX_HEADER;
	int mtu = ETH_DATA_LEN;
	int t_hlen = tunnel->hlen + sizeof(struct iphdr);

	iph = &tunnel->parms.iph;

	/* Guess output device to choose reasonable mtu and needed_headroom */
	if (iph->daddr) {
		struct flowi4 fl4;
		struct rtable *rt;

		rt = ip_route_output_tunnel(dev_net(dev), &fl4,
					    tunnel->parms.iph.protocol,
					    iph->daddr, iph->saddr,
					    tunnel->parms.o_key,
					    RT_TOS(iph->tos),
					    tunnel->parms.link);
		if (!IS_ERR(rt)) {
			tdev = rt->dst.dev;
			ip_rt_put(rt);
		}
		if (dev->type != ARPHRD_ETHER)
			dev->flags |= IFF_POINTOPOINT;
	}

	if (!tdev && tunnel->parms.link)
		tdev = __dev_get_by_index(dev_net(dev), tunnel->parms.link);

	if (tdev) {
		hlen = tdev->hard_header_len + tdev->needed_headroom;
		mtu = tdev->mtu;
	}
	dev->iflink = tunnel->parms.link;

	dev->needed_headroom = t_hlen + hlen;
	mtu -= (dev->hard_header_len + t_hlen);

	if (mtu < 68)
		mtu = 68;

	return mtu;
}

static struct ip_tunnel *ip_tunnel_create(struct net *net,
					  struct ip_tunnel_net *itn,
					  struct ip_tunnel_parm *parms)
{
	struct ip_tunnel *nt, *fbt;
	struct net_device *dev;

	BUG_ON(!itn->fb_tunnel_dev);
	fbt = netdev_priv(itn->fb_tunnel_dev);
	dev = __ip_tunnel_create(net, itn->fb_tunnel_dev->rtnl_link_ops, parms);
	if (IS_ERR(dev))
		return NULL;

	dev->mtu = ip_tunnel_bind_dev(dev);

	nt = netdev_priv(dev);
	ip_tunnel_add(itn, nt);
	return nt;
}

int ip_tunnel_rcv(struct ip_tunnel *tunnel, struct sk_buff *skb,
		  const struct tnl_ptk_info *tpi, int hdr_len, bool log_ecn_error)
{
	struct pcpu_tstats *tstats;
	const struct iphdr *iph = ip_hdr(skb);
	int err;

	secpath_reset(skb);

	skb->protocol = tpi->proto;

	skb->mac_header = skb->network_header;
	__pskb_pull(skb, hdr_len);
	skb_postpull_rcsum(skb, skb_transport_header(skb), tunnel->hlen);
#ifdef CONFIG_NET_IPGRE_BROADCAST
	if (ipv4_is_multicast(iph->daddr)) {
		/* Looped back packet, drop it! */
		if (rt_is_output_route(skb_rtable(skb)))
			goto drop;
		tunnel->dev->stats.multicast++;
		skb->pkt_type = PACKET_BROADCAST;
	}
#endif

	if ((!(tpi->flags&TUNNEL_CSUM) && (tunnel->parms.i_flags&TUNNEL_CSUM)) ||
	     ((tpi->flags&TUNNEL_CSUM) && !(tunnel->parms.i_flags&TUNNEL_CSUM))) {
		tunnel->dev->stats.rx_crc_errors++;
		tunnel->dev->stats.rx_errors++;
		goto drop;
	}

	if (tunnel->parms.i_flags&TUNNEL_SEQ) {
		if (!(tpi->flags&TUNNEL_SEQ) ||
		    (tunnel->i_seqno && (s32)(ntohl(tpi->seq) - tunnel->i_seqno) < 0)) {
			tunnel->dev->stats.rx_fifo_errors++;
			tunnel->dev->stats.rx_errors++;
			goto drop;
		}
		tunnel->i_seqno = ntohl(tpi->seq) + 1;
	}

	/* Warning: All skb pointers will be invalidated! */
	if (tunnel->dev->type == ARPHRD_ETHER) {
		if (!pskb_may_pull(skb, ETH_HLEN)) {
			tunnel->dev->stats.rx_length_errors++;
			tunnel->dev->stats.rx_errors++;
			goto drop;
		}

		iph = ip_hdr(skb);
		skb->protocol = eth_type_trans(skb, tunnel->dev);
		skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
	}

	skb->pkt_type = PACKET_HOST;
	__skb_tunnel_rx(skb, tunnel->dev);

	skb_reset_network_header(skb);
	err = IP_ECN_decapsulate(iph, skb);
	if (unlikely(err)) {
		if (log_ecn_error)
			net_info_ratelimited("non-ECT from %pI4 with TOS=%#x\n",
					&iph->saddr, iph->tos);
		if (err > 1) {
			++tunnel->dev->stats.rx_frame_errors;
			++tunnel->dev->stats.rx_errors;
			goto drop;
		}
	}

	tstats = this_cpu_ptr(tunnel->dev->tstats);
	u64_stats_update_begin(&tstats->syncp);
	tstats->rx_packets++;
	tstats->rx_bytes += skb->len;
	u64_stats_update_end(&tstats->syncp);

	gro_cells_receive(&tunnel->gro_cells, skb);
	return 0;

drop:
	kfree_skb(skb);
	return 0;
}
EXPORT_SYMBOL_GPL(ip_tunnel_rcv);

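/* Clamp the path MTU towards the inner flow: update the cached dst MTU and,
 * when an oversized packet cannot be fragmented, send ICMP_FRAG_NEEDED (IPv4)
 * or ICMPV6_PKT_TOOBIG (IPv6) back to the sender.
 */
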
static int tnl_update_pmtu(struct net_device *dev, struct sk_buff *skb,
			    struct rtable *rt, __be16 df)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	int pkt_size = skb->len - tunnel->hlen - dev->hard_header_len;
	int mtu;

	if (df)
		mtu = dst_mtu(&rt->dst) - dev->hard_header_len
					- sizeof(struct iphdr) - tunnel->hlen;
	else
		mtu = skb_dst(skb) ? dst_mtu(skb_dst(skb)) : dev->mtu;

	if (skb_dst(skb))
		skb_dst(skb)->ops->update_pmtu(skb_dst(skb), NULL, skb, mtu);

	if (skb->protocol == htons(ETH_P_IP)) {
		if (!skb_is_gso(skb) &&
		    (df & htons(IP_DF)) && mtu < pkt_size) {
			icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu));
			return -E2BIG;
		}
	}
#if IS_ENABLED(CONFIG_IPV6)
	else if (skb->protocol == htons(ETH_P_IPV6)) {
		struct rt6_info *rt6 = (struct rt6_info *)skb_dst(skb);

		if (rt6 && mtu < dst_mtu(skb_dst(skb)) &&
			   mtu >= IPV6_MIN_MTU) {
			if ((tunnel->parms.iph.daddr &&
			    !ipv4_is_multicast(tunnel->parms.iph.daddr)) ||
			    rt6->rt6i_dst.plen == 128) {
				rt6->rt6i_flags |= RTF_MODIFIED;
				dst_metric_set(skb_dst(skb), RTAX_MTU, mtu);
			}
		}

		if (!skb_is_gso(skb) && mtu >= IPV6_MIN_MTU &&
					mtu < pkt_size) {
			icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
			return -E2BIG;
		}
	}
#endif
	return 0;
}

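/* Common transmit path: resolve the outer destination (including NBMA
 * tunnels with no fixed daddr), route the encapsulated packet, enforce PMTU,
 * then push and fill the outer IP header before handing the skb to
 * iptunnel_xmit().
 */
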
void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
		    const struct iphdr *tnl_params)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	const struct iphdr *inner_iph;
	struct iphdr *iph;
	struct flowi4 fl4;
	u8     tos, ttl;
	__be16 df;
	struct rtable *rt;		/* Route to the other host */
	struct net_device *tdev;	/* Device to other host */
	unsigned int max_headroom;	/* The extra header space needed */
	__be32 dst;

	inner_iph = (const struct iphdr *)skb_inner_network_header(skb);

	memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
	dst = tnl_params->daddr;
	if (dst == 0) {
		/* NBMA tunnel */

		if (skb_dst(skb) == NULL) {
			dev->stats.tx_fifo_errors++;
			goto tx_error;
		}

		if (skb->protocol == htons(ETH_P_IP)) {
			rt = skb_rtable(skb);
			dst = rt_nexthop(rt, inner_iph->daddr);
		}
#if IS_ENABLED(CONFIG_IPV6)
		else if (skb->protocol == htons(ETH_P_IPV6)) {
			const struct in6_addr *addr6;
			struct neighbour *neigh;
			bool do_tx_error_icmp;
			int addr_type;

			neigh = dst_neigh_lookup(skb_dst(skb),
						 &ipv6_hdr(skb)->daddr);
			if (neigh == NULL)
				goto tx_error;

			addr6 = (const struct in6_addr *)&neigh->primary_key;
			addr_type = ipv6_addr_type(addr6);

			if (addr_type == IPV6_ADDR_ANY) {
				addr6 = &ipv6_hdr(skb)->daddr;
				addr_type = ipv6_addr_type(addr6);
			}

			if ((addr_type & IPV6_ADDR_COMPATv4) == 0)
				do_tx_error_icmp = true;
			else {
				do_tx_error_icmp = false;
				dst = addr6->s6_addr32[3];
			}
			neigh_release(neigh);
			if (do_tx_error_icmp)
				goto tx_error_icmp;
		}
#endif
		else
			goto tx_error;
	}

	tos = tnl_params->tos;
	if (tos & 0x1) {
		tos &= ~0x1;
		if (skb->protocol == htons(ETH_P_IP))
			tos = inner_iph->tos;
		else if (skb->protocol == htons(ETH_P_IPV6))
			tos = ipv6_get_dsfield((const struct ipv6hdr *)inner_iph);
	}

	rt = ip_route_output_tunnel(dev_net(dev), &fl4,
				    tunnel->parms.iph.protocol,
				    dst, tnl_params->saddr,
				    tunnel->parms.o_key,
				    RT_TOS(tos),
				    tunnel->parms.link);
	if (IS_ERR(rt)) {
		dev->stats.tx_carrier_errors++;
		goto tx_error;
	}
	tdev = rt->dst.dev;

	if (tdev == dev) {
		ip_rt_put(rt);
		dev->stats.collisions++;
		goto tx_error;
	}

	if (tnl_update_pmtu(dev, skb, rt, tnl_params->frag_off)) {
		ip_rt_put(rt);
		goto tx_error;
	}

	if (tunnel->err_count > 0) {
		if (time_before(jiffies,
				tunnel->err_time + IPTUNNEL_ERR_TIMEO)) {
			tunnel->err_count--;

			memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
			dst_link_failure(skb);
		} else
			tunnel->err_count = 0;
	}

	ttl = tnl_params->ttl;
	if (ttl == 0) {
		if (skb->protocol == htons(ETH_P_IP))
			ttl = inner_iph->ttl;
#if IS_ENABLED(CONFIG_IPV6)
		else if (skb->protocol == htons(ETH_P_IPV6))
			ttl = ((const struct ipv6hdr *)inner_iph)->hop_limit;
#endif
		else
			ttl = ip4_dst_hoplimit(&rt->dst);
	}

	df = tnl_params->frag_off;
	if (skb->protocol == htons(ETH_P_IP))
		df |= (inner_iph->frag_off&htons(IP_DF));

	max_headroom = LL_RESERVED_SPACE(tdev) + sizeof(struct iphdr)
					       + rt->dst.header_len;
	if (max_headroom > dev->needed_headroom)
		dev->needed_headroom = max_headroom;

	if (skb_cow_head(skb, dev->needed_headroom)) {
		dev->stats.tx_dropped++;
		dev_kfree_skb(skb);
		return;
	}

	skb_dst_drop(skb);
	skb_dst_set(skb, &rt->dst);

	/* Push down and install the IP header. */
	skb_push(skb, sizeof(struct iphdr));
	skb_reset_network_header(skb);

	iph = ip_hdr(skb);
	inner_iph = (const struct iphdr *)skb_inner_network_header(skb);

	iph->version = 4;
	iph->ihl = sizeof(struct iphdr) >> 2;
	iph->frag_off = df;
	iph->protocol = tnl_params->protocol;
	iph->tos = ip_tunnel_ecn_encap(tos, inner_iph, skb);
	iph->daddr = fl4.daddr;
	iph->saddr = fl4.saddr;
	iph->ttl = ttl;
	__ip_select_ident(iph, &rt->dst, (skb_shinfo(skb)->gso_segs ?: 1) - 1);

	iptunnel_xmit(skb, dev);
	return;

#if IS_ENABLED(CONFIG_IPV6)
tx_error_icmp:
	dst_link_failure(skb);
#endif
tx_error:
	dev->stats.tx_errors++;
	dev_kfree_skb(skb);
}
EXPORT_SYMBOL_GPL(ip_tunnel_xmit);

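/* Changing addresses or keys moves the tunnel to a different hash bucket,
 * so it is unlinked with ip_tunnel_del() and re-added after the new
 * parameters have been copied in.
 */
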
static void ip_tunnel_update(struct ip_tunnel_net *itn,
			     struct ip_tunnel *t,
			     struct net_device *dev,
			     struct ip_tunnel_parm *p,
			     bool set_mtu)
{
	ip_tunnel_del(t);
	t->parms.iph.saddr = p->iph.saddr;
	t->parms.iph.daddr = p->iph.daddr;
	t->parms.i_key = p->i_key;
	t->parms.o_key = p->o_key;
	if (dev->type != ARPHRD_ETHER) {
		memcpy(dev->dev_addr, &p->iph.saddr, 4);
		memcpy(dev->broadcast, &p->iph.daddr, 4);
	}
	ip_tunnel_add(itn, t);

	t->parms.iph.ttl = p->iph.ttl;
	t->parms.iph.tos = p->iph.tos;
	t->parms.iph.frag_off = p->iph.frag_off;

	if (t->parms.link != p->link) {
		int mtu;

		t->parms.link = p->link;
		mtu = ip_tunnel_bind_dev(dev);
		if (set_mtu)
			dev->mtu = mtu;
	}
	netdev_state_change(dev);
}

int ip_tunnel_ioctl(struct net_device *dev, struct ip_tunnel_parm *p, int cmd)
{
	int err = 0;
	struct ip_tunnel *t;
	struct net *net = dev_net(dev);
	struct ip_tunnel *tunnel = netdev_priv(dev);
	struct ip_tunnel_net *itn = net_generic(net, tunnel->ip_tnl_net_id);

	BUG_ON(!itn->fb_tunnel_dev);
	switch (cmd) {
	case SIOCGETTUNNEL:
		t = NULL;
		if (dev == itn->fb_tunnel_dev)
			t = ip_tunnel_find(itn, p, itn->fb_tunnel_dev->type);
		if (t == NULL)
			t = netdev_priv(dev);
		memcpy(p, &t->parms, sizeof(*p));
		break;

	case SIOCADDTUNNEL:
	case SIOCCHGTUNNEL:
		err = -EPERM;
		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
			goto done;
		if (p->iph.ttl)
			p->iph.frag_off |= htons(IP_DF);
		if (!(p->i_flags&TUNNEL_KEY))
			p->i_key = 0;
		if (!(p->o_flags&TUNNEL_KEY))
			p->o_key = 0;

		t = ip_tunnel_find(itn, p, itn->fb_tunnel_dev->type);

		if (!t && (cmd == SIOCADDTUNNEL))
			t = ip_tunnel_create(net, itn, p);

		if (dev != itn->fb_tunnel_dev && cmd == SIOCCHGTUNNEL) {
			if (t != NULL) {
				if (t->dev != dev) {
					err = -EEXIST;
					break;
				}
			} else {
				unsigned int nflags = 0;

				if (ipv4_is_multicast(p->iph.daddr))
					nflags = IFF_BROADCAST;
				else if (p->iph.daddr)
					nflags = IFF_POINTOPOINT;

				if ((dev->flags^nflags)&(IFF_POINTOPOINT|IFF_BROADCAST)) {
					err = -EINVAL;
					break;
				}

				t = netdev_priv(dev);
			}
		}

		if (t) {
			err = 0;
			ip_tunnel_update(itn, t, dev, p, true);
		} else
			err = (cmd == SIOCADDTUNNEL ? -ENOBUFS : -ENOENT);
		break;

	case SIOCDELTUNNEL:
		err = -EPERM;
		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
			goto done;

		if (dev == itn->fb_tunnel_dev) {
			err = -ENOENT;
			t = ip_tunnel_find(itn, p, itn->fb_tunnel_dev->type);
			if (t == NULL)
				goto done;
			err = -EPERM;
			if (t == netdev_priv(itn->fb_tunnel_dev))
				goto done;
			dev = t->dev;
		}
		unregister_netdevice(dev);
		err = 0;
		break;

	default:
		err = -EINVAL;
	}

done:
	return err;
}
EXPORT_SYMBOL_GPL(ip_tunnel_ioctl);

int ip_tunnel_change_mtu(struct net_device *dev, int new_mtu)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	int t_hlen = tunnel->hlen + sizeof(struct iphdr);

	if (new_mtu < 68 ||
	    new_mtu > 0xFFF8 - dev->hard_header_len - t_hlen)
		return -EINVAL;
	dev->mtu = new_mtu;
	return 0;
}
EXPORT_SYMBOL_GPL(ip_tunnel_change_mtu);

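/* netdev destructor, installed as dev->destructor in ip_tunnel_init():
 * releases the GRO cells and per-cpu stats before freeing the device.
 */
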
static void ip_tunnel_dev_free(struct net_device *dev)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);

	gro_cells_destroy(&tunnel->gro_cells);
	free_percpu(dev->tstats);
	free_netdev(dev);
}

void ip_tunnel_dellink(struct net_device *dev, struct list_head *head)
{
	struct net *net = dev_net(dev);
	struct ip_tunnel *tunnel = netdev_priv(dev);
	struct ip_tunnel_net *itn;

	itn = net_generic(net, tunnel->ip_tnl_net_id);

	if (itn->fb_tunnel_dev != dev) {
		ip_tunnel_del(netdev_priv(dev));
		unregister_netdevice_queue(dev, head);
	}
}
EXPORT_SYMBOL_GPL(ip_tunnel_dellink);

int ip_tunnel_init_net(struct net *net, int ip_tnl_net_id,
		       struct rtnl_link_ops *ops, char *devname)
{
	struct ip_tunnel_net *itn = net_generic(net, ip_tnl_net_id);
	struct ip_tunnel_parm parms;

	itn->tunnels = kzalloc(IP_TNL_HASH_SIZE * sizeof(struct hlist_head), GFP_KERNEL);
	if (!itn->tunnels)
		return -ENOMEM;

	if (!ops) {
		itn->fb_tunnel_dev = NULL;
		return 0;
	}
	memset(&parms, 0, sizeof(parms));
	if (devname)
		strlcpy(parms.name, devname, IFNAMSIZ);

	rtnl_lock();
	itn->fb_tunnel_dev = __ip_tunnel_create(net, ops, &parms);
	rtnl_unlock();
	if (IS_ERR(itn->fb_tunnel_dev)) {
		kfree(itn->tunnels);
		return PTR_ERR(itn->fb_tunnel_dev);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(ip_tunnel_init_net);

static void ip_tunnel_destroy(struct ip_tunnel_net *itn, struct list_head *head)
{
	int h;

	for (h = 0; h < IP_TNL_HASH_SIZE; h++) {
		struct ip_tunnel *t;
		struct hlist_node *n;
		struct hlist_head *thead = &itn->tunnels[h];

		hlist_for_each_entry_safe(t, n, thead, hash_node)
			unregister_netdevice_queue(t->dev, head);
	}
	if (itn->fb_tunnel_dev)
		unregister_netdevice_queue(itn->fb_tunnel_dev, head);
}

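/* Called on netns teardown: queue every tunnel, including the fallback
 * device, for unregistration in one batch.
 */
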
void ip_tunnel_delete_net(struct ip_tunnel_net *itn)
{
	LIST_HEAD(list);

	rtnl_lock();
	ip_tunnel_destroy(itn, &list);
	unregister_netdevice_many(&list);
	rtnl_unlock();
	kfree(itn->tunnels);
}
EXPORT_SYMBOL_GPL(ip_tunnel_delete_net);

int ip_tunnel_newlink(struct net_device *dev, struct nlattr *tb[],
		      struct ip_tunnel_parm *p)
{
	struct ip_tunnel *nt;
	struct net *net = dev_net(dev);
	struct ip_tunnel_net *itn;
	int mtu;
	int err;

	nt = netdev_priv(dev);
	itn = net_generic(net, nt->ip_tnl_net_id);

	if (ip_tunnel_find(itn, p, dev->type))
		return -EEXIST;

	nt->parms = *p;
	err = register_netdevice(dev);
	if (err)
		goto out;

	if (dev->type == ARPHRD_ETHER && !tb[IFLA_ADDRESS])
		eth_hw_addr_random(dev);

	mtu = ip_tunnel_bind_dev(dev);
	if (!tb[IFLA_MTU])
		dev->mtu = mtu;

	ip_tunnel_add(itn, nt);

out:
	return err;
}
EXPORT_SYMBOL_GPL(ip_tunnel_newlink);

int ip_tunnel_changelink(struct net_device *dev, struct nlattr *tb[],
			 struct ip_tunnel_parm *p)
{
	struct ip_tunnel *t, *nt;
	struct net *net = dev_net(dev);
	struct ip_tunnel *tunnel = netdev_priv(dev);
	struct ip_tunnel_net *itn = net_generic(net, tunnel->ip_tnl_net_id);

	if (dev == itn->fb_tunnel_dev)
		return -EINVAL;

	nt = netdev_priv(dev);

	t = ip_tunnel_find(itn, p, dev->type);

	if (t) {
		if (t->dev != dev)
			return -EEXIST;
	} else {
		t = nt;

		if (dev->type != ARPHRD_ETHER) {
			unsigned int nflags = 0;

			if (ipv4_is_multicast(p->iph.daddr))
				nflags = IFF_BROADCAST;
			else if (p->iph.daddr)
				nflags = IFF_POINTOPOINT;

			if ((dev->flags ^ nflags) &
			    (IFF_POINTOPOINT | IFF_BROADCAST))
				return -EINVAL;
		}
	}

	ip_tunnel_update(itn, t, dev, p, !tb[IFLA_MTU]);
	return 0;
}
EXPORT_SYMBOL_GPL(ip_tunnel_changelink);

int ip_tunnel_init(struct net_device *dev)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	struct iphdr *iph = &tunnel->parms.iph;
	int err;

	dev->destructor = ip_tunnel_dev_free;
	dev->tstats = alloc_percpu(struct pcpu_tstats);
	if (!dev->tstats)
		return -ENOMEM;

	err = gro_cells_init(&tunnel->gro_cells, dev);
	if (err) {
		free_percpu(dev->tstats);
		return err;
	}

	tunnel->dev = dev;
	strcpy(tunnel->parms.name, dev->name);
	iph->version = 4;
	iph->ihl = 5;

	return 0;
}
EXPORT_SYMBOL_GPL(ip_tunnel_init);

void ip_tunnel_uninit(struct net_device *dev)
{
	struct net *net = dev_net(dev);
	struct ip_tunnel *tunnel = netdev_priv(dev);
	struct ip_tunnel_net *itn;

	itn = net_generic(net, tunnel->ip_tnl_net_id);
	/* fb_tunnel_dev will be unregistered in the net-exit call. */
	if (itn->fb_tunnel_dev != dev)
		ip_tunnel_del(netdev_priv(dev));
}
EXPORT_SYMBOL_GPL(ip_tunnel_uninit);

/* Do the minimum required initialization; the rest is done in the tunnel_init call */
void ip_tunnel_setup(struct net_device *dev, int net_id)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	tunnel->ip_tnl_net_id = net_id;
}
EXPORT_SYMBOL_GPL(ip_tunnel_setup);

MODULE_LICENSE("GPL");