/*
 *      Linux NET3: IP/IP protocol decoder modified to support
 *                  virtual tunnel interface
 *
 *      Authors:
 *              Saurabh Mohan (saurabh.mohan@vyatta.com) 05/07/2012
 *
 *      This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License
 *      as published by the Free Software Foundation; either version
 *      2 of the License, or (at your option) any later version.
 */

/*
   This version of net/ipv4/ip_vti.c is a clone of net/ipv4/ipip.c.

   For comments look at net/ipv4/ip_gre.c --ANK
 */
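
/*
 * Example usage (assuming an iproute2 build with VTI support; the device
 * name, addresses and key values below are purely illustrative):
 *
 *      ip tunnel add vti1 mode vti local 172.16.0.1 remote 172.16.0.2 \
 *              ikey 10 okey 11
 *      ip link set vti1 up
 *
 * Traffic through the device is matched against xfrm policies using the
 * tunnel's o_key as the skb mark (see vti_rcv() and vti_tunnel_xmit()).
 */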

#include <linux/capability.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/uaccess.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/in.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/if_arp.h>
#include <linux/mroute.h>
#include <linux/init.h>
#include <linux/netfilter_ipv4.h>
#include <linux/if_ether.h>

#include <net/sock.h>
#include <net/ip.h>
#include <net/icmp.h>
#include <net/ip_tunnels.h>
#include <net/inet_ecn.h>
#include <net/xfrm.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
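
/* Tunnels are kept in four hash tables, keyed by how much of the
 * (remote, local) address pair is specified: both, remote only, local
 * only, or neither (wildcard). HASH() folds an IPv4 address into a
 * HASH_SIZE-sized bucket index.
 */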

#define HASH_SIZE  16
#define HASH(addr) (((__force u32)addr ^ ((__force u32)addr >> 4)) & (HASH_SIZE - 1))

static struct rtnl_link_ops vti_link_ops __read_mostly;

static int vti_net_id __read_mostly;
struct vti_net {
        struct ip_tunnel __rcu *tunnels_r_l[HASH_SIZE];
        struct ip_tunnel __rcu *tunnels_r[HASH_SIZE];
        struct ip_tunnel __rcu *tunnels_l[HASH_SIZE];
        struct ip_tunnel __rcu *tunnels_wc[1];
        struct ip_tunnel __rcu **tunnels[4];

        struct net_device *fb_tunnel_dev;
};

static int vti_fb_tunnel_init(struct net_device *dev);
static int vti_tunnel_init(struct net_device *dev);
static void vti_tunnel_setup(struct net_device *dev);
static void vti_dev_free(struct net_device *dev);
static int vti_tunnel_bind_dev(struct net_device *dev);
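
/* Transmit accounting helper: hand the skb to dst_output() and, on
 * success, account the packet in the per-cpu stats (stats1); on failure
 * bump the netdev error counters (stats2).
 */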

#define VTI_XMIT(stats1, stats2) do {                           \
        int err;                                                \
        int pkt_len = skb->len;                                 \
        err = dst_output(skb);                                  \
        if (net_xmit_eval(err) == 0) {                          \
                u64_stats_update_begin(&(stats1)->syncp);       \
                (stats1)->tx_bytes += pkt_len;                  \
                (stats1)->tx_packets++;                         \
                u64_stats_update_end(&(stats1)->syncp);         \
        } else {                                                \
                (stats2)->tx_errors++;                          \
                (stats2)->tx_aborted_errors++;                  \
        }                                                       \
} while (0)
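
/* Receive-side lookup, most-specific match first: (remote, local) pair,
 * then remote only, then local only, then wildcard. Only devices that
 * are IFF_UP are eligible. Called under RCU.
 */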

static struct ip_tunnel *vti_tunnel_lookup(struct net *net,
                                           __be32 remote, __be32 local)
{
        unsigned h0 = HASH(remote);
        unsigned h1 = HASH(local);
        struct ip_tunnel *t;
        struct vti_net *ipn = net_generic(net, vti_net_id);

        for_each_ip_tunnel_rcu(t, ipn->tunnels_r_l[h0 ^ h1])
                if (local == t->parms.iph.saddr &&
                    remote == t->parms.iph.daddr && (t->dev->flags & IFF_UP))
                        return t;
        for_each_ip_tunnel_rcu(t, ipn->tunnels_r[h0])
                if (remote == t->parms.iph.daddr && (t->dev->flags & IFF_UP))
                        return t;

        for_each_ip_tunnel_rcu(t, ipn->tunnels_l[h1])
                if (local == t->parms.iph.saddr && (t->dev->flags & IFF_UP))
                        return t;

        for_each_ip_tunnel_rcu(t, ipn->tunnels_wc[0])
                if (t && (t->dev->flags & IFF_UP))
                        return t;
        return NULL;
}
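
/* Select the hash bucket for a set of tunnel parameters. prio encodes
 * which endpoint addresses are set (bit 1: remote, bit 0: local) and
 * picks one of the four tables wired up in vti_init_net().
 */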

static struct ip_tunnel __rcu **__vti_bucket(struct vti_net *ipn,
                                             struct ip_tunnel_parm *parms)
{
        __be32 remote = parms->iph.daddr;
        __be32 local = parms->iph.saddr;
        unsigned h = 0;
        int prio = 0;

        if (remote) {
                prio |= 2;
                h ^= HASH(remote);
        }
        if (local) {
                prio |= 1;
                h ^= HASH(local);
        }
        return &ipn->tunnels[prio][h];
}

static inline struct ip_tunnel __rcu **vti_bucket(struct vti_net *ipn,
                                                  struct ip_tunnel *t)
{
        return __vti_bucket(ipn, &t->parms);
}

static void vti_tunnel_unlink(struct vti_net *ipn, struct ip_tunnel *t)
{
        struct ip_tunnel __rcu **tp;
        struct ip_tunnel *iter;

        for (tp = vti_bucket(ipn, t);
             (iter = rtnl_dereference(*tp)) != NULL;
             tp = &iter->next) {
                if (t == iter) {
                        rcu_assign_pointer(*tp, t->next);
                        break;
                }
        }
}

static void vti_tunnel_link(struct vti_net *ipn, struct ip_tunnel *t)
{
        struct ip_tunnel __rcu **tp = vti_bucket(ipn, t);

        rcu_assign_pointer(t->next, rtnl_dereference(*tp));
        rcu_assign_pointer(*tp, t);
}

static struct ip_tunnel *vti_tunnel_locate(struct net *net,
                                           struct ip_tunnel_parm *parms,
                                           int create)
{
        __be32 remote = parms->iph.daddr;
        __be32 local = parms->iph.saddr;
        struct ip_tunnel *t, *nt;
        struct ip_tunnel __rcu **tp;
        struct net_device *dev;
        char name[IFNAMSIZ];
        struct vti_net *ipn = net_generic(net, vti_net_id);

        for (tp = __vti_bucket(ipn, parms);
             (t = rtnl_dereference(*tp)) != NULL;
             tp = &t->next) {
                if (local == t->parms.iph.saddr && remote == t->parms.iph.daddr)
                        return t;
        }
        if (!create)
                return NULL;

        if (parms->name[0])
                strlcpy(name, parms->name, IFNAMSIZ);
        else
                strcpy(name, "vti%d");

        dev = alloc_netdev(sizeof(*t), name, vti_tunnel_setup);
        if (dev == NULL)
                return NULL;

        dev_net_set(dev, net);
        nt = netdev_priv(dev);
        nt->parms = *parms;
        dev->rtnl_link_ops = &vti_link_ops;

        vti_tunnel_bind_dev(dev);

        if (register_netdevice(dev) < 0)
                goto failed_free;

        dev_hold(dev);
        vti_tunnel_link(ipn, nt);
        return nt;

failed_free:
        free_netdev(dev);
        return NULL;
}

static void vti_tunnel_uninit(struct net_device *dev)
{
        struct net *net = dev_net(dev);
        struct vti_net *ipn = net_generic(net, vti_net_id);

        vti_tunnel_unlink(ipn, netdev_priv(dev));
        dev_put(dev);
}

static int vti_err(struct sk_buff *skb, u32 info)
{
        /* All the routers (except for Linux) return only
         * 8 bytes of packet payload. It means, that precise relaying of
         * ICMP in the real Internet is absolutely infeasible.
         */
        struct iphdr *iph = (struct iphdr *)skb->data;
        const int type = icmp_hdr(skb)->type;
        const int code = icmp_hdr(skb)->code;
        struct ip_tunnel *t;
        int err;

        switch (type) {
        default:
        case ICMP_PARAMETERPROB:
                return 0;

        case ICMP_DEST_UNREACH:
                switch (code) {
                case ICMP_SR_FAILED:
                case ICMP_PORT_UNREACH:
                        /* Impossible event. */
                        return 0;
                default:
                        /* All others are translated to HOST_UNREACH. */
                        return 0;
                }
                break;
        case ICMP_TIME_EXCEEDED:
                if (code != ICMP_EXC_TTL)
                        return 0;
                break;
        }

        err = -ENOENT;

        t = vti_tunnel_lookup(dev_net(skb->dev), iph->daddr, iph->saddr);
        if (t == NULL)
                goto out;

        if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED) {
                ipv4_update_pmtu(skb, dev_net(skb->dev), info,
                                 t->parms.link, 0, IPPROTO_IPIP, 0);
                err = 0;
                goto out;
        }

        err = 0;
        if (t->parms.iph.ttl == 0 && type == ICMP_TIME_EXCEEDED)
                goto out;

        if (time_before(jiffies, t->err_time + IPTUNNEL_ERR_TIMEO))
                t->err_count++;
        else
                t->err_count = 1;
        t->err_time = jiffies;
out:
        return err;
}

/* We don't digest the packet, therefore let the packet pass */
static int vti_rcv(struct sk_buff *skb)
{
        struct ip_tunnel *tunnel;
        const struct iphdr *iph = ip_hdr(skb);

        tunnel = vti_tunnel_lookup(dev_net(skb->dev), iph->saddr, iph->daddr);
        if (tunnel != NULL) {
                struct pcpu_tstats *tstats;
                u32 oldmark = skb->mark;
                int ret;

                /* temporarily mark the skb with the tunnel o_key, to
                 * only match policies with this mark.
                 */
                skb->mark = be32_to_cpu(tunnel->parms.o_key);
                ret = xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb);
                skb->mark = oldmark;
                if (!ret)
                        return -1;

                tstats = this_cpu_ptr(tunnel->dev->tstats);
                u64_stats_update_begin(&tstats->syncp);
                tstats->rx_packets++;
                tstats->rx_bytes += skb->len;
                u64_stats_update_end(&tstats->syncp);

                secpath_reset(skb);
                skb->dev = tunnel->dev;
                return 1;
        }

        return -1;
}

/* This function assumes it is being called from dev_queue_xmit()
 * and that skb is filled properly by that function.
 */
static netdev_tx_t vti_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct ip_tunnel *tunnel = netdev_priv(dev);
        struct pcpu_tstats *tstats;
        struct iphdr *tiph = &tunnel->parms.iph;
        u8 tos;
        struct rtable *rt;              /* Route to the other host */
        struct net_device *tdev;        /* Device to other host */
        struct iphdr *old_iph = ip_hdr(skb);
        __be32 dst = tiph->daddr;
        struct flowi4 fl4;

        if (skb->protocol != htons(ETH_P_IP))
                goto tx_error;

        tos = old_iph->tos;

        memset(&fl4, 0, sizeof(fl4));
        flowi4_init_output(&fl4, tunnel->parms.link,
                           be32_to_cpu(tunnel->parms.o_key), RT_TOS(tos),
                           RT_SCOPE_UNIVERSE,
                           IPPROTO_IPIP, 0,
                           dst, tiph->saddr, 0, 0);
        rt = ip_route_output_key(dev_net(dev), &fl4);
        if (IS_ERR(rt)) {
                dev->stats.tx_carrier_errors++;
                goto tx_error_icmp;
        }
        /* if there is no transform then this tunnel is not functional.
         * Or if the xfrm is not mode tunnel.
         */
        if (!rt->dst.xfrm ||
            rt->dst.xfrm->props.mode != XFRM_MODE_TUNNEL) {
                dev->stats.tx_carrier_errors++;
                ip_rt_put(rt);
                goto tx_error_icmp;
        }
        tdev = rt->dst.dev;

        if (tdev == dev) {
                ip_rt_put(rt);
                dev->stats.collisions++;
                goto tx_error;
        }

        if (tunnel->err_count > 0) {
                if (time_before(jiffies,
                                tunnel->err_time + IPTUNNEL_ERR_TIMEO)) {
                        tunnel->err_count--;
                        dst_link_failure(skb);
                } else
                        tunnel->err_count = 0;
        }

        memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
        skb_dst_drop(skb);
        skb_dst_set(skb, &rt->dst);
        nf_reset(skb);
        skb->dev = skb_dst(skb)->dev;

        tstats = this_cpu_ptr(dev->tstats);
        VTI_XMIT(tstats, &dev->stats);
        return NETDEV_TX_OK;

tx_error_icmp:
        dst_link_failure(skb);
tx_error:
        dev->stats.tx_errors++;
        dev_kfree_skb(skb);
        return NETDEV_TX_OK;
}

static int vti_tunnel_bind_dev(struct net_device *dev)
{
        struct net_device *tdev = NULL;
        struct ip_tunnel *tunnel;
        struct iphdr *iph;

        tunnel = netdev_priv(dev);
        iph = &tunnel->parms.iph;

        if (iph->daddr) {
                struct rtable *rt;
                struct flowi4 fl4;

                memset(&fl4, 0, sizeof(fl4));
                flowi4_init_output(&fl4, tunnel->parms.link,
                                   be32_to_cpu(tunnel->parms.i_key),
                                   RT_TOS(iph->tos), RT_SCOPE_UNIVERSE,
                                   IPPROTO_IPIP, 0,
                                   iph->daddr, iph->saddr, 0, 0);
                rt = ip_route_output_key(dev_net(dev), &fl4);
                if (!IS_ERR(rt)) {
                        tdev = rt->dst.dev;
                        ip_rt_put(rt);
                }
                dev->flags |= IFF_POINTOPOINT;
        }

        if (!tdev && tunnel->parms.link)
                tdev = __dev_get_by_index(dev_net(dev), tunnel->parms.link);

        if (tdev) {
                dev->hard_header_len = tdev->hard_header_len +
                                       sizeof(struct iphdr);
                dev->mtu = tdev->mtu;
        }
        dev->iflink = tunnel->parms.link;
        return dev->mtu;
}
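
/* Tunnel management via ioctl: SIOCGETTUNNEL, SIOCADDTUNNEL,
 * SIOCCHGTUNNEL and SIOCDELTUNNEL. Everything except GET requires
 * CAP_NET_ADMIN in the netns's owning user namespace.
 */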

static int
vti_tunnel_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
        int err = 0;
        struct ip_tunnel_parm p;
        struct ip_tunnel *t;
        struct net *net = dev_net(dev);
        struct vti_net *ipn = net_generic(net, vti_net_id);

        switch (cmd) {
        case SIOCGETTUNNEL:
                t = NULL;
                if (dev == ipn->fb_tunnel_dev) {
                        if (copy_from_user(&p, ifr->ifr_ifru.ifru_data,
                                           sizeof(p))) {
                                err = -EFAULT;
                                break;
                        }
                        t = vti_tunnel_locate(net, &p, 0);
                }
                if (t == NULL)
                        t = netdev_priv(dev);
                memcpy(&p, &t->parms, sizeof(p));
                p.i_flags |= GRE_KEY | VTI_ISVTI;
                p.o_flags |= GRE_KEY;
                if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p)))
                        err = -EFAULT;
                break;

        case SIOCADDTUNNEL:
        case SIOCCHGTUNNEL:
                err = -EPERM;
                if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
                        goto done;

                err = -EFAULT;
                if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))
                        goto done;

                err = -EINVAL;
                if (p.iph.version != 4 || p.iph.protocol != IPPROTO_IPIP ||
                    p.iph.ihl != 5)
                        goto done;

                t = vti_tunnel_locate(net, &p, cmd == SIOCADDTUNNEL);

                if (dev != ipn->fb_tunnel_dev && cmd == SIOCCHGTUNNEL) {
                        if (t != NULL) {
                                if (t->dev != dev) {
                                        err = -EEXIST;
                                        break;
                                }
                        } else {
                                if (((dev->flags & IFF_POINTOPOINT) &&
                                     !p.iph.daddr) ||
                                    (!(dev->flags & IFF_POINTOPOINT) &&
                                     p.iph.daddr)) {
                                        err = -EINVAL;
                                        break;
                                }
                                t = netdev_priv(dev);
                                vti_tunnel_unlink(ipn, t);
                                synchronize_net();
                                t->parms.iph.saddr = p.iph.saddr;
                                t->parms.iph.daddr = p.iph.daddr;
                                t->parms.i_key = p.i_key;
                                t->parms.o_key = p.o_key;
                                t->parms.iph.protocol = IPPROTO_IPIP;
                                memcpy(dev->dev_addr, &p.iph.saddr, 4);
                                memcpy(dev->broadcast, &p.iph.daddr, 4);
                                vti_tunnel_link(ipn, t);
                                netdev_state_change(dev);
                        }
                }

                if (t) {
                        err = 0;
                        if (cmd == SIOCCHGTUNNEL) {
                                t->parms.i_key = p.i_key;
                                t->parms.o_key = p.o_key;
                                if (t->parms.link != p.link) {
                                        t->parms.link = p.link;
                                        vti_tunnel_bind_dev(dev);
                                        netdev_state_change(dev);
                                }
                        }
                        p.i_flags |= GRE_KEY | VTI_ISVTI;
                        p.o_flags |= GRE_KEY;
                        if (copy_to_user(ifr->ifr_ifru.ifru_data, &t->parms,
                                         sizeof(p)))
                                err = -EFAULT;
                } else
                        err = (cmd == SIOCADDTUNNEL ? -ENOBUFS : -ENOENT);
                break;

        case SIOCDELTUNNEL:
                err = -EPERM;
                if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
                        goto done;

                if (dev == ipn->fb_tunnel_dev) {
                        err = -EFAULT;
                        if (copy_from_user(&p, ifr->ifr_ifru.ifru_data,
                                           sizeof(p)))
                                goto done;
                        err = -ENOENT;

                        t = vti_tunnel_locate(net, &p, 0);
                        if (t == NULL)
                                goto done;
                        err = -EPERM;
                        if (t->dev == ipn->fb_tunnel_dev)
                                goto done;
                        dev = t->dev;
                }
                unregister_netdevice(dev);
                err = 0;
                break;

        default:
                err = -EINVAL;
        }

done:
        return err;
}

static int vti_tunnel_change_mtu(struct net_device *dev, int new_mtu)
{
        if (new_mtu < 68 || new_mtu > 0xFFF8)
                return -EINVAL;
        dev->mtu = new_mtu;
        return 0;
}

static const struct net_device_ops vti_netdev_ops = {
        .ndo_init        = vti_tunnel_init,
        .ndo_uninit      = vti_tunnel_uninit,
        .ndo_start_xmit  = vti_tunnel_xmit,
        .ndo_do_ioctl    = vti_tunnel_ioctl,
        .ndo_change_mtu  = vti_tunnel_change_mtu,
        .ndo_get_stats64 = ip_tunnel_get_stats64,
};

static void vti_dev_free(struct net_device *dev)
{
        free_percpu(dev->tstats);
        free_netdev(dev);
}

static void vti_tunnel_setup(struct net_device *dev)
{
        dev->netdev_ops         = &vti_netdev_ops;
        dev->destructor         = vti_dev_free;

        dev->type               = ARPHRD_TUNNEL;
        dev->hard_header_len    = LL_MAX_HEADER + sizeof(struct iphdr);
        dev->mtu                = ETH_DATA_LEN;
        dev->flags              = IFF_NOARP;
        dev->iflink             = 0;
        dev->addr_len           = 4;
        dev->features           |= NETIF_F_NETNS_LOCAL;
        dev->features           |= NETIF_F_LLTX;
        dev->priv_flags         &= ~IFF_XMIT_DST_RELEASE;
}

static int vti_tunnel_init(struct net_device *dev)
{
        struct ip_tunnel *tunnel = netdev_priv(dev);

        tunnel->dev = dev;
        strcpy(tunnel->parms.name, dev->name);

        memcpy(dev->dev_addr, &tunnel->parms.iph.saddr, 4);
        memcpy(dev->broadcast, &tunnel->parms.iph.daddr, 4);

        dev->tstats = alloc_percpu(struct pcpu_tstats);
        if (!dev->tstats)
                return -ENOMEM;

        return 0;
}

static int __net_init vti_fb_tunnel_init(struct net_device *dev)
{
        struct ip_tunnel *tunnel = netdev_priv(dev);
        struct iphdr *iph = &tunnel->parms.iph;
        struct vti_net *ipn = net_generic(dev_net(dev), vti_net_id);

        iph->version            = 4;
        iph->protocol           = IPPROTO_IPIP;
        iph->ihl                = 5;

        dev_hold(dev);
        rcu_assign_pointer(ipn->tunnels_wc[0], tunnel);
        return 0;
}

static struct xfrm_tunnel vti_handler __read_mostly = {
        .handler        = vti_rcv,
        .err_handler    = vti_err,
        .priority       = 1,
};

static void vti_destroy_tunnels(struct vti_net *ipn, struct list_head *head)
{
        int prio;

        for (prio = 1; prio < 4; prio++) {
                int h;

                for (h = 0; h < HASH_SIZE; h++) {
                        struct ip_tunnel *t;

                        t = rtnl_dereference(ipn->tunnels[prio][h]);
                        while (t != NULL) {
                                unregister_netdevice_queue(t->dev, head);
                                t = rtnl_dereference(t->next);
                        }
                }
        }
}

static int __net_init vti_init_net(struct net *net)
{
        int err;
        struct vti_net *ipn = net_generic(net, vti_net_id);

        ipn->tunnels[0] = ipn->tunnels_wc;
        ipn->tunnels[1] = ipn->tunnels_l;
        ipn->tunnels[2] = ipn->tunnels_r;
        ipn->tunnels[3] = ipn->tunnels_r_l;

        ipn->fb_tunnel_dev = alloc_netdev(sizeof(struct ip_tunnel),
                                          "ip_vti0",
                                          vti_tunnel_setup);
        if (!ipn->fb_tunnel_dev) {
                err = -ENOMEM;
                goto err_alloc_dev;
        }
        dev_net_set(ipn->fb_tunnel_dev, net);

        err = vti_fb_tunnel_init(ipn->fb_tunnel_dev);
        if (err)
                goto err_reg_dev;
        ipn->fb_tunnel_dev->rtnl_link_ops = &vti_link_ops;

        err = register_netdev(ipn->fb_tunnel_dev);
        if (err)
                goto err_reg_dev;
        return 0;

err_reg_dev:
        vti_dev_free(ipn->fb_tunnel_dev);
err_alloc_dev:
        return err;
}

static void __net_exit vti_exit_net(struct net *net)
{
        struct vti_net *ipn = net_generic(net, vti_net_id);
        LIST_HEAD(list);

        rtnl_lock();
        vti_destroy_tunnels(ipn, &list);
        unregister_netdevice_many(&list);
        rtnl_unlock();
}

static struct pernet_operations vti_net_ops = {
        .init = vti_init_net,
        .exit = vti_exit_net,
        .id   = &vti_net_id,
        .size = sizeof(struct vti_net),
};

static int vti_tunnel_validate(struct nlattr *tb[], struct nlattr *data[])
{
        return 0;
}

static void vti_netlink_parms(struct nlattr *data[],
                              struct ip_tunnel_parm *parms)
{
        memset(parms, 0, sizeof(*parms));

        parms->iph.protocol = IPPROTO_IPIP;

        if (!data)
                return;

        if (data[IFLA_VTI_LINK])
                parms->link = nla_get_u32(data[IFLA_VTI_LINK]);

        if (data[IFLA_VTI_IKEY])
                parms->i_key = nla_get_be32(data[IFLA_VTI_IKEY]);

        if (data[IFLA_VTI_OKEY])
                parms->o_key = nla_get_be32(data[IFLA_VTI_OKEY]);

        if (data[IFLA_VTI_LOCAL])
                parms->iph.saddr = nla_get_be32(data[IFLA_VTI_LOCAL]);

        if (data[IFLA_VTI_REMOTE])
                parms->iph.daddr = nla_get_be32(data[IFLA_VTI_REMOTE]);
}

static int vti_newlink(struct net *src_net, struct net_device *dev,
                       struct nlattr *tb[], struct nlattr *data[])
{
        struct ip_tunnel *nt;
        struct net *net = dev_net(dev);
        struct vti_net *ipn = net_generic(net, vti_net_id);
        int mtu;
        int err;

        nt = netdev_priv(dev);
        vti_netlink_parms(data, &nt->parms);

        if (vti_tunnel_locate(net, &nt->parms, 0))
                return -EEXIST;

        mtu = vti_tunnel_bind_dev(dev);
        if (!tb[IFLA_MTU])
                dev->mtu = mtu;

        err = register_netdevice(dev);
        if (err)
                goto out;

        dev_hold(dev);
        vti_tunnel_link(ipn, nt);

out:
        return err;
}

static int vti_changelink(struct net_device *dev, struct nlattr *tb[],
                          struct nlattr *data[])
{
        struct ip_tunnel *t, *nt;
        struct net *net = dev_net(dev);
        struct vti_net *ipn = net_generic(net, vti_net_id);
        struct ip_tunnel_parm p;
        int mtu;

        if (dev == ipn->fb_tunnel_dev)
                return -EINVAL;

        nt = netdev_priv(dev);
        vti_netlink_parms(data, &p);

        t = vti_tunnel_locate(net, &p, 0);

        if (t) {
                if (t->dev != dev)
                        return -EEXIST;
        } else {
                t = nt;

                vti_tunnel_unlink(ipn, t);
                t->parms.iph.saddr = p.iph.saddr;
                t->parms.iph.daddr = p.iph.daddr;
                t->parms.i_key = p.i_key;
                t->parms.o_key = p.o_key;
                if (dev->type != ARPHRD_ETHER) {
                        memcpy(dev->dev_addr, &p.iph.saddr, 4);
                        memcpy(dev->broadcast, &p.iph.daddr, 4);
                }
                vti_tunnel_link(ipn, t);
                netdev_state_change(dev);
        }

        if (t->parms.link != p.link) {
                t->parms.link = p.link;
                mtu = vti_tunnel_bind_dev(dev);
                if (!tb[IFLA_MTU])
                        dev->mtu = mtu;
                netdev_state_change(dev);
        }

        return 0;
}

static size_t vti_get_size(const struct net_device *dev)
{
        return
                /* IFLA_VTI_LINK */
                nla_total_size(4) +
                /* IFLA_VTI_IKEY */
                nla_total_size(4) +
                /* IFLA_VTI_OKEY */
                nla_total_size(4) +
                /* IFLA_VTI_LOCAL */
                nla_total_size(4) +
                /* IFLA_VTI_REMOTE */
                nla_total_size(4) +
                0;
}

static int vti_fill_info(struct sk_buff *skb, const struct net_device *dev)
{
        struct ip_tunnel *t = netdev_priv(dev);
        struct ip_tunnel_parm *p = &t->parms;

        nla_put_u32(skb, IFLA_VTI_LINK, p->link);
        nla_put_be32(skb, IFLA_VTI_IKEY, p->i_key);
        nla_put_be32(skb, IFLA_VTI_OKEY, p->o_key);
        nla_put_be32(skb, IFLA_VTI_LOCAL, p->iph.saddr);
        nla_put_be32(skb, IFLA_VTI_REMOTE, p->iph.daddr);

        return 0;
}

static const struct nla_policy vti_policy[IFLA_VTI_MAX + 1] = {
        [IFLA_VTI_LINK]         = { .type = NLA_U32 },
        [IFLA_VTI_IKEY]         = { .type = NLA_U32 },
        [IFLA_VTI_OKEY]         = { .type = NLA_U32 },
        [IFLA_VTI_LOCAL]        = { .len = FIELD_SIZEOF(struct iphdr, saddr) },
        [IFLA_VTI_REMOTE]       = { .len = FIELD_SIZEOF(struct iphdr, daddr) },
};

static struct rtnl_link_ops vti_link_ops __read_mostly = {
        .kind           = "vti",
        .maxtype        = IFLA_VTI_MAX,
        .policy         = vti_policy,
        .priv_size      = sizeof(struct ip_tunnel),
        .setup          = vti_tunnel_setup,
        .validate       = vti_tunnel_validate,
        .newlink        = vti_newlink,
        .changelink     = vti_changelink,
        .get_size       = vti_get_size,
        .fill_info      = vti_fill_info,
};

static int __init vti_init(void)
{
        int err;

        pr_info("IPv4 over IPSec tunneling driver\n");

        err = register_pernet_device(&vti_net_ops);
        if (err < 0)
                return err;
        err = xfrm4_mode_tunnel_input_register(&vti_handler);
        if (err < 0) {
                unregister_pernet_device(&vti_net_ops);
                pr_info("vti init: can't register tunnel\n");
                return err;
        }

        err = rtnl_link_register(&vti_link_ops);
        if (err < 0)
                goto rtnl_link_failed;

        return err;

rtnl_link_failed:
        xfrm4_mode_tunnel_input_deregister(&vti_handler);
        unregister_pernet_device(&vti_net_ops);
        return err;
}

static void __exit vti_fini(void)
{
        rtnl_link_unregister(&vti_link_ops);
        if (xfrm4_mode_tunnel_input_deregister(&vti_handler))
                pr_info("vti close: can't deregister tunnel\n");

        unregister_pernet_device(&vti_net_ops);
}

module_init(vti_init);
module_exit(vti_fini);
MODULE_LICENSE("GPL");
MODULE_ALIAS_RTNL_LINK("vti");
MODULE_ALIAS_NETDEV("ip_vti0");