/*
 *	Linux NET3: IP/IP protocol decoder modified to support
 *		    virtual tunnel interface
 *
 *	Authors:
 *		Saurabh Mohan (saurabh.mohan@vyatta.com) 05/07/2012
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

/*
   This version of net/ipv4/ip_vti.c is a clone of net/ipv4/ipip.c

   For comments look at net/ipv4/ip_gre.c --ANK
 */

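/* A VTI device pairs a routable tunnel interface with an IPsec (xfrm)
 * tunnel-mode transform: packets routed into the device are only sent
 * if a matching tunnel-mode SA/policy exists.  As an illustration
 * (exact iproute2 syntax varies by version), such a device is
 * typically created with something like:
 *
 *	ip tunnel add vti1 mode vti local 10.0.0.1 remote 10.0.0.2 key 10
 *
 * with the IPsec SAs and policies, keyed by the same mark, configured
 * separately (e.g. via "ip xfrm" or an IKE daemon).
 */
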
#include <linux/capability.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/uaccess.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/in.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/if_arp.h>
#include <linux/mroute.h>
#include <linux/init.h>
#include <linux/netfilter_ipv4.h>
#include <linux/if_ether.h>

#include <net/sock.h>
#include <net/ip.h>
#include <net/icmp.h>
#include <net/ipip.h>
#include <net/inet_ecn.h>
#include <net/xfrm.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>

#define HASH_SIZE  16
#define HASH(addr) (((__force u32)addr^((__force u32)addr>>4))&(HASH_SIZE-1))

static struct rtnl_link_ops vti_link_ops __read_mostly;

static int vti_net_id __read_mostly;

/* Per-netns state: tunnels hashed by (remote, local), remote only and
 * local only, a single wildcard slot, and the fallback device ip_vti0.
 */
struct vti_net {
	struct ip_tunnel __rcu *tunnels_r_l[HASH_SIZE];
	struct ip_tunnel __rcu *tunnels_r[HASH_SIZE];
	struct ip_tunnel __rcu *tunnels_l[HASH_SIZE];
	struct ip_tunnel __rcu *tunnels_wc[1];
	struct ip_tunnel __rcu **tunnels[4];

	struct net_device *fb_tunnel_dev;
};

static int vti_fb_tunnel_init(struct net_device *dev);
static int vti_tunnel_init(struct net_device *dev);
static void vti_tunnel_setup(struct net_device *dev);
static void vti_dev_free(struct net_device *dev);
static int vti_tunnel_bind_dev(struct net_device *dev);

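/* Transmit helper used by vti_tunnel_xmit().  It expects an sk_buff
 * named "skb" in the calling scope: on successful dst_output() it
 * updates the per-cpu 64-bit counters (stats1) under their seqcount,
 * otherwise it bumps the error counters in the regular net_device
 * stats (stats2).
 */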
#define VTI_XMIT(stats1, stats2) do {				\
	int err;						\
	int pkt_len = skb->len;					\
	err = dst_output(skb);					\
	if (net_xmit_eval(err) == 0) {				\
		u64_stats_update_begin(&(stats1)->syncp);	\
		(stats1)->tx_bytes += pkt_len;			\
		(stats1)->tx_packets++;				\
		u64_stats_update_end(&(stats1)->syncp);		\
	} else {						\
		(stats2)->tx_errors++;				\
		(stats2)->tx_aborted_errors++;			\
	}							\
} while (0)

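/* Fold the per-cpu tstats into one rtnl_link_stats64.  Each per-cpu
 * copy is read under its u64_stats seqcount and retried if a writer
 * raced with the read; the non-percpu error counters come straight
 * from dev->stats.
 */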
static struct rtnl_link_stats64 *vti_get_stats64(struct net_device *dev,
						 struct rtnl_link_stats64 *tot)
{
	int i;

	for_each_possible_cpu(i) {
		const struct pcpu_tstats *tstats = per_cpu_ptr(dev->tstats, i);
		u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
		unsigned int start;

		do {
			start = u64_stats_fetch_begin_bh(&tstats->syncp);
			rx_packets = tstats->rx_packets;
			tx_packets = tstats->tx_packets;
			rx_bytes = tstats->rx_bytes;
			tx_bytes = tstats->tx_bytes;
		} while (u64_stats_fetch_retry_bh(&tstats->syncp, start));

		tot->rx_packets += rx_packets;
		tot->tx_packets += tx_packets;
		tot->rx_bytes   += rx_bytes;
		tot->tx_bytes   += tx_bytes;
	}

	tot->multicast = dev->stats.multicast;
	tot->rx_crc_errors = dev->stats.rx_crc_errors;
	tot->rx_fifo_errors = dev->stats.rx_fifo_errors;
	tot->rx_length_errors = dev->stats.rx_length_errors;
	tot->rx_errors = dev->stats.rx_errors;
	tot->tx_fifo_errors = dev->stats.tx_fifo_errors;
	tot->tx_carrier_errors = dev->stats.tx_carrier_errors;
	tot->tx_dropped = dev->stats.tx_dropped;
	tot->tx_aborted_errors = dev->stats.tx_aborted_errors;
	tot->tx_errors = dev->stats.tx_errors;

	return tot;
}

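/* RCU tunnel lookup, most specific match first: (remote, local), then
 * remote only, then local only, then the single wildcard entry.  Only
 * devices that are administratively up are returned.
 */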
static struct ip_tunnel *vti_tunnel_lookup(struct net *net,
					   __be32 remote, __be32 local)
{
	unsigned h0 = HASH(remote);
	unsigned h1 = HASH(local);
	struct ip_tunnel *t;
	struct vti_net *ipn = net_generic(net, vti_net_id);

	for_each_ip_tunnel_rcu(t, ipn->tunnels_r_l[h0 ^ h1])
		if (local == t->parms.iph.saddr &&
		    remote == t->parms.iph.daddr && (t->dev->flags&IFF_UP))
			return t;

	for_each_ip_tunnel_rcu(t, ipn->tunnels_r[h0])
		if (remote == t->parms.iph.daddr && (t->dev->flags&IFF_UP))
			return t;

	for_each_ip_tunnel_rcu(t, ipn->tunnels_l[h1])
		if (local == t->parms.iph.saddr && (t->dev->flags&IFF_UP))
			return t;

	for_each_ip_tunnel_rcu(t, ipn->tunnels_wc[0])
		if (t && (t->dev->flags&IFF_UP))
			return t;
	return NULL;
}

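/* Pick the hash bucket for a tunnel's parameters.  The prio index
 * encodes which addresses are set: bit 1 for a remote address, bit 0
 * for a local one, so tunnels[0] is the wildcard table and tunnels[3]
 * holds fully specified tunnels.
 */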
static struct ip_tunnel __rcu **__vti_bucket(struct vti_net *ipn,
					     struct ip_tunnel_parm *parms)
{
	__be32 remote = parms->iph.daddr;
	__be32 local = parms->iph.saddr;
	unsigned h = 0;
	int prio = 0;

	if (remote) {
		prio |= 2;
		h ^= HASH(remote);
	}
	if (local) {
		prio |= 1;
		h ^= HASH(local);
	}
	return &ipn->tunnels[prio][h];
}

static inline struct ip_tunnel __rcu **vti_bucket(struct vti_net *ipn,
						  struct ip_tunnel *t)
{
	return __vti_bucket(ipn, &t->parms);
}

static void vti_tunnel_unlink(struct vti_net *ipn, struct ip_tunnel *t)
{
	struct ip_tunnel __rcu **tp;
	struct ip_tunnel *iter;

	for (tp = vti_bucket(ipn, t);
	     (iter = rtnl_dereference(*tp)) != NULL;
	     tp = &iter->next) {
		if (t == iter) {
			rcu_assign_pointer(*tp, t->next);
			break;
		}
	}
}

static void vti_tunnel_link(struct vti_net *ipn, struct ip_tunnel *t)
{
	struct ip_tunnel __rcu **tp = vti_bucket(ipn, t);

	rcu_assign_pointer(t->next, rtnl_dereference(*tp));
	rcu_assign_pointer(*tp, t);
}

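/* Find a tunnel matching parms; when "create" is set and no match
 * exists, allocate and register a new device (named after parms->name,
 * or "vti%d" to let the core pick an index).  Caller holds RTNL.
 */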
static struct ip_tunnel *vti_tunnel_locate(struct net *net,
					   struct ip_tunnel_parm *parms,
					   int create)
{
	__be32 remote = parms->iph.daddr;
	__be32 local = parms->iph.saddr;
	struct ip_tunnel *t, *nt;
	struct ip_tunnel __rcu **tp;
	struct net_device *dev;
	char name[IFNAMSIZ];
	struct vti_net *ipn = net_generic(net, vti_net_id);

	for (tp = __vti_bucket(ipn, parms);
	     (t = rtnl_dereference(*tp)) != NULL;
	     tp = &t->next) {
		if (local == t->parms.iph.saddr && remote == t->parms.iph.daddr)
			return t;
	}
	if (!create)
		return NULL;

	if (parms->name[0])
		strlcpy(name, parms->name, IFNAMSIZ);
	else
		strcpy(name, "vti%d");

	dev = alloc_netdev(sizeof(*t), name, vti_tunnel_setup);
	if (dev == NULL)
		return NULL;

	dev_net_set(dev, net);

	nt = netdev_priv(dev);
	nt->parms = *parms;
	dev->rtnl_link_ops = &vti_link_ops;

	vti_tunnel_bind_dev(dev);

	if (register_netdevice(dev) < 0)
		goto failed_free;

	dev_hold(dev);
	vti_tunnel_link(ipn, nt);
	return nt;

failed_free:
	free_netdev(dev);
	return NULL;
}

static void vti_tunnel_uninit(struct net_device *dev)
{
	struct net *net = dev_net(dev);
	struct vti_net *ipn = net_generic(net, vti_net_id);

	vti_tunnel_unlink(ipn, netdev_priv(dev));
	dev_put(dev);
}

static int vti_err(struct sk_buff *skb, u32 info)
{
	/* All the routers (except for Linux) return only
	 * 8 bytes of packet payload. It means, that precise relaying of
	 * ICMP in the real Internet is absolutely infeasible.
	 */
	struct iphdr *iph = (struct iphdr *)skb->data;
	const int type = icmp_hdr(skb)->type;
	const int code = icmp_hdr(skb)->code;
	struct ip_tunnel *t;
	int err;

	switch (type) {
	default:
	case ICMP_PARAMETERPROB:
		return 0;
	case ICMP_DEST_UNREACH:
		switch (code) {
		case ICMP_PORT_UNREACH:
			/* Impossible event. */
			return 0;
		case ICMP_FRAG_NEEDED:
			/* Handled below: update the path MTU. */
			break;
		default:
			/* All others are translated to HOST_UNREACH. */
			return 0;
		}
		break;
	case ICMP_TIME_EXCEEDED:
		if (code != ICMP_EXC_TTL)
			return 0;
		break;
	}

	err = -ENOENT;
	t = vti_tunnel_lookup(dev_net(skb->dev), iph->daddr, iph->saddr);
	if (t == NULL)
		goto out;

	if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED) {
		ipv4_update_pmtu(skb, dev_net(skb->dev), info,
				 t->parms.link, 0, IPPROTO_IPIP, 0);
		err = 0;
		goto out;
	}

	err = 0;
	if (t->parms.iph.ttl == 0 && type == ICMP_TIME_EXCEEDED)
		goto out;

	if (time_before(jiffies, t->err_time + IPTUNNEL_ERR_TIMEO))
		t->err_count++;
	else
		t->err_count = 1;
	t->err_time = jiffies;
out:
	return err;
}

/* We don't digest the packet therefore let the packet pass */
static int vti_rcv(struct sk_buff *skb)
{
	struct ip_tunnel *tunnel;
	const struct iphdr *iph = ip_hdr(skb);

	tunnel = vti_tunnel_lookup(dev_net(skb->dev), iph->saddr, iph->daddr);
	if (tunnel != NULL) {
		struct pcpu_tstats *tstats;

		tstats = this_cpu_ptr(tunnel->dev->tstats);
		u64_stats_update_begin(&tstats->syncp);
		tstats->rx_packets++;
		tstats->rx_bytes += skb->len;
		u64_stats_update_end(&tstats->syncp);

		skb->dev = tunnel->dev;
		return 1;
	}

	return -1;
}

/* This function assumes it is being called from dev_queue_xmit()
 * and that skb is filled properly by that function.
 */
static netdev_tx_t vti_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	struct pcpu_tstats *tstats;
	struct iphdr  *tiph = &tunnel->parms.iph;
	u8     tos;
	struct rtable *rt;		/* Route to the other host */
	struct net_device *tdev;	/* Device to other host */
	struct iphdr  *old_iph = ip_hdr(skb);
	__be32 dst = tiph->daddr;
	struct flowi4 fl4;

	if (skb->protocol != htons(ETH_P_IP))
		goto tx_error;

	tos = old_iph->tos;

	memset(&fl4, 0, sizeof(fl4));
	flowi4_init_output(&fl4, tunnel->parms.link,
			   be32_to_cpu(tunnel->parms.i_key), RT_TOS(tos),
			   RT_SCOPE_UNIVERSE,
			   IPPROTO_IPIP, 0,
			   dst, tiph->saddr, 0, 0);
	rt = ip_route_output_key(dev_net(dev), &fl4);
	if (IS_ERR(rt)) {
		dev->stats.tx_carrier_errors++;
		goto tx_error_icmp;
	}
	/* if there is no transform then this tunnel is not functional.
	 * Or if the xfrm is not mode tunnel.
	 */
	if (!rt->dst.xfrm ||
	    rt->dst.xfrm->props.mode != XFRM_MODE_TUNNEL) {
		dev->stats.tx_carrier_errors++;
		ip_rt_put(rt);
		goto tx_error_icmp;
	}
	tdev = rt->dst.dev;

	if (tdev == dev) {
		ip_rt_put(rt);
		dev->stats.collisions++;
		goto tx_error;
	}

	if (tunnel->err_count > 0) {
		if (time_before(jiffies,
				tunnel->err_time + IPTUNNEL_ERR_TIMEO)) {
			tunnel->err_count--;
			dst_link_failure(skb);
		} else
			tunnel->err_count = 0;
	}

	IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED |
			      IPSKB_REROUTED);
	skb_dst_drop(skb);
	skb_dst_set(skb, &rt->dst);
	nf_reset(skb);
	skb->dev = skb_dst(skb)->dev;

	tstats = this_cpu_ptr(dev->tstats);
	VTI_XMIT(tstats, &dev->stats);
	return NETDEV_TX_OK;

tx_error_icmp:
	dst_link_failure(skb);
tx_error:
	dev->stats.tx_errors++;
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}

static int vti_tunnel_bind_dev(struct net_device *dev)
{
	struct net_device *tdev = NULL;
	struct ip_tunnel *tunnel;
	struct iphdr *iph;

	tunnel = netdev_priv(dev);
	iph = &tunnel->parms.iph;

	if (iph->daddr) {
		struct rtable *rt;
		struct flowi4 fl4;

		memset(&fl4, 0, sizeof(fl4));
		flowi4_init_output(&fl4, tunnel->parms.link,
				   be32_to_cpu(tunnel->parms.i_key),
				   RT_TOS(iph->tos), RT_SCOPE_UNIVERSE,
				   IPPROTO_IPIP, 0,
				   iph->daddr, iph->saddr, 0, 0);
		rt = ip_route_output_key(dev_net(dev), &fl4);
		if (!IS_ERR(rt)) {
			tdev = rt->dst.dev;
			ip_rt_put(rt);
		}
		dev->flags |= IFF_POINTOPOINT;
	}

	if (!tdev && tunnel->parms.link)
		tdev = __dev_get_by_index(dev_net(dev), tunnel->parms.link);

	if (tdev) {
		dev->hard_header_len = tdev->hard_header_len +
				       sizeof(struct iphdr);
		dev->mtu = tdev->mtu;
	}
	dev->iflink = tunnel->parms.link;
	return dev->mtu;
}

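/* Legacy ioctl interface (SIOCGETTUNNEL/SIOCADDTUNNEL/SIOCCHGTUNNEL/
 * SIOCDELTUNNEL), kept alongside the rtnl_link_ops so tools such as
 * "ip tunnel" keep working.  Get/add/chg report GRE_KEY and VTI_ISVTI
 * in i_flags so userspace can tell a VTI apart from a plain ipip
 * tunnel.
 */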
static int
vti_tunnel_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	int err = 0;
	struct ip_tunnel_parm p;
	struct ip_tunnel *t;
	struct net *net = dev_net(dev);
	struct vti_net *ipn = net_generic(net, vti_net_id);

	switch (cmd) {
	case SIOCGETTUNNEL:
		t = NULL;
		if (dev == ipn->fb_tunnel_dev) {
			if (copy_from_user(&p, ifr->ifr_ifru.ifru_data,
					   sizeof(p))) {
				err = -EFAULT;
				break;
			}
			t = vti_tunnel_locate(net, &p, 0);
		}
		if (t == NULL)
			t = netdev_priv(dev);
		memcpy(&p, &t->parms, sizeof(p));
		p.i_flags |= GRE_KEY | VTI_ISVTI;
		p.o_flags |= GRE_KEY;
		if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p)))
			err = -EFAULT;
		break;

	case SIOCADDTUNNEL:
	case SIOCCHGTUNNEL:
		err = -EPERM;
		if (!capable(CAP_NET_ADMIN))
			goto done;

		err = -EFAULT;
		if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))
			goto done;

		err = -EINVAL;
		if (p.iph.version != 4 || p.iph.protocol != IPPROTO_IPIP ||
		    p.iph.ihl != 5)
			goto done;

		t = vti_tunnel_locate(net, &p, cmd == SIOCADDTUNNEL);

		if (dev != ipn->fb_tunnel_dev && cmd == SIOCCHGTUNNEL) {
			if (t != NULL) {
				if (t->dev != dev) {
					err = -EEXIST;
					break;
				}
			} else {
				if (((dev->flags&IFF_POINTOPOINT) &&
				    !p.iph.daddr) ||
				    (!(dev->flags&IFF_POINTOPOINT) &&
				    p.iph.daddr)) {
					err = -EINVAL;
					break;
				}
				t = netdev_priv(dev);
				vti_tunnel_unlink(ipn, t);
				synchronize_net();
				t->parms.iph.saddr = p.iph.saddr;
				t->parms.iph.daddr = p.iph.daddr;
				t->parms.i_key = p.i_key;
				t->parms.o_key = p.o_key;
				t->parms.iph.protocol = IPPROTO_IPIP;
				memcpy(dev->dev_addr, &p.iph.saddr, 4);
				memcpy(dev->broadcast, &p.iph.daddr, 4);
				vti_tunnel_link(ipn, t);
				netdev_state_change(dev);
			}
		}

		if (t) {
			err = 0;
			if (cmd == SIOCCHGTUNNEL) {
				t->parms.i_key = p.i_key;
				t->parms.o_key = p.o_key;
				if (t->parms.link != p.link) {
					t->parms.link = p.link;
					vti_tunnel_bind_dev(dev);
					netdev_state_change(dev);
				}
			}
			memcpy(&p, &t->parms, sizeof(p));
			p.i_flags |= GRE_KEY | VTI_ISVTI;
			p.o_flags |= GRE_KEY;
			if (copy_to_user(ifr->ifr_ifru.ifru_data, &p,
					 sizeof(p)))
				err = -EFAULT;
		} else
			err = (cmd == SIOCADDTUNNEL ? -ENOBUFS : -ENOENT);
		break;

	case SIOCDELTUNNEL:
		err = -EPERM;
		if (!capable(CAP_NET_ADMIN))
			goto done;

		if (dev == ipn->fb_tunnel_dev) {
			err = -EFAULT;
			if (copy_from_user(&p, ifr->ifr_ifru.ifru_data,
					   sizeof(p)))
				goto done;
			err = -ENOENT;
			t = vti_tunnel_locate(net, &p, 0);
			if (t == NULL)
				goto done;
			err = -EPERM;
			if (t->dev == ipn->fb_tunnel_dev)
				goto done;
			dev = t->dev;
		}
		unregister_netdevice(dev);
		err = 0;
		break;

	default:
		err = -EINVAL;
	}

done:
	return err;
}

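/* 68 is the minimum MTU every IPv4 link must support (RFC 791);
 * 0xFFF8 is the largest 8-byte-aligned payload that still fits the
 * 16-bit IPv4 total-length field.
 */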
static int vti_tunnel_change_mtu(struct net_device *dev, int new_mtu)
{
	if (new_mtu < 68 || new_mtu > 0xFFF8)
		return -EINVAL;
	dev->mtu = new_mtu;
	return 0;
}

static const struct net_device_ops vti_netdev_ops = {
	.ndo_init	= vti_tunnel_init,
	.ndo_uninit	= vti_tunnel_uninit,
	.ndo_start_xmit	= vti_tunnel_xmit,
	.ndo_do_ioctl	= vti_tunnel_ioctl,
	.ndo_change_mtu	= vti_tunnel_change_mtu,
	.ndo_get_stats64 = vti_get_stats64,
};

static void vti_dev_free(struct net_device *dev)
{
	free_percpu(dev->tstats);
	free_netdev(dev);
}

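/* One-time device setup.  NETIF_F_LLTX marks the device lockless for
 * transmit, and clearing IFF_XMIT_DST_RELEASE tells the core not to
 * drop the skb's dst entry before handing the packet to
 * ndo_start_xmit.
 */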
static void vti_tunnel_setup(struct net_device *dev)
{
	dev->netdev_ops		= &vti_netdev_ops;
	dev->destructor		= vti_dev_free;

	dev->type		= ARPHRD_TUNNEL;
	dev->hard_header_len	= LL_MAX_HEADER + sizeof(struct iphdr);
	dev->mtu		= ETH_DATA_LEN;
	dev->flags		= IFF_NOARP;
	dev->iflink		= 0;
	dev->addr_len		= 4;
	dev->features		|= NETIF_F_NETNS_LOCAL;
	dev->features		|= NETIF_F_LLTX;
	dev->priv_flags		&= ~IFF_XMIT_DST_RELEASE;
}

static int vti_tunnel_init(struct net_device *dev)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);

	tunnel->dev = dev;
	strcpy(tunnel->parms.name, dev->name);

	memcpy(dev->dev_addr, &tunnel->parms.iph.saddr, 4);
	memcpy(dev->broadcast, &tunnel->parms.iph.daddr, 4);

	dev->tstats = alloc_percpu(struct pcpu_tstats);
	if (!dev->tstats)
		return -ENOMEM;

	return 0;
}

static int __net_init vti_fb_tunnel_init(struct net_device *dev)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	struct iphdr *iph = &tunnel->parms.iph;
	struct vti_net *ipn = net_generic(dev_net(dev), vti_net_id);

	tunnel->dev = dev;
	strcpy(tunnel->parms.name, dev->name);

	iph->version		= 4;
	iph->protocol		= IPPROTO_IPIP;
	iph->ihl		= 5;

	dev->tstats = alloc_percpu(struct pcpu_tstats);
	if (!dev->tstats)
		return -ENOMEM;

	dev_hold(dev);
	rcu_assign_pointer(ipn->tunnels_wc[0], tunnel);
	return 0;
}

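/* Hook into the IPv4 tunnel-mode xfrm input path: decapsulated
 * tunnel-mode packets are offered to vti_rcv() so they can be
 * accounted to the matching VTI device before normal receive
 * processing continues.
 */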
static struct xfrm_tunnel vti_handler __read_mostly = {
	.handler	= vti_rcv,
	.err_handler	= vti_err,
	.priority	= 1,
};

static void vti_destroy_tunnels(struct vti_net *ipn, struct list_head *head)
{
	int prio;

	for (prio = 1; prio < 4; prio++) {
		int h;
		for (h = 0; h < HASH_SIZE; h++) {
			struct ip_tunnel *t;

			t = rtnl_dereference(ipn->tunnels[prio][h]);
			while (t != NULL) {
				unregister_netdevice_queue(t->dev, head);
				t = rtnl_dereference(t->next);
			}
		}
	}
}

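/* Per-namespace init: wire up the four hash tables (indexed by the
 * prio computed in __vti_bucket()) and register the fallback device
 * ip_vti0, which also fills the wildcard slot tunnels_wc[0].
 */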
static int __net_init vti_init_net(struct net *net)
{
	int err;
	struct vti_net *ipn = net_generic(net, vti_net_id);

	ipn->tunnels[0] = ipn->tunnels_wc;
	ipn->tunnels[1] = ipn->tunnels_l;
	ipn->tunnels[2] = ipn->tunnels_r;
	ipn->tunnels[3] = ipn->tunnels_r_l;

	ipn->fb_tunnel_dev = alloc_netdev(sizeof(struct ip_tunnel),
					  "ip_vti0",
					  vti_tunnel_setup);
	if (!ipn->fb_tunnel_dev) {
		err = -ENOMEM;
		goto err_alloc_dev;
	}
	dev_net_set(ipn->fb_tunnel_dev, net);

	err = vti_fb_tunnel_init(ipn->fb_tunnel_dev);
	if (err)
		goto err_reg_dev;
	ipn->fb_tunnel_dev->rtnl_link_ops = &vti_link_ops;

	err = register_netdev(ipn->fb_tunnel_dev);
	if (err)
		goto err_reg_dev;
	return 0;

err_reg_dev:
	vti_dev_free(ipn->fb_tunnel_dev);
err_alloc_dev:
	return err;
}

static void __net_exit vti_exit_net(struct net *net)
{
	struct vti_net *ipn = net_generic(net, vti_net_id);
	LIST_HEAD(list);

	rtnl_lock();
	vti_destroy_tunnels(ipn, &list);
	unregister_netdevice_many(&list);
	rtnl_unlock();
}

static struct pernet_operations vti_net_ops = {
	.init = vti_init_net,
	.exit = vti_exit_net,
	.id   = &vti_net_id,
	.size = sizeof(struct vti_net),
};

static int vti_tunnel_validate(struct nlattr *tb[], struct nlattr *data[])
{
	return 0;
}

static void vti_netlink_parms(struct nlattr *data[],
			      struct ip_tunnel_parm *parms)
{
	memset(parms, 0, sizeof(*parms));

	parms->iph.protocol = IPPROTO_IPIP;

	if (!data)
		return;

	if (data[IFLA_VTI_LINK])
		parms->link = nla_get_u32(data[IFLA_VTI_LINK]);

	if (data[IFLA_VTI_IKEY])
		parms->i_key = nla_get_be32(data[IFLA_VTI_IKEY]);

	if (data[IFLA_VTI_OKEY])
		parms->o_key = nla_get_be32(data[IFLA_VTI_OKEY]);

	if (data[IFLA_VTI_LOCAL])
		parms->iph.saddr = nla_get_be32(data[IFLA_VTI_LOCAL]);

	if (data[IFLA_VTI_REMOTE])
		parms->iph.daddr = nla_get_be32(data[IFLA_VTI_REMOTE]);
}

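/* rtnl_link (netlink) interface.  With a reasonably recent iproute2,
 * a device is created through this path by something like:
 *
 *	ip link add vti1 type vti local 10.0.0.1 remote 10.0.0.2 \
 *		ikey 10 okey 10
 *
 * (illustrative; option names may differ between iproute2 versions).
 */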
static int vti_newlink(struct net *src_net, struct net_device *dev,
		       struct nlattr *tb[], struct nlattr *data[])
{
	struct ip_tunnel *nt;
	struct net *net = dev_net(dev);
	struct vti_net *ipn = net_generic(net, vti_net_id);
	int mtu;
	int err;

	nt = netdev_priv(dev);
	vti_netlink_parms(data, &nt->parms);

	if (vti_tunnel_locate(net, &nt->parms, 0))
		return -EEXIST;

	mtu = vti_tunnel_bind_dev(dev);
	if (!tb[IFLA_MTU])
		dev->mtu = mtu;

	err = register_netdevice(dev);
	if (err)
		goto out;

	dev_hold(dev);
	vti_tunnel_link(ipn, nt);

out:
	return err;
}

static int vti_changelink(struct net_device *dev, struct nlattr *tb[],
			  struct nlattr *data[])
{
	struct ip_tunnel *t, *nt;
	struct net *net = dev_net(dev);
	struct vti_net *ipn = net_generic(net, vti_net_id);
	struct ip_tunnel_parm p;
	int mtu;

	if (dev == ipn->fb_tunnel_dev)
		return -EINVAL;

	nt = netdev_priv(dev);
	vti_netlink_parms(data, &p);

	t = vti_tunnel_locate(net, &p, 0);

	if (t) {
		if (t->dev != dev)
			return -EEXIST;
	} else {
		t = nt;

		vti_tunnel_unlink(ipn, t);
		t->parms.iph.saddr = p.iph.saddr;
		t->parms.iph.daddr = p.iph.daddr;
		t->parms.i_key = p.i_key;
		t->parms.o_key = p.o_key;
		if (dev->type != ARPHRD_ETHER) {
			memcpy(dev->dev_addr, &p.iph.saddr, 4);
			memcpy(dev->broadcast, &p.iph.daddr, 4);
		}
		vti_tunnel_link(ipn, t);
		netdev_state_change(dev);
	}

	if (t->parms.link != p.link) {
		t->parms.link = p.link;
		mtu = vti_tunnel_bind_dev(dev);
		if (!tb[IFLA_MTU])
			dev->mtu = mtu;
		netdev_state_change(dev);
	}

	return 0;
}

static size_t vti_get_size(const struct net_device *dev)
{
	return	nla_total_size(4) +	/* IFLA_VTI_LINK */
		nla_total_size(4) +	/* IFLA_VTI_IKEY */
		nla_total_size(4) +	/* IFLA_VTI_OKEY */
		nla_total_size(4) +	/* IFLA_VTI_LOCAL */
		nla_total_size(4);	/* IFLA_VTI_REMOTE */
}

static int vti_fill_info(struct sk_buff *skb, const struct net_device *dev)
{
	struct ip_tunnel *t = netdev_priv(dev);
	struct ip_tunnel_parm *p = &t->parms;

	nla_put_u32(skb, IFLA_VTI_LINK, p->link);
	nla_put_be32(skb, IFLA_VTI_IKEY, p->i_key);
	nla_put_be32(skb, IFLA_VTI_OKEY, p->o_key);
	nla_put_be32(skb, IFLA_VTI_LOCAL, p->iph.saddr);
	nla_put_be32(skb, IFLA_VTI_REMOTE, p->iph.daddr);

	return 0;
}

static const struct nla_policy vti_policy[IFLA_VTI_MAX + 1] = {
	[IFLA_VTI_LINK]		= { .type = NLA_U32 },
	[IFLA_VTI_IKEY]		= { .type = NLA_U32 },
	[IFLA_VTI_OKEY]		= { .type = NLA_U32 },
	[IFLA_VTI_LOCAL]	= { .len = FIELD_SIZEOF(struct iphdr, saddr) },
	[IFLA_VTI_REMOTE]	= { .len = FIELD_SIZEOF(struct iphdr, daddr) },
};

static struct rtnl_link_ops vti_link_ops __read_mostly = {
	.kind		= "vti",
	.maxtype	= IFLA_VTI_MAX,
	.policy		= vti_policy,
	.priv_size	= sizeof(struct ip_tunnel),
	.setup		= vti_tunnel_setup,
	.validate	= vti_tunnel_validate,
	.newlink	= vti_newlink,
	.changelink	= vti_changelink,
	.get_size	= vti_get_size,
	.fill_info	= vti_fill_info,
};

static int __init vti_init(void)
{
	int err;

	pr_info("IPv4 over IPSec tunneling driver\n");

	err = register_pernet_device(&vti_net_ops);
	if (err < 0)
		return err;

	err = xfrm4_mode_tunnel_input_register(&vti_handler);
	if (err < 0) {
		unregister_pernet_device(&vti_net_ops);
		pr_info("vti init: can't register tunnel\n");
		return err;
	}

	err = rtnl_link_register(&vti_link_ops);
	if (err < 0)
		goto rtnl_link_failed;

	return err;

rtnl_link_failed:
	xfrm4_mode_tunnel_input_deregister(&vti_handler);
	unregister_pernet_device(&vti_net_ops);
	return err;
}

static void __exit vti_fini(void)
{
	rtnl_link_unregister(&vti_link_ops);
	if (xfrm4_mode_tunnel_input_deregister(&vti_handler))
		pr_info("vti close: can't deregister tunnel\n");

	unregister_pernet_device(&vti_net_ops);
}

module_init(vti_init);
module_exit(vti_fini);
MODULE_LICENSE("GPL");
MODULE_ALIAS_RTNL_LINK("vti");
MODULE_ALIAS_NETDEV("ip_vti0");