/*
 *	Linux NET3: IP/IP protocol decoder modified to support
 *		    virtual tunnel interface
 *
 *	Saurabh Mohan (saurabh.mohan@vyatta.com) 05/07/2012
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

/*
   This version of net/ipv4/ip_vti.c is a clone of net/ipv4/ipip.c.

   For comments look at net/ipv4/ip_gre.c --ANK
 */
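/*
 * Illustrative usage with iproute2 (a sketch; exact syntax depends on
 * the iproute2 version and is not defined by this file):
 *
 *	ip tunnel add vti1 mode vti local 10.0.0.1 remote 10.0.0.2 key 10
 *	ip link set vti1 up
 *
 * Packets routed through a vti device are only forwarded when the
 * chosen route carries a tunnel-mode xfrm (IPsec) transform; see
 * vti_tunnel_xmit() below.
 */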
#include <linux/capability.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/uaccess.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/in.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/if_arp.h>
#include <linux/mroute.h>
#include <linux/init.h>
#include <linux/netfilter_ipv4.h>
#include <linux/if_ether.h>

#include <net/sock.h>
#include <net/ip.h>
#include <net/icmp.h>
#include <net/ipip.h>
#include <net/inet_ecn.h>
#include <net/xfrm.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>

#define VTI_ISVTI 0x0001
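/*
 * Tunnels are kept in small hash tables keyed on the outer endpoint
 * addresses.  HASH() XORs the address with itself shifted right by
 * four bits and masks the result down to one of HASH_SIZE buckets.
 */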
#define HASH_SIZE  16
#define HASH(addr) (((__force u32)addr^((__force u32)addr>>4))&(HASH_SIZE-1))
static struct rtnl_link_ops vti_link_ops __read_mostly;

static int vti_net_id __read_mostly;
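/*
 * Per-namespace state, allocated via net_generic().  Tunnels are
 * indexed by which outer addresses they specify: both remote and
 * local (tunnels_r_l), remote only, local only, or neither (the
 * single wildcard slot, which also holds the fallback device).
 */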
struct vti_net {
        struct ip_tunnel __rcu *tunnels_r_l[HASH_SIZE];
        struct ip_tunnel __rcu *tunnels_r[HASH_SIZE];
        struct ip_tunnel __rcu *tunnels_l[HASH_SIZE];
        struct ip_tunnel __rcu *tunnels_wc[1];
        struct ip_tunnel __rcu **tunnels[4];

        struct net_device *fb_tunnel_dev;
};
static int vti_fb_tunnel_init(struct net_device *dev);
static int vti_tunnel_init(struct net_device *dev);
static void vti_tunnel_setup(struct net_device *dev);
static void vti_dev_free(struct net_device *dev);
static int vti_tunnel_bind_dev(struct net_device *dev);
/* Locking : hash tables are protected by RCU and RTNL */

#define for_each_ip_tunnel_rcu(start) \
        for (t = rcu_dereference(start); t; t = rcu_dereference(t->next))
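/*
 * Transmit accounting helper: hand the skb to dst_output() and credit
 * the per-cpu 64-bit counters (stats1) on success, or bump the shared
 * error counters (stats2) on failure.  A macro rather than a function
 * so it can use the caller's local 'skb'.
 */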
#define VTI_XMIT(stats1, stats2) do {                           \
        int err;                                                \
        int pkt_len = skb->len;                                 \
        err = dst_output(skb);                                  \
        if (net_xmit_eval(err) == 0) {                          \
                u64_stats_update_begin(&(stats1)->syncp);       \
                (stats1)->tx_bytes += pkt_len;                  \
                (stats1)->tx_packets++;                         \
                u64_stats_update_end(&(stats1)->syncp);         \
        } else {                                                \
                (stats2)->tx_errors++;                          \
                (stats2)->tx_aborted_errors++;                  \
        }                                                       \
} while (0)
static struct rtnl_link_stats64 *vti_get_stats64(struct net_device *dev,
                                                 struct rtnl_link_stats64 *tot)
{
        int i;

        for_each_possible_cpu(i) {
                const struct pcpu_tstats *tstats = per_cpu_ptr(dev->tstats, i);
                u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
                unsigned int start;

                do {
                        start = u64_stats_fetch_begin_bh(&tstats->syncp);
                        rx_packets = tstats->rx_packets;
                        tx_packets = tstats->tx_packets;
                        rx_bytes = tstats->rx_bytes;
                        tx_bytes = tstats->tx_bytes;
                } while (u64_stats_fetch_retry_bh(&tstats->syncp, start));

                tot->rx_packets += rx_packets;
                tot->tx_packets += tx_packets;
                tot->rx_bytes += rx_bytes;
                tot->tx_bytes += tx_bytes;
        }

        tot->multicast = dev->stats.multicast;
        tot->rx_crc_errors = dev->stats.rx_crc_errors;
        tot->rx_fifo_errors = dev->stats.rx_fifo_errors;
        tot->rx_length_errors = dev->stats.rx_length_errors;
        tot->rx_errors = dev->stats.rx_errors;
        tot->tx_fifo_errors = dev->stats.tx_fifo_errors;
        tot->tx_carrier_errors = dev->stats.tx_carrier_errors;
        tot->tx_dropped = dev->stats.tx_dropped;
        tot->tx_aborted_errors = dev->stats.tx_aborted_errors;
        tot->tx_errors = dev->stats.tx_errors;

        return tot;
}
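/*
 * Find a tunnel matching the outer addresses, most specific table
 * first: remote+local, then remote only, local only, and finally the
 * wildcard slot.  Runs under RCU; only devices that are IFF_UP match.
 */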
static struct ip_tunnel *vti_tunnel_lookup(struct net *net,
                                           __be32 remote, __be32 local)
{
        unsigned h0 = HASH(remote);
        unsigned h1 = HASH(local);
        struct ip_tunnel *t;
        struct vti_net *ipn = net_generic(net, vti_net_id);

        for_each_ip_tunnel_rcu(ipn->tunnels_r_l[h0 ^ h1])
                if (local == t->parms.iph.saddr &&
                    remote == t->parms.iph.daddr && (t->dev->flags&IFF_UP))
                        return t;

        for_each_ip_tunnel_rcu(ipn->tunnels_r[h0])
                if (remote == t->parms.iph.daddr && (t->dev->flags&IFF_UP))
                        return t;

        for_each_ip_tunnel_rcu(ipn->tunnels_l[h1])
                if (local == t->parms.iph.saddr && (t->dev->flags&IFF_UP))
                        return t;

        for_each_ip_tunnel_rcu(ipn->tunnels_wc[0])
                if (t && (t->dev->flags&IFF_UP))
                        return t;
        return NULL;
}
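/*
 * Map tunnel parameters to their hash bucket.  'prio' selects one of
 * the four tables (bit 1: remote address set, bit 0: local address
 * set), matching the ipn->tunnels[] layout built in vti_init_net().
 */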
static struct ip_tunnel __rcu **__vti_bucket(struct vti_net *ipn,
                                             struct ip_tunnel_parm *parms)
{
        __be32 remote = parms->iph.daddr;
        __be32 local = parms->iph.saddr;
        unsigned h = 0;
        int prio = 0;

        if (remote) {
                prio |= 2;
                h ^= HASH(remote);
        }
        if (local) {
                prio |= 1;
                h ^= HASH(local);
        }
        return &ipn->tunnels[prio][h];
}
static inline struct ip_tunnel __rcu **vti_bucket(struct vti_net *ipn,
                                                  struct ip_tunnel *t)
{
        return __vti_bucket(ipn, &t->parms);
}
static void vti_tunnel_unlink(struct vti_net *ipn, struct ip_tunnel *t)
{
        struct ip_tunnel __rcu **tp;
        struct ip_tunnel *iter;

        for (tp = vti_bucket(ipn, t);
             (iter = rtnl_dereference(*tp)) != NULL;
             tp = &iter->next) {
                if (t == iter) {
                        rcu_assign_pointer(*tp, t->next);
                        break;
                }
        }
}
static void vti_tunnel_link(struct vti_net *ipn, struct ip_tunnel *t)
{
        struct ip_tunnel __rcu **tp = vti_bucket(ipn, t);

        rcu_assign_pointer(t->next, rtnl_dereference(*tp));
        rcu_assign_pointer(*tp, t);
}
static struct ip_tunnel *vti_tunnel_locate(struct net *net,
                                           struct ip_tunnel_parm *parms,
                                           int create)
{
        __be32 remote = parms->iph.daddr;
        __be32 local = parms->iph.saddr;
        struct ip_tunnel *t, *nt;
        struct ip_tunnel __rcu **tp;
        struct net_device *dev;
        char name[IFNAMSIZ];
        struct vti_net *ipn = net_generic(net, vti_net_id);

        for (tp = __vti_bucket(ipn, parms);
             (t = rtnl_dereference(*tp)) != NULL;
             tp = &t->next) {
                if (local == t->parms.iph.saddr && remote == t->parms.iph.daddr)
                        return t;
        }
        if (!create)
                return NULL;

        if (parms->name[0])
                strlcpy(name, parms->name, IFNAMSIZ);
        else
                strcpy(name, "vti%d");

        dev = alloc_netdev(sizeof(*t), name, vti_tunnel_setup);
        if (dev == NULL)
                return NULL;

        dev_net_set(dev, net);
        nt = netdev_priv(dev);
        nt->parms = *parms;
        dev->rtnl_link_ops = &vti_link_ops;

        vti_tunnel_bind_dev(dev);

        if (register_netdevice(dev) < 0)
                goto failed_free;

        dev_hold(dev);
        vti_tunnel_link(ipn, nt);
        return nt;

failed_free:
        free_netdev(dev);
        return NULL;
}
static void vti_tunnel_uninit(struct net_device *dev)
{
        struct net *net = dev_net(dev);
        struct vti_net *ipn = net_generic(net, vti_net_id);

        vti_tunnel_unlink(ipn, netdev_priv(dev));
        dev_put(dev);
}
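/*
 * ICMP error handler.  A frag-needed error updates the cached path
 * MTU; other relevant errors are rate-limited into err_count so that
 * vti_tunnel_xmit() can signal link failures to the stack.
 */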
static int vti_err(struct sk_buff *skb, u32 info)
{
        /* All the routers (except for Linux) return only
         * 8 bytes of packet payload. It means, that precise relaying of
         * ICMP in the real Internet is absolutely infeasible.
         */
        struct iphdr *iph = (struct iphdr *)skb->data;
        const int type = icmp_hdr(skb)->type;
        const int code = icmp_hdr(skb)->code;
        struct ip_tunnel *t;
        int err;

        switch (type) {
        default:
        case ICMP_PARAMETERPROB:
                return 0;

        case ICMP_DEST_UNREACH:
                switch (code) {
                case ICMP_SR_FAILED:
                case ICMP_PORT_UNREACH:
                        /* Impossible event. */
                        return 0;
                default:
                        /* All others are translated to HOST_UNREACH. */
                        break;
                }
                break;
        case ICMP_TIME_EXCEEDED:
                if (code != ICMP_EXC_TTL)
                        return 0;
                break;
        }

        err = -ENOENT;

        t = vti_tunnel_lookup(dev_net(skb->dev), iph->daddr, iph->saddr);
        if (t == NULL)
                goto out;

        if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED) {
                ipv4_update_pmtu(skb, dev_net(skb->dev), info,
                                 t->parms.link, 0, IPPROTO_IPIP, 0);
                err = 0;
                goto out;
        }

        err = 0;
        if (t->parms.iph.ttl == 0 && type == ICMP_TIME_EXCEEDED)
                goto out;

        if (time_before(jiffies, t->err_time + IPTUNNEL_ERR_TIMEO))
                t->err_count++;
        else
                t->err_count = 1;
        t->err_time = jiffies;
out:
        return err;
}
/* We don't digest the packet, therefore let the packet pass */
static int vti_rcv(struct sk_buff *skb)
{
        struct ip_tunnel *tunnel;
        const struct iphdr *iph = ip_hdr(skb);

        tunnel = vti_tunnel_lookup(dev_net(skb->dev), iph->saddr, iph->daddr);
        if (tunnel != NULL) {
                struct pcpu_tstats *tstats;

                tstats = this_cpu_ptr(tunnel->dev->tstats);
                u64_stats_update_begin(&tstats->syncp);
                tstats->rx_packets++;
                tstats->rx_bytes += skb->len;
                u64_stats_update_end(&tstats->syncp);

                skb->dev = tunnel->dev;
                return 1;
        }

        return -1;
}
/* This function assumes it is being called from dev_queue_xmit()
 * and that skb is filled properly by that function.
 */
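/* The IPsec encapsulation itself is done by the xfrm layer; this
 * function only routes the packet and refuses to send it unless the
 * resulting route is covered by a tunnel-mode transform.
 */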
static netdev_tx_t vti_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct ip_tunnel *tunnel = netdev_priv(dev);
        struct pcpu_tstats *tstats;
        struct iphdr *tiph = &tunnel->parms.iph;
        u8 tos;
        struct rtable *rt;              /* Route to the other host */
        struct net_device *tdev;        /* Device to other host */
        struct iphdr *old_iph = ip_hdr(skb);
        __be32 dst = tiph->daddr;
        struct flowi4 fl4;

        if (skb->protocol != htons(ETH_P_IP))
                goto tx_error;

        tos = old_iph->tos;

        memset(&fl4, 0, sizeof(fl4));
        flowi4_init_output(&fl4, tunnel->parms.link,
                           be32_to_cpu(tunnel->parms.i_key), RT_TOS(tos),
                           RT_SCOPE_UNIVERSE,
                           IPPROTO_IPIP, 0,
                           dst, tiph->saddr, 0, 0);
        rt = ip_route_output_key(dev_net(dev), &fl4);
        if (IS_ERR(rt)) {
                dev->stats.tx_carrier_errors++;
                goto tx_error_icmp;
        }
        /* if there is no transform then this tunnel is not functional.
         * Or if the xfrm is not mode tunnel.
         */
        if (!rt->dst.xfrm ||
            rt->dst.xfrm->props.mode != XFRM_MODE_TUNNEL) {
                dev->stats.tx_carrier_errors++;
                ip_rt_put(rt);          /* drop the route reference on this error path */
                goto tx_error_icmp;
        }
        tdev = rt->dst.dev;

        if (tdev == dev) {
                ip_rt_put(rt);
                dev->stats.collisions++;
                goto tx_error;
        }

        if (tunnel->err_count > 0) {
                if (time_before(jiffies,
                                tunnel->err_time + IPTUNNEL_ERR_TIMEO)) {
                        tunnel->err_count--;
                        dst_link_failure(skb);
                } else
                        tunnel->err_count = 0;
        }

        IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED |
                              IPSKB_REROUTED);
        skb_dst_drop(skb);
        skb_dst_set(skb, &rt->dst);
        nf_reset(skb);
        skb->dev = skb_dst(skb)->dev;

        tstats = this_cpu_ptr(dev->tstats);
        VTI_XMIT(tstats, &dev->stats);
        return NETDEV_TX_OK;

tx_error_icmp:
        dst_link_failure(skb);
tx_error:
        dev->stats.tx_errors++;
        dev_kfree_skb(skb);
        return NETDEV_TX_OK;
}
static int vti_tunnel_bind_dev(struct net_device *dev)
{
        struct net_device *tdev = NULL;
        struct ip_tunnel *tunnel;
        struct iphdr *iph;

        tunnel = netdev_priv(dev);
        iph = &tunnel->parms.iph;

        if (iph->daddr) {
                struct rtable *rt;
                struct flowi4 fl4;

                memset(&fl4, 0, sizeof(fl4));
                flowi4_init_output(&fl4, tunnel->parms.link,
                                   be32_to_cpu(tunnel->parms.i_key),
                                   RT_TOS(iph->tos), RT_SCOPE_UNIVERSE,
                                   IPPROTO_IPIP, 0,
                                   iph->daddr, iph->saddr, 0, 0);
                rt = ip_route_output_key(dev_net(dev), &fl4);
                if (!IS_ERR(rt)) {
                        tdev = rt->dst.dev;
                        ip_rt_put(rt);
                }
                dev->flags |= IFF_POINTOPOINT;
        }

        if (!tdev && tunnel->parms.link)
                tdev = __dev_get_by_index(dev_net(dev), tunnel->parms.link);

        if (tdev) {
                dev->hard_header_len = tdev->hard_header_len +
                                       sizeof(struct iphdr);
                dev->mtu = tdev->mtu;
        }
        dev->iflink = tunnel->parms.link;
        return dev->mtu;
}
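/*
 * Legacy ioctl management interface (SIOCGETTUNNEL, SIOCADDTUNNEL,
 * SIOCCHGTUNNEL, SIOCDELTUNNEL), kept alongside the netlink interface
 * below.  GRE_KEY | VTI_ISVTI is reported in the flags so userspace
 * can distinguish a vti tunnel from a plain ipip one.
 */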
static int
vti_tunnel_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
        int err = 0;
        struct ip_tunnel_parm p;
        struct ip_tunnel *t;
        struct net *net = dev_net(dev);
        struct vti_net *ipn = net_generic(net, vti_net_id);

        switch (cmd) {
        case SIOCGETTUNNEL:
                t = NULL;
                if (dev == ipn->fb_tunnel_dev) {
                        if (copy_from_user(&p, ifr->ifr_ifru.ifru_data,
                                           sizeof(p))) {
                                err = -EFAULT;
                                break;
                        }
                        t = vti_tunnel_locate(net, &p, 0);
                }
                if (t == NULL)
                        t = netdev_priv(dev);
                memcpy(&p, &t->parms, sizeof(p));
                p.i_flags |= GRE_KEY | VTI_ISVTI;
                p.o_flags |= GRE_KEY;
                if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p)))
                        err = -EFAULT;
                break;

        case SIOCADDTUNNEL:
        case SIOCCHGTUNNEL:
                err = -EPERM;
                if (!capable(CAP_NET_ADMIN))
                        goto done;

                err = -EFAULT;
                if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))
                        goto done;

                err = -EINVAL;
                if (p.iph.version != 4 || p.iph.protocol != IPPROTO_IPIP ||
                    p.iph.ihl != 5)
                        goto done;

                t = vti_tunnel_locate(net, &p, cmd == SIOCADDTUNNEL);

                if (dev != ipn->fb_tunnel_dev && cmd == SIOCCHGTUNNEL) {
                        if (t != NULL) {
                                if (t->dev != dev) {
                                        err = -EEXIST;
                                        break;
                                }
                        } else {
                                if (((dev->flags&IFF_POINTOPOINT) &&
                                     !p.iph.daddr) ||
                                    (!(dev->flags&IFF_POINTOPOINT) &&
                                     p.iph.daddr)) {
                                        err = -EINVAL;
                                        break;
                                }
                                t = netdev_priv(dev);
                                vti_tunnel_unlink(ipn, t);
                                synchronize_net();
                                t->parms.iph.saddr = p.iph.saddr;
                                t->parms.iph.daddr = p.iph.daddr;
                                t->parms.i_key = p.i_key;
                                t->parms.o_key = p.o_key;
                                t->parms.iph.protocol = IPPROTO_IPIP;
                                memcpy(dev->dev_addr, &p.iph.saddr, 4);
                                memcpy(dev->broadcast, &p.iph.daddr, 4);
                                vti_tunnel_link(ipn, t);
                                netdev_state_change(dev);
                        }
                }

                if (t) {
                        err = 0;
                        if (cmd == SIOCCHGTUNNEL) {
                                t->parms.i_key = p.i_key;
                                t->parms.o_key = p.o_key;
                                if (t->parms.link != p.link) {
                                        t->parms.link = p.link;
                                        vti_tunnel_bind_dev(dev);
                                        netdev_state_change(dev);
                                }
                        }
                        /* report the effective parms, including the
                         * flag bits set just below */
                        memcpy(&p, &t->parms, sizeof(p));
                        p.i_flags |= GRE_KEY | VTI_ISVTI;
                        p.o_flags |= GRE_KEY;
                        if (copy_to_user(ifr->ifr_ifru.ifru_data, &p,
                                         sizeof(p)))
                                err = -EFAULT;
                } else
                        err = (cmd == SIOCADDTUNNEL ? -ENOBUFS : -ENOENT);
                break;

        case SIOCDELTUNNEL:
                err = -EPERM;
                if (!capable(CAP_NET_ADMIN))
                        goto done;

                if (dev == ipn->fb_tunnel_dev) {
                        err = -EFAULT;
                        if (copy_from_user(&p, ifr->ifr_ifru.ifru_data,
                                           sizeof(p)))
                                goto done;
                        err = -ENOENT;

                        t = vti_tunnel_locate(net, &p, 0);
                        if (t == NULL)
                                goto done;
                        err = -EPERM;
                        if (t->dev == ipn->fb_tunnel_dev)
                                goto done;
                        dev = t->dev;
                }
                unregister_netdevice(dev);
                err = 0;
                break;

        default:
                err = -EINVAL;
        }

done:
        return err;
}
static int vti_tunnel_change_mtu(struct net_device *dev, int new_mtu)
{
        if (new_mtu < 68 || new_mtu > 0xFFF8)
                return -EINVAL;
        dev->mtu = new_mtu;
        return 0;
}
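/* Device callbacks wired into every vti netdevice by vti_tunnel_setup(). */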
static const struct net_device_ops vti_netdev_ops = {
        .ndo_init       = vti_tunnel_init,
        .ndo_uninit     = vti_tunnel_uninit,
        .ndo_start_xmit = vti_tunnel_xmit,
        .ndo_do_ioctl   = vti_tunnel_ioctl,
        .ndo_change_mtu = vti_tunnel_change_mtu,
        .ndo_get_stats64 = vti_get_stats64,
};
static void vti_dev_free(struct net_device *dev)
{
        free_percpu(dev->tstats);
        free_netdev(dev);
}
static void vti_tunnel_setup(struct net_device *dev)
{
        dev->netdev_ops         = &vti_netdev_ops;
        dev->destructor         = vti_dev_free;

        dev->type               = ARPHRD_TUNNEL;
        dev->hard_header_len    = LL_MAX_HEADER + sizeof(struct iphdr);
        dev->mtu                = ETH_DATA_LEN;
        dev->flags              = IFF_NOARP;
        dev->iflink             = 0;
        dev->addr_len           = 4;
        dev->features           |= NETIF_F_NETNS_LOCAL;
        dev->features           |= NETIF_F_LLTX;
        dev->priv_flags         &= ~IFF_XMIT_DST_RELEASE;
}
static int vti_tunnel_init(struct net_device *dev)
{
        struct ip_tunnel *tunnel = netdev_priv(dev);

        tunnel->dev = dev;
        strcpy(tunnel->parms.name, dev->name);

        memcpy(dev->dev_addr, &tunnel->parms.iph.saddr, 4);
        memcpy(dev->broadcast, &tunnel->parms.iph.daddr, 4);

        dev->tstats = alloc_percpu(struct pcpu_tstats);
        if (!dev->tstats)
                return -ENOMEM;

        return 0;
}
static int __net_init vti_fb_tunnel_init(struct net_device *dev)
{
        struct ip_tunnel *tunnel = netdev_priv(dev);
        struct iphdr *iph = &tunnel->parms.iph;
        struct vti_net *ipn = net_generic(dev_net(dev), vti_net_id);

        tunnel->dev = dev;
        strcpy(tunnel->parms.name, dev->name);

        iph->version = 4;
        iph->protocol = IPPROTO_IPIP;
        iph->ihl = 5;

        dev->tstats = alloc_percpu(struct pcpu_tstats);
        if (!dev->tstats)
                return -ENOMEM;

        dev_hold(dev);
        rcu_assign_pointer(ipn->tunnels_wc[0], tunnel);
        return 0;
}
static struct xfrm_tunnel vti_handler __read_mostly = {
        .handler        = vti_rcv,
        .err_handler    = vti_err,
        .priority       = 1,
};
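/*
 * Queue all tunnel devices for unregistration.  prio starts at 1
 * because the wildcard table (prio 0) only holds the fallback device,
 * which is cleaned up with the other rtnl_link devices when the
 * namespace goes away.
 */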
static void vti_destroy_tunnels(struct vti_net *ipn, struct list_head *head)
{
        int prio;

        for (prio = 1; prio < 4; prio++) {
                int h;
                for (h = 0; h < HASH_SIZE; h++) {
                        struct ip_tunnel *t;

                        t = rtnl_dereference(ipn->tunnels[prio][h]);
                        while (t != NULL) {
                                unregister_netdevice_queue(t->dev, head);
                                t = rtnl_dereference(t->next);
                        }
                }
        }
}
static int __net_init vti_init_net(struct net *net)
{
        int err;
        struct vti_net *ipn = net_generic(net, vti_net_id);

        ipn->tunnels[0] = ipn->tunnels_wc;
        ipn->tunnels[1] = ipn->tunnels_l;
        ipn->tunnels[2] = ipn->tunnels_r;
        ipn->tunnels[3] = ipn->tunnels_r_l;

        ipn->fb_tunnel_dev = alloc_netdev(sizeof(struct ip_tunnel),
                                          "ip_vti0",
                                          vti_tunnel_setup);
        if (!ipn->fb_tunnel_dev) {
                err = -ENOMEM;
                goto err_alloc_dev;
        }
        dev_net_set(ipn->fb_tunnel_dev, net);

        err = vti_fb_tunnel_init(ipn->fb_tunnel_dev);
        if (err)
                goto err_reg_dev;
        ipn->fb_tunnel_dev->rtnl_link_ops = &vti_link_ops;

        err = register_netdev(ipn->fb_tunnel_dev);
        if (err)
                goto err_reg_dev;
        return 0;

err_reg_dev:
        vti_dev_free(ipn->fb_tunnel_dev);
err_alloc_dev:
        return err;
}
static void __net_exit vti_exit_net(struct net *net)
{
        struct vti_net *ipn = net_generic(net, vti_net_id);
        LIST_HEAD(list);

        rtnl_lock();
        vti_destroy_tunnels(ipn, &list);
        unregister_netdevice_many(&list);
        rtnl_unlock();
}
static struct pernet_operations vti_net_ops = {
        .init = vti_init_net,
        .exit = vti_exit_net,
        .id   = &vti_net_id,
        .size = sizeof(struct vti_net),
};
static int vti_tunnel_validate(struct nlattr *tb[], struct nlattr *data[])
{
        return 0;
}
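/*
 * Extract tunnel parameters from netlink attributes.  All attributes
 * are optional; unset fields stay zero, and the protocol is pinned to
 * IPPROTO_IPIP so lookups match what the xfrm input path delivers.
 */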
static void vti_netlink_parms(struct nlattr *data[],
                              struct ip_tunnel_parm *parms)
{
        memset(parms, 0, sizeof(*parms));

        parms->iph.protocol = IPPROTO_IPIP;

        if (!data)
                return;

        if (data[IFLA_VTI_LINK])
                parms->link = nla_get_u32(data[IFLA_VTI_LINK]);

        if (data[IFLA_VTI_IKEY])
                parms->i_key = nla_get_be32(data[IFLA_VTI_IKEY]);

        if (data[IFLA_VTI_OKEY])
                parms->o_key = nla_get_be32(data[IFLA_VTI_OKEY]);

        if (data[IFLA_VTI_LOCAL])
                parms->iph.saddr = nla_get_be32(data[IFLA_VTI_LOCAL]);

        if (data[IFLA_VTI_REMOTE])
                parms->iph.daddr = nla_get_be32(data[IFLA_VTI_REMOTE]);
}
static int vti_newlink(struct net *src_net, struct net_device *dev,
                       struct nlattr *tb[], struct nlattr *data[])
{
        struct ip_tunnel *nt;
        struct net *net = dev_net(dev);
        struct vti_net *ipn = net_generic(net, vti_net_id);
        int mtu;
        int err;

        nt = netdev_priv(dev);
        vti_netlink_parms(data, &nt->parms);

        if (vti_tunnel_locate(net, &nt->parms, 0))
                return -EEXIST;

        mtu = vti_tunnel_bind_dev(dev);
        if (!tb[IFLA_MTU])
                dev->mtu = mtu;

        err = register_netdevice(dev);
        if (err)
                goto out;

        dev_hold(dev);
        vti_tunnel_link(ipn, nt);

out:
        return err;
}
static int vti_changelink(struct net_device *dev, struct nlattr *tb[],
                          struct nlattr *data[])
{
        struct ip_tunnel *t, *nt;
        struct net *net = dev_net(dev);
        struct vti_net *ipn = net_generic(net, vti_net_id);
        struct ip_tunnel_parm p;
        int mtu;

        if (dev == ipn->fb_tunnel_dev)
                return -EINVAL;

        nt = netdev_priv(dev);
        vti_netlink_parms(data, &p);

        t = vti_tunnel_locate(net, &p, 0);

        if (t) {
                if (t->dev != dev)
                        return -EEXIST;
        } else {
                t = nt;

                vti_tunnel_unlink(ipn, t);
                t->parms.iph.saddr = p.iph.saddr;
                t->parms.iph.daddr = p.iph.daddr;
                t->parms.i_key = p.i_key;
                t->parms.o_key = p.o_key;
                if (dev->type != ARPHRD_ETHER) {
                        memcpy(dev->dev_addr, &p.iph.saddr, 4);
                        memcpy(dev->broadcast, &p.iph.daddr, 4);
                }
                vti_tunnel_link(ipn, t);
                netdev_state_change(dev);
        }

        if (t->parms.link != p.link) {
                t->parms.link = p.link;
                mtu = vti_tunnel_bind_dev(dev);
                if (!tb[IFLA_MTU])
                        dev->mtu = mtu;
                netdev_state_change(dev);
        }

        return 0;
}
static size_t vti_get_size(const struct net_device *dev)
{
        return
                /* IFLA_VTI_LINK */
                nla_total_size(4) +
                /* IFLA_VTI_IKEY */
                nla_total_size(4) +
                /* IFLA_VTI_OKEY */
                nla_total_size(4) +
                /* IFLA_VTI_LOCAL */
                nla_total_size(4) +
                /* IFLA_VTI_REMOTE */
                nla_total_size(4) +
                0;
}
static int vti_fill_info(struct sk_buff *skb, const struct net_device *dev)
{
        struct ip_tunnel *t = netdev_priv(dev);
        struct ip_tunnel_parm *p = &t->parms;

        nla_put_u32(skb, IFLA_VTI_LINK, p->link);
        nla_put_be32(skb, IFLA_VTI_IKEY, p->i_key);
        nla_put_be32(skb, IFLA_VTI_OKEY, p->o_key);
        nla_put_be32(skb, IFLA_VTI_LOCAL, p->iph.saddr);
        nla_put_be32(skb, IFLA_VTI_REMOTE, p->iph.daddr);

        return 0;
}
static const struct nla_policy vti_policy[IFLA_VTI_MAX + 1] = {
        [IFLA_VTI_LINK]   = { .type = NLA_U32 },
        [IFLA_VTI_IKEY]   = { .type = NLA_U32 },
        [IFLA_VTI_OKEY]   = { .type = NLA_U32 },
        [IFLA_VTI_LOCAL]  = { .len = FIELD_SIZEOF(struct iphdr, saddr) },
        [IFLA_VTI_REMOTE] = { .len = FIELD_SIZEOF(struct iphdr, daddr) },
};
static struct rtnl_link_ops vti_link_ops __read_mostly = {
        .kind           = "vti",
        .maxtype        = IFLA_VTI_MAX,
        .policy         = vti_policy,
        .priv_size      = sizeof(struct ip_tunnel),
        .setup          = vti_tunnel_setup,
        .validate       = vti_tunnel_validate,
        .newlink        = vti_newlink,
        .changelink     = vti_changelink,
        .get_size       = vti_get_size,
        .fill_info      = vti_fill_info,
};
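/*
 * Registration order: per-net state first, then the xfrm tunnel input
 * hook, and finally the netlink link type; each failure path unwinds
 * the steps before it.
 */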
static int __init vti_init(void)
{
        int err;

        pr_info("IPv4 over IPSec tunneling driver\n");

        err = register_pernet_device(&vti_net_ops);
        if (err < 0)
                return err;
        err = xfrm4_mode_tunnel_input_register(&vti_handler);
        if (err < 0) {
                unregister_pernet_device(&vti_net_ops);
                pr_info("vti init: can't register tunnel\n");
                return err;
        }
        err = rtnl_link_register(&vti_link_ops);
        if (err < 0)
                goto rtnl_link_failed;

        return err;

rtnl_link_failed:
        xfrm4_mode_tunnel_input_deregister(&vti_handler);
        unregister_pernet_device(&vti_net_ops);
        return err;
}
static void __exit vti_fini(void)
{
        rtnl_link_unregister(&vti_link_ops);
        if (xfrm4_mode_tunnel_input_deregister(&vti_handler))
                pr_info("vti close: can't deregister tunnel\n");

        unregister_pernet_device(&vti_net_ops);
}
module_init(vti_init);
module_exit(vti_fini);
MODULE_LICENSE("GPL");
MODULE_ALIAS_RTNL_LINK("vti");
MODULE_ALIAS_NETDEV("ip_vti0");