/*
 * vrf.c: device driver to encapsulate a VRF space
 *
 * Copyright (c) 2015 Cumulus Networks. All rights reserved.
 * Copyright (c) 2015 Shrijeet Mukherjee <shm@cumulusnetworks.com>
 * Copyright (c) 2015 David Ahern <dsa@cumulusnetworks.com>
 *
 * Based on dummy, team and ipvlan drivers
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */
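/* Illustrative usage sketch, not part of the driver source (device and
 * table names below are examples only): a VRF device is created with an
 * associated routing table, brought up, and ports are enslaved to it:
 *
 *   ip link add vrf-blue type vrf table 10
 *   ip link set dev vrf-blue up
 *   ip link set dev eth0 master vrf-blue
 *
 * Routes for enslaved ports then live in table 10, and traffic sent
 * through vrf-blue is looked up in table 10.
 */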
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ip.h>
#include <linux/init.h>
#include <linux/moduleparam.h>
#include <linux/netfilter.h>
#include <linux/rtnetlink.h>
#include <net/rtnetlink.h>
#include <linux/u64_stats_sync.h>
#include <linux/hashtable.h>

#include <linux/inetdevice.h>
#include <net/ip.h>
#include <net/ip_fib.h>
#include <net/ip6_route.h>
#include <net/route.h>
#include <net/addrconf.h>
#include <net/vrf.h>
#define DRV_NAME	"vrf"
#define DRV_VERSION	"1.0"

#define vrf_is_slave(dev)   ((dev)->flags & IFF_SLAVE)

#define vrf_master_get_rcu(dev) \
	((struct net_device *)rcu_dereference(dev->rx_handler_data))
/* per-cpu tx/rx counters for the VRF device; all fields below are
 * referenced by vrf_get_stats64() and the rx/tx paths
 */
struct pcpu_dstats {
	u64			tx_pkts;
	u64			tx_bytes;
	u64			tx_drps;
	u64			rx_pkts;
	u64			rx_bytes;
	struct u64_stats_sync	syncp;
};
static struct dst_entry *vrf_ip_check(struct dst_entry *dst, u32 cookie)
{
	return dst;
}

static int vrf_ip_local_out(struct sk_buff *skb)
{
	return ip_local_out(skb);
}

static unsigned int vrf_v4_mtu(const struct dst_entry *dst)
{
	/* TO-DO: return max ethernet size? */
	return dst->dev->mtu;
}

static void vrf_dst_destroy(struct dst_entry *dst)
{
	/* our dst lives forever - or until the device is closed */
}

static unsigned int vrf_default_advmss(const struct dst_entry *dst)
{
	/* max IPv4 packet size minus IP and TCP headers */
	return 65535 - 40;
}

static struct dst_ops vrf_dst_ops = {
	.family		= AF_INET,
	.local_out	= vrf_ip_local_out,
	.check		= vrf_ip_check,
	.mtu		= vrf_v4_mtu,
	.destroy	= vrf_dst_destroy,
	.default_advmss	= vrf_default_advmss,
};
static bool is_ip_rx_frame(struct sk_buff *skb)
{
	switch (skb->protocol) {
	case htons(ETH_P_IP):
	case htons(ETH_P_IPV6):
		return true;
	}
	return false;
}

/* note: already called with rcu_read_lock */
static rx_handler_result_t vrf_handle_frame(struct sk_buff **pskb)
{
	struct sk_buff *skb = *pskb;

	if (is_ip_rx_frame(skb)) {
		struct net_device *dev = vrf_master_get_rcu(skb->dev);
		struct pcpu_dstats *dstats = this_cpu_ptr(dev->dstats);

		u64_stats_update_begin(&dstats->syncp);
		dstats->rx_pkts++;
		dstats->rx_bytes += skb->len;
		u64_stats_update_end(&dstats->syncp);

		/* hand the frame over to the VRF master device */
		skb->dev = dev;

		return RX_HANDLER_ANOTHER;
	}
	return RX_HANDLER_PASS;
}
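/* Since skb->dev is retargeted before returning RX_HANDLER_ANOTHER,
 * receive processing is re-run as if the frame arrived on the VRF
 * device itself; an illustrative consequence (device name is an
 * example only) is that taps on the master also see slave traffic:
 *
 *   tcpdump -n -i vrf-blue
 */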
static struct rtnl_link_stats64 *vrf_get_stats64(struct net_device *dev,
						 struct rtnl_link_stats64 *stats)
{
	int i;

	for_each_possible_cpu(i) {
		const struct pcpu_dstats *dstats;
		u64 tbytes, tpkts, tdrops, rbytes, rpkts;
		unsigned int start;

		dstats = per_cpu_ptr(dev->dstats, i);
		do {
			start = u64_stats_fetch_begin_irq(&dstats->syncp);
			tbytes = dstats->tx_bytes;
			tpkts = dstats->tx_pkts;
			tdrops = dstats->tx_drps;
			rbytes = dstats->rx_bytes;
			rpkts = dstats->rx_pkts;
		} while (u64_stats_fetch_retry_irq(&dstats->syncp, start));
		stats->tx_bytes += tbytes;
		stats->tx_packets += tpkts;
		stats->tx_dropped += tdrops;
		stats->rx_bytes += rbytes;
		stats->rx_packets += rpkts;
	}
	return stats;
}
static netdev_tx_t vrf_process_v6_outbound(struct sk_buff *skb,
					   struct net_device *dev)
{
	/* IPv6 is not handled yet; drop the frame rather than leak it */
	dev->stats.tx_errors++;
	kfree_skb(skb);
	return NET_XMIT_DROP;
}
static int vrf_send_v4_prep(struct sk_buff *skb, struct flowi4 *fl4,
			    struct net_device *vrf_dev)
{
	struct rtable *rt;
	int err = 1;

	rt = ip_route_output_flow(dev_net(vrf_dev), fl4, NULL);
	if (IS_ERR(rt))
		goto out;

	/* TO-DO: what about broadcast ? */
	if (rt->rt_type != RTN_UNICAST && rt->rt_type != RTN_LOCAL) {
		ip_rt_put(rt);
		goto out;
	}

	skb_dst_drop(skb);
	skb_dst_set(skb, &rt->dst);
	err = 0;
out:
	return err;
}
static netdev_tx_t vrf_process_v4_outbound(struct sk_buff *skb,
					   struct net_device *vrf_dev)
{
	struct iphdr *ip4h = ip_hdr(skb);
	int ret = NET_XMIT_DROP;
	struct flowi4 fl4 = {
		/* needed to match OIF rule */
		.flowi4_oif = vrf_dev->ifindex,
		.flowi4_iif = LOOPBACK_IFINDEX,
		.flowi4_tos = RT_TOS(ip4h->tos),
		.flowi4_flags = FLOWI_FLAG_ANYSRC | FLOWI_FLAG_VRFSRC,
		.daddr = ip4h->daddr,
	};

	if (vrf_send_v4_prep(skb, &fl4, vrf_dev))
		goto err;

	if (!ip4h->saddr) {
		ip4h->saddr = inet_select_addr(skb_dst(skb)->dev, 0,
					       RT_SCOPE_LINK);
	}

	ret = ip_local_out(skb);
	if (unlikely(net_xmit_eval(ret)))
		vrf_dev->stats.tx_errors++;
	else
		ret = NET_XMIT_SUCCESS;
out:
	return ret;
err:
	vrf_dev->stats.tx_errors++;
	kfree_skb(skb);
	goto out;
}
static netdev_tx_t is_ip_tx_frame(struct sk_buff *skb, struct net_device *dev)
{
	switch (skb->protocol) {
	case htons(ETH_P_IP):
		return vrf_process_v4_outbound(skb, dev);
	case htons(ETH_P_IPV6):
		return vrf_process_v6_outbound(skb, dev);
	default:
		/* free unhandled frames instead of leaking them */
		kfree_skb(skb);
		return NET_XMIT_DROP;
	}
}
static netdev_tx_t vrf_xmit(struct sk_buff *skb, struct net_device *dev)
{
	/* the xmit path may consume the skb; save len for stats */
	unsigned int len = skb->len;
	netdev_tx_t ret = is_ip_tx_frame(skb, dev);

	if (likely(ret == NET_XMIT_SUCCESS || ret == NET_XMIT_CN)) {
		struct pcpu_dstats *dstats = this_cpu_ptr(dev->dstats);

		u64_stats_update_begin(&dstats->syncp);
		dstats->tx_pkts++;
		dstats->tx_bytes += len;
		u64_stats_update_end(&dstats->syncp);
	} else {
		this_cpu_inc(dev->dstats->tx_drps);
	}

	return ret;
}
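/* Illustrative userspace counterpart, not part of the driver (device
 * name is an example only): an application sends through a VRF by
 * binding its socket to the VRF device, so the flowi4_oif set above
 * matches the VRF's oif rule:
 *
 *   setsockopt(fd, SOL_SOCKET, SO_BINDTODEVICE,
 *              "vrf-blue", sizeof("vrf-blue"));
 */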
static netdev_tx_t vrf_finish(struct sock *sk, struct sk_buff *skb)
{
	return dev_queue_xmit(skb);
}

static int vrf_output(struct sock *sk, struct sk_buff *skb)
{
	struct net_device *dev = skb_dst(skb)->dev;

	IP_UPD_PO_STATS(dev_net(dev), IPSTATS_MIB_OUT, skb->len);

	skb->dev = dev;
	skb->protocol = htons(ETH_P_IP);

	return NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING, sk, skb,
			    NULL, dev,
			    vrf_finish,
			    !(IPCB(skb)->flags & IPSKB_REROUTED));
}
static void vrf_rtable_destroy(struct net_vrf *vrf)
{
	struct dst_entry *dst = (struct dst_entry *)vrf->rth;

	if (dst)
		dst_destroy(dst);
	vrf->rth = NULL;
}

static struct rtable *vrf_rtable_create(struct net_device *dev)
{
	struct rtable *rth;

	rth = dst_alloc(&vrf_dst_ops, dev, 2,
			DST_OBSOLETE_NONE,
			(DST_HOST | DST_NOPOLICY | DST_NOXFRM));
	if (rth) {
		rth->dst.output	= vrf_output;
		rth->rt_genid	= rt_genid_ipv4(dev_net(dev));
		rth->rt_flags	= 0;
		rth->rt_type	= RTN_UNICAST;
		rth->rt_is_input = 0;
		rth->rt_iif	= 0;
		rth->rt_pmtu	= 0;
		rth->rt_gateway	= 0;
		rth->rt_uses_gateway = 0;
		INIT_LIST_HEAD(&rth->rt_uncached);
		rth->rt_uncached_list = NULL;
		rth->rt_lwtstate = NULL;
	}

	return rth;
}
/**************************** device handling ********************/

/* cycle interface to flush neighbor cache and move routes across tables */
static void cycle_netdev(struct net_device *dev)
{
	unsigned int flags = dev->flags;
	int ret;

	if (!netif_running(dev))
		return;

	ret = dev_change_flags(dev, flags & ~IFF_UP);
	if (ret >= 0)
		ret = dev_change_flags(dev, flags);

	if (ret < 0) {
		netdev_err(dev,
			   "Failed to cycle device %s; route tables might be wrong!\n",
			   dev->name);
	}
}
static struct slave *__vrf_find_slave_dev(struct slave_queue *queue,
					  struct net_device *dev)
{
	struct list_head *head = &queue->all_slaves;
	struct slave *slave;

	list_for_each_entry(slave, head, list) {
		if (slave->dev == dev)
			return slave;
	}

	return NULL;
}

/* inverse of __vrf_insert_slave */
static void __vrf_remove_slave(struct slave_queue *queue, struct slave *slave)
{
	dev_put(slave->dev);
	list_del(&slave->list);
	queue->num_slaves--;
}

static void __vrf_insert_slave(struct slave_queue *queue, struct slave *slave)
{
	dev_hold(slave->dev);
	list_add(&slave->list, &queue->all_slaves);
	queue->num_slaves++;
}
static int do_vrf_add_slave(struct net_device *dev, struct net_device *port_dev)
{
	struct net_vrf_dev *vrf_ptr = kmalloc(sizeof(*vrf_ptr), GFP_KERNEL);
	struct slave *slave = kzalloc(sizeof(*slave), GFP_KERNEL);
	struct slave *duplicate_slave;
	struct net_vrf *vrf = netdev_priv(dev);
	struct slave_queue *queue = &vrf->queue;
	int ret = -ENOMEM;

	if (!slave || !vrf_ptr)
		goto out_fail;

	slave->dev = port_dev;
	vrf_ptr->ifindex = dev->ifindex;
	vrf_ptr->tb_id = vrf->tb_id;

	/* refuse to enslave the same port twice */
	duplicate_slave = __vrf_find_slave_dev(queue, port_dev);
	if (duplicate_slave) {
		ret = -EBUSY;
		goto out_fail;
	}

	__vrf_insert_slave(queue, slave);

	/* register the packet handler for slave ports */
	ret = netdev_rx_handler_register(port_dev, vrf_handle_frame, dev);
	if (ret) {
		netdev_err(port_dev,
			   "Device %s failed to register rx_handler\n",
			   port_dev->name);
		goto out_remove;
	}

	ret = netdev_master_upper_dev_link(port_dev, dev);
	if (ret < 0)
		goto out_unregister;

	port_dev->flags |= IFF_SLAVE;
	rcu_assign_pointer(port_dev->vrf_ptr, vrf_ptr);
	cycle_netdev(port_dev);

	return 0;

out_unregister:
	netdev_rx_handler_unregister(port_dev);
out_remove:
	__vrf_remove_slave(queue, slave);
out_fail:
	kfree(vrf_ptr);
	kfree(slave);
	return ret;
}
static int vrf_add_slave(struct net_device *dev, struct net_device *port_dev)
{
	if (!netif_is_vrf(dev) || netif_is_vrf(port_dev) ||
	    vrf_is_slave(port_dev))
		return -EINVAL;

	return do_vrf_add_slave(dev, port_dev);
}
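/* Illustrative usage, not part of the driver (names are examples):
 * enslaving and releasing a port map onto ndo_add_slave/ndo_del_slave
 * via the generic master handling in rtnetlink:
 *
 *   ip link set dev eth0 master vrf-blue
 *   ip link set dev eth0 nomaster
 */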
/* inverse of do_vrf_add_slave */
static int do_vrf_del_slave(struct net_device *dev, struct net_device *port_dev)
{
	struct net_vrf_dev *vrf_ptr = rtnl_dereference(port_dev->vrf_ptr);
	struct net_vrf *vrf = netdev_priv(dev);
	struct slave_queue *queue = &vrf->queue;
	struct slave *slave;

	RCU_INIT_POINTER(port_dev->vrf_ptr, NULL);

	netdev_upper_dev_unlink(port_dev, dev);
	port_dev->flags &= ~IFF_SLAVE;

	netdev_rx_handler_unregister(port_dev);

	/* after netdev_rx_handler_unregister for synchronize_rcu */
	kfree_rcu(vrf_ptr, rcu);

	cycle_netdev(port_dev);

	slave = __vrf_find_slave_dev(queue, port_dev);
	if (slave)
		__vrf_remove_slave(queue, slave);

	kfree(slave);

	return 0;
}
static int vrf_del_slave(struct net_device *dev, struct net_device *port_dev)
{
	if (!netif_is_vrf(dev))
		return -EINVAL;

	return do_vrf_del_slave(dev, port_dev);
}
static void vrf_dev_uninit(struct net_device *dev)
{
	struct net_vrf *vrf = netdev_priv(dev);
	struct slave_queue *queue = &vrf->queue;
	struct list_head *head = &queue->all_slaves;
	struct slave *slave, *next;

	vrf_rtable_destroy(vrf);

	list_for_each_entry_safe(slave, next, head, list)
		vrf_del_slave(dev, slave->dev);

	free_percpu(dev->dstats);
	dev->dstats = NULL;
}
static int vrf_dev_init(struct net_device *dev)
{
	struct net_vrf *vrf = netdev_priv(dev);

	INIT_LIST_HEAD(&vrf->queue.all_slaves);

	dev->dstats = netdev_alloc_pcpu_stats(struct pcpu_dstats);
	if (!dev->dstats)
		goto out_nomem;

	/* create the default dst which points back to us */
	vrf->rth = vrf_rtable_create(dev);
	if (!vrf->rth)
		goto out_stats;

	dev->flags = IFF_MASTER | IFF_NOARP;

	return 0;

out_stats:
	free_percpu(dev->dstats);
	dev->dstats = NULL;
out_nomem:
	return -ENOMEM;
}
static const struct net_device_ops vrf_netdev_ops = {
	.ndo_init		= vrf_dev_init,
	.ndo_uninit		= vrf_dev_uninit,
	.ndo_start_xmit		= vrf_xmit,
	.ndo_get_stats64	= vrf_get_stats64,
	.ndo_add_slave		= vrf_add_slave,
	.ndo_del_slave		= vrf_del_slave,
};
static void vrf_get_drvinfo(struct net_device *dev,
			    struct ethtool_drvinfo *info)
{
	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
}

static const struct ethtool_ops vrf_ethtool_ops = {
	.get_drvinfo	= vrf_get_drvinfo,
};
static void vrf_setup(struct net_device *dev)
{
	ether_setup(dev);

	/* Initialize the device structure. */
	dev->netdev_ops = &vrf_netdev_ops;
	dev->ethtool_ops = &vrf_ethtool_ops;
	dev->destructor = free_netdev;

	/* Fill in device structure with ethernet-generic values. */
	eth_hw_addr_random(dev);

	/* don't acquire vrf device's netif_tx_lock when transmitting */
	dev->features |= NETIF_F_LLTX;

	/* don't allow vrf devices to change network namespaces. */
	dev->features |= NETIF_F_NETNS_LOCAL;
}
static int vrf_validate(struct nlattr *tb[], struct nlattr *data[])
{
	if (tb[IFLA_ADDRESS]) {
		if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
			return -EINVAL;
		if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
			return -EADDRNOTAVAIL;
	}
	return 0;
}
static void vrf_dellink(struct net_device *dev, struct list_head *head)
{
	struct net_vrf_dev *vrf_ptr = rtnl_dereference(dev->vrf_ptr);

	RCU_INIT_POINTER(dev->vrf_ptr, NULL);
	kfree_rcu(vrf_ptr, rcu);
	unregister_netdevice_queue(dev, head);
}
static int vrf_newlink(struct net *src_net, struct net_device *dev,
		       struct nlattr *tb[], struct nlattr *data[])
{
	struct net_vrf *vrf = netdev_priv(dev);
	struct net_vrf_dev *vrf_ptr;
	int err;

	if (!data || !data[IFLA_VRF_TABLE])
		return -EINVAL;

	vrf->tb_id = nla_get_u32(data[IFLA_VRF_TABLE]);

	dev->priv_flags |= IFF_VRF_MASTER;

	err = -ENOMEM;
	vrf_ptr = kmalloc(sizeof(*dev->vrf_ptr), GFP_KERNEL);
	if (!vrf_ptr)
		goto out_fail;

	vrf_ptr->ifindex = dev->ifindex;
	vrf_ptr->tb_id = vrf->tb_id;

	err = register_netdevice(dev);
	if (err < 0)
		goto out_fail;

	rcu_assign_pointer(dev->vrf_ptr, vrf_ptr);

	return 0;

out_fail:
	kfree(vrf_ptr);
	free_netdev(dev);
	return err;
}
static size_t vrf_nl_getsize(const struct net_device *dev)
{
	return nla_total_size(sizeof(u32));	/* IFLA_VRF_TABLE */
}

static int vrf_fillinfo(struct sk_buff *skb,
			const struct net_device *dev)
{
	struct net_vrf *vrf = netdev_priv(dev);

	return nla_put_u32(skb, IFLA_VRF_TABLE, vrf->tb_id);
}

static const struct nla_policy vrf_nl_policy[IFLA_VRF_MAX + 1] = {
	[IFLA_VRF_TABLE] = { .type = NLA_U32 },
};
static struct rtnl_link_ops vrf_link_ops __read_mostly = {
	.kind		= DRV_NAME,
	.priv_size	= sizeof(struct net_vrf),

	.get_size	= vrf_nl_getsize,
	.policy		= vrf_nl_policy,
	.validate	= vrf_validate,
	.fill_info	= vrf_fillinfo,

	.newlink	= vrf_newlink,
	.dellink	= vrf_dellink,
	.setup		= vrf_setup,
	.maxtype	= IFLA_VRF_MAX,
};
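/* Illustrative netlink usage (table number is an example only): the
 * IFLA_VRF_TABLE attribute validated by vrf_nl_policy is supplied at
 * link creation and reported back via vrf_fillinfo:
 *
 *   ip link add vrf-blue type vrf table 10
 *   ip -d link show dev vrf-blue
 */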
static int vrf_device_event(struct notifier_block *unused,
			    unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);

	/* only care about unregister events to drop slave references */
	if (event == NETDEV_UNREGISTER) {
		struct net_vrf_dev *vrf_ptr = rtnl_dereference(dev->vrf_ptr);
		struct net_device *vrf_dev;

		if (!vrf_ptr || netif_is_vrf(dev))
			goto out;

		vrf_dev = __dev_get_by_index(dev_net(dev), vrf_ptr->ifindex);
		if (vrf_dev)
			vrf_del_slave(vrf_dev, dev);
	}
out:
	return NOTIFY_DONE;
}

static struct notifier_block vrf_notifier_block __read_mostly = {
	.notifier_call = vrf_device_event,
};
static int __init vrf_init_module(void)
{
	int rc;

	vrf_dst_ops.kmem_cachep =
		kmem_cache_create("vrf_ip_dst_cache",
				  sizeof(struct rtable), 0,
				  SLAB_HWCACHE_ALIGN | SLAB_PANIC,
				  NULL);

	if (!vrf_dst_ops.kmem_cachep)
		return -ENOMEM;

	register_netdevice_notifier(&vrf_notifier_block);

	rc = rtnl_link_register(&vrf_link_ops);
	if (rc < 0)
		goto error;

	return 0;

error:
	unregister_netdevice_notifier(&vrf_notifier_block);
	kmem_cache_destroy(vrf_dst_ops.kmem_cachep);
	return rc;
}
static void __exit vrf_cleanup_module(void)
{
	rtnl_link_unregister(&vrf_link_ops);
	unregister_netdevice_notifier(&vrf_notifier_block);
	kmem_cache_destroy(vrf_dst_ops.kmem_cachep);
}

module_init(vrf_init_module);
module_exit(vrf_cleanup_module);

MODULE_AUTHOR("Shrijeet Mukherjee, David Ahern");
MODULE_DESCRIPTION("Device driver to instantiate VRF domains");
MODULE_LICENSE("GPL");
MODULE_ALIAS_RTNL_LINK(DRV_NAME);
MODULE_VERSION(DRV_VERSION);