/*
 * vrf.c: device driver to encapsulate a VRF space
 *
 * Copyright (c) 2015 Cumulus Networks. All rights reserved.
 * Copyright (c) 2015 Shrijeet Mukherjee <shm@cumulusnetworks.com>
 * Copyright (c) 2015 David Ahern <dsa@cumulusnetworks.com>
 *
 * Based on dummy, team and ipvlan drivers
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */
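/* A VRF device is created over netlink with a route table id; interfaces
 * are then enslaved to it. For example, with iproute2:
 *
 *     ip link add vrf-blue type vrf table 10
 *     ip link set dev eth1 master vrf-blue
 *
 * See vrf_newlink() for the IFLA_VRF_TABLE handling and vrf_add_slave()
 * for enslavement.
 */
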
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ip.h>
#include <linux/init.h>
#include <linux/moduleparam.h>
#include <linux/netfilter.h>
#include <linux/rtnetlink.h>
#include <net/rtnetlink.h>
#include <linux/u64_stats_sync.h>
#include <linux/hashtable.h>

#include <linux/inetdevice.h>
#include <net/arp.h>
#include <net/ip.h>
#include <net/ip_fib.h>
#include <net/ip6_fib.h>
#include <net/ip6_route.h>
#include <net/route.h>
#include <net/addrconf.h>
#include <net/l3mdev.h>
#define RT_FL_TOS(oldflp4) \
	((oldflp4)->flowi4_tos & (IPTOS_RT_MASK | RTO_ONLINK))

#define DRV_NAME	"vrf"
#define DRV_VERSION	"1.0"
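
/* The VRF master is stashed in the slave's rx_handler_data when the rx
 * handler is registered in do_vrf_add_slave(), so this may only be
 * dereferenced under rcu_read_lock.
 */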
#define vrf_master_get_rcu(dev) \
	((struct net_device *)rcu_dereference(dev->rx_handler_data))
struct slave {
	struct list_head	list;
	struct net_device	*dev;
};

struct slave_queue {
	struct list_head	all_slaves;
};

struct net_vrf {
	struct slave_queue	queue;
	struct rtable		*rth;
	struct rt6_info		*rt6;
	u32			tb_id;
};

struct pcpu_dstats {
	u64			tx_pkts;
	u64			tx_bytes;
	u64			tx_drps;
	u64			rx_pkts;
	u64			rx_bytes;
	struct u64_stats_sync	syncp;
};
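
/* Each VRF device pre-allocates one long-lived dst per address family
 * (vrf->rth and vrf->rt6) and hands it out from the l3mdev hooks below;
 * the following dst_ops exist mainly to keep that dst valid forever.
 */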
static struct dst_entry *vrf_ip_check(struct dst_entry *dst, u32 cookie)
{
	return dst;
}

static int vrf_ip_local_out(struct net *net, struct sock *sk,
			    struct sk_buff *skb)
{
	return ip_local_out(net, sk, skb);
}
static unsigned int vrf_v4_mtu(const struct dst_entry *dst)
{
	/* TO-DO: return max ethernet size? */
	return dst->dev->mtu;
}

static void vrf_dst_destroy(struct dst_entry *dst)
{
	/* our dst lives forever - or until the device is closed */
}

static unsigned int vrf_default_advmss(const struct dst_entry *dst)
{
	return 65535 - 40;
}
static struct dst_ops vrf_dst_ops = {
	.family		= AF_INET,
	.local_out	= vrf_ip_local_out,
	.check		= vrf_ip_check,
	.mtu		= vrf_v4_mtu,
	.destroy	= vrf_dst_destroy,
	.default_advmss	= vrf_default_advmss,
};
/* neighbor handling is done with actual device; do not want
 * to flip skb->dev for those ndisc packets. This really fails
 * for multiple next protocols (e.g., NEXTHDR_HOP). But it is
 * a start.
 */
#if IS_ENABLED(CONFIG_IPV6)
static bool check_ipv6_frame(const struct sk_buff *skb)
{
	const struct ipv6hdr *ipv6h;
	struct ipv6hdr _ipv6h;
	bool rc = true;

	ipv6h = skb_header_pointer(skb, 0, sizeof(_ipv6h), &_ipv6h);
	if (!ipv6h)
		goto out;

	if (ipv6h->nexthdr == NEXTHDR_ICMP) {
		const struct icmp6hdr *icmph;
		struct icmp6hdr _icmph;

		icmph = skb_header_pointer(skb, sizeof(_ipv6h),
					   sizeof(_icmph), &_icmph);
		if (!icmph)
			goto out;

		switch (icmph->icmp6_type) {
		case NDISC_ROUTER_SOLICITATION:
		case NDISC_ROUTER_ADVERTISEMENT:
		case NDISC_NEIGHBOUR_SOLICITATION:
		case NDISC_NEIGHBOUR_ADVERTISEMENT:
		case NDISC_REDIRECT:
			rc = false;
			break;
		}
	}

out:
	return rc;
}
#else
static bool check_ipv6_frame(const struct sk_buff *skb)
{
	return false;
}
#endif
static bool is_ip_rx_frame(struct sk_buff *skb)
{
	switch (skb->protocol) {
	case htons(ETH_P_IP):
		return true;
	case htons(ETH_P_IPV6):
		return check_ipv6_frame(skb);
	}
	return false;
}
static void vrf_tx_error(struct net_device *vrf_dev, struct sk_buff *skb)
{
	vrf_dev->stats.tx_errors++;
	kfree_skb(skb);
}
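
/* Returning RX_HANDLER_ANOTHER after re-pointing skb->dev makes the core
 * receive path process the packet again, as if it had arrived on the VRF
 * device itself.
 */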
/* note: already called with rcu_read_lock */
static rx_handler_result_t vrf_handle_frame(struct sk_buff **pskb)
{
	struct sk_buff *skb = *pskb;

	if (is_ip_rx_frame(skb)) {
		struct net_device *dev = vrf_master_get_rcu(skb->dev);
		struct pcpu_dstats *dstats = this_cpu_ptr(dev->dstats);

		u64_stats_update_begin(&dstats->syncp);
		dstats->rx_pkts++;
		dstats->rx_bytes += skb->len;
		u64_stats_update_end(&dstats->syncp);

		skb->dev = dev;

		return RX_HANDLER_ANOTHER;
	}
	return RX_HANDLER_PASS;
}
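
/* The fetch_begin/retry pair below takes a consistent snapshot of the
 * per-cpu counters: on 64-bit hosts the sync is compiled away, on 32-bit
 * hosts the read is retried if a writer was mid-update.
 */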
static struct rtnl_link_stats64 *vrf_get_stats64(struct net_device *dev,
						 struct rtnl_link_stats64 *stats)
{
	int i;

	for_each_possible_cpu(i) {
		const struct pcpu_dstats *dstats;
		u64 tbytes, tpkts, tdrops, rbytes, rpkts;
		unsigned int start;

		dstats = per_cpu_ptr(dev->dstats, i);
		do {
			start = u64_stats_fetch_begin_irq(&dstats->syncp);
			tbytes = dstats->tx_bytes;
			tpkts = dstats->tx_pkts;
			tdrops = dstats->tx_drps;
			rbytes = dstats->rx_bytes;
			rpkts = dstats->rx_pkts;
		} while (u64_stats_fetch_retry_irq(&dstats->syncp, start));
		stats->tx_bytes += tbytes;
		stats->tx_packets += tpkts;
		stats->tx_dropped += tdrops;
		stats->rx_bytes += rbytes;
		stats->rx_packets += rpkts;
	}
	return stats;
}
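
/* TX path: setting flowi*_oif to the VRF ifindex makes the l3mdev FIB
 * rule match, so the route lookup runs in the VRF's table.
 * FLOWI_FLAG_L3MDEV_SRC marks the flow as already VRF-originated so that
 * vrf_get_rtable()/vrf_get_rt6_dst() below do not hand back the VRF's
 * own dst and loop the packet.
 */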
#if IS_ENABLED(CONFIG_IPV6)
static netdev_tx_t vrf_process_v6_outbound(struct sk_buff *skb,
					   struct net_device *dev)
{
	const struct ipv6hdr *iph = ipv6_hdr(skb);
	struct net *net = dev_net(skb->dev);
	struct flowi6 fl6 = {
		/* needed to match OIF rule */
		.flowi6_oif = dev->ifindex,
		.flowi6_iif = LOOPBACK_IFINDEX,
		.daddr = iph->daddr,
		.saddr = iph->saddr,
		.flowlabel = ip6_flowinfo(iph),
		.flowi6_mark = skb->mark,
		.flowi6_proto = iph->nexthdr,
		.flowi6_flags = FLOWI_FLAG_L3MDEV_SRC | FLOWI_FLAG_SKIP_NH_OIF,
	};
	int ret = NET_XMIT_DROP;
	struct dst_entry *dst;
	struct dst_entry *dst_null = &net->ipv6.ip6_null_entry->dst;

	dst = ip6_route_output(net, NULL, &fl6);
	if (dst == dst_null)
		goto err;

	skb_dst_drop(skb);
	skb_dst_set(skb, dst);

	ret = ip6_local_out(net, skb->sk, skb);
	if (unlikely(net_xmit_eval(ret)))
		dev->stats.tx_errors++;
	else
		ret = NET_XMIT_SUCCESS;

	return ret;
err:
	vrf_tx_error(dev, skb);
	return NET_XMIT_DROP;
}
#else
static netdev_tx_t vrf_process_v6_outbound(struct sk_buff *skb,
					   struct net_device *dev)
{
	vrf_tx_error(dev, skb);
	return NET_XMIT_DROP;
}
#endif
static int vrf_send_v4_prep(struct sk_buff *skb, struct flowi4 *fl4,
			    struct net_device *vrf_dev)
{
	struct rtable *rt;
	int err = 1;

	rt = ip_route_output_flow(dev_net(vrf_dev), fl4, NULL);
	if (IS_ERR(rt))
		goto out;

	/* TO-DO: what about broadcast ? */
	if (rt->rt_type != RTN_UNICAST && rt->rt_type != RTN_LOCAL) {
		ip_rt_put(rt);
		goto out;
	}

	skb_dst_drop(skb);
	skb_dst_set(skb, &rt->dst);
	err = 0;
out:
	return err;
}
static netdev_tx_t vrf_process_v4_outbound(struct sk_buff *skb,
					   struct net_device *vrf_dev)
{
	struct iphdr *ip4h = ip_hdr(skb);
	int ret = NET_XMIT_DROP;
	struct flowi4 fl4 = {
		/* needed to match OIF rule */
		.flowi4_oif = vrf_dev->ifindex,
		.flowi4_iif = LOOPBACK_IFINDEX,
		.flowi4_tos = RT_TOS(ip4h->tos),
		.flowi4_flags = FLOWI_FLAG_ANYSRC | FLOWI_FLAG_L3MDEV_SRC |
				FLOWI_FLAG_SKIP_NH_OIF,
		.flowi4_proto = ip4h->protocol,
		.daddr = ip4h->daddr,
		.saddr = ip4h->saddr,
	};

	if (vrf_send_v4_prep(skb, &fl4, vrf_dev))
		goto err;

	if (!ip4h->saddr) {
		ip4h->saddr = inet_select_addr(skb_dst(skb)->dev, 0,
					       RT_SCOPE_LINK);
	}

	ret = ip_local_out(dev_net(skb_dst(skb)->dev), skb->sk, skb);
	if (unlikely(net_xmit_eval(ret)))
		vrf_dev->stats.tx_errors++;
	else
		ret = NET_XMIT_SUCCESS;

out:
	return ret;
err:
	vrf_tx_error(vrf_dev, skb);
	goto out;
}
static netdev_tx_t is_ip_tx_frame(struct sk_buff *skb, struct net_device *dev)
{
	/* strip the ethernet header added for pass through VRF device */
	__skb_pull(skb, skb_network_offset(skb));

	switch (skb->protocol) {
	case htons(ETH_P_IP):
		return vrf_process_v4_outbound(skb, dev);
	case htons(ETH_P_IPV6):
		return vrf_process_v6_outbound(skb, dev);
	default:
		vrf_tx_error(dev, skb);
		return NET_XMIT_DROP;
	}
}
static netdev_tx_t vrf_xmit(struct sk_buff *skb, struct net_device *dev)
{
	int len = skb->len;	/* read before xmit; skb may be freed */
	netdev_tx_t ret = is_ip_tx_frame(skb, dev);

	if (likely(ret == NET_XMIT_SUCCESS || ret == NET_XMIT_CN)) {
		struct pcpu_dstats *dstats = this_cpu_ptr(dev->dstats);

		u64_stats_update_begin(&dstats->syncp);
		dstats->tx_pkts++;
		dstats->tx_bytes += len;
		u64_stats_update_end(&dstats->syncp);
	} else {
		this_cpu_inc(dev->dstats->tx_drps);
	}

	return ret;
}
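
/* IPv6 counterparts of the dst plumbing above. dst_alloc() carves dsts
 * out of ops->kmem_cachep, so the IPv6 ops need their own cache sized
 * for the larger struct rt6_info.
 */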
#if IS_ENABLED(CONFIG_IPV6)
static struct dst_entry *vrf_ip6_check(struct dst_entry *dst, u32 cookie)
{
	return dst;
}

static struct dst_ops vrf_dst_ops6 = {
	.family		= AF_INET6,
	.local_out	= ip6_local_out,
	.check		= vrf_ip6_check,
	.mtu		= vrf_v4_mtu,
	.destroy	= vrf_dst_destroy,
	.default_advmss	= vrf_default_advmss,
};

static int init_dst_ops6_kmem_cachep(void)
{
	vrf_dst_ops6.kmem_cachep = kmem_cache_create("vrf_ip6_dst_cache",
						     sizeof(struct rt6_info),
						     0,
						     SLAB_HWCACHE_ALIGN,
						     NULL);

	if (!vrf_dst_ops6.kmem_cachep)
		return -ENOMEM;

	return 0;
}

static void free_dst_ops6_kmem_cachep(void)
{
	kmem_cache_destroy(vrf_dst_ops6.kmem_cachep);
}
static int vrf_input6(struct sk_buff *skb)
{
	skb->dev->stats.rx_errors++;
	kfree_skb(skb);
	return 0;
}
/* modelled after ip6_finish_output2 */
static int vrf_finish_output6(struct net *net, struct sock *sk,
			      struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);
	struct net_device *dev = dst->dev;
	struct neighbour *neigh;
	struct in6_addr *nexthop;
	int ret;

	skb->protocol = htons(ETH_P_IPV6);
	skb->dev = dev;

	rcu_read_lock_bh();
	nexthop = rt6_nexthop((struct rt6_info *)dst, &ipv6_hdr(skb)->daddr);
	neigh = __ipv6_neigh_lookup_noref(dst->dev, nexthop);
	if (unlikely(!neigh))
		neigh = __neigh_create(&nd_tbl, nexthop, dst->dev, false);
	if (!IS_ERR(neigh)) {
		ret = dst_neigh_output(dst, neigh, skb);
		rcu_read_unlock_bh();
		return ret;
	}
	rcu_read_unlock_bh();

	IP6_INC_STATS(dev_net(dst->dev),
		      ip6_dst_idev(dst), IPSTATS_MIB_OUTNOROUTES);
	kfree_skb(skb);
	return -EINVAL;
}
/* modelled after ip6_output */
static int vrf_output6(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	return NF_HOOK_COND(NFPROTO_IPV6, NF_INET_POST_ROUTING,
			    net, sk, skb, NULL, skb_dst(skb)->dev,
			    vrf_finish_output6,
			    !(IP6CB(skb)->flags & IP6SKB_REROUTED));
}
static void vrf_rt6_destroy(struct net_vrf *vrf)
{
	dst_destroy(&vrf->rt6->dst);
	free_percpu(vrf->rt6->rt6i_pcpu);
	vrf->rt6 = NULL;
}
static int vrf_rt6_create(struct net_device *dev)
{
	struct net_vrf *vrf = netdev_priv(dev);
	struct dst_entry *dst;
	struct rt6_info *rt6;
	int rc = -ENOMEM;
	int cpu;

	rt6 = dst_alloc(&vrf_dst_ops6, dev, 0,
			DST_OBSOLETE_NONE,
			(DST_HOST | DST_NOPOLICY | DST_NOXFRM));
	if (!rt6)
		goto out;

	dst = &rt6->dst;

	rt6->rt6i_pcpu = alloc_percpu_gfp(struct rt6_info *, GFP_KERNEL);
	if (!rt6->rt6i_pcpu) {
		dst_destroy(dst);
		goto out;
	}
	for_each_possible_cpu(cpu) {
		struct rt6_info **p = per_cpu_ptr(rt6->rt6i_pcpu, cpu);
		*p = NULL;
	}

	memset(dst + 1, 0, sizeof(*rt6) - sizeof(*dst));

	INIT_LIST_HEAD(&rt6->rt6i_siblings);
	INIT_LIST_HEAD(&rt6->rt6i_uncached);

	rt6->dst.input	= vrf_input6;
	rt6->dst.output	= vrf_output6;

	rt6->rt6i_table = fib6_get_table(dev_net(dev), vrf->tb_id);

	atomic_set(&rt6->dst.__refcnt, 2);

	vrf->rt6 = rt6;
	rc = 0;
out:
	return rc;
}
#else
static int init_dst_ops6_kmem_cachep(void)
{
	return 0;
}

static void free_dst_ops6_kmem_cachep(void)
{
}

static void vrf_rt6_destroy(struct net_vrf *vrf)
{
}

static int vrf_rt6_create(struct net_device *dev)
{
	return 0;
}
#endif
/* modelled after ip_finish_output2 */
static int vrf_finish_output(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);
	struct rtable *rt = (struct rtable *)dst;
	struct net_device *dev = dst->dev;
	unsigned int hh_len = LL_RESERVED_SPACE(dev);
	struct neighbour *neigh;
	u32 nexthop;
	int ret = -EINVAL;

	/* Be paranoid, rather than too clever. */
	if (unlikely(skb_headroom(skb) < hh_len && dev->header_ops)) {
		struct sk_buff *skb2;

		skb2 = skb_realloc_headroom(skb, LL_RESERVED_SPACE(dev));
		if (!skb2) {
			ret = -ENOMEM;
			goto err;
		}
		if (skb->sk)
			skb_set_owner_w(skb2, skb->sk);

		consume_skb(skb);
		skb = skb2;
	}

	rcu_read_lock_bh();

	nexthop = (__force u32)rt_nexthop(rt, ip_hdr(skb)->daddr);
	neigh = __ipv4_neigh_lookup_noref(dev, nexthop);
	if (unlikely(!neigh))
		neigh = __neigh_create(&arp_tbl, &nexthop, dev, false);
	if (!IS_ERR(neigh))
		ret = dst_neigh_output(dst, neigh, skb);

	rcu_read_unlock_bh();
err:
	if (unlikely(ret < 0))
		vrf_tx_error(skb->dev, skb);
	return ret;
}
static int vrf_output(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	struct net_device *dev = skb_dst(skb)->dev;

	IP_UPD_PO_STATS(net, IPSTATS_MIB_OUT, skb->len);

	skb->dev = dev;
	skb->protocol = htons(ETH_P_IP);

	return NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING,
			    net, sk, skb, NULL, dev,
			    vrf_finish_output,
			    !(IPCB(skb)->flags & IPSKB_REROUTED));
}
static void vrf_rtable_destroy(struct net_vrf *vrf)
{
	struct dst_entry *dst = (struct dst_entry *)vrf->rth;

	dst_destroy(dst);
	vrf->rth = NULL;
}
static struct rtable *vrf_rtable_create(struct net_device *dev)
{
	struct net_vrf *vrf = netdev_priv(dev);
	struct rtable *rth;

	rth = dst_alloc(&vrf_dst_ops, dev, 2,
			DST_OBSOLETE_NONE,
			(DST_HOST | DST_NOPOLICY | DST_NOXFRM));
	if (rth) {
		rth->dst.output	= vrf_output;
		rth->rt_genid	= rt_genid_ipv4(dev_net(dev));
		rth->rt_flags	= 0;
		rth->rt_type	= RTN_UNICAST;
		rth->rt_is_input = 0;
		rth->rt_iif	= 0;
		rth->rt_pmtu	= 0;
		rth->rt_gateway	= 0;
		rth->rt_uses_gateway = 0;
		rth->rt_table_id = vrf->tb_id;
		INIT_LIST_HEAD(&rth->rt_uncached);
		rth->rt_uncached_list = NULL;
	}

	return rth;
}
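
/* The initial refcount of 2 on the preallocated dsts appears to be one
 * reference for the vrf pointer itself plus a bias so that stack-side
 * dst_release() calls never free the long-lived dst; teardown instead
 * goes through dst_destroy() in vrf_rtable_destroy()/vrf_rt6_destroy().
 */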
/**************************** device handling ********************/
/* cycle interface to flush neighbor cache and move routes across tables */
static void cycle_netdev(struct net_device *dev)
{
	unsigned int flags = dev->flags;
	int ret;

	if (!netif_running(dev))
		return;

	ret = dev_change_flags(dev, flags & ~IFF_UP);
	if (ret >= 0)
		ret = dev_change_flags(dev, flags);

	if (ret < 0) {
		netdev_err(dev,
			   "Failed to cycle device %s; route tables might be wrong!\n",
			   dev->name);
	}
}
static struct slave *__vrf_find_slave_dev(struct slave_queue *queue,
					  struct net_device *dev)
{
	struct list_head *head = &queue->all_slaves;
	struct slave *slave;

	list_for_each_entry(slave, head, list) {
		if (slave->dev == dev)
			return slave;
	}

	return NULL;
}
/* inverse of __vrf_insert_slave */
static void __vrf_remove_slave(struct slave_queue *queue, struct slave *slave)
{
	list_del(&slave->list);
}

static void __vrf_insert_slave(struct slave_queue *queue, struct slave *slave)
{
	list_add(&slave->list, &queue->all_slaves);
}
static int do_vrf_add_slave(struct net_device *dev, struct net_device *port_dev)
{
	struct slave *slave = kzalloc(sizeof(*slave), GFP_KERNEL);
	struct net_vrf *vrf = netdev_priv(dev);
	struct slave_queue *queue = &vrf->queue;
	int ret = -ENOMEM;

	if (!slave)
		goto out_fail;

	slave->dev = port_dev;

	/* register the packet handler for slave ports */
	ret = netdev_rx_handler_register(port_dev, vrf_handle_frame, dev);
	if (ret) {
		netdev_err(port_dev,
			   "Device %s failed to register rx_handler\n",
			   dev->name);
		goto out_fail;
	}

	ret = netdev_master_upper_dev_link(port_dev, dev);
	if (ret < 0)
		goto out_unregister;

	port_dev->priv_flags |= IFF_L3MDEV_SLAVE;
	__vrf_insert_slave(queue, slave);
	cycle_netdev(port_dev);

	return 0;

out_unregister:
	netdev_rx_handler_unregister(port_dev);
out_fail:
	kfree(slave);
	return ret;
}
static int vrf_add_slave(struct net_device *dev, struct net_device *port_dev)
{
	/* VRF devices do not nest: reject other VRFs and existing slaves */
	if (netif_is_l3_master(port_dev) || netif_is_l3_slave(port_dev))
		return -EINVAL;

	return do_vrf_add_slave(dev, port_dev);
}
/* inverse of do_vrf_add_slave */
static int do_vrf_del_slave(struct net_device *dev, struct net_device *port_dev)
{
	struct net_vrf *vrf = netdev_priv(dev);
	struct slave_queue *queue = &vrf->queue;
	struct slave *slave;

	netdev_upper_dev_unlink(port_dev, dev);
	port_dev->priv_flags &= ~IFF_L3MDEV_SLAVE;

	netdev_rx_handler_unregister(port_dev);

	cycle_netdev(port_dev);

	slave = __vrf_find_slave_dev(queue, port_dev);
	if (slave)
		__vrf_remove_slave(queue, slave);

	kfree(slave);

	return 0;
}

static int vrf_del_slave(struct net_device *dev, struct net_device *port_dev)
{
	return do_vrf_del_slave(dev, port_dev);
}
static void vrf_dev_uninit(struct net_device *dev)
{
	struct net_vrf *vrf = netdev_priv(dev);
	struct slave_queue *queue = &vrf->queue;
	struct list_head *head = &queue->all_slaves;
	struct slave *slave, *next;

	vrf_rtable_destroy(vrf);
	vrf_rt6_destroy(vrf);

	list_for_each_entry_safe(slave, next, head, list)
		vrf_del_slave(dev, slave->dev);

	free_percpu(dev->dstats);
	dev->dstats = NULL;
}
static int vrf_dev_init(struct net_device *dev)
{
	struct net_vrf *vrf = netdev_priv(dev);

	INIT_LIST_HEAD(&vrf->queue.all_slaves);

	dev->dstats = netdev_alloc_pcpu_stats(struct pcpu_dstats);
	if (!dev->dstats)
		goto out_nomem;

	/* create the default dst which points back to us */
	vrf->rth = vrf_rtable_create(dev);
	if (!vrf->rth)
		goto out_stats;

	if (vrf_rt6_create(dev) != 0)
		goto out_rth;

	dev->flags = IFF_MASTER | IFF_NOARP;

	return 0;

out_rth:
	vrf_rtable_destroy(vrf);
out_stats:
	free_percpu(dev->dstats);
	dev->dstats = NULL;
out_nomem:
	return -ENOMEM;
}
static const struct net_device_ops vrf_netdev_ops = {
	.ndo_init		= vrf_dev_init,
	.ndo_uninit		= vrf_dev_uninit,
	.ndo_start_xmit		= vrf_xmit,
	.ndo_get_stats64	= vrf_get_stats64,
	.ndo_add_slave		= vrf_add_slave,
	.ndo_del_slave		= vrf_del_slave,
};
static u32 vrf_fib_table(const struct net_device *dev)
{
	struct net_vrf *vrf = netdev_priv(dev);

	return vrf->tb_id;
}
static struct rtable *vrf_get_rtable(const struct net_device *dev,
				     const struct flowi4 *fl4)
{
	struct rtable *rth = NULL;

	if (!(fl4->flowi4_flags & FLOWI_FLAG_L3MDEV_SRC)) {
		struct net_vrf *vrf = netdev_priv(dev);

		rth = vrf->rth;
		atomic_inc(&rth->dst.__refcnt);
	}

	return rth;
}
/* called under rcu_read_lock */
static int vrf_get_saddr(struct net_device *dev, struct flowi4 *fl4)
{
	struct fib_result res = { .tclassid = 0 };
	struct net *net = dev_net(dev);
	u32 orig_tos = fl4->flowi4_tos;
	u8 flags = fl4->flowi4_flags;
	u8 scope = fl4->flowi4_scope;
	u8 tos = RT_FL_TOS(fl4);
	int rc;

	if (unlikely(!fl4->daddr))
		return 0;

	fl4->flowi4_flags |= FLOWI_FLAG_SKIP_NH_OIF;
	fl4->flowi4_iif = LOOPBACK_IFINDEX;
	fl4->flowi4_tos = tos & IPTOS_RT_MASK;
	fl4->flowi4_scope = ((tos & RTO_ONLINK) ?
			     RT_SCOPE_LINK : RT_SCOPE_UNIVERSE);

	rc = fib_lookup(net, fl4, &res, 0);
	if (!rc) {
		if (res.type == RTN_LOCAL)
			fl4->saddr = res.fi->fib_prefsrc ? : fl4->daddr;
		else
			fib_select_path(net, &res, fl4, -1);
	}

	fl4->flowi4_flags = flags;
	fl4->flowi4_tos = orig_tos;
	fl4->flowi4_scope = scope;

	return rc;
}
#if IS_ENABLED(CONFIG_IPV6)
static struct dst_entry *vrf_get_rt6_dst(const struct net_device *dev,
					 const struct flowi6 *fl6)
{
	struct rt6_info *rt = NULL;

	if (!(fl6->flowi6_flags & FLOWI_FLAG_L3MDEV_SRC)) {
		struct net_vrf *vrf = netdev_priv(dev);

		rt = vrf->rt6;
		atomic_inc(&rt->dst.__refcnt);
	}

	return (struct dst_entry *)rt;
}
#endif
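
/* Hooks used by the core l3mdev code to divert lookups into the VRF:
 * l3mdev_fib_table supplies the table id, get_rtable/get_rt6_dst hand
 * out the device's preallocated dst, and get_saddr picks a source
 * address from a lookup in the VRF's table.
 */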
static const struct l3mdev_ops vrf_l3mdev_ops = {
	.l3mdev_fib_table	= vrf_fib_table,
	.l3mdev_get_rtable	= vrf_get_rtable,
	.l3mdev_get_saddr	= vrf_get_saddr,
#if IS_ENABLED(CONFIG_IPV6)
	.l3mdev_get_rt6_dst	= vrf_get_rt6_dst,
#endif
};
static void vrf_get_drvinfo(struct net_device *dev,
			    struct ethtool_drvinfo *info)
{
	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
}

static const struct ethtool_ops vrf_ethtool_ops = {
	.get_drvinfo	= vrf_get_drvinfo,
};
static void vrf_setup(struct net_device *dev)
{
	ether_setup(dev);

	/* Initialize the device structure. */
	dev->netdev_ops = &vrf_netdev_ops;
	dev->l3mdev_ops = &vrf_l3mdev_ops;
	dev->ethtool_ops = &vrf_ethtool_ops;
	dev->destructor = free_netdev;

	/* Fill in device structure with ethernet-generic values. */
	eth_hw_addr_random(dev);

	/* don't acquire vrf device's netif_tx_lock when transmitting */
	dev->features |= NETIF_F_LLTX;

	/* don't allow vrf devices to change network namespaces. */
	dev->features |= NETIF_F_NETNS_LOCAL;
}
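
/* Note that the VRF device itself is IFF_NOARP: neighbor resolution
 * happens on the real egress device in vrf_finish_output() and
 * vrf_finish_output6(), never on the VRF device.
 */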
static int vrf_validate(struct nlattr *tb[], struct nlattr *data[])
{
	if (tb[IFLA_ADDRESS]) {
		if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
			return -EINVAL;
		if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
			return -EADDRNOTAVAIL;
	}
	return 0;
}
static void vrf_dellink(struct net_device *dev, struct list_head *head)
{
	unregister_netdevice_queue(dev, head);
}
static int vrf_newlink(struct net *src_net, struct net_device *dev,
		       struct nlattr *tb[], struct nlattr *data[])
{
	struct net_vrf *vrf = netdev_priv(dev);

	if (!data || !data[IFLA_VRF_TABLE])
		return -EINVAL;

	vrf->tb_id = nla_get_u32(data[IFLA_VRF_TABLE]);
	if (vrf->tb_id == RT_TABLE_UNSPEC)
		return -EINVAL;

	dev->priv_flags |= IFF_L3MDEV_MASTER;

	return register_netdevice(dev);
}
static size_t vrf_nl_getsize(const struct net_device *dev)
{
	return nla_total_size(sizeof(u32)); /* IFLA_VRF_TABLE */
}

static int vrf_fillinfo(struct sk_buff *skb,
			const struct net_device *dev)
{
	struct net_vrf *vrf = netdev_priv(dev);

	return nla_put_u32(skb, IFLA_VRF_TABLE, vrf->tb_id);
}
static const struct nla_policy vrf_nl_policy[IFLA_VRF_MAX + 1] = {
	[IFLA_VRF_TABLE] = { .type = NLA_U32 },
};
static struct rtnl_link_ops vrf_link_ops __read_mostly = {
	.kind		= DRV_NAME,
	.priv_size	= sizeof(struct net_vrf),

	.get_size	= vrf_nl_getsize,
	.policy		= vrf_nl_policy,
	.validate	= vrf_validate,
	.fill_info	= vrf_fillinfo,

	.newlink	= vrf_newlink,
	.dellink	= vrf_dellink,
	.setup		= vrf_setup,
	.maxtype	= IFLA_VRF_MAX,
};
static int vrf_device_event(struct notifier_block *unused,
			    unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);

	/* only care about unregister events to drop slave references */
	if (event == NETDEV_UNREGISTER) {
		struct net_device *vrf_dev;

		if (!netif_is_l3_slave(dev))
			goto out;

		vrf_dev = netdev_master_upper_dev_get(dev);
		vrf_del_slave(vrf_dev, dev);
	}
out:
	return NOTIFY_DONE;
}
static struct notifier_block vrf_notifier_block __read_mostly = {
	.notifier_call = vrf_device_event,
};
static int __init vrf_init_module(void)
{
	int rc;

	vrf_dst_ops.kmem_cachep =
		kmem_cache_create("vrf_ip_dst_cache",
				  sizeof(struct rtable), 0,
				  SLAB_HWCACHE_ALIGN,
				  NULL);

	if (!vrf_dst_ops.kmem_cachep)
		return -ENOMEM;

	rc = init_dst_ops6_kmem_cachep();
	if (rc != 0)
		goto error2;

	register_netdevice_notifier(&vrf_notifier_block);

	rc = rtnl_link_register(&vrf_link_ops);
	if (rc < 0)
		goto error;

	return 0;

error:
	unregister_netdevice_notifier(&vrf_notifier_block);
	free_dst_ops6_kmem_cachep();
error2:
	kmem_cache_destroy(vrf_dst_ops.kmem_cachep);
	return rc;
}
static void __exit vrf_cleanup_module(void)
{
	rtnl_link_unregister(&vrf_link_ops);
	unregister_netdevice_notifier(&vrf_notifier_block);
	kmem_cache_destroy(vrf_dst_ops.kmem_cachep);
	free_dst_ops6_kmem_cachep();
}
module_init(vrf_init_module);
module_exit(vrf_cleanup_module);
MODULE_AUTHOR("Shrijeet Mukherjee, David Ahern");
MODULE_DESCRIPTION("Device driver to instantiate VRF domains");
MODULE_LICENSE("GPL");
MODULE_ALIAS_RTNL_LINK(DRV_NAME);
MODULE_VERSION(DRV_VERSION);