/*
 *	Linux IPv6 multicast routing support for BSD pim6sd
 *	Based on net/ipv4/ipmr.c.
 *
 *	(c) 2004 Mickael Hoerdt, <hoerdt@clarinet.u-strasbg.fr>
 *		LSIIT Laboratory, Strasbourg, France
 *	(c) 2004 Jean-Philippe Andriot, <jean-philippe.andriot@6WIND.com>
 *		6WIND, Paris, France
 *	Copyright (C)2007,2008 USAGI/WIDE Project
 *		YOSHIFUJI Hideaki <yoshfuji@linux-ipv6.org>
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 */
#include <asm/uaccess.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/kernel.h>
#include <linux/fcntl.h>
#include <linux/stat.h>
#include <linux/socket.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/compat.h>
#include <net/protocol.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <net/raw.h>
#include <linux/notifier.h>
#include <linux/if_arp.h>
#include <net/checksum.h>
#include <net/netlink.h>
#include <net/fib_rules.h>

#include <net/ipv6.h>
#include <net/ip6_route.h>
#include <linux/mroute6.h>
#include <linux/pim.h>
#include <net/addrconf.h>
#include <linux/netfilter_ipv6.h>
#include <linux/export.h>
#include <net/ip6_checksum.h>
#include <linux/netconf.h>
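
/*
 * All multicast routing state (the VIF table, the forwarding cache and the
 * pim6sd control socket) is kept per mr6_table, so that a network namespace
 * can run several tables selected by fib rules when
 * CONFIG_IPV6_MROUTE_MULTIPLE_TABLES is enabled.
 */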
struct mr6_table {
	struct list_head	list;
#ifdef CONFIG_NET_NS
	struct net		*net;
#endif
	u32			id;
	struct sock		*mroute6_sk;
	struct timer_list	ipmr_expire_timer;
	struct list_head	mfc6_unres_queue;
	struct list_head	mfc6_cache_array[MFC6_LINES];
	struct mif_device	vif6_table[MAXMIFS];
	int			maxvif;
	atomic_t		cache_resolve_queue_len;
	bool			mroute_do_assert;
	bool			mroute_do_pim;
#ifdef CONFIG_IPV6_PIMSM_V2
	int			mroute_reg_vif_num;
#endif
};

struct ip6mr_rule {
	struct fib_rule		common;
};

struct ip6mr_result {
	struct mr6_table	*mrt;
};
/* Big lock, protecting vif table, mrt cache and mroute socket state.
   Note that the changes are semaphored via rtnl_lock.
 */

static DEFINE_RWLOCK(mrt_lock);
/*
 *	Multicast router control variables
 */

#define MIF_EXISTS(_mrt, _idx) ((_mrt)->vif6_table[_idx].dev != NULL)
/* Special spinlock for queue of unresolved entries */
static DEFINE_SPINLOCK(mfc_unres_lock);

/* We return to original Alan's scheme. Hash table of resolved
   entries is changed only in process context and protected
   with weak lock mrt_lock. Queue of unresolved entries is protected
   with strong spinlock mfc_unres_lock.

   In this case data path is free of exclusive locks at all.
 */

static struct kmem_cache *mrt_cachep __read_mostly;
static struct mr6_table *ip6mr_new_table(struct net *net, u32 id);
static void ip6mr_free_table(struct mr6_table *mrt);

static int ip6_mr_forward(struct net *net, struct mr6_table *mrt,
			  struct sk_buff *skb, struct mfc6_cache *cache);
static int ip6mr_cache_report(struct mr6_table *mrt, struct sk_buff *pkt,
			      mifi_t mifi, int assert);
static int __ip6mr_fill_mroute(struct mr6_table *mrt, struct sk_buff *skb,
			       struct mfc6_cache *c, struct rtmsg *rtm);
static void mr6_netlink_event(struct mr6_table *mrt, struct mfc6_cache *mfc,
			      int cmd);
static int ip6mr_rtm_dumproute(struct sk_buff *skb,
			       struct netlink_callback *cb);
static void mroute_clean_tables(struct mr6_table *mrt);
static void ipmr_expire_process(unsigned long arg);
#ifdef CONFIG_IPV6_MROUTE_MULTIPLE_TABLES
#define ip6mr_for_each_table(mrt, net) \
	list_for_each_entry_rcu(mrt, &net->ipv6.mr6_tables, list)

static struct mr6_table *ip6mr_get_table(struct net *net, u32 id)
{
	struct mr6_table *mrt;

	ip6mr_for_each_table(mrt, net) {
		if (mrt->id == id)
			return mrt;
	}
	return NULL;
}

static int ip6mr_fib_lookup(struct net *net, struct flowi6 *flp6,
			    struct mr6_table **mrt)
{
	int err;
	struct ip6mr_result res;
	struct fib_lookup_arg arg = {
		.result = &res,
		.flags = FIB_LOOKUP_NOREF,
	};

	err = fib_rules_lookup(net->ipv6.mr6_rules_ops,
			       flowi6_to_flowi(flp6), 0, &arg);
	if (err < 0)
		return err;
	*mrt = res.mrt;
	return 0;
}
static int ip6mr_rule_action(struct fib_rule *rule, struct flowi *flp,
			     int flags, struct fib_lookup_arg *arg)
{
	struct ip6mr_result *res = arg->result;
	struct mr6_table *mrt;

	switch (rule->action) {
	case FR_ACT_TO_TBL:
		break;
	case FR_ACT_UNREACHABLE:
		return -ENETUNREACH;
	case FR_ACT_PROHIBIT:
		return -EACCES;
	case FR_ACT_BLACKHOLE:
	default:
		return -EINVAL;
	}

	mrt = ip6mr_get_table(rule->fr_net, rule->table);
	if (mrt == NULL)
		return -EAGAIN;
	res->mrt = mrt;
	return 0;
}
static int ip6mr_rule_match(struct fib_rule *rule, struct flowi *flp, int flags)
{
	return 1;
}

static const struct nla_policy ip6mr_rule_policy[FRA_MAX + 1] = {
	FRA_GENERIC_POLICY,
};

static int ip6mr_rule_configure(struct fib_rule *rule, struct sk_buff *skb,
				struct fib_rule_hdr *frh, struct nlattr **tb)
{
	return 0;
}

static int ip6mr_rule_compare(struct fib_rule *rule, struct fib_rule_hdr *frh,
			      struct nlattr **tb)
{
	return 1;
}

static int ip6mr_rule_fill(struct fib_rule *rule, struct sk_buff *skb,
			   struct fib_rule_hdr *frh)
{
	frh->dst_len = 0;
	frh->src_len = 0;
	frh->tos     = 0;
	return 0;
}
static const struct fib_rules_ops __net_initconst ip6mr_rules_ops_template = {
	.family		= RTNL_FAMILY_IP6MR,
	.rule_size	= sizeof(struct ip6mr_rule),
	.addr_size	= sizeof(struct in6_addr),
	.action		= ip6mr_rule_action,
	.match		= ip6mr_rule_match,
	.configure	= ip6mr_rule_configure,
	.compare	= ip6mr_rule_compare,
	.default_pref	= fib_default_rule_pref,
	.fill		= ip6mr_rule_fill,
	.nlgroup	= RTNLGRP_IPV6_RULE,
	.policy		= ip6mr_rule_policy,
	.owner		= THIS_MODULE,
};
static int __net_init ip6mr_rules_init(struct net *net)
{
	struct fib_rules_ops *ops;
	struct mr6_table *mrt;
	int err;

	ops = fib_rules_register(&ip6mr_rules_ops_template, net);
	if (IS_ERR(ops))
		return PTR_ERR(ops);

	INIT_LIST_HEAD(&net->ipv6.mr6_tables);

	mrt = ip6mr_new_table(net, RT6_TABLE_DFLT);
	if (mrt == NULL) {
		err = -ENOMEM;
		goto err1;
	}

	err = fib_default_rule_add(ops, 0x7fff, RT6_TABLE_DFLT, 0);
	if (err < 0)
		goto err2;

	net->ipv6.mr6_rules_ops = ops;
	return 0;

err2:
	kfree(mrt);
err1:
	fib_rules_unregister(ops);
	return err;
}
static void __net_exit ip6mr_rules_exit(struct net *net)
{
	struct mr6_table *mrt, *next;

	list_for_each_entry_safe(mrt, next, &net->ipv6.mr6_tables, list) {
		list_del(&mrt->list);
		ip6mr_free_table(mrt);
	}
	fib_rules_unregister(net->ipv6.mr6_rules_ops);
}
#else
#define ip6mr_for_each_table(mrt, net) \
	for (mrt = net->ipv6.mrt6; mrt; mrt = NULL)

static struct mr6_table *ip6mr_get_table(struct net *net, u32 id)
{
	return net->ipv6.mrt6;
}

static int ip6mr_fib_lookup(struct net *net, struct flowi6 *flp6,
			    struct mr6_table **mrt)
{
	*mrt = net->ipv6.mrt6;
	return 0;
}

static int __net_init ip6mr_rules_init(struct net *net)
{
	net->ipv6.mrt6 = ip6mr_new_table(net, RT6_TABLE_DFLT);
	return net->ipv6.mrt6 ? 0 : -ENOMEM;
}

static void __net_exit ip6mr_rules_exit(struct net *net)
{
	ip6mr_free_table(net->ipv6.mrt6);
	net->ipv6.mrt6 = NULL;
}
#endif
static struct mr6_table *ip6mr_new_table(struct net *net, u32 id)
{
	struct mr6_table *mrt;
	unsigned int i;

	mrt = ip6mr_get_table(net, id);
	if (mrt != NULL)
		return mrt;

	mrt = kzalloc(sizeof(*mrt), GFP_KERNEL);
	if (mrt == NULL)
		return NULL;
	mrt->id = id;
	write_pnet(&mrt->net, net);

	/* Forwarding cache */
	for (i = 0; i < MFC6_LINES; i++)
		INIT_LIST_HEAD(&mrt->mfc6_cache_array[i]);

	INIT_LIST_HEAD(&mrt->mfc6_unres_queue);

	setup_timer(&mrt->ipmr_expire_timer, ipmr_expire_process,
		    (unsigned long)mrt);

#ifdef CONFIG_IPV6_PIMSM_V2
	mrt->mroute_reg_vif_num = -1;
#endif
#ifdef CONFIG_IPV6_MROUTE_MULTIPLE_TABLES
	list_add_tail_rcu(&mrt->list, &net->ipv6.mr6_tables);
#endif
	return mrt;
}
static void ip6mr_free_table(struct mr6_table *mrt)
{
	del_timer(&mrt->ipmr_expire_timer);
	mroute_clean_tables(mrt);
	kfree(mrt);
}
#ifdef CONFIG_PROC_FS

struct ipmr_mfc_iter {
	struct seq_net_private p;
	struct mr6_table *mrt;
	struct list_head *cache;
	int ct;
};
static struct mfc6_cache *ipmr_mfc_seq_idx(struct net *net,
					   struct ipmr_mfc_iter *it, loff_t pos)
{
	struct mr6_table *mrt = it->mrt;
	struct mfc6_cache *mfc;

	read_lock(&mrt_lock);
	for (it->ct = 0; it->ct < MFC6_LINES; it->ct++) {
		it->cache = &mrt->mfc6_cache_array[it->ct];
		list_for_each_entry(mfc, it->cache, list)
			if (pos-- == 0)
				return mfc;
	}
	read_unlock(&mrt_lock);

	spin_lock_bh(&mfc_unres_lock);
	it->cache = &mrt->mfc6_unres_queue;
	list_for_each_entry(mfc, it->cache, list)
		if (pos-- == 0)
			return mfc;
	spin_unlock_bh(&mfc_unres_lock);

	it->cache = NULL;
	return NULL;
}
/*
 *	The /proc interfaces to multicast routing
 *	/proc/ip6_mr_cache /proc/ip6_mr_vif
 */

struct ipmr_vif_iter {
	struct seq_net_private p;
	struct mr6_table *mrt;
	int ct;
};
static struct mif_device *ip6mr_vif_seq_idx(struct net *net,
					    struct ipmr_vif_iter *iter,
					    loff_t pos)
{
	struct mr6_table *mrt = iter->mrt;

	for (iter->ct = 0; iter->ct < mrt->maxvif; ++iter->ct) {
		if (!MIF_EXISTS(mrt, iter->ct))
			continue;
		if (pos-- == 0)
			return &mrt->vif6_table[iter->ct];
	}
	return NULL;
}
static void *ip6mr_vif_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(mrt_lock)
{
	struct ipmr_vif_iter *iter = seq->private;
	struct net *net = seq_file_net(seq);
	struct mr6_table *mrt;

	mrt = ip6mr_get_table(net, RT6_TABLE_DFLT);
	if (mrt == NULL)
		return ERR_PTR(-ENOENT);

	iter->mrt = mrt;

	read_lock(&mrt_lock);
	return *pos ? ip6mr_vif_seq_idx(net, seq->private, *pos - 1)
		: SEQ_START_TOKEN;
}
static void *ip6mr_vif_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct ipmr_vif_iter *iter = seq->private;
	struct net *net = seq_file_net(seq);
	struct mr6_table *mrt = iter->mrt;

	++*pos;
	if (v == SEQ_START_TOKEN)
		return ip6mr_vif_seq_idx(net, iter, 0);

	while (++iter->ct < mrt->maxvif) {
		if (!MIF_EXISTS(mrt, iter->ct))
			continue;
		return &mrt->vif6_table[iter->ct];
	}
	return NULL;
}
static void ip6mr_vif_seq_stop(struct seq_file *seq, void *v)
	__releases(mrt_lock)
{
	read_unlock(&mrt_lock);
}
static int ip6mr_vif_seq_show(struct seq_file *seq, void *v)
{
	struct ipmr_vif_iter *iter = seq->private;
	struct mr6_table *mrt = iter->mrt;

	if (v == SEQ_START_TOKEN) {
		seq_puts(seq,
			 "Interface      BytesIn  PktsIn  BytesOut PktsOut Flags\n");
	} else {
		const struct mif_device *vif = v;
		const char *name = vif->dev ? vif->dev->name : "none";

		seq_printf(seq,
			   "%2td %-10s %8ld %7ld  %8ld %7ld %05X\n",
			   vif - mrt->vif6_table,
			   name, vif->bytes_in, vif->pkt_in,
			   vif->bytes_out, vif->pkt_out,
			   vif->flags);
	}
	return 0;
}
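
/*
 * Illustrative /proc/net/ip6_mr_vif output (hypothetical counters; one
 * physical mif plus a PIM register vif, MIFF_REGISTER == 0x1):
 *
 *	Interface      BytesIn  PktsIn  BytesOut PktsOut Flags
 *	 0 eth0           1280      10         0       0 00000
 *	 1 pim6reg           0       0      1280      10 00001
 */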
static const struct seq_operations ip6mr_vif_seq_ops = {
	.start = ip6mr_vif_seq_start,
	.next  = ip6mr_vif_seq_next,
	.stop  = ip6mr_vif_seq_stop,
	.show  = ip6mr_vif_seq_show,
};
static int ip6mr_vif_open(struct inode *inode, struct file *file)
{
	return seq_open_net(inode, file, &ip6mr_vif_seq_ops,
			    sizeof(struct ipmr_vif_iter));
}

static const struct file_operations ip6mr_vif_fops = {
	.owner	 = THIS_MODULE,
	.open    = ip6mr_vif_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release_net,
};
static void *ipmr_mfc_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct ipmr_mfc_iter *it = seq->private;
	struct net *net = seq_file_net(seq);
	struct mr6_table *mrt;

	mrt = ip6mr_get_table(net, RT6_TABLE_DFLT);
	if (mrt == NULL)
		return ERR_PTR(-ENOENT);

	it->mrt = mrt;
	return *pos ? ipmr_mfc_seq_idx(net, seq->private, *pos - 1)
		: SEQ_START_TOKEN;
}
static void *ipmr_mfc_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct mfc6_cache *mfc = v;
	struct ipmr_mfc_iter *it = seq->private;
	struct net *net = seq_file_net(seq);
	struct mr6_table *mrt = it->mrt;

	++*pos;

	if (v == SEQ_START_TOKEN)
		return ipmr_mfc_seq_idx(net, seq->private, 0);

	if (mfc->list.next != it->cache)
		return list_entry(mfc->list.next, struct mfc6_cache, list);

	if (it->cache == &mrt->mfc6_unres_queue)
		goto end_of_list;

	BUG_ON(it->cache != &mrt->mfc6_cache_array[it->ct]);

	while (++it->ct < MFC6_LINES) {
		it->cache = &mrt->mfc6_cache_array[it->ct];
		if (list_empty(it->cache))
			continue;
		return list_first_entry(it->cache, struct mfc6_cache, list);
	}

	/* exhausted cache_array, show unresolved */
	read_unlock(&mrt_lock);
	it->cache = &mrt->mfc6_unres_queue;
	it->ct = 0;

	spin_lock_bh(&mfc_unres_lock);
	if (!list_empty(it->cache))
		return list_first_entry(it->cache, struct mfc6_cache, list);

end_of_list:
	spin_unlock_bh(&mfc_unres_lock);
	it->cache = NULL;

	return NULL;
}
static void ipmr_mfc_seq_stop(struct seq_file *seq, void *v)
{
	struct ipmr_mfc_iter *it = seq->private;
	struct mr6_table *mrt = it->mrt;

	if (it->cache == &mrt->mfc6_unres_queue)
		spin_unlock_bh(&mfc_unres_lock);
	else if (it->cache == mrt->mfc6_cache_array)
		read_unlock(&mrt_lock);
}
static int ipmr_mfc_seq_show(struct seq_file *seq, void *v)
{
	int n;

	if (v == SEQ_START_TOKEN) {
		seq_puts(seq,
			 "Group                            "
			 "Origin                           "
			 "Iif      Pkts  Bytes     Wrong  Oifs\n");
	} else {
		const struct mfc6_cache *mfc = v;
		const struct ipmr_mfc_iter *it = seq->private;
		struct mr6_table *mrt = it->mrt;

		seq_printf(seq, "%pI6 %pI6 %-3hd",
			   &mfc->mf6c_mcastgrp, &mfc->mf6c_origin,
			   mfc->mf6c_parent);

		if (it->cache != &mrt->mfc6_unres_queue) {
			seq_printf(seq, " %8lu %8lu %8lu",
				   mfc->mfc_un.res.pkt,
				   mfc->mfc_un.res.bytes,
				   mfc->mfc_un.res.wrong_if);
			for (n = mfc->mfc_un.res.minvif;
			     n < mfc->mfc_un.res.maxvif; n++) {
				if (MIF_EXISTS(mrt, n) &&
				    mfc->mfc_un.res.ttls[n] < 255)
					seq_printf(seq,
						   " %2d:%-3d",
						   n, mfc->mfc_un.res.ttls[n]);
			}
		} else {
			/* unresolved mfc_caches don't contain
			 * pkt, bytes and wrong_if values
			 */
			seq_printf(seq, " %8lu %8lu %8lu", 0ul, 0ul, 0ul);
		}
		seq_putc(seq, '\n');
	}
	return 0;
}
static const struct seq_operations ipmr_mfc_seq_ops = {
	.start = ipmr_mfc_seq_start,
	.next  = ipmr_mfc_seq_next,
	.stop  = ipmr_mfc_seq_stop,
	.show  = ipmr_mfc_seq_show,
};
static int ipmr_mfc_open(struct inode *inode, struct file *file)
{
	return seq_open_net(inode, file, &ipmr_mfc_seq_ops,
			    sizeof(struct ipmr_mfc_iter));
}

static const struct file_operations ip6mr_mfc_fops = {
	.owner	 = THIS_MODULE,
	.open    = ipmr_mfc_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release_net,
};
#endif
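
/*
 * pim6_rcv() below handles PIM Register messages addressed to this router
 * when it acts as a rendezvous point: it validates the PIM header and
 * checksum, checks that the encapsulated packet really is IPv6 multicast,
 * then decapsulates it and re-injects it on the per-table "pim6reg" device
 * via skb_tunnel_rx(), so it re-enters the stack as if received there.
 */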
#ifdef CONFIG_IPV6_PIMSM_V2

static int pim6_rcv(struct sk_buff *skb)
{
	struct pimreghdr *pim;
	struct ipv6hdr   *encap;
	struct net_device  *reg_dev = NULL;
	struct net *net = dev_net(skb->dev);
	struct mr6_table *mrt;
	struct flowi6 fl6 = {
		.flowi6_iif	= skb->dev->ifindex,
		.flowi6_mark	= skb->mark,
	};
	int reg_vif_num;

	if (!pskb_may_pull(skb, sizeof(*pim) + sizeof(*encap)))
		goto drop;

	pim = (struct pimreghdr *)skb_transport_header(skb);
	if (pim->type != ((PIM_VERSION << 4) | PIM_REGISTER) ||
	    (pim->flags & PIM_NULL_REGISTER) ||
	    (csum_ipv6_magic(&ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr,
			     sizeof(*pim), IPPROTO_PIM,
			     csum_partial((void *)pim, sizeof(*pim), 0)) &&
	     csum_fold(skb_checksum(skb, 0, skb->len, 0))))
		goto drop;

	/* check if the inner packet is destined to mcast group */
	encap = (struct ipv6hdr *)(skb_transport_header(skb) +
				   sizeof(*pim));

	if (!ipv6_addr_is_multicast(&encap->daddr) ||
	    encap->payload_len == 0 ||
	    ntohs(encap->payload_len) + sizeof(*pim) > skb->len)
		goto drop;

	if (ip6mr_fib_lookup(net, &fl6, &mrt) < 0)
		goto drop;
	reg_vif_num = mrt->mroute_reg_vif_num;

	read_lock(&mrt_lock);
	if (reg_vif_num >= 0)
		reg_dev = mrt->vif6_table[reg_vif_num].dev;
	if (reg_dev)
		dev_hold(reg_dev);
	read_unlock(&mrt_lock);

	if (reg_dev == NULL)
		goto drop;

	skb->mac_header = skb->network_header;
	skb_pull(skb, (u8 *)encap - skb->data);
	skb_reset_network_header(skb);
	skb->protocol = htons(ETH_P_IPV6);
	skb->ip_summed = CHECKSUM_NONE;
	skb->pkt_type = PACKET_HOST;

	skb_tunnel_rx(skb, reg_dev);

	netif_rx(skb);

	dev_put(reg_dev);
	return 0;
 drop:
	kfree_skb(skb);
	return 0;
}

static const struct inet6_protocol pim6_protocol = {
	.handler	=	pim6_rcv,
};

/* Service routines creating virtual interfaces: PIMREG */

static netdev_tx_t reg_vif_xmit(struct sk_buff *skb,
				      struct net_device *dev)
{
	struct net *net = dev_net(dev);
	struct mr6_table *mrt;
	struct flowi6 fl6 = {
		.flowi6_oif	= dev->ifindex,
		.flowi6_iif	= skb->skb_iif,
		.flowi6_mark	= skb->mark,
	};
	int err;

	err = ip6mr_fib_lookup(net, &fl6, &mrt);
	if (err < 0) {
		kfree_skb(skb);
		return err;
	}

	read_lock(&mrt_lock);
	dev->stats.tx_bytes += skb->len;
	dev->stats.tx_packets++;
	ip6mr_cache_report(mrt, skb, mrt->mroute_reg_vif_num, MRT6MSG_WHOLEPKT);
	read_unlock(&mrt_lock);
	kfree_skb(skb);
	return NETDEV_TX_OK;
}
static const struct net_device_ops reg_vif_netdev_ops = {
	.ndo_start_xmit	= reg_vif_xmit,
};
static void reg_vif_setup(struct net_device *dev)
{
	dev->type		= ARPHRD_PIMREG;
	dev->mtu		= 1500 - sizeof(struct ipv6hdr) - 8;
	dev->flags		= IFF_NOARP;
	dev->netdev_ops		= &reg_vif_netdev_ops;
	dev->destructor		= free_netdev;
	dev->features		|= NETIF_F_NETNS_LOCAL;
}
static struct net_device *ip6mr_reg_vif(struct net *net, struct mr6_table *mrt)
{
	struct net_device *dev;
	char name[IFNAMSIZ];

	if (mrt->id == RT6_TABLE_DFLT)
		sprintf(name, "pim6reg");
	else
		sprintf(name, "pim6reg%u", mrt->id);

	dev = alloc_netdev(0, name, reg_vif_setup);
	if (dev == NULL)
		return NULL;

	dev_net_set(dev, net);

	if (register_netdevice(dev)) {
		free_netdev(dev);
		return NULL;
	}
	dev->iflink = 0;

	if (dev_open(dev))
		goto failure;

	dev_hold(dev);
	return dev;

failure:
	/* allow the register to be completed before unregistering. */
	rtnl_unlock();
	rtnl_lock();

	unregister_netdevice(dev);
	return NULL;
}
#endif
static int mif6_delete(struct mr6_table *mrt, int vifi, struct list_head *head)
{
	struct mif_device *v;
	struct net_device *dev;
	struct inet6_dev *in6_dev;

	if (vifi < 0 || vifi >= mrt->maxvif)
		return -EADDRNOTAVAIL;

	v = &mrt->vif6_table[vifi];

	write_lock_bh(&mrt_lock);
	dev = v->dev;
	v->dev = NULL;

	if (!dev) {
		write_unlock_bh(&mrt_lock);
		return -EADDRNOTAVAIL;
	}

#ifdef CONFIG_IPV6_PIMSM_V2
	if (vifi == mrt->mroute_reg_vif_num)
		mrt->mroute_reg_vif_num = -1;
#endif

	if (vifi + 1 == mrt->maxvif) {
		int tmp;
		for (tmp = vifi - 1; tmp >= 0; tmp--) {
			if (MIF_EXISTS(mrt, tmp))
				break;
		}
		mrt->maxvif = tmp + 1;
	}

	write_unlock_bh(&mrt_lock);

	dev_set_allmulti(dev, -1);

	in6_dev = __in6_dev_get(dev);
	if (in6_dev) {
		in6_dev->cnf.mc_forwarding--;
		inet6_netconf_notify_devconf(dev_net(dev),
					     NETCONFA_MC_FORWARDING,
					     dev->ifindex, &in6_dev->cnf);
	}

	if (v->flags & MIFF_REGISTER)
		unregister_netdevice_queue(dev, head);

	dev_put(dev);
	return 0;
}
static inline void ip6mr_cache_free(struct mfc6_cache *c)
{
	kmem_cache_free(mrt_cachep, c);
}
/* Destroy an unresolved cache entry, killing queued skbs
   and reporting error to netlink readers.
 */

static void ip6mr_destroy_unres(struct mr6_table *mrt, struct mfc6_cache *c)
{
	struct net *net = read_pnet(&mrt->net);
	struct sk_buff *skb;

	atomic_dec(&mrt->cache_resolve_queue_len);

	while ((skb = skb_dequeue(&c->mfc_un.unres.unresolved)) != NULL) {
		if (ipv6_hdr(skb)->version == 0) {
			struct nlmsghdr *nlh = (struct nlmsghdr *)skb_pull(skb, sizeof(struct ipv6hdr));
			nlh->nlmsg_type = NLMSG_ERROR;
			nlh->nlmsg_len = nlmsg_msg_size(sizeof(struct nlmsgerr));
			skb_trim(skb, nlh->nlmsg_len);
			((struct nlmsgerr *)nlmsg_data(nlh))->error = -ETIMEDOUT;
			rtnl_unicast(skb, net, NETLINK_CB(skb).portid);
		} else
			kfree_skb(skb);
	}

	ip6mr_cache_free(c);
}
/* Timer process for all the unresolved queue. */

static void ipmr_do_expire_process(struct mr6_table *mrt)
{
	unsigned long now = jiffies;
	unsigned long expires = 10 * HZ;
	struct mfc6_cache *c, *next;

	list_for_each_entry_safe(c, next, &mrt->mfc6_unres_queue, list) {
		if (time_after(c->mfc_un.unres.expires, now)) {
			/* not yet... */
			unsigned long interval = c->mfc_un.unres.expires - now;
			if (interval < expires)
				expires = interval;
			continue;
		}

		list_del(&c->list);
		mr6_netlink_event(mrt, c, RTM_DELROUTE);
		ip6mr_destroy_unres(mrt, c);
	}

	if (!list_empty(&mrt->mfc6_unres_queue))
		mod_timer(&mrt->ipmr_expire_timer, jiffies + expires);
}
static void ipmr_expire_process(unsigned long arg)
{
	struct mr6_table *mrt = (struct mr6_table *)arg;

	if (!spin_trylock(&mfc_unres_lock)) {
		mod_timer(&mrt->ipmr_expire_timer, jiffies + 1);
		return;
	}

	if (!list_empty(&mrt->mfc6_unres_queue))
		ipmr_do_expire_process(mrt);

	spin_unlock(&mfc_unres_lock);
}
/* Fill oifs list. It is called under write locked mrt_lock. */

static void ip6mr_update_thresholds(struct mr6_table *mrt, struct mfc6_cache *cache,
				    unsigned char *ttls)
{
	int vifi;

	cache->mfc_un.res.minvif = MAXMIFS;
	cache->mfc_un.res.maxvif = 0;
	memset(cache->mfc_un.res.ttls, 255, MAXMIFS);

	for (vifi = 0; vifi < mrt->maxvif; vifi++) {
		if (MIF_EXISTS(mrt, vifi) &&
		    ttls[vifi] && ttls[vifi] < 255) {
			cache->mfc_un.res.ttls[vifi] = ttls[vifi];
			if (cache->mfc_un.res.minvif > vifi)
				cache->mfc_un.res.minvif = vifi;
			if (cache->mfc_un.res.maxvif <= vifi)
				cache->mfc_un.res.maxvif = vifi + 1;
		}
	}
}
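
/*
 * Worked example for ip6mr_update_thresholds(): with maxvif == 4 and
 * ttls = {0, 1, 255, 3}, only vifs 1 and 3 get a real threshold, so
 * res.ttls becomes {255, 1, 255, 3}, res.minvif = 1 and res.maxvif = 4;
 * the forwarding loop in ip6_mr_forward() then only scans vifs [1, 4).
 */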
static int mif6_add(struct net *net, struct mr6_table *mrt,
		    struct mif6ctl *vifc, int mrtsock)
{
	int vifi = vifc->mif6c_mifi;
	struct mif_device *v = &mrt->vif6_table[vifi];
	struct net_device *dev;
	struct inet6_dev *in6_dev;
	int err;

	/* Is vif busy ? */
	if (MIF_EXISTS(mrt, vifi))
		return -EADDRINUSE;

	switch (vifc->mif6c_flags) {
#ifdef CONFIG_IPV6_PIMSM_V2
	case MIFF_REGISTER:
		/*
		 * Special Purpose VIF in PIM
		 * All the packets will be sent to the daemon
		 */
		if (mrt->mroute_reg_vif_num >= 0)
			return -EADDRINUSE;
		dev = ip6mr_reg_vif(net, mrt);
		if (!dev)
			return -ENOBUFS;
		err = dev_set_allmulti(dev, 1);
		if (err) {
			unregister_netdevice(dev);
			dev_put(dev);
			return err;
		}
		break;
#endif
	case 0:
		dev = dev_get_by_index(net, vifc->mif6c_pifi);
		if (!dev)
			return -EADDRNOTAVAIL;
		err = dev_set_allmulti(dev, 1);
		if (err) {
			dev_put(dev);
			return err;
		}
		break;
	default:
		return -EINVAL;
	}

	in6_dev = __in6_dev_get(dev);
	if (in6_dev) {
		in6_dev->cnf.mc_forwarding++;
		inet6_netconf_notify_devconf(dev_net(dev),
					     NETCONFA_MC_FORWARDING,
					     dev->ifindex, &in6_dev->cnf);
	}

	/*
	 *	Fill in the VIF structures
	 */
	v->rate_limit = vifc->vifc_rate_limit;
	v->flags = vifc->mif6c_flags;
	if (!mrtsock)
		v->flags |= VIFF_STATIC;
	v->threshold = vifc->vifc_threshold;
	v->bytes_in = 0;
	v->bytes_out = 0;
	v->pkt_in = 0;
	v->pkt_out = 0;
	v->link = dev->ifindex;
	if (v->flags & MIFF_REGISTER)
		v->link = dev->iflink;

	/* And finish update writing critical data */
	write_lock_bh(&mrt_lock);
	v->dev = dev;
#ifdef CONFIG_IPV6_PIMSM_V2
	if (v->flags & MIFF_REGISTER)
		mrt->mroute_reg_vif_num = vifi;
#endif
	if (vifi + 1 > mrt->maxvif)
		mrt->maxvif = vifi + 1;
	write_unlock_bh(&mrt_lock);
	return 0;
}
static struct mfc6_cache *ip6mr_cache_find(struct mr6_table *mrt,
					   const struct in6_addr *origin,
					   const struct in6_addr *mcastgrp)
{
	int line = MFC6_HASH(mcastgrp, origin);
	struct mfc6_cache *c;

	list_for_each_entry(c, &mrt->mfc6_cache_array[line], list) {
		if (ipv6_addr_equal(&c->mf6c_origin, origin) &&
		    ipv6_addr_equal(&c->mf6c_mcastgrp, mcastgrp))
			return c;
	}
	return NULL;
}
/* Look for a (*,*,oif) entry */
static struct mfc6_cache *ip6mr_cache_find_any_parent(struct mr6_table *mrt,
						      mifi_t mifi)
{
	int line = MFC6_HASH(&in6addr_any, &in6addr_any);
	struct mfc6_cache *c;

	list_for_each_entry(c, &mrt->mfc6_cache_array[line], list)
		if (ipv6_addr_any(&c->mf6c_origin) &&
		    ipv6_addr_any(&c->mf6c_mcastgrp) &&
		    (c->mfc_un.res.ttls[mifi] < 255))
			return c;

	return NULL;
}
/* Look for a (*,G) entry */
static struct mfc6_cache *ip6mr_cache_find_any(struct mr6_table *mrt,
					       struct in6_addr *mcastgrp,
					       mifi_t mifi)
{
	int line = MFC6_HASH(mcastgrp, &in6addr_any);
	struct mfc6_cache *c, *proxy;

	if (ipv6_addr_any(mcastgrp))
		goto skip;

	list_for_each_entry(c, &mrt->mfc6_cache_array[line], list)
		if (ipv6_addr_any(&c->mf6c_origin) &&
		    ipv6_addr_equal(&c->mf6c_mcastgrp, mcastgrp)) {
			if (c->mfc_un.res.ttls[mifi] < 255)
				return c;

			/* It's ok if the mifi is part of the static tree */
			proxy = ip6mr_cache_find_any_parent(mrt,
							    c->mf6c_parent);
			if (proxy && proxy->mfc_un.res.ttls[mifi] < 255)
				return c;
		}

skip:
	return ip6mr_cache_find_any_parent(mrt, mifi);
}
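
/*
 * Lookup precedence on the receive path (see ip6_mr_input): an exact (S,G)
 * entry from ip6mr_cache_find() wins; otherwise ip6mr_cache_find_any()
 * tries a (*,G) entry and finally falls back to a (*,*) entry whose ttl
 * makes the receiving mif part of the static tree.
 */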
/*
 *	Allocate a multicast cache entry
 */
static struct mfc6_cache *ip6mr_cache_alloc(void)
{
	struct mfc6_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_KERNEL);
	if (c == NULL)
		return NULL;
	c->mfc_un.res.minvif = MAXMIFS;
	return c;
}

static struct mfc6_cache *ip6mr_cache_alloc_unres(void)
{
	struct mfc6_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_ATOMIC);
	if (c == NULL)
		return NULL;
	skb_queue_head_init(&c->mfc_un.unres.unresolved);
	c->mfc_un.unres.expires = jiffies + 10 * HZ;
	return c;
}
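
/*
 * ip6mr_cache_alloc() runs from process context (MRT6_ADD_MFC) and may
 * sleep, hence GFP_KERNEL; ip6mr_cache_alloc_unres() runs from the packet
 * receive path under mfc_unres_lock, hence GFP_ATOMIC.
 */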
/*
 *	A cache entry has gone into a resolved state from queued
 */

static void ip6mr_cache_resolve(struct net *net, struct mr6_table *mrt,
				struct mfc6_cache *uc, struct mfc6_cache *c)
{
	struct sk_buff *skb;

	/*
	 *	Play the pending entries through our router
	 */

	while ((skb = __skb_dequeue(&uc->mfc_un.unres.unresolved))) {
		if (ipv6_hdr(skb)->version == 0) {
			struct nlmsghdr *nlh = (struct nlmsghdr *)skb_pull(skb, sizeof(struct ipv6hdr));

			if (__ip6mr_fill_mroute(mrt, skb, c, nlmsg_data(nlh)) > 0) {
				nlh->nlmsg_len = skb_tail_pointer(skb) - (u8 *)nlh;
			} else {
				nlh->nlmsg_type = NLMSG_ERROR;
				nlh->nlmsg_len = nlmsg_msg_size(sizeof(struct nlmsgerr));
				skb_trim(skb, nlh->nlmsg_len);
				((struct nlmsgerr *)nlmsg_data(nlh))->error = -EMSGSIZE;
			}
			rtnl_unicast(skb, net, NETLINK_CB(skb).portid);
		} else
			ip6_mr_forward(net, mrt, skb, c);
	}
}
/*
 *	Bounce a cache query up to pim6sd. We could use netlink for this but pim6sd
 *	expects the following bizarre scheme.
 *
 *	Called under mrt_lock.
 */

static int ip6mr_cache_report(struct mr6_table *mrt, struct sk_buff *pkt,
			      mifi_t mifi, int assert)
{
	struct sk_buff *skb;
	struct mrt6msg *msg;
	int ret;

#ifdef CONFIG_IPV6_PIMSM_V2
	if (assert == MRT6MSG_WHOLEPKT)
		skb = skb_realloc_headroom(pkt, -skb_network_offset(pkt)
						+sizeof(*msg));
	else
#endif
		skb = alloc_skb(sizeof(struct ipv6hdr) + sizeof(*msg), GFP_ATOMIC);

	if (!skb)
		return -ENOBUFS;

	/* I suppose that internal messages
	 * do not require checksums */

	skb->ip_summed = CHECKSUM_UNNECESSARY;

#ifdef CONFIG_IPV6_PIMSM_V2
	if (assert == MRT6MSG_WHOLEPKT) {
		/* Ugly, but we have no choice with this interface.
		   Duplicate old header, fix length etc.
		   And all this only to mangle msg->im6_msgtype and
		   to set msg->im6_mbz to "mbz" :-)
		 */
		skb_push(skb, -skb_network_offset(pkt));

		skb_push(skb, sizeof(*msg));
		skb_reset_transport_header(skb);
		msg = (struct mrt6msg *)skb_transport_header(skb);
		msg->im6_mbz = 0;
		msg->im6_msgtype = MRT6MSG_WHOLEPKT;
		msg->im6_mif = mrt->mroute_reg_vif_num;
		msg->im6_pad = 0;
		msg->im6_src = ipv6_hdr(pkt)->saddr;
		msg->im6_dst = ipv6_hdr(pkt)->daddr;

		skb->ip_summed = CHECKSUM_UNNECESSARY;
	} else
#endif
	{
	/*
	 *	Copy the IP header
	 */

	skb_put(skb, sizeof(struct ipv6hdr));
	skb_reset_network_header(skb);
	skb_copy_to_linear_data(skb, ipv6_hdr(pkt), sizeof(struct ipv6hdr));

	/*
	 *	Add our header
	 */
	skb_put(skb, sizeof(*msg));
	skb_reset_transport_header(skb);
	msg = (struct mrt6msg *)skb_transport_header(skb);

	msg->im6_mbz = 0;
	msg->im6_msgtype = assert;
	msg->im6_mif = mifi;
	msg->im6_pad = 0;
	msg->im6_src = ipv6_hdr(pkt)->saddr;
	msg->im6_dst = ipv6_hdr(pkt)->daddr;

	skb_dst_set(skb, dst_clone(skb_dst(pkt)));
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	}

	if (mrt->mroute6_sk == NULL) {
		kfree_skb(skb);
		return -EINVAL;
	}

	/*
	 *	Deliver to user space multicast routing algorithms
	 */
	ret = sock_queue_rcv_skb(mrt->mroute6_sk, skb);
	if (ret < 0) {
		net_warn_ratelimited("mroute6: pending queue full, dropping entries\n");
		kfree_skb(skb);
	}

	return ret;
}
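
/*
 * Illustrative (hypothetical) userspace counterpart: a pim6sd-like daemon
 * read()s these upcalls from the socket it passed to MRT6_INIT, e.g.:
 *
 *	unsigned char buf[2048];
 *	struct mrt6msg *msg = (struct mrt6msg *)buf;
 *	int n = read(mroute_sock, buf, sizeof(buf));
 *	if (n >= (int)sizeof(*msg) && msg->im6_msgtype == MRT6MSG_NOCACHE)
 *		resolve_and_add_mfc(&msg->im6_src, &msg->im6_dst, msg->im6_mif);
 *
 * resolve_and_add_mfc() is a made-up helper standing in for the daemon's
 * own resolution logic.
 */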
/*
 *	Queue a packet for resolution. It gets locked cache entry!
 */

static int
ip6mr_cache_unresolved(struct mr6_table *mrt, mifi_t mifi, struct sk_buff *skb)
{
	bool found = false;
	int err;
	struct mfc6_cache *c;

	spin_lock_bh(&mfc_unres_lock);
	list_for_each_entry(c, &mrt->mfc6_unres_queue, list) {
		if (ipv6_addr_equal(&c->mf6c_mcastgrp, &ipv6_hdr(skb)->daddr) &&
		    ipv6_addr_equal(&c->mf6c_origin, &ipv6_hdr(skb)->saddr)) {
			found = true;
			break;
		}
	}

	if (!found) {
		/*
		 *	Create a new entry if allowable
		 */
		if (atomic_read(&mrt->cache_resolve_queue_len) >= 10 ||
		    (c = ip6mr_cache_alloc_unres()) == NULL) {
			spin_unlock_bh(&mfc_unres_lock);
			kfree_skb(skb);
			return -ENOBUFS;
		}

		/*
		 *	Fill in the new cache entry
		 */
		c->mf6c_parent = -1;
		c->mf6c_origin = ipv6_hdr(skb)->saddr;
		c->mf6c_mcastgrp = ipv6_hdr(skb)->daddr;

		/*
		 *	Reflect first query at pim6sd
		 */
		err = ip6mr_cache_report(mrt, skb, mifi, MRT6MSG_NOCACHE);
		if (err < 0) {
			/* If the report failed throw the cache entry
			   out - Brad Parker
			 */
			spin_unlock_bh(&mfc_unres_lock);
			ip6mr_cache_free(c);
			kfree_skb(skb);
			return err;
		}

		atomic_inc(&mrt->cache_resolve_queue_len);
		list_add(&c->list, &mrt->mfc6_unres_queue);
		mr6_netlink_event(mrt, c, RTM_NEWROUTE);

		ipmr_do_expire_process(mrt);
	}

	/*
	 *	See if we can append the packet
	 */
	if (c->mfc_un.unres.unresolved.qlen > 3) {
		kfree_skb(skb);
		err = -ENOBUFS;
	} else {
		skb_queue_tail(&c->mfc_un.unres.unresolved, skb);
		err = 0;
	}

	spin_unlock_bh(&mfc_unres_lock);
	return err;
}
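
/*
 * Memory used by unresolved flows is bounded twice over: at most 10
 * unresolved entries per table (the cache_resolve_queue_len check above)
 * and at most 4 queued skbs per entry (the qlen > 3 check), with the 10 s
 * expiry timer reclaiming anything pim6sd never resolves.
 */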
/*
 *	MFC6 cache manipulation by user space
 */

static int ip6mr_mfc_delete(struct mr6_table *mrt, struct mf6cctl *mfc,
			    int parent)
{
	int line;
	struct mfc6_cache *c, *next;

	line = MFC6_HASH(&mfc->mf6cc_mcastgrp.sin6_addr, &mfc->mf6cc_origin.sin6_addr);

	list_for_each_entry_safe(c, next, &mrt->mfc6_cache_array[line], list) {
		if (ipv6_addr_equal(&c->mf6c_origin, &mfc->mf6cc_origin.sin6_addr) &&
		    ipv6_addr_equal(&c->mf6c_mcastgrp,
				    &mfc->mf6cc_mcastgrp.sin6_addr) &&
		    (parent == -1 || parent == c->mf6c_parent)) {
			write_lock_bh(&mrt_lock);
			list_del(&c->list);
			write_unlock_bh(&mrt_lock);

			mr6_netlink_event(mrt, c, RTM_DELROUTE);
			ip6mr_cache_free(c);
			return 0;
		}
	}
	return -ENOENT;
}
static int ip6mr_device_event(struct notifier_block *this,
			      unsigned long event, void *ptr)
{
	struct net_device *dev = ptr;
	struct net *net = dev_net(dev);
	struct mr6_table *mrt;
	struct mif_device *v;
	int ct;
	LIST_HEAD(list);

	if (event != NETDEV_UNREGISTER)
		return NOTIFY_DONE;

	ip6mr_for_each_table(mrt, net) {
		v = &mrt->vif6_table[0];
		for (ct = 0; ct < mrt->maxvif; ct++, v++) {
			if (v->dev == dev)
				mif6_delete(mrt, ct, &list);
		}
	}
	unregister_netdevice_many(&list);

	return NOTIFY_DONE;
}
static struct notifier_block ip6_mr_notifier = {
	.notifier_call = ip6mr_device_event
};
/*
 *	Setup for IP multicast routing
 */

static int __net_init ip6mr_net_init(struct net *net)
{
	int err;

	err = ip6mr_rules_init(net);
	if (err < 0)
		goto fail;

#ifdef CONFIG_PROC_FS
	err = -ENOMEM;
	if (!proc_create("ip6_mr_vif", 0, net->proc_net, &ip6mr_vif_fops))
		goto proc_vif_fail;
	if (!proc_create("ip6_mr_cache", 0, net->proc_net, &ip6mr_mfc_fops))
		goto proc_cache_fail;
#endif

	return 0;

#ifdef CONFIG_PROC_FS
proc_cache_fail:
	remove_proc_entry("ip6_mr_vif", net->proc_net);
proc_vif_fail:
	ip6mr_rules_exit(net);
#endif
fail:
	return err;
}

static void __net_exit ip6mr_net_exit(struct net *net)
{
#ifdef CONFIG_PROC_FS
	remove_proc_entry("ip6_mr_cache", net->proc_net);
	remove_proc_entry("ip6_mr_vif", net->proc_net);
#endif
	ip6mr_rules_exit(net);
}

static struct pernet_operations ip6mr_net_ops = {
	.init = ip6mr_net_init,
	.exit = ip6mr_net_exit,
};
int __init ip6_mr_init(void)
{
	int err;

	mrt_cachep = kmem_cache_create("ip6_mrt_cache",
				       sizeof(struct mfc6_cache),
				       0, SLAB_HWCACHE_ALIGN,
				       NULL);
	if (!mrt_cachep)
		return -ENOMEM;

	err = register_pernet_subsys(&ip6mr_net_ops);
	if (err)
		goto reg_pernet_fail;

	err = register_netdevice_notifier(&ip6_mr_notifier);
	if (err)
		goto reg_notif_fail;
#ifdef CONFIG_IPV6_PIMSM_V2
	if (inet6_add_protocol(&pim6_protocol, IPPROTO_PIM) < 0) {
		pr_err("%s: can't add PIM protocol\n", __func__);
		err = -EAGAIN;
		goto add_proto_fail;
	}
#endif
	rtnl_register(RTNL_FAMILY_IP6MR, RTM_GETROUTE, NULL,
		      ip6mr_rtm_dumproute, NULL);
	return 0;
#ifdef CONFIG_IPV6_PIMSM_V2
add_proto_fail:
	unregister_netdevice_notifier(&ip6_mr_notifier);
#endif
reg_notif_fail:
	unregister_pernet_subsys(&ip6mr_net_ops);
reg_pernet_fail:
	kmem_cache_destroy(mrt_cachep);
	return err;
}
void ip6_mr_cleanup(void)
{
	unregister_netdevice_notifier(&ip6_mr_notifier);
	unregister_pernet_subsys(&ip6mr_net_ops);
	kmem_cache_destroy(mrt_cachep);
}
static int ip6mr_mfc_add(struct net *net, struct mr6_table *mrt,
			 struct mf6cctl *mfc, int mrtsock, int parent)
{
	bool found = false;
	int line;
	struct mfc6_cache *uc, *c;
	unsigned char ttls[MAXMIFS];
	int i;

	if (mfc->mf6cc_parent >= MAXMIFS)
		return -ENFILE;

	memset(ttls, 255, MAXMIFS);
	for (i = 0; i < MAXMIFS; i++) {
		if (IF_ISSET(i, &mfc->mf6cc_ifset))
			ttls[i] = 1;
	}

	line = MFC6_HASH(&mfc->mf6cc_mcastgrp.sin6_addr, &mfc->mf6cc_origin.sin6_addr);

	list_for_each_entry(c, &mrt->mfc6_cache_array[line], list) {
		if (ipv6_addr_equal(&c->mf6c_origin, &mfc->mf6cc_origin.sin6_addr) &&
		    ipv6_addr_equal(&c->mf6c_mcastgrp,
				    &mfc->mf6cc_mcastgrp.sin6_addr) &&
		    (parent == -1 || parent == mfc->mf6cc_parent)) {
			found = true;
			break;
		}
	}

	if (found) {
		write_lock_bh(&mrt_lock);
		c->mf6c_parent = mfc->mf6cc_parent;
		ip6mr_update_thresholds(mrt, c, ttls);
		if (!mrtsock)
			c->mfc_flags |= MFC_STATIC;
		write_unlock_bh(&mrt_lock);
		mr6_netlink_event(mrt, c, RTM_NEWROUTE);
		return 0;
	}

	if (!ipv6_addr_any(&mfc->mf6cc_mcastgrp.sin6_addr) &&
	    !ipv6_addr_is_multicast(&mfc->mf6cc_mcastgrp.sin6_addr))
		return -EINVAL;

	c = ip6mr_cache_alloc();
	if (c == NULL)
		return -ENOMEM;

	c->mf6c_origin = mfc->mf6cc_origin.sin6_addr;
	c->mf6c_mcastgrp = mfc->mf6cc_mcastgrp.sin6_addr;
	c->mf6c_parent = mfc->mf6cc_parent;
	ip6mr_update_thresholds(mrt, c, ttls);
	if (!mrtsock)
		c->mfc_flags |= MFC_STATIC;

	write_lock_bh(&mrt_lock);
	list_add(&c->list, &mrt->mfc6_cache_array[line]);
	write_unlock_bh(&mrt_lock);

	/*
	 *	Check to see if we resolved a queued list. If so we
	 *	need to send on the frames and tidy up.
	 */
	found = false;
	spin_lock_bh(&mfc_unres_lock);
	list_for_each_entry(uc, &mrt->mfc6_unres_queue, list) {
		if (ipv6_addr_equal(&uc->mf6c_origin, &c->mf6c_origin) &&
		    ipv6_addr_equal(&uc->mf6c_mcastgrp, &c->mf6c_mcastgrp)) {
			list_del(&uc->list);
			atomic_dec(&mrt->cache_resolve_queue_len);
			found = true;
			break;
		}
	}
	if (list_empty(&mrt->mfc6_unres_queue))
		del_timer(&mrt->ipmr_expire_timer);
	spin_unlock_bh(&mfc_unres_lock);

	if (found) {
		ip6mr_cache_resolve(net, mrt, uc, c);
		ip6mr_cache_free(uc);
	}
	mr6_netlink_event(mrt, c, RTM_NEWROUTE);
	return 0;
}
/*
 *	Close the multicast socket, and clear the vif tables etc
 */

static void mroute_clean_tables(struct mr6_table *mrt)
{
	int i;
	LIST_HEAD(list);
	struct mfc6_cache *c, *next;

	/*
	 *	Shut down all active vif entries
	 */
	for (i = 0; i < mrt->maxvif; i++) {
		if (!(mrt->vif6_table[i].flags & VIFF_STATIC))
			mif6_delete(mrt, i, &list);
	}
	unregister_netdevice_many(&list);

	/*
	 *	Wipe the cache
	 */
	for (i = 0; i < MFC6_LINES; i++) {
		list_for_each_entry_safe(c, next, &mrt->mfc6_cache_array[i], list) {
			if (c->mfc_flags & MFC_STATIC)
				continue;
			write_lock_bh(&mrt_lock);
			list_del(&c->list);
			write_unlock_bh(&mrt_lock);

			mr6_netlink_event(mrt, c, RTM_DELROUTE);
			ip6mr_cache_free(c);
		}
	}

	if (atomic_read(&mrt->cache_resolve_queue_len) != 0) {
		spin_lock_bh(&mfc_unres_lock);
		list_for_each_entry_safe(c, next, &mrt->mfc6_unres_queue, list) {
			list_del(&c->list);
			mr6_netlink_event(mrt, c, RTM_DELROUTE);
			ip6mr_destroy_unres(mrt, c);
		}
		spin_unlock_bh(&mfc_unres_lock);
	}
}
static int ip6mr_sk_init(struct mr6_table *mrt, struct sock *sk)
{
	int err = 0;
	struct net *net = sock_net(sk);

	rtnl_lock();
	write_lock_bh(&mrt_lock);
	if (likely(mrt->mroute6_sk == NULL)) {
		mrt->mroute6_sk = sk;
		net->ipv6.devconf_all->mc_forwarding++;
		inet6_netconf_notify_devconf(net, NETCONFA_MC_FORWARDING,
					     NETCONFA_IFINDEX_ALL,
					     net->ipv6.devconf_all);
	}
	else
		err = -EADDRINUSE;
	write_unlock_bh(&mrt_lock);

	rtnl_unlock();

	return err;
}
int ip6mr_sk_done(struct sock *sk)
{
	int err = -EACCES;
	struct net *net = sock_net(sk);
	struct mr6_table *mrt;

	rtnl_lock();
	ip6mr_for_each_table(mrt, net) {
		if (sk == mrt->mroute6_sk) {
			write_lock_bh(&mrt_lock);
			mrt->mroute6_sk = NULL;
			net->ipv6.devconf_all->mc_forwarding--;
			inet6_netconf_notify_devconf(net,
						     NETCONFA_MC_FORWARDING,
						     NETCONFA_IFINDEX_ALL,
						     net->ipv6.devconf_all);
			write_unlock_bh(&mrt_lock);

			mroute_clean_tables(mrt);
			err = 0;
			break;
		}
	}
	rtnl_unlock();

	return err;
}
struct sock *mroute6_socket(struct net *net, struct sk_buff *skb)
{
	struct mr6_table *mrt;
	struct flowi6 fl6 = {
		.flowi6_iif	= skb->skb_iif,
		.flowi6_oif	= skb->dev->ifindex,
		.flowi6_mark	= skb->mark,
	};

	if (ip6mr_fib_lookup(net, &fl6, &mrt) < 0)
		return NULL;

	return mrt->mroute6_sk;
}
/*
 *	Socket options and virtual interface manipulation. The whole
 *	virtual interface system is a complete heap, but unfortunately
 *	that's how BSD mrouted happens to think. Maybe one day with a proper
 *	MOSPF/PIM router set up we can clean this up.
 */
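
/*
 * Illustrative (hypothetical) daemon-side sequence for this interface;
 * error handling omitted, "eth0" is an arbitrary example interface:
 *
 *	int one = 1;
 *	struct mif6ctl mc = {
 *		.mif6c_mifi = 0,
 *		.mif6c_pifi = if_nametoindex("eth0"),
 *	};
 *	int s = socket(AF_INET6, SOCK_RAW, IPPROTO_ICMPV6);
 *	setsockopt(s, IPPROTO_IPV6, MRT6_INIT, &one, sizeof(one));
 *	setsockopt(s, IPPROTO_IPV6, MRT6_ADD_MIF, &mc, sizeof(mc));
 *	...
 *	setsockopt(s, IPPROTO_IPV6, MRT6_DONE, NULL, 0);
 */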
int ip6_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, unsigned int optlen)
{
	int ret, parent = 0;
	struct mif6ctl vif;
	struct mf6cctl mfc;
	mifi_t mifi;
	struct net *net = sock_net(sk);
	struct mr6_table *mrt;

	mrt = ip6mr_get_table(net, raw6_sk(sk)->ip6mr_table ? : RT6_TABLE_DFLT);
	if (mrt == NULL)
		return -ENOENT;

	if (optname != MRT6_INIT) {
		if (sk != mrt->mroute6_sk && !ns_capable(net->user_ns, CAP_NET_ADMIN))
			return -EACCES;
	}

	switch (optname) {
	case MRT6_INIT:
		if (sk->sk_type != SOCK_RAW ||
		    inet_sk(sk)->inet_num != IPPROTO_ICMPV6)
			return -EOPNOTSUPP;
		if (optlen < sizeof(int))
			return -EINVAL;

		return ip6mr_sk_init(mrt, sk);

	case MRT6_DONE:
		return ip6mr_sk_done(sk);

	case MRT6_ADD_MIF:
		if (optlen < sizeof(vif))
			return -EINVAL;
		if (copy_from_user(&vif, optval, sizeof(vif)))
			return -EFAULT;
		if (vif.mif6c_mifi >= MAXMIFS)
			return -ENFILE;
		rtnl_lock();
		ret = mif6_add(net, mrt, &vif, sk == mrt->mroute6_sk);
		rtnl_unlock();
		return ret;

	case MRT6_DEL_MIF:
		if (optlen < sizeof(mifi_t))
			return -EINVAL;
		if (copy_from_user(&mifi, optval, sizeof(mifi_t)))
			return -EFAULT;
		rtnl_lock();
		ret = mif6_delete(mrt, mifi, NULL);
		rtnl_unlock();
		return ret;

	/*
	 *	Manipulate the forwarding caches. These live
	 *	in a sort of kernel/user symbiosis.
	 */
	case MRT6_ADD_MFC:
	case MRT6_DEL_MFC:
		parent = -1;
	case MRT6_ADD_MFC_PROXY:
	case MRT6_DEL_MFC_PROXY:
		if (optlen < sizeof(mfc))
			return -EINVAL;
		if (copy_from_user(&mfc, optval, sizeof(mfc)))
			return -EFAULT;
		if (parent == 0)
			parent = mfc.mf6cc_parent;
		rtnl_lock();
		if (optname == MRT6_DEL_MFC || optname == MRT6_DEL_MFC_PROXY)
			ret = ip6mr_mfc_delete(mrt, &mfc, parent);
		else
			ret = ip6mr_mfc_add(net, mrt, &mfc,
					    sk == mrt->mroute6_sk, parent);
		rtnl_unlock();
		return ret;

	/*
	 *	Control PIM assert (to activate pim will activate assert)
	 */
	case MRT6_ASSERT:
	{
		int v;

		if (optlen != sizeof(v))
			return -EINVAL;
		if (get_user(v, (int __user *)optval))
			return -EFAULT;
		mrt->mroute_do_assert = v;
		return 0;
	}

#ifdef CONFIG_IPV6_PIMSM_V2
	case MRT6_PIM:
	{
		int v;

		if (optlen != sizeof(v))
			return -EINVAL;
		if (get_user(v, (int __user *)optval))
			return -EFAULT;
		v = !!v;
		ret = 0;
		if (v != mrt->mroute_do_pim) {
			mrt->mroute_do_pim = v;
			mrt->mroute_do_assert = v;
		}
		return ret;
	}
#endif

#ifdef CONFIG_IPV6_MROUTE_MULTIPLE_TABLES
	case MRT6_TABLE:
	{
		u32 v;

		if (optlen != sizeof(u32))
			return -EINVAL;
		if (get_user(v, (u32 __user *)optval))
			return -EFAULT;
		/* "pim6reg%u" should not exceed 16 bytes (IFNAMSIZ) */
		if (v != RT_TABLE_DEFAULT && v >= 100000000)
			return -EINVAL;
		if (sk == mrt->mroute6_sk)
			return -EBUSY;

		rtnl_lock();
		ret = 0;
		if (!ip6mr_new_table(net, v))
			ret = -ENOMEM;
		raw6_sk(sk)->ip6mr_table = v;
		rtnl_unlock();
		return ret;
	}
#endif
	/*
	 *	Spurious command, or MRT6_VERSION which you cannot
	 *	set.
	 */
	default:
		return -ENOPROTOOPT;
	}
}
/*
 *	Getsock opt support for the multicast routing system.
 */

int ip6_mroute_getsockopt(struct sock *sk, int optname, char __user *optval,
			  int __user *optlen)
{
	int olr;
	int val;
	struct net *net = sock_net(sk);
	struct mr6_table *mrt;

	mrt = ip6mr_get_table(net, raw6_sk(sk)->ip6mr_table ? : RT6_TABLE_DFLT);
	if (mrt == NULL)
		return -ENOENT;

	switch (optname) {
#ifdef CONFIG_IPV6_PIMSM_V2
	case MRT6_PIM:
		val = mrt->mroute_do_pim;
		break;
#endif
	case MRT6_ASSERT:
		val = mrt->mroute_do_assert;
		break;
	default:
		return -ENOPROTOOPT;
	}

	if (get_user(olr, optlen))
		return -EFAULT;

	olr = min_t(int, olr, sizeof(int));
	if (olr < 0)
		return -EINVAL;

	if (put_user(olr, optlen))
		return -EFAULT;
	if (copy_to_user(optval, &val, olr))
		return -EFAULT;
	return 0;
}
/*
 *	The IP multicast ioctl support routines.
 */

int ip6mr_ioctl(struct sock *sk, int cmd, void __user *arg)
{
	struct sioc_sg_req6 sr;
	struct sioc_mif_req6 vr;
	struct mif_device *vif;
	struct mfc6_cache *c;
	struct net *net = sock_net(sk);
	struct mr6_table *mrt;

	mrt = ip6mr_get_table(net, raw6_sk(sk)->ip6mr_table ? : RT6_TABLE_DFLT);
	if (mrt == NULL)
		return -ENOENT;

	switch (cmd) {
	case SIOCGETMIFCNT_IN6:
		if (copy_from_user(&vr, arg, sizeof(vr)))
			return -EFAULT;
		if (vr.mifi >= mrt->maxvif)
			return -EINVAL;
		read_lock(&mrt_lock);
		vif = &mrt->vif6_table[vr.mifi];
		if (MIF_EXISTS(mrt, vr.mifi)) {
			vr.icount = vif->pkt_in;
			vr.ocount = vif->pkt_out;
			vr.ibytes = vif->bytes_in;
			vr.obytes = vif->bytes_out;
			read_unlock(&mrt_lock);

			if (copy_to_user(arg, &vr, sizeof(vr)))
				return -EFAULT;
			return 0;
		}
		read_unlock(&mrt_lock);
		return -EADDRNOTAVAIL;
	case SIOCGETSGCNT_IN6:
		if (copy_from_user(&sr, arg, sizeof(sr)))
			return -EFAULT;

		read_lock(&mrt_lock);
		c = ip6mr_cache_find(mrt, &sr.src.sin6_addr, &sr.grp.sin6_addr);
		if (c) {
			sr.pktcnt = c->mfc_un.res.pkt;
			sr.bytecnt = c->mfc_un.res.bytes;
			sr.wrong_if = c->mfc_un.res.wrong_if;
			read_unlock(&mrt_lock);

			if (copy_to_user(arg, &sr, sizeof(sr)))
				return -EFAULT;
			return 0;
		}
		read_unlock(&mrt_lock);
		return -EADDRNOTAVAIL;
	default:
		return -ENOIOCTLCMD;
	}
}
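
/*
 * Illustrative (hypothetical) use of SIOCGETSGCNT_IN6 on the mroute socket;
 * the addresses are documentation examples:
 *
 *	struct sioc_sg_req6 sr;
 *	memset(&sr, 0, sizeof(sr));
 *	inet_pton(AF_INET6, "2001:db8::1", &sr.src.sin6_addr);
 *	inet_pton(AF_INET6, "ff3e::8000:1", &sr.grp.sin6_addr);
 *	if (ioctl(mroute_sock, SIOCGETSGCNT_IN6, &sr) == 0)
 *		printf("pkts=%lu bytes=%lu wrong_if=%lu\n",
 *		       sr.pktcnt, sr.bytecnt, sr.wrong_if);
 */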
#ifdef CONFIG_COMPAT
struct compat_sioc_sg_req6 {
	struct sockaddr_in6 src;
	struct sockaddr_in6 grp;
	compat_ulong_t pktcnt;
	compat_ulong_t bytecnt;
	compat_ulong_t wrong_if;
};

struct compat_sioc_mif_req6 {
	mifi_t	mifi;
	compat_ulong_t icount;
	compat_ulong_t ocount;
	compat_ulong_t ibytes;
	compat_ulong_t obytes;
};
int ip6mr_compat_ioctl(struct sock *sk, unsigned int cmd, void __user *arg)
{
	struct compat_sioc_sg_req6 sr;
	struct compat_sioc_mif_req6 vr;
	struct mif_device *vif;
	struct mfc6_cache *c;
	struct net *net = sock_net(sk);
	struct mr6_table *mrt;

	mrt = ip6mr_get_table(net, raw6_sk(sk)->ip6mr_table ? : RT6_TABLE_DFLT);
	if (mrt == NULL)
		return -ENOENT;

	switch (cmd) {
	case SIOCGETMIFCNT_IN6:
		if (copy_from_user(&vr, arg, sizeof(vr)))
			return -EFAULT;
		if (vr.mifi >= mrt->maxvif)
			return -EINVAL;
		read_lock(&mrt_lock);
		vif = &mrt->vif6_table[vr.mifi];
		if (MIF_EXISTS(mrt, vr.mifi)) {
			vr.icount = vif->pkt_in;
			vr.ocount = vif->pkt_out;
			vr.ibytes = vif->bytes_in;
			vr.obytes = vif->bytes_out;
			read_unlock(&mrt_lock);

			if (copy_to_user(arg, &vr, sizeof(vr)))
				return -EFAULT;
			return 0;
		}
		read_unlock(&mrt_lock);
		return -EADDRNOTAVAIL;
	case SIOCGETSGCNT_IN6:
		if (copy_from_user(&sr, arg, sizeof(sr)))
			return -EFAULT;

		read_lock(&mrt_lock);
		c = ip6mr_cache_find(mrt, &sr.src.sin6_addr, &sr.grp.sin6_addr);
		if (c) {
			sr.pktcnt = c->mfc_un.res.pkt;
			sr.bytecnt = c->mfc_un.res.bytes;
			sr.wrong_if = c->mfc_un.res.wrong_if;
			read_unlock(&mrt_lock);

			if (copy_to_user(arg, &sr, sizeof(sr)))
				return -EFAULT;
			return 0;
		}
		read_unlock(&mrt_lock);
		return -EADDRNOTAVAIL;
	default:
		return -ENOIOCTLCMD;
	}
}
#endif
static inline int ip6mr_forward2_finish(struct sk_buff *skb)
{
	IP6_INC_STATS_BH(dev_net(skb_dst(skb)->dev), ip6_dst_idev(skb_dst(skb)),
			 IPSTATS_MIB_OUTFORWDATAGRAMS);
	IP6_ADD_STATS_BH(dev_net(skb_dst(skb)->dev), ip6_dst_idev(skb_dst(skb)),
			 IPSTATS_MIB_OUTOCTETS, skb->len);
	return dst_output(skb);
}
/*
 *	Processing handlers for ip6mr_forward
 */

static int ip6mr_forward2(struct net *net, struct mr6_table *mrt,
			  struct sk_buff *skb, struct mfc6_cache *c, int vifi)
{
	struct ipv6hdr *ipv6h;
	struct mif_device *vif = &mrt->vif6_table[vifi];
	struct net_device *dev;
	struct dst_entry *dst;
	struct flowi6 fl6;

	if (vif->dev == NULL)
		goto out_free;

#ifdef CONFIG_IPV6_PIMSM_V2
	if (vif->flags & MIFF_REGISTER) {
		vif->pkt_out++;
		vif->bytes_out += skb->len;
		vif->dev->stats.tx_bytes += skb->len;
		vif->dev->stats.tx_packets++;
		ip6mr_cache_report(mrt, skb, vifi, MRT6MSG_WHOLEPKT);
		goto out_free;
	}
#endif

	ipv6h = ipv6_hdr(skb);

	fl6 = (struct flowi6) {
		.flowi6_oif = vif->link,
		.daddr = ipv6h->daddr,
	};

	dst = ip6_route_output(net, NULL, &fl6);
	if (dst->error) {
		dst_release(dst);
		goto out_free;
	}

	skb_dst_drop(skb);
	skb_dst_set(skb, dst);

	/*
	 * RFC1584 teaches, that DVMRP/PIM router must deliver packets locally
	 * not only before forwarding, but after forwarding on all output
	 * interfaces. It is clear, if mrouter runs a multicasting
	 * program, it should receive packets not depending to what interface
	 * program is joined.
	 * If we will not make it, the program will have to join on all
	 * interfaces. On the other hand, multihoming host (or router, but
	 * not mrouter) cannot join to more than one interface - it will
	 * result in receiving multiple packets.
	 */
	dev = vif->dev;
	skb->dev = dev;
	vif->pkt_out++;
	vif->bytes_out += skb->len;

	/* We are about to write */
	/* XXX: extension headers? */
	if (skb_cow(skb, sizeof(*ipv6h) + LL_RESERVED_SPACE(dev)))
		goto out_free;

	ipv6h = ipv6_hdr(skb);
	ipv6h->hop_limit--;

	IP6CB(skb)->flags |= IP6SKB_FORWARDED;

	return NF_HOOK(NFPROTO_IPV6, NF_INET_FORWARD, skb, skb->dev, dev,
		       ip6mr_forward2_finish);

out_free:
	kfree_skb(skb);
	return 0;
}
static int ip6mr_find_vif(struct mr6_table *mrt, struct net_device *dev)
{
	int ct;

	for (ct = mrt->maxvif - 1; ct >= 0; ct--) {
		if (mrt->vif6_table[ct].dev == dev)
			break;
	}
	return ct;
}
static int ip6_mr_forward(struct net *net, struct mr6_table *mrt,
			  struct sk_buff *skb, struct mfc6_cache *cache)
{
	int psend = -1;
	int vif, ct;
	int true_vifi = ip6mr_find_vif(mrt, skb->dev);

	vif = cache->mf6c_parent;
	cache->mfc_un.res.pkt++;
	cache->mfc_un.res.bytes += skb->len;

	if (ipv6_addr_any(&cache->mf6c_origin) && true_vifi >= 0) {
		struct mfc6_cache *cache_proxy;

		/* For an (*,G) entry, we only check that the incoming
		 * interface is part of the static tree.
		 */
		cache_proxy = ip6mr_cache_find_any_parent(mrt, vif);
		if (cache_proxy &&
		    cache_proxy->mfc_un.res.ttls[true_vifi] < 255)
			goto forward;
	}

	/*
	 * Wrong interface: drop packet and (maybe) send PIM assert.
	 */
	if (mrt->vif6_table[vif].dev != skb->dev) {
		cache->mfc_un.res.wrong_if++;

		if (true_vifi >= 0 && mrt->mroute_do_assert &&
		    /* pimsm uses asserts, when switching from RPT to SPT,
		       so that we cannot check that packet arrived on an oif.
		       It is bad, but otherwise we would need to move pretty
		       large chunk of pimd to kernel. Ough... --ANK
		     */
		    (mrt->mroute_do_pim ||
		     cache->mfc_un.res.ttls[true_vifi] < 255) &&
		    time_after(jiffies,
			       cache->mfc_un.res.last_assert + MFC_ASSERT_THRESH)) {
			cache->mfc_un.res.last_assert = jiffies;
			ip6mr_cache_report(mrt, skb, true_vifi, MRT6MSG_WRONGMIF);
		}
		goto dont_forward;
	}

forward:
	mrt->vif6_table[vif].pkt_in++;
	mrt->vif6_table[vif].bytes_in += skb->len;

	/*
	 *	Forward the frame
	 */
	if (ipv6_addr_any(&cache->mf6c_origin) &&
	    ipv6_addr_any(&cache->mf6c_mcastgrp)) {
		if (true_vifi >= 0 &&
		    true_vifi != cache->mf6c_parent &&
		    ipv6_hdr(skb)->hop_limit >
				cache->mfc_un.res.ttls[cache->mf6c_parent]) {
			/* It's an (*,*) entry and the packet is not coming from
			 * the upstream: forward the packet to the upstream
			 * only.
			 */
			psend = cache->mf6c_parent;
			goto last_forward;
		}
		goto dont_forward;
	}
	for (ct = cache->mfc_un.res.maxvif - 1; ct >= cache->mfc_un.res.minvif; ct--) {
		/* For (*,G) entry, don't forward to the incoming interface */
		if ((!ipv6_addr_any(&cache->mf6c_origin) || ct != true_vifi) &&
		    ipv6_hdr(skb)->hop_limit > cache->mfc_un.res.ttls[ct]) {
			if (psend != -1) {
				struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
				if (skb2)
					ip6mr_forward2(net, mrt, skb2, cache, psend);
			}
			psend = ct;
		}
	}
last_forward:
	if (psend != -1) {
		ip6mr_forward2(net, mrt, skb, cache, psend);
		return 0;
	}

dont_forward:
	kfree_skb(skb);
	return 0;
}
/*
 *	Multicast packets for forwarding arrive here
 */

int ip6_mr_input(struct sk_buff *skb)
{
	struct mfc6_cache *cache;
	struct net *net = dev_net(skb->dev);
	struct mr6_table *mrt;
	struct flowi6 fl6 = {
		.flowi6_iif	= skb->dev->ifindex,
		.flowi6_mark	= skb->mark,
	};
	int err;

	err = ip6mr_fib_lookup(net, &fl6, &mrt);
	if (err < 0) {
		kfree_skb(skb);
		return err;
	}

	read_lock(&mrt_lock);
	cache = ip6mr_cache_find(mrt,
				 &ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr);
	if (cache == NULL) {
		int vif = ip6mr_find_vif(mrt, skb->dev);

		if (vif >= 0)
			cache = ip6mr_cache_find_any(mrt,
						     &ipv6_hdr(skb)->daddr,
						     vif);
	}

	/*
	 *	No usable cache entry
	 */
	if (cache == NULL) {
		int vif;

		vif = ip6mr_find_vif(mrt, skb->dev);
		if (vif >= 0) {
			int err = ip6mr_cache_unresolved(mrt, vif, skb);
			read_unlock(&mrt_lock);

			return err;
		}
		read_unlock(&mrt_lock);
		kfree_skb(skb);
		return -ENODEV;
	}

	ip6_mr_forward(net, mrt, skb, cache);

	read_unlock(&mrt_lock);

	return 0;
}
static int __ip6mr_fill_mroute(struct mr6_table *mrt, struct sk_buff *skb,
			       struct mfc6_cache *c, struct rtmsg *rtm)
{
	int ct;
	struct rtnexthop *nhp;
	struct nlattr *mp_attr;
	struct rta_mfc_stats mfcs;

	/* If cache is unresolved, don't try to parse IIF and OIF */
	if (c->mf6c_parent >= MAXMIFS)
		return -ENOENT;

	if (MIF_EXISTS(mrt, c->mf6c_parent) &&
	    nla_put_u32(skb, RTA_IIF, mrt->vif6_table[c->mf6c_parent].dev->ifindex) < 0)
		return -EMSGSIZE;
	mp_attr = nla_nest_start(skb, RTA_MULTIPATH);
	if (mp_attr == NULL)
		return -EMSGSIZE;

	for (ct = c->mfc_un.res.minvif; ct < c->mfc_un.res.maxvif; ct++) {
		if (MIF_EXISTS(mrt, ct) && c->mfc_un.res.ttls[ct] < 255) {
			nhp = nla_reserve_nohdr(skb, sizeof(*nhp));
			if (nhp == NULL) {
				nla_nest_cancel(skb, mp_attr);
				return -EMSGSIZE;
			}

			nhp->rtnh_flags = 0;
			nhp->rtnh_hops = c->mfc_un.res.ttls[ct];
			nhp->rtnh_ifindex = mrt->vif6_table[ct].dev->ifindex;
			nhp->rtnh_len = sizeof(*nhp);
		}
	}

	nla_nest_end(skb, mp_attr);

	mfcs.mfcs_packets = c->mfc_un.res.pkt;
	mfcs.mfcs_bytes = c->mfc_un.res.bytes;
	mfcs.mfcs_wrong_if = c->mfc_un.res.wrong_if;
	if (nla_put(skb, RTA_MFC_STATS, sizeof(mfcs), &mfcs) < 0)
		return -EMSGSIZE;

	rtm->rtm_type = RTN_MULTICAST;
	return 1;
}
int ip6mr_get_route(struct net *net,
		    struct sk_buff *skb, struct rtmsg *rtm, int nowait)
{
	int err;
	struct mr6_table *mrt;
	struct mfc6_cache *cache;
	struct rt6_info *rt = (struct rt6_info *)skb_dst(skb);

	mrt = ip6mr_get_table(net, RT6_TABLE_DFLT);
	if (mrt == NULL)
		return -ENOENT;

	read_lock(&mrt_lock);
	cache = ip6mr_cache_find(mrt, &rt->rt6i_src.addr, &rt->rt6i_dst.addr);
	if (!cache && skb->dev) {
		int vif = ip6mr_find_vif(mrt, skb->dev);

		if (vif >= 0)
			cache = ip6mr_cache_find_any(mrt, &rt->rt6i_dst.addr,
						     vif);
	}

	if (!cache) {
		struct sk_buff *skb2;
		struct ipv6hdr *iph;
		struct net_device *dev;
		int vif;

		if (nowait) {
			read_unlock(&mrt_lock);
			return -EAGAIN;
		}

		dev = skb->dev;
		if (dev == NULL || (vif = ip6mr_find_vif(mrt, dev)) < 0) {
			read_unlock(&mrt_lock);
			return -ENODEV;
		}

		/* really correct? */
		skb2 = alloc_skb(sizeof(struct ipv6hdr), GFP_ATOMIC);
		if (!skb2) {
			read_unlock(&mrt_lock);
			return -ENOMEM;
		}

		skb_reset_transport_header(skb2);

		skb_put(skb2, sizeof(struct ipv6hdr));
		skb_reset_network_header(skb2);

		iph = ipv6_hdr(skb2);
		iph->version = 0;
		iph->priority = 0;
		iph->flow_lbl[0] = 0;
		iph->flow_lbl[1] = 0;
		iph->flow_lbl[2] = 0;
		iph->payload_len = 0;
		iph->nexthdr = IPPROTO_NONE;
		iph->hop_limit = 0;
		iph->saddr = rt->rt6i_src.addr;
		iph->daddr = rt->rt6i_dst.addr;

		err = ip6mr_cache_unresolved(mrt, vif, skb2);
		read_unlock(&mrt_lock);

		return err;
	}

	if (!nowait && (rtm->rtm_flags & RTM_F_NOTIFY))
		cache->mfc_flags |= MFC_NOTIFY;

	err = __ip6mr_fill_mroute(mrt, skb, cache, rtm);
	read_unlock(&mrt_lock);
	return err;
}
static int ip6mr_fill_mroute(struct mr6_table *mrt, struct sk_buff *skb,
			     u32 portid, u32 seq, struct mfc6_cache *c, int cmd,
			     int flags)
{
	struct nlmsghdr *nlh;
	struct rtmsg *rtm;
	int err;

	nlh = nlmsg_put(skb, portid, seq, cmd, sizeof(*rtm), flags);
	if (nlh == NULL)
		return -EMSGSIZE;

	rtm = nlmsg_data(nlh);
	rtm->rtm_family   = RTNL_FAMILY_IP6MR;
	rtm->rtm_dst_len  = 128;
	rtm->rtm_src_len  = 128;
	rtm->rtm_tos      = 0;
	rtm->rtm_table    = mrt->id;
	if (nla_put_u32(skb, RTA_TABLE, mrt->id))
		goto nla_put_failure;
	rtm->rtm_type = RTN_MULTICAST;
	rtm->rtm_scope    = RT_SCOPE_UNIVERSE;
	if (c->mfc_flags & MFC_STATIC)
		rtm->rtm_protocol = RTPROT_STATIC;
	else
		rtm->rtm_protocol = RTPROT_MROUTED;
	rtm->rtm_flags    = 0;

	if (nla_put(skb, RTA_SRC, 16, &c->mf6c_origin) ||
	    nla_put(skb, RTA_DST, 16, &c->mf6c_mcastgrp))
		goto nla_put_failure;
	err = __ip6mr_fill_mroute(mrt, skb, c, rtm);
	/* do not break the dump if cache is unresolved */
	if (err < 0 && err != -ENOENT)
		goto nla_put_failure;

	return nlmsg_end(skb, nlh);

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}
static int mr6_msgsize(bool unresolved, int maxvif)
{
	size_t len =
		NLMSG_ALIGN(sizeof(struct rtmsg))
		+ nla_total_size(4)	/* RTA_TABLE */
		+ nla_total_size(sizeof(struct in6_addr))	/* RTA_SRC */
		+ nla_total_size(sizeof(struct in6_addr))	/* RTA_DST */
		;

	if (!unresolved)
		len = len
		      + nla_total_size(4)	/* RTA_IIF */
		      + nla_total_size(0)	/* RTA_MULTIPATH */
		      + maxvif * NLA_ALIGN(sizeof(struct rtnexthop))
						/* RTA_MFC_STATS */
		      + nla_total_size(sizeof(struct rta_mfc_stats))
		      ;

	return len;
}
static void mr6_netlink_event(struct mr6_table *mrt, struct mfc6_cache *mfc,
			      int cmd)
{
	struct net *net = read_pnet(&mrt->net);
	struct sk_buff *skb;
	int err = -ENOBUFS;

	skb = nlmsg_new(mr6_msgsize(mfc->mf6c_parent >= MAXMIFS, mrt->maxvif),
			GFP_ATOMIC);
	if (skb == NULL)
		goto errout;

	err = ip6mr_fill_mroute(mrt, skb, 0, 0, mfc, cmd, 0);
	if (err < 0)
		goto errout;

	rtnl_notify(skb, net, 0, RTNLGRP_IPV6_MROUTE, NULL, GFP_ATOMIC);
	return;

errout:
	kfree_skb(skb);
	if (err < 0)
		rtnl_set_sk_err(net, RTNLGRP_IPV6_MROUTE, err);
}
static int ip6mr_rtm_dumproute(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	struct mr6_table *mrt;
	struct mfc6_cache *mfc;
	unsigned int t = 0, s_t;
	unsigned int h = 0, s_h;
	unsigned int e = 0, s_e;

	s_t = cb->args[0];
	s_h = cb->args[1];
	s_e = cb->args[2];

	read_lock(&mrt_lock);
	ip6mr_for_each_table(mrt, net) {
		if (t < s_t)
			goto next_table;
		if (t > s_t)
			s_h = 0;
		for (h = s_h; h < MFC6_LINES; h++) {
			list_for_each_entry(mfc, &mrt->mfc6_cache_array[h], list) {
				if (e < s_e)
					goto next_entry;
				if (ip6mr_fill_mroute(mrt, skb,
						      NETLINK_CB(cb->skb).portid,
						      cb->nlh->nlmsg_seq,
						      mfc, RTM_NEWROUTE,
						      NLM_F_MULTI) < 0)
					goto done;
next_entry:
				e++;
			}
			e = s_e = 0;
		}
		spin_lock_bh(&mfc_unres_lock);
		list_for_each_entry(mfc, &mrt->mfc6_unres_queue, list) {
			if (e < s_e)
				goto next_entry2;
			if (ip6mr_fill_mroute(mrt, skb,
					      NETLINK_CB(cb->skb).portid,
					      cb->nlh->nlmsg_seq,
					      mfc, RTM_NEWROUTE,
					      NLM_F_MULTI) < 0) {
				spin_unlock_bh(&mfc_unres_lock);
				goto done;
			}
next_entry2:
			e++;
		}
		spin_unlock_bh(&mfc_unres_lock);
		e = s_e = 0;
		s_h = 0;
next_table:
		t++;
	}
done:
	read_unlock(&mrt_lock);

	cb->args[2] = e;
	cb->args[1] = h;
	cb->args[0] = t;

	return skb->len;
}