/*
 *	IP multicast routing support for mrouted 3.6/3.8
 *
 *	(c) 1995 Alan Cox, <alan@lxorguk.ukuu.org.uk>
 *	  Linux Consultancy and Custom Driver Development
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 *	Fixes:
 *	Michael Chastain	:	Incorrect size of copying.
 *	Alan Cox		:	Added the cache manager code
 *	Alan Cox		:	Fixed the clone/copy bug and device race.
 *	Mike McLagan		:	Routing by source
 *	Malcolm Beattie		:	Buffer handling fixes.
 *	Alexey Kuznetsov	:	Double buffer free and other fixes.
 *	SVR Anand		:	Fixed several multicast bugs and problems.
 *	Alexey Kuznetsov	:	Status, optimisations and more.
 *	Brad Parker		:	Better behaviour on mrouted upcall
 *					overflow.
 *	Carlos Picoto		:	PIMv1 Support
 *	Pavlin Ivanov Radoslavov:	PIMv2 Registers must checksum only PIM header
 *					Relax this requirement to work with older peers.
 */
#include <asm/uaccess.h>
#include <linux/types.h>
#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/kernel.h>
#include <linux/fcntl.h>
#include <linux/stat.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/igmp.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/mroute.h>
#include <linux/init.h>
#include <linux/if_ether.h>
#include <linux/slab.h>
#include <net/net_namespace.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <linux/skbuff.h>
#include <net/route.h>
#include <net/sock.h>
#include <net/icmp.h>
#include <net/udp.h>
#include <net/raw.h>
#include <linux/notifier.h>
#include <linux/if_arp.h>
#include <linux/netfilter_ipv4.h>
#include <linux/compat.h>
#include <linux/export.h>
#include <net/ip_tunnels.h>
#include <net/checksum.h>
#include <net/netlink.h>
#include <net/fib_rules.h>
#include <linux/netconf.h>
#if defined(CONFIG_IP_PIMSM_V1) || defined(CONFIG_IP_PIMSM_V2)
#define CONFIG_IP_PIMSM	1
#endif

struct mr_table {
	struct list_head	list;
	possible_net_t		net;
	u32			id;
	struct sock __rcu	*mroute_sk;
	struct timer_list	ipmr_expire_timer;
	struct list_head	mfc_unres_queue;
	struct list_head	mfc_cache_array[MFC_LINES];
	struct vif_device	vif_table[MAXVIFS];
	int			maxvif;
	atomic_t		cache_resolve_queue_len;
	bool			mroute_do_assert;
	bool			mroute_do_pim;
#if defined(CONFIG_IP_PIMSM_V1) || defined(CONFIG_IP_PIMSM_V2)
	int			mroute_reg_vif_num;
#endif
};

struct ipmr_rule {
	struct fib_rule		common;
};

struct ipmr_result {
	struct mr_table		*mrt;
};
/* Big lock, protecting vif table, mrt cache and mroute socket state.
 * Note that the changes are semaphored via rtnl_lock.
 */

static DEFINE_RWLOCK(mrt_lock);

/*
 *	Multicast router control variables
 */

#define VIF_EXISTS(_mrt, _idx) ((_mrt)->vif_table[_idx].dev != NULL)

/* Special spinlock for queue of unresolved entries */
static DEFINE_SPINLOCK(mfc_unres_lock);
/* We return to Alan's original scheme. The hash table of resolved
 * entries is changed only in process context and protected
 * with the weak lock mrt_lock. The queue of unresolved entries is
 * protected with the strong spinlock mfc_unres_lock.
 *
 * In this case the data path is entirely free of exclusive locks.
 */
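/* Illustrative sketch (not part of the original source): under this scheme
 * the fast path needs only
 *
 *	rcu_read_lock();
 *	c = ipmr_cache_find(mrt, iph->saddr, iph->daddr);
 *	... forward using c ...
 *	rcu_read_unlock();
 *
 * while updaters run in process context under RTNL, publish entries with
 * list_add_rcu()/list_del_rcu(), and take write_lock_bh(&mrt_lock) only
 * around the vif-table fields that readers dereference.
 */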
static struct kmem_cache *mrt_cachep __read_mostly;

static struct mr_table *ipmr_new_table(struct net *net, u32 id);
static void ipmr_free_table(struct mr_table *mrt);

static void ip_mr_forward(struct net *net, struct mr_table *mrt,
			  struct sk_buff *skb, struct mfc_cache *cache,
			  int local);
static int ipmr_cache_report(struct mr_table *mrt,
			     struct sk_buff *pkt, vifi_t vifi, int assert);
static int __ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
			      struct mfc_cache *c, struct rtmsg *rtm);
static void mroute_netlink_event(struct mr_table *mrt, struct mfc_cache *mfc,
				 int cmd);
static void mroute_clean_tables(struct mr_table *mrt, bool all);
static void ipmr_expire_process(unsigned long arg);
#ifdef CONFIG_IP_MROUTE_MULTIPLE_TABLES
#define ipmr_for_each_table(mrt, net) \
	list_for_each_entry_rcu(mrt, &net->ipv4.mr_tables, list)

static struct mr_table *ipmr_get_table(struct net *net, u32 id)
{
	struct mr_table *mrt;

	ipmr_for_each_table(mrt, net) {
		if (mrt->id == id)
			return mrt;
	}
	return NULL;
}

static int ipmr_fib_lookup(struct net *net, struct flowi4 *flp4,
			   struct mr_table **mrt)
{
	int err;
	struct ipmr_result res;
	struct fib_lookup_arg arg = {
		.result = &res,
		.flags = FIB_LOOKUP_NOREF,
	};

	err = fib_rules_lookup(net->ipv4.mr_rules_ops,
			       flowi4_to_flowi(flp4), 0, &arg);
	if (err < 0)
		return err;
	*mrt = res.mrt;
	return 0;
}

static int ipmr_rule_action(struct fib_rule *rule, struct flowi *flp,
			    int flags, struct fib_lookup_arg *arg)
{
	struct ipmr_result *res = arg->result;
	struct mr_table *mrt;

	switch (rule->action) {
	case FR_ACT_TO_TBL:
		break;
	case FR_ACT_UNREACHABLE:
		return -ENETUNREACH;
	case FR_ACT_PROHIBIT:
		return -EACCES;
	case FR_ACT_BLACKHOLE:
	default:
		return -EINVAL;
	}

	mrt = ipmr_get_table(rule->fr_net, rule->table);
	if (!mrt)
		return -EAGAIN;
	res->mrt = mrt;
	return 0;
}

static int ipmr_rule_match(struct fib_rule *rule, struct flowi *fl, int flags)
{
	return 1;
}

static const struct nla_policy ipmr_rule_policy[FRA_MAX + 1] = {
	FRA_GENERIC_POLICY,
};

static int ipmr_rule_configure(struct fib_rule *rule, struct sk_buff *skb,
			       struct fib_rule_hdr *frh, struct nlattr **tb)
{
	return 0;
}

static int ipmr_rule_compare(struct fib_rule *rule, struct fib_rule_hdr *frh,
			     struct nlattr **tb)
{
	return 1;
}

static int ipmr_rule_fill(struct fib_rule *rule, struct sk_buff *skb,
			  struct fib_rule_hdr *frh)
{
	frh->dst_len = 0;
	frh->src_len = 0;
	frh->tos     = 0;
	return 0;
}

static const struct fib_rules_ops __net_initconst ipmr_rules_ops_template = {
	.family		= RTNL_FAMILY_IPMR,
	.rule_size	= sizeof(struct ipmr_rule),
	.addr_size	= sizeof(u32),
	.action		= ipmr_rule_action,
	.match		= ipmr_rule_match,
	.configure	= ipmr_rule_configure,
	.compare	= ipmr_rule_compare,
	.fill		= ipmr_rule_fill,
	.nlgroup	= RTNLGRP_IPV4_RULE,
	.policy		= ipmr_rule_policy,
	.owner		= THIS_MODULE,
};
static int __net_init ipmr_rules_init(struct net *net)
{
	struct fib_rules_ops *ops;
	struct mr_table *mrt;
	int err;

	ops = fib_rules_register(&ipmr_rules_ops_template, net);
	if (IS_ERR(ops))
		return PTR_ERR(ops);

	INIT_LIST_HEAD(&net->ipv4.mr_tables);

	mrt = ipmr_new_table(net, RT_TABLE_DEFAULT);
	if (!mrt) {
		err = -ENOMEM;
		goto err1;
	}

	err = fib_default_rule_add(ops, 0x7fff, RT_TABLE_DEFAULT, 0);
	if (err < 0)
		goto err2;

	net->ipv4.mr_rules_ops = ops;
	return 0;

err2:
	ipmr_free_table(mrt);
err1:
	fib_rules_unregister(ops);
	return err;
}
static void __net_exit ipmr_rules_exit(struct net *net)
{
	struct mr_table *mrt, *next;

	rtnl_lock();
	list_for_each_entry_safe(mrt, next, &net->ipv4.mr_tables, list) {
		list_del(&mrt->list);
		ipmr_free_table(mrt);
	}
	fib_rules_unregister(net->ipv4.mr_rules_ops);
	rtnl_unlock();
}
#else
#define ipmr_for_each_table(mrt, net) \
	for (mrt = net->ipv4.mrt; mrt; mrt = NULL)

static struct mr_table *ipmr_get_table(struct net *net, u32 id)
{
	return net->ipv4.mrt;
}

static int ipmr_fib_lookup(struct net *net, struct flowi4 *flp4,
			   struct mr_table **mrt)
{
	*mrt = net->ipv4.mrt;
	return 0;
}

static int __net_init ipmr_rules_init(struct net *net)
{
	net->ipv4.mrt = ipmr_new_table(net, RT_TABLE_DEFAULT);
	return net->ipv4.mrt ? 0 : -ENOMEM;
}

static void __net_exit ipmr_rules_exit(struct net *net)
{
	rtnl_lock();
	ipmr_free_table(net->ipv4.mrt);
	net->ipv4.mrt = NULL;
	rtnl_unlock();
}
#endif
static struct mr_table *ipmr_new_table(struct net *net, u32 id)
{
	struct mr_table *mrt;
	unsigned int i;

	mrt = ipmr_get_table(net, id);
	if (mrt)
		return mrt;

	mrt = kzalloc(sizeof(*mrt), GFP_KERNEL);
	if (!mrt)
		return NULL;
	write_pnet(&mrt->net, net);
	mrt->id = id;

	/* Forwarding cache */
	for (i = 0; i < MFC_LINES; i++)
		INIT_LIST_HEAD(&mrt->mfc_cache_array[i]);

	INIT_LIST_HEAD(&mrt->mfc_unres_queue);

	setup_timer(&mrt->ipmr_expire_timer, ipmr_expire_process,
		    (unsigned long)mrt);

#ifdef CONFIG_IP_PIMSM
	mrt->mroute_reg_vif_num = -1;
#endif
#ifdef CONFIG_IP_MROUTE_MULTIPLE_TABLES
	list_add_tail_rcu(&mrt->list, &net->ipv4.mr_tables);
#endif
	return mrt;
}

static void ipmr_free_table(struct mr_table *mrt)
{
	del_timer_sync(&mrt->ipmr_expire_timer);
	mroute_clean_tables(mrt, true);
	kfree(mrt);
}

/* Service routines creating virtual interfaces: DVMRP tunnels and PIMREG */

static void ipmr_del_tunnel(struct net_device *dev, struct vifctl *v)
{
	struct net *net = dev_net(dev);

	dev_close(dev);

	dev = __dev_get_by_name(net, "tunl0");
	if (dev) {
		const struct net_device_ops *ops = dev->netdev_ops;
		struct ifreq ifr;
		struct ip_tunnel_parm p;

		memset(&p, 0, sizeof(p));
		p.iph.daddr = v->vifc_rmt_addr.s_addr;
		p.iph.saddr = v->vifc_lcl_addr.s_addr;
		p.iph.version = 4;
		p.iph.ihl = 5;
		p.iph.protocol = IPPROTO_IPIP;
		sprintf(p.name, "dvmrp%d", v->vifc_vifi);
		ifr.ifr_ifru.ifru_data = (__force void __user *)&p;

		if (ops->ndo_do_ioctl) {
			mm_segment_t oldfs = get_fs();

			set_fs(KERNEL_DS);
			ops->ndo_do_ioctl(dev, &ifr, SIOCDELTUNNEL);
			set_fs(oldfs);
		}
	}
}

static
struct net_device *ipmr_new_tunnel(struct net *net, struct vifctl *v)
{
	struct net_device *dev;

	dev = __dev_get_by_name(net, "tunl0");

	if (dev) {
		const struct net_device_ops *ops = dev->netdev_ops;
		int err;
		struct ifreq ifr;
		struct ip_tunnel_parm p;
		struct in_device *in_dev;

		memset(&p, 0, sizeof(p));
		p.iph.daddr = v->vifc_rmt_addr.s_addr;
		p.iph.saddr = v->vifc_lcl_addr.s_addr;
		p.iph.version = 4;
		p.iph.ihl = 5;
		p.iph.protocol = IPPROTO_IPIP;
		sprintf(p.name, "dvmrp%d", v->vifc_vifi);
		ifr.ifr_ifru.ifru_data = (__force void __user *)&p;

		if (ops->ndo_do_ioctl) {
			mm_segment_t oldfs = get_fs();

			set_fs(KERNEL_DS);
			err = ops->ndo_do_ioctl(dev, &ifr, SIOCADDTUNNEL);
			set_fs(oldfs);
		} else {
			err = -EOPNOTSUPP;
		}
		dev = NULL;

		if (err == 0 &&
		    (dev = __dev_get_by_name(net, p.name)) != NULL) {
			dev->flags |= IFF_MULTICAST;

			in_dev = __in_dev_get_rtnl(dev);
			if (!in_dev)
				goto failure;

			ipv4_devconf_setall(in_dev);
			neigh_parms_data_state_setall(in_dev->arp_parms);
			IPV4_DEVCONF(in_dev->cnf, RP_FILTER) = 0;

			if (dev_open(dev))
				goto failure;
			dev_hold(dev);
		}
	}
	return dev;

failure:
	unregister_netdevice(dev);
	return NULL;
}
#ifdef CONFIG_IP_PIMSM

static netdev_tx_t reg_vif_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct net *net = dev_net(dev);
	struct mr_table *mrt;
	struct flowi4 fl4 = {
		.flowi4_oif	= dev->ifindex,
		.flowi4_iif	= skb->skb_iif ? : LOOPBACK_IFINDEX,
		.flowi4_mark	= skb->mark,
	};
	int err;

	err = ipmr_fib_lookup(net, &fl4, &mrt);
	if (err < 0) {
		kfree_skb(skb);
		return err;
	}

	read_lock(&mrt_lock);
	dev->stats.tx_bytes += skb->len;
	dev->stats.tx_packets++;
	ipmr_cache_report(mrt, skb, mrt->mroute_reg_vif_num, IGMPMSG_WHOLEPKT);
	read_unlock(&mrt_lock);
	kfree_skb(skb);
	return NETDEV_TX_OK;
}

static int reg_vif_get_iflink(const struct net_device *dev)
{
	return 0;
}

static const struct net_device_ops reg_vif_netdev_ops = {
	.ndo_start_xmit	= reg_vif_xmit,
	.ndo_get_iflink = reg_vif_get_iflink,
};
static void reg_vif_setup(struct net_device *dev)
{
	dev->type		= ARPHRD_PIMREG;
	dev->mtu		= ETH_DATA_LEN - sizeof(struct iphdr) - 8;
	dev->flags		= IFF_NOARP;
	dev->netdev_ops		= &reg_vif_netdev_ops;
	dev->destructor		= free_netdev;
	dev->features		|= NETIF_F_NETNS_LOCAL;
}
static struct net_device *ipmr_reg_vif(struct net *net, struct mr_table *mrt)
{
	struct net_device *dev;
	struct in_device *in_dev;
	char name[IFNAMSIZ];

	if (mrt->id == RT_TABLE_DEFAULT)
		sprintf(name, "pimreg");
	else
		sprintf(name, "pimreg%u", mrt->id);

	dev = alloc_netdev(0, name, NET_NAME_UNKNOWN, reg_vif_setup);
	if (!dev)
		return NULL;

	dev_net_set(dev, net);

	if (register_netdevice(dev)) {
		free_netdev(dev);
		return NULL;
	}

	rcu_read_lock();
	in_dev = __in_dev_get_rcu(dev);
	if (!in_dev) {
		rcu_read_unlock();
		goto failure;
	}

	ipv4_devconf_setall(in_dev);
	neigh_parms_data_state_setall(in_dev->arp_parms);
	IPV4_DEVCONF(in_dev->cnf, RP_FILTER) = 0;
	rcu_read_unlock();

	if (dev_open(dev))
		goto failure;

	dev_hold(dev);

	return dev;

failure:
	unregister_netdevice(dev);
	return NULL;
}
#endif
/**
 *	vif_delete - Delete a VIF entry
 *	@notify: Set to 1, if the caller is a notifier_call
 */

static int vif_delete(struct mr_table *mrt, int vifi, int notify,
		      struct list_head *head)
{
	struct vif_device *v;
	struct net_device *dev;
	struct in_device *in_dev;

	if (vifi < 0 || vifi >= mrt->maxvif)
		return -EADDRNOTAVAIL;

	v = &mrt->vif_table[vifi];

	write_lock_bh(&mrt_lock);
	dev = v->dev;
	v->dev = NULL;

	if (!dev) {
		write_unlock_bh(&mrt_lock);
		return -EADDRNOTAVAIL;
	}

#ifdef CONFIG_IP_PIMSM
	if (vifi == mrt->mroute_reg_vif_num)
		mrt->mroute_reg_vif_num = -1;
#endif

	if (vifi + 1 == mrt->maxvif) {
		int tmp;

		for (tmp = vifi - 1; tmp >= 0; tmp--) {
			if (VIF_EXISTS(mrt, tmp))
				break;
		}
		mrt->maxvif = tmp+1;
	}

	write_unlock_bh(&mrt_lock);

	dev_set_allmulti(dev, -1);

	in_dev = __in_dev_get_rtnl(dev);
	if (in_dev) {
		IPV4_DEVCONF(in_dev->cnf, MC_FORWARDING)--;
		inet_netconf_notify_devconf(dev_net(dev),
					    NETCONFA_MC_FORWARDING,
					    dev->ifindex, &in_dev->cnf);
		ip_rt_multicast_event(in_dev);
	}

	if (v->flags & (VIFF_TUNNEL | VIFF_REGISTER) && !notify)
		unregister_netdevice_queue(dev, head);

	dev_put(dev);
	return 0;
}

static void ipmr_cache_free_rcu(struct rcu_head *head)
{
	struct mfc_cache *c = container_of(head, struct mfc_cache, rcu);

	kmem_cache_free(mrt_cachep, c);
}

static inline void ipmr_cache_free(struct mfc_cache *c)
{
	call_rcu(&c->rcu, ipmr_cache_free_rcu);
}

/* Destroy an unresolved cache entry, killing queued skbs
 * and reporting error to netlink readers.
 */

static void ipmr_destroy_unres(struct mr_table *mrt, struct mfc_cache *c)
{
	struct net *net = read_pnet(&mrt->net);
	struct sk_buff *skb;
	struct nlmsgerr *e;

	atomic_dec(&mrt->cache_resolve_queue_len);

	while ((skb = skb_dequeue(&c->mfc_un.unres.unresolved))) {
		if (ip_hdr(skb)->version == 0) {
			struct nlmsghdr *nlh = (struct nlmsghdr *)skb_pull(skb, sizeof(struct iphdr));
			nlh->nlmsg_type = NLMSG_ERROR;
			nlh->nlmsg_len = nlmsg_msg_size(sizeof(struct nlmsgerr));
			skb_trim(skb, nlh->nlmsg_len);
			e = nlmsg_data(nlh);
			e->error = -ETIMEDOUT;
			memset(&e->msg, 0, sizeof(e->msg));

			rtnl_unicast(skb, net, NETLINK_CB(skb).portid);
		} else {
			kfree_skb(skb);
		}
	}

	ipmr_cache_free(c);
}

/* Timer process for the unresolved queue. */

static void ipmr_expire_process(unsigned long arg)
{
	struct mr_table *mrt = (struct mr_table *)arg;
	unsigned long now;
	unsigned long expires;
	struct mfc_cache *c, *next;

	if (!spin_trylock(&mfc_unres_lock)) {
		mod_timer(&mrt->ipmr_expire_timer, jiffies+HZ/10);
		return;
	}

	if (list_empty(&mrt->mfc_unres_queue))
		goto out;

	now = jiffies;
	expires = 10*HZ;

	list_for_each_entry_safe(c, next, &mrt->mfc_unres_queue, list) {
		if (time_after(c->mfc_un.unres.expires, now)) {
			unsigned long interval = c->mfc_un.unres.expires - now;
			if (interval < expires)
				expires = interval;
			continue;
		}

		list_del(&c->list);
		mroute_netlink_event(mrt, c, RTM_DELROUTE);
		ipmr_destroy_unres(mrt, c);
	}

	if (!list_empty(&mrt->mfc_unres_queue))
		mod_timer(&mrt->ipmr_expire_timer, jiffies + expires);

out:
	spin_unlock(&mfc_unres_lock);
}

/* Fill oifs list. It is called under write locked mrt_lock. */

static void ipmr_update_thresholds(struct mr_table *mrt, struct mfc_cache *cache,
				   unsigned char *ttls)
{
	int vifi;

	cache->mfc_un.res.minvif = MAXVIFS;
	cache->mfc_un.res.maxvif = 0;
	memset(cache->mfc_un.res.ttls, 255, MAXVIFS);

	for (vifi = 0; vifi < mrt->maxvif; vifi++) {
		if (VIF_EXISTS(mrt, vifi) &&
		    ttls[vifi] && ttls[vifi] < 255) {
			cache->mfc_un.res.ttls[vifi] = ttls[vifi];
			if (cache->mfc_un.res.minvif > vifi)
				cache->mfc_un.res.minvif = vifi;
			if (cache->mfc_un.res.maxvif <= vifi)
				cache->mfc_un.res.maxvif = vifi + 1;
		}
	}
}
static int vif_add(struct net *net, struct mr_table *mrt,
		   struct vifctl *vifc, int mrtsock)
{
	int vifi = vifc->vifc_vifi;
	struct vif_device *v = &mrt->vif_table[vifi];
	struct net_device *dev;
	struct in_device *in_dev;
	int err;

	/* Is vif busy ? */
	if (VIF_EXISTS(mrt, vifi))
		return -EADDRINUSE;

	switch (vifc->vifc_flags) {
#ifdef CONFIG_IP_PIMSM
	case VIFF_REGISTER:
		/*
		 * Special Purpose VIF in PIM
		 * All the packets will be sent to the daemon
		 */
		if (mrt->mroute_reg_vif_num >= 0)
			return -EADDRINUSE;
		dev = ipmr_reg_vif(net, mrt);
		if (!dev)
			return -ENOBUFS;
		err = dev_set_allmulti(dev, 1);
		if (err) {
			unregister_netdevice(dev);
			dev_put(dev);
			return err;
		}
		break;
#endif
	case VIFF_TUNNEL:
		dev = ipmr_new_tunnel(net, vifc);
		if (!dev)
			return -ENOBUFS;
		err = dev_set_allmulti(dev, 1);
		if (err) {
			ipmr_del_tunnel(dev, vifc);
			dev_put(dev);
			return err;
		}
		break;

	case VIFF_USE_IFINDEX:
	case 0:
		if (vifc->vifc_flags == VIFF_USE_IFINDEX) {
			dev = dev_get_by_index(net, vifc->vifc_lcl_ifindex);
			if (dev && !__in_dev_get_rtnl(dev)) {
				dev_put(dev);
				return -EADDRNOTAVAIL;
			}
		} else {
			dev = ip_dev_find(net, vifc->vifc_lcl_addr.s_addr);
		}
		if (!dev)
			return -EADDRNOTAVAIL;
		err = dev_set_allmulti(dev, 1);
		if (err) {
			dev_put(dev);
			return err;
		}
		break;
	default:
		return -EINVAL;
	}

	in_dev = __in_dev_get_rtnl(dev);
	if (!in_dev) {
		dev_put(dev);
		return -EADDRNOTAVAIL;
	}
	IPV4_DEVCONF(in_dev->cnf, MC_FORWARDING)++;
	inet_netconf_notify_devconf(net, NETCONFA_MC_FORWARDING, dev->ifindex,
				    &in_dev->cnf);
	ip_rt_multicast_event(in_dev);

	/* Fill in the VIF structures */

	v->rate_limit = vifc->vifc_rate_limit;
	v->local = vifc->vifc_lcl_addr.s_addr;
	v->remote = vifc->vifc_rmt_addr.s_addr;
	v->flags = vifc->vifc_flags;
	if (!mrtsock)
		v->flags |= VIFF_STATIC;
	v->threshold = vifc->vifc_threshold;
	v->bytes_in = 0;
	v->bytes_out = 0;
	v->pkt_in = 0;
	v->pkt_out = 0;
	v->link = dev->ifindex;
	if (v->flags & (VIFF_TUNNEL | VIFF_REGISTER))
		v->link = dev_get_iflink(dev);

	/* And finish update writing critical data */
	write_lock_bh(&mrt_lock);
	v->dev = dev;
#ifdef CONFIG_IP_PIMSM
	if (v->flags & VIFF_REGISTER)
		mrt->mroute_reg_vif_num = vifi;
#endif
	if (vifi+1 > mrt->maxvif)
		mrt->maxvif = vifi+1;
	write_unlock_bh(&mrt_lock);
	return 0;
}
/* called with rcu_read_lock() */
static struct mfc_cache *ipmr_cache_find(struct mr_table *mrt,
					 __be32 origin,
					 __be32 mcastgrp)
{
	int line = MFC_HASH(mcastgrp, origin);
	struct mfc_cache *c;

	list_for_each_entry_rcu(c, &mrt->mfc_cache_array[line], list) {
		if (c->mfc_origin == origin && c->mfc_mcastgrp == mcastgrp)
			return c;
	}
	return NULL;
}

/* Look for a (*,*,oif) entry */
static struct mfc_cache *ipmr_cache_find_any_parent(struct mr_table *mrt,
						    int vifi)
{
	int line = MFC_HASH(htonl(INADDR_ANY), htonl(INADDR_ANY));
	struct mfc_cache *c;

	list_for_each_entry_rcu(c, &mrt->mfc_cache_array[line], list)
		if (c->mfc_origin == htonl(INADDR_ANY) &&
		    c->mfc_mcastgrp == htonl(INADDR_ANY) &&
		    c->mfc_un.res.ttls[vifi] < 255)
			return c;

	return NULL;
}

/* Look for a (*,G) entry */
static struct mfc_cache *ipmr_cache_find_any(struct mr_table *mrt,
					     __be32 mcastgrp, int vifi)
{
	int line = MFC_HASH(mcastgrp, htonl(INADDR_ANY));
	struct mfc_cache *c, *proxy;

	if (mcastgrp == htonl(INADDR_ANY))
		goto skip;

	list_for_each_entry_rcu(c, &mrt->mfc_cache_array[line], list)
		if (c->mfc_origin == htonl(INADDR_ANY) &&
		    c->mfc_mcastgrp == mcastgrp) {
			if (c->mfc_un.res.ttls[vifi] < 255)
				return c;

			/* It's ok if the vifi is part of the static tree */
			proxy = ipmr_cache_find_any_parent(mrt,
							   c->mfc_parent);
			if (proxy && proxy->mfc_un.res.ttls[vifi] < 255)
				return c;
		}

skip:
	return ipmr_cache_find_any_parent(mrt, vifi);
}

/*
 *	Allocate a multicast cache entry
 */
static struct mfc_cache *ipmr_cache_alloc(void)
{
	struct mfc_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_KERNEL);

	if (c) {
		c->mfc_un.res.last_assert = jiffies - MFC_ASSERT_THRESH - 1;
		c->mfc_un.res.minvif = MAXVIFS;
	}
	return c;
}

static struct mfc_cache *ipmr_cache_alloc_unres(void)
{
	struct mfc_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_ATOMIC);

	if (c) {
		skb_queue_head_init(&c->mfc_un.unres.unresolved);
		c->mfc_un.unres.expires = jiffies + 10*HZ;
	}
	return c;
}

/*
 *	A cache entry has gone into a resolved state from queued
 */

static void ipmr_cache_resolve(struct net *net, struct mr_table *mrt,
			       struct mfc_cache *uc, struct mfc_cache *c)
{
	struct sk_buff *skb;
	struct nlmsgerr *e;

	/* Play the pending entries through our router */

	while ((skb = __skb_dequeue(&uc->mfc_un.unres.unresolved))) {
		if (ip_hdr(skb)->version == 0) {
			struct nlmsghdr *nlh = (struct nlmsghdr *)skb_pull(skb, sizeof(struct iphdr));

			if (__ipmr_fill_mroute(mrt, skb, c, nlmsg_data(nlh)) > 0) {
				nlh->nlmsg_len = skb_tail_pointer(skb) -
						 (u8 *)nlh;
			} else {
				nlh->nlmsg_type = NLMSG_ERROR;
				nlh->nlmsg_len = nlmsg_msg_size(sizeof(struct nlmsgerr));
				skb_trim(skb, nlh->nlmsg_len);
				e = nlmsg_data(nlh);
				e->error = -EMSGSIZE;
				memset(&e->msg, 0, sizeof(e->msg));
			}

			rtnl_unicast(skb, net, NETLINK_CB(skb).portid);
		} else {
			ip_mr_forward(net, mrt, skb, c, 0);
		}
	}
}
/*
 *	Bounce a cache query up to mrouted. We could use netlink for this but mrouted
 *	expects the following bizarre scheme.
 *
 *	Called under mrt_lock.
 */
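/* A sketch of the daemon side, for illustration only (assumed userspace
 * code, not part of this file): mrouted reads these upcalls from the same
 * raw IGMP socket it passed to MRT_INIT. struct igmpmsg (<linux/mroute.h>)
 * overlays the IP header, so im_mbz sits on the protocol field; the code
 * below zeroes that field, which is how the daemon tells an upcall from a
 * real IGMP packet (whose protocol byte is IPPROTO_IGMP):
 *
 *	n = recv(mrouter_fd, buf, sizeof(buf), 0);
 *	msg = (struct igmpmsg *)buf;
 *	if (n >= (ssize_t)sizeof(*msg) && msg->im_mbz == 0 &&
 *	    msg->im_msgtype == IGMPMSG_NOCACHE)
 *		add_mfc_for(msg->im_src, msg->im_dst, msg->im_vif);
 *
 * (add_mfc_for() is a hypothetical helper that would install an MFC entry
 * via MRT_ADD_MFC.)
 */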
static int ipmr_cache_report(struct mr_table *mrt,
			     struct sk_buff *pkt, vifi_t vifi, int assert)
{
	struct sk_buff *skb;
	const int ihl = ip_hdrlen(pkt);
	struct igmphdr *igmp;
	struct igmpmsg *msg;
	struct sock *mroute_sk;
	int ret;

#ifdef CONFIG_IP_PIMSM
	if (assert == IGMPMSG_WHOLEPKT)
		skb = skb_realloc_headroom(pkt, sizeof(struct iphdr));
	else
#endif
		skb = alloc_skb(128, GFP_ATOMIC);

	if (!skb)
		return -ENOBUFS;

#ifdef CONFIG_IP_PIMSM
	if (assert == IGMPMSG_WHOLEPKT) {
		/* Ugly, but we have no choice with this interface.
		 * Duplicate old header, fix ihl, length etc.
		 * And all this only to mangle msg->im_msgtype and
		 * to set msg->im_mbz to "mbz" :-)
		 */
		skb_push(skb, sizeof(struct iphdr));
		skb_reset_network_header(skb);
		skb_reset_transport_header(skb);
		msg = (struct igmpmsg *)skb_network_header(skb);
		memcpy(msg, skb_network_header(pkt), sizeof(struct iphdr));
		msg->im_msgtype = IGMPMSG_WHOLEPKT;
		msg->im_mbz = 0;
		msg->im_vif = mrt->mroute_reg_vif_num;
		ip_hdr(skb)->ihl = sizeof(struct iphdr) >> 2;
		ip_hdr(skb)->tot_len = htons(ntohs(ip_hdr(pkt)->tot_len) +
					     sizeof(struct iphdr));
	} else
#endif
	{

	/* Copy the IP header */

	skb_set_network_header(skb, skb->len);
	skb_put(skb, ihl);
	skb_copy_to_linear_data(skb, pkt->data, ihl);
	ip_hdr(skb)->protocol = 0;	/* Flag to the kernel this is a route add */
	msg = (struct igmpmsg *)skb_network_header(skb);
	msg->im_vif = vifi;
	skb_dst_set(skb, dst_clone(skb_dst(pkt)));

	/* Add our header */

	igmp = (struct igmphdr *)skb_put(skb, sizeof(struct igmphdr));
	igmp->type	=
	msg->im_msgtype = assert;
	igmp->code	= 0;
	ip_hdr(skb)->tot_len = htons(skb->len);		/* Fix the length */
	skb->transport_header = skb->network_header;
	}

	rcu_read_lock();
	mroute_sk = rcu_dereference(mrt->mroute_sk);
	if (!mroute_sk) {
		rcu_read_unlock();
		kfree_skb(skb);
		return -EINVAL;
	}

	/* Deliver to mrouted */

	ret = sock_queue_rcv_skb(mroute_sk, skb);
	rcu_read_unlock();
	if (ret < 0) {
		net_warn_ratelimited("mroute: pending queue full, dropping entries\n");
		kfree_skb(skb);
	}

	return ret;
}
/*
 *	Queue a packet for resolution. It gets locked cache entry!
 */

static int
ipmr_cache_unresolved(struct mr_table *mrt, vifi_t vifi, struct sk_buff *skb)
{
	bool found = false;
	int err;
	struct mfc_cache *c;
	const struct iphdr *iph = ip_hdr(skb);

	spin_lock_bh(&mfc_unres_lock);
	list_for_each_entry(c, &mrt->mfc_unres_queue, list) {
		if (c->mfc_mcastgrp == iph->daddr &&
		    c->mfc_origin == iph->saddr) {
			found = true;
			break;
		}
	}

	if (!found) {
		/* Create a new entry if allowable */

		if (atomic_read(&mrt->cache_resolve_queue_len) >= 10 ||
		    (c = ipmr_cache_alloc_unres()) == NULL) {
			spin_unlock_bh(&mfc_unres_lock);

			kfree_skb(skb);
			return -ENOBUFS;
		}

		/* Fill in the new cache entry */

		c->mfc_parent	= -1;
		c->mfc_origin	= iph->saddr;
		c->mfc_mcastgrp	= iph->daddr;

		/* Reflect first query at mrouted. */

		err = ipmr_cache_report(mrt, skb, vifi, IGMPMSG_NOCACHE);
		if (err < 0) {
			/* If the report failed throw the cache entry
			   out - Brad Parker
			 */
			spin_unlock_bh(&mfc_unres_lock);

			ipmr_cache_free(c);
			kfree_skb(skb);
			return err;
		}

		atomic_inc(&mrt->cache_resolve_queue_len);
		list_add(&c->list, &mrt->mfc_unres_queue);
		mroute_netlink_event(mrt, c, RTM_NEWROUTE);

		if (atomic_read(&mrt->cache_resolve_queue_len) == 1)
			mod_timer(&mrt->ipmr_expire_timer, c->mfc_un.unres.expires);
	}

	/* See if we can append the packet */

	if (c->mfc_un.unres.unresolved.qlen > 3) {
		kfree_skb(skb);
		err = -ENOBUFS;
	} else {
		skb_queue_tail(&c->mfc_un.unres.unresolved, skb);
		err = 0;
	}

	spin_unlock_bh(&mfc_unres_lock);
	return err;
}

/*
 *	MFC cache manipulation by user space mroute daemon
 */

static int ipmr_mfc_delete(struct mr_table *mrt, struct mfcctl *mfc, int parent)
{
	int line;
	struct mfc_cache *c, *next;

	line = MFC_HASH(mfc->mfcc_mcastgrp.s_addr, mfc->mfcc_origin.s_addr);

	list_for_each_entry_safe(c, next, &mrt->mfc_cache_array[line], list) {
		if (c->mfc_origin == mfc->mfcc_origin.s_addr &&
		    c->mfc_mcastgrp == mfc->mfcc_mcastgrp.s_addr &&
		    (parent == -1 || parent == c->mfc_parent)) {
			list_del_rcu(&c->list);
			mroute_netlink_event(mrt, c, RTM_DELROUTE);
			ipmr_cache_free(c);
			return 0;
		}
	}
	return -ENOENT;
}

static int ipmr_mfc_add(struct net *net, struct mr_table *mrt,
			struct mfcctl *mfc, int mrtsock, int parent)
{
	bool found = false;
	int line;
	struct mfc_cache *uc, *c;

	if (mfc->mfcc_parent >= MAXVIFS)
		return -ENFILE;

	line = MFC_HASH(mfc->mfcc_mcastgrp.s_addr, mfc->mfcc_origin.s_addr);

	list_for_each_entry(c, &mrt->mfc_cache_array[line], list) {
		if (c->mfc_origin == mfc->mfcc_origin.s_addr &&
		    c->mfc_mcastgrp == mfc->mfcc_mcastgrp.s_addr &&
		    (parent == -1 || parent == c->mfc_parent)) {
			found = true;
			break;
		}
	}

	if (found) {
		write_lock_bh(&mrt_lock);
		c->mfc_parent = mfc->mfcc_parent;
		ipmr_update_thresholds(mrt, c, mfc->mfcc_ttls);
		if (!mrtsock)
			c->mfc_flags |= MFC_STATIC;
		write_unlock_bh(&mrt_lock);
		mroute_netlink_event(mrt, c, RTM_NEWROUTE);
		return 0;
	}

	if (mfc->mfcc_mcastgrp.s_addr != htonl(INADDR_ANY) &&
	    !ipv4_is_multicast(mfc->mfcc_mcastgrp.s_addr))
		return -EINVAL;

	c = ipmr_cache_alloc();
	if (!c)
		return -ENOMEM;

	c->mfc_origin = mfc->mfcc_origin.s_addr;
	c->mfc_mcastgrp = mfc->mfcc_mcastgrp.s_addr;
	c->mfc_parent = mfc->mfcc_parent;
	ipmr_update_thresholds(mrt, c, mfc->mfcc_ttls);
	if (!mrtsock)
		c->mfc_flags |= MFC_STATIC;

	list_add_rcu(&c->list, &mrt->mfc_cache_array[line]);

	/*
	 *	Check to see if we resolved a queued list. If so we
	 *	need to send on the frames and tidy up.
	 */
	found = false;
	spin_lock_bh(&mfc_unres_lock);
	list_for_each_entry(uc, &mrt->mfc_unres_queue, list) {
		if (uc->mfc_origin == c->mfc_origin &&
		    uc->mfc_mcastgrp == c->mfc_mcastgrp) {
			list_del(&uc->list);
			atomic_dec(&mrt->cache_resolve_queue_len);
			found = true;
			break;
		}
	}
	if (list_empty(&mrt->mfc_unres_queue))
		del_timer(&mrt->ipmr_expire_timer);
	spin_unlock_bh(&mfc_unres_lock);

	if (found) {
		ipmr_cache_resolve(net, mrt, uc, c);
		ipmr_cache_free(uc);
	}
	mroute_netlink_event(mrt, c, RTM_NEWROUTE);
	return 0;
}

/*
 *	Close the multicast socket, and clear the vif tables etc
 */

static void mroute_clean_tables(struct mr_table *mrt, bool all)
{
	int i;
	LIST_HEAD(list);
	struct mfc_cache *c, *next;

	/* Shut down all active vif entries */

	for (i = 0; i < mrt->maxvif; i++) {
		if (!all && (mrt->vif_table[i].flags & VIFF_STATIC))
			continue;
		vif_delete(mrt, i, 0, &list);
	}
	unregister_netdevice_many(&list);

	/* Wipe the cache */

	for (i = 0; i < MFC_LINES; i++) {
		list_for_each_entry_safe(c, next, &mrt->mfc_cache_array[i], list) {
			if (!all && (c->mfc_flags & MFC_STATIC))
				continue;
			list_del_rcu(&c->list);
			mroute_netlink_event(mrt, c, RTM_DELROUTE);
			ipmr_cache_free(c);
		}
	}

	if (atomic_read(&mrt->cache_resolve_queue_len) != 0) {
		spin_lock_bh(&mfc_unres_lock);
		list_for_each_entry_safe(c, next, &mrt->mfc_unres_queue, list) {
			list_del(&c->list);
			mroute_netlink_event(mrt, c, RTM_DELROUTE);
			ipmr_destroy_unres(mrt, c);
		}
		spin_unlock_bh(&mfc_unres_lock);
	}
}

/* called from ip_ra_control(), before an RCU grace period,
 * we don't need to call synchronize_rcu() here
 */
static void mrtsock_destruct(struct sock *sk)
{
	struct net *net = sock_net(sk);
	struct mr_table *mrt;

	rtnl_lock();
	ipmr_for_each_table(mrt, net) {
		if (sk == rtnl_dereference(mrt->mroute_sk)) {
			IPV4_DEVCONF_ALL(net, MC_FORWARDING)--;
			inet_netconf_notify_devconf(net, NETCONFA_MC_FORWARDING,
						    NETCONFA_IFINDEX_ALL,
						    net->ipv4.devconf_all);
			RCU_INIT_POINTER(mrt->mroute_sk, NULL);
			mroute_clean_tables(mrt, false);
		}
	}
	rtnl_unlock();
}
/*
 *	Socket options and virtual interface manipulation. The whole
 *	virtual interface system is a complete heap, but unfortunately
 *	that's how BSD mrouted happens to think. Maybe one day with a proper
 *	MOSPF/PIM router set up we can clean this up.
 */
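/* Illustrative userspace sketch (assumed code, not part of this file;
 * error handling elided). A daemon such as mrouted drives this interface
 * roughly as follows, with MRT_INIT turning a raw IGMP socket into the
 * mroute control socket:
 *
 *	int fd = socket(AF_INET, SOCK_RAW, IPPROTO_IGMP);
 *	int one = 1;
 *	setsockopt(fd, IPPROTO_IP, MRT_INIT, &one, sizeof(one));
 *
 *	struct vifctl vc = { .vifc_vifi = 0, .vifc_threshold = 1 };
 *	vc.vifc_lcl_addr.s_addr = inet_addr("192.0.2.1");
 *	setsockopt(fd, IPPROTO_IP, MRT_ADD_VIF, &vc, sizeof(vc));
 *
 *	struct mfcctl mc = { .mfcc_parent = 0 };
 *	mc.mfcc_origin.s_addr   = inet_addr("192.0.2.9");
 *	mc.mfcc_mcastgrp.s_addr = inet_addr("233.252.0.1");
 *	mc.mfcc_ttls[1] = 1;
 *	setsockopt(fd, IPPROTO_IP, MRT_ADD_MFC, &mc, sizeof(mc));
 *
 * where vif 1 is assumed to have been added like vif 0, and the addresses
 * are documentation ranges. Closing the socket (or MRT_DONE) tears it all
 * down via mrtsock_destruct() above.
 */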
int ip_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, unsigned int optlen)
{
	int ret, parent = 0;
	struct vifctl vif;
	struct mfcctl mfc;
	struct net *net = sock_net(sk);
	struct mr_table *mrt;

	if (sk->sk_type != SOCK_RAW ||
	    inet_sk(sk)->inet_num != IPPROTO_IGMP)
		return -EOPNOTSUPP;

	mrt = ipmr_get_table(net, raw_sk(sk)->ipmr_table ? : RT_TABLE_DEFAULT);
	if (!mrt)
		return -ENOENT;

	if (optname != MRT_INIT) {
		if (sk != rcu_access_pointer(mrt->mroute_sk) &&
		    !ns_capable(net->user_ns, CAP_NET_ADMIN))
			return -EACCES;
	}

	switch (optname) {
	case MRT_INIT:
		if (optlen != sizeof(int))
			return -EINVAL;

		rtnl_lock();
		if (rtnl_dereference(mrt->mroute_sk)) {
			rtnl_unlock();
			return -EADDRINUSE;
		}

		ret = ip_ra_control(sk, 1, mrtsock_destruct);
		if (ret == 0) {
			rcu_assign_pointer(mrt->mroute_sk, sk);
			IPV4_DEVCONF_ALL(net, MC_FORWARDING)++;
			inet_netconf_notify_devconf(net, NETCONFA_MC_FORWARDING,
						    NETCONFA_IFINDEX_ALL,
						    net->ipv4.devconf_all);
		}
		rtnl_unlock();
		return ret;
	case MRT_DONE:
		if (sk != rcu_access_pointer(mrt->mroute_sk))
			return -EACCES;
		return ip_ra_control(sk, 0, NULL);
	case MRT_ADD_VIF:
	case MRT_DEL_VIF:
		if (optlen != sizeof(vif))
			return -EINVAL;
		if (copy_from_user(&vif, optval, sizeof(vif)))
			return -EFAULT;
		if (vif.vifc_vifi >= MAXVIFS)
			return -ENFILE;
		rtnl_lock();
		if (optname == MRT_ADD_VIF) {
			ret = vif_add(net, mrt, &vif,
				      sk == rtnl_dereference(mrt->mroute_sk));
		} else {
			ret = vif_delete(mrt, vif.vifc_vifi, 0, NULL);
		}
		rtnl_unlock();
		return ret;

		/*
		 *	Manipulate the forwarding caches. These live
		 *	in a sort of kernel/user symbiosis.
		 */
	case MRT_ADD_MFC:
	case MRT_DEL_MFC:
		parent = -1;
	case MRT_ADD_MFC_PROXY:
	case MRT_DEL_MFC_PROXY:
		if (optlen != sizeof(mfc))
			return -EINVAL;
		if (copy_from_user(&mfc, optval, sizeof(mfc)))
			return -EFAULT;
		if (parent == 0)
			parent = mfc.mfcc_parent;
		rtnl_lock();
		if (optname == MRT_DEL_MFC || optname == MRT_DEL_MFC_PROXY)
			ret = ipmr_mfc_delete(mrt, &mfc, parent);
		else
			ret = ipmr_mfc_add(net, mrt, &mfc,
					   sk == rtnl_dereference(mrt->mroute_sk),
					   parent);
		rtnl_unlock();
		return ret;
		/*
		 *	Control PIM assert.
		 */
	case MRT_ASSERT:
	{
		int v;

		if (optlen != sizeof(v))
			return -EINVAL;
		if (get_user(v, (int __user *)optval))
			return -EFAULT;
		mrt->mroute_do_assert = v;
		return 0;
	}
#ifdef CONFIG_IP_PIMSM
	case MRT_PIM:
	{
		int v;

		if (optlen != sizeof(v))
			return -EINVAL;
		if (get_user(v, (int __user *)optval))
			return -EFAULT;
		v = !!v;

		rtnl_lock();
		ret = 0;
		if (v != mrt->mroute_do_pim) {
			mrt->mroute_do_pim = v;
			mrt->mroute_do_assert = v;
		}
		rtnl_unlock();
		return ret;
	}
#endif
#ifdef CONFIG_IP_MROUTE_MULTIPLE_TABLES
	case MRT_TABLE:
	{
		u32 v;

		if (optlen != sizeof(u32))
			return -EINVAL;
		if (get_user(v, (u32 __user *)optval))
			return -EFAULT;

		/* "pimreg%u" should not exceed 16 bytes (IFNAMSIZ) */
		if (v != RT_TABLE_DEFAULT && v >= 1000000000)
			return -EINVAL;

		rtnl_lock();
		ret = 0;
		if (sk == rtnl_dereference(mrt->mroute_sk)) {
			ret = -EBUSY;
		} else {
			if (!ipmr_new_table(net, v))
				ret = -ENOMEM;
			else
				raw_sk(sk)->ipmr_table = v;
		}
		rtnl_unlock();
		return ret;
	}
#endif
	/*
	 *	Spurious command, or MRT_VERSION which you cannot
	 *	set.
	 */
	default:
		return -ENOPROTOOPT;
	}
}
/*
 *	Getsock opt support for the multicast routing system.
 */

int ip_mroute_getsockopt(struct sock *sk, int optname, char __user *optval, int __user *optlen)
{
	int olr;
	int val;
	struct net *net = sock_net(sk);
	struct mr_table *mrt;

	if (sk->sk_type != SOCK_RAW ||
	    inet_sk(sk)->inet_num != IPPROTO_IGMP)
		return -EOPNOTSUPP;

	mrt = ipmr_get_table(net, raw_sk(sk)->ipmr_table ? : RT_TABLE_DEFAULT);
	if (!mrt)
		return -ENOENT;

	if (optname != MRT_VERSION &&
#ifdef CONFIG_IP_PIMSM
	    optname != MRT_PIM &&
#endif
	    optname != MRT_ASSERT)
		return -ENOPROTOOPT;

	if (get_user(olr, optlen))
		return -EFAULT;

	olr = min_t(unsigned int, olr, sizeof(int));
	if (olr < 0)
		return -EINVAL;

	if (put_user(olr, optlen))
		return -EFAULT;
	if (optname == MRT_VERSION)
		val = 0x0305;
#ifdef CONFIG_IP_PIMSM
	else if (optname == MRT_PIM)
		val = mrt->mroute_do_pim;
#endif
	else
		val = mrt->mroute_do_assert;
	if (copy_to_user(optval, &val, olr))
		return -EFAULT;
	return 0;
}
/*
 *	The IP multicast ioctl support routines.
 */
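/* Usage sketch (illustrative userspace code, not part of this file): the
 * counters kept below are read with the SIOC ioctls on the mroute socket:
 *
 *	struct sioc_vif_req vreq = { .vifi = 0 };
 *	if (ioctl(fd, SIOCGETVIFCNT, &vreq) == 0)
 *		printf("vif0: %lu pkts in\n", vreq.icount);
 *
 *	struct sioc_sg_req sgreq;
 *	sgreq.src.s_addr = inet_addr("192.0.2.9");
 *	sgreq.grp.s_addr = inet_addr("233.252.0.1");
 *	if (ioctl(fd, SIOCGETSGCNT, &sgreq) == 0)
 *		printf("(S,G): %lu pkts\n", sgreq.pktcnt);
 */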
int ipmr_ioctl(struct sock *sk, int cmd, void __user *arg)
{
	struct sioc_sg_req sr;
	struct sioc_vif_req vr;
	struct vif_device *vif;
	struct mfc_cache *c;
	struct net *net = sock_net(sk);
	struct mr_table *mrt;

	mrt = ipmr_get_table(net, raw_sk(sk)->ipmr_table ? : RT_TABLE_DEFAULT);
	if (!mrt)
		return -ENOENT;

	switch (cmd) {
	case SIOCGETVIFCNT:
		if (copy_from_user(&vr, arg, sizeof(vr)))
			return -EFAULT;
		if (vr.vifi >= mrt->maxvif)
			return -EINVAL;
		read_lock(&mrt_lock);
		vif = &mrt->vif_table[vr.vifi];
		if (VIF_EXISTS(mrt, vr.vifi)) {
			vr.icount = vif->pkt_in;
			vr.ocount = vif->pkt_out;
			vr.ibytes = vif->bytes_in;
			vr.obytes = vif->bytes_out;
			read_unlock(&mrt_lock);

			if (copy_to_user(arg, &vr, sizeof(vr)))
				return -EFAULT;
			return 0;
		}
		read_unlock(&mrt_lock);
		return -EADDRNOTAVAIL;
	case SIOCGETSGCNT:
		if (copy_from_user(&sr, arg, sizeof(sr)))
			return -EFAULT;

		rcu_read_lock();
		c = ipmr_cache_find(mrt, sr.src.s_addr, sr.grp.s_addr);
		if (c) {
			sr.pktcnt = c->mfc_un.res.pkt;
			sr.bytecnt = c->mfc_un.res.bytes;
			sr.wrong_if = c->mfc_un.res.wrong_if;
			rcu_read_unlock();

			if (copy_to_user(arg, &sr, sizeof(sr)))
				return -EFAULT;
			return 0;
		}
		rcu_read_unlock();
		return -EADDRNOTAVAIL;
	default:
		return -ENOIOCTLCMD;
	}
}

#ifdef CONFIG_COMPAT
struct compat_sioc_sg_req {
	struct in_addr src;
	struct in_addr grp;
	compat_ulong_t pktcnt;
	compat_ulong_t bytecnt;
	compat_ulong_t wrong_if;
};

struct compat_sioc_vif_req {
	vifi_t	vifi;		/* Which iface */
	compat_ulong_t icount;
	compat_ulong_t ocount;
	compat_ulong_t ibytes;
	compat_ulong_t obytes;
};

int ipmr_compat_ioctl(struct sock *sk, unsigned int cmd, void __user *arg)
{
	struct compat_sioc_sg_req sr;
	struct compat_sioc_vif_req vr;
	struct vif_device *vif;
	struct mfc_cache *c;
	struct net *net = sock_net(sk);
	struct mr_table *mrt;

	mrt = ipmr_get_table(net, raw_sk(sk)->ipmr_table ? : RT_TABLE_DEFAULT);
	if (!mrt)
		return -ENOENT;

	switch (cmd) {
	case SIOCGETVIFCNT:
		if (copy_from_user(&vr, arg, sizeof(vr)))
			return -EFAULT;
		if (vr.vifi >= mrt->maxvif)
			return -EINVAL;
		read_lock(&mrt_lock);
		vif = &mrt->vif_table[vr.vifi];
		if (VIF_EXISTS(mrt, vr.vifi)) {
			vr.icount = vif->pkt_in;
			vr.ocount = vif->pkt_out;
			vr.ibytes = vif->bytes_in;
			vr.obytes = vif->bytes_out;
			read_unlock(&mrt_lock);

			if (copy_to_user(arg, &vr, sizeof(vr)))
				return -EFAULT;
			return 0;
		}
		read_unlock(&mrt_lock);
		return -EADDRNOTAVAIL;
	case SIOCGETSGCNT:
		if (copy_from_user(&sr, arg, sizeof(sr)))
			return -EFAULT;

		rcu_read_lock();
		c = ipmr_cache_find(mrt, sr.src.s_addr, sr.grp.s_addr);
		if (c) {
			sr.pktcnt = c->mfc_un.res.pkt;
			sr.bytecnt = c->mfc_un.res.bytes;
			sr.wrong_if = c->mfc_un.res.wrong_if;
			rcu_read_unlock();

			if (copy_to_user(arg, &sr, sizeof(sr)))
				return -EFAULT;
			return 0;
		}
		rcu_read_unlock();
		return -EADDRNOTAVAIL;
	default:
		return -ENOIOCTLCMD;
	}
}
#endif

static int ipmr_device_event(struct notifier_block *this, unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct net *net = dev_net(dev);
	struct mr_table *mrt;
	struct vif_device *v;
	int ct;

	if (event != NETDEV_UNREGISTER)
		return NOTIFY_DONE;

	ipmr_for_each_table(mrt, net) {
		v = &mrt->vif_table[0];
		for (ct = 0; ct < mrt->maxvif; ct++, v++) {
			if (v->dev == dev)
				vif_delete(mrt, ct, 1, NULL);
		}
	}
	return NOTIFY_DONE;
}

static struct notifier_block ip_mr_notifier = {
	.notifier_call = ipmr_device_event,
};
/*
 *	Encapsulate a packet by attaching a valid IPIP header to it.
 *	This avoids tunnel drivers and other mess and gives us the speed so
 *	important for multicast video.
 */
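/* Resulting layout, for reference (saddr/daddr come from the calling
 * vif in ipmr_queue_xmit() below): a fresh 20-byte IPv4 header with
 * protocol IPPROTO_IPIP is pushed in front of the untouched multicast
 * packet, so the tunnel peer simply decapsulates:
 *
 *	[ outer IP: saddr=vif->local, daddr=vif->remote, proto=4 ]
 *	[ original multicast IP packet                           ]
 */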
static void ip_encap(struct net *net, struct sk_buff *skb,
		     __be32 saddr, __be32 daddr)
{
	struct iphdr *iph;
	const struct iphdr *old_iph = ip_hdr(skb);

	skb_push(skb, sizeof(struct iphdr));
	skb->transport_header = skb->network_header;
	skb_reset_network_header(skb);
	iph = ip_hdr(skb);

	iph->version	= 4;
	iph->tos	= old_iph->tos;
	iph->ttl	= old_iph->ttl;
	iph->frag_off	= 0;
	iph->daddr	= daddr;
	iph->saddr	= saddr;
	iph->protocol	= IPPROTO_IPIP;
	iph->ihl	= 5;
	iph->tot_len	= htons(skb->len);
	ip_select_ident(net, skb, NULL);
	ip_send_check(iph);

	memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
	nf_reset(skb);
}

static inline int ipmr_forward_finish(struct net *net, struct sock *sk,
				      struct sk_buff *skb)
{
	struct ip_options *opt = &(IPCB(skb)->opt);

	IP_INC_STATS(net, IPSTATS_MIB_OUTFORWDATAGRAMS);
	IP_ADD_STATS(net, IPSTATS_MIB_OUTOCTETS, skb->len);

	if (unlikely(opt->optlen))
		ip_forward_options(skb);

	return dst_output(net, sk, skb);
}
/*
 *	Processing handlers for ipmr_forward
 */

static void ipmr_queue_xmit(struct net *net, struct mr_table *mrt,
			    struct sk_buff *skb, struct mfc_cache *c, int vifi)
{
	const struct iphdr *iph = ip_hdr(skb);
	struct vif_device *vif = &mrt->vif_table[vifi];
	struct net_device *dev;
	struct rtable *rt;
	struct flowi4 fl4;
	int    encap = 0;

	if (!vif->dev)
		goto out_free;

#ifdef CONFIG_IP_PIMSM
	if (vif->flags & VIFF_REGISTER) {
		vif->pkt_out++;
		vif->bytes_out += skb->len;
		vif->dev->stats.tx_bytes += skb->len;
		vif->dev->stats.tx_packets++;
		ipmr_cache_report(mrt, skb, vifi, IGMPMSG_WHOLEPKT);
		goto out_free;
	}
#endif

	if (vif->flags & VIFF_TUNNEL) {
		rt = ip_route_output_ports(net, &fl4, NULL,
					   vif->remote, vif->local,
					   0, 0,
					   IPPROTO_IPIP,
					   RT_TOS(iph->tos), vif->link);
		if (IS_ERR(rt))
			goto out_free;
		encap = sizeof(struct iphdr);
	} else {
		rt = ip_route_output_ports(net, &fl4, NULL, iph->daddr, 0,
					   0, 0,
					   IPPROTO_IPIP,
					   RT_TOS(iph->tos), vif->link);
		if (IS_ERR(rt))
			goto out_free;
	}

	dev = rt->dst.dev;

	if (skb->len+encap > dst_mtu(&rt->dst) && (ntohs(iph->frag_off) & IP_DF)) {
		/* Do not fragment multicasts. Alas, IPv4 does not
		 * allow sending ICMP here, so such packets will simply
		 * disappear into a black hole.
		 */

		IP_INC_STATS(net, IPSTATS_MIB_FRAGFAILS);
		ip_rt_put(rt);
		goto out_free;
	}

	encap += LL_RESERVED_SPACE(dev) + rt->dst.header_len;

	if (skb_cow(skb, encap)) {
		ip_rt_put(rt);
		goto out_free;
	}

	vif->pkt_out++;
	vif->bytes_out += skb->len;

	skb_dst_drop(skb);
	skb_dst_set(skb, &rt->dst);
	ip_decrease_ttl(ip_hdr(skb));

	/* FIXME: forward and output firewalls used to be called here.
	 * What do we do with netfilter? -- RR
	 */
	if (vif->flags & VIFF_TUNNEL) {
		ip_encap(net, skb, vif->local, vif->remote);
		/* FIXME: extra output firewall step used to be here. --RR */
		vif->dev->stats.tx_packets++;
		vif->dev->stats.tx_bytes += skb->len;
	}

	IPCB(skb)->flags |= IPSKB_FORWARDED;

	/*
	 * RFC1584 teaches that a DVMRP/PIM router must deliver packets
	 * locally not only before forwarding, but also after forwarding on
	 * all output interfaces. Clearly, if the mrouter runs a multicast
	 * program, that program should receive packets regardless of the
	 * interface it is joined on; otherwise the program would have to
	 * join on all interfaces. On the other hand, a multihomed host (or
	 * router, but not an mrouter) cannot join on more than one
	 * interface - it would result in receiving multiple packets.
	 */
	NF_HOOK(NFPROTO_IPV4, NF_INET_FORWARD,
		net, NULL, skb, skb->dev, dev,
		ipmr_forward_finish);
	return;

out_free:
	kfree_skb(skb);
}

static int ipmr_find_vif(struct mr_table *mrt, struct net_device *dev)
{
	int ct;

	for (ct = mrt->maxvif-1; ct >= 0; ct--) {
		if (mrt->vif_table[ct].dev == dev)
			return ct;
	}
	return -1;
}

/* "local" means that we should preserve one skb (for local delivery) */

static void ip_mr_forward(struct net *net, struct mr_table *mrt,
			  struct sk_buff *skb, struct mfc_cache *cache,
			  int local)
{
	int psend = -1;
	int vif, ct;
	int true_vifi = ipmr_find_vif(mrt, skb->dev);

	vif = cache->mfc_parent;
	cache->mfc_un.res.pkt++;
	cache->mfc_un.res.bytes += skb->len;

	if (cache->mfc_origin == htonl(INADDR_ANY) && true_vifi >= 0) {
		struct mfc_cache *cache_proxy;

		/* For an (*,G) entry, we only check that the incoming
		 * interface is part of the static tree.
		 */
		cache_proxy = ipmr_cache_find_any_parent(mrt, vif);
		if (cache_proxy &&
		    cache_proxy->mfc_un.res.ttls[true_vifi] < 255)
			goto forward;
	}

	/*
	 * Wrong interface: drop packet and (maybe) send PIM assert.
	 */
	if (mrt->vif_table[vif].dev != skb->dev) {
		if (rt_is_output_route(skb_rtable(skb))) {
			/* It is our own packet, looped back.
			 * Very complicated situation...
			 *
			 * The best workaround until routing daemons are
			 * fixed is not to redistribute a packet if it was
			 * sent through the wrong interface. It means that
			 * multicast applications WILL NOT work for (S,G)
			 * entries whose default multicast route points to
			 * the wrong oif. In any case, it is not a good
			 * idea to use multicast applications on a router.
			 */
			goto dont_forward;
		}

		cache->mfc_un.res.wrong_if++;

		if (true_vifi >= 0 && mrt->mroute_do_assert &&
		    /* pimsm uses asserts, when switching from RPT to SPT,
		     * so that we cannot check that packet arrived on an oif.
		     * It is bad, but otherwise we would need to move pretty
		     * large chunk of pimd to kernel. Ough... --ANK
		     */
		    (mrt->mroute_do_pim ||
		     cache->mfc_un.res.ttls[true_vifi] < 255) &&
		    time_after(jiffies,
			       cache->mfc_un.res.last_assert + MFC_ASSERT_THRESH)) {
			cache->mfc_un.res.last_assert = jiffies;
			ipmr_cache_report(mrt, skb, true_vifi, IGMPMSG_WRONGVIF);
		}
		goto dont_forward;
	}

forward:
	mrt->vif_table[vif].pkt_in++;
	mrt->vif_table[vif].bytes_in += skb->len;

	/*
	 *	Forward the frame
	 */
	if (cache->mfc_origin == htonl(INADDR_ANY) &&
	    cache->mfc_mcastgrp == htonl(INADDR_ANY)) {
		if (true_vifi >= 0 &&
		    true_vifi != cache->mfc_parent &&
		    ip_hdr(skb)->ttl >
				cache->mfc_un.res.ttls[cache->mfc_parent]) {
			/* It's an (*,*) entry and the packet is not coming from
			 * the upstream: forward the packet to the upstream
			 * only.
			 */
			psend = cache->mfc_parent;
			goto last_forward;
		}
		goto dont_forward;
	}
	for (ct = cache->mfc_un.res.maxvif - 1;
	     ct >= cache->mfc_un.res.minvif; ct--) {
		/* For (*,G) entry, don't forward to the incoming interface */
		if ((cache->mfc_origin != htonl(INADDR_ANY) ||
		     ct != true_vifi) &&
		    ip_hdr(skb)->ttl > cache->mfc_un.res.ttls[ct]) {
			if (psend != -1) {
				struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);

				if (skb2)
					ipmr_queue_xmit(net, mrt, skb2, cache,
							psend);
			}
			psend = ct;
		}
	}
last_forward:
	if (psend != -1) {
		if (local) {
			struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);

			if (skb2)
				ipmr_queue_xmit(net, mrt, skb2, cache, psend);
		} else {
			ipmr_queue_xmit(net, mrt, skb, cache, psend);
			return;
		}
	}

dont_forward:
	if (!local)
		kfree_skb(skb);
}
static struct mr_table *ipmr_rt_fib_lookup(struct net *net, struct sk_buff *skb)
{
	struct rtable *rt = skb_rtable(skb);
	struct iphdr *iph = ip_hdr(skb);
	struct flowi4 fl4 = {
		.daddr = iph->daddr,
		.saddr = iph->saddr,
		.flowi4_tos = RT_TOS(iph->tos),
		.flowi4_oif = (rt_is_output_route(rt) ?
			       skb->dev->ifindex : 0),
		.flowi4_iif = (rt_is_output_route(rt) ?
			       LOOPBACK_IFINDEX :
			       skb->dev->ifindex),
		.flowi4_mark = skb->mark,
	};
	struct mr_table *mrt;
	int err;

	err = ipmr_fib_lookup(net, &fl4, &mrt);
	if (err)
		return ERR_PTR(err);
	return mrt;
}

/*
 *	Multicast packets for forwarding arrive here
 *	Called with rcu_read_lock();
 */

int ip_mr_input(struct sk_buff *skb)
{
	struct mfc_cache *cache;
	struct net *net = dev_net(skb->dev);
	int local = skb_rtable(skb)->rt_flags & RTCF_LOCAL;
	struct mr_table *mrt;

	/* Packet is looped back after forward, it should not be
	 * forwarded second time, but still can be delivered locally.
	 */
	if (IPCB(skb)->flags & IPSKB_FORWARDED)
		goto dont_forward;

	mrt = ipmr_rt_fib_lookup(net, skb);
	if (IS_ERR(mrt)) {
		kfree_skb(skb);
		return PTR_ERR(mrt);
	}
	if (!local) {
		if (IPCB(skb)->opt.router_alert) {
			if (ip_call_ra_chain(skb))
				return 0;
		} else if (ip_hdr(skb)->protocol == IPPROTO_IGMP) {
			/* IGMPv1 (and broken IGMPv2 implementations such as
			 * Cisco IOS <= 11.2(8)) do not put the router alert
			 * option into IGMP packets destined to routable
			 * groups. This is very bad, because it means
			 * that we can forward NO IGMP messages.
			 */
			struct sock *mroute_sk;

			mroute_sk = rcu_dereference(mrt->mroute_sk);
			if (mroute_sk) {
				nf_reset(skb);
				raw_rcv(mroute_sk, skb);
				return 0;
			}
		}
	}

	/* already under rcu_read_lock() */
	cache = ipmr_cache_find(mrt, ip_hdr(skb)->saddr, ip_hdr(skb)->daddr);
	if (!cache) {
		int vif = ipmr_find_vif(mrt, skb->dev);

		if (vif >= 0)
			cache = ipmr_cache_find_any(mrt, ip_hdr(skb)->daddr,
						    vif);
	}

	/*
	 *	No usable cache entry
	 */
	if (!cache) {
		int vif;

		if (local) {
			struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
			ip_local_deliver(skb);
			if (!skb2)
				return -ENOBUFS;
			skb = skb2;
		}

		read_lock(&mrt_lock);
		vif = ipmr_find_vif(mrt, skb->dev);
		if (vif >= 0) {
			int err2 = ipmr_cache_unresolved(mrt, vif, skb);
			read_unlock(&mrt_lock);

			return err2;
		}
		read_unlock(&mrt_lock);
		kfree_skb(skb);
		return -ENODEV;
	}

	read_lock(&mrt_lock);
	ip_mr_forward(net, mrt, skb, cache, local);
	read_unlock(&mrt_lock);

	if (local)
		return ip_local_deliver(skb);

	return 0;

dont_forward:
	if (local)
		return ip_local_deliver(skb);
	kfree_skb(skb);
	return 0;
}

#ifdef CONFIG_IP_PIMSM
/* called with rcu_read_lock() */
static int __pim_rcv(struct mr_table *mrt, struct sk_buff *skb,
		     unsigned int pimlen)
{
	struct net_device *reg_dev = NULL;
	struct iphdr *encap;

	encap = (struct iphdr *)(skb_transport_header(skb) + pimlen);
	/*
	 * Check that:
	 * a. packet is really sent to a multicast group
	 * b. packet is not a NULL-REGISTER
	 * c. packet is not truncated
	 */
	if (!ipv4_is_multicast(encap->daddr) ||
	    encap->tot_len == 0 ||
	    ntohs(encap->tot_len) + pimlen > skb->len)
		return 1;

	read_lock(&mrt_lock);
	if (mrt->mroute_reg_vif_num >= 0)
		reg_dev = mrt->vif_table[mrt->mroute_reg_vif_num].dev;
	read_unlock(&mrt_lock);

	if (!reg_dev)
		return 1;

	skb->mac_header = skb->network_header;
	skb_pull(skb, (u8 *)encap - skb->data);
	skb_reset_network_header(skb);
	skb->protocol = htons(ETH_P_IP);
	skb->ip_summed = CHECKSUM_NONE;

	skb_tunnel_rx(skb, reg_dev, dev_net(reg_dev));

	netif_rx(skb);

	return NET_RX_SUCCESS;
}
#endif

#ifdef CONFIG_IP_PIMSM_V1
/*
 * Handle IGMP messages of PIMv1
 */

int pim_rcv_v1(struct sk_buff *skb)
{
	struct igmphdr *pim;
	struct net *net = dev_net(skb->dev);
	struct mr_table *mrt;

	if (!pskb_may_pull(skb, sizeof(*pim) + sizeof(struct iphdr)))
		goto drop;

	pim = igmp_hdr(skb);

	mrt = ipmr_rt_fib_lookup(net, skb);
	if (IS_ERR(mrt))
		goto drop;
	if (!mrt->mroute_do_pim ||
	    pim->group != PIM_V1_VERSION || pim->code != PIM_V1_REGISTER)
		goto drop;

	if (__pim_rcv(mrt, skb, sizeof(*pim))) {
drop:
		kfree_skb(skb);
	}
	return 0;
}
#endif

#ifdef CONFIG_IP_PIMSM_V2
static int pim_rcv(struct sk_buff *skb)
{
	struct pimreghdr *pim;
	struct net *net = dev_net(skb->dev);
	struct mr_table *mrt;

	if (!pskb_may_pull(skb, sizeof(*pim) + sizeof(struct iphdr)))
		goto drop;

	pim = (struct pimreghdr *)skb_transport_header(skb);
	if (pim->type != ((PIM_VERSION << 4) | (PIM_REGISTER)) ||
	    (pim->flags & PIM_NULL_REGISTER) ||
	    (ip_compute_csum((void *)pim, sizeof(*pim)) != 0 &&
	     csum_fold(skb_checksum(skb, 0, skb->len, 0))))
		goto drop;

	mrt = ipmr_rt_fib_lookup(net, skb);
	if (IS_ERR(mrt))
		goto drop;
	if (__pim_rcv(mrt, skb, sizeof(*pim))) {
drop:
		kfree_skb(skb);
	}
	return 0;
}
#endif
static int __ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
			      struct mfc_cache *c, struct rtmsg *rtm)
{
	int ct;
	struct rtnexthop *nhp;
	struct nlattr *mp_attr;
	struct rta_mfc_stats mfcs;

	/* If cache is unresolved, don't try to parse IIF and OIF */
	if (c->mfc_parent >= MAXVIFS)
		return -ENOENT;

	if (VIF_EXISTS(mrt, c->mfc_parent) &&
	    nla_put_u32(skb, RTA_IIF, mrt->vif_table[c->mfc_parent].dev->ifindex) < 0)
		return -EMSGSIZE;

	if (!(mp_attr = nla_nest_start(skb, RTA_MULTIPATH)))
		return -EMSGSIZE;

	for (ct = c->mfc_un.res.minvif; ct < c->mfc_un.res.maxvif; ct++) {
		if (VIF_EXISTS(mrt, ct) && c->mfc_un.res.ttls[ct] < 255) {
			if (!(nhp = nla_reserve_nohdr(skb, sizeof(*nhp)))) {
				nla_nest_cancel(skb, mp_attr);
				return -EMSGSIZE;
			}

			nhp->rtnh_flags = 0;
			nhp->rtnh_hops = c->mfc_un.res.ttls[ct];
			nhp->rtnh_ifindex = mrt->vif_table[ct].dev->ifindex;
			nhp->rtnh_len = sizeof(*nhp);
		}
	}

	nla_nest_end(skb, mp_attr);

	mfcs.mfcs_packets = c->mfc_un.res.pkt;
	mfcs.mfcs_bytes = c->mfc_un.res.bytes;
	mfcs.mfcs_wrong_if = c->mfc_un.res.wrong_if;
	if (nla_put(skb, RTA_MFC_STATS, sizeof(mfcs), &mfcs) < 0)
		return -EMSGSIZE;

	rtm->rtm_type = RTN_MULTICAST;
	return 1;
}

int ipmr_get_route(struct net *net, struct sk_buff *skb,
		   __be32 saddr, __be32 daddr,
		   struct rtmsg *rtm, int nowait, u32 portid)
{
	struct mfc_cache *cache;
	struct mr_table *mrt;
	int err;

	mrt = ipmr_get_table(net, RT_TABLE_DEFAULT);
	if (!mrt)
		return -ENOENT;

	rcu_read_lock();
	cache = ipmr_cache_find(mrt, saddr, daddr);
	if (!cache && skb->dev) {
		int vif = ipmr_find_vif(mrt, skb->dev);

		if (vif >= 0)
			cache = ipmr_cache_find_any(mrt, daddr, vif);
	}
	if (!cache) {
		struct sk_buff *skb2;
		struct iphdr *iph;
		struct net_device *dev;
		int vif = -1;

		if (nowait) {
			rcu_read_unlock();
			return -EAGAIN;
		}

		dev = skb->dev;
		read_lock(&mrt_lock);
		if (dev)
			vif = ipmr_find_vif(mrt, dev);
		if (vif < 0) {
			read_unlock(&mrt_lock);
			rcu_read_unlock();
			return -ENODEV;
		}
		skb2 = skb_clone(skb, GFP_ATOMIC);
		if (!skb2) {
			read_unlock(&mrt_lock);
			rcu_read_unlock();
			return -ENOMEM;
		}

		NETLINK_CB(skb2).portid = portid;
		skb_push(skb2, sizeof(struct iphdr));
		skb_reset_network_header(skb2);
		iph = ip_hdr(skb2);
		iph->ihl = sizeof(struct iphdr) >> 2;
		iph->saddr = saddr;
		iph->daddr = daddr;
		iph->version = 0;
		err = ipmr_cache_unresolved(mrt, vif, skb2);
		read_unlock(&mrt_lock);
		rcu_read_unlock();
		return err;
	}

	read_lock(&mrt_lock);
	if (!nowait && (rtm->rtm_flags & RTM_F_NOTIFY))
		cache->mfc_flags |= MFC_NOTIFY;
	err = __ipmr_fill_mroute(mrt, skb, cache, rtm);
	read_unlock(&mrt_lock);
	rcu_read_unlock();
	return err;
}

static int ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
			    u32 portid, u32 seq, struct mfc_cache *c, int cmd,
			    int flags)
{
	struct nlmsghdr *nlh;
	struct rtmsg *rtm;
	int err;

	nlh = nlmsg_put(skb, portid, seq, cmd, sizeof(*rtm), flags);
	if (!nlh)
		return -EMSGSIZE;

	rtm = nlmsg_data(nlh);
	rtm->rtm_family   = RTNL_FAMILY_IPMR;
	rtm->rtm_dst_len  = 32;
	rtm->rtm_src_len  = 32;
	rtm->rtm_tos      = 0;
	rtm->rtm_table    = mrt->id;
	if (nla_put_u32(skb, RTA_TABLE, mrt->id))
		goto nla_put_failure;
	rtm->rtm_type     = RTN_MULTICAST;
	rtm->rtm_scope    = RT_SCOPE_UNIVERSE;
	if (c->mfc_flags & MFC_STATIC)
		rtm->rtm_protocol = RTPROT_STATIC;
	else
		rtm->rtm_protocol = RTPROT_MROUTED;
	rtm->rtm_flags    = 0;

	if (nla_put_in_addr(skb, RTA_SRC, c->mfc_origin) ||
	    nla_put_in_addr(skb, RTA_DST, c->mfc_mcastgrp))
		goto nla_put_failure;
	err = __ipmr_fill_mroute(mrt, skb, c, rtm);
	/* do not break the dump if cache is unresolved */
	if (err < 0 && err != -ENOENT)
		goto nla_put_failure;

	nlmsg_end(skb, nlh);
	return 0;

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

static size_t mroute_msgsize(bool unresolved, int maxvif)
{
	size_t len =
		NLMSG_ALIGN(sizeof(struct rtmsg))
		+ nla_total_size(4)			/* RTA_TABLE */
		+ nla_total_size(4)			/* RTA_SRC */
		+ nla_total_size(4)			/* RTA_DST */
		;

	if (!unresolved)
		len = len
		      + nla_total_size(4)		/* RTA_IIF */
		      + nla_total_size(0)		/* RTA_MULTIPATH */
		      + maxvif * NLA_ALIGN(sizeof(struct rtnexthop))
							/* RTA_MFC_STATS */
		      + nla_total_size(sizeof(struct rta_mfc_stats))
		;

	return len;
}

static void mroute_netlink_event(struct mr_table *mrt, struct mfc_cache *mfc,
				 int cmd)
{
	struct net *net = read_pnet(&mrt->net);
	struct sk_buff *skb;
	int err = -ENOBUFS;

	skb = nlmsg_new(mroute_msgsize(mfc->mfc_parent >= MAXVIFS, mrt->maxvif),
			GFP_ATOMIC);
	if (!skb)
		goto errout;

	err = ipmr_fill_mroute(mrt, skb, 0, 0, mfc, cmd, 0);
	if (err < 0)
		goto errout;

	rtnl_notify(skb, net, 0, RTNLGRP_IPV4_MROUTE, NULL, GFP_ATOMIC);
	return;

errout:
	kfree_skb(skb);
	if (err < 0)
		rtnl_set_sk_err(net, RTNLGRP_IPV4_MROUTE, err);
}
static int ipmr_rtm_dumproute(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	struct mr_table *mrt;
	struct mfc_cache *mfc;
	unsigned int t = 0, s_t;
	unsigned int h = 0, s_h;
	unsigned int e = 0, s_e;

	s_t = cb->args[0];
	s_h = cb->args[1];
	s_e = cb->args[2];

	rcu_read_lock();
	ipmr_for_each_table(mrt, net) {
		if (t < s_t)
			goto next_table;
		if (t > s_t)
			s_h = 0;
		for (h = s_h; h < MFC_LINES; h++) {
			list_for_each_entry_rcu(mfc, &mrt->mfc_cache_array[h], list) {
				if (e < s_e)
					goto next_entry;
				if (ipmr_fill_mroute(mrt, skb,
						     NETLINK_CB(cb->skb).portid,
						     cb->nlh->nlmsg_seq,
						     mfc, RTM_NEWROUTE,
						     NLM_F_MULTI) < 0)
					goto done;
next_entry:
				e++;
			}
			e = 0;
			s_e = 0;
		}
		spin_lock_bh(&mfc_unres_lock);
		list_for_each_entry(mfc, &mrt->mfc_unres_queue, list) {
			if (e < s_e)
				goto next_entry2;
			if (ipmr_fill_mroute(mrt, skb,
					     NETLINK_CB(cb->skb).portid,
					     cb->nlh->nlmsg_seq,
					     mfc, RTM_NEWROUTE,
					     NLM_F_MULTI) < 0) {
				spin_unlock_bh(&mfc_unres_lock);
				goto done;
			}
next_entry2:
			e++;
		}
		spin_unlock_bh(&mfc_unres_lock);
		e = 0;
		s_e = 0;
next_table:
		t++;
	}
done:
	rcu_read_unlock();

	cb->args[2] = e;
	cb->args[1] = h;
	cb->args[0] = t;

	return skb->len;
}
#ifdef CONFIG_PROC_FS
/*
 *	The /proc interfaces to multicast routing :
 *	/proc/net/ip_mr_cache & /proc/net/ip_mr_vif
 */
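/* Example of the resulting output (illustrative values only), matching
 * the seq_printf() formats below:
 *
 *	# cat /proc/net/ip_mr_vif
 *	Interface      BytesIn  PktsIn  BytesOut PktsOut Flags Local    Remote
 *	 0 eth0          123456     789    123456     789 00000 C0000201 00000000
 *
 *	# cat /proc/net/ip_mr_cache
 *	Group    Origin   Iif     Pkts    Bytes    Wrong Oifs
 *	E9FC0001 C0000209 0            10     8480        0  1:1
 */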
struct ipmr_vif_iter {
	struct seq_net_private p;
	struct mr_table *mrt;
	int ct;
};

static struct vif_device *ipmr_vif_seq_idx(struct net *net,
					   struct ipmr_vif_iter *iter,
					   loff_t pos)
{
	struct mr_table *mrt = iter->mrt;

	for (iter->ct = 0; iter->ct < mrt->maxvif; ++iter->ct) {
		if (!VIF_EXISTS(mrt, iter->ct))
			continue;
		if (pos-- == 0)
			return &mrt->vif_table[iter->ct];
	}
	return NULL;
}

static void *ipmr_vif_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(mrt_lock)
{
	struct ipmr_vif_iter *iter = seq->private;
	struct net *net = seq_file_net(seq);
	struct mr_table *mrt;

	mrt = ipmr_get_table(net, RT_TABLE_DEFAULT);
	if (!mrt)
		return ERR_PTR(-ENOENT);

	iter->mrt = mrt;

	read_lock(&mrt_lock);
	return *pos ? ipmr_vif_seq_idx(net, seq->private, *pos - 1)
		: SEQ_START_TOKEN;
}

static void *ipmr_vif_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct ipmr_vif_iter *iter = seq->private;
	struct net *net = seq_file_net(seq);
	struct mr_table *mrt = iter->mrt;

	++*pos;
	if (v == SEQ_START_TOKEN)
		return ipmr_vif_seq_idx(net, iter, 0);

	while (++iter->ct < mrt->maxvif) {
		if (!VIF_EXISTS(mrt, iter->ct))
			continue;
		return &mrt->vif_table[iter->ct];
	}
	return NULL;
}

static void ipmr_vif_seq_stop(struct seq_file *seq, void *v)
	__releases(mrt_lock)
{
	read_unlock(&mrt_lock);
}

static int ipmr_vif_seq_show(struct seq_file *seq, void *v)
{
	struct ipmr_vif_iter *iter = seq->private;
	struct mr_table *mrt = iter->mrt;

	if (v == SEQ_START_TOKEN) {
		seq_puts(seq,
			 "Interface      BytesIn  PktsIn  BytesOut PktsOut Flags Local    Remote\n");
	} else {
		const struct vif_device *vif = v;
		const char *name = vif->dev ? vif->dev->name : "none";

		seq_printf(seq,
			   "%2Zd %-10s %8ld %7ld  %8ld %7ld %05X %08X %08X\n",
			   vif - mrt->vif_table,
			   name, vif->bytes_in, vif->pkt_in,
			   vif->bytes_out, vif->pkt_out,
			   vif->flags, vif->local, vif->remote);
	}
	return 0;
}

static const struct seq_operations ipmr_vif_seq_ops = {
	.start = ipmr_vif_seq_start,
	.next  = ipmr_vif_seq_next,
	.stop  = ipmr_vif_seq_stop,
	.show  = ipmr_vif_seq_show,
};

static int ipmr_vif_open(struct inode *inode, struct file *file)
{
	return seq_open_net(inode, file, &ipmr_vif_seq_ops,
			    sizeof(struct ipmr_vif_iter));
}

static const struct file_operations ipmr_vif_fops = {
	.owner	 = THIS_MODULE,
	.open    = ipmr_vif_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release_net,
};
struct ipmr_mfc_iter {
	struct seq_net_private p;
	struct mr_table *mrt;
	struct list_head *cache;
	int ct;
};

static struct mfc_cache *ipmr_mfc_seq_idx(struct net *net,
					  struct ipmr_mfc_iter *it, loff_t pos)
{
	struct mr_table *mrt = it->mrt;
	struct mfc_cache *mfc;

	rcu_read_lock();
	for (it->ct = 0; it->ct < MFC_LINES; it->ct++) {
		it->cache = &mrt->mfc_cache_array[it->ct];
		list_for_each_entry_rcu(mfc, it->cache, list)
			if (pos-- == 0)
				return mfc;
	}
	rcu_read_unlock();

	spin_lock_bh(&mfc_unres_lock);
	it->cache = &mrt->mfc_unres_queue;
	list_for_each_entry(mfc, it->cache, list)
		if (pos-- == 0)
			return mfc;
	spin_unlock_bh(&mfc_unres_lock);

	it->cache = NULL;
	return NULL;
}

static void *ipmr_mfc_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct ipmr_mfc_iter *it = seq->private;
	struct net *net = seq_file_net(seq);
	struct mr_table *mrt;

	mrt = ipmr_get_table(net, RT_TABLE_DEFAULT);
	if (!mrt)
		return ERR_PTR(-ENOENT);

	it->mrt = mrt;
	it->cache = NULL;
	return *pos ? ipmr_mfc_seq_idx(net, seq->private, *pos - 1)
		: SEQ_START_TOKEN;
}

static void *ipmr_mfc_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct mfc_cache *mfc = v;
	struct ipmr_mfc_iter *it = seq->private;
	struct net *net = seq_file_net(seq);
	struct mr_table *mrt = it->mrt;

	++*pos;

	if (v == SEQ_START_TOKEN)
		return ipmr_mfc_seq_idx(net, seq->private, 0);

	if (mfc->list.next != it->cache)
		return list_entry(mfc->list.next, struct mfc_cache, list);

	if (it->cache == &mrt->mfc_unres_queue)
		goto end_of_list;

	BUG_ON(it->cache != &mrt->mfc_cache_array[it->ct]);

	while (++it->ct < MFC_LINES) {
		it->cache = &mrt->mfc_cache_array[it->ct];
		if (list_empty(it->cache))
			continue;
		return list_first_entry(it->cache, struct mfc_cache, list);
	}

	/* exhausted cache_array, show unresolved */
	rcu_read_unlock();
	it->cache = &mrt->mfc_unres_queue;
	it->ct = 0;

	spin_lock_bh(&mfc_unres_lock);
	if (!list_empty(it->cache))
		return list_first_entry(it->cache, struct mfc_cache, list);

end_of_list:
	spin_unlock_bh(&mfc_unres_lock);
	it->cache = NULL;

	return NULL;
}

static void ipmr_mfc_seq_stop(struct seq_file *seq, void *v)
{
	struct ipmr_mfc_iter *it = seq->private;
	struct mr_table *mrt = it->mrt;

	if (it->cache == &mrt->mfc_unres_queue)
		spin_unlock_bh(&mfc_unres_lock);
	else if (it->cache == &mrt->mfc_cache_array[it->ct])
		rcu_read_unlock();
}

static int ipmr_mfc_seq_show(struct seq_file *seq, void *v)
{
	int n;

	if (v == SEQ_START_TOKEN) {
		seq_puts(seq,
		 "Group    Origin   Iif     Pkts    Bytes    Wrong Oifs\n");
	} else {
		const struct mfc_cache *mfc = v;
		const struct ipmr_mfc_iter *it = seq->private;
		const struct mr_table *mrt = it->mrt;

		seq_printf(seq, "%08X %08X %-3hd",
			   (__force u32) mfc->mfc_mcastgrp,
			   (__force u32) mfc->mfc_origin,
			   mfc->mfc_parent);

		if (it->cache != &mrt->mfc_unres_queue) {
			seq_printf(seq, " %8lu %8lu %8lu",
				   mfc->mfc_un.res.pkt,
				   mfc->mfc_un.res.bytes,
				   mfc->mfc_un.res.wrong_if);
			for (n = mfc->mfc_un.res.minvif;
			     n < mfc->mfc_un.res.maxvif; n++) {
				if (VIF_EXISTS(mrt, n) &&
				    mfc->mfc_un.res.ttls[n] < 255)
					seq_printf(seq,
					   " %2d:%-3d",
					   n, mfc->mfc_un.res.ttls[n]);
			}
		} else {
			/* unresolved mfc_caches don't contain
			 * pkt, bytes and wrong_if values
			 */
			seq_printf(seq, " %8lu %8lu %8lu", 0ul, 0ul, 0ul);
		}
		seq_putc(seq, '\n');
	}
	return 0;
}

static const struct seq_operations ipmr_mfc_seq_ops = {
	.start = ipmr_mfc_seq_start,
	.next  = ipmr_mfc_seq_next,
	.stop  = ipmr_mfc_seq_stop,
	.show  = ipmr_mfc_seq_show,
};

static int ipmr_mfc_open(struct inode *inode, struct file *file)
{
	return seq_open_net(inode, file, &ipmr_mfc_seq_ops,
			    sizeof(struct ipmr_mfc_iter));
}

static const struct file_operations ipmr_mfc_fops = {
	.owner	 = THIS_MODULE,
	.open    = ipmr_mfc_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release_net,
};
#endif

#ifdef CONFIG_IP_PIMSM_V2
static const struct net_protocol pim_protocol = {
	.handler	=	pim_rcv,
	.netns_ok	=	1,
};
#endif
/*
 *	Setup for IP multicast routing
 */
static int __net_init ipmr_net_init(struct net *net)
{
	int err;

	err = ipmr_rules_init(net);
	if (err < 0)
		goto fail;

#ifdef CONFIG_PROC_FS
	err = -ENOMEM;
	if (!proc_create("ip_mr_vif", 0, net->proc_net, &ipmr_vif_fops))
		goto proc_vif_fail;
	if (!proc_create("ip_mr_cache", 0, net->proc_net, &ipmr_mfc_fops))
		goto proc_cache_fail;
#endif
	return 0;

#ifdef CONFIG_PROC_FS
proc_cache_fail:
	remove_proc_entry("ip_mr_vif", net->proc_net);
proc_vif_fail:
	ipmr_rules_exit(net);
#endif
fail:
	return err;
}

static void __net_exit ipmr_net_exit(struct net *net)
{
#ifdef CONFIG_PROC_FS
	remove_proc_entry("ip_mr_cache", net->proc_net);
	remove_proc_entry("ip_mr_vif", net->proc_net);
#endif
	ipmr_rules_exit(net);
}

static struct pernet_operations ipmr_net_ops = {
	.init = ipmr_net_init,
	.exit = ipmr_net_exit,
};

int __init ip_mr_init(void)
{
	int err;

	mrt_cachep = kmem_cache_create("ip_mrt_cache",
				       sizeof(struct mfc_cache),
				       0, SLAB_HWCACHE_ALIGN | SLAB_PANIC,
				       NULL);
	if (!mrt_cachep)
		return -ENOMEM;

	err = register_pernet_subsys(&ipmr_net_ops);
	if (err)
		goto reg_pernet_fail;

	err = register_netdevice_notifier(&ip_mr_notifier);
	if (err)
		goto reg_notif_fail;
#ifdef CONFIG_IP_PIMSM_V2
	if (inet_add_protocol(&pim_protocol, IPPROTO_PIM) < 0) {
		pr_err("%s: can't add PIM protocol\n", __func__);
		err = -EAGAIN;
		goto add_proto_fail;
	}
#endif
	rtnl_register(RTNL_FAMILY_IPMR, RTM_GETROUTE,
		      NULL, ipmr_rtm_dumproute, NULL);
	return 0;

#ifdef CONFIG_IP_PIMSM_V2
add_proto_fail:
	unregister_netdevice_notifier(&ip_mr_notifier);
#endif
reg_notif_fail:
	unregister_pernet_subsys(&ipmr_net_ops);
reg_pernet_fail:
	kmem_cache_destroy(mrt_cachep);
	return err;
}