/*
 *	IP multicast routing support for mrouted 3.6/3.8
 *
 *	(c) 1995 Alan Cox, <alan@lxorguk.ukuu.org.uk>
 *	Linux Consultancy and Custom Driver Development
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 *	Fixes:
 *	Michael Chastain	:	Incorrect size of copying.
 *	Alan Cox		:	Added the cache manager code
 *	Alan Cox		:	Fixed the clone/copy bug and device race.
 *	Mike McLagan		:	Routing by source
 *	Malcolm Beattie		:	Buffer handling fixes.
 *	Alexey Kuznetsov	:	Double buffer free and other fixes.
 *	SVR Anand		:	Fixed several multicast bugs and problems.
 *	Alexey Kuznetsov	:	Status, optimisations and more.
 *	Brad Parker		:	Better behaviour on mrouted upcall
 *					overflow.
 *	Carlos Picoto		:	PIMv1 Support
 *	Pavlin Ivanov Radoslavov:	PIMv2 Registers must checksum only PIM header
 *					Relax this requirement to work with older peers.
 */
#include <asm/uaccess.h>
#include <linux/types.h>
#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/kernel.h>
#include <linux/fcntl.h>
#include <linux/stat.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/igmp.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/mroute.h>
#include <linux/init.h>
#include <linux/if_ether.h>
#include <linux/slab.h>
#include <net/net_namespace.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <linux/skbuff.h>
#include <net/route.h>
#include <net/sock.h>
#include <net/icmp.h>
#include <net/udp.h>
#include <net/raw.h>
#include <linux/notifier.h>
#include <linux/if_arp.h>
#include <linux/netfilter_ipv4.h>
#include <linux/compat.h>
#include <linux/export.h>
#include <net/ipip.h>
#include <net/checksum.h>
#include <net/netlink.h>
#include <net/fib_rules.h>
#include <linux/netconf.h>
#if defined(CONFIG_IP_PIMSM_V1) || defined(CONFIG_IP_PIMSM_V2)
#define CONFIG_IP_PIMSM	1
#endif

struct mr_table {
	struct list_head	list;
#ifdef CONFIG_NET_NS
	struct net		*net;
#endif
	u32			id;
	struct sock __rcu	*mroute_sk;
	struct timer_list	ipmr_expire_timer;
	struct list_head	mfc_unres_queue;
	struct list_head	mfc_cache_array[MFC_LINES];
	struct vif_device	vif_table[MAXVIFS];
	int			maxvif;
	atomic_t		cache_resolve_queue_len;
	bool			mroute_do_assert;
	bool			mroute_do_pim;
#if defined(CONFIG_IP_PIMSM_V1) || defined(CONFIG_IP_PIMSM_V2)
	int			mroute_reg_vif_num;
#endif
};

struct ipmr_rule {
	struct fib_rule		common;
};

struct ipmr_result {
	struct mr_table		*mrt;
};
/* Big lock, protecting vif table, mrt cache and mroute socket state.
 * Note that the changes are semaphored via rtnl_lock.
 */

static DEFINE_RWLOCK(mrt_lock);

/*
 *	Multicast router control variables
 */

#define VIF_EXISTS(_mrt, _idx) ((_mrt)->vif_table[_idx].dev != NULL)

/* Special spinlock for queue of unresolved entries */
static DEFINE_SPINLOCK(mfc_unres_lock);
/* We return to original Alan's scheme. Hash table of resolved
 * entries is changed only in process context and protected
 * with weak lock mrt_lock. Queue of unresolved entries is protected
 * with strong spinlock mfc_unres_lock.
 *
 * In this case data path is free of exclusive locks at all.
 */
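
/* Reader-side sketch under that scheme (illustrative, not a quote from
 * this file): the forwarding path only takes the RCU read lock to look
 * up a resolved entry.
 *
 *	rcu_read_lock();
 *	c = ipmr_cache_find(mrt, iph->saddr, iph->daddr);
 *	if (c)
 *		... fields of *c remain valid until rcu_read_unlock() ...
 *	rcu_read_unlock();
 */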
static struct kmem_cache *mrt_cachep __read_mostly;

static struct mr_table *ipmr_new_table(struct net *net, u32 id);
static void ipmr_free_table(struct mr_table *mrt);

static int ip_mr_forward(struct net *net, struct mr_table *mrt,
			 struct sk_buff *skb, struct mfc_cache *cache,
			 int local);
static int ipmr_cache_report(struct mr_table *mrt,
			     struct sk_buff *pkt, vifi_t vifi, int assert);
static int __ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
			      struct mfc_cache *c, struct rtmsg *rtm);
static void mroute_clean_tables(struct mr_table *mrt);
static void ipmr_expire_process(unsigned long arg);
#ifdef CONFIG_IP_MROUTE_MULTIPLE_TABLES
#define ipmr_for_each_table(mrt, net) \
	list_for_each_entry_rcu(mrt, &net->ipv4.mr_tables, list)

static struct mr_table *ipmr_get_table(struct net *net, u32 id)
{
	struct mr_table *mrt;

	ipmr_for_each_table(mrt, net) {
		if (mrt->id == id)
			return mrt;
	}
	return NULL;
}
static int ipmr_fib_lookup(struct net *net, struct flowi4 *flp4,
			   struct mr_table **mrt)
{
	struct ipmr_result res;
	struct fib_lookup_arg arg = { .result = &res, };
	int err;

	err = fib_rules_lookup(net->ipv4.mr_rules_ops,
			       flowi4_to_flowi(flp4), 0, &arg);
	if (err < 0)
		return err;
	*mrt = res.mrt;
	return 0;
}
static int ipmr_rule_action(struct fib_rule *rule, struct flowi *flp,
			    int flags, struct fib_lookup_arg *arg)
{
	struct ipmr_result *res = arg->result;
	struct mr_table *mrt;

	switch (rule->action) {
	case FR_ACT_TO_TBL:
		break;
	case FR_ACT_UNREACHABLE:
		return -ENETUNREACH;
	case FR_ACT_PROHIBIT:
		return -EACCES;
	case FR_ACT_BLACKHOLE:
	default:
		return -EINVAL;
	}

	mrt = ipmr_get_table(rule->fr_net, rule->table);
	if (mrt == NULL)
		return -EAGAIN;
	res->mrt = mrt;
	return 0;
}
static int ipmr_rule_match(struct fib_rule *rule, struct flowi *fl, int flags)
{
	return 1;
}

static const struct nla_policy ipmr_rule_policy[FRA_MAX + 1] = {
	FRA_GENERIC_POLICY,
};

static int ipmr_rule_configure(struct fib_rule *rule, struct sk_buff *skb,
			       struct fib_rule_hdr *frh, struct nlattr **tb)
{
	return 0;
}

static int ipmr_rule_compare(struct fib_rule *rule, struct fib_rule_hdr *frh,
			     struct nlattr **tb)
{
	return 1;
}

static int ipmr_rule_fill(struct fib_rule *rule, struct sk_buff *skb,
			  struct fib_rule_hdr *frh)
{
	frh->dst_len = 0;
	frh->src_len = 0;
	frh->tos     = 0;
	return 0;
}
static const struct fib_rules_ops __net_initconst ipmr_rules_ops_template = {
	.family		= RTNL_FAMILY_IPMR,
	.rule_size	= sizeof(struct ipmr_rule),
	.addr_size	= sizeof(u32),
	.action		= ipmr_rule_action,
	.match		= ipmr_rule_match,
	.configure	= ipmr_rule_configure,
	.compare	= ipmr_rule_compare,
	.default_pref	= fib_default_rule_pref,
	.fill		= ipmr_rule_fill,
	.nlgroup	= RTNLGRP_IPV4_RULE,
	.policy		= ipmr_rule_policy,
	.owner		= THIS_MODULE,
};
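
/* With CONFIG_IP_MROUTE_MULTIPLE_TABLES these rules are managed from
 * userspace like ordinary policy rules; iproute2 exposes them as
 * "ip mrule add/del". (Mentioned for orientation only; not code from
 * this file.)
 */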
static int __net_init ipmr_rules_init(struct net *net)
{
	struct fib_rules_ops *ops;
	struct mr_table *mrt;
	int err;

	ops = fib_rules_register(&ipmr_rules_ops_template, net);
	if (IS_ERR(ops))
		return PTR_ERR(ops);

	INIT_LIST_HEAD(&net->ipv4.mr_tables);

	mrt = ipmr_new_table(net, RT_TABLE_DEFAULT);
	if (mrt == NULL) {
		err = -ENOMEM;
		goto err1;
	}

	err = fib_default_rule_add(ops, 0x7fff, RT_TABLE_DEFAULT, 0);
	if (err < 0)
		goto err2;

	net->ipv4.mr_rules_ops = ops;
	return 0;

err2:
	kfree(mrt);
err1:
	fib_rules_unregister(ops);
	return err;
}
static void __net_exit ipmr_rules_exit(struct net *net)
{
	struct mr_table *mrt, *next;

	list_for_each_entry_safe(mrt, next, &net->ipv4.mr_tables, list) {
		list_del(&mrt->list);
		ipmr_free_table(mrt);
	}
	fib_rules_unregister(net->ipv4.mr_rules_ops);
}
#else
#define ipmr_for_each_table(mrt, net) \
	for (mrt = net->ipv4.mrt; mrt; mrt = NULL)

static struct mr_table *ipmr_get_table(struct net *net, u32 id)
{
	return net->ipv4.mrt;
}

static int ipmr_fib_lookup(struct net *net, struct flowi4 *flp4,
			   struct mr_table **mrt)
{
	*mrt = net->ipv4.mrt;
	return 0;
}

static int __net_init ipmr_rules_init(struct net *net)
{
	net->ipv4.mrt = ipmr_new_table(net, RT_TABLE_DEFAULT);
	return net->ipv4.mrt ? 0 : -ENOMEM;
}

static void __net_exit ipmr_rules_exit(struct net *net)
{
	ipmr_free_table(net->ipv4.mrt);
}
#endif
static struct mr_table *ipmr_new_table(struct net *net, u32 id)
{
	struct mr_table *mrt;
	unsigned int i;

	mrt = ipmr_get_table(net, id);
	if (mrt != NULL)
		return mrt;

	mrt = kzalloc(sizeof(*mrt), GFP_KERNEL);
	if (mrt == NULL)
		return NULL;
	write_pnet(&mrt->net, net);
	mrt->id = id;

	/* Forwarding cache */
	for (i = 0; i < MFC_LINES; i++)
		INIT_LIST_HEAD(&mrt->mfc_cache_array[i]);

	INIT_LIST_HEAD(&mrt->mfc_unres_queue);

	setup_timer(&mrt->ipmr_expire_timer, ipmr_expire_process,
		    (unsigned long)mrt);

#ifdef CONFIG_IP_PIMSM
	mrt->mroute_reg_vif_num = -1;
#endif
#ifdef CONFIG_IP_MROUTE_MULTIPLE_TABLES
	list_add_tail_rcu(&mrt->list, &net->ipv4.mr_tables);
#endif
	return mrt;
}
static void ipmr_free_table(struct mr_table *mrt)
{
	del_timer_sync(&mrt->ipmr_expire_timer);
	mroute_clean_tables(mrt);
	kfree(mrt);
}
/* Service routines creating virtual interfaces: DVMRP tunnels and PIMREG */

static void ipmr_del_tunnel(struct net_device *dev, struct vifctl *v)
{
	struct net *net = dev_net(dev);

	dev_close(dev);

	dev = __dev_get_by_name(net, "tunl0");
	if (dev) {
		const struct net_device_ops *ops = dev->netdev_ops;
		struct ifreq ifr;
		struct ip_tunnel_parm p;

		memset(&p, 0, sizeof(p));
		p.iph.daddr = v->vifc_rmt_addr.s_addr;
		p.iph.saddr = v->vifc_lcl_addr.s_addr;
		p.iph.version = 4;
		p.iph.ihl = 5;
		p.iph.protocol = IPPROTO_IPIP;
		sprintf(p.name, "dvmrp%d", v->vifc_vifi);
		ifr.ifr_ifru.ifru_data = (__force void __user *)&p;

		if (ops->ndo_do_ioctl) {
			mm_segment_t oldfs = get_fs();

			set_fs(KERNEL_DS);
			ops->ndo_do_ioctl(dev, &ifr, SIOCDELTUNNEL);
			set_fs(oldfs);
		}
	}
}
static
struct net_device *ipmr_new_tunnel(struct net *net, struct vifctl *v)
{
	struct net_device *dev;

	dev = __dev_get_by_name(net, "tunl0");

	if (dev) {
		const struct net_device_ops *ops = dev->netdev_ops;
		int err;
		struct ifreq ifr;
		struct ip_tunnel_parm p;
		struct in_device *in_dev;

		memset(&p, 0, sizeof(p));
		p.iph.daddr = v->vifc_rmt_addr.s_addr;
		p.iph.saddr = v->vifc_lcl_addr.s_addr;
		p.iph.version = 4;
		p.iph.ihl = 5;
		p.iph.protocol = IPPROTO_IPIP;
		sprintf(p.name, "dvmrp%d", v->vifc_vifi);
		ifr.ifr_ifru.ifru_data = (__force void __user *)&p;

		if (ops->ndo_do_ioctl) {
			mm_segment_t oldfs = get_fs();

			set_fs(KERNEL_DS);
			err = ops->ndo_do_ioctl(dev, &ifr, SIOCADDTUNNEL);
			set_fs(oldfs);
		} else
			err = -EOPNOTSUPP;

		dev = NULL;

		if (err == 0 &&
		    (dev = __dev_get_by_name(net, p.name)) != NULL) {
			dev->flags |= IFF_MULTICAST;

			in_dev = __in_dev_get_rtnl(dev);
			if (in_dev == NULL)
				goto failure;

			ipv4_devconf_setall(in_dev);
			IPV4_DEVCONF(in_dev->cnf, RP_FILTER) = 0;

			if (dev_open(dev))
				goto failure;
			dev_hold(dev);
		}
	}
	return dev;

failure:
	/* allow the register to be completed before unregistering. */
	rtnl_unlock();
	rtnl_lock();

	unregister_netdevice(dev);
	return NULL;
}
#ifdef CONFIG_IP_PIMSM

static netdev_tx_t reg_vif_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct net *net = dev_net(dev);
	struct mr_table *mrt;
	struct flowi4 fl4 = {
		.flowi4_oif	= dev->ifindex,
		.flowi4_iif	= skb->skb_iif,
		.flowi4_mark	= skb->mark,
	};
	int err;

	err = ipmr_fib_lookup(net, &fl4, &mrt);
	if (err < 0) {
		kfree_skb(skb);
		return err;
	}

	read_lock(&mrt_lock);
	dev->stats.tx_bytes += skb->len;
	dev->stats.tx_packets++;
	ipmr_cache_report(mrt, skb, mrt->mroute_reg_vif_num, IGMPMSG_WHOLEPKT);
	read_unlock(&mrt_lock);
	kfree_skb(skb);
	return NETDEV_TX_OK;
}
static const struct net_device_ops reg_vif_netdev_ops = {
	.ndo_start_xmit	= reg_vif_xmit,
};

static void reg_vif_setup(struct net_device *dev)
{
	dev->type		= ARPHRD_PIMREG;
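	/* MTU: 1500 (ETH_DATA_LEN) minus the outer IP header (20 bytes)
	 * and the 8-byte PIM register header added on encapsulation.
	 */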
	dev->mtu		= ETH_DATA_LEN - sizeof(struct iphdr) - 8;
	dev->flags		= IFF_NOARP;
	dev->netdev_ops		= &reg_vif_netdev_ops;
	dev->destructor		= free_netdev;
	dev->features		|= NETIF_F_NETNS_LOCAL;
}
static struct net_device *ipmr_reg_vif(struct net *net, struct mr_table *mrt)
{
	struct net_device *dev;
	struct in_device *in_dev;
	char name[IFNAMSIZ];

	if (mrt->id == RT_TABLE_DEFAULT)
		sprintf(name, "pimreg");
	else
		sprintf(name, "pimreg%u", mrt->id);

	dev = alloc_netdev(0, name, reg_vif_setup);
	if (dev == NULL)
		return NULL;

	dev_net_set(dev, net);

	if (register_netdevice(dev)) {
		free_netdev(dev);
		return NULL;
	}

	rcu_read_lock();
	in_dev = __in_dev_get_rcu(dev);
	if (!in_dev) {
		rcu_read_unlock();
		goto failure;
	}

	ipv4_devconf_setall(in_dev);
	IPV4_DEVCONF(in_dev->cnf, RP_FILTER) = 0;
	rcu_read_unlock();

	if (dev_open(dev))
		goto failure;
	dev_hold(dev);

	return dev;

failure:
	/* allow the register to be completed before unregistering. */
	rtnl_unlock();
	rtnl_lock();

	unregister_netdevice(dev);
	return NULL;
}
#endif
/**
 *	vif_delete - Delete a VIF entry
 *	@notify: Set to 1, if the caller is a notifier_call
 */

static int vif_delete(struct mr_table *mrt, int vifi, int notify,
		      struct list_head *head)
{
	struct vif_device *v;
	struct net_device *dev;
	struct in_device *in_dev;

	if (vifi < 0 || vifi >= mrt->maxvif)
		return -EADDRNOTAVAIL;

	v = &mrt->vif_table[vifi];

	write_lock_bh(&mrt_lock);
	dev = v->dev;
	v->dev = NULL;

	if (!dev) {
		write_unlock_bh(&mrt_lock);
		return -EADDRNOTAVAIL;
	}

#ifdef CONFIG_IP_PIMSM
	if (vifi == mrt->mroute_reg_vif_num)
		mrt->mroute_reg_vif_num = -1;
#endif

	if (vifi + 1 == mrt->maxvif) {
		int tmp;

		for (tmp = vifi - 1; tmp >= 0; tmp--) {
			if (VIF_EXISTS(mrt, tmp))
				break;
		}
		mrt->maxvif = tmp + 1;
	}

	write_unlock_bh(&mrt_lock);

	dev_set_allmulti(dev, -1);

	in_dev = __in_dev_get_rtnl(dev);
	if (in_dev) {
		IPV4_DEVCONF(in_dev->cnf, MC_FORWARDING)--;
		inet_netconf_notify_devconf(dev_net(dev),
					    NETCONFA_MC_FORWARDING,
					    dev->ifindex, &in_dev->cnf);
		ip_rt_multicast_event(in_dev);
	}

	if (v->flags & (VIFF_TUNNEL | VIFF_REGISTER) && !notify)
		unregister_netdevice_queue(dev, head);

	dev_put(dev);
	return 0;
}
static void ipmr_cache_free_rcu(struct rcu_head *head)
{
	struct mfc_cache *c = container_of(head, struct mfc_cache, rcu);

	kmem_cache_free(mrt_cachep, c);
}

static inline void ipmr_cache_free(struct mfc_cache *c)
{
	call_rcu(&c->rcu, ipmr_cache_free_rcu);
}
/* Destroy an unresolved cache entry, killing queued skbs
 * and reporting error to netlink readers.
 */

static void ipmr_destroy_unres(struct mr_table *mrt, struct mfc_cache *c)
{
	struct net *net = read_pnet(&mrt->net);
	struct sk_buff *skb;
	struct nlmsgerr *e;

	atomic_dec(&mrt->cache_resolve_queue_len);

	while ((skb = skb_dequeue(&c->mfc_un.unres.unresolved))) {
		if (ip_hdr(skb)->version == 0) {
			struct nlmsghdr *nlh = (struct nlmsghdr *)skb_pull(skb, sizeof(struct iphdr));
			nlh->nlmsg_type = NLMSG_ERROR;
			nlh->nlmsg_len = NLMSG_LENGTH(sizeof(struct nlmsgerr));
			skb_trim(skb, nlh->nlmsg_len);
			e = NLMSG_DATA(nlh);
			e->error = -ETIMEDOUT;
			memset(&e->msg, 0, sizeof(e->msg));

			rtnl_unicast(skb, net, NETLINK_CB(skb).portid);
		} else
			kfree_skb(skb);
	}

	ipmr_cache_free(c);
}
/* Timer process for the unresolved queue. */

static void ipmr_expire_process(unsigned long arg)
{
	struct mr_table *mrt = (struct mr_table *)arg;
	unsigned long now;
	unsigned long expires;
	struct mfc_cache *c, *next;

	if (!spin_trylock(&mfc_unres_lock)) {
		mod_timer(&mrt->ipmr_expire_timer, jiffies+HZ/10);
		return;
	}

	if (list_empty(&mrt->mfc_unres_queue))
		goto out;

	now = jiffies;
	expires = 10*HZ;

	list_for_each_entry_safe(c, next, &mrt->mfc_unres_queue, list) {
		if (time_after(c->mfc_un.unres.expires, now)) {
			unsigned long interval = c->mfc_un.unres.expires - now;
			if (interval < expires)
				expires = interval;
			continue;
		}

		list_del(&c->list);
		ipmr_destroy_unres(mrt, c);
	}

	if (!list_empty(&mrt->mfc_unres_queue))
		mod_timer(&mrt->ipmr_expire_timer, jiffies + expires);

out:
	spin_unlock(&mfc_unres_lock);
}
/* Fill oifs list. It is called under write locked mrt_lock. */

static void ipmr_update_thresholds(struct mr_table *mrt, struct mfc_cache *cache,
				   unsigned char *ttls)
{
	int vifi;

	cache->mfc_un.res.minvif = MAXVIFS;
	cache->mfc_un.res.maxvif = 0;
	memset(cache->mfc_un.res.ttls, 255, MAXVIFS);

	for (vifi = 0; vifi < mrt->maxvif; vifi++) {
		if (VIF_EXISTS(mrt, vifi) &&
		    ttls[vifi] && ttls[vifi] < 255) {
			cache->mfc_un.res.ttls[vifi] = ttls[vifi];
			if (cache->mfc_un.res.minvif > vifi)
				cache->mfc_un.res.minvif = vifi;
			if (cache->mfc_un.res.maxvif <= vifi)
				cache->mfc_un.res.maxvif = vifi + 1;
		}
	}
}
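
/* Example: with ttls = {1, 0, 255, 3, ...}, vifs 0 and 3 become
 * forwarding targets (minvif = 0, maxvif = 4), while a ttl of 0 or
 * 255 means "do not forward on this vif". (Illustrative values.)
 */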
static int vif_add(struct net *net, struct mr_table *mrt,
		   struct vifctl *vifc, int mrtsock)
{
	int vifi = vifc->vifc_vifi;
	struct vif_device *v = &mrt->vif_table[vifi];
	struct net_device *dev;
	struct in_device *in_dev;
	int err;

	/* Is vif busy ? */
	if (VIF_EXISTS(mrt, vifi))
		return -EADDRINUSE;

	switch (vifc->vifc_flags) {
#ifdef CONFIG_IP_PIMSM
	case VIFF_REGISTER:
		/*
		 * Special Purpose VIF in PIM
		 * All the packets will be sent to the daemon
		 */
		if (mrt->mroute_reg_vif_num >= 0)
			return -EADDRINUSE;
		dev = ipmr_reg_vif(net, mrt);
		if (!dev)
			return -ENOBUFS;
		err = dev_set_allmulti(dev, 1);
		if (err) {
			unregister_netdevice(dev);
			dev_put(dev);
			return err;
		}
		break;
#endif
	case VIFF_TUNNEL:
		dev = ipmr_new_tunnel(net, vifc);
		if (!dev)
			return -ENOBUFS;
		err = dev_set_allmulti(dev, 1);
		if (err) {
			ipmr_del_tunnel(dev, vifc);
			dev_put(dev);
			return err;
		}
		break;

	case VIFF_USE_IFINDEX:
	case 0:
		if (vifc->vifc_flags == VIFF_USE_IFINDEX) {
			dev = dev_get_by_index(net, vifc->vifc_lcl_ifindex);
			if (dev && __in_dev_get_rtnl(dev) == NULL) {
				dev_put(dev);
				return -EADDRNOTAVAIL;
			}
		} else
			dev = ip_dev_find(net, vifc->vifc_lcl_addr.s_addr);

		if (!dev)
			return -EADDRNOTAVAIL;
		err = dev_set_allmulti(dev, 1);
		if (err) {
			dev_put(dev);
			return err;
		}
		break;
	default:
		return -EINVAL;
	}

	in_dev = __in_dev_get_rtnl(dev);
	if (!in_dev) {
		dev_put(dev);
		return -EADDRNOTAVAIL;
	}
	IPV4_DEVCONF(in_dev->cnf, MC_FORWARDING)++;
	inet_netconf_notify_devconf(net, NETCONFA_MC_FORWARDING, dev->ifindex,
				    &in_dev->cnf);
	ip_rt_multicast_event(in_dev);

	/* Fill in the VIF structures */

	v->rate_limit = vifc->vifc_rate_limit;
	v->local = vifc->vifc_lcl_addr.s_addr;
	v->remote = vifc->vifc_rmt_addr.s_addr;
	v->flags = vifc->vifc_flags;
	if (!mrtsock)
		v->flags |= VIFF_STATIC;
	v->threshold = vifc->vifc_threshold;
	v->bytes_in = 0;
	v->bytes_out = 0;
	v->pkt_in = 0;
	v->pkt_out = 0;
	v->link = dev->ifindex;
	if (v->flags & (VIFF_TUNNEL | VIFF_REGISTER))
		v->link = dev->iflink;

	/* And finish update writing critical data */
	write_lock_bh(&mrt_lock);
	v->dev = dev;
#ifdef CONFIG_IP_PIMSM
	if (v->flags & VIFF_REGISTER)
		mrt->mroute_reg_vif_num = vifi;
#endif
	if (vifi+1 > mrt->maxvif)
		mrt->maxvif = vifi+1;
	write_unlock_bh(&mrt_lock);
	return 0;
}
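
/* Userspace counterpart (illustrative sketch; the address below is an
 * example): the routing daemon adds a VIF via the MRT_ADD_VIF socket
 * option on its mroute socket:
 *
 *	struct vifctl vc = {
 *		.vifc_vifi	= 0,
 *		.vifc_threshold	= 1,
 *	};
 *	vc.vifc_lcl_addr.s_addr = inet_addr("192.0.2.1");
 *	setsockopt(fd, IPPROTO_IP, MRT_ADD_VIF, &vc, sizeof(vc));
 */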
/* called with rcu_read_lock() */
static struct mfc_cache *ipmr_cache_find(struct mr_table *mrt,
					 __be32 origin,
					 __be32 mcastgrp)
{
	int line = MFC_HASH(mcastgrp, origin);
	struct mfc_cache *c;

	list_for_each_entry_rcu(c, &mrt->mfc_cache_array[line], list) {
		if (c->mfc_origin == origin && c->mfc_mcastgrp == mcastgrp)
			return c;
	}
	return NULL;
}
/*
 *	Allocate a multicast cache entry
 */
static struct mfc_cache *ipmr_cache_alloc(void)
{
	struct mfc_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_KERNEL);

	if (c)
		c->mfc_un.res.minvif = MAXVIFS;
	return c;
}

static struct mfc_cache *ipmr_cache_alloc_unres(void)
{
	struct mfc_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_ATOMIC);

	if (c) {
		skb_queue_head_init(&c->mfc_un.unres.unresolved);
		c->mfc_un.unres.expires = jiffies + 10*HZ;
	}
	return c;
}
/*
 *	A cache entry has gone into a resolved state from queued
 */

static void ipmr_cache_resolve(struct net *net, struct mr_table *mrt,
			       struct mfc_cache *uc, struct mfc_cache *c)
{
	struct sk_buff *skb;
	struct nlmsgerr *e;

	/* Play the pending entries through our router */

	while ((skb = __skb_dequeue(&uc->mfc_un.unres.unresolved))) {
		if (ip_hdr(skb)->version == 0) {
			struct nlmsghdr *nlh = (struct nlmsghdr *)skb_pull(skb, sizeof(struct iphdr));

			if (__ipmr_fill_mroute(mrt, skb, c, NLMSG_DATA(nlh)) > 0) {
				nlh->nlmsg_len = skb_tail_pointer(skb) -
						 (u8 *)nlh;
			} else {
				nlh->nlmsg_type = NLMSG_ERROR;
				nlh->nlmsg_len = NLMSG_LENGTH(sizeof(struct nlmsgerr));
				skb_trim(skb, nlh->nlmsg_len);
				e = NLMSG_DATA(nlh);
				e->error = -EMSGSIZE;
				memset(&e->msg, 0, sizeof(e->msg));
			}

			rtnl_unicast(skb, net, NETLINK_CB(skb).portid);
		} else
			ip_mr_forward(net, mrt, skb, c, 0);
	}
}
/*
 *	Bounce a cache query up to mrouted. We could use netlink for this but mrouted
 *	expects the following bizarre scheme.
 *
 *	Called under mrt_lock.
 */

static int ipmr_cache_report(struct mr_table *mrt,
			     struct sk_buff *pkt, vifi_t vifi, int assert)
{
	struct sk_buff *skb;
	const int ihl = ip_hdrlen(pkt);
	struct igmphdr *igmp;
	struct igmpmsg *msg;
	struct sock *mroute_sk;
	int ret;

#ifdef CONFIG_IP_PIMSM
	if (assert == IGMPMSG_WHOLEPKT)
		skb = skb_realloc_headroom(pkt, sizeof(struct iphdr));
	else
#endif
		skb = alloc_skb(128, GFP_ATOMIC);

	if (!skb)
		return -ENOBUFS;

#ifdef CONFIG_IP_PIMSM
	if (assert == IGMPMSG_WHOLEPKT) {
		/* Ugly, but we have no choice with this interface.
		 * Duplicate old header, fix ihl, length etc.
		 * And all this only to mangle msg->im_msgtype and
		 * to set msg->im_mbz to "mbz" :-)
		 */
		skb_push(skb, sizeof(struct iphdr));
		skb_reset_network_header(skb);
		skb_reset_transport_header(skb);
		msg = (struct igmpmsg *)skb_network_header(skb);
		memcpy(msg, skb_network_header(pkt), sizeof(struct iphdr));
		msg->im_msgtype = IGMPMSG_WHOLEPKT;
		msg->im_mbz = 0;
		msg->im_vif = mrt->mroute_reg_vif_num;
		ip_hdr(skb)->ihl = sizeof(struct iphdr) >> 2;
		ip_hdr(skb)->tot_len = htons(ntohs(ip_hdr(pkt)->tot_len) +
					     sizeof(struct iphdr));
	} else
#endif
	{

	/* Copy the IP header */

	skb->network_header = skb->tail;
	skb_put(skb, ihl);
	skb_copy_to_linear_data(skb, pkt->data, ihl);
	ip_hdr(skb)->protocol = 0;	/* Flag to the kernel this is a route add */
	msg = (struct igmpmsg *)skb_network_header(skb);
	msg->im_vif = vifi;
	skb_dst_set(skb, dst_clone(skb_dst(pkt)));

	/* Add our header */

	igmp = (struct igmphdr *)skb_put(skb, sizeof(struct igmphdr));
	igmp->type	=
	msg->im_msgtype = assert;
	igmp->code	= 0;
	ip_hdr(skb)->tot_len = htons(skb->len);	/* Fix the length */
	skb->transport_header = skb->network_header;
	}

	rcu_read_lock();
	mroute_sk = rcu_dereference(mrt->mroute_sk);
	if (mroute_sk == NULL) {
		rcu_read_unlock();
		kfree_skb(skb);
		return -EINVAL;
	}

	/* Deliver to mrouted */

	ret = sock_queue_rcv_skb(mroute_sk, skb);
	rcu_read_unlock();
	if (ret < 0) {
		net_warn_ratelimited("mroute: pending queue full, dropping entries\n");
		kfree_skb(skb);
	}

	return ret;
}
/*
 *	Queue a packet for resolution. It gets locked cache entry!
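 *
 *	At most ten unresolved entries are kept per table, and each
 *	entry queues at most three pending skbs (see the checks below);
 *	beyond those limits packets are dropped.
 */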
static int
ipmr_cache_unresolved(struct mr_table *mrt, vifi_t vifi, struct sk_buff *skb)
{
	bool found = false;
	int err;
	struct mfc_cache *c;
	const struct iphdr *iph = ip_hdr(skb);

	spin_lock_bh(&mfc_unres_lock);
	list_for_each_entry(c, &mrt->mfc_unres_queue, list) {
		if (c->mfc_mcastgrp == iph->daddr &&
		    c->mfc_origin == iph->saddr) {
			found = true;
			break;
		}
	}

	if (!found) {
		/* Create a new entry if allowable */

		if (atomic_read(&mrt->cache_resolve_queue_len) >= 10 ||
		    (c = ipmr_cache_alloc_unres()) == NULL) {
			spin_unlock_bh(&mfc_unres_lock);

			kfree_skb(skb);
			return -ENOBUFS;
		}

		/* Fill in the new cache entry */

		c->mfc_parent	= -1;
		c->mfc_origin	= iph->saddr;
		c->mfc_mcastgrp	= iph->daddr;

		/* Reflect first query at mrouted. */

		err = ipmr_cache_report(mrt, skb, vifi, IGMPMSG_NOCACHE);
		if (err < 0) {
			/* If the report failed throw the cache entry
			   out - Brad Parker
			 */
			spin_unlock_bh(&mfc_unres_lock);

			ipmr_cache_free(c);
			kfree_skb(skb);
			return err;
		}

		atomic_inc(&mrt->cache_resolve_queue_len);
		list_add(&c->list, &mrt->mfc_unres_queue);

		if (atomic_read(&mrt->cache_resolve_queue_len) == 1)
			mod_timer(&mrt->ipmr_expire_timer, c->mfc_un.unres.expires);
	}

	/* See if we can append the packet */

	if (c->mfc_un.unres.unresolved.qlen > 3) {
		kfree_skb(skb);
		err = -ENOBUFS;
	} else {
		skb_queue_tail(&c->mfc_un.unres.unresolved, skb);
		err = 0;
	}

	spin_unlock_bh(&mfc_unres_lock);
	return err;
}
/*
 *	MFC cache manipulation by user space mroute daemon
 */

static int ipmr_mfc_delete(struct mr_table *mrt, struct mfcctl *mfc)
{
	int line;
	struct mfc_cache *c, *next;

	line = MFC_HASH(mfc->mfcc_mcastgrp.s_addr, mfc->mfcc_origin.s_addr);

	list_for_each_entry_safe(c, next, &mrt->mfc_cache_array[line], list) {
		if (c->mfc_origin == mfc->mfcc_origin.s_addr &&
		    c->mfc_mcastgrp == mfc->mfcc_mcastgrp.s_addr) {
			list_del_rcu(&c->list);
			ipmr_cache_free(c);
			return 0;
		}
	}
	return -ENOENT;
}
static int ipmr_mfc_add(struct net *net, struct mr_table *mrt,
			struct mfcctl *mfc, int mrtsock)
{
	bool found = false;
	int line;
	struct mfc_cache *uc, *c;

	if (mfc->mfcc_parent >= MAXVIFS)
		return -ENFILE;

	line = MFC_HASH(mfc->mfcc_mcastgrp.s_addr, mfc->mfcc_origin.s_addr);

	list_for_each_entry(c, &mrt->mfc_cache_array[line], list) {
		if (c->mfc_origin == mfc->mfcc_origin.s_addr &&
		    c->mfc_mcastgrp == mfc->mfcc_mcastgrp.s_addr) {
			found = true;
			break;
		}
	}

	if (found) {
		write_lock_bh(&mrt_lock);
		c->mfc_parent = mfc->mfcc_parent;
		ipmr_update_thresholds(mrt, c, mfc->mfcc_ttls);
		if (!mrtsock)
			c->mfc_flags |= MFC_STATIC;
		write_unlock_bh(&mrt_lock);
		return 0;
	}

	if (!ipv4_is_multicast(mfc->mfcc_mcastgrp.s_addr))
		return -EINVAL;

	c = ipmr_cache_alloc();
	if (c == NULL)
		return -ENOMEM;

	c->mfc_origin = mfc->mfcc_origin.s_addr;
	c->mfc_mcastgrp = mfc->mfcc_mcastgrp.s_addr;
	c->mfc_parent = mfc->mfcc_parent;
	ipmr_update_thresholds(mrt, c, mfc->mfcc_ttls);
	if (!mrtsock)
		c->mfc_flags |= MFC_STATIC;

	list_add_rcu(&c->list, &mrt->mfc_cache_array[line]);

	/*
	 *	Check to see if we resolved a queued list. If so we
	 *	need to send on the frames and tidy up.
	 */
	found = false;
	spin_lock_bh(&mfc_unres_lock);
	list_for_each_entry(uc, &mrt->mfc_unres_queue, list) {
		if (uc->mfc_origin == c->mfc_origin &&
		    uc->mfc_mcastgrp == c->mfc_mcastgrp) {
			list_del(&uc->list);
			atomic_dec(&mrt->cache_resolve_queue_len);
			found = true;
			break;
		}
	}
	if (list_empty(&mrt->mfc_unres_queue))
		del_timer(&mrt->ipmr_expire_timer);
	spin_unlock_bh(&mfc_unres_lock);

	if (found) {
		ipmr_cache_resolve(net, mrt, uc, c);
		ipmr_cache_free(uc);
	}
	return 0;
}
/*
 *	Close the multicast socket, and clear the vif tables etc
 */

static void mroute_clean_tables(struct mr_table *mrt)
{
	int i;
	LIST_HEAD(list);
	struct mfc_cache *c, *next;

	/* Shut down all active vif entries */

	for (i = 0; i < mrt->maxvif; i++) {
		if (!(mrt->vif_table[i].flags & VIFF_STATIC))
			vif_delete(mrt, i, 0, &list);
	}
	unregister_netdevice_many(&list);

	/* Wipe the cache */

	for (i = 0; i < MFC_LINES; i++) {
		list_for_each_entry_safe(c, next, &mrt->mfc_cache_array[i], list) {
			if (c->mfc_flags & MFC_STATIC)
				continue;
			list_del_rcu(&c->list);
			ipmr_cache_free(c);
		}
	}

	if (atomic_read(&mrt->cache_resolve_queue_len) != 0) {
		spin_lock_bh(&mfc_unres_lock);
		list_for_each_entry_safe(c, next, &mrt->mfc_unres_queue, list) {
			list_del(&c->list);
			ipmr_destroy_unres(mrt, c);
		}
		spin_unlock_bh(&mfc_unres_lock);
	}
}
/* called from ip_ra_control(), before an RCU grace period,
 * we dont need to call synchronize_rcu() here
 */
static void mrtsock_destruct(struct sock *sk)
{
	struct net *net = sock_net(sk);
	struct mr_table *mrt;

	rtnl_lock();
	ipmr_for_each_table(mrt, net) {
		if (sk == rtnl_dereference(mrt->mroute_sk)) {
			IPV4_DEVCONF_ALL(net, MC_FORWARDING)--;
			inet_netconf_notify_devconf(net, NETCONFA_MC_FORWARDING,
						    NETCONFA_IFINDEX_ALL,
						    net->ipv4.devconf_all);
			RCU_INIT_POINTER(mrt->mroute_sk, NULL);
			mroute_clean_tables(mrt);
		}
	}
	rtnl_unlock();
}
/*
 *	Socket options and virtual interface manipulation. The whole
 *	virtual interface system is a complete heap, but unfortunately
 *	that's how BSD mrouted happens to think. Maybe one day with a proper
 *	MOSPF/PIM router set up we can clean this up.
 */
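
/* Typical userspace sequence (illustrative sketch, not code from this
 * file): the daemon opens a raw IGMP socket and enables multicast
 * routing before adding VIFs and MFC entries:
 *
 *	int fd = socket(AF_INET, SOCK_RAW, IPPROTO_IGMP);
 *	int one = 1;
 *	setsockopt(fd, IPPROTO_IP, MRT_INIT, &one, sizeof(one));
 *	...
 *	setsockopt(fd, IPPROTO_IP, MRT_DONE, NULL, 0);
 */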
int ip_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, unsigned int optlen)
{
	int ret;
	struct vifctl vif;
	struct mfcctl mfc;
	struct net *net = sock_net(sk);
	struct mr_table *mrt;

	if (sk->sk_type != SOCK_RAW ||
	    inet_sk(sk)->inet_num != IPPROTO_IGMP)
		return -EOPNOTSUPP;

	mrt = ipmr_get_table(net, raw_sk(sk)->ipmr_table ? : RT_TABLE_DEFAULT);
	if (mrt == NULL)
		return -ENOENT;

	if (optname != MRT_INIT) {
		if (sk != rcu_access_pointer(mrt->mroute_sk) &&
		    !ns_capable(net->user_ns, CAP_NET_ADMIN))
			return -EACCES;
	}

	switch (optname) {
	case MRT_INIT:
		if (optlen != sizeof(int))
			return -EINVAL;

		rtnl_lock();
		if (rtnl_dereference(mrt->mroute_sk)) {
			rtnl_unlock();
			return -EADDRINUSE;
		}

		ret = ip_ra_control(sk, 1, mrtsock_destruct);
		if (ret == 0) {
			rcu_assign_pointer(mrt->mroute_sk, sk);
			IPV4_DEVCONF_ALL(net, MC_FORWARDING)++;
			inet_netconf_notify_devconf(net, NETCONFA_MC_FORWARDING,
						    NETCONFA_IFINDEX_ALL,
						    net->ipv4.devconf_all);
		}
		rtnl_unlock();
		return ret;
	case MRT_DONE:
		if (sk != rcu_access_pointer(mrt->mroute_sk))
			return -EACCES;
		return ip_ra_control(sk, 0, NULL);
	case MRT_ADD_VIF:
	case MRT_DEL_VIF:
		if (optlen != sizeof(vif))
			return -EINVAL;
		if (copy_from_user(&vif, optval, sizeof(vif)))
			return -EFAULT;
		if (vif.vifc_vifi >= MAXVIFS)
			return -ENFILE;
		rtnl_lock();
		if (optname == MRT_ADD_VIF) {
			ret = vif_add(net, mrt, &vif,
				      sk == rtnl_dereference(mrt->mroute_sk));
		} else {
			ret = vif_delete(mrt, vif.vifc_vifi, 0, NULL);
		}
		rtnl_unlock();
		return ret;

		/*
		 *	Manipulate the forwarding caches. These live
		 *	in a sort of kernel/user symbiosis.
		 */
	case MRT_ADD_MFC:
	case MRT_DEL_MFC:
		if (optlen != sizeof(mfc))
			return -EINVAL;
		if (copy_from_user(&mfc, optval, sizeof(mfc)))
			return -EFAULT;
		rtnl_lock();
		if (optname == MRT_DEL_MFC)
			ret = ipmr_mfc_delete(mrt, &mfc);
		else
			ret = ipmr_mfc_add(net, mrt, &mfc,
					   sk == rtnl_dereference(mrt->mroute_sk));
		rtnl_unlock();
		return ret;
		/*
		 *	Control PIM assert.
		 */
	case MRT_ASSERT:
	{
		int v;
		if (optlen != sizeof(v))
			return -EINVAL;
		if (get_user(v, (int __user *)optval))
			return -EFAULT;
		mrt->mroute_do_assert = v;
		return 0;
	}
#ifdef CONFIG_IP_PIMSM
	case MRT_PIM:
	{
		int v;

		if (optlen != sizeof(v))
			return -EINVAL;
		if (get_user(v, (int __user *)optval))
			return -EFAULT;
		v = !!v;

		rtnl_lock();
		ret = 0;
		if (v != mrt->mroute_do_pim) {
			mrt->mroute_do_pim = v;
			mrt->mroute_do_assert = v;
		}
		rtnl_unlock();
		return ret;
	}
#endif
#ifdef CONFIG_IP_MROUTE_MULTIPLE_TABLES
	case MRT_TABLE:
	{
		u32 v;

		if (optlen != sizeof(u32))
			return -EINVAL;
		if (get_user(v, (u32 __user *)optval))
			return -EFAULT;

		/* "pimreg%u" should not exceed 16 bytes (IFNAMSIZ) */
		if (v != RT_TABLE_DEFAULT && v >= 1000000000)
			return -EINVAL;

		rtnl_lock();
		ret = 0;
		if (sk == rtnl_dereference(mrt->mroute_sk)) {
			ret = -EBUSY;
		} else {
			if (!ipmr_new_table(net, v))
				ret = -ENOMEM;
			else
				raw_sk(sk)->ipmr_table = v;
		}
		rtnl_unlock();
		return ret;
	}
#endif
	/*
	 *	Spurious command, or MRT_VERSION which you cannot
	 *	set.
	 */
	default:
		return -ENOPROTOOPT;
	}
}
/*
 *	Getsock opt support for the multicast routing system.
 */

int ip_mroute_getsockopt(struct sock *sk, int optname, char __user *optval, int __user *optlen)
{
	int olr;
	int val;
	struct net *net = sock_net(sk);
	struct mr_table *mrt;

	if (sk->sk_type != SOCK_RAW ||
	    inet_sk(sk)->inet_num != IPPROTO_IGMP)
		return -EOPNOTSUPP;

	mrt = ipmr_get_table(net, raw_sk(sk)->ipmr_table ? : RT_TABLE_DEFAULT);
	if (mrt == NULL)
		return -ENOENT;

	if (optname != MRT_VERSION &&
#ifdef CONFIG_IP_PIMSM
	    optname != MRT_PIM &&
#endif
	    optname != MRT_ASSERT)
		return -ENOPROTOOPT;

	if (get_user(olr, optlen))
		return -EFAULT;

	olr = min_t(unsigned int, olr, sizeof(int));
	if (olr < 0)
		return -EINVAL;

	if (put_user(olr, optlen))
		return -EFAULT;
	if (optname == MRT_VERSION)
		val = 0x0305;
#ifdef CONFIG_IP_PIMSM
	else if (optname == MRT_PIM)
		val = mrt->mroute_do_pim;
#endif
	else
		val = mrt->mroute_do_assert;
	if (copy_to_user(optval, &val, olr))
		return -EFAULT;
	return 0;
}
/*
 *	The IP multicast ioctl support routines.
 */

int ipmr_ioctl(struct sock *sk, int cmd, void __user *arg)
{
	struct sioc_sg_req sr;
	struct sioc_vif_req vr;
	struct vif_device *vif;
	struct mfc_cache *c;
	struct net *net = sock_net(sk);
	struct mr_table *mrt;

	mrt = ipmr_get_table(net, raw_sk(sk)->ipmr_table ? : RT_TABLE_DEFAULT);
	if (mrt == NULL)
		return -ENOENT;

	switch (cmd) {
	case SIOCGETVIFCNT:
		if (copy_from_user(&vr, arg, sizeof(vr)))
			return -EFAULT;
		if (vr.vifi >= mrt->maxvif)
			return -EINVAL;
		read_lock(&mrt_lock);
		vif = &mrt->vif_table[vr.vifi];
		if (VIF_EXISTS(mrt, vr.vifi)) {
			vr.icount = vif->pkt_in;
			vr.ocount = vif->pkt_out;
			vr.ibytes = vif->bytes_in;
			vr.obytes = vif->bytes_out;
			read_unlock(&mrt_lock);

			if (copy_to_user(arg, &vr, sizeof(vr)))
				return -EFAULT;
			return 0;
		}
		read_unlock(&mrt_lock);
		return -EADDRNOTAVAIL;
	case SIOCGETSGCNT:
		if (copy_from_user(&sr, arg, sizeof(sr)))
			return -EFAULT;

		rcu_read_lock();
		c = ipmr_cache_find(mrt, sr.src.s_addr, sr.grp.s_addr);
		if (c) {
			sr.pktcnt = c->mfc_un.res.pkt;
			sr.bytecnt = c->mfc_un.res.bytes;
			sr.wrong_if = c->mfc_un.res.wrong_if;
			rcu_read_unlock();

			if (copy_to_user(arg, &sr, sizeof(sr)))
				return -EFAULT;
			return 0;
		}
		rcu_read_unlock();
		return -EADDRNOTAVAIL;
	default:
		return -ENOIOCTLCMD;
	}
}
#ifdef CONFIG_COMPAT
struct compat_sioc_sg_req {
	struct in_addr src;
	struct in_addr grp;
	compat_ulong_t pktcnt;
	compat_ulong_t bytecnt;
	compat_ulong_t wrong_if;
};

struct compat_sioc_vif_req {
	vifi_t	vifi;		/* Which iface */
	compat_ulong_t icount;
	compat_ulong_t ocount;
	compat_ulong_t ibytes;
	compat_ulong_t obytes;
};

int ipmr_compat_ioctl(struct sock *sk, unsigned int cmd, void __user *arg)
{
	struct compat_sioc_sg_req sr;
	struct compat_sioc_vif_req vr;
	struct vif_device *vif;
	struct mfc_cache *c;
	struct net *net = sock_net(sk);
	struct mr_table *mrt;

	mrt = ipmr_get_table(net, raw_sk(sk)->ipmr_table ? : RT_TABLE_DEFAULT);
	if (mrt == NULL)
		return -ENOENT;

	switch (cmd) {
	case SIOCGETVIFCNT:
		if (copy_from_user(&vr, arg, sizeof(vr)))
			return -EFAULT;
		if (vr.vifi >= mrt->maxvif)
			return -EINVAL;
		read_lock(&mrt_lock);
		vif = &mrt->vif_table[vr.vifi];
		if (VIF_EXISTS(mrt, vr.vifi)) {
			vr.icount = vif->pkt_in;
			vr.ocount = vif->pkt_out;
			vr.ibytes = vif->bytes_in;
			vr.obytes = vif->bytes_out;
			read_unlock(&mrt_lock);

			if (copy_to_user(arg, &vr, sizeof(vr)))
				return -EFAULT;
			return 0;
		}
		read_unlock(&mrt_lock);
		return -EADDRNOTAVAIL;
	case SIOCGETSGCNT:
		if (copy_from_user(&sr, arg, sizeof(sr)))
			return -EFAULT;

		rcu_read_lock();
		c = ipmr_cache_find(mrt, sr.src.s_addr, sr.grp.s_addr);
		if (c) {
			sr.pktcnt = c->mfc_un.res.pkt;
			sr.bytecnt = c->mfc_un.res.bytes;
			sr.wrong_if = c->mfc_un.res.wrong_if;
			rcu_read_unlock();

			if (copy_to_user(arg, &sr, sizeof(sr)))
				return -EFAULT;
			return 0;
		}
		rcu_read_unlock();
		return -EADDRNOTAVAIL;
	default:
		return -ENOIOCTLCMD;
	}
}
#endif
static int ipmr_device_event(struct notifier_block *this, unsigned long event, void *ptr)
{
	struct net_device *dev = ptr;
	struct net *net = dev_net(dev);
	struct mr_table *mrt;
	struct vif_device *v;
	int ct;

	if (event != NETDEV_UNREGISTER)
		return NOTIFY_DONE;

	ipmr_for_each_table(mrt, net) {
		v = &mrt->vif_table[0];
		for (ct = 0; ct < mrt->maxvif; ct++, v++) {
			if (v->dev == dev)
				vif_delete(mrt, ct, 1, NULL);
		}
	}
	return NOTIFY_DONE;
}

static struct notifier_block ip_mr_notifier = {
	.notifier_call = ipmr_device_event,
};
/*
 *	Encapsulate a packet by attaching a valid IPIP header to it.
 *	This avoids tunnel drivers and other mess and gives us the speed so
 *	important for multicast video.
 */
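
/* Resulting layout (sketch):   [ new IPIP header | original packet ]
 * The outer header copies tos/ttl from the inner one and carries
 * IPPROTO_IPIP, so an ordinary ipip tunnel peer can decapsulate it.
 */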
static void ip_encap(struct sk_buff *skb, __be32 saddr, __be32 daddr)
{
	struct iphdr *iph;
	const struct iphdr *old_iph = ip_hdr(skb);

	skb_push(skb, sizeof(struct iphdr));
	skb->transport_header = skb->network_header;
	skb_reset_network_header(skb);
	iph = ip_hdr(skb);

	iph->version	= 4;
	iph->tos	= old_iph->tos;
	iph->ttl	= old_iph->ttl;
	iph->frag_off	= 0;
	iph->daddr	= daddr;
	iph->saddr	= saddr;
	iph->protocol	= IPPROTO_IPIP;
	iph->ihl	= 5;
	iph->tot_len	= htons(skb->len);
	ip_select_ident(iph, skb_dst(skb), NULL);
	ip_send_check(iph);

	memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
	nf_reset(skb);
}
static inline int ipmr_forward_finish(struct sk_buff *skb)
{
	struct ip_options *opt = &(IPCB(skb)->opt);

	IP_INC_STATS_BH(dev_net(skb_dst(skb)->dev), IPSTATS_MIB_OUTFORWDATAGRAMS);
	IP_ADD_STATS_BH(dev_net(skb_dst(skb)->dev), IPSTATS_MIB_OUTOCTETS, skb->len);

	if (unlikely(opt->optlen))
		ip_forward_options(skb);

	return dst_output(skb);
}
/*
 *	Processing handlers for ipmr_forward
 */

static void ipmr_queue_xmit(struct net *net, struct mr_table *mrt,
			    struct sk_buff *skb, struct mfc_cache *c, int vifi)
{
	const struct iphdr *iph = ip_hdr(skb);
	struct vif_device *vif = &mrt->vif_table[vifi];
	struct net_device *dev;
	struct rtable *rt;
	struct flowi4 fl4;
	int    encap = 0;

	if (vif->dev == NULL)
		goto out_free;

#ifdef CONFIG_IP_PIMSM
	if (vif->flags & VIFF_REGISTER) {
		vif->pkt_out++;
		vif->bytes_out += skb->len;
		vif->dev->stats.tx_bytes += skb->len;
		vif->dev->stats.tx_packets++;
		ipmr_cache_report(mrt, skb, vifi, IGMPMSG_WHOLEPKT);
		goto out_free;
	}
#endif

	if (vif->flags & VIFF_TUNNEL) {
		rt = ip_route_output_ports(net, &fl4, NULL,
					   vif->remote, vif->local,
					   0, 0,
					   IPPROTO_IPIP,
					   RT_TOS(iph->tos), vif->link);
		if (IS_ERR(rt))
			goto out_free;
		encap = sizeof(struct iphdr);
	} else {
		rt = ip_route_output_ports(net, &fl4, NULL, iph->daddr, 0,
					   0, 0,
					   IPPROTO_IPIP,
					   RT_TOS(iph->tos), vif->link);
		if (IS_ERR(rt))
			goto out_free;
	}

	dev = rt->dst.dev;

	if (skb->len+encap > dst_mtu(&rt->dst) && (ntohs(iph->frag_off) & IP_DF)) {
		/* Do not fragment multicasts. Alas, IPv4 does not
		 * allow to send ICMP, so that packets will disappear
		 * to blackhole.
		 */

		IP_INC_STATS_BH(dev_net(dev), IPSTATS_MIB_FRAGFAILS);
		ip_rt_put(rt);
		goto out_free;
	}

	encap += LL_RESERVED_SPACE(dev) + rt->dst.header_len;

	if (skb_cow(skb, encap)) {
		ip_rt_put(rt);
		goto out_free;
	}

	vif->pkt_out++;
	vif->bytes_out += skb->len;

	skb_dst_drop(skb);
	skb_dst_set(skb, &rt->dst);
	ip_decrease_ttl(ip_hdr(skb));

	/* FIXME: forward and output firewalls used to be called here.
	 * What do we do with netfilter? -- RR
	 */
	if (vif->flags & VIFF_TUNNEL) {
		ip_encap(skb, vif->local, vif->remote);
		/* FIXME: extra output firewall step used to be here. --RR */
		vif->dev->stats.tx_packets++;
		vif->dev->stats.tx_bytes += skb->len;
	}

	IPCB(skb)->flags |= IPSKB_FORWARDED;

	/*
	 * RFC1584 teaches, that DVMRP/PIM router must deliver packets locally
	 * not only before forwarding, but after forwarding on all output
	 * interfaces. It is clear, if mrouter runs a multicasting
	 * program, it should receive packets not depending to what interface
	 * program is joined.
	 * If we will not make it, the program will have to join on all
	 * interfaces. On the other hand, multihoming host (or router, but
	 * not mrouter) cannot join to more than one interface - it will
	 * result in receiving multiple packets.
	 */
	NF_HOOK(NFPROTO_IPV4, NF_INET_FORWARD, skb, skb->dev, dev,
		ipmr_forward_finish);
	return;

out_free:
	kfree_skb(skb);
}
static int ipmr_find_vif(struct mr_table *mrt, struct net_device *dev)
{
	int ct;

	for (ct = mrt->maxvif-1; ct >= 0; ct--) {
		if (mrt->vif_table[ct].dev == dev)
			return ct;
	}
	return -1;
}
1736 /* "local" means that we should preserve one skb (for local delivery) */
1738 static int ip_mr_forward(struct net *net, struct mr_table *mrt,
1739 struct sk_buff *skb, struct mfc_cache *cache,
1745 vif = cache->mfc_parent;
1746 cache->mfc_un.res.pkt++;
1747 cache->mfc_un.res.bytes += skb->len;
1750 * Wrong interface: drop packet and (maybe) send PIM assert.
1752 if (mrt->vif_table[vif].dev != skb->dev) {
1755 if (rt_is_output_route(skb_rtable(skb))) {
1756 /* It is our own packet, looped back.
1757 * Very complicated situation...
1759 * The best workaround until routing daemons will be
1760 * fixed is not to redistribute packet, if it was
1761 * send through wrong interface. It means, that
1762 * multicast applications WILL NOT work for
1763 * (S,G), which have default multicast route pointing
1764 * to wrong oif. In any case, it is not a good
1765 * idea to use multicasting applications on router.
1770 cache->mfc_un.res.wrong_if++;
1771 true_vifi = ipmr_find_vif(mrt, skb->dev);
1773 if (true_vifi >= 0 && mrt->mroute_do_assert &&
1774 /* pimsm uses asserts, when switching from RPT to SPT,
1775 * so that we cannot check that packet arrived on an oif.
1776 * It is bad, but otherwise we would need to move pretty
1777 * large chunk of pimd to kernel. Ough... --ANK
1779 (mrt->mroute_do_pim ||
1780 cache->mfc_un.res.ttls[true_vifi] < 255) &&
1782 cache->mfc_un.res.last_assert + MFC_ASSERT_THRESH)) {
1783 cache->mfc_un.res.last_assert = jiffies;
1784 ipmr_cache_report(mrt, skb, true_vifi, IGMPMSG_WRONGVIF);
1789 mrt->vif_table[vif].pkt_in++;
1790 mrt->vif_table[vif].bytes_in += skb->len;
1795 for (ct = cache->mfc_un.res.maxvif - 1;
1796 ct >= cache->mfc_un.res.minvif; ct--) {
1797 if (ip_hdr(skb)->ttl > cache->mfc_un.res.ttls[ct]) {
1799 struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
1802 ipmr_queue_xmit(net, mrt, skb2, cache,
1810 struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
1813 ipmr_queue_xmit(net, mrt, skb2, cache, psend);
1815 ipmr_queue_xmit(net, mrt, skb, cache, psend);
static struct mr_table *ipmr_rt_fib_lookup(struct net *net, struct sk_buff *skb)
{
	struct rtable *rt = skb_rtable(skb);
	struct iphdr *iph = ip_hdr(skb);
	struct flowi4 fl4 = {
		.daddr = iph->daddr,
		.saddr = iph->saddr,
		.flowi4_tos = RT_TOS(iph->tos),
		.flowi4_oif = (rt_is_output_route(rt) ?
			       skb->dev->ifindex : 0),
		.flowi4_iif = (rt_is_output_route(rt) ?
			       LOOPBACK_IFINDEX :
			       skb->dev->ifindex),
		.flowi4_mark = skb->mark,
	};
	struct mr_table *mrt;
	int err;

	err = ipmr_fib_lookup(net, &fl4, &mrt);
	if (err)
		return ERR_PTR(err);
	return mrt;
}
/*
 *	Multicast packets for forwarding arrive here
 *	Called with rcu_read_lock();
 */

int ip_mr_input(struct sk_buff *skb)
{
	struct mfc_cache *cache;
	struct net *net = dev_net(skb->dev);
	int local = skb_rtable(skb)->rt_flags & RTCF_LOCAL;
	struct mr_table *mrt;

	/* Packet is looped back after forward, it should not be
	 * forwarded second time, but still can be delivered locally.
	 */
	if (IPCB(skb)->flags & IPSKB_FORWARDED)
		goto dont_forward;

	mrt = ipmr_rt_fib_lookup(net, skb);
	if (IS_ERR(mrt)) {
		kfree_skb(skb);
		return PTR_ERR(mrt);
	}
	if (!local) {
		if (IPCB(skb)->opt.router_alert) {
			if (ip_call_ra_chain(skb))
				return 0;
		} else if (ip_hdr(skb)->protocol == IPPROTO_IGMP) {
			/* IGMPv1 (and broken IGMPv2 implementations sort of
			 * Cisco IOS <= 11.2(8)) do not put router alert
			 * option to IGMP packets destined to routable
			 * groups. It is very bad, because it means
			 * that we can forward NO IGMP messages.
			 */
			struct sock *mroute_sk;

			mroute_sk = rcu_dereference(mrt->mroute_sk);
			if (mroute_sk) {
				nf_reset(skb);
				raw_rcv(mroute_sk, skb);
				return 0;
			}
		}
	}

	/* already under rcu_read_lock() */
	cache = ipmr_cache_find(mrt, ip_hdr(skb)->saddr, ip_hdr(skb)->daddr);

	/*
	 *	No usable cache entry
	 */
	if (cache == NULL) {
		int vif;

		if (local) {
			struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
			ip_local_deliver(skb);
			if (skb2 == NULL)
				return -ENOBUFS;
			skb = skb2;
		}

		read_lock(&mrt_lock);
		vif = ipmr_find_vif(mrt, skb->dev);
		if (vif >= 0) {
			int err2 = ipmr_cache_unresolved(mrt, vif, skb);
			read_unlock(&mrt_lock);

			return err2;
		}
		read_unlock(&mrt_lock);
		kfree_skb(skb);
		return -ENODEV;
	}

	read_lock(&mrt_lock);
	ip_mr_forward(net, mrt, skb, cache, local);
	read_unlock(&mrt_lock);

	if (local)
		return ip_local_deliver(skb);

	return 0;

dont_forward:
	if (local)
		return ip_local_deliver(skb);
	kfree_skb(skb);
	return 0;
}
#ifdef CONFIG_IP_PIMSM
/* called with rcu_read_lock() */
static int __pim_rcv(struct mr_table *mrt, struct sk_buff *skb,
		     unsigned int pimlen)
{
	struct net_device *reg_dev = NULL;
	struct iphdr *encap;

	encap = (struct iphdr *)(skb_transport_header(skb) + pimlen);
	/*
	 * Check that:
	 * a. packet is really sent to a multicast group
	 * b. packet is not a NULL-REGISTER
	 * c. packet is not truncated
	 */
	if (!ipv4_is_multicast(encap->daddr) ||
	    encap->tot_len == 0 ||
	    ntohs(encap->tot_len) + pimlen > skb->len)
		return 1;

	read_lock(&mrt_lock);
	if (mrt->mroute_reg_vif_num >= 0)
		reg_dev = mrt->vif_table[mrt->mroute_reg_vif_num].dev;
	read_unlock(&mrt_lock);

	if (reg_dev == NULL)
		return 1;

	skb->mac_header = skb->network_header;
	skb_pull(skb, (u8 *)encap - skb->data);
	skb_reset_network_header(skb);
	skb->protocol = htons(ETH_P_IP);
	skb->ip_summed = CHECKSUM_NONE;
	skb->pkt_type = PACKET_HOST;

	skb_tunnel_rx(skb, reg_dev);

	netif_rx(skb);

	return NET_RX_SUCCESS;
}
#endif
#ifdef CONFIG_IP_PIMSM_V1
/*
 * Handle IGMP messages of PIMv1
 */

int pim_rcv_v1(struct sk_buff *skb)
{
	struct igmphdr *pim;
	struct net *net = dev_net(skb->dev);
	struct mr_table *mrt;

	if (!pskb_may_pull(skb, sizeof(*pim) + sizeof(struct iphdr)))
		goto drop;

	pim = igmp_hdr(skb);

	mrt = ipmr_rt_fib_lookup(net, skb);
	if (IS_ERR(mrt))
		goto drop;
	if (!mrt->mroute_do_pim ||
	    pim->group != PIM_V1_VERSION || pim->code != PIM_V1_REGISTER)
		goto drop;

	if (__pim_rcv(mrt, skb, sizeof(*pim))) {
drop:
		kfree_skb(skb);
	}
	return 0;
}
#endif
#ifdef CONFIG_IP_PIMSM_V2
static int pim_rcv(struct sk_buff *skb)
{
	struct pimreghdr *pim;
	struct net *net = dev_net(skb->dev);
	struct mr_table *mrt;

	if (!pskb_may_pull(skb, sizeof(*pim) + sizeof(struct iphdr)))
		goto drop;

	pim = (struct pimreghdr *)skb_transport_header(skb);
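	/* Accept the register if either the header-only checksum (what
	 * the PIMv2 spec requires for Registers) or a checksum over the
	 * whole packet (as some older peers compute) verifies.
	 */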
	if (pim->type != ((PIM_VERSION << 4) | (PIM_REGISTER)) ||
	    (pim->flags & PIM_NULL_REGISTER) ||
	    (ip_compute_csum((void *)pim, sizeof(*pim)) != 0 &&
	     csum_fold(skb_checksum(skb, 0, skb->len, 0))))
		goto drop;

	mrt = ipmr_rt_fib_lookup(net, skb);
	if (IS_ERR(mrt))
		goto drop;
	if (__pim_rcv(mrt, skb, sizeof(*pim))) {
drop:
		kfree_skb(skb);
	}
	return 0;
}
#endif
static int __ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
			      struct mfc_cache *c, struct rtmsg *rtm)
{
	int ct;
	struct rtnexthop *nhp;
	struct nlattr *mp_attr;
	struct rta_mfc_stats mfcs;

	/* If cache is unresolved, don't try to parse IIF and OIF */
	if (c->mfc_parent >= MAXVIFS)
		return -ENOENT;

	if (VIF_EXISTS(mrt, c->mfc_parent) &&
	    nla_put_u32(skb, RTA_IIF, mrt->vif_table[c->mfc_parent].dev->ifindex) < 0)
		return -EMSGSIZE;

	if (!(mp_attr = nla_nest_start(skb, RTA_MULTIPATH)))
		return -EMSGSIZE;

	for (ct = c->mfc_un.res.minvif; ct < c->mfc_un.res.maxvif; ct++) {
		if (VIF_EXISTS(mrt, ct) && c->mfc_un.res.ttls[ct] < 255) {
			if (!(nhp = nla_reserve_nohdr(skb, sizeof(*nhp)))) {
				nla_nest_cancel(skb, mp_attr);
				return -EMSGSIZE;
			}

			nhp->rtnh_flags = 0;
			nhp->rtnh_hops = c->mfc_un.res.ttls[ct];
			nhp->rtnh_ifindex = mrt->vif_table[ct].dev->ifindex;
			nhp->rtnh_len = sizeof(*nhp);
		}
	}

	nla_nest_end(skb, mp_attr);

	mfcs.mfcs_packets = c->mfc_un.res.pkt;
	mfcs.mfcs_bytes = c->mfc_un.res.bytes;
	mfcs.mfcs_wrong_if = c->mfc_un.res.wrong_if;
	if (nla_put(skb, RTA_MFC_STATS, sizeof(mfcs), &mfcs) < 0)
		return -EMSGSIZE;

	rtm->rtm_type = RTN_MULTICAST;
	return 1;
}
int ipmr_get_route(struct net *net, struct sk_buff *skb,
		   __be32 saddr, __be32 daddr,
		   struct rtmsg *rtm, int nowait)
{
	struct mfc_cache *cache;
	struct mr_table *mrt;
	int err;

	mrt = ipmr_get_table(net, RT_TABLE_DEFAULT);
	if (mrt == NULL)
		return -ENOENT;

	rcu_read_lock();
	cache = ipmr_cache_find(mrt, saddr, daddr);

	if (cache == NULL) {
		struct sk_buff *skb2;
		struct iphdr *iph;
		struct net_device *dev;
		int vif = -1;

		if (nowait) {
			rcu_read_unlock();
			return -EAGAIN;
		}

		dev = skb->dev;
		read_lock(&mrt_lock);
		if (dev)
			vif = ipmr_find_vif(mrt, dev);
		if (vif < 0) {
			read_unlock(&mrt_lock);
			rcu_read_unlock();
			return -ENODEV;
		}
		skb2 = skb_clone(skb, GFP_ATOMIC);
		if (!skb2) {
			read_unlock(&mrt_lock);
			rcu_read_unlock();
			return -ENOMEM;
		}

		skb_push(skb2, sizeof(struct iphdr));
		skb_reset_network_header(skb2);
		iph = ip_hdr(skb2);
		iph->ihl = sizeof(struct iphdr) >> 2;
		iph->saddr = saddr;
		iph->daddr = daddr;
		iph->version = 0;
		err = ipmr_cache_unresolved(mrt, vif, skb2);
		read_unlock(&mrt_lock);
		rcu_read_unlock();
		return err;
	}

	read_lock(&mrt_lock);
	if (!nowait && (rtm->rtm_flags & RTM_F_NOTIFY))
		cache->mfc_flags |= MFC_NOTIFY;
	err = __ipmr_fill_mroute(mrt, skb, cache, rtm);
	read_unlock(&mrt_lock);
	rcu_read_unlock();
	return err;
}
static int ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
			    u32 portid, u32 seq, struct mfc_cache *c)
{
	struct nlmsghdr *nlh;
	struct rtmsg *rtm;
	int err;

	nlh = nlmsg_put(skb, portid, seq, RTM_NEWROUTE, sizeof(*rtm), NLM_F_MULTI);
	if (nlh == NULL)
		return -EMSGSIZE;

	rtm = nlmsg_data(nlh);
	rtm->rtm_family   = RTNL_FAMILY_IPMR;
	rtm->rtm_dst_len  = 32;
	rtm->rtm_src_len  = 32;
	rtm->rtm_tos      = 0;
	rtm->rtm_table    = mrt->id;
	if (nla_put_u32(skb, RTA_TABLE, mrt->id))
		goto nla_put_failure;
	rtm->rtm_type     = RTN_MULTICAST;
	rtm->rtm_scope    = RT_SCOPE_UNIVERSE;
	if (c->mfc_flags & MFC_STATIC)
		rtm->rtm_protocol = RTPROT_STATIC;
	else
		rtm->rtm_protocol = RTPROT_MROUTED;
	rtm->rtm_flags    = 0;

	if (nla_put_be32(skb, RTA_SRC, c->mfc_origin) ||
	    nla_put_be32(skb, RTA_DST, c->mfc_mcastgrp))
		goto nla_put_failure;
	err = __ipmr_fill_mroute(mrt, skb, c, rtm);
	/* do not break the dump if cache is unresolved */
	if (err < 0 && err != -ENOENT)
		goto nla_put_failure;

	return nlmsg_end(skb, nlh);

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}
static int ipmr_rtm_dumproute(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	struct mr_table *mrt;
	struct mfc_cache *mfc;
	unsigned int t = 0, s_t;
	unsigned int h = 0, s_h;
	unsigned int e = 0, s_e;

	s_t = cb->args[0];
	s_h = cb->args[1];
	s_e = cb->args[2];

	rcu_read_lock();
	ipmr_for_each_table(mrt, net) {
		if (t < s_t)
			goto next_table;
		if (t > s_t)
			s_h = 0;
		for (h = s_h; h < MFC_LINES; h++) {
			list_for_each_entry_rcu(mfc, &mrt->mfc_cache_array[h], list) {
				if (e < s_e)
					goto next_entry;
				if (ipmr_fill_mroute(mrt, skb,
						     NETLINK_CB(cb->skb).portid,
						     cb->nlh->nlmsg_seq,
						     mfc) < 0)
					goto done;
next_entry:
				e++;
			}
			e = s_e = 0;
		}
		spin_lock_bh(&mfc_unres_lock);
		list_for_each_entry(mfc, &mrt->mfc_unres_queue, list) {
			if (e < s_e)
				goto next_entry2;
			if (ipmr_fill_mroute(mrt, skb,
					     NETLINK_CB(cb->skb).portid,
					     cb->nlh->nlmsg_seq,
					     mfc) < 0) {
				spin_unlock_bh(&mfc_unres_lock);
				goto done;
			}
next_entry2:
			e++;
		}
		spin_unlock_bh(&mfc_unres_lock);
		e = s_e = 0;
		s_h = 0;
next_table:
		t++;
	}
done:
	rcu_read_unlock();

	cb->args[2] = e;
	cb->args[1] = h;
	cb->args[0] = t;

	return skb->len;
}
#ifdef CONFIG_PROC_FS
/*
 *	The /proc interfaces to multicast routing :
 *	/proc/net/ip_mr_cache & /proc/net/ip_mr_vif
 */
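
/* ip_mr_vif prints one line per configured VIF (packet and byte
 * counters, flags, local/remote addresses); ip_mr_cache prints one
 * line per (S,G) entry with its iif and the oif:TTL list.
 */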
struct ipmr_vif_iter {
	struct seq_net_private p;
	struct mr_table *mrt;
	int ct;
};

static struct vif_device *ipmr_vif_seq_idx(struct net *net,
					   struct ipmr_vif_iter *iter,
					   loff_t pos)
{
	struct mr_table *mrt = iter->mrt;

	for (iter->ct = 0; iter->ct < mrt->maxvif; ++iter->ct) {
		if (!VIF_EXISTS(mrt, iter->ct))
			continue;
		if (pos-- == 0)
			return &mrt->vif_table[iter->ct];
	}
	return NULL;
}
static void *ipmr_vif_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(mrt_lock)
{
	struct ipmr_vif_iter *iter = seq->private;
	struct net *net = seq_file_net(seq);
	struct mr_table *mrt;

	mrt = ipmr_get_table(net, RT_TABLE_DEFAULT);
	if (mrt == NULL)
		return ERR_PTR(-ENOENT);

	iter->mrt = mrt;

	read_lock(&mrt_lock);
	return *pos ? ipmr_vif_seq_idx(net, seq->private, *pos - 1)
		: SEQ_START_TOKEN;
}
static void *ipmr_vif_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct ipmr_vif_iter *iter = seq->private;
	struct net *net = seq_file_net(seq);
	struct mr_table *mrt = iter->mrt;

	++*pos;
	if (v == SEQ_START_TOKEN)
		return ipmr_vif_seq_idx(net, iter, 0);

	while (++iter->ct < mrt->maxvif) {
		if (!VIF_EXISTS(mrt, iter->ct))
			continue;
		return &mrt->vif_table[iter->ct];
	}
	return NULL;
}

static void ipmr_vif_seq_stop(struct seq_file *seq, void *v)
	__releases(mrt_lock)
{
	read_unlock(&mrt_lock);
}
static int ipmr_vif_seq_show(struct seq_file *seq, void *v)
{
	struct ipmr_vif_iter *iter = seq->private;
	struct mr_table *mrt = iter->mrt;

	if (v == SEQ_START_TOKEN) {
		seq_puts(seq,
			 "Interface      BytesIn  PktsIn  BytesOut PktsOut Flags Local    Remote\n");
	} else {
		const struct vif_device *vif = v;
		const char *name = vif->dev ? vif->dev->name : "none";

		seq_printf(seq,
			   "%2Zd %-10s %8ld %7ld  %8ld %7ld %05X %08X %08X\n",
			   vif - mrt->vif_table,
			   name, vif->bytes_in, vif->pkt_in,
			   vif->bytes_out, vif->pkt_out,
			   vif->flags, vif->local, vif->remote);
	}
	return 0;
}
static const struct seq_operations ipmr_vif_seq_ops = {
	.start = ipmr_vif_seq_start,
	.next  = ipmr_vif_seq_next,
	.stop  = ipmr_vif_seq_stop,
	.show  = ipmr_vif_seq_show,
};

static int ipmr_vif_open(struct inode *inode, struct file *file)
{
	return seq_open_net(inode, file, &ipmr_vif_seq_ops,
			    sizeof(struct ipmr_vif_iter));
}

static const struct file_operations ipmr_vif_fops = {
	.owner	 = THIS_MODULE,
	.open	 = ipmr_vif_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = seq_release_net,
};
struct ipmr_mfc_iter {
	struct seq_net_private p;
	struct mr_table *mrt;
	struct list_head *cache;
	int ct;
};


static struct mfc_cache *ipmr_mfc_seq_idx(struct net *net,
					  struct ipmr_mfc_iter *it, loff_t pos)
{
	struct mr_table *mrt = it->mrt;
	struct mfc_cache *mfc;

	rcu_read_lock();
	for (it->ct = 0; it->ct < MFC_LINES; it->ct++) {
		it->cache = &mrt->mfc_cache_array[it->ct];
		list_for_each_entry_rcu(mfc, it->cache, list)
			if (pos-- == 0)
				return mfc;
	}
	rcu_read_unlock();

	spin_lock_bh(&mfc_unres_lock);
	it->cache = &mrt->mfc_unres_queue;
	list_for_each_entry(mfc, it->cache, list)
		if (pos-- == 0)
			return mfc;
	spin_unlock_bh(&mfc_unres_lock);

	it->cache = NULL;
	return NULL;
}
static void *ipmr_mfc_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct ipmr_mfc_iter *it = seq->private;
	struct net *net = seq_file_net(seq);
	struct mr_table *mrt;

	mrt = ipmr_get_table(net, RT_TABLE_DEFAULT);
	if (mrt == NULL)
		return ERR_PTR(-ENOENT);

	it->mrt = mrt;
	it->cache = NULL;
	return *pos ? ipmr_mfc_seq_idx(net, seq->private, *pos - 1)
		: SEQ_START_TOKEN;
}
static void *ipmr_mfc_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct mfc_cache *mfc = v;
	struct ipmr_mfc_iter *it = seq->private;
	struct net *net = seq_file_net(seq);
	struct mr_table *mrt = it->mrt;

	++*pos;

	if (v == SEQ_START_TOKEN)
		return ipmr_mfc_seq_idx(net, seq->private, 0);

	if (mfc->list.next != it->cache)
		return list_entry(mfc->list.next, struct mfc_cache, list);

	if (it->cache == &mrt->mfc_unres_queue)
		goto end_of_list;

	BUG_ON(it->cache != &mrt->mfc_cache_array[it->ct]);

	while (++it->ct < MFC_LINES) {
		it->cache = &mrt->mfc_cache_array[it->ct];
		if (list_empty(it->cache))
			continue;
		return list_first_entry(it->cache, struct mfc_cache, list);
	}

	/* exhausted cache_array, show unresolved */
	rcu_read_unlock();
	it->cache = &mrt->mfc_unres_queue;
	it->ct = 0;

	spin_lock_bh(&mfc_unres_lock);
	if (!list_empty(it->cache))
		return list_first_entry(it->cache, struct mfc_cache, list);

end_of_list:
	spin_unlock_bh(&mfc_unres_lock);
	it->cache = NULL;

	return NULL;
}
static void ipmr_mfc_seq_stop(struct seq_file *seq, void *v)
{
	struct ipmr_mfc_iter *it = seq->private;
	struct mr_table *mrt = it->mrt;

	if (it->cache == &mrt->mfc_unres_queue)
		spin_unlock_bh(&mfc_unres_lock);
	else if (it->cache == &mrt->mfc_cache_array[it->ct])
		rcu_read_unlock();
}
static int ipmr_mfc_seq_show(struct seq_file *seq, void *v)
{
	int n;

	if (v == SEQ_START_TOKEN) {
		seq_puts(seq,
			 "Group    Origin   Iif     Pkts    Bytes    Wrong Oifs\n");
	} else {
		const struct mfc_cache *mfc = v;
		const struct ipmr_mfc_iter *it = seq->private;
		const struct mr_table *mrt = it->mrt;

		seq_printf(seq, "%08X %08X %-3hd",
			   (__force u32) mfc->mfc_mcastgrp,
			   (__force u32) mfc->mfc_origin,
			   mfc->mfc_parent);

		if (it->cache != &mrt->mfc_unres_queue) {
			seq_printf(seq, " %8lu %8lu %8lu",
				   mfc->mfc_un.res.pkt,
				   mfc->mfc_un.res.bytes,
				   mfc->mfc_un.res.wrong_if);
			for (n = mfc->mfc_un.res.minvif;
			     n < mfc->mfc_un.res.maxvif; n++) {
				if (VIF_EXISTS(mrt, n) &&
				    mfc->mfc_un.res.ttls[n] < 255)
					seq_printf(seq,
						   " %2d:%-3d",
						   n, mfc->mfc_un.res.ttls[n]);
			}
		} else {
			/* unresolved mfc_caches don't contain
			 * pkt, bytes and wrong_if values
			 */
			seq_printf(seq, " %8lu %8lu %8lu", 0ul, 0ul, 0ul);
		}
		seq_putc(seq, '\n');
	}
	return 0;
}
static const struct seq_operations ipmr_mfc_seq_ops = {
	.start = ipmr_mfc_seq_start,
	.next  = ipmr_mfc_seq_next,
	.stop  = ipmr_mfc_seq_stop,
	.show  = ipmr_mfc_seq_show,
};

static int ipmr_mfc_open(struct inode *inode, struct file *file)
{
	return seq_open_net(inode, file, &ipmr_mfc_seq_ops,
			    sizeof(struct ipmr_mfc_iter));
}

static const struct file_operations ipmr_mfc_fops = {
	.owner	 = THIS_MODULE,
	.open	 = ipmr_mfc_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = seq_release_net,
};
#endif
#ifdef CONFIG_IP_PIMSM_V2
static const struct net_protocol pim_protocol = {
	.handler	=	pim_rcv,
	.netns_ok	=	1,
};
#endif


/*
 *	Setup for IP multicast routing
 */
static int __net_init ipmr_net_init(struct net *net)
{
	int err;

	err = ipmr_rules_init(net);
	if (err < 0)
		goto fail;

#ifdef CONFIG_PROC_FS
	err = -ENOMEM;
	if (!proc_net_fops_create(net, "ip_mr_vif", 0, &ipmr_vif_fops))
		goto proc_vif_fail;
	if (!proc_net_fops_create(net, "ip_mr_cache", 0, &ipmr_mfc_fops))
		goto proc_cache_fail;
#endif
	return 0;

#ifdef CONFIG_PROC_FS
proc_cache_fail:
	proc_net_remove(net, "ip_mr_vif");
proc_vif_fail:
	ipmr_rules_exit(net);
#endif
fail:
	return err;
}
static void __net_exit ipmr_net_exit(struct net *net)
{
#ifdef CONFIG_PROC_FS
	proc_net_remove(net, "ip_mr_cache");
	proc_net_remove(net, "ip_mr_vif");
#endif
	ipmr_rules_exit(net);
}

static struct pernet_operations ipmr_net_ops = {
	.init = ipmr_net_init,
	.exit = ipmr_net_exit,
};
int __init ip_mr_init(void)
{
	int err;

	mrt_cachep = kmem_cache_create("ip_mrt_cache",
				       sizeof(struct mfc_cache),
				       0, SLAB_HWCACHE_ALIGN | SLAB_PANIC,
				       NULL);
	if (!mrt_cachep)
		return -ENOMEM;

	err = register_pernet_subsys(&ipmr_net_ops);
	if (err)
		goto reg_pernet_fail;

	err = register_netdevice_notifier(&ip_mr_notifier);
	if (err)
		goto reg_notif_fail;
#ifdef CONFIG_IP_PIMSM_V2
	if (inet_add_protocol(&pim_protocol, IPPROTO_PIM) < 0) {
		pr_err("%s: can't add PIM protocol\n", __func__);
		err = -EAGAIN;
		goto add_proto_fail;
	}
#endif
	rtnl_register(RTNL_FAMILY_IPMR, RTM_GETROUTE,
		      NULL, ipmr_rtm_dumproute, NULL);
	return 0;

#ifdef CONFIG_IP_PIMSM_V2
add_proto_fail:
	unregister_netdevice_notifier(&ip_mr_notifier);
#endif
reg_notif_fail:
	unregister_pernet_subsys(&ipmr_net_ops);
reg_pernet_fail:
	kmem_cache_destroy(mrt_cachep);
	return err;
}