Preliminary work to make IPv4 multicast routing netns-aware.
Make the IPv4 multicast routing mroute_socket per-namespace,
moving it into struct netns_ipv4.
At the moment, mroute_socket is only referenced in init_net.
Signed-off-by: Benjamin Thery <benjamin.thery@bull.net>
Signed-off-by: David S. Miller <davem@davemloft.net>
struct timer_list rt_secret_timer;
atomic_t rt_genid;
struct timer_list rt_secret_timer;
atomic_t rt_genid;
+
+#ifdef CONFIG_IP_MROUTE
+ struct sock *mroute_sk;
+#endif
#define CONFIG_IP_PIMSM 1
#endif
#define CONFIG_IP_PIMSM 1
#endif
-static struct sock *mroute_socket;
-
-
/* Big lock, protecting vif table, mrt cache and mroute socket state.
Note that the changes are semaphored via rtnl_lock.
*/
/* Big lock, protecting vif table, mrt cache and mroute socket state.
Note that the changes are semaphored via rtnl_lock.
*/
skb->transport_header = skb->network_header;
}
skb->transport_header = skb->network_header;
}
- if (mroute_socket == NULL) {
+ if (init_net.ipv4.mroute_sk == NULL) {
kfree_skb(skb);
return -EINVAL;
}
kfree_skb(skb);
return -EINVAL;
}
/*
* Deliver to mrouted
*/
/*
* Deliver to mrouted
*/
- if ((ret = sock_queue_rcv_skb(mroute_socket, skb))<0) {
+ ret = sock_queue_rcv_skb(init_net.ipv4.mroute_sk, skb);
+ if (ret < 0) {
if (net_ratelimit())
printk(KERN_WARNING "mroute: pending queue full, dropping entries.\n");
kfree_skb(skb);
if (net_ratelimit())
printk(KERN_WARNING "mroute: pending queue full, dropping entries.\n");
kfree_skb(skb);
static void mrtsock_destruct(struct sock *sk)
{
rtnl_lock();
static void mrtsock_destruct(struct sock *sk)
{
rtnl_lock();
- if (sk == mroute_socket) {
+ if (sk == init_net.ipv4.mroute_sk) {
IPV4_DEVCONF_ALL(sock_net(sk), MC_FORWARDING)--;
write_lock_bh(&mrt_lock);
IPV4_DEVCONF_ALL(sock_net(sk), MC_FORWARDING)--;
write_lock_bh(&mrt_lock);
+ init_net.ipv4.mroute_sk = NULL;
write_unlock_bh(&mrt_lock);
mroute_clean_tables(sk);
write_unlock_bh(&mrt_lock);
mroute_clean_tables(sk);
struct mfcctl mfc;
if (optname != MRT_INIT) {
struct mfcctl mfc;
if (optname != MRT_INIT) {
- if (sk != mroute_socket && !capable(CAP_NET_ADMIN))
+ if (sk != init_net.ipv4.mroute_sk && !capable(CAP_NET_ADMIN))
return -ENOPROTOOPT;
rtnl_lock();
return -ENOPROTOOPT;
rtnl_lock();
+ if (init_net.ipv4.mroute_sk) {
rtnl_unlock();
return -EADDRINUSE;
}
rtnl_unlock();
return -EADDRINUSE;
}
ret = ip_ra_control(sk, 1, mrtsock_destruct);
if (ret == 0) {
write_lock_bh(&mrt_lock);
ret = ip_ra_control(sk, 1, mrtsock_destruct);
if (ret == 0) {
write_lock_bh(&mrt_lock);
+ init_net.ipv4.mroute_sk = sk;
write_unlock_bh(&mrt_lock);
IPV4_DEVCONF_ALL(sock_net(sk), MC_FORWARDING)++;
write_unlock_bh(&mrt_lock);
IPV4_DEVCONF_ALL(sock_net(sk), MC_FORWARDING)++;
rtnl_unlock();
return ret;
case MRT_DONE:
rtnl_unlock();
return ret;
case MRT_DONE:
- if (sk != mroute_socket)
+ if (sk != init_net.ipv4.mroute_sk)
return -EACCES;
return ip_ra_control(sk, 0, NULL);
case MRT_ADD_VIF:
return -EACCES;
return ip_ra_control(sk, 0, NULL);
case MRT_ADD_VIF:
return -ENFILE;
rtnl_lock();
if (optname == MRT_ADD_VIF) {
return -ENFILE;
rtnl_lock();
if (optname == MRT_ADD_VIF) {
- ret = vif_add(&vif, sk==mroute_socket);
+ ret = vif_add(&vif, sk == init_net.ipv4.mroute_sk);
} else {
ret = vif_delete(vif.vifc_vifi, 0);
}
} else {
ret = vif_delete(vif.vifc_vifi, 0);
}
if (optname == MRT_DEL_MFC)
ret = ipmr_mfc_delete(&mfc);
else
if (optname == MRT_DEL_MFC)
ret = ipmr_mfc_delete(&mfc);
else
- ret = ipmr_mfc_add(&mfc, sk==mroute_socket);
+ ret = ipmr_mfc_add(&mfc, sk == init_net.ipv4.mroute_sk);
rtnl_unlock();
return ret;
/*
rtnl_unlock();
return ret;
/*
that we can forward NO IGMP messages.
*/
read_lock(&mrt_lock);
that we can forward NO IGMP messages.
*/
read_lock(&mrt_lock);
+ if (init_net.ipv4.mroute_sk) {
- raw_rcv(mroute_socket, skb);
+ raw_rcv(init_net.ipv4.mroute_sk, skb);
read_unlock(&mrt_lock);
return 0;
}
read_unlock(&mrt_lock);
return 0;
}