ipv6: Replace spinlock with seqlock and rcu in ip6_tunnel
author     Martin KaFai Lau <kafai@fb.com>
           Tue, 15 Sep 2015 21:30:09 +0000 (14:30 -0700)
committer  David S. Miller <davem@davemloft.net>
           Tue, 15 Sep 2015 21:53:05 +0000 (14:53 -0700)
This patch uses a seqlock to ensure consistency between idst->dst and
idst->cookie.  It also makes dst freeing from the fib tree undergo an
RCU grace period.
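
For reference, here is the resulting pattern in condensed form.  This is
not part of the patch; struct dst_snap, snap_set() and snap_get() are
illustrative names only.  The writer publishes dst and cookie together
inside the write side of the seqlock; readers retry the read section
until both values come from the same update, then take a reference only
if the dst refcount is still non-zero.  The rcu read section pairs with
the call_rcu()-deferred freeing on the fib side, so a reader never
touches a dst whose memory has already been returned:

	/* Condensed sketch, illustrative only; not from the kernel tree. */
	#include <linux/seqlock.h>
	#include <linux/rcupdate.h>
	#include <net/dst.h>
	#include <net/ip6_fib.h>

	struct dst_snap {			/* hypothetical cache slot */
		seqlock_t lock;
		struct dst_entry __rcu *dst;
		u32 cookie;
	};

	static void snap_set(struct dst_snap *s, struct dst_entry *dst)
	{
		write_seqlock_bh(&s->lock);
		/* writers are serialized by the lock; drop the old entry */
		dst_release(rcu_dereference_protected(s->dst,
					lockdep_is_held(&s->lock.lock)));
		if (dst) {
			dst_hold(dst);
			s->cookie = rt6_get_cookie((struct rt6_info *)dst);
		} else {
			s->cookie = 0;
		}
		rcu_assign_pointer(s->dst, dst);
		write_sequnlock_bh(&s->lock);
	}

	static struct dst_entry *snap_get(struct dst_snap *s)
	{
		struct dst_entry *dst;
		unsigned int seq;
		u32 cookie;

		rcu_read_lock();
		do {	/* retry until dst and cookie form a consistent pair */
			seq = read_seqbegin(&s->lock);
			dst = rcu_dereference(s->dst);
			cookie = s->cookie;
		} while (read_seqretry(&s->lock, seq));

		/* Only keep the dst if its refcount has not hit zero; the
		 * rcu read section keeps the memory valid meanwhile because
		 * the fib code now frees dsts via call_rcu().
		 */
		if (dst && !atomic_inc_not_zero(&dst->__refcnt))
			dst = NULL;
		rcu_read_unlock();

		if (dst && dst->obsolete && !dst->ops->check(dst, cookie)) {
			dst_release(dst);
			dst = NULL;
		}
		return dst;
	}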

Signed-off-by: Martin KaFai Lau <kafai@fb.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
include/net/ip6_tunnel.h
net/ipv6/ip6_fib.c
net/ipv6/ip6_tunnel.c

diff --git a/include/net/ip6_tunnel.h b/include/net/ip6_tunnel.h
index 60b4f402f78c38c83d220f5188fa9d9b1f48adfc..65c2a9397b3c7ea1119092d46b7ff75e398bbad8 100644
--- a/include/net/ip6_tunnel.h
+++ b/include/net/ip6_tunnel.h
@@ -33,8 +33,8 @@ struct __ip6_tnl_parm {
 };
 
 struct ip6_tnl_dst {
-       spinlock_t lock;
-       struct dst_entry *dst;
+       seqlock_t lock;
+       struct dst_entry __rcu *dst;
        u32 cookie;
 };
 
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
index e68350bf838b1276d571e6ee121aa343c3e024c4..8a9ec01f4d016a9af476959e8256fa459e025e86 100644
--- a/net/ipv6/ip6_fib.c
+++ b/net/ipv6/ip6_fib.c
@@ -155,6 +155,11 @@ static void node_free(struct fib6_node *fn)
        kmem_cache_free(fib6_node_kmem, fn);
 }
 
+static void rt6_rcu_free(struct rt6_info *rt)
+{
+       call_rcu(&rt->dst.rcu_head, dst_rcu_free);
+}
+
 static void rt6_free_pcpu(struct rt6_info *non_pcpu_rt)
 {
        int cpu;
@@ -169,7 +174,7 @@ static void rt6_free_pcpu(struct rt6_info *non_pcpu_rt)
                ppcpu_rt = per_cpu_ptr(non_pcpu_rt->rt6i_pcpu, cpu);
                pcpu_rt = *ppcpu_rt;
                if (pcpu_rt) {
-                       dst_free(&pcpu_rt->dst);
+                       rt6_rcu_free(pcpu_rt);
                        *ppcpu_rt = NULL;
                }
        }
@@ -181,7 +186,7 @@ static void rt6_release(struct rt6_info *rt)
 {
        if (atomic_dec_and_test(&rt->rt6i_ref)) {
                rt6_free_pcpu(rt);
-               dst_free(&rt->dst);
+               rt6_rcu_free(rt);
        }
 }
 
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index 851cf6d1eb45a76cfa9f6c04255ba0344c76ebb7..983f0d20f96d74167c2fdcd74cf04858166e9ce0 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -126,45 +126,48 @@ static struct net_device_stats *ip6_get_stats(struct net_device *dev)
  * Locking : hash tables are protected by RCU and RTNL
  */
 
-static void __ip6_tnl_per_cpu_dst_set(struct ip6_tnl_dst *idst,
-                                     struct dst_entry *dst)
+static void ip6_tnl_per_cpu_dst_set(struct ip6_tnl_dst *idst,
+                                   struct dst_entry *dst)
 {
-       dst_release(idst->dst);
+       write_seqlock_bh(&idst->lock);
+       dst_release(rcu_dereference_protected(
+                           idst->dst,
+                           lockdep_is_held(&idst->lock.lock)));
        if (dst) {
                dst_hold(dst);
                idst->cookie = rt6_get_cookie((struct rt6_info *)dst);
        } else {
                idst->cookie = 0;
        }
-       idst->dst = dst;
-}
-
-static void ip6_tnl_per_cpu_dst_set(struct ip6_tnl_dst *idst,
-                                   struct dst_entry *dst)
-{
-
-       spin_lock_bh(&idst->lock);
-       __ip6_tnl_per_cpu_dst_set(idst, dst);
-       spin_unlock_bh(&idst->lock);
+       rcu_assign_pointer(idst->dst, dst);
+       write_sequnlock_bh(&idst->lock);
 }
 
 struct dst_entry *ip6_tnl_dst_get(struct ip6_tnl *t)
 {
        struct ip6_tnl_dst *idst;
        struct dst_entry *dst;
+       unsigned int seq;
+       u32 cookie;
 
        idst = raw_cpu_ptr(t->dst_cache);
-       spin_lock_bh(&idst->lock);
-       dst = idst->dst;
-       if (dst) {
-               if (!dst->obsolete || dst->ops->check(dst, idst->cookie)) {
-                       dst_hold(idst->dst);
-               } else {
-                       __ip6_tnl_per_cpu_dst_set(idst, NULL);
-                       dst = NULL;
-               }
+
+       rcu_read_lock();
+       do {
+               seq = read_seqbegin(&idst->lock);
+               dst = rcu_dereference(idst->dst);
+               cookie = idst->cookie;
+       } while (read_seqretry(&idst->lock, seq));
+
+       if (dst && !atomic_inc_not_zero(&dst->__refcnt))
+               dst = NULL;
+       rcu_read_unlock();
+
+       if (dst && dst->obsolete && !dst->ops->check(dst, cookie)) {
+               ip6_tnl_per_cpu_dst_set(idst, NULL);
+               dst_release(dst);
+               dst = NULL;
        }
-       spin_unlock_bh(&idst->lock);
        return dst;
 }
 EXPORT_SYMBOL_GPL(ip6_tnl_dst_get);
@@ -204,7 +207,7 @@ int ip6_tnl_dst_init(struct ip6_tnl *t)
                return -ENOMEM;
 
        for_each_possible_cpu(i)
-               spin_lock_init(&per_cpu_ptr(t->dst_cache, i)->lock);
+               seqlock_init(&per_cpu_ptr(t->dst_cache, i)->lock);
 
        return 0;
 }