SUNRPC: More optimisations of svc_xprt_enqueue()
author     Trond Myklebust <trond.myklebust@primarydata.com>
Sun, 3 Aug 2014 17:03:12 +0000 (13:03 -0400)
committer  J. Bruce Fields <bfields@redhat.com>
Sun, 17 Aug 2014 16:00:11 +0000 (12:00 -0400)
Move the transport locking (the XPT_BUSY test_and_set_bit) out of the
spin lock protected area altogether: mark the transport busy first, and
only take pool->sp_lock once we know the transport actually needs to be
enqueued.
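
As a rough illustration of the resulting ordering, here is a minimal
userspace sketch, not the kernel code itself: the names fake_xprt,
fake_pool and try_enqueue, and the use of <stdatomic.h> and pthread
spinlocks, are stand-ins for XPT_BUSY and pool->sp_lock. The busy bit is
tested-and-set atomically before the pool lock is taken, so already-busy
transports never touch the lock:

    #include <stdatomic.h>
    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    struct fake_xprt {
            atomic_flag busy;            /* stands in for the XPT_BUSY bit */
    };

    struct fake_pool {
            pthread_spinlock_t sp_lock;  /* stands in for pool->sp_lock */
            unsigned long packets;       /* stands in for sp_stats.packets */
    };

    static bool try_enqueue(struct fake_pool *pool, struct fake_xprt *xprt)
    {
            /* Atomic busy check done before taking any lock: if the
             * transport is already enqueued, return without ever
             * touching the pool lock. */
            if (atomic_flag_test_and_set(&xprt->busy))
                    return false;

            /* Only transports that will actually be enqueued contend here. */
            pthread_spin_lock(&pool->sp_lock);
            pool->packets++;
            /* ... hand the transport to an idle thread or queue it ... */
            pthread_spin_unlock(&pool->sp_lock);
            return true;
    }

    int main(void)
    {
            struct fake_pool pool = { .packets = 0 };
            struct fake_xprt xprt = { .busy = ATOMIC_FLAG_INIT };

            pthread_spin_init(&pool.sp_lock, PTHREAD_PROCESS_PRIVATE);
            printf("first enqueue: %d\n", try_enqueue(&pool, &xprt));  /* 1 */
            printf("second enqueue: %d\n", try_enqueue(&pool, &xprt)); /* 0: busy */
            pthread_spin_destroy(&pool.sp_lock);
            return 0;
    }

With this ordering the spinlock critical section covers only the stats
bump and the actual enqueue/handoff, and the lock is skipped entirely
for transports that are already queued.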

Signed-off-by: Trond Myklebust <trond.myklebust@primarydata.com>
Signed-off-by: J. Bruce Fields <bfields@redhat.com>
net/sunrpc/svc_xprt.c

index 5eb6f32df3e5467d015d64e14065b8c0b9615c56..c0db66d81e344da69c2a80c903f14a5a231e5eaf 100644
@@ -346,18 +346,6 @@ static void svc_xprt_do_enqueue(struct svc_xprt *xprt)
        if (!svc_xprt_has_something_to_do(xprt))
                return;
 
-       cpu = get_cpu();
-       pool = svc_pool_for_cpu(xprt->xpt_server, cpu);
-       spin_lock_bh(&pool->sp_lock);
-
-       if (!list_empty(&pool->sp_threads) &&
-           !list_empty(&pool->sp_sockets))
-               printk(KERN_ERR
-                      "svc_xprt_enqueue: "
-                      "threads and transports both waiting??\n");
-
-       pool->sp_stats.packets++;
-
        /* Mark transport as busy. It will remain in this state until
         * the provider calls svc_xprt_received. We update XPT_BUSY
         * atomically because it also guards against trying to enqueue
@@ -366,9 +354,15 @@ static void svc_xprt_do_enqueue(struct svc_xprt *xprt)
        if (test_and_set_bit(XPT_BUSY, &xprt->xpt_flags)) {
                /* Don't enqueue transport while already enqueued */
                dprintk("svc: transport %p busy, not enqueued\n", xprt);
-               goto out_unlock;
+               return;
        }
 
+       cpu = get_cpu();
+       pool = svc_pool_for_cpu(xprt->xpt_server, cpu);
+       spin_lock_bh(&pool->sp_lock);
+
+       pool->sp_stats.packets++;
+
        if (!list_empty(&pool->sp_threads)) {
                rqstp = list_entry(pool->sp_threads.next,
                                   struct svc_rqst,
@@ -395,7 +389,6 @@ static void svc_xprt_do_enqueue(struct svc_xprt *xprt)
                pool->sp_stats.sockets_queued++;
        }
 
-out_unlock:
        spin_unlock_bh(&pool->sp_lock);
        put_cpu();
 }