svcrdma: Use standard Linux lists for context cache
author Tom Tucker <tom@opengridcomputing.com>
Thu, 1 May 2008 01:44:39 +0000 (20:44 -0500)
committer Tom Tucker <tom@opengridcomputing.com>
Mon, 19 May 2008 12:33:52 +0000 (07:33 -0500)
Replace the one-off linked list implementation used to implement the
context cache with the standard Linux list_head lists. Add a context
counter to catch resource leaks. A WARN_ON will be added later to
ensure that we've freed all contexts.

Signed-off-by: Tom Tucker <tom@opengridcomputing.com>
include/linux/sunrpc/svc_rdma.h
net/sunrpc/xprtrdma/svc_rdma_transport.c

index c447c417b37bac805095cea90809d9f020ffdbf6..701439064d2134db35ea4bc730786cb04616a536 100644 (file)
@@ -72,7 +72,7 @@ extern atomic_t rdma_stat_sq_prod;
  */
 struct svc_rdma_op_ctxt {
        struct svc_rdma_op_ctxt *read_hdr;
-       struct svc_rdma_op_ctxt *next;
+       struct list_head free_list;
        struct xdr_buf arg;
        struct list_head dto_q;
        enum ib_wr_opcode wr_op;
@@ -104,7 +104,8 @@ struct svcxprt_rdma {
 
        struct ib_pd         *sc_pd;
 
-       struct svc_rdma_op_ctxt  *sc_ctxt_head;
+       atomic_t             sc_ctxt_used;
+       struct list_head     sc_ctxt_free;
        int                  sc_ctxt_cnt;
        int                  sc_ctxt_bump;
        int                  sc_ctxt_max;
index 34141eaf25a051024e49e980b411a1db16b13bae..817cf4de746c5f8d9a9e7d66a98040a4b129bb4a 100644 (file)
@@ -103,8 +103,8 @@ static int rdma_bump_context_cache(struct svcxprt_rdma *xprt)
                spin_lock_bh(&xprt->sc_ctxt_lock);
                if (ctxt) {
                        at_least_one = 1;
-                       ctxt->next = xprt->sc_ctxt_head;
-                       xprt->sc_ctxt_head = ctxt;
+                       INIT_LIST_HEAD(&ctxt->free_list);
+                       list_add(&ctxt->free_list, &xprt->sc_ctxt_free);
                } else {
                        /* kmalloc failed...give up for now */
                        xprt->sc_ctxt_cnt--;
@@ -123,7 +123,7 @@ struct svc_rdma_op_ctxt *svc_rdma_get_context(struct svcxprt_rdma *xprt)
 
        while (1) {
                spin_lock_bh(&xprt->sc_ctxt_lock);
-               if (unlikely(xprt->sc_ctxt_head == NULL)) {
+               if (unlikely(list_empty(&xprt->sc_ctxt_free))) {
                        /* Try to bump my cache. */
                        spin_unlock_bh(&xprt->sc_ctxt_lock);
 
@@ -136,12 +136,15 @@ struct svc_rdma_op_ctxt *svc_rdma_get_context(struct svcxprt_rdma *xprt)
                        schedule_timeout_uninterruptible(msecs_to_jiffies(500));
                        continue;
                }
-               ctxt = xprt->sc_ctxt_head;
-               xprt->sc_ctxt_head = ctxt->next;
+               ctxt = list_entry(xprt->sc_ctxt_free.next,
+                                 struct svc_rdma_op_ctxt,
+                                 free_list);
+               list_del_init(&ctxt->free_list);
                spin_unlock_bh(&xprt->sc_ctxt_lock);
                ctxt->xprt = xprt;
                INIT_LIST_HEAD(&ctxt->dto_q);
                ctxt->count = 0;
+               atomic_inc(&xprt->sc_ctxt_used);
                break;
        }
        return ctxt;
@@ -163,10 +166,11 @@ void svc_rdma_put_context(struct svc_rdma_op_ctxt *ctxt, int free_pages)
                                 ctxt->sge[i].addr,
                                 ctxt->sge[i].length,
                                 ctxt->direction);
+
        spin_lock_bh(&xprt->sc_ctxt_lock);
-       ctxt->next = xprt->sc_ctxt_head;
-       xprt->sc_ctxt_head = ctxt;
+       list_add(&ctxt->free_list, &xprt->sc_ctxt_free);
        spin_unlock_bh(&xprt->sc_ctxt_lock);
+       atomic_dec(&xprt->sc_ctxt_used);
 }
 
 /* ib_cq event handler */
@@ -412,28 +416,29 @@ static void create_context_cache(struct svcxprt_rdma *xprt,
        xprt->sc_ctxt_max = ctxt_max;
        xprt->sc_ctxt_bump = ctxt_bump;
        xprt->sc_ctxt_cnt = 0;
-       xprt->sc_ctxt_head = NULL;
+       atomic_set(&xprt->sc_ctxt_used, 0);
+
+       INIT_LIST_HEAD(&xprt->sc_ctxt_free);
        for (i = 0; i < ctxt_count; i++) {
                ctxt = kmalloc(sizeof(*ctxt), GFP_KERNEL);
                if (ctxt) {
-                       ctxt->next = xprt->sc_ctxt_head;
-                       xprt->sc_ctxt_head = ctxt;
+                       INIT_LIST_HEAD(&ctxt->free_list);
+                       list_add(&ctxt->free_list, &xprt->sc_ctxt_free);
                        xprt->sc_ctxt_cnt++;
                }
        }
 }
 
-static void destroy_context_cache(struct svc_rdma_op_ctxt *ctxt)
+static void destroy_context_cache(struct svcxprt_rdma *xprt)
 {
-       struct svc_rdma_op_ctxt *next;
-       if (!ctxt)
-               return;
-
-       do {
-               next = ctxt->next;
+       while (!list_empty(&xprt->sc_ctxt_free)) {
+               struct svc_rdma_op_ctxt *ctxt;
+               ctxt = list_entry(xprt->sc_ctxt_free.next,
+                                 struct svc_rdma_op_ctxt,
+                                 free_list);
+               list_del_init(&ctxt->free_list);
                kfree(ctxt);
-               ctxt = next;
-       } while (next);
+       }
 }
 
 static struct svcxprt_rdma *rdma_create_xprt(struct svc_serv *serv,
@@ -470,7 +475,7 @@ static struct svcxprt_rdma *rdma_create_xprt(struct svc_serv *serv,
                                     reqs +
                                     cma_xprt->sc_sq_depth +
                                     RPCRDMA_MAX_THREADS + 1); /* max */
-               if (!cma_xprt->sc_ctxt_head) {
+               if (list_empty(&cma_xprt->sc_ctxt_free)) {
                        kfree(cma_xprt);
                        return NULL;
                }
@@ -976,7 +981,7 @@ static void svc_rdma_free(struct svc_xprt *xprt)
        if (rdma->sc_pd && !IS_ERR(rdma->sc_pd))
                ib_dealloc_pd(rdma->sc_pd);
 
-       destroy_context_cache(rdma->sc_ctxt_head);
+       destroy_context_cache(rdma);
        kfree(rdma);
 }