staging/lustre: remove assertion of spin_is_locked()
author  Li Xi <lixi@ddn.com>
Sun, 27 Apr 2014 17:07:06 +0000 (13:07 -0400)
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Sun, 27 Apr 2014 17:31:00 +0000 (10:31 -0700)
On uniprocessor (!CONFIG_SMP) kernels built without CONFIG_DEBUG_SPINLOCK,
spin_is_locked() always returns false, so assertions of the form
LASSERT(spin_is_locked(lock)) fail even when the lock is actually held.
This patch replaces those assertions with assert_spin_locked(). The one
helper that needs a boolean result, osc_object_is_locked(), keeps
spin_is_locked() on SMP/debug builds and simply returns 1 otherwise.
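
For illustration, a minimal, self-contained sketch of the pattern being
converted (the lock and function names below are made up for this example
and do not appear in the patch):

    #include <linux/spinlock.h>

    /* Hypothetical lock and helpers, for illustration only. */
    static DEFINE_SPINLOCK(foo_lock);

    static void foo_update_locked(void)
    {
            /*
             * Old style: LASSERT(spin_is_locked(&foo_lock));
             * On !CONFIG_SMP && !CONFIG_DEBUG_SPINLOCK kernels,
             * spin_is_locked() is hard-wired to 0, so that assertion
             * would fail even though the caller holds the lock.
             */

            /* New style used throughout this patch: */
            assert_spin_locked(&foo_lock);

            /* ... update state protected by foo_lock ... */
    }

    static void foo_update(void)
    {
            spin_lock(&foo_lock);
            foo_update_locked();
            spin_unlock(&foo_lock);
    }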

Signed-off-by: Li Xi <lixi@ddn.com>
Signed-off-by: James Simmons <uja.ornl@gmail.com>
Reviewed-on: http://review.whamcloud.com/8144
Intel-bug-id: https://jira.hpdd.intel.com/browse/LU-4199
Reviewed-by: Alexey Lyashkov <alexey_lyashkov@xyratex.com>
Reviewed-by: Andreas Dilger <andreas.dilger@intel.com>
Signed-off-by: Oleg Drokin <oleg.drokin@intel.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
16 files changed:
drivers/staging/lustre/include/linux/libcfs/linux/linux-lock.h
drivers/staging/lustre/lustre/include/lustre_dlm.h
drivers/staging/lustre/lustre/include/lustre_net.h
drivers/staging/lustre/lustre/lov/lov_merge.c
drivers/staging/lustre/lustre/obdclass/cl_lock.c
drivers/staging/lustre/lustre/obdclass/cl_object.c
drivers/staging/lustre/lustre/obdclass/cl_page.c
drivers/staging/lustre/lustre/osc/osc_cache.c
drivers/staging/lustre/lustre/osc/osc_cl_internal.h
drivers/staging/lustre/lustre/ptlrpc/client.c
drivers/staging/lustre/lustre/ptlrpc/gss/gss_pipefs.c
drivers/staging/lustre/lustre/ptlrpc/import.c
drivers/staging/lustre/lustre/ptlrpc/lproc_ptlrpc.c
drivers/staging/lustre/lustre/ptlrpc/pinger.c
drivers/staging/lustre/lustre/ptlrpc/sec_bulk.c
drivers/staging/lustre/lustre/ptlrpc/service.c

index d6e00f92e4a03b44bcd3be2e8af3d55c9b558552..b75e401d9a973c883f6c4785907e210f2c226991 100644 (file)
@@ -66,7 +66,7 @@
  * - spin_unlock(x)
  * - spin_unlock_bh(x)
  * - spin_trylock(x)
- * - spin_is_locked(x)
+ * - assert_spin_locked(x)
  *
  * - spin_lock_irq(x)
  * - spin_lock_irqsave(x, f)
index e28e31a263696f84e8b57bc35f7de255aa01a75f..0c6b7841e56d09b47ba168082af285d04991fa46 100644 (file)
@@ -1445,7 +1445,7 @@ static inline void unlock_res(struct ldlm_resource *res)
 /** Check if resource is already locked, assert if not. */
 static inline void check_res_locked(struct ldlm_resource *res)
 {
-       LASSERT(spin_is_locked(&res->lr_lock));
+       assert_spin_locked(&res->lr_lock);
 }
 
 struct ldlm_resource * lock_res_and_lock(struct ldlm_lock *lock);
index 7640e179e39db47ca00120b9d75b19fafeddae56..f6b7d10cb78cf64c46652370cf803c4af0e1b6a5 100644 (file)
@@ -719,7 +719,7 @@ struct ptlrpc_nrs_pol_ops {
         *                       \a nrq
         * \param[in,out] nrq    The request
         *
-        * \pre spin_is_locked(&svcpt->scp_req_lock)
+        * \pre assert_spin_locked(&svcpt->scp_req_lock)
         *
         * \see ptlrpc_nrs_req_stop_nolock()
         */
index 0a14cee67b88b8a791eb5c1014afb2b008fe6c56..da959e90137143163fd0f7df0276dde97702f30a 100644 (file)
@@ -58,7 +58,7 @@ int lov_merge_lvb_kms(struct lov_stripe_md *lsm,
        int i;
        int rc = 0;
 
-       LASSERT(spin_is_locked(&lsm->lsm_lock));
+       assert_spin_locked(&lsm->lsm_lock);
        LASSERT(lsm->lsm_lock_owner == current_pid());
 
        CDEBUG(D_INODE, "MDT ID "DOSTID" initial value: s="LPU64" m="LPU64
@@ -145,7 +145,7 @@ int lov_adjust_kms(struct obd_export *exp, struct lov_stripe_md *lsm,
        int stripe = 0;
        __u64 kms;
 
-       LASSERT(spin_is_locked(&lsm->lsm_lock));
+       assert_spin_locked(&lsm->lsm_lock);
        LASSERT(lsm->lsm_lock_owner == current_pid());
 
        if (shrink) {
index f8040a8923b27eb284a741673f4be6c2bbcd38fb..df77c4fc0eacdd2528b1542ce01931f362bedc83 100644 (file)
@@ -478,7 +478,7 @@ static struct cl_lock *cl_lock_lookup(const struct lu_env *env,
        struct cl_object_header *head;
 
        head = cl_object_header(obj);
-       LINVRNT(spin_is_locked(&head->coh_lock_guard));
+       assert_spin_locked(&head->coh_lock_guard);
        CS_LOCK_INC(obj, lookup);
        list_for_each_entry(lock, &head->coh_locks, cll_linkage) {
                int matched;
index 2837cbc8865d0b7bf81b62cd55a29648742e24a0..41cbc95b916e2d6e1806692cbd8edb1ecf234d59 100644 (file)
@@ -220,7 +220,7 @@ int cl_object_attr_get(const struct lu_env *env, struct cl_object *obj,
        struct lu_object_header *top;
        int result;
 
-       LASSERT(spin_is_locked(cl_object_attr_guard(obj)));
+       assert_spin_locked(cl_object_attr_guard(obj));
 
        top = obj->co_lu.lo_header;
        result = 0;
@@ -251,7 +251,7 @@ int cl_object_attr_set(const struct lu_env *env, struct cl_object *obj,
        struct lu_object_header *top;
        int result;
 
-       LASSERT(spin_is_locked(cl_object_attr_guard(obj)));
+       assert_spin_locked(cl_object_attr_guard(obj));
 
        top = obj->co_lu.lo_header;
        result = 0;
index a4b04cba0b24d8b7df432d7a5c55dec207cac3e7..1b616e4fe140b49ca1e8bb800efce30f0a8709da 100644 (file)
@@ -130,7 +130,7 @@ struct cl_page *cl_page_lookup(struct cl_object_header *hdr, pgoff_t index)
 {
        struct cl_page *page;
 
-       LASSERT(spin_is_locked(&hdr->coh_page_guard));
+       assert_spin_locked(&hdr->coh_page_guard);
 
        page = radix_tree_lookup(&hdr->coh_tree, index);
        if (page != NULL)
index fe9989a49f81da900902eec0efef949ccecadf71..00f38eeb57865936a9af0877d631afeb5d2ec79d 100644 (file)
@@ -1311,7 +1311,7 @@ static int osc_completion(const struct lu_env *env, struct osc_async_page *oap,
 static void osc_consume_write_grant(struct client_obd *cli,
                                    struct brw_page *pga)
 {
-       LASSERT(spin_is_locked(&cli->cl_loi_list_lock.lock));
+       assert_spin_locked(&cli->cl_loi_list_lock.lock);
        LASSERT(!(pga->flag & OBD_BRW_FROM_GRANT));
        atomic_inc(&obd_dirty_pages);
        cli->cl_dirty += PAGE_CACHE_SIZE;
@@ -1326,7 +1326,7 @@ static void osc_consume_write_grant(struct client_obd *cli,
 static void osc_release_write_grant(struct client_obd *cli,
                                    struct brw_page *pga)
 {
-       LASSERT(spin_is_locked(&cli->cl_loi_list_lock.lock));
+       assert_spin_locked(&cli->cl_loi_list_lock.lock);
        if (!(pga->flag & OBD_BRW_FROM_GRANT)) {
                return;
        }
index 9e7899fa4cc491c038bbc39802ada20d0969d601..e74b7bb9776cecd699f4f0b3c8dc05255874dfc8 100644 (file)
@@ -176,7 +176,16 @@ static inline void osc_object_unlock(struct osc_object *obj)
 
 static inline int osc_object_is_locked(struct osc_object *obj)
 {
+#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
        return spin_is_locked(&obj->oo_lock);
+#else
+       /*
+        * It is not perfect to return true all the time.
+        * But since this function is only used for assertion
+        * and checking, it seems OK.
+        */
+       return 1;
+#endif
 }
 
 /*
index 20987723bb56a2b8962143cc9136a92dea002cd6..7246e8ce9c1965d0e4daba8a753aca761fc20eab 100644 (file)
@@ -2271,7 +2271,7 @@ static int __ptlrpc_req_finished(struct ptlrpc_request *request, int locked);
  */
 void ptlrpc_req_finished_with_imp_lock(struct ptlrpc_request *request)
 {
-       LASSERT(spin_is_locked(&request->rq_import->imp_lock));
+       assert_spin_locked(&request->rq_import->imp_lock);
        (void)__ptlrpc_req_finished(request, 1);
 }
 EXPORT_SYMBOL(ptlrpc_req_finished_with_imp_lock);
@@ -2452,9 +2452,7 @@ void ptlrpc_free_committed(struct obd_import *imp)
        bool                   skip_committed_list = true;
 
        LASSERT(imp != NULL);
-
-       LASSERT(spin_is_locked(&imp->imp_lock));
-
+       assert_spin_locked(&imp->imp_lock);
 
        if (imp->imp_peer_committed_transno == imp->imp_last_transno_checked &&
            imp->imp_generation == imp->imp_last_generation_checked) {
@@ -2585,7 +2583,7 @@ void ptlrpc_retain_replayable_request(struct ptlrpc_request *req,
 {
        struct list_head *tmp;
 
-       LASSERT(spin_is_locked(&imp->imp_lock));
+       assert_spin_locked(&imp->imp_lock);
 
        if (req->rq_transno == 0) {
                DEBUG_REQ(D_EMERG, req, "saving request with zero transno");
index 7a1ff4fecbe3c3d1ffd4f9894da2ac2b0c383fc7..3be5bc14c4ed5865633133d5a6847118789f7f57 100644 (file)
@@ -137,7 +137,7 @@ void ctx_enhash_pf(struct ptlrpc_cli_ctx *ctx, struct hlist_head *hash)
 static
 void ctx_unhash_pf(struct ptlrpc_cli_ctx *ctx, struct hlist_head *freelist)
 {
-       LASSERT(spin_is_locked(&ctx->cc_sec->ps_lock));
+       assert_spin_locked(&ctx->cc_sec->ps_lock);
        LASSERT(atomic_read(&ctx->cc_refcount) > 0);
        LASSERT(test_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags));
        LASSERT(!hlist_unhashed(&ctx->cc_cache));
@@ -719,7 +719,7 @@ void gss_unhash_msg_nolock(struct gss_upcall_msg *gmsg)
        __u32 idx = gmsg->gum_mechidx;
 
        LASSERT(idx < MECH_MAX);
-       LASSERT(spin_is_locked(&upcall_locks[idx]));
+       assert_spin_locked(&upcall_locks[idx]);
 
        if (list_empty(&gmsg->gum_list))
                return;
index b231452d62151bda546a2acb6299ef1d89f53e73..1c73194421a6ccb2126f39584f71ebf545b9a8d8 100644 (file)
@@ -194,7 +194,7 @@ int ptlrpc_set_import_discon(struct obd_import *imp, __u32 conn_cnt)
 /* Must be called with imp_lock held! */
 static void ptlrpc_deactivate_and_unlock_import(struct obd_import *imp)
 {
-       LASSERT(spin_is_locked(&imp->imp_lock));
+       assert_spin_locked(&imp->imp_lock);
 
        CDEBUG(D_HA, "setting import %s INVALID\n", obd2cli_tgt(imp->imp_obd));
        imp->imp_invalid = 1;
index 7a422ff759bfbd0a55263aba5ae58d57dc76f38f..6b9c6db1f2df6d3048c99bf4b8ede5441eda002e 100644 (file)
@@ -449,7 +449,7 @@ void nrs_policy_get_info_locked(struct ptlrpc_nrs_policy *policy,
 {
        LASSERT(policy != NULL);
        LASSERT(info != NULL);
-       LASSERT(spin_is_locked(&policy->pol_nrs->nrs_lock));
+       assert_spin_locked(&policy->pol_nrs->nrs_lock);
 
        memcpy(info->pi_name, policy->pol_desc->pd_name, NRS_POL_NAME_MAX);
 
index 6dff502ce23e6fdc3764cda314a3cf346b582735..38099d9dfdaeb65f028d3d59492c0ac415c20df4 100644 (file)
@@ -368,7 +368,7 @@ EXPORT_SYMBOL(ptlrpc_pinger_sending_on_import);
 void ptlrpc_pinger_commit_expected(struct obd_import *imp)
 {
        ptlrpc_update_next_ping(imp, 1);
-       LASSERT(spin_is_locked(&imp->imp_lock));
+       assert_spin_locked(&imp->imp_lock);
        /*
         * Avoid reading stale imp_connect_data.  When not sure if pings are
         * expected or not on next connection, we assume they are not and force
index 1d46b5e2b0bc3366080c6c1bcb3e9f94bc81419a..9d51badea73dbfa0fcde3edd4d9cf1a3e697f5d7 100644 (file)
@@ -450,7 +450,7 @@ out:
 
 static inline void enc_pools_wakeup(void)
 {
-       LASSERT(spin_is_locked(&page_pools.epp_lock));
+       assert_spin_locked(&page_pools.epp_lock);
        LASSERT(page_pools.epp_waitqlen >= 0);
 
        if (unlikely(page_pools.epp_waitqlen)) {
index 5d0eb6cb587c55e93d8733b0c557d420ec3a7e63..d278f2e218030720daa291e0799941d1dc05ca6b 100644 (file)
@@ -384,8 +384,8 @@ void ptlrpc_dispatch_difficult_reply(struct ptlrpc_reply_state *rs)
 void
 ptlrpc_schedule_difficult_reply(struct ptlrpc_reply_state *rs)
 {
-       LASSERT(spin_is_locked(&rs->rs_svcpt->scp_rep_lock));
-       LASSERT(spin_is_locked(&rs->rs_lock));
+       assert_spin_locked(&rs->rs_svcpt->scp_rep_lock);
+       assert_spin_locked(&rs->rs_lock);
        LASSERT(rs->rs_difficult);
        rs->rs_scheduled_ever = 1;  /* flag any notification attempt */