tipc: always use tipc_node_lock() to hold node lock
authorYing Xue <ying.xue@windriver.com>
Mon, 5 May 2014 00:56:09 +0000 (08:56 +0800)
committerDavid S. Miller <davem@davemloft.net>
Mon, 5 May 2014 21:26:43 +0000 (17:26 -0400)
Although we obtain the node lock with tipc_node_lock() most of the time,
there are still places where we directly use the native spin lock
interface to grab the node lock. But as we will do more work in the
future when the node lock is released, we should ensure that
tipc_node_lock() is always called when the node lock is taken.

Signed-off-by: Ying Xue <ying.xue@windriver.com>
Reviewed-by: Erik Hugne <erik.hugne@ericsson.com>
Reviewed-by: Jon Maloy <jon.maloy@ericsson.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
net/tipc/link.c
net/tipc/name_distr.c

index c723ee90219da1fd6073b79741893c21fea2f6e3..3a801452643d0463061565b8f7bbea295a7d4405 100644 (file)
@@ -297,14 +297,14 @@ void tipc_link_delete_list(unsigned int bearer_id, bool shutting_down)
 
        rcu_read_lock();
        list_for_each_entry_rcu(n_ptr, &tipc_node_list, list) {
-               spin_lock_bh(&n_ptr->lock);
+               tipc_node_lock(n_ptr);
                l_ptr = n_ptr->links[bearer_id];
                if (l_ptr) {
                        tipc_link_reset(l_ptr);
                        if (shutting_down || !tipc_node_is_up(n_ptr)) {
                                tipc_node_detach_link(l_ptr->owner, l_ptr);
                                tipc_link_reset_fragments(l_ptr);
-                               spin_unlock_bh(&n_ptr->lock);
+                               tipc_node_unlock(n_ptr);
 
                                /* Nobody else can access this link now: */
                                del_timer_sync(&l_ptr->timer);
@@ -312,12 +312,12 @@ void tipc_link_delete_list(unsigned int bearer_id, bool shutting_down)
                        } else {
                                /* Detach/delete when failover is finished: */
                                l_ptr->flags |= LINK_STOPPED;
-                               spin_unlock_bh(&n_ptr->lock);
+                               tipc_node_unlock(n_ptr);
                                del_timer_sync(&l_ptr->timer);
                        }
                        continue;
                }
-               spin_unlock_bh(&n_ptr->lock);
+               tipc_node_unlock(n_ptr);
        }
        rcu_read_unlock();
 }
@@ -474,11 +474,11 @@ void tipc_link_reset_list(unsigned int bearer_id)
 
        rcu_read_lock();
        list_for_each_entry_rcu(n_ptr, &tipc_node_list, list) {
-               spin_lock_bh(&n_ptr->lock);
+               tipc_node_lock(n_ptr);
                l_ptr = n_ptr->links[bearer_id];
                if (l_ptr)
                        tipc_link_reset(l_ptr);
-               spin_unlock_bh(&n_ptr->lock);
+               tipc_node_unlock(n_ptr);
        }
        rcu_read_unlock();
 }
index 974a73f3d87622f867ad07ffd563a0856dc7d985..8465263246c3211c996fa8ed8c1e5e6d5f1c76ed 100644 (file)
@@ -135,18 +135,18 @@ void named_cluster_distribute(struct sk_buff *buf)
 
        rcu_read_lock();
        list_for_each_entry_rcu(n_ptr, &tipc_node_list, list) {
-               spin_lock_bh(&n_ptr->lock);
+               tipc_node_lock(n_ptr);
                l_ptr = n_ptr->active_links[n_ptr->addr & 1];
                if (l_ptr) {
                        buf_copy = skb_copy(buf, GFP_ATOMIC);
                        if (!buf_copy) {
-                               spin_unlock_bh(&n_ptr->lock);
+                               tipc_node_unlock(n_ptr);
                                break;
                        }
                        msg_set_destnode(buf_msg(buf_copy), n_ptr->addr);
                        __tipc_link_xmit(l_ptr, buf_copy);
                }
-               spin_unlock_bh(&n_ptr->lock);
+               tipc_node_unlock(n_ptr);
        }
        rcu_read_unlock();