workqueue: remove pwq_lock which is no longer used
authorLai Jiangshan <laijs@cn.fujitsu.com>
Mon, 25 Mar 2013 23:57:19 +0000 (16:57 -0700)
committerTejun Heo <tj@kernel.org>
Mon, 25 Mar 2013 23:57:19 +0000 (16:57 -0700)
To simplify locking, the previous patches expanded wq->mutex to
protect all fields of each workqueue instance including the pwqs list,
leaving pwq_lock without any user.  Remove the unused pwq_lock.

tj: Rebased on top of the current dev branch.  Updated description.

Signed-off-by: Lai Jiangshan <laijs@cn.fujitsu.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
kernel/workqueue.c

index af6087a5a10a3e4a89e744cc68de2186b1ec7156..04a8b98d30ce5778548b5aec1ad10cc62a17dc20 100644 (file)
@@ -125,12 +125,9 @@ enum {
  *
  * PR: wq_pool_mutex protected for writes.  Sched-RCU protected for reads.
  *
- * PW: pwq_lock protected.
- *
  * WQ: wq->mutex protected.
  *
- * WR: wq->mutex and pwq_lock protected for writes.  Sched-RCU protected
- *     for reads.
+ * WR: wq->mutex protected for writes.  Sched-RCU protected for reads.
  *
  * MD: wq_mayday_lock protected.
  */
@@ -257,7 +254,6 @@ struct workqueue_struct {
 static struct kmem_cache *pwq_cache;
 
 static DEFINE_MUTEX(wq_pool_mutex);    /* protects pools and workqueues list */
-static DEFINE_SPINLOCK(pwq_lock);      /* protects pool_workqueues */
 static DEFINE_SPINLOCK(wq_mayday_lock);        /* protects wq->maydays list */
 
 static LIST_HEAD(workqueues);          /* PL: list of all workqueues */
@@ -300,8 +296,7 @@ static void copy_workqueue_attrs(struct workqueue_attrs *to,
 
 #define assert_rcu_or_wq_mutex(wq)                                     \
        rcu_lockdep_assert(rcu_read_lock_sched_held() ||                \
-                          lockdep_is_held(&wq->mutex) ||               \
-                          lockdep_is_held(&pwq_lock),                  \
+                          lockdep_is_held(&wq->mutex),                 \
                           "sched RCU or wq->mutex should be held")
 
 #ifdef CONFIG_LOCKDEP
@@ -3549,9 +3544,7 @@ static void pwq_unbound_release_workfn(struct work_struct *work)
         * and consistent with the linking path.
         */
        mutex_lock(&wq->mutex);
-       spin_lock_irq(&pwq_lock);
        list_del_rcu(&pwq->pwqs_node);
-       spin_unlock_irq(&pwq_lock);
        mutex_unlock(&wq->mutex);
 
        put_unbound_pool(pool);
@@ -3635,9 +3628,7 @@ static void init_and_link_pwq(struct pool_workqueue *pwq,
        pwq_adjust_max_active(pwq);
 
        /* link in @pwq */
-       spin_lock_irq(&pwq_lock);
        list_add_rcu(&pwq->pwqs_node, &wq->pwqs);
-       spin_unlock_irq(&pwq_lock);
 
        mutex_unlock(&wq->mutex);
 }