workqueue: implement WQ_NON_REENTRANT
author	Tejun Heo <tj@kernel.org>
Tue, 29 Jun 2010 08:07:13 +0000 (10:07 +0200)
committer	Tejun Heo <tj@kernel.org>
Tue, 29 Jun 2010 08:07:13 +0000 (10:07 +0200)
With gcwq managing all the workers and work->data pointing to the last
gcwq it was on, non-reentrance can be implemented simply by checking at
queueing time whether the work is still running on the previous gcwq.
Implement it.
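
For context, a minimal usage sketch (not part of this patch): a driver
that needs the non-reentrancy guarantee passes WQ_NON_REENTRANT when
allocating its workqueue.  The names below are hypothetical, and the
sketch assumes a workqueue allocator that accepts WQ_* flags
(alloc_workqueue(); kernels that only have __create_workqueue() would
pass the flag through that instead).

	#include <linux/workqueue.h>

	/* hypothetical driver state */
	static struct workqueue_struct *foo_wq;
	static struct work_struct foo_work;

	static void foo_work_fn(struct work_struct *work)
	{
		/*
		 * With WQ_NON_REENTRANT, this function is never run
		 * concurrently with itself on another CPU for the same
		 * work item, even if the work is requeued from a
		 * different CPU while a previous instance is running.
		 */
	}

	static int __init foo_init(void)
	{
		foo_wq = alloc_workqueue("foo", WQ_NON_REENTRANT, 0);
		if (!foo_wq)
			return -ENOMEM;
		INIT_WORK(&foo_work, foo_work_fn);
		queue_work(foo_wq, &foo_work);
		return 0;
	}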

Signed-off-by: Tejun Heo <tj@kernel.org>
include/linux/workqueue.h
kernel/workqueue.c

diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index 0a7814131e666c4ad9f7932d9ce955cab1524e7b..07cf5e5f91cbf747d3b2e069be419d76fae4564e 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -225,6 +225,7 @@ static inline unsigned int work_static(struct work_struct *work) { return 0; }
 enum {
        WQ_FREEZEABLE           = 1 << 0, /* freeze during suspend */
        WQ_SINGLE_CPU           = 1 << 1, /* only single cpu at a time */
+       WQ_NON_REENTRANT        = 1 << 2, /* guarantee non-reentrance */
 };
 
 extern struct workqueue_struct *
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index c68277c204abf2432ad92793ab7e005cdda5fc76..bce1074bdec1e826071a38662e43c4d8d74c10bc 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -534,11 +534,37 @@ static void __queue_work(unsigned int cpu, struct workqueue_struct *wq,
 
        debug_work_activate(work);
 
-       /* determine gcwq to use */
+       /*
+        * Determine gcwq to use.  SINGLE_CPU is inherently
+        * NON_REENTRANT, so test it first.
+        */
        if (!(wq->flags & WQ_SINGLE_CPU)) {
-               /* just use the requested cpu for multicpu workqueues */
+               struct global_cwq *last_gcwq;
+
+               /*
+                * It's multi cpu.  If @wq is non-reentrant and @work
+                * was previously on a different cpu, it might still
+                * be running there, in which case the work needs to
+                * be queued on that cpu to guarantee non-reentrance.
+                */
                gcwq = get_gcwq(cpu);
-               spin_lock_irqsave(&gcwq->lock, flags);
+               if (wq->flags & WQ_NON_REENTRANT &&
+                   (last_gcwq = get_work_gcwq(work)) && last_gcwq != gcwq) {
+                       struct worker *worker;
+
+                       spin_lock_irqsave(&last_gcwq->lock, flags);
+
+                       worker = find_worker_executing_work(last_gcwq, work);
+
+                       if (worker && worker->current_cwq->wq == wq)
+                               gcwq = last_gcwq;
+                       else {
+                               /* meh... not running there, queue here */
+                               spin_unlock_irqrestore(&last_gcwq->lock, flags);
+                               spin_lock_irqsave(&gcwq->lock, flags);
+                       }
+               } else
+                       spin_lock_irqsave(&gcwq->lock, flags);
        } else {
                unsigned int req_cpu = cpu;