* spin_lock_irq(pool->lock) which may be released and regrabbed
* multiple times. Does GFP_KERNEL allocations. Called only from
* manager.
- *
- * RETURNS:
- * %false if no action was taken and pool->lock stayed locked, %true
- * otherwise.
*/
-static bool maybe_create_worker(struct worker_pool *pool)
+static void maybe_create_worker(struct worker_pool *pool)
__releases(&pool->lock)
__acquires(&pool->lock)
{
if (!need_to_create_worker(pool))
- return false;
+ return;
restart:
spin_unlock_irq(&pool->lock);
start_worker(worker);
if (WARN_ON_ONCE(need_to_create_worker(pool)))
goto restart;
- return true;
+ return;
}
if (!need_to_create_worker(pool))
spin_lock_irq(&pool->lock);
if (need_to_create_worker(pool))
goto restart;
- return true;
+ return;
}
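
The CONTEXT note above is the whole contract here: pool->lock is held on entry and on return, but may be dropped in between for the GFP_KERNEL allocation, so anything the caller checked under the lock must be re-verified afterwards. A minimal sketch of that shape, using hypothetical names rather than the real workqueue code:

/*
 * Hypothetical illustration of the "may release and regrab pool->lock"
 * contract: the lock is held on entry and on return, but not throughout,
 * so callers must re-check any condition they tested before calling.
 */
static void create_resource_slowly(struct worker_pool *pool)
__releases(&pool->lock)
__acquires(&pool->lock)
{
        spin_unlock_irq(&pool->lock);           /* drop the lock ... */
        /* ... may sleep here, e.g. a GFP_KERNEL allocation ... */
        spin_lock_irq(&pool->lock);             /* ... regrab before returning */
}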
/**
* LOCKING:
* spin_lock_irq(pool->lock) which may be released and regrabbed
* multiple times. Called only from manager.
- *
- * RETURNS:
- * %false if no action was taken and pool->lock stayed locked, %true
- * otherwise.
*/
-static bool maybe_destroy_workers(struct worker_pool *pool)
+static void maybe_destroy_workers(struct worker_pool *pool)
{
- bool ret = false;
-
while (too_many_workers(pool)) {
struct worker *worker;
unsigned long expires;
}
destroy_worker(worker);
- ret = true;
}
-
- return ret;
}
/**
* multiple times. Does GFP_KERNEL allocations.
*
* RETURNS:
- * spin_lock_irq(pool->lock) which may be released and regrabbed
- * multiple times. Does GFP_KERNEL allocations.
+ * %false if the pool doesn't need management and the caller can safely
+ * start processing works, %true if a management function was performed and
+ * the conditions that the caller verified before calling the function may
+ * no longer be true.
*/
static bool manage_workers(struct worker *worker)
{
struct worker_pool *pool = worker->pool;
- bool ret = false;
/*
* Managership is governed by two mutexes - manager_arb and
* manager_mutex.
*/
if (!mutex_trylock(&pool->manager_arb))
- return ret;
+ return false;
/*
* With manager arbitration won, manager_mutex would be free in
spin_unlock_irq(&pool->lock);
mutex_lock(&pool->manager_mutex);
spin_lock_irq(&pool->lock);
- ret = true;
}
pool->flags &= ~POOL_MANAGE_WORKERS;
* Destroy and then create so that may_start_working() is true
* on return.
*/
- ret |= maybe_destroy_workers(pool);
- ret |= maybe_create_worker(pool);
+ maybe_destroy_workers(pool);
+ maybe_create_worker(pool);
mutex_unlock(&pool->manager_mutex);
mutex_unlock(&pool->manager_arb);
- return ret;
+ return true;
}
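
For reference, the new return contract is consumed by the sole caller roughly like the recheck loop in worker_thread(), abbreviated here: a %true return sends the worker back to re-verify everything it checked before deciding to manage.

recheck:
        /* no more worker necessary? */
        if (!need_more_worker(pool))
                goto sleep;

        /*
         * manage_workers() returning %true means pool->lock may have been
         * dropped and regrabbed, so start the checks over from scratch.
         */
        if (unlikely(!may_start_working(pool)) && manage_workers(worker))
                goto recheck;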
/**
}
EXPORT_SYMBOL_GPL(flush_work);
+struct cwt_wait {
+ wait_queue_t wait;
+ struct work_struct *work;
+};
+
+static int cwt_wakefn(wait_queue_t *wait, unsigned mode, int sync, void *key)
+{
+ struct cwt_wait *cwait = container_of(wait, struct cwt_wait, wait);
+
+ if (cwait->work != key)
+ return 0;
+ return autoremove_wake_function(wait, mode, sync, key);
+}
+
static bool __cancel_work_timer(struct work_struct *work, bool is_dwork)
{
+ static DECLARE_WAIT_QUEUE_HEAD(cancel_waitq);
unsigned long flags;
int ret;
do {
ret = try_to_grab_pending(work, is_dwork, &flags);
/*
- * If someone else is canceling, wait for the same event it
- * would be waiting for before retrying.
+ * If someone else is already canceling, wait for it to
+ * finish. flush_work() doesn't work for PREEMPT_NONE
+ * because we may get scheduled between @work's completion
+ * and the other canceling task resuming and clearing
+ * CANCELING - flush_work() will return false immediately
+ * as @work is no longer busy, try_to_grab_pending() will
+ * return -ENOENT as @work is still being canceled and the
+ * other canceling task won't be able to clear CANCELING as
+ * we're hogging the CPU.
+ *
+ * Let's wait for completion using a waitqueue. As this
+ * may lead to the thundering herd problem, use a custom
+ * wake function which matches @work along with exclusive
+ * wait and wakeup.
*/
- if (unlikely(ret == -ENOENT))
- flush_work(work);
+ if (unlikely(ret == -ENOENT)) {
+ struct cwt_wait cwait;
+
+ init_wait(&cwait.wait);
+ cwait.wait.func = cwt_wakefn;
+ cwait.work = work;
+
+ prepare_to_wait_exclusive(&cancel_waitq, &cwait.wait,
+ TASK_UNINTERRUPTIBLE);
+ if (work_is_canceling(work))
+ schedule();
+ finish_wait(&cancel_waitq, &cwait.wait);
+ }
} while (unlikely(ret < 0));
/* tell other tasks trying to grab @work to back off */
flush_work(work);
clear_work_data(work);
+
+ /*
+	 * Paired with prepare_to_wait_exclusive() above so that either
+ * waitqueue_active() is visible here or !work_is_canceling() is
+ * visible there.
+ */
+ smp_mb();
+ if (waitqueue_active(&cancel_waitq))
+ __wake_up(&cancel_waitq, TASK_NORMAL, 1, work);
+
return ret;
}
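
The barrier pairing above follows the usual store-buffering pattern: the waiter publishes itself on cancel_waitq before testing work_is_canceling(), and the canceling task clears the canceling state before testing waitqueue_active(), so at least one side must observe the other's update and a wakeup cannot be lost. A userspace-style sketch of the same shape with C11 atomics (illustrative names only, not kernel code):

#include <stdatomic.h>
#include <stdbool.h>

static atomic_bool queued;      /* stands in for "a waiter is on cancel_waitq" */
static atomic_bool canceling;   /* stands in for the CANCELING state of @work */

/* waiter side: enqueue first, full barrier, then test the condition */
static bool waiter_should_sleep(void)
{
        atomic_store(&queued, true);                    /* prepare_to_wait_exclusive() */
        atomic_thread_fence(memory_order_seq_cst);      /* barrier implied before the check */
        return atomic_load(&canceling);                 /* work_is_canceling() */
}

/* canceling side: clear the state first, full barrier, then test for waiters */
static bool canceller_must_wake(void)
{
        atomic_store(&canceling, false);                /* clear_work_data() */
        atomic_thread_fence(memory_order_seq_cst);      /* the smp_mb() above */
        return atomic_load(&queued);                    /* waitqueue_active() */
}

/*
 * With both fences in place it is impossible for waiter_should_sleep() to
 * return true while canceller_must_wake() returns false, i.e. a waiter can
 * never be left sleeping after cancellation has already finished.
 */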