block: blk_delay_queue() should use kblockd workqueue
[firefly-linux-kernel-4.4.55.git] / block/blk-core.c
index 3c81210725071ad4df0ac1eff8932ebc1dbb374f..e2bacfa46cc3ee07684b276791e01870c68e26f7 100644
@@ -220,7 +220,8 @@ static void blk_delay_work(struct work_struct *work)
  */
 void blk_delay_queue(struct request_queue *q, unsigned long msecs)
 {
-       schedule_delayed_work(&q->delay_work, msecs_to_jiffies(msecs));
+       queue_delayed_work(kblockd_workqueue, &q->delay_work,
+                               msecs_to_jiffies(msecs));
 }
 EXPORT_SYMBOL(blk_delay_queue);
 
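blk_delay_queue() previously used schedule_delayed_work(), which lands the work on the shared system workqueue; the hunk above queues it on kblockd_workqueue, the block layer's own workqueue, so delayed queue runs are not held up behind unrelated system work. The same pattern in isolation, as a minimal sketch with a hypothetical private workqueue my_wq and handler my_work_fn:

#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <linux/errno.h>

static struct workqueue_struct *my_wq;	/* hypothetical private workqueue */
static struct delayed_work my_dwork;

static void my_work_fn(struct work_struct *work)
{
	/* runs in process context on my_wq, ~10ms after queueing below */
}

static int my_setup(void)
{
	my_wq = alloc_workqueue("my_wq", 0, 0);
	if (!my_wq)
		return -ENOMEM;
	INIT_DELAYED_WORK(&my_dwork, my_work_fn);
	/* analogous to blk_delay_queue(q, 10) after this patch */
	queue_delayed_work(my_wq, &my_dwork, msecs_to_jiffies(10));
	return 0;
}
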
@@ -295,7 +296,8 @@ EXPORT_SYMBOL(blk_sync_queue);
  *
  * Description:
  *    See @blk_run_queue. This variant must be called with the queue lock
- *    held and interrupts disabled.
+ *    held and interrupts disabled. If force_kblockd is true, then it is
+ *    safe to call this without holding the queue lock.
  *
  */
 void __blk_run_queue(struct request_queue *q, bool force_kblockd)
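For context (not part of the diff), __blk_run_queue() at this point in the series looks roughly like the sketch below: with force_kblockd it only queues q->delay_work on kblockd rather than calling ->request_fn directly, which is why the updated comment can relax the locking requirement for that case. Reconstructed from the surrounding code of this era, so treat the details as approximate:

void __blk_run_queue(struct request_queue *q, bool force_kblockd)
{
	if (unlikely(blk_queue_stopped(q)))
		return;

	/*
	 * Only recurse once to avoid overrunning the stack; let the unplug
	 * handling reinvoke the handler shortly if we already got there.
	 */
	if (!force_kblockd && !queue_flag_test_and_set(QUEUE_FLAG_REENTER, q)) {
		q->request_fn(q);
		queue_flag_clear(QUEUE_FLAG_REENTER, q);
	} else
		queue_delayed_work(kblockd_workqueue, &q->delay_work, 0);
}
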
@@ -2638,6 +2640,7 @@ void blk_start_plug(struct blk_plug *plug)
 
        plug->magic = PLUG_MAGIC;
        INIT_LIST_HEAD(&plug->list);
+       INIT_LIST_HEAD(&plug->cb_list);
        plug->should_sort = 0;
 
        /*
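The new cb_list initialized above carries per-plug callbacks, consumed by flush_plug_callbacks() further down. For reference, the two structures involved look essentially like this in the blkdev.h of this era (a sketch; exact field order assumed):

struct blk_plug {
	unsigned long magic;		/* PLUG_MAGIC, catches stale/bogus plugs */
	struct list_head list;		/* requests queued while plugged */
	struct list_head cb_list;	/* blk_plug_cb entries, run at flush time */
	unsigned int should_sort;	/* sort requests by queue before dispatch */
};

struct blk_plug_cb {
	struct list_head list;		/* link on blk_plug.cb_list */
	void (*callback)(struct blk_plug_cb *cb);
};
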
@@ -2662,17 +2665,52 @@ static int plug_rq_cmp(void *priv, struct list_head *a, struct list_head *b)
        return !(rqa->q <= rqb->q);
 }
 
+/*
+ * If 'from_schedule' is true, then postpone the dispatch of requests
+ * until a safe kblockd context. We do this to avoid accidental additional
+ * stack usage in driver dispatch, in places where the original plugger
+ * did not intend it.
+ */
 static void queue_unplugged(struct request_queue *q, unsigned int depth,
-                           bool force_kblockd)
+                           bool from_schedule)
+       __releases(q->queue_lock)
+{
+       trace_block_unplug(q, depth, !from_schedule);
+
+       /*
+        * If we are punting this to kblockd, then we can safely drop
+        * the queue_lock before waking kblockd (which needs to take
+        * this lock).
+        */
+       if (from_schedule) {
+               spin_unlock(q->queue_lock);
+               __blk_run_queue(q, true);
+       } else {
+               __blk_run_queue(q, false);
+               spin_unlock(q->queue_lock);
+       }
+
+}
+
+static void flush_plug_callbacks(struct blk_plug *plug)
 {
-       trace_block_unplug_io(q, depth);
-       __blk_run_queue(q, force_kblockd);
+       LIST_HEAD(callbacks);
+
+       if (list_empty(&plug->cb_list))
+               return;
+
+       list_splice_init(&plug->cb_list, &callbacks);
 
-       if (q->unplugged_fn)
-               q->unplugged_fn(q);
+       while (!list_empty(&callbacks)) {
+               struct blk_plug_cb *cb = list_first_entry(&callbacks,
+                                                         struct blk_plug_cb,
+                                                         list);
+               list_del(&cb->list);
+               cb->callback(cb);
+       }
 }
 
-void blk_flush_plug_list(struct blk_plug *plug, bool force_kblockd)
+void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
 {
        struct request_queue *q;
        unsigned long flags;
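A user of the new callback hook embeds a struct blk_plug_cb, sets ->callback, and links it onto current->plug->cb_list; flush_plug_callbacks() above then pops and invokes it exactly once at flush time. A hedged sketch of such a caller, where my_device, my_dispatch() and friends are hypothetical:

struct my_cb {
	struct blk_plug_cb cb;		/* list + callback, as sketched above */
	struct my_device *dev;		/* hypothetical per-device state */
};

static void my_unplug_cb(struct blk_plug_cb *cb)
{
	struct my_cb *mc = container_of(cb, struct my_cb, cb);

	my_dispatch(mc->dev);		/* kick off the batched work */
	kfree(mc);
}

static void my_defer_to_unplug(struct my_device *dev)
{
	struct blk_plug *plug = current->plug;
	struct my_cb *mc;

	if (!plug) {
		my_dispatch(dev);	/* no plug active, run directly */
		return;
	}
	mc = kmalloc(sizeof(*mc), GFP_ATOMIC);
	if (!mc) {
		my_dispatch(dev);
		return;
	}
	mc->cb.callback = my_unplug_cb;
	mc->dev = dev;
	list_add(&mc->cb.list, &plug->cb_list);
}
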
@@ -2682,6 +2720,7 @@ void blk_flush_plug_list(struct blk_plug *plug, bool force_kblockd)
 
        BUG_ON(plug->magic != PLUG_MAGIC);
 
+       flush_plug_callbacks(plug);
        if (list_empty(&plug->list))
                return;
 
@@ -2706,10 +2745,11 @@ void blk_flush_plug_list(struct blk_plug *plug, bool force_kblockd)
                BUG_ON(!(rq->cmd_flags & REQ_ON_PLUG));
                BUG_ON(!rq->q);
                if (rq->q != q) {
-                       if (q) {
-                               queue_unplugged(q, depth, force_kblockd);
-                               spin_unlock(q->queue_lock);
-                       }
+                       /*
+                        * This drops the queue lock
+                        */
+                       if (q)
+                               queue_unplugged(q, depth, from_schedule);
                        q = rq->q;
                        depth = 0;
                        spin_lock(q->queue_lock);
@@ -2727,10 +2767,11 @@ void blk_flush_plug_list(struct blk_plug *plug, bool force_kblockd)
                depth++;
        }
 
-       if (q) {
-               queue_unplugged(q, depth, force_kblockd);
-               spin_unlock(q->queue_lock);
-       }
+       /*
+        * This drops the queue lock
+        */
+       if (q)
+               queue_unplugged(q, depth, from_schedule);
 
        local_irq_restore(flags);
 }
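
The from_schedule=true path exists for one caller in particular: when a task blocks while still holding a loaded plug, the scheduler flushes the plug on the task's behalf, and punting the dispatch to kblockd keeps driver ->request_fn stack usage off the schedule path. The scheduler-side hook is essentially the following (a sketch of the era's helper, hedged):

/* called from schedule() when the task is about to block */
static inline void blk_schedule_flush_plug(struct task_struct *tsk)
{
	struct blk_plug *plug = tsk->plug;

	if (plug)
		blk_flush_plug_list(plug, true);	/* punt queue runs to kblockd */
}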