block: blk_delay_queue() should use kblockd workqueue
[firefly-linux-kernel-4.4.55.git] / block / blk-core.c
index 0c0ea10e61ea4bbe57a508734195324f16218043..e2bacfa46cc3ee07684b276791e01870c68e26f7 100644
@@ -198,19 +198,6 @@ void blk_dump_rq_flags(struct request *rq, char *msg)
 }
 EXPORT_SYMBOL(blk_dump_rq_flags);
 
-/*
- * Make sure that plugs that were pending when this function was entered,
- * are now complete and requests pushed to the queue.
-*/
-static inline void queue_sync_plugs(struct request_queue *q)
-{
-       /*
-        * If the current process is plugged and has barriers submitted,
-        * we will livelock if we don't unplug first.
-        */
-       blk_flush_plug(current);
-}
-
 static void blk_delay_work(struct work_struct *work)
 {
        struct request_queue *q;
@@ -233,7 +220,8 @@ static void blk_delay_work(struct work_struct *work)
  */
 void blk_delay_queue(struct request_queue *q, unsigned long msecs)
 {
-       schedule_delayed_work(&q->delay_work, msecs_to_jiffies(msecs));
+       queue_delayed_work(kblockd_workqueue, &q->delay_work,
+                               msecs_to_jiffies(msecs));
 }
 EXPORT_SYMBOL(blk_delay_queue);
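Moving the delayed work onto kblockd means a queue restart can no longer be stalled behind, or deadlock against, unrelated items on the shared system workqueue. A minimal sketch of the same pattern for a hypothetical driver (my_wq, my_work_fn and the 100ms delay are illustrative, not from this patch):

#include <linux/module.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>

static struct workqueue_struct *my_wq;	/* hypothetical dedicated queue */
static struct delayed_work my_dwork;

static void my_work_fn(struct work_struct *work)
{
        /* restart the device queue, kick the hardware, etc. */
}

static int __init my_init(void)
{
        my_wq = alloc_workqueue("my_wq", 0, 0);
        if (!my_wq)
                return -ENOMEM;
        INIT_DELAYED_WORK(&my_dwork, my_work_fn);

        /* same shape as blk_delay_queue(): dedicated queue plus a delay */
        queue_delayed_work(my_wq, &my_dwork, msecs_to_jiffies(100));
        return 0;
}

static void __exit my_exit(void)
{
        cancel_delayed_work_sync(&my_dwork);
        destroy_workqueue(my_wq);
}
module_init(my_init);
module_exit(my_exit);
MODULE_LICENSE("GPL");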
 
@@ -298,7 +286,6 @@ void blk_sync_queue(struct request_queue *q)
 {
        del_timer_sync(&q->timeout);
        cancel_delayed_work_sync(&q->delay_work);
-       queue_sync_plugs(q);
 }
 EXPORT_SYMBOL(blk_sync_queue);
 
@@ -309,7 +296,8 @@ EXPORT_SYMBOL(blk_sync_queue);
  *
  * Description:
  *    See @blk_run_queue. This variant must be called with the queue lock
- *    held and interrupts disabled.
+ *    held and interrupts disabled. If force_kblockd is true, then it is
+ *    safe to call this without holding the queue lock.
  *
  */
 void __blk_run_queue(struct request_queue *q, bool force_kblockd)
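The two calling conventions described by the updated comment, as a hedged sketch (run_queue_example is a hypothetical caller; the __blk_run_queue signature and queue_lock usage follow what is visible in this patch):

#include <linux/blkdev.h>

static void run_queue_example(struct request_queue *q)
{
        unsigned long flags;

        /* inline dispatch: queue lock held, interrupts disabled */
        spin_lock_irqsave(q->queue_lock, flags);
        __blk_run_queue(q, false);
        spin_unlock_irqrestore(q->queue_lock, flags);

        /* dispatch punted to kblockd: safe without the queue lock */
        __blk_run_queue(q, true);
}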
@@ -1311,7 +1299,15 @@ get_rq:
 
        plug = current->plug;
        if (plug) {
-               if (!plug->should_sort && !list_empty(&plug->list)) {
+               /*
+                * If this is the first request added after a plug, fire
+                * off a plug trace. If others have been added before, check
+                * if we have multiple devices in this plug. If so, make a
+                * note to sort the list before dispatch.
+                */
+               if (list_empty(&plug->list))
+                       trace_block_plug(q);
+               else if (!plug->should_sort) {
                        struct request *__rq;
 
                        __rq = list_entry_rq(plug->list.prev);
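The hunk is cut off mid-branch; pieced together with the unshown context from this kernel version (the should_sort assignment below is assumed from those surrounding lines), the patched logic inside the request submission path reads:

        if (list_empty(&plug->list))
                trace_block_plug(q);
        else if (!plug->should_sort) {
                struct request *__rq;

                __rq = list_entry_rq(plug->list.prev);
                if (__rq->q != q)
                        plug->should_sort = 1;
        }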
@@ -2644,6 +2640,7 @@ void blk_start_plug(struct blk_plug *plug)
 
        plug->magic = PLUG_MAGIC;
        INIT_LIST_HEAD(&plug->list);
+       INIT_LIST_HEAD(&plug->cb_list);
        plug->should_sort = 0;
 
        /*
@@ -2668,13 +2665,52 @@ static int plug_rq_cmp(void *priv, struct list_head *a, struct list_head *b)
        return !(rqa->q <= rqb->q);
 }
 
-static void queue_unplugged(struct request_queue *q, unsigned int depth)
+/*
+ * If 'from_schedule' is true, then postpone the dispatch of requests
+ * until a safe kblockd context. We do this to avoid accidental large
+ * additional stack usage in driver dispatch, in places where the original
+ * plugger did not intend it.
+ */
+static void queue_unplugged(struct request_queue *q, unsigned int depth,
+                           bool from_schedule)
+       __releases(q->queue_lock)
 {
-       trace_block_unplug_io(q, depth);
-       __blk_run_queue(q, false);
+       trace_block_unplug(q, depth, !from_schedule);
+
+       /*
+        * If we are punting this to kblockd, then we can safely drop
+        * the queue_lock before waking kblockd (which needs to take
+        * this lock).
+        */
+       if (from_schedule) {
+               spin_unlock(q->queue_lock);
+               __blk_run_queue(q, true);
+       } else {
+               __blk_run_queue(q, false);
+               spin_unlock(q->queue_lock);
+       }
+}
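For reference, the kblockd side alluded to above is blk_delay_work(), shown truncated in the first hunk. A reconstruction from this kernel version (a sketch; the body is assumed, not part of this diff) shows that the worker takes the queue lock itself, which is why the waker may drop it first:

static void blk_delay_work(struct work_struct *work)
{
        struct request_queue *q;

        q = container_of(work, struct request_queue, delay_work.work);

        spin_lock_irq(q->queue_lock);
        __blk_run_queue(q, false);
        spin_unlock_irq(q->queue_lock);
}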
+
+static void flush_plug_callbacks(struct blk_plug *plug)
+{
+       LIST_HEAD(callbacks);
+
+       if (list_empty(&plug->cb_list))
+               return;
+
+       list_splice_init(&plug->cb_list, &callbacks);
+
+       while (!list_empty(&callbacks)) {
+               struct blk_plug_cb *cb = list_first_entry(&callbacks,
+                                                         struct blk_plug_cb,
+                                                         list);
+               list_del(&cb->list);
+               cb->callback(cb);
+       }
 }
 
-static void flush_plug_list(struct blk_plug *plug)
+void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
 {
        struct request_queue *q;
        unsigned long flags;
@@ -2684,6 +2720,7 @@ static void flush_plug_list(struct blk_plug *plug)
 
        BUG_ON(plug->magic != PLUG_MAGIC);
 
+       flush_plug_callbacks(plug);
        if (list_empty(&plug->list))
                return;
 
@@ -2708,10 +2745,11 @@ static void flush_plug_list(struct blk_plug *plug)
                BUG_ON(!(rq->cmd_flags & REQ_ON_PLUG));
                BUG_ON(!rq->q);
                if (rq->q != q) {
-                       if (q) {
-                               queue_unplugged(q, depth);
-                               spin_unlock(q->queue_lock);
-                       }
+                       /*
+                        * This drops the queue lock
+                        */
+                       if (q)
+                               queue_unplugged(q, depth, from_schedule);
                        q = rq->q;
                        depth = 0;
                        spin_lock(q->queue_lock);
@@ -2729,35 +2767,24 @@ static void flush_plug_list(struct blk_plug *plug)
                depth++;
        }
 
-       if (q) {
-               queue_unplugged(q, depth);
-               spin_unlock(q->queue_lock);
-       }
+       /*
+        * This drops the queue lock
+        */
+       if (q)
+               queue_unplugged(q, depth, from_schedule);
 
        local_irq_restore(flags);
 }
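Exporting blk_flush_plug_list() with a from_schedule flag exists so the scheduler can flush a sleeping task's plug without growing its own stack; a sketch of that caller (the helper name and placement are assumptions, the real hook lives outside this file):

static inline void flush_plug_on_schedule(struct task_struct *tsk)
{
        struct blk_plug *plug = tsk->plug;

        /*
         * from_schedule == true punts the actual dispatch to kblockd,
         * keeping driver stack usage out of the schedule() path.
         */
        if (plug)
                blk_flush_plug_list(plug, true);
}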
-
-static void __blk_finish_plug(struct task_struct *tsk, struct blk_plug *plug)
-{
-       flush_plug_list(plug);
-
-       if (plug == tsk->plug)
-               tsk->plug = NULL;
-}
+EXPORT_SYMBOL(blk_flush_plug_list);
 
 void blk_finish_plug(struct blk_plug *plug)
 {
-       if (plug)
-               __blk_finish_plug(current, plug);
-}
-EXPORT_SYMBOL(blk_finish_plug);
+       blk_flush_plug_list(plug, false);
 
-void __blk_flush_plug(struct task_struct *tsk, struct blk_plug *plug)
-{
-       __blk_finish_plug(tsk, plug);
-       tsk->plug = plug;
+       if (plug == current->plug)
+               current->plug = NULL;
 }
-EXPORT_SYMBOL(__blk_flush_plug);
+EXPORT_SYMBOL(blk_finish_plug);
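Taken together, the plugging API after this patch is used as below; a minimal sketch, assuming the two-argument submit_bio() of this kernel era and a caller-supplied bios[] array:

#include <linux/blkdev.h>
#include <linux/bio.h>

static void submit_batch(struct bio **bios, int n)
{
        struct blk_plug plug;
        int i;

        blk_start_plug(&plug);          /* installs plug on current->plug */
        for (i = 0; i < n; i++)
                submit_bio(READ, bios[i]);      /* held on the plug list */
        blk_finish_plug(&plug);         /* flushes requests and callbacks */
}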
 
 int __init blk_dev_init(void)
 {