cfq-iosched: fix the slice residual sign, switch the fifo to per-request deadlines, and add a low_latency tunable
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 1ca813b16e7840cf10086d79b7e2bb7b0c07005e..4ab33d8a20b220d528700a705f89ada868e44729 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -173,6 +173,7 @@ struct cfq_data {
        unsigned int cfq_slice[2];
        unsigned int cfq_slice_async_rq;
        unsigned int cfq_slice_idle;
+       unsigned int cfq_latency;
 
        struct list_head cic_list;
 
@@ -180,6 +181,8 @@ struct cfq_data {
         * Fallback dummy cfqq for extreme OOM conditions
         */
        struct cfq_queue oom_cfqq;
+
+       unsigned long last_end_sync_rq;
 };
 
 enum cfqq_state_flags {
@@ -504,11 +507,20 @@ static void cfq_service_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq,
                } else
                        rb_key += jiffies;
        } else if (!add_front) {
+               /*
+                * Get our rb key offset. Subtract any residual slice
+                * value carried from last service. A negative resid
+                * count indicates slice overrun, and this should position
+                * the next service time further away in the tree.
+                */
                rb_key = cfq_slice_offset(cfqd, cfqq) + jiffies;
-               rb_key += cfqq->slice_resid;
+               rb_key -= cfqq->slice_resid;
                cfqq->slice_resid = 0;
-       } else
-               rb_key = 0;
+       } else {
+               rb_key = -HZ;
+               __cfqq = cfq_rb_first(&cfqd->service_tree);
+               rb_key += __cfqq ? __cfqq->rb_key : jiffies;
+       }
 
        if (!RB_EMPTY_NODE(&cfqq->rb_node)) {
                /*
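Why the sign flip matters: cfqq->slice_resid is positive when the queue was expired with slice left over, and negative when it overran its slice. Under the old '+=', an overrun *lowered* the key and rewarded the queue. A standalone sketch of the key arithmetic (all values hypothetical, not kernel code):

    #include <stdio.h>

    int main(void)
    {
            long jiffies = 100000;          /* pretend current time */
            long offset = 40;               /* cfq_slice_offset() result */
            long overrun = -25;             /* slice_resid after a 25-jiffy overrun */
            long leftover = 10;             /* slice_resid with 10 jiffies unused */

            /* old: rb_key += slice_resid rewarded an overrunning queue */
            printf("old, overrun:  %ld\n", jiffies + offset + overrun);  /* 100015 */
            /* new: rb_key -= slice_resid pushes it further out ... */
            printf("new, overrun:  %ld\n", jiffies + offset - overrun);  /* 100065 */
            /* ... and pulls a queue with unused slice back in */
            printf("new, leftover: %ld\n", jiffies + offset - leftover); /* 100030 */
            return 0;
    }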
@@ -542,7 +554,7 @@ static void cfq_service_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq,
                        n = &(*p)->rb_left;
                else if (cfq_class_idle(cfqq) > cfq_class_idle(__cfqq))
                        n = &(*p)->rb_right;
-               else if (rb_key < __cfqq->rb_key)
+               else if (time_before(rb_key, __cfqq->rb_key))
                        n = &(*p)->rb_left;
                else
                        n = &(*p)->rb_right;
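time_before() matters here because rb_key is derived from jiffies and can wrap; a plain '<' mis-orders keys that straddle the wraparound point. A minimal sketch of the wrap-safe comparison, mirroring the kernel's time_before() macro:

    #include <stdio.h>

    /* the subtraction happens in unsigned arithmetic, then the result is
     * reinterpreted as signed, so keys on either side of the wrap point
     * still compare in chronological order */
    #define sketch_time_before(a, b)   ((long)((a) - (b)) < 0)

    int main(void)
    {
            unsigned long before_wrap = (unsigned long)-10; /* ULONG_MAX - 9 */
            unsigned long after_wrap = 5;                   /* jiffies wrapped */

            printf("plain '<':     %d\n", before_wrap < after_wrap);   /* 0: wrong */
            printf("time_before(): %d\n",
                   sketch_time_before(before_wrap, after_wrap));       /* 1: right */
            return 0;
    }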
@@ -822,8 +834,10 @@ cfq_merged_requests(struct request_queue *q, struct request *rq,
         * reposition in fifo if next is older than rq
         */
        if (!list_empty(&rq->queuelist) && !list_empty(&next->queuelist) &&
-           time_before(next->start_time, rq->start_time))
+           time_before(rq_fifo_time(next), rq_fifo_time(rq))) {
                list_move(&rq->queuelist, &next->queuelist);
+               rq_set_fifo_time(rq, rq_fifo_time(next));
+       }
 
        cfq_remove_request(next);
 }
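The second added line is the subtle one: rq takes over next's slot in the fifo, so it must also take over next's (earlier) expiry deadline, or the merged request would sit behind a deadline that no longer reflects its oldest IO. A simplified standalone sketch of that inheritance (the struct and helper are hypothetical stand-ins for struct request and the rq_fifo_time()/rq_set_fifo_time() accessors):

    #include <stdio.h>

    struct req {
            unsigned long fifo_time;        /* absolute expiry, in jiffies */
    };

    /* on merge, keep whichever deadline is earlier (wrap-safe compare) */
    static void inherit_fifo_time(struct req *rq, const struct req *next)
    {
            if ((long)(next->fifo_time - rq->fifo_time) < 0)
                    rq->fifo_time = next->fifo_time;
    }

    int main(void)
    {
            struct req rq = { .fifo_time = 1200 };
            struct req next = { .fifo_time = 1100 };        /* older request */

            inherit_fifo_time(&rq, &next);
            printf("merged deadline: %lu\n", rq.fifo_time); /* 1100 */
            return 0;
    }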
@@ -1124,9 +1138,7 @@ static void cfq_dispatch_insert(struct request_queue *q, struct request *rq)
  */
 static struct request *cfq_check_fifo(struct cfq_queue *cfqq)
 {
-       struct cfq_data *cfqd = cfqq->cfqd;
-       struct request *rq;
-       int fifo;
+       struct request *rq = NULL;
 
        if (cfq_cfqq_fifo_expire(cfqq))
                return NULL;
@@ -1136,13 +1148,11 @@ static struct request *cfq_check_fifo(struct cfq_queue *cfqq)
        if (list_empty(&cfqq->fifo))
                return NULL;
 
-       fifo = cfq_cfqq_sync(cfqq);
        rq = rq_entry_fifo(cfqq->fifo.next);
-
-       if (time_before(jiffies, rq->start_time + cfqd->cfq_fifo_expire[fifo]))
+       if (time_before(jiffies, rq_fifo_time(rq)))
                rq = NULL;
 
-       cfq_log_cfqq(cfqd, cfqq, "fifo=%p", rq);
+       cfq_log_cfqq(cfqq->cfqd, cfqq, "fifo=%p", rq);
        return rq;
 }
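cfq_check_fifo() can shrink like this because the deadline is now computed once, at insert time (see the rq_set_fifo_time() call added to cfq_insert_request() further down), instead of being rederived from start_time and cfq_fifo_expire[] on every check. The two halves as a standalone sketch (HZ == 1000 and the stock fifo_expire defaults assumed):

    #include <stdio.h>

    static unsigned long jiffies = 1000;
    static unsigned long fifo_expire[2] = { 250, 125 };     /* [async, sync] */

    struct req {
            unsigned long fifo_time;
            int sync;
    };

    /* insert side: stamp the absolute deadline once */
    static void stamp_fifo_time(struct req *rq)
    {
            rq->fifo_time = jiffies + fifo_expire[rq->sync];
    }

    /* check side: due once jiffies is no longer before the deadline */
    static int fifo_expired(const struct req *rq)
    {
            return !((long)(jiffies - rq->fifo_time) < 0);
    }

    int main(void)
    {
            struct req rq = { .sync = 1 };

            stamp_fifo_time(&rq);                           /* deadline 1125 */
            printf("due at 1000? %d\n", fifo_expired(&rq)); /* 0 */
            jiffies = 1200;
            printf("due at 1200? %d\n", fifo_expired(&rq)); /* 1 */
            return 0;
    }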
 
@@ -1243,16 +1253,83 @@ static int cfq_forced_dispatch(struct cfq_data *cfqd)
        return dispatched;
 }
 
+static bool cfq_may_dispatch(struct cfq_data *cfqd, struct cfq_queue *cfqq)
+{
+       unsigned int max_dispatch;
+
+       /*
+        * Drain async requests before we start sync IO
+        */
+       if (cfq_cfqq_idle_window(cfqq) && cfqd->rq_in_driver[BLK_RW_ASYNC])
+               return false;
+
+       /*
+        * If this is an async queue and we have sync IO in flight, let it wait
+        */
+       if (cfqd->sync_flight && !cfq_cfqq_sync(cfqq))
+               return false;
+
+       max_dispatch = cfqd->cfq_quantum;
+       if (cfq_class_idle(cfqq))
+               max_dispatch = 1;
+
+       /*
+        * Does this cfqq already have too much IO in flight?
+        */
+       if (cfqq->dispatched >= max_dispatch) {
+               /*
+                * idle queue must always only have a single IO in flight
+                */
+               if (cfq_class_idle(cfqq))
+                       return false;
+
+               /*
+                * We have other queues, don't allow more IO from this one
+                */
+               if (cfqd->busy_queues > 1)
+                       return false;
+
+               /*
+                * Sole queue user, allow bigger slice
+                */
+               max_dispatch *= 4;
+       }
+
+       /*
+        * Async queues must wait a bit before being allowed dispatch.
+        * We also ramp up the dispatch depth gradually for async IO,
+        * based on the last sync IO we serviced
+        */
+       if (!cfq_cfqq_sync(cfqq) && cfqd->cfq_latency) {
+               unsigned long last_sync = jiffies - cfqd->last_end_sync_rq;
+               unsigned int depth;
+
+               depth = last_sync / cfqd->cfq_slice[1];
+               if (!depth && !cfqq->dispatched)
+                       depth = 1;
+               if (depth < max_dispatch)
+                       max_dispatch = depth;
+       }
+
+       /*
+        * If we're below the current max, allow a dispatch
+        */
+       return cfqq->dispatched < max_dispatch;
+}
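The ramp-up formula is worth unpacking: last_end_sync_rq is refreshed on every sync completion (see cfq_completed_request() below), so the allowed async depth grows by one for each slice_sync worth of sync silence. A worked run with the stock defaults (HZ == 1000, slice_sync = 100ms, quantum = 4; the !cfqq->dispatched guard is omitted for brevity):

    #include <stdio.h>

    int main(void)
    {
            unsigned int slice_sync = 100;  /* cfqd->cfq_slice[1] default */
            unsigned int quantum = 4;       /* cfqd->cfq_quantum default */
            unsigned long since_sync;

            for (since_sync = 0; since_sync <= 500; since_sync += 100) {
                    unsigned int depth = since_sync / slice_sync;

                    if (!depth)             /* never starve async outright */
                            depth = 1;
                    if (depth > quantum)    /* still capped by the quantum */
                            depth = quantum;
                    printf("%3lums since last sync -> async depth %u\n",
                           since_sync, depth);
            }
            return 0;
    }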
+
 /*
  * Dispatch a request from cfqq, moving it to the request queue
  * dispatch list.
  */
-static void cfq_dispatch_request(struct cfq_data *cfqd, struct cfq_queue *cfqq)
+static bool cfq_dispatch_request(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 {
        struct request *rq;
 
        BUG_ON(RB_EMPTY_ROOT(&cfqq->sort_list));
 
+       if (!cfq_may_dispatch(cfqd, cfqq))
+               return false;
+
        /*
         * follow expired path, else get first next available
         */
@@ -1271,6 +1348,8 @@ static void cfq_dispatch_request(struct cfq_data *cfqd, struct cfq_queue *cfqq)
                atomic_long_inc(&cic->ioc->refcount);
                cfqd->active_cic = cic;
        }
+
+       return true;
 }
 
 /*
@@ -1281,7 +1360,6 @@ static int cfq_dispatch_requests(struct request_queue *q, int force)
 {
        struct cfq_data *cfqd = q->elevator->elevator_data;
        struct cfq_queue *cfqq;
-       unsigned int max_dispatch;
 
        if (!cfqd->busy_queues)
                return 0;
@@ -1294,48 +1372,11 @@ static int cfq_dispatch_requests(struct request_queue *q, int force)
                return 0;
 
        /*
-        * Drain async requests before we start sync IO
+        * Dispatch a request from this cfqq, if it is allowed
         */
-       if (cfq_cfqq_idle_window(cfqq) && cfqd->rq_in_driver[BLK_RW_ASYNC])
+       if (!cfq_dispatch_request(cfqd, cfqq))
                return 0;
 
-       /*
-        * If this is an async queue and we have sync IO in flight, let it wait
-        */
-       if (cfqd->sync_flight && !cfq_cfqq_sync(cfqq))
-               return 0;
-
-       max_dispatch = cfqd->cfq_quantum;
-       if (cfq_class_idle(cfqq))
-               max_dispatch = 1;
-
-       /*
-        * Does this cfqq already have too much IO in flight?
-        */
-       if (cfqq->dispatched >= max_dispatch) {
-               /*
-                * idle queue must always only have a single IO in flight
-                */
-               if (cfq_class_idle(cfqq))
-                       return 0;
-
-               /*
-                * We have other queues, don't allow more IO from this one
-                */
-               if (cfqd->busy_queues > 1)
-                       return 0;
-
-               /*
-                * we are the only queue, allow up to 4 times of 'quantum'
-                */
-               if (cfqq->dispatched >= 4 * max_dispatch)
-                       return 0;
-       }
-
-       /*
-        * Dispatch a request from this cfqq
-        */
-       cfq_dispatch_request(cfqd, cfqq);
        cfqq->slice_dispatch++;
        cfq_clear_cfqq_must_dispatch(cfqq);
 
@@ -1951,7 +1992,7 @@ cfq_update_idle_window(struct cfq_data *cfqd, struct cfq_queue *cfqq,
        enable_idle = old_idle = cfq_cfqq_idle_window(cfqq);
 
        if (!atomic_read(&cic->ioc->nr_tasks) || !cfqd->cfq_slice_idle ||
-           (cfqd->hw_tag && CIC_SEEKY(cic)))
+           (!cfqd->cfq_latency && cfqd->hw_tag && CIC_SEEKY(cic)))
                enable_idle = 0;
        else if (sample_valid(cic->ttime_samples)) {
                if (cic->ttime_mean > cfqd->cfq_slice_idle)
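Read through De Morgan, the reworked condition means: a seeky task on command-queuing (hw_tag) hardware now loses its idle window only when low_latency is off. A condensed, hypothetical form of just that gate (the other disable conditions are unchanged and omitted):

    #include <stdio.h>

    static int loses_idle_window(int cfq_latency, int hw_tag, int seeky)
    {
            return !cfq_latency && hw_tag && seeky;
    }

    int main(void)
    {
            /* before this patch the cfq_latency term did not exist, so a
             * seeky task on NCQ hardware always lost its idle window */
            printf("low_latency=1: %d\n", loses_idle_window(1, 1, 1)); /* 0 */
            printf("low_latency=0: %d\n", loses_idle_window(0, 1, 1)); /* 1 */
            return 0;
    }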
@@ -2107,6 +2148,7 @@ static void cfq_insert_request(struct request_queue *q, struct request *rq)
 
        cfq_add_rq_rb(rq);
 
+       rq_set_fifo_time(rq, jiffies + cfqd->cfq_fifo_expire[rq_is_sync(rq)]);
        list_add_tail(&rq->queuelist, &cfqq->fifo);
 
        cfq_rq_enqueued(cfqd, cfqq, rq);
@@ -2157,8 +2199,10 @@ static void cfq_completed_request(struct request_queue *q, struct request *rq)
        if (cfq_cfqq_sync(cfqq))
                cfqd->sync_flight--;
 
-       if (sync)
+       if (sync) {
                RQ_CIC(rq)->last_end_request = now;
+               cfqd->last_end_sync_rq = now;
+       }
 
        /*
         * If this is the active queue, check if it needs to be expired,
@@ -2480,8 +2524,9 @@ static void *cfq_init_queue(struct request_queue *q)
        cfqd->cfq_slice[1] = cfq_slice_sync;
        cfqd->cfq_slice_async_rq = cfq_slice_async_rq;
        cfqd->cfq_slice_idle = cfq_slice_idle;
+       cfqd->cfq_latency = 1;
        cfqd->hw_tag = 1;
-
+       cfqd->last_end_sync_rq = jiffies;
        return cfqd;
 }
 
@@ -2549,6 +2594,7 @@ SHOW_FUNCTION(cfq_slice_idle_show, cfqd->cfq_slice_idle, 1);
 SHOW_FUNCTION(cfq_slice_sync_show, cfqd->cfq_slice[1], 1);
 SHOW_FUNCTION(cfq_slice_async_show, cfqd->cfq_slice[0], 1);
 SHOW_FUNCTION(cfq_slice_async_rq_show, cfqd->cfq_slice_async_rq, 0);
+SHOW_FUNCTION(cfq_low_latency_show, cfqd->cfq_latency, 0);
 #undef SHOW_FUNCTION
 
 #define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV)                        \
@@ -2580,6 +2626,7 @@ STORE_FUNCTION(cfq_slice_sync_store, &cfqd->cfq_slice[1], 1, UINT_MAX, 1);
 STORE_FUNCTION(cfq_slice_async_store, &cfqd->cfq_slice[0], 1, UINT_MAX, 1);
 STORE_FUNCTION(cfq_slice_async_rq_store, &cfqd->cfq_slice_async_rq, 1,
                UINT_MAX, 0);
+STORE_FUNCTION(cfq_low_latency_store, &cfqd->cfq_latency, 0, 1, 0);
 #undef STORE_FUNCTION
 
 #define CFQ_ATTR(name) \
@@ -2595,6 +2642,7 @@ static struct elv_fs_entry cfq_attrs[] = {
        CFQ_ATTR(slice_async),
        CFQ_ATTR(slice_async_rq),
        CFQ_ATTR(slice_idle),
+       CFQ_ATTR(low_latency),
        __ATTR_NULL
 };
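CFQ_ATTR(low_latency) wires the show/store pair above into the elevator's sysfs directory, so the knob can be flipped at runtime without reloading the scheduler. Expected usage (device name hypothetical):

    # the tunable appears alongside the existing cfq knobs
    cat /sys/block/sda/queue/iosched/low_latency      # prints 1 (the default)
    echo 0 > /sys/block/sda/queue/iosched/low_latency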