[BLOCK] Implement elv_drain_elevator for improved switch error detection
author     Tejun Heo <htejun@gmail.com>
           Thu, 10 Nov 2005 07:52:05 +0000 (08:52 +0100)
committer  Jens Axboe <axboe@nelson.home.kernel.dk>
           Sat, 12 Nov 2005 09:56:06 +0000 (10:56 +0100)
This patch adds request_queue->nr_sorted, which keeps track of the
number of requests in the iosched, and implements elv_drain_elevator,
which performs forced dispatching.  elv_drain_elevator checks whether
the iosched actually dispatches all the requests it holds and prints an
error message if it doesn't.  As buggy forced dispatching can result in
wrong barrier operations, I think this extra check is worthwhile.

Signed-off-by: Tejun Heo <htejun@gmail.com>
Signed-off-by: Jens Axboe <axboe@suse.de>
block/elevator.c
include/linux/blkdev.h

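For context, a minimal sketch (not part of the patch) of the contract that
elv_drain_elevator relies on: an io scheduler's elevator_dispatch_fn must
return 0 only when it truly holds no requests, and with force set it must
move everything it has onto q->queue_head.  The names struct example_data
and sorted_list below are purely illustrative, real schedulers (noop,
deadline, AS, CFQ) differ, and the fragment assumes the usual
<linux/blkdev.h>, <linux/elevator.h> and <linux/list.h> headers.

/*
 * Illustrative sketch only: a well-behaved forced-dispatch path.
 * A scheduler that returns 0 while still holding sorted requests
 * leaves q->nr_sorted non-zero, which the new check reports.
 */
struct example_data {
	struct list_head sorted_list;		/* hypothetical private list */
};

static int example_dispatch_fn(request_queue_t *q, int force)
{
	struct example_data *ed = q->elevator->elevator_data;
	int dispatched = 0;

	while (!list_empty(&ed->sorted_list)) {
		struct request *rq = list_entry(ed->sorted_list.next,
						struct request, queuelist);

		list_del_init(&rq->queuelist);
		elv_dispatch_sort(q, rq);	/* decrements q->nr_sorted */
		dispatched = 1;

		/* Without force, a scheduler may stop after one request. */
		if (!force)
			break;
	}

	return dispatched;
}

With a scheduler like this, the loop in elv_drain_elevator terminates with
q->nr_sorted at zero and no error message is printed.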
diff --git a/block/elevator.c b/block/elevator.c
index a475b1a19f670b37f65085f7e413f1e817303511..73aa46b6db492191daa88c248bc1bb5bb2b77414 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -226,6 +226,7 @@ void elv_dispatch_sort(request_queue_t *q, struct request *rq)
 
        if (q->last_merge == rq)
                q->last_merge = NULL;
+       q->nr_sorted--;
 
        boundary = q->end_sector;
 
@@ -284,6 +285,7 @@ void elv_merge_requests(request_queue_t *q, struct request *rq,
 
        if (e->ops->elevator_merge_req_fn)
                e->ops->elevator_merge_req_fn(q, rq, next);
+       q->nr_sorted--;
 
        q->last_merge = rq;
 }
@@ -315,6 +317,20 @@ void elv_requeue_request(request_queue_t *q, struct request *rq)
        __elv_add_request(q, rq, ELEVATOR_INSERT_FRONT, 0);
 }
 
+static void elv_drain_elevator(request_queue_t *q)
+{
+       static int printed;
+       while (q->elevator->ops->elevator_dispatch_fn(q, 1))
+               ;
+       if (q->nr_sorted == 0)
+               return;
+       if (printed++ < 10) {
+               printk(KERN_ERR "%s: forced dispatching is broken "
+                      "(nr_sorted=%u), please report this\n",
+                      q->elevator->elevator_type->elevator_name, q->nr_sorted);
+       }
+}
+
 void __elv_add_request(request_queue_t *q, struct request *rq, int where,
                       int plug)
 {
@@ -349,9 +365,7 @@ void __elv_add_request(request_queue_t *q, struct request *rq, int where,
 
        case ELEVATOR_INSERT_BACK:
                rq->flags |= REQ_SOFTBARRIER;
-
-               while (q->elevator->ops->elevator_dispatch_fn(q, 1))
-                       ;
+               elv_drain_elevator(q);
                list_add_tail(&rq->queuelist, &q->queue_head);
                /*
                 * We kick the queue here for the following reasons.
@@ -370,6 +384,7 @@ void __elv_add_request(request_queue_t *q, struct request *rq, int where,
        case ELEVATOR_INSERT_SORT:
                BUG_ON(!blk_fs_request(rq));
                rq->flags |= REQ_SORTED;
+               q->nr_sorted++;
                if (q->last_merge == NULL && rq_mergeable(rq))
                        q->last_merge = rq;
                /*
@@ -692,8 +707,7 @@ static void elevator_switch(request_queue_t *q, struct elevator_type *new_e)
 
        set_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags);
 
-       while (q->elevator->ops->elevator_dispatch_fn(q, 1))
-               ;
+       elv_drain_elevator(q);
 
        while (q->rq.elvpriv) {
                blk_remove_plug(q);
@@ -701,6 +715,7 @@ static void elevator_switch(request_queue_t *q, struct elevator_type *new_e)
                spin_unlock_irq(q->queue_lock);
                msleep(10);
                spin_lock_irq(q->queue_lock);
+               elv_drain_elevator(q);
        }
 
        spin_unlock_irq(q->queue_lock);
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 025a7f084dbd82b2caf7a290736287bc3f012d37..a33a31e71bbc6cb02acf3409c926137c8664b882 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -406,6 +406,7 @@ struct request_queue
 
        atomic_t                refcnt;
 
+       unsigned int            nr_sorted;
        unsigned int            in_flight;
 
        /*
@@ -631,6 +632,7 @@ static inline void elv_dispatch_add_tail(struct request_queue *q,
 {
        if (q->last_merge == rq)
                q->last_merge = NULL;
+       q->nr_sorted--;
 
        q->end_sector = rq_end_sector(rq);
        q->boundary_rq = rq;