block: add API for delaying work/request_fn a little bit
author     Jens Axboe <jaxboe@fusionio.com>    Wed, 2 Mar 2011 16:08:00 +0000 (11:08 -0500)
committer  Jens Axboe <jaxboe@fusionio.com>    Thu, 10 Mar 2011 07:45:54 +0000 (08:45 +0100)
Currently we use plugging to delay running the request_fn, but as
plugging is going away, we need an alternative mechanism. Add
blk_delay_queue(), which uses a delayed work item to re-invoke the
queue's request_fn after the given number of milliseconds.

Signed-off-by: Jens Axboe <jaxboe@fusionio.com>
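
As a usage illustration (not part of this commit): a driver's request_fn
could use the new call roughly as below when a device resource is
temporarily exhausted. example_hw_slot_available() and example_dispatch()
are hypothetical driver helpers; the surrounding pattern (fetch, requeue,
delay) uses the existing block layer API of this era.

    #include <linux/blkdev.h>

    /*
     * Sketch of a request_fn that backs off with blk_delay_queue()
     * instead of plugging. The block layer invokes request_fn with
     * q->queue_lock held, so no extra locking is needed here.
     */
    static void example_request_fn(struct request_queue *q)
    {
            struct request *rq;

            while ((rq = blk_fetch_request(q)) != NULL) {
                    if (!example_hw_slot_available(q->queuedata)) {
                            /* Out of resources: put the request back
                             * and have request_fn re-run in ~3 ms. */
                            blk_requeue_request(q, rq);
                            blk_delay_queue(q, 3);
                            break;
                    }
                    example_dispatch(q->queuedata, rq);
            }
    }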
 block/blk-core.c       | 29 +++++++++++++++++++++++++++++
 include/linux/blkdev.h |  6 ++++++
 2 files changed, 35 insertions(+)

diff --git a/block/blk-core.c b/block/blk-core.c
index 3cc17e6064d68e5a3315d012a395c84f660bcb8a..e958c7a1e462d3b296f7457410562bca50b3875a 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -197,6 +197,32 @@ void blk_dump_rq_flags(struct request *rq, char *msg)
 }
 EXPORT_SYMBOL(blk_dump_rq_flags);
 
+static void blk_delay_work(struct work_struct *work)
+{
+       struct request_queue *q;
+
+       q = container_of(work, struct request_queue, delay_work.work);
+       spin_lock_irq(q->queue_lock);
+       q->request_fn(q);
+       spin_unlock_irq(q->queue_lock);
+}
+
+/**
+ * blk_delay_queue - restart queueing after defined interval
+ * @q:         The &struct request_queue in question
+ * @msecs:     Delay in msecs
+ *
+ * Description:
+ *   Sometimes queueing needs to be postponed for a little while, to allow
+ *   resources to come back. This function will make sure that queueing is
+ *   restarted around the specified time.
+ */
+void blk_delay_queue(struct request_queue *q, unsigned long msecs)
+{
+       schedule_delayed_work(&q->delay_work, msecs_to_jiffies(msecs));
+}
+EXPORT_SYMBOL(blk_delay_queue);
+
 /*
  * "plug" the device if there are no outstanding requests: this will
  * force the transfer to start only after we have put all the requests
@@ -363,6 +389,7 @@ EXPORT_SYMBOL(blk_start_queue);
 void blk_stop_queue(struct request_queue *q)
 {
        blk_remove_plug(q);
+       cancel_delayed_work(&q->delay_work);
        queue_flag_set(QUEUE_FLAG_STOPPED, q);
 }
 EXPORT_SYMBOL(blk_stop_queue);
@@ -387,6 +414,7 @@ void blk_sync_queue(struct request_queue *q)
        del_timer_sync(&q->timeout);
        cancel_work_sync(&q->unplug_work);
        throtl_shutdown_timer_wq(q);
+       cancel_delayed_work_sync(&q->delay_work);
 }
 EXPORT_SYMBOL(blk_sync_queue);
 
@@ -534,6 +562,7 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
        INIT_LIST_HEAD(&q->flush_queue[1]);
        INIT_LIST_HEAD(&q->flush_data_in_flight);
        INIT_WORK(&q->unplug_work, blk_unplug_work);
+       INIT_DELAYED_WORK(&q->delay_work, blk_delay_work);
 
        kobject_init(&q->kobj, &blk_queue_ktype);
 
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index e3ee74fc59030fe6b4c2c15bc8c4ef85a03e0096..f55b2a8b66104997446cd1557e86527e337d6398 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -300,6 +300,11 @@ struct request_queue
        unsigned long           unplug_delay;   /* After this many jiffies */
        struct work_struct      unplug_work;
 
+       /*
+        * Delayed queue handling
+        */
+       struct delayed_work     delay_work;
+
        struct backing_dev_info backing_dev_info;
 
        /*
@@ -677,6 +682,7 @@ extern int blk_insert_cloned_request(struct request_queue *q,
 extern void blk_plug_device(struct request_queue *);
 extern void blk_plug_device_unlocked(struct request_queue *);
 extern int blk_remove_plug(struct request_queue *);
+extern void blk_delay_queue(struct request_queue *, unsigned long);
 extern void blk_recount_segments(struct request_queue *, struct bio *);
 extern int scsi_cmd_ioctl(struct request_queue *, struct gendisk *, fmode_t,
                          unsigned int, void __user *);
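
A note on the two cancellation points above (illustration, not from the
commit): blk_stop_queue() is called with q->queue_lock held and thus can
only use the non-blocking cancel_delayed_work(), while blk_sync_queue()
may sleep and uses cancel_delayed_work_sync() to wait for a running
blk_delay_work() to complete. A hypothetical quiesce path would combine
them like this:

    #include <linux/blkdev.h>

    /* Stop the queue, then wait out any in-flight delayed work. */
    static void example_quiesce(struct request_queue *q)
    {
            spin_lock_irq(q->queue_lock);
            blk_stop_queue(q);      /* flags stopped, cancels delay_work */
            spin_unlock_irq(q->queue_lock);

            blk_sync_queue(q);      /* may sleep; syncs delay_work etc. */
    }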