/*
 * CFQ, or complete fairness queueing, disk scheduler.
 *
 * Based on ideas from a previously unfinished io
 * scheduler (round robin per-process disk scheduling) and Andrea Arcangeli.
 *
 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 */
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/elevator.h>
#include <linux/rbtree.h>
#include <linux/ioprio.h>
/*
 * tunables
 */
static const int cfq_quantum = 4;		/* max requests dispatched from a queue in one round of service */
static const int cfq_fifo_expire[2] = { HZ / 4, HZ / 8 };	/* async, sync fifo timeouts */
static const int cfq_back_max = 16 * 1024;	/* maximum backwards seek, in KiB */
static const int cfq_back_penalty = 2;		/* penalty of a backwards seek */

static const int cfq_slice_sync = HZ / 10;
static int cfq_slice_async = HZ / 25;
static const int cfq_slice_async_rq = 2;
static int cfq_slice_idle = HZ / 125;
/*
 * grace period before allowing idle class to get disk access
 */
#define CFQ_IDLE_GRACE		(HZ / 10)

/*
 * below this threshold, we consider thinktime immediate
 */
#define CFQ_MIN_TT		(2)

#define CFQ_SLICE_SCALE		(5)
#define RQ_CIC(rq)		((struct cfq_io_context *) (rq)->elevator_private)
#define RQ_CFQQ(rq)		((rq)->elevator_private2)

static struct kmem_cache *cfq_pool;
static struct kmem_cache *cfq_ioc_pool;

static DEFINE_PER_CPU(unsigned long, ioc_count);
static struct completion *ioc_gone;

#define CFQ_PRIO_LISTS		IOPRIO_BE_NR
#define cfq_class_idle(cfqq)	((cfqq)->ioprio_class == IOPRIO_CLASS_IDLE)
#define cfq_class_rt(cfqq)	((cfqq)->ioprio_class == IOPRIO_CLASS_RT)

#define ASYNC			(0)
#define SYNC			(1)

#define sample_valid(samples)	((samples) > 80)
/*
 * Most of our rbtree usage is for sorting with min extraction, so
 * if we cache the leftmost node we don't have to walk down the tree
 * to find it. Idea borrowed from Ingo Molnar's CFS scheduler. We should
 * move this into the elevator for the rq sorting as well.
 */
struct cfq_rb_root {
	struct rb_root rb;
	struct rb_node *left;
};
#define CFQ_RB_ROOT	(struct cfq_rb_root) { RB_ROOT, NULL, }
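/*
 * Editorial sketch (not from the original source): with the cached
 * leftmost node, min extraction is O(1) in the common case,
 *
 *	struct rb_node *min = root->left ? root->left : rb_first(&root->rb);
 *
 * cfq_rb_first() and cfq_rb_erase() below keep ->left in sync.
 */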
/*
 * Per block device queue structure
 */
struct cfq_data {
	request_queue_t *queue;

	/*
	 * rr list of queues with requests and the count of them
	 */
	struct cfq_rb_root service_tree;
	unsigned int busy_queues;

	int rq_in_driver;
	int sync_flight;
	int hw_tag;

	/*
	 * idle window management
	 */
	struct timer_list idle_slice_timer;
	struct work_struct unplug_work;

	struct cfq_queue *active_queue;
	struct cfq_io_context *active_cic;

	/*
	 * async queue for each priority case
	 */
	struct cfq_queue *async_cfqq[2][IOPRIO_BE_NR];
	struct cfq_queue *async_idle_cfqq;

	struct timer_list idle_class_timer;

	sector_t last_position;
	unsigned long last_end_request;

	/*
	 * tunables, see top of file
	 */
	unsigned int cfq_quantum;
	unsigned int cfq_fifo_expire[2];
	unsigned int cfq_back_penalty;
	unsigned int cfq_back_max;
	unsigned int cfq_slice[2];
	unsigned int cfq_slice_async_rq;
	unsigned int cfq_slice_idle;

	struct list_head cic_list;

	sector_t new_seek_mean;
	u64 new_seek_total;
};
/*
 * Per process-grouping structure
 */
struct cfq_queue {
	/* reference count */
	atomic_t ref;
	/* parent cfq_data */
	struct cfq_data *cfqd;
	/* service_tree member */
	struct rb_node rb_node;
	/* service_tree key */
	unsigned long rb_key;
	/* sorted list of pending requests */
	struct rb_root sort_list;
	/* if fifo isn't expired, next request to serve */
	struct request *next_rq;
	/* requests queued in sort_list */
	int queued[2];
	/* currently allocated requests */
	int allocated[2];
	/* pending metadata requests */
	int meta_pending;
	/* fifo list of requests in sort_list */
	struct list_head fifo;

	unsigned long slice_end;
	long slice_resid;

	/* number of requests that are on the dispatch list or inside driver */
	int dispatched;

	/* io prio of this group */
	unsigned short ioprio, org_ioprio;
	unsigned short ioprio_class, org_ioprio_class;

	/* various state flags, see below */
	unsigned int flags;

	sector_t last_request_pos;
};
enum cfqq_state_flags {
	CFQ_CFQQ_FLAG_on_rr = 0,	/* on round-robin busy list */
	CFQ_CFQQ_FLAG_wait_request,	/* waiting for a request */
	CFQ_CFQQ_FLAG_must_alloc,	/* must be allowed rq alloc */
	CFQ_CFQQ_FLAG_must_alloc_slice,	/* per-slice must_alloc flag */
	CFQ_CFQQ_FLAG_must_dispatch,	/* must dispatch, even if expired */
	CFQ_CFQQ_FLAG_fifo_expire,	/* FIFO checked in this slice */
	CFQ_CFQQ_FLAG_idle_window,	/* slice idling enabled */
	CFQ_CFQQ_FLAG_prio_changed,	/* task priority has changed */
	CFQ_CFQQ_FLAG_queue_new,	/* queue never been serviced */
	CFQ_CFQQ_FLAG_slice_new,	/* no requests dispatched in slice */
	CFQ_CFQQ_FLAG_sync,		/* synchronous queue */
};
#define CFQ_CFQQ_FNS(name)						\
static inline void cfq_mark_cfqq_##name(struct cfq_queue *cfqq)		\
{									\
	cfqq->flags |= (1 << CFQ_CFQQ_FLAG_##name);			\
}									\
static inline void cfq_clear_cfqq_##name(struct cfq_queue *cfqq)	\
{									\
	cfqq->flags &= ~(1 << CFQ_CFQQ_FLAG_##name);			\
}									\
static inline int cfq_cfqq_##name(const struct cfq_queue *cfqq)		\
{									\
	return (cfqq->flags & (1 << CFQ_CFQQ_FLAG_##name)) != 0;	\
}
CFQ_CFQQ_FNS(on_rr);
CFQ_CFQQ_FNS(wait_request);
CFQ_CFQQ_FNS(must_alloc);
CFQ_CFQQ_FNS(must_alloc_slice);
CFQ_CFQQ_FNS(must_dispatch);
CFQ_CFQQ_FNS(fifo_expire);
CFQ_CFQQ_FNS(idle_window);
CFQ_CFQQ_FNS(prio_changed);
CFQ_CFQQ_FNS(queue_new);
CFQ_CFQQ_FNS(slice_new);
CFQ_CFQQ_FNS(sync);
#undef CFQ_CFQQ_FNS
static void cfq_dispatch_insert(request_queue_t *, struct request *);
static struct cfq_queue *cfq_get_queue(struct cfq_data *, int,
				       struct task_struct *, gfp_t);
static struct cfq_io_context *cfq_cic_rb_lookup(struct cfq_data *,
						struct io_context *);

static inline struct cfq_queue *cic_to_cfqq(struct cfq_io_context *cic,
					    int is_sync)
{
	return cic->cfqq[!!is_sync];
}

static inline void cic_set_cfqq(struct cfq_io_context *cic,
				struct cfq_queue *cfqq, int is_sync)
{
	cic->cfqq[!!is_sync] = cfqq;
}
/*
 * We regard a request as SYNC if it's either a read or has the SYNC bit
 * set (in which case it could also be a direct WRITE).
 */
static inline int cfq_bio_sync(struct bio *bio)
{
	if (bio_data_dir(bio) == READ || bio_sync(bio))
		return 1;

	return 0;
}
/*
 * scheduler run of queue, if there are requests pending and no one in the
 * driver that will restart queueing
 */
static inline void cfq_schedule_dispatch(struct cfq_data *cfqd)
{
	if (cfqd->busy_queues)
		kblockd_schedule_work(&cfqd->unplug_work);
}

static int cfq_queue_empty(request_queue_t *q)
{
	struct cfq_data *cfqd = q->elevator->elevator_data;

	return !cfqd->busy_queues;
}
/*
 * Scale the schedule slice based on io priority. Use the sync time slice
 * only if a queue is marked sync and has sync io queued. A sync queue with
 * async io only should not get the full sync slice length.
 */
static inline int cfq_prio_slice(struct cfq_data *cfqd, int sync,
				 unsigned short prio)
{
	const int base_slice = cfqd->cfq_slice[sync];

	WARN_ON(prio >= IOPRIO_BE_NR);

	return base_slice + (base_slice/CFQ_SLICE_SCALE * (4 - prio));
}
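/*
 * Worked example (editorial, assuming HZ=1000 so cfq_slice_sync is 100
 * jiffies): base_slice/CFQ_SLICE_SCALE = 20, giving a sync queue
 * 100 + 20 * (4 - prio) jiffies: 180 at prio 0, 100 at the default
 * prio 4, 40 at prio 7.
 */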
static inline int
cfq_prio_to_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	return cfq_prio_slice(cfqd, cfq_cfqq_sync(cfqq), cfqq->ioprio);
}

static inline void
cfq_set_prio_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	cfqq->slice_end = cfq_prio_to_slice(cfqd, cfqq) + jiffies;
}
/*
 * We need to wrap this check in cfq_cfqq_slice_new(), since ->slice_end
 * isn't valid until the first request from the dispatch is activated
 * and the slice time set.
 */
static inline int cfq_slice_used(struct cfq_queue *cfqq)
{
	if (cfq_cfqq_slice_new(cfqq))
		return 0;
	if (time_before(jiffies, cfqq->slice_end))
		return 0;

	return 1;
}
/*
 * Lifted from AS - choose which of rq1 and rq2 is best served now.
 * We choose the request that is closest to the head right now. Distance
 * behind the head is penalized and only allowed to a certain extent.
 */
static struct request *
cfq_choose_req(struct cfq_data *cfqd, struct request *rq1, struct request *rq2)
{
	sector_t last, s1, s2, d1 = 0, d2 = 0;
	unsigned long back_max;
#define CFQ_RQ1_WRAP	0x01		/* request 1 wraps */
#define CFQ_RQ2_WRAP	0x02		/* request 2 wraps */
	unsigned wrap = 0;		/* bit mask: requests behind the disk head? */

	if (rq1 == NULL || rq1 == rq2)
		return rq2;
	if (rq2 == NULL)
		return rq1;

	if (rq_is_sync(rq1) && !rq_is_sync(rq2))
		return rq1;
	else if (rq_is_sync(rq2) && !rq_is_sync(rq1))
		return rq2;
	if (rq_is_meta(rq1) && !rq_is_meta(rq2))
		return rq1;
	else if (rq_is_meta(rq2) && !rq_is_meta(rq1))
		return rq2;

	s1 = rq1->sector;
	s2 = rq2->sector;

	last = cfqd->last_position;

	/*
	 * by definition, 1KiB is 2 sectors
	 */
	back_max = cfqd->cfq_back_max * 2;
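	/*
	 * Worked example (editorial): the default cfq_back_max of
	 * 16384 KiB therefore becomes 32768 sectors, so only seeks up
	 * to 16 MiB behind the head are considered at all.
	 */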
	/*
	 * Strict one way elevator _except_ in the case where we allow
	 * short backward seeks which are biased as twice the cost of a
	 * similar forward seek.
	 */
	if (s1 >= last)
		d1 = s1 - last;
	else if (s1 + back_max >= last)
		d1 = (last - s1) * cfqd->cfq_back_penalty;
	else
		wrap |= CFQ_RQ1_WRAP;

	if (s2 >= last)
		d2 = s2 - last;
	else if (s2 + back_max >= last)
		d2 = (last - s2) * cfqd->cfq_back_penalty;
	else
		wrap |= CFQ_RQ2_WRAP;
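	/*
	 * Worked example (editorial): head at sector 1000, s1 = 1200,
	 * s2 = 940, default cfq_back_penalty of 2. Then d1 = 200 and
	 * d2 = (1000 - 940) * 2 = 120, so the short backward seek to
	 * rq2 still wins; at s2 = 880 it would lose (d2 = 240).
	 */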
	/* Found required data */

	/*
	 * By doing switch() on the bit mask "wrap" we avoid having to
	 * check two variables for all permutations: --> faster!
	 */
	switch (wrap) {
	case 0: /* common case for CFQ: rq1 and rq2 not wrapped */
		if (d1 < d2)
			return rq1;
		else if (d2 < d1)
			return rq2;
		else {
			if (s1 >= s2)
				return rq1;
			else
				return rq2;
		}

	case CFQ_RQ2_WRAP:
		return rq1;
	case CFQ_RQ1_WRAP:
		return rq2;
	case (CFQ_RQ1_WRAP|CFQ_RQ2_WRAP): /* both rqs wrapped */
	default:
		/*
		 * Since both rqs are wrapped,
		 * start with the one that's further behind head
		 * (--> only *one* back seek required),
		 * since back seek takes more time than forward.
		 */
		if (s1 <= s2)
			return rq1;
		else
			return rq2;
	}
}
/*
 * The below is the leftmost cache rbtree addon
 */
static struct rb_node *cfq_rb_first(struct cfq_rb_root *root)
{
	if (!root->left)
		root->left = rb_first(&root->rb);

	return root->left;
}

static void cfq_rb_erase(struct rb_node *n, struct cfq_rb_root *root)
{
	if (root->left == n)
		root->left = NULL;

	rb_erase(n, &root->rb);
	RB_CLEAR_NODE(n);
}
/*
 * would be nice to take fifo expire time into account as well
 */
static struct request *
cfq_find_next_rq(struct cfq_data *cfqd, struct cfq_queue *cfqq,
		 struct request *last)
{
	struct rb_node *rbnext = rb_next(&last->rb_node);
	struct rb_node *rbprev = rb_prev(&last->rb_node);
	struct request *next = NULL, *prev = NULL;

	BUG_ON(RB_EMPTY_NODE(&last->rb_node));

	if (rbprev)
		prev = rb_entry_rq(rbprev);

	if (rbnext)
		next = rb_entry_rq(rbnext);
	else {
		rbnext = rb_first(&cfqq->sort_list);
		if (rbnext && rbnext != &last->rb_node)
			next = rb_entry_rq(rbnext);
	}

	return cfq_choose_req(cfqd, next, prev);
}
static unsigned long cfq_slice_offset(struct cfq_data *cfqd,
				      struct cfq_queue *cfqq)
{
	/*
	 * just an approximation, should be ok.
	 */
	return (cfqd->busy_queues - 1) * (cfq_prio_slice(cfqd, 1, 0) -
		       cfq_prio_slice(cfqd, cfq_cfqq_sync(cfqq), cfqq->ioprio));
}
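/*
 * Worked example (editorial, HZ=1000, sync defaults): with 4 busy
 * queues, a prio 4 queue is keyed (4 - 1) * (180 - 100) = 240 jiffies
 * past "now", while a prio 0 queue gets an offset of 0 and sorts ahead
 * of it in the service tree.
 */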
/*
 * The cfqd->service_tree holds all pending cfq_queue's that have
 * requests waiting to be processed. It is sorted in the order that
 * we will service the queues.
 */
static void cfq_service_tree_add(struct cfq_data *cfqd,
				 struct cfq_queue *cfqq, int add_front)
{
	struct rb_node **p = &cfqd->service_tree.rb.rb_node;
	struct rb_node *parent = NULL;
	unsigned long rb_key;
	int left = 1;

	if (!add_front) {
		rb_key = cfq_slice_offset(cfqd, cfqq) + jiffies;
		rb_key += cfqq->slice_resid;
		cfqq->slice_resid = 0;
	} else
		rb_key = 0;

	if (!RB_EMPTY_NODE(&cfqq->rb_node)) {
		/*
		 * same position, nothing more to do
		 */
		if (rb_key == cfqq->rb_key)
			return;

		cfq_rb_erase(&cfqq->rb_node, &cfqd->service_tree);
	}

	while (*p) {
		struct cfq_queue *__cfqq;
		struct rb_node **n;

		parent = *p;
		__cfqq = rb_entry(parent, struct cfq_queue, rb_node);

		/*
		 * sort RT queues first, we always want to give
		 * preference to them. IDLE queues go to the back.
		 * After that, sort on the next service time.
		 */
		if (cfq_class_rt(cfqq) > cfq_class_rt(__cfqq))
			n = &(*p)->rb_left;
		else if (cfq_class_rt(cfqq) < cfq_class_rt(__cfqq))
			n = &(*p)->rb_right;
		else if (cfq_class_idle(cfqq) < cfq_class_idle(__cfqq))
			n = &(*p)->rb_left;
		else if (cfq_class_idle(cfqq) > cfq_class_idle(__cfqq))
			n = &(*p)->rb_right;
		else if (rb_key < __cfqq->rb_key)
			n = &(*p)->rb_left;
		else
			n = &(*p)->rb_right;

		if (n == &(*p)->rb_right)
			left = 0;

		p = n;
	}

	if (left)
		cfqd->service_tree.left = &cfqq->rb_node;

	cfqq->rb_key = rb_key;
	rb_link_node(&cfqq->rb_node, parent, p);
	rb_insert_color(&cfqq->rb_node, &cfqd->service_tree.rb);
}
/*
 * Update cfqq's position in the service tree.
 */
static void cfq_resort_rr_list(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	/*
	 * Resorting requires the cfqq to be on the RR list already.
	 */
	if (cfq_cfqq_on_rr(cfqq))
		cfq_service_tree_add(cfqd, cfqq, 0);
}
/*
 * add to busy list of queues for service, trying to be fair in ordering
 * the pending list according to last request service
 */
static void
cfq_add_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	BUG_ON(cfq_cfqq_on_rr(cfqq));
	cfq_mark_cfqq_on_rr(cfqq);
	cfqd->busy_queues++;

	cfq_resort_rr_list(cfqd, cfqq);
}
/*
 * Called when the cfqq no longer has requests pending, remove it from
 * the service tree.
 */
static void
cfq_del_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	BUG_ON(!cfq_cfqq_on_rr(cfqq));
	cfq_clear_cfqq_on_rr(cfqq);

	if (!RB_EMPTY_NODE(&cfqq->rb_node))
		cfq_rb_erase(&cfqq->rb_node, &cfqd->service_tree);

	BUG_ON(!cfqd->busy_queues);
	cfqd->busy_queues--;
}
/*
 * rb tree support functions
 */
static inline void cfq_del_rq_rb(struct request *rq)
{
	struct cfq_queue *cfqq = RQ_CFQQ(rq);
	struct cfq_data *cfqd = cfqq->cfqd;
	const int sync = rq_is_sync(rq);

	BUG_ON(!cfqq->queued[sync]);
	cfqq->queued[sync]--;

	elv_rb_del(&cfqq->sort_list, rq);

	if (cfq_cfqq_on_rr(cfqq) && RB_EMPTY_ROOT(&cfqq->sort_list))
		cfq_del_cfqq_rr(cfqd, cfqq);
}
static void cfq_add_rq_rb(struct request *rq)
{
	struct cfq_queue *cfqq = RQ_CFQQ(rq);
	struct cfq_data *cfqd = cfqq->cfqd;
	struct request *__alias;

	cfqq->queued[rq_is_sync(rq)]++;

	/*
	 * looks a little odd, but the first insert might return an alias.
	 * if that happens, put the alias on the dispatch list
	 */
	while ((__alias = elv_rb_add(&cfqq->sort_list, rq)) != NULL)
		cfq_dispatch_insert(cfqd->queue, __alias);

	if (!cfq_cfqq_on_rr(cfqq))
		cfq_add_cfqq_rr(cfqd, cfqq);

	/*
	 * check if this request is a better next-serve candidate
	 */
	cfqq->next_rq = cfq_choose_req(cfqd, cfqq->next_rq, rq);
	BUG_ON(!cfqq->next_rq);
}
static void
cfq_reposition_rq_rb(struct cfq_queue *cfqq, struct request *rq)
{
	elv_rb_del(&cfqq->sort_list, rq);
	cfqq->queued[rq_is_sync(rq)]--;
	cfq_add_rq_rb(rq);
}
static struct request *
cfq_find_rq_fmerge(struct cfq_data *cfqd, struct bio *bio)
{
	struct task_struct *tsk = current;
	struct cfq_io_context *cic;
	struct cfq_queue *cfqq;

	cic = cfq_cic_rb_lookup(cfqd, tsk->io_context);
	if (!cic)
		return NULL;

	cfqq = cic_to_cfqq(cic, cfq_bio_sync(bio));
	if (cfqq) {
		sector_t sector = bio->bi_sector + bio_sectors(bio);

		return elv_rb_find(&cfqq->sort_list, sector);
	}

	return NULL;
}
static void cfq_activate_request(request_queue_t *q, struct request *rq)
{
	struct cfq_data *cfqd = q->elevator->elevator_data;

	cfqd->rq_in_driver++;

	/*
	 * If the depth is larger than 1, it really could be queueing. But
	 * let's make the mark a little higher - idling could still be good
	 * for low queueing, and a low queueing number could also just
	 * indicate a SCSI mid layer like behaviour where limit+1 is often
	 * seen.
	 */
	if (!cfqd->hw_tag && cfqd->rq_in_driver > 4)
		cfqd->hw_tag = 1;

	cfqd->last_position = rq->hard_sector + rq->hard_nr_sectors;
}
static void cfq_deactivate_request(request_queue_t *q, struct request *rq)
{
	struct cfq_data *cfqd = q->elevator->elevator_data;

	WARN_ON(!cfqd->rq_in_driver);
	cfqd->rq_in_driver--;
}
static void cfq_remove_request(struct request *rq)
{
	struct cfq_queue *cfqq = RQ_CFQQ(rq);

	if (cfqq->next_rq == rq)
		cfqq->next_rq = cfq_find_next_rq(cfqq->cfqd, cfqq, rq);

	list_del_init(&rq->queuelist);
	cfq_del_rq_rb(rq);

	if (rq_is_meta(rq)) {
		WARN_ON(!cfqq->meta_pending);
		cfqq->meta_pending--;
	}
}
static int cfq_merge(request_queue_t *q, struct request **req, struct bio *bio)
{
	struct cfq_data *cfqd = q->elevator->elevator_data;
	struct request *__rq;

	__rq = cfq_find_rq_fmerge(cfqd, bio);
	if (__rq && elv_rq_merge_ok(__rq, bio)) {
		*req = __rq;
		return ELEVATOR_FRONT_MERGE;
	}

	return ELEVATOR_NO_MERGE;
}
static void cfq_merged_request(request_queue_t *q, struct request *req,
			       int type)
{
	if (type == ELEVATOR_FRONT_MERGE) {
		struct cfq_queue *cfqq = RQ_CFQQ(req);

		cfq_reposition_rq_rb(cfqq, req);
	}
}
static void
cfq_merged_requests(request_queue_t *q, struct request *rq,
		    struct request *next)
{
	/*
	 * reposition in fifo if next is older than rq
	 */
	if (!list_empty(&rq->queuelist) && !list_empty(&next->queuelist) &&
	    time_before(next->start_time, rq->start_time))
		list_move(&rq->queuelist, &next->queuelist);

	cfq_remove_request(next);
}
static int cfq_allow_merge(request_queue_t *q, struct request *rq,
			   struct bio *bio)
{
	struct cfq_data *cfqd = q->elevator->elevator_data;
	struct cfq_io_context *cic;
	struct cfq_queue *cfqq;

	/*
	 * Disallow merge of a sync bio into an async request.
	 */
	if (cfq_bio_sync(bio) && !rq_is_sync(rq))
		return 0;

	/*
	 * Look up the cfqq that this bio will be queued with. Allow
	 * merge only if rq is queued there.
	 */
	cic = cfq_cic_rb_lookup(cfqd, current->io_context);
	if (!cic)
		return 0;

	cfqq = cic_to_cfqq(cic, cfq_bio_sync(bio));
	if (cfqq == RQ_CFQQ(rq))
		return 1;

	return 0;
}
static void
__cfq_set_active_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	if (cfqq) {
		/*
		 * stop potential idle class queues waiting service
		 */
		del_timer(&cfqd->idle_class_timer);

		cfqq->slice_end = 0;
		cfq_clear_cfqq_must_alloc_slice(cfqq);
		cfq_clear_cfqq_fifo_expire(cfqq);
		cfq_mark_cfqq_slice_new(cfqq);
		cfq_clear_cfqq_queue_new(cfqq);
	}

	cfqd->active_queue = cfqq;
}
/*
 * current cfqq expired its slice (or was too idle), select new one
 */
static void
__cfq_slice_expired(struct cfq_data *cfqd, struct cfq_queue *cfqq,
		    int timed_out)
{
	if (cfq_cfqq_wait_request(cfqq))
		del_timer(&cfqd->idle_slice_timer);

	cfq_clear_cfqq_must_dispatch(cfqq);
	cfq_clear_cfqq_wait_request(cfqq);

	/*
	 * store what was left of this slice, if the queue idled/timed out
	 */
	if (timed_out && !cfq_cfqq_slice_new(cfqq))
		cfqq->slice_resid = cfqq->slice_end - jiffies;

	cfq_resort_rr_list(cfqd, cfqq);

	if (cfqq == cfqd->active_queue)
		cfqd->active_queue = NULL;

	if (cfqd->active_cic) {
		put_io_context(cfqd->active_cic->ioc);
		cfqd->active_cic = NULL;
	}
}
static inline void cfq_slice_expired(struct cfq_data *cfqd, int timed_out)
{
	struct cfq_queue *cfqq = cfqd->active_queue;

	if (cfqq)
		__cfq_slice_expired(cfqd, cfqq, timed_out);
}
/*
 * Get next queue for service. Unless we have a queue preemption,
 * we'll simply select the first cfqq in the service tree.
 */
static struct cfq_queue *cfq_get_next_queue(struct cfq_data *cfqd)
{
	struct cfq_queue *cfqq;
	struct rb_node *n;

	if (RB_EMPTY_ROOT(&cfqd->service_tree.rb))
		return NULL;

	n = cfq_rb_first(&cfqd->service_tree);
	cfqq = rb_entry(n, struct cfq_queue, rb_node);

	if (cfq_class_idle(cfqq)) {
		unsigned long end;

		/*
		 * if we have idle queues and no rt or be queues had
		 * pending requests, either allow immediate service if
		 * the grace period has passed or arm the idle grace
		 * timer
		 */
		end = cfqd->last_end_request + CFQ_IDLE_GRACE;
		if (time_before(jiffies, end)) {
			mod_timer(&cfqd->idle_class_timer, end);
			cfqq = NULL;
		}
	}

	return cfqq;
}
/*
 * Get and set a new active queue for service.
 */
static struct cfq_queue *cfq_set_active_queue(struct cfq_data *cfqd)
{
	struct cfq_queue *cfqq;

	cfqq = cfq_get_next_queue(cfqd);
	__cfq_set_active_queue(cfqd, cfqq);
	return cfqq;
}
static inline sector_t cfq_dist_from_last(struct cfq_data *cfqd,
					  struct request *rq)
{
	if (rq->sector >= cfqd->last_position)
		return rq->sector - cfqd->last_position;
	else
		return cfqd->last_position - rq->sector;
}
static inline int cfq_rq_close(struct cfq_data *cfqd, struct request *rq)
{
	struct cfq_io_context *cic = cfqd->active_cic;

	if (!sample_valid(cic->seek_samples))
		return 0;

	return cfq_dist_from_last(cfqd, rq) <= cic->seek_mean;
}
static int cfq_close_cooperator(struct cfq_data *cfq_data,
				struct cfq_queue *cfqq)
{
	/*
	 * We should notice if some of the queues are cooperating, e.g.
	 * working closely on the same area of the disk. In that case,
	 * we can group them together and not waste time idling.
	 */
	return 0;
}

#define CIC_SEEKY(cic) ((cic)->seek_mean > (8 * 1024))
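/*
 * Editorial note: seek_mean is kept in sectors, so the CIC_SEEKY()
 * threshold of 8*1024 sectors flags a process whose mean seek distance
 * exceeds 4 MiB.
 */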
static void cfq_arm_slice_timer(struct cfq_data *cfqd)
{
	struct cfq_queue *cfqq = cfqd->active_queue;
	struct cfq_io_context *cic;
	unsigned long sl;

	WARN_ON(!RB_EMPTY_ROOT(&cfqq->sort_list));
	WARN_ON(cfq_cfqq_slice_new(cfqq));

	/*
	 * idle is disabled, either manually or by past process history
	 */
	if (!cfqd->cfq_slice_idle || !cfq_cfqq_idle_window(cfqq))
		return;

	/*
	 * task has exited, don't wait
	 */
	cic = cfqd->active_cic;
	if (!cic || !cic->ioc->task)
		return;

	/*
	 * See if this prio level has a good candidate
	 */
	if (cfq_close_cooperator(cfqd, cfqq) &&
	    (sample_valid(cic->ttime_samples) && cic->ttime_mean > 2))
		return;

	cfq_mark_cfqq_must_dispatch(cfqq);
	cfq_mark_cfqq_wait_request(cfqq);

	/*
	 * we don't want to idle for seeks, but we do want to allow
	 * fair distribution of slice time for a process doing back-to-back
	 * seeks. so allow a little bit of time for it to submit a new rq
	 */
	sl = cfqd->cfq_slice_idle;
	if (sample_valid(cic->seek_samples) && CIC_SEEKY(cic))
		sl = min(sl, msecs_to_jiffies(CFQ_MIN_TT));

	mod_timer(&cfqd->idle_slice_timer, jiffies + sl);
}
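/*
 * Worked example (editorial, HZ=1000): cfq_slice_idle defaults to
 * HZ/125 = 8 jiffies, but a queue flagged by CIC_SEEKY() has the arm
 * time clamped to msecs_to_jiffies(CFQ_MIN_TT) = 2 jiffies, so a seeky
 * process can only stall the disk briefly between requests.
 */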
/*
 * Move request from internal lists to the request queue dispatch list.
 */
static void cfq_dispatch_insert(request_queue_t *q, struct request *rq)
{
	struct cfq_data *cfqd = q->elevator->elevator_data;
	struct cfq_queue *cfqq = RQ_CFQQ(rq);

	cfq_remove_request(rq);
	cfqq->dispatched++;
	elv_dispatch_sort(q, rq);

	if (cfq_cfqq_sync(cfqq))
		cfqd->sync_flight++;
}
/*
 * return expired entry, or NULL to just start from scratch in rbtree
 */
static inline struct request *cfq_check_fifo(struct cfq_queue *cfqq)
{
	struct cfq_data *cfqd = cfqq->cfqd;
	struct request *rq;
	int fifo;

	if (cfq_cfqq_fifo_expire(cfqq))
		return NULL;

	cfq_mark_cfqq_fifo_expire(cfqq);

	if (list_empty(&cfqq->fifo))
		return NULL;

	fifo = cfq_cfqq_sync(cfqq);
	rq = rq_entry_fifo(cfqq->fifo.next);

	if (time_before(jiffies, rq->start_time + cfqd->cfq_fifo_expire[fifo]))
		rq = NULL;

	return rq;
}
static inline int
cfq_prio_to_maxrq(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	const int base_rq = cfqd->cfq_slice_async_rq;

	WARN_ON(cfqq->ioprio >= IOPRIO_BE_NR);

	return 2 * (base_rq + base_rq * (CFQ_PRIO_LISTS - 1 - cfqq->ioprio));
}
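/*
 * Worked example (editorial, default cfq_slice_async_rq = 2): an async
 * queue may dispatch 2 * (2 + 2 * 7) = 32 requests per slice at prio 0,
 * 2 * (2 + 2 * 3) = 16 at the default prio 4, and 2 * (2 + 2 * 0) = 4
 * at prio 7.
 */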
/*
 * Select a queue for service. If we have a current active queue,
 * check whether to continue servicing it, or retrieve and set a new one.
 */
static struct cfq_queue *cfq_select_queue(struct cfq_data *cfqd)
{
	struct cfq_queue *cfqq;

	cfqq = cfqd->active_queue;
	if (!cfqq)
		goto new_queue;

	/*
	 * The active queue has run out of time, expire it and select new.
	 */
	if (cfq_slice_used(cfqq))
		goto expire;

	/*
	 * The active queue has requests and isn't expired, allow it to
	 * dispatch.
	 */
	if (!RB_EMPTY_ROOT(&cfqq->sort_list))
		goto keep_queue;

	/*
	 * No requests pending. If the active queue still has requests in
	 * flight or is idling for a new request, allow either of these
	 * conditions to happen (or time out) before selecting a new queue.
	 */
	if (timer_pending(&cfqd->idle_slice_timer) ||
	    (cfqq->dispatched && cfq_cfqq_idle_window(cfqq))) {
		cfqq = NULL;
		goto keep_queue;
	}

expire:
	cfq_slice_expired(cfqd, 0);
new_queue:
	cfqq = cfq_set_active_queue(cfqd);
keep_queue:
	return cfqq;
}
/*
 * Dispatch some requests from cfqq, moving them to the request queue
 * dispatch list.
 */
static int
__cfq_dispatch_requests(struct cfq_data *cfqd, struct cfq_queue *cfqq,
			int max_dispatch)
{
	int dispatched = 0;

	BUG_ON(RB_EMPTY_ROOT(&cfqq->sort_list));

	do {
		struct request *rq;

		/*
		 * follow expired path, else get first next available
		 */
		if ((rq = cfq_check_fifo(cfqq)) == NULL)
			rq = cfqq->next_rq;

		/*
		 * finally, insert request into driver dispatch list
		 */
		cfq_dispatch_insert(cfqd->queue, rq);

		dispatched++;

		if (!cfqd->active_cic) {
			atomic_inc(&RQ_CIC(rq)->ioc->refcount);
			cfqd->active_cic = RQ_CIC(rq);
		}

		if (RB_EMPTY_ROOT(&cfqq->sort_list))
			break;

	} while (dispatched < max_dispatch);

	/*
	 * expire an async queue immediately if it has used up its slice. idle
	 * queues always expire after 1 dispatch round.
	 */
	if (cfqd->busy_queues > 1 && ((!cfq_cfqq_sync(cfqq) &&
	    dispatched >= cfq_prio_to_maxrq(cfqd, cfqq)) ||
	    cfq_class_idle(cfqq))) {
		cfqq->slice_end = jiffies + 1;
		cfq_slice_expired(cfqd, 0);
	}

	return dispatched;
}
static inline int __cfq_forced_dispatch_cfqq(struct cfq_queue *cfqq)
{
	int dispatched = 0;

	while (cfqq->next_rq) {
		cfq_dispatch_insert(cfqq->cfqd->queue, cfqq->next_rq);
		dispatched++;
	}

	BUG_ON(!list_empty(&cfqq->fifo));
	return dispatched;
}
/*
 * Drain our current requests. Used for barriers and when switching
 * io schedulers on-the-fly.
 */
static int cfq_forced_dispatch(struct cfq_data *cfqd)
{
	int dispatched = 0;
	struct rb_node *n;

	while ((n = cfq_rb_first(&cfqd->service_tree)) != NULL) {
		struct cfq_queue *cfqq = rb_entry(n, struct cfq_queue, rb_node);

		dispatched += __cfq_forced_dispatch_cfqq(cfqq);
	}

	cfq_slice_expired(cfqd, 0);

	BUG_ON(cfqd->busy_queues);

	return dispatched;
}
static int cfq_dispatch_requests(request_queue_t *q, int force)
{
	struct cfq_data *cfqd = q->elevator->elevator_data;
	struct cfq_queue *cfqq;
	int dispatched;

	if (!cfqd->busy_queues)
		return 0;

	if (unlikely(force))
		return cfq_forced_dispatch(cfqd);

	dispatched = 0;
	while ((cfqq = cfq_select_queue(cfqd)) != NULL) {
		int max_dispatch;

		max_dispatch = cfqd->cfq_quantum;
		if (cfq_class_idle(cfqq))
			max_dispatch = 1;

		if (cfqq->dispatched >= max_dispatch) {
			if (cfqd->busy_queues > 1)
				break;
			if (cfqq->dispatched >= 4 * max_dispatch)
				break;
		}

		if (cfqd->sync_flight && !cfq_cfqq_sync(cfqq))
			break;

		cfq_clear_cfqq_must_dispatch(cfqq);
		cfq_clear_cfqq_wait_request(cfqq);
		del_timer(&cfqd->idle_slice_timer);

		dispatched += __cfq_dispatch_requests(cfqd, cfqq, max_dispatch);
	}

	return dispatched;
}
/*
 * task holds one reference to the queue, dropped when task exits. each rq
 * in-flight on this queue also holds a reference, dropped when rq is freed.
 *
 * queue lock must be held here.
 */
static void cfq_put_queue(struct cfq_queue *cfqq)
{
	struct cfq_data *cfqd = cfqq->cfqd;

	BUG_ON(atomic_read(&cfqq->ref) <= 0);

	if (!atomic_dec_and_test(&cfqq->ref))
		return;

	BUG_ON(rb_first(&cfqq->sort_list));
	BUG_ON(cfqq->allocated[READ] + cfqq->allocated[WRITE]);
	BUG_ON(cfq_cfqq_on_rr(cfqq));

	if (unlikely(cfqd->active_queue == cfqq)) {
		__cfq_slice_expired(cfqd, cfqq, 0);
		cfq_schedule_dispatch(cfqd);
	}

	kmem_cache_free(cfq_pool, cfqq);
}
static void cfq_free_io_context(struct io_context *ioc)
{
	struct cfq_io_context *__cic;
	struct rb_node *n;
	int freed = 0;

	ioc->ioc_data = NULL;

	while ((n = rb_first(&ioc->cic_root)) != NULL) {
		__cic = rb_entry(n, struct cfq_io_context, rb_node);
		rb_erase(&__cic->rb_node, &ioc->cic_root);
		kmem_cache_free(cfq_ioc_pool, __cic);
		freed++;
	}

	elv_ioc_count_mod(ioc_count, -freed);

	if (ioc_gone && !elv_ioc_count_read(ioc_count))
		complete(ioc_gone);
}
static void cfq_exit_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	if (unlikely(cfqq == cfqd->active_queue)) {
		__cfq_slice_expired(cfqd, cfqq, 0);
		cfq_schedule_dispatch(cfqd);
	}

	cfq_put_queue(cfqq);
}
static void __cfq_exit_single_io_context(struct cfq_data *cfqd,
					 struct cfq_io_context *cic)
{
	list_del_init(&cic->queue_list);
	smp_wmb();
	cic->key = NULL;

	if (cic->cfqq[ASYNC]) {
		cfq_exit_cfqq(cfqd, cic->cfqq[ASYNC]);
		cic->cfqq[ASYNC] = NULL;
	}

	if (cic->cfqq[SYNC]) {
		cfq_exit_cfqq(cfqd, cic->cfqq[SYNC]);
		cic->cfqq[SYNC] = NULL;
	}
}
static void cfq_exit_single_io_context(struct cfq_io_context *cic)
{
	struct cfq_data *cfqd = cic->key;

	if (cfqd) {
		request_queue_t *q = cfqd->queue;

		spin_lock_irq(q->queue_lock);
		__cfq_exit_single_io_context(cfqd, cic);
		spin_unlock_irq(q->queue_lock);
	}
}
/*
 * The process that ioc belongs to has exited; we need to clean up
 * and put the internal structures that belong to that process.
 */
static void cfq_exit_io_context(struct io_context *ioc)
{
	struct cfq_io_context *__cic;
	struct rb_node *n;

	ioc->ioc_data = NULL;

	/*
	 * put the reference this task is holding to the various queues
	 */
	n = rb_first(&ioc->cic_root);
	while (n != NULL) {
		__cic = rb_entry(n, struct cfq_io_context, rb_node);

		cfq_exit_single_io_context(__cic);
		n = rb_next(n);
	}
}
static struct cfq_io_context *
cfq_alloc_io_context(struct cfq_data *cfqd, gfp_t gfp_mask)
{
	struct cfq_io_context *cic;

	cic = kmem_cache_alloc_node(cfq_ioc_pool, gfp_mask | __GFP_ZERO,
				    cfqd->queue->node);
	if (cic) {
		cic->last_end_request = jiffies;
		INIT_LIST_HEAD(&cic->queue_list);
		cic->dtor = cfq_free_io_context;
		cic->exit = cfq_exit_io_context;
		elv_ioc_count_inc(ioc_count);
	}

	return cic;
}
static void cfq_init_prio_data(struct cfq_queue *cfqq)
{
	struct task_struct *tsk = current;
	int ioprio_class;

	if (!cfq_cfqq_prio_changed(cfqq))
		return;

	ioprio_class = IOPRIO_PRIO_CLASS(tsk->ioprio);
	switch (ioprio_class) {
	default:
		printk(KERN_ERR "cfq: bad prio %x\n", ioprio_class);
	case IOPRIO_CLASS_NONE:
		/*
		 * no prio set, place us in the middle of the BE classes
		 */
		cfqq->ioprio = task_nice_ioprio(tsk);
		cfqq->ioprio_class = IOPRIO_CLASS_BE;
		break;
	case IOPRIO_CLASS_RT:
		cfqq->ioprio = task_ioprio(tsk);
		cfqq->ioprio_class = IOPRIO_CLASS_RT;
		break;
	case IOPRIO_CLASS_BE:
		cfqq->ioprio = task_ioprio(tsk);
		cfqq->ioprio_class = IOPRIO_CLASS_BE;
		break;
	case IOPRIO_CLASS_IDLE:
		cfqq->ioprio_class = IOPRIO_CLASS_IDLE;
		cfqq->ioprio = 7;
		cfq_clear_cfqq_idle_window(cfqq);
		break;
	}

	/*
	 * keep track of original prio settings in case we have to temporarily
	 * elevate the priority of this queue
	 */
	cfqq->org_ioprio = cfqq->ioprio;
	cfqq->org_ioprio_class = cfqq->ioprio_class;
	cfq_clear_cfqq_prio_changed(cfqq);
}
static inline void changed_ioprio(struct cfq_io_context *cic)
{
	struct cfq_data *cfqd = cic->key;
	struct cfq_queue *cfqq;
	unsigned long flags;

	if (unlikely(!cfqd))
		return;

	spin_lock_irqsave(cfqd->queue->queue_lock, flags);

	cfqq = cic->cfqq[ASYNC];
	if (cfqq) {
		struct cfq_queue *new_cfqq;
		new_cfqq = cfq_get_queue(cfqd, ASYNC, cic->ioc->task,
					 GFP_ATOMIC);
		if (new_cfqq) {
			cic->cfqq[ASYNC] = new_cfqq;
			cfq_put_queue(cfqq);
		}
	}

	cfqq = cic->cfqq[SYNC];
	if (cfqq)
		cfq_mark_cfqq_prio_changed(cfqq);

	spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
}
static void cfq_ioc_set_ioprio(struct io_context *ioc)
{
	struct cfq_io_context *cic;
	struct rb_node *n;

	ioc->ioprio_changed = 0;

	n = rb_first(&ioc->cic_root);
	while (n != NULL) {
		cic = rb_entry(n, struct cfq_io_context, rb_node);

		changed_ioprio(cic);
		n = rb_next(n);
	}
}
static struct cfq_queue *
cfq_find_alloc_queue(struct cfq_data *cfqd, int is_sync,
		     struct task_struct *tsk, gfp_t gfp_mask)
{
	struct cfq_queue *cfqq, *new_cfqq = NULL;
	struct cfq_io_context *cic;

retry:
	cic = cfq_cic_rb_lookup(cfqd, tsk->io_context);
	/* cic always exists here */
	cfqq = cic_to_cfqq(cic, is_sync);

	if (!cfqq) {
		if (new_cfqq) {
			cfqq = new_cfqq;
			new_cfqq = NULL;
		} else if (gfp_mask & __GFP_WAIT) {
			/*
			 * Inform the allocator of the fact that we will
			 * just repeat this allocation if it fails, to allow
			 * the allocator to do whatever it needs to attempt to
			 * free memory first
			 */
			spin_unlock_irq(cfqd->queue->queue_lock);
			new_cfqq = kmem_cache_alloc_node(cfq_pool,
					gfp_mask | __GFP_NOFAIL | __GFP_ZERO,
					cfqd->queue->node);
			spin_lock_irq(cfqd->queue->queue_lock);
			goto retry;
		} else {
			cfqq = kmem_cache_alloc_node(cfq_pool,
					gfp_mask | __GFP_ZERO,
					cfqd->queue->node);
			if (!cfqq)
				goto out;
		}

		RB_CLEAR_NODE(&cfqq->rb_node);
		INIT_LIST_HEAD(&cfqq->fifo);

		atomic_set(&cfqq->ref, 0);
		cfqq->cfqd = cfqd;

		if (is_sync) {
			cfq_mark_cfqq_idle_window(cfqq);
			cfq_mark_cfqq_sync(cfqq);
		}

		cfq_mark_cfqq_prio_changed(cfqq);
		cfq_mark_cfqq_queue_new(cfqq);

		cfq_init_prio_data(cfqq);
	}

	if (new_cfqq)
		kmem_cache_free(cfq_pool, new_cfqq);

out:
	WARN_ON((gfp_mask & __GFP_WAIT) && !cfqq);
	return cfqq;
}
static struct cfq_queue **
cfq_async_queue_prio(struct cfq_data *cfqd, int ioprio_class, int ioprio)
{
	switch (ioprio_class) {
	case IOPRIO_CLASS_RT:
		return &cfqd->async_cfqq[0][ioprio];
	case IOPRIO_CLASS_BE:
		return &cfqd->async_cfqq[1][ioprio];
	case IOPRIO_CLASS_IDLE:
		return &cfqd->async_idle_cfqq;
	default:
		BUG();
	}
}
static struct cfq_queue *
cfq_get_queue(struct cfq_data *cfqd, int is_sync, struct task_struct *tsk,
	      gfp_t gfp_mask)
{
	const int ioprio = task_ioprio(tsk);
	const int ioprio_class = task_ioprio_class(tsk);
	struct cfq_queue **async_cfqq = NULL;
	struct cfq_queue *cfqq = NULL;

	if (!is_sync) {
		async_cfqq = cfq_async_queue_prio(cfqd, ioprio_class, ioprio);
		cfqq = *async_cfqq;
	}

	if (!cfqq) {
		cfqq = cfq_find_alloc_queue(cfqd, is_sync, tsk, gfp_mask);
		if (!cfqq)
			return NULL;
	}

	/*
	 * pin the queue now that it's allocated, scheduler exit will prune it
	 */
	if (!is_sync && !(*async_cfqq)) {
		atomic_inc(&cfqq->ref);
		*async_cfqq = cfqq;
	}

	atomic_inc(&cfqq->ref);
	return cfqq;
}
/*
 * We drop cfq io contexts lazily, so we may find a dead one.
 */
static void
cfq_drop_dead_cic(struct io_context *ioc, struct cfq_io_context *cic)
{
	WARN_ON(!list_empty(&cic->queue_list));

	if (ioc->ioc_data == cic)
		ioc->ioc_data = NULL;

	rb_erase(&cic->rb_node, &ioc->cic_root);
	kmem_cache_free(cfq_ioc_pool, cic);
	elv_ioc_count_dec(ioc_count);
}
static struct cfq_io_context *
cfq_cic_rb_lookup(struct cfq_data *cfqd, struct io_context *ioc)
{
	struct rb_node *n;
	struct cfq_io_context *cic;
	void *k, *key = cfqd;

	if (!ioc)
		return NULL;

restart:
	/*
	 * we maintain a last-hit cache, to avoid browsing over the tree
	 */
	cic = ioc->ioc_data;
	if (cic && cic->key == cfqd)
		return cic;

	n = ioc->cic_root.rb_node;
	while (n) {
		cic = rb_entry(n, struct cfq_io_context, rb_node);
		/* ->key must be copied to avoid race with cfq_exit_queue() */
		k = cic->key;
		if (unlikely(!k)) {
			cfq_drop_dead_cic(ioc, cic);
			goto restart;
		}

		if (key < k)
			n = n->rb_left;
		else if (key > k)
			n = n->rb_right;
		else {
			ioc->ioc_data = cic;
			return cic;
		}
	}

	return NULL;
}
static inline void
cfq_cic_link(struct cfq_data *cfqd, struct io_context *ioc,
	     struct cfq_io_context *cic)
{
	struct rb_node **p;
	struct rb_node *parent;
	struct cfq_io_context *__cic;
	unsigned long flags;
	void *k;

	cic->ioc = ioc;
	cic->key = cfqd;

restart:
	parent = NULL;
	p = &ioc->cic_root.rb_node;
	while (*p) {
		parent = *p;
		__cic = rb_entry(parent, struct cfq_io_context, rb_node);
		/* ->key must be copied to avoid race with cfq_exit_queue() */
		k = __cic->key;
		if (unlikely(!k)) {
			cfq_drop_dead_cic(ioc, __cic);
			goto restart;
		}

		if (cic->key < k)
			p = &(*p)->rb_left;
		else if (cic->key > k)
			p = &(*p)->rb_right;
		else
			BUG();
	}

	rb_link_node(&cic->rb_node, parent, p);
	rb_insert_color(&cic->rb_node, &ioc->cic_root);

	spin_lock_irqsave(cfqd->queue->queue_lock, flags);
	list_add(&cic->queue_list, &cfqd->cic_list);
	spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
}
/*
 * Set up the general io context and the cfq io context. There can be
 * several cfq io contexts per general io context, if this process is
 * doing io to more than one device managed by cfq.
 */
static struct cfq_io_context *
cfq_get_io_context(struct cfq_data *cfqd, gfp_t gfp_mask)
{
	struct io_context *ioc = NULL;
	struct cfq_io_context *cic;

	might_sleep_if(gfp_mask & __GFP_WAIT);

	ioc = get_io_context(gfp_mask, cfqd->queue->node);
	if (!ioc)
		return NULL;

	cic = cfq_cic_rb_lookup(cfqd, ioc);
	if (cic)
		goto out;

	cic = cfq_alloc_io_context(cfqd, gfp_mask);
	if (cic == NULL)
		goto err;

	cfq_cic_link(cfqd, ioc, cic);
out:
	smp_read_barrier_depends();
	if (unlikely(ioc->ioprio_changed))
		cfq_ioc_set_ioprio(ioc);

	return cic;
err:
	put_io_context(ioc);
	return NULL;
}
static void
cfq_update_io_thinktime(struct cfq_data *cfqd, struct cfq_io_context *cic)
{
	unsigned long elapsed = jiffies - cic->last_end_request;
	unsigned long ttime = min(elapsed, 2UL * cfqd->cfq_slice_idle);

	cic->ttime_samples = (7*cic->ttime_samples + 256) / 8;
	cic->ttime_total = (7*cic->ttime_total + 256*ttime) / 8;
	cic->ttime_mean = (cic->ttime_total + 128) / cic->ttime_samples;
}
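/*
 * Worked example (editorial): this fixed-point EWMA weights history
 * 7/8 and scales each sample by 256. From zero, ttime_samples moves
 * 0 -> 32 -> 60 -> 84, so sample_valid() (> 80) holds after three
 * completions; ttime_mean then approximates the mean think time in
 * jiffies.
 */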
static void
cfq_update_io_seektime(struct cfq_data *cfqd, struct cfq_io_context *cic,
		       struct request *rq)
{
	sector_t sdist;
	u64 total;

	if (cic->last_request_pos < rq->sector)
		sdist = rq->sector - cic->last_request_pos;
	else
		sdist = cic->last_request_pos - rq->sector;

	if (!cic->seek_samples) {
		cfqd->new_seek_total = (7*cic->seek_total + (u64)256*sdist) / 8;
		cfqd->new_seek_mean = cfqd->new_seek_total / 256;
	}

	/*
	 * Don't allow the seek distance to get too large from the
	 * odd fragment, pagein, etc
	 */
	if (cic->seek_samples <= 60) /* second & third seek */
		sdist = min(sdist, (cic->seek_mean * 4) + 2*1024*1024);
	else
		sdist = min(sdist, (cic->seek_mean * 4) + 2*1024*64);

	cic->seek_samples = (7*cic->seek_samples + 256) / 8;
	cic->seek_total = (7*cic->seek_total + (u64)256*sdist) / 8;
	total = cic->seek_total + (cic->seek_samples/2);
	do_div(total, cic->seek_samples);
	cic->seek_mean = (sector_t)total;
}
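/*
 * Worked example (editorial): with seek_mean near 1000 sectors, the
 * clamp above allows roughly 4 * 1000 + 2*1024*1024 sectors (~1 GiB)
 * for the second and third samples, but only 4 * 1000 + 2*1024*64
 * sectors (~66 MiB) once the history is established, so one huge seek
 * cannot dominate the average.
 */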
/*
 * Disable idle window if the process thinks too long or seeks so much that
 * it doesn't matter
 */
static void
cfq_update_idle_window(struct cfq_data *cfqd, struct cfq_queue *cfqq,
		       struct cfq_io_context *cic)
{
	int enable_idle;

	if (!cfq_cfqq_sync(cfqq))
		return;

	enable_idle = cfq_cfqq_idle_window(cfqq);

	if (!cic->ioc->task || !cfqd->cfq_slice_idle ||
	    (cfqd->hw_tag && CIC_SEEKY(cic)))
		enable_idle = 0;
	else if (sample_valid(cic->ttime_samples)) {
		if (cic->ttime_mean > cfqd->cfq_slice_idle)
			enable_idle = 0;
		else
			enable_idle = 1;
	}

	if (enable_idle)
		cfq_mark_cfqq_idle_window(cfqq);
	else
		cfq_clear_cfqq_idle_window(cfqq);
}
/*
 * Check if new_cfqq should preempt the currently active queue. Return 0 for
 * no (or if we aren't sure); 1 will cause a preempt.
 */
static int
cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq,
		   struct request *rq)
{
	struct cfq_queue *cfqq;

	cfqq = cfqd->active_queue;
	if (!cfqq)
		return 0;

	if (cfq_slice_used(cfqq))
		return 1;

	if (cfq_class_idle(new_cfqq))
		return 0;

	if (cfq_class_idle(cfqq))
		return 1;

	/*
	 * if the new request is sync, but the currently running queue is
	 * not, let the sync request have priority.
	 */
	if (rq_is_sync(rq) && !cfq_cfqq_sync(cfqq))
		return 1;

	/*
	 * So both queues are sync. Let the new request get disk time if
	 * it's a metadata request and the current queue is doing regular IO.
	 */
	if (rq_is_meta(rq) && !cfqq->meta_pending)
		return 1;

	if (!cfqd->active_cic || !cfq_cfqq_wait_request(cfqq))
		return 0;

	/*
	 * if this request is as good as one we would expect from the
	 * current cfqq, let it preempt
	 */
	if (cfq_rq_close(cfqd, rq))
		return 1;

	return 0;
}
/*
 * cfqq preempts the active queue. if we allowed preempt with no slice left,
 * let it have half of its nominal slice.
 */
static void cfq_preempt_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	cfq_slice_expired(cfqd, 1);

	/*
	 * Put the new queue at the front of the current list,
	 * so we know that it will be selected next.
	 */
	BUG_ON(!cfq_cfqq_on_rr(cfqq));

	cfq_service_tree_add(cfqd, cfqq, 1);

	cfqq->slice_end = 0;
	cfq_mark_cfqq_slice_new(cfqq);
}
/*
 * Called when a new fs request (rq) is added (to cfqq). Check if there's
 * something we should do about it.
 */
static void
cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
		struct request *rq)
{
	struct cfq_io_context *cic = RQ_CIC(rq);

	if (rq_is_meta(rq))
		cfqq->meta_pending++;

	cfq_update_io_thinktime(cfqd, cic);
	cfq_update_io_seektime(cfqd, cic, rq);
	cfq_update_idle_window(cfqd, cfqq, cic);

	cic->last_request_pos = rq->sector + rq->nr_sectors;
	cfqq->last_request_pos = cic->last_request_pos;

	if (cfqq == cfqd->active_queue) {
		/*
		 * if we are waiting for a request for this queue, let it rip
		 * immediately and flag that we must not expire this queue
		 * just yet
		 */
		if (cfq_cfqq_wait_request(cfqq)) {
			cfq_mark_cfqq_must_dispatch(cfqq);
			del_timer(&cfqd->idle_slice_timer);
			blk_start_queueing(cfqd->queue);
		}
	} else if (cfq_should_preempt(cfqd, cfqq, rq)) {
		/*
		 * not the active queue - expire current slice if it is
		 * idle and has expired its mean thinktime or this new queue
		 * has some old slice time left and is of higher priority
		 */
		cfq_preempt_queue(cfqd, cfqq);
		cfq_mark_cfqq_must_dispatch(cfqq);
		blk_start_queueing(cfqd->queue);
	}
}
static void cfq_insert_request(request_queue_t *q, struct request *rq)
{
	struct cfq_data *cfqd = q->elevator->elevator_data;
	struct cfq_queue *cfqq = RQ_CFQQ(rq);

	cfq_init_prio_data(cfqq);

	cfq_add_rq_rb(rq);

	list_add_tail(&rq->queuelist, &cfqq->fifo);

	cfq_rq_enqueued(cfqd, cfqq, rq);
}
static void cfq_completed_request(request_queue_t *q, struct request *rq)
{
	struct cfq_queue *cfqq = RQ_CFQQ(rq);
	struct cfq_data *cfqd = cfqq->cfqd;
	const int sync = rq_is_sync(rq);
	unsigned long now;

	now = jiffies;

	WARN_ON(!cfqd->rq_in_driver);
	WARN_ON(!cfqq->dispatched);
	cfqd->rq_in_driver--;
	cfqq->dispatched--;

	if (cfq_cfqq_sync(cfqq))
		cfqd->sync_flight--;

	if (!cfq_class_idle(cfqq))
		cfqd->last_end_request = now;

	if (sync)
		RQ_CIC(rq)->last_end_request = now;

	/*
	 * If this is the active queue, check if it needs to be expired,
	 * or if we want to idle in case it has no pending requests.
	 */
	if (cfqd->active_queue == cfqq) {
		if (cfq_cfqq_slice_new(cfqq)) {
			cfq_set_prio_slice(cfqd, cfqq);
			cfq_clear_cfqq_slice_new(cfqq);
		}
		if (cfq_slice_used(cfqq))
			cfq_slice_expired(cfqd, 1);
		else if (sync && RB_EMPTY_ROOT(&cfqq->sort_list))
			cfq_arm_slice_timer(cfqd);
	}

	if (!cfqd->rq_in_driver)
		cfq_schedule_dispatch(cfqd);
}
/*
 * we temporarily boost lower priority queues if they are holding fs exclusive
 * resources. they are boosted to normal prio (CLASS_BE/4)
 */
static void cfq_prio_boost(struct cfq_queue *cfqq)
{
	if (has_fs_excl()) {
		/*
		 * boost idle prio on transactions that would lock out other
		 * users of the filesystem
		 */
		if (cfq_class_idle(cfqq))
			cfqq->ioprio_class = IOPRIO_CLASS_BE;
		if (cfqq->ioprio > IOPRIO_NORM)
			cfqq->ioprio = IOPRIO_NORM;
	} else {
		/*
		 * check if we need to unboost the queue
		 */
		if (cfqq->ioprio_class != cfqq->org_ioprio_class)
			cfqq->ioprio_class = cfqq->org_ioprio_class;
		if (cfqq->ioprio != cfqq->org_ioprio)
			cfqq->ioprio = cfqq->org_ioprio;
	}
}
static inline int __cfq_may_queue(struct cfq_queue *cfqq)
{
	if ((cfq_cfqq_wait_request(cfqq) || cfq_cfqq_must_alloc(cfqq)) &&
	    !cfq_cfqq_must_alloc_slice(cfqq)) {
		cfq_mark_cfqq_must_alloc_slice(cfqq);
		return ELV_MQUEUE_MUST;
	}

	return ELV_MQUEUE_MAY;
}
static int cfq_may_queue(request_queue_t *q, int rw)
{
	struct cfq_data *cfqd = q->elevator->elevator_data;
	struct task_struct *tsk = current;
	struct cfq_io_context *cic;
	struct cfq_queue *cfqq;

	/*
	 * don't force setup of a queue from here, as a call to may_queue
	 * does not necessarily imply that a request actually will be queued.
	 * so just look up a possibly existing queue, or return 'may queue'
	 * if that fails
	 */
	cic = cfq_cic_rb_lookup(cfqd, tsk->io_context);
	if (!cic)
		return ELV_MQUEUE_MAY;

	cfqq = cic_to_cfqq(cic, rw & REQ_RW_SYNC);
	if (cfqq) {
		cfq_init_prio_data(cfqq);
		cfq_prio_boost(cfqq);

		return __cfq_may_queue(cfqq);
	}

	return ELV_MQUEUE_MAY;
}
/*
 * queue lock held here
 */
static void cfq_put_request(struct request *rq)
{
	struct cfq_queue *cfqq = RQ_CFQQ(rq);

	if (cfqq) {
		const int rw = rq_data_dir(rq);

		BUG_ON(!cfqq->allocated[rw]);
		cfqq->allocated[rw]--;

		put_io_context(RQ_CIC(rq)->ioc);

		rq->elevator_private = NULL;
		rq->elevator_private2 = NULL;

		cfq_put_queue(cfqq);
	}
}
/*
 * Allocate cfq data structures associated with this request.
 */
static int
cfq_set_request(request_queue_t *q, struct request *rq, gfp_t gfp_mask)
{
	struct cfq_data *cfqd = q->elevator->elevator_data;
	struct task_struct *tsk = current;
	struct cfq_io_context *cic;
	const int rw = rq_data_dir(rq);
	const int is_sync = rq_is_sync(rq);
	struct cfq_queue *cfqq;
	unsigned long flags;

	might_sleep_if(gfp_mask & __GFP_WAIT);

	cic = cfq_get_io_context(cfqd, gfp_mask);

	spin_lock_irqsave(q->queue_lock, flags);

	if (!cic)
		goto queue_fail;

	cfqq = cic_to_cfqq(cic, is_sync);
	if (!cfqq) {
		cfqq = cfq_get_queue(cfqd, is_sync, tsk, gfp_mask);

		if (!cfqq)
			goto queue_fail;

		cic_set_cfqq(cic, cfqq, is_sync);
	}

	cfqq->allocated[rw]++;
	cfq_clear_cfqq_must_alloc(cfqq);
	atomic_inc(&cfqq->ref);

	spin_unlock_irqrestore(q->queue_lock, flags);

	rq->elevator_private = cic;
	rq->elevator_private2 = cfqq;
	return 0;

queue_fail:
	if (cic)
		put_io_context(cic->ioc);

	cfq_schedule_dispatch(cfqd);
	spin_unlock_irqrestore(q->queue_lock, flags);
	return 1;
}
static void cfq_kick_queue(struct work_struct *work)
{
	struct cfq_data *cfqd =
		container_of(work, struct cfq_data, unplug_work);
	request_queue_t *q = cfqd->queue;
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	blk_start_queueing(q);
	spin_unlock_irqrestore(q->queue_lock, flags);
}
/*
 * Timer running if the active_queue is currently idling inside its time slice
 */
static void cfq_idle_slice_timer(unsigned long data)
{
	struct cfq_data *cfqd = (struct cfq_data *) data;
	struct cfq_queue *cfqq;
	unsigned long flags;
	int timed_out = 1;

	spin_lock_irqsave(cfqd->queue->queue_lock, flags);

	if ((cfqq = cfqd->active_queue) != NULL) {
		timed_out = 0;

		/*
		 * expired
		 */
		if (cfq_slice_used(cfqq))
			goto expire;

		/*
		 * only expire and reinvoke request handler if there are
		 * other queues with pending requests
		 */
		if (!cfqd->busy_queues)
			goto out_cont;

		/*
		 * not expired and it has a request pending, let it dispatch
		 */
		if (!RB_EMPTY_ROOT(&cfqq->sort_list)) {
			cfq_mark_cfqq_must_dispatch(cfqq);
			goto out_kick;
		}
	}
expire:
	cfq_slice_expired(cfqd, timed_out);
out_kick:
	cfq_schedule_dispatch(cfqd);
out_cont:
	spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
}
/*
 * Timer running if an idle class queue is waiting for service
 */
static void cfq_idle_class_timer(unsigned long data)
{
	struct cfq_data *cfqd = (struct cfq_data *) data;
	unsigned long flags, end;

	spin_lock_irqsave(cfqd->queue->queue_lock, flags);

	/*
	 * race with a non-idle queue, reset timer
	 */
	end = cfqd->last_end_request + CFQ_IDLE_GRACE;
	if (!time_after_eq(jiffies, end))
		mod_timer(&cfqd->idle_class_timer, end);
	else
		cfq_schedule_dispatch(cfqd);

	spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
}
static void cfq_shutdown_timer_wq(struct cfq_data *cfqd)
{
	del_timer_sync(&cfqd->idle_slice_timer);
	del_timer_sync(&cfqd->idle_class_timer);
	blk_sync_queue(cfqd->queue);
}
static void cfq_put_async_queues(struct cfq_data *cfqd)
{
	int i;

	for (i = 0; i < IOPRIO_BE_NR; i++) {
		if (cfqd->async_cfqq[0][i])
			cfq_put_queue(cfqd->async_cfqq[0][i]);
		if (cfqd->async_cfqq[1][i])
			cfq_put_queue(cfqd->async_cfqq[1][i]);
	}

	/*
	 * async_idle_cfqq holds a single reference, so put it once,
	 * outside the loop.
	 */
	if (cfqd->async_idle_cfqq)
		cfq_put_queue(cfqd->async_idle_cfqq);
}
static void cfq_exit_queue(elevator_t *e)
{
	struct cfq_data *cfqd = e->elevator_data;
	request_queue_t *q = cfqd->queue;

	cfq_shutdown_timer_wq(cfqd);

	spin_lock_irq(q->queue_lock);

	if (cfqd->active_queue)
		__cfq_slice_expired(cfqd, cfqd->active_queue, 0);

	while (!list_empty(&cfqd->cic_list)) {
		struct cfq_io_context *cic = list_entry(cfqd->cic_list.next,
							struct cfq_io_context,
							queue_list);

		__cfq_exit_single_io_context(cfqd, cic);
	}

	cfq_put_async_queues(cfqd);

	spin_unlock_irq(q->queue_lock);

	cfq_shutdown_timer_wq(cfqd);

	kfree(cfqd);
}
static void *cfq_init_queue(request_queue_t *q)
{
	struct cfq_data *cfqd;

	cfqd = kmalloc_node(sizeof(*cfqd), GFP_KERNEL | __GFP_ZERO, q->node);
	if (!cfqd)
		return NULL;

	cfqd->service_tree = CFQ_RB_ROOT;
	INIT_LIST_HEAD(&cfqd->cic_list);

	cfqd->queue = q;

	init_timer(&cfqd->idle_slice_timer);
	cfqd->idle_slice_timer.function = cfq_idle_slice_timer;
	cfqd->idle_slice_timer.data = (unsigned long) cfqd;

	init_timer(&cfqd->idle_class_timer);
	cfqd->idle_class_timer.function = cfq_idle_class_timer;
	cfqd->idle_class_timer.data = (unsigned long) cfqd;

	INIT_WORK(&cfqd->unplug_work, cfq_kick_queue);

	cfqd->cfq_quantum = cfq_quantum;
	cfqd->cfq_fifo_expire[0] = cfq_fifo_expire[0];
	cfqd->cfq_fifo_expire[1] = cfq_fifo_expire[1];
	cfqd->cfq_back_max = cfq_back_max;
	cfqd->cfq_back_penalty = cfq_back_penalty;
	cfqd->cfq_slice[0] = cfq_slice_async;
	cfqd->cfq_slice[1] = cfq_slice_sync;
	cfqd->cfq_slice_async_rq = cfq_slice_async_rq;
	cfqd->cfq_slice_idle = cfq_slice_idle;

	return cfqd;
}
static void cfq_slab_kill(void)
{
	if (cfq_pool)
		kmem_cache_destroy(cfq_pool);
	if (cfq_ioc_pool)
		kmem_cache_destroy(cfq_ioc_pool);
}

static int __init cfq_slab_setup(void)
{
	cfq_pool = KMEM_CACHE(cfq_queue, 0);
	if (!cfq_pool)
		goto fail;

	cfq_ioc_pool = KMEM_CACHE(cfq_io_context, 0);
	if (!cfq_ioc_pool)
		goto fail;

	return 0;
fail:
	cfq_slab_kill();
	return -ENOMEM;
}
/*
 * sysfs parts below -->
 */
static ssize_t
cfq_var_show(unsigned int var, char *page)
{
	return sprintf(page, "%d\n", var);
}

static ssize_t
cfq_var_store(unsigned int *var, const char *page, size_t count)
{
	char *p = (char *) page;

	*var = simple_strtoul(p, &p, 10);
	return count;
}
#define SHOW_FUNCTION(__FUNC, __VAR, __CONV)				\
static ssize_t __FUNC(elevator_t *e, char *page)			\
{									\
	struct cfq_data *cfqd = e->elevator_data;			\
	unsigned int __data = __VAR;					\
	if (__CONV)							\
		__data = jiffies_to_msecs(__data);			\
	return cfq_var_show(__data, (page));				\
}
SHOW_FUNCTION(cfq_quantum_show, cfqd->cfq_quantum, 0);
SHOW_FUNCTION(cfq_fifo_expire_sync_show, cfqd->cfq_fifo_expire[1], 1);
SHOW_FUNCTION(cfq_fifo_expire_async_show, cfqd->cfq_fifo_expire[0], 1);
SHOW_FUNCTION(cfq_back_seek_max_show, cfqd->cfq_back_max, 0);
SHOW_FUNCTION(cfq_back_seek_penalty_show, cfqd->cfq_back_penalty, 0);
SHOW_FUNCTION(cfq_slice_idle_show, cfqd->cfq_slice_idle, 1);
SHOW_FUNCTION(cfq_slice_sync_show, cfqd->cfq_slice[1], 1);
SHOW_FUNCTION(cfq_slice_async_show, cfqd->cfq_slice[0], 1);
SHOW_FUNCTION(cfq_slice_async_rq_show, cfqd->cfq_slice_async_rq, 0);
#undef SHOW_FUNCTION
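/*
 * Usage sketch (editorial; the device name is an assumption): these
 * attributes appear under /sys/block/<dev>/queue/iosched/, e.g.
 *
 *	# cat /sys/block/sda/queue/iosched/slice_sync
 *	100
 *	# echo 120 > /sys/block/sda/queue/iosched/slice_sync
 *
 * Attributes declared with __CONV == 1 are shown and stored in
 * milliseconds.
 */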
#define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV)			\
static ssize_t __FUNC(elevator_t *e, const char *page, size_t count)	\
{									\
	struct cfq_data *cfqd = e->elevator_data;			\
	unsigned int __data;						\
	int ret = cfq_var_store(&__data, (page), count);		\
	if (__data < (MIN))						\
		__data = (MIN);						\
	else if (__data > (MAX))					\
		__data = (MAX);						\
	if (__CONV)							\
		*(__PTR) = msecs_to_jiffies(__data);			\
	else								\
		*(__PTR) = __data;					\
	return ret;							\
}
STORE_FUNCTION(cfq_quantum_store, &cfqd->cfq_quantum, 1, UINT_MAX, 0);
STORE_FUNCTION(cfq_fifo_expire_sync_store, &cfqd->cfq_fifo_expire[1], 1,
		UINT_MAX, 1);
STORE_FUNCTION(cfq_fifo_expire_async_store, &cfqd->cfq_fifo_expire[0], 1,
		UINT_MAX, 1);
STORE_FUNCTION(cfq_back_seek_max_store, &cfqd->cfq_back_max, 0, UINT_MAX, 0);
STORE_FUNCTION(cfq_back_seek_penalty_store, &cfqd->cfq_back_penalty, 1,
		UINT_MAX, 0);
STORE_FUNCTION(cfq_slice_idle_store, &cfqd->cfq_slice_idle, 0, UINT_MAX, 1);
STORE_FUNCTION(cfq_slice_sync_store, &cfqd->cfq_slice[1], 1, UINT_MAX, 1);
STORE_FUNCTION(cfq_slice_async_store, &cfqd->cfq_slice[0], 1, UINT_MAX, 1);
STORE_FUNCTION(cfq_slice_async_rq_store, &cfqd->cfq_slice_async_rq, 1,
		UINT_MAX, 0);
#undef STORE_FUNCTION
#define CFQ_ATTR(name) \
	__ATTR(name, S_IRUGO|S_IWUSR, cfq_##name##_show, cfq_##name##_store)

static struct elv_fs_entry cfq_attrs[] = {
	CFQ_ATTR(quantum),
	CFQ_ATTR(fifo_expire_sync),
	CFQ_ATTR(fifo_expire_async),
	CFQ_ATTR(back_seek_max),
	CFQ_ATTR(back_seek_penalty),
	CFQ_ATTR(slice_sync),
	CFQ_ATTR(slice_async),
	CFQ_ATTR(slice_async_rq),
	CFQ_ATTR(slice_idle),
	__ATTR_NULL
};
static struct elevator_type iosched_cfq = {
	.ops = {
		.elevator_merge_fn =		cfq_merge,
		.elevator_merged_fn =		cfq_merged_request,
		.elevator_merge_req_fn =	cfq_merged_requests,
		.elevator_allow_merge_fn =	cfq_allow_merge,
		.elevator_dispatch_fn =		cfq_dispatch_requests,
		.elevator_add_req_fn =		cfq_insert_request,
		.elevator_activate_req_fn =	cfq_activate_request,
		.elevator_deactivate_req_fn =	cfq_deactivate_request,
		.elevator_queue_empty_fn =	cfq_queue_empty,
		.elevator_completed_req_fn =	cfq_completed_request,
		.elevator_former_req_fn =	elv_rb_former_request,
		.elevator_latter_req_fn =	elv_rb_latter_request,
		.elevator_set_req_fn =		cfq_set_request,
		.elevator_put_req_fn =		cfq_put_request,
		.elevator_may_queue_fn =	cfq_may_queue,
		.elevator_init_fn =		cfq_init_queue,
		.elevator_exit_fn =		cfq_exit_queue,
		.trim =				cfq_free_io_context,
	},
	.elevator_attrs =	cfq_attrs,
	.elevator_name =	"cfq",
	.elevator_owner =	THIS_MODULE,
};
static int __init cfq_init(void)
{
	int ret;

	/*
	 * could be 0 on HZ < 1000 setups
	 */
	if (!cfq_slice_async)
		cfq_slice_async = 1;
	if (!cfq_slice_idle)
		cfq_slice_idle = 1;

	if (cfq_slab_setup())
		return -ENOMEM;

	ret = elv_register(&iosched_cfq);
	if (ret)
		cfq_slab_kill();

	return ret;
}
2301 static void __exit cfq_exit(void)
2303 DECLARE_COMPLETION_ONSTACK(all_gone);
2304 elv_unregister(&iosched_cfq);
2305 ioc_gone = &all_gone;
2306 /* ioc_gone's update must be visible before reading ioc_count */
2308 if (elv_ioc_count_read(ioc_count))
2309 wait_for_completion(ioc_gone);
2314 module_init(cfq_init);
2315 module_exit(cfq_exit);
2317 MODULE_AUTHOR("Jens Axboe");
2318 MODULE_LICENSE("GPL");
2319 MODULE_DESCRIPTION("Completely Fair Queueing IO scheduler");