/*
 * Interface for controlling IO bandwidth on a request queue
 *
 * Copyright (C) 2010 Vivek Goyal <vgoyal@redhat.com>
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/bio.h>
#include <linux/blktrace_api.h>
#include "blk-cgroup.h"
#include "blk.h"

/* Max dispatch from a group in 1 round */
static int throtl_grp_quantum = 8;

/* Total max dispatch from all groups in one round */
static int throtl_quantum = 32;

/* Throttling is performed over a 100ms slice and after that slice is renewed */
static unsigned long throtl_slice = HZ/10;	/* 100 ms */

static struct blkcg_policy blkcg_policy_throtl;

/* A workqueue to queue throttle related work */
static struct workqueue_struct *kthrotld_workqueue;

struct throtl_service_queue {
	struct throtl_service_queue *parent_sq;	/* the parent service_queue */

	/*
	 * Bios queued directly to this service_queue or dispatched from
	 * children throtl_grp's.
	 */
	struct bio_list bio_lists[2];	/* queued bios [READ/WRITE] */
	unsigned int nr_queued[2];	/* number of queued bios */

	/*
	 * RB tree of active children throtl_grp's, which are sorted by
	 * their ->disptime.
	 */
	struct rb_root pending_tree;	/* RB tree of active tgs */
	struct rb_node *first_pending;	/* first node in the tree */
	unsigned int nr_pending;	/* # queued in the tree */
	unsigned long first_pending_disptime;	/* disptime of the first tg */
};

enum tg_state_flags {
	THROTL_TG_PENDING	= 1 << 0,	/* on parent's pending tree */
	THROTL_TG_WAS_EMPTY	= 1 << 1,	/* bio_lists[] became non-empty */
};

#define rb_entry_tg(node)	rb_entry((node), struct throtl_grp, rb_node)

/* Per-cpu group stats */
struct tg_stats_cpu {
	/* total bytes transferred */
	struct blkg_rwstat service_bytes;
	/* total IOs serviced, post merge */
	struct blkg_rwstat serviced;
};

struct throtl_grp {
	/* must be the first member */
	struct blkg_policy_data pd;

	/* active throtl group service_queue member */
	struct rb_node rb_node;

	/* throtl_data this group belongs to */
	struct throtl_data *td;

	/* this group's service queue */
	struct throtl_service_queue service_queue;

	/*
	 * Dispatch time in jiffies. This is the estimated time when the group
	 * will unthrottle and is ready to dispatch more bios. It is used as a
	 * key to sort active groups in the service tree.
	 */
	unsigned long disptime;

	unsigned long flags;

	/* bytes per second rate limits */
	uint64_t bps[2];

	/* IOPS limits */
	unsigned int iops[2];

	/* Number of bytes dispatched in current slice */
	uint64_t bytes_disp[2];
	/* Number of bios dispatched in current slice */
	unsigned int io_disp[2];

	/* When did we start a new slice */
	unsigned long slice_start[2];
	unsigned long slice_end[2];

	/* Per cpu stats pointer */
	struct tg_stats_cpu __percpu *stats_cpu;

	/* List of tgs waiting for per cpu stats memory to be allocated */
	struct list_head stats_alloc_node;
};

struct throtl_data
{
	/* service tree for active throtl groups */
	struct throtl_service_queue service_queue;

	struct request_queue *queue;

	/* Total number of queued bios on READ and WRITE lists */
	unsigned int nr_queued[2];

	/*
	 * number of total undestroyed groups
	 */
	unsigned int nr_undestroyed_grps;

	/* Work for dispatching throttled bios */
	struct delayed_work dispatch_work;
};

/* list and work item to allocate percpu group stats */
static DEFINE_SPINLOCK(tg_stats_alloc_lock);
static LIST_HEAD(tg_stats_alloc_list);

static void tg_stats_alloc_fn(struct work_struct *);
static DECLARE_DELAYED_WORK(tg_stats_alloc_work, tg_stats_alloc_fn);

static inline struct throtl_grp *pd_to_tg(struct blkg_policy_data *pd)
{
	return pd ? container_of(pd, struct throtl_grp, pd) : NULL;
}

static inline struct throtl_grp *blkg_to_tg(struct blkcg_gq *blkg)
{
	return pd_to_tg(blkg_to_pd(blkg, &blkcg_policy_throtl));
}

static inline struct blkcg_gq *tg_to_blkg(struct throtl_grp *tg)
{
	return pd_to_blkg(&tg->pd);
}

static inline struct throtl_grp *td_root_tg(struct throtl_data *td)
{
	return blkg_to_tg(td->queue->root_blkg);
}

#define throtl_log_tg(tg, fmt, args...)	do {				\
	char __pbuf[128];						\
									\
	blkg_path(tg_to_blkg(tg), __pbuf, sizeof(__pbuf));		\
	blk_add_trace_msg((tg)->td->queue, "throtl %s " fmt, __pbuf, ##args); \
} while (0)

#define throtl_log(td, fmt, args...)	\
	blk_add_trace_msg((td)->queue, "throtl " fmt, ##args)

/*
 * Worker for allocating per cpu stats for tgs. This is scheduled on the
 * system_wq once there are some groups on the alloc_list waiting for
 * allocation.
 */
static void tg_stats_alloc_fn(struct work_struct *work)
{
	static struct tg_stats_cpu *stats_cpu;	/* this fn is non-reentrant */
	struct delayed_work *dwork = to_delayed_work(work);
	bool empty = false;

alloc_stats:
	if (!stats_cpu) {
		stats_cpu = alloc_percpu(struct tg_stats_cpu);
		if (!stats_cpu) {
			/* allocation failed, try again after some time */
			schedule_delayed_work(dwork, msecs_to_jiffies(10));
			return;
		}
	}

	spin_lock_irq(&tg_stats_alloc_lock);

	if (!list_empty(&tg_stats_alloc_list)) {
		struct throtl_grp *tg = list_first_entry(&tg_stats_alloc_list,
							 struct throtl_grp,
							 stats_alloc_node);
		swap(tg->stats_cpu, stats_cpu);
		list_del_init(&tg->stats_alloc_node);
	}

	empty = list_empty(&tg_stats_alloc_list);
	spin_unlock_irq(&tg_stats_alloc_lock);
	if (!empty)
		goto alloc_stats;
}

/* init a service_queue, assumes the caller zeroed it */
static void throtl_service_queue_init(struct throtl_service_queue *sq,
				      struct throtl_service_queue *parent_sq)
{
	bio_list_init(&sq->bio_lists[0]);
	bio_list_init(&sq->bio_lists[1]);
	sq->pending_tree = RB_ROOT;
	sq->parent_sq = parent_sq;
}

static void throtl_pd_init(struct blkcg_gq *blkg)
{
	struct throtl_grp *tg = blkg_to_tg(blkg);
	struct throtl_data *td = blkg->q->td;
	unsigned long flags;

	throtl_service_queue_init(&tg->service_queue, &td->service_queue);
	RB_CLEAR_NODE(&tg->rb_node);
	tg->td = td;

	tg->bps[READ] = -1;
	tg->bps[WRITE] = -1;
	tg->iops[READ] = -1;
	tg->iops[WRITE] = -1;

	/*
	 * Ugh... We need to perform per-cpu allocation for tg->stats_cpu
	 * but the percpu allocator can't be called from the IO path. Queue
	 * tg on tg_stats_alloc_list and allocate from the work item.
	 */
	spin_lock_irqsave(&tg_stats_alloc_lock, flags);
	list_add(&tg->stats_alloc_node, &tg_stats_alloc_list);
	schedule_delayed_work(&tg_stats_alloc_work, 0);
	spin_unlock_irqrestore(&tg_stats_alloc_lock, flags);
}

static void throtl_pd_exit(struct blkcg_gq *blkg)
{
	struct throtl_grp *tg = blkg_to_tg(blkg);
	unsigned long flags;

	spin_lock_irqsave(&tg_stats_alloc_lock, flags);
	list_del_init(&tg->stats_alloc_node);
	spin_unlock_irqrestore(&tg_stats_alloc_lock, flags);

	free_percpu(tg->stats_cpu);
}

static void throtl_pd_reset_stats(struct blkcg_gq *blkg)
{
	struct throtl_grp *tg = blkg_to_tg(blkg);
	int cpu;

	if (tg->stats_cpu == NULL)
		return;

	for_each_possible_cpu(cpu) {
		struct tg_stats_cpu *sc = per_cpu_ptr(tg->stats_cpu, cpu);

		blkg_rwstat_reset(&sc->service_bytes);
		blkg_rwstat_reset(&sc->serviced);
	}
}

static struct throtl_grp *throtl_lookup_tg(struct throtl_data *td,
					   struct blkcg *blkcg)
{
	/*
	 * This is the common case when there are no blkcgs. Avoid lookup
	 * in this case
	 */
	if (blkcg == &blkcg_root)
		return td_root_tg(td);

	return blkg_to_tg(blkg_lookup(blkcg, td->queue));
}

static struct throtl_grp *throtl_lookup_create_tg(struct throtl_data *td,
						  struct blkcg *blkcg)
{
	struct request_queue *q = td->queue;
	struct throtl_grp *tg = NULL;

	/*
	 * This is the common case when there are no blkcgs. Avoid lookup
	 * in this case
	 */
	if (blkcg == &blkcg_root) {
		tg = td_root_tg(td);
	} else {
		struct blkcg_gq *blkg;

		blkg = blkg_lookup_create(blkcg, q);

		/* if %NULL and @q is alive, fall back to root_tg */
		if (!IS_ERR(blkg))
			tg = blkg_to_tg(blkg);
		else if (!blk_queue_dying(q))
			tg = td_root_tg(td);
	}

	return tg;
}

static struct throtl_grp *
throtl_rb_first(struct throtl_service_queue *parent_sq)
{
	/* Service tree is empty */
	if (!parent_sq->nr_pending)
		return NULL;

	if (!parent_sq->first_pending)
		parent_sq->first_pending = rb_first(&parent_sq->pending_tree);

	if (parent_sq->first_pending)
		return rb_entry_tg(parent_sq->first_pending);

	return NULL;
}

static void rb_erase_init(struct rb_node *n, struct rb_root *root)
{
	rb_erase(n, root);
	RB_CLEAR_NODE(n);
}

static void throtl_rb_erase(struct rb_node *n,
			    struct throtl_service_queue *parent_sq)
{
	if (parent_sq->first_pending == n)
		parent_sq->first_pending = NULL;
	rb_erase_init(n, &parent_sq->pending_tree);
	--parent_sq->nr_pending;
}

static void update_min_dispatch_time(struct throtl_service_queue *parent_sq)
{
	struct throtl_grp *tg;

	tg = throtl_rb_first(parent_sq);
	if (!tg)
		return;

	parent_sq->first_pending_disptime = tg->disptime;
}

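/*
 * Insert @tg into its parent's pending_tree, keyed by ->disptime.  Groups
 * are kept sorted so that the one due to dispatch earliest is the leftmost
 * node, which parent_sq->first_pending caches.
 */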
static void tg_service_queue_add(struct throtl_grp *tg)
{
	struct throtl_service_queue *parent_sq = tg->service_queue.parent_sq;
	struct rb_node **node = &parent_sq->pending_tree.rb_node;
	struct rb_node *parent = NULL;
	struct throtl_grp *__tg;
	unsigned long key = tg->disptime;
	int left = 1;

	while (*node != NULL) {
		parent = *node;
		__tg = rb_entry_tg(parent);

		if (time_before(key, __tg->disptime))
			node = &parent->rb_left;
		else {
			node = &parent->rb_right;
			left = 0;
		}
	}

	if (left)
		parent_sq->first_pending = &tg->rb_node;

	rb_link_node(&tg->rb_node, parent, node);
	rb_insert_color(&tg->rb_node, &parent_sq->pending_tree);
}

static void __throtl_enqueue_tg(struct throtl_grp *tg)
{
	tg_service_queue_add(tg);
	tg->flags |= THROTL_TG_PENDING;
	tg->service_queue.parent_sq->nr_pending++;
}

static void throtl_enqueue_tg(struct throtl_grp *tg)
{
	if (!(tg->flags & THROTL_TG_PENDING))
		__throtl_enqueue_tg(tg);
}

static void __throtl_dequeue_tg(struct throtl_grp *tg)
{
	throtl_rb_erase(&tg->rb_node, tg->service_queue.parent_sq);
	tg->flags &= ~THROTL_TG_PENDING;
}

static void throtl_dequeue_tg(struct throtl_grp *tg)
{
	if (tg->flags & THROTL_TG_PENDING)
		__throtl_dequeue_tg(tg);
}

/* Call with queue lock held */
static void throtl_schedule_delayed_work(struct throtl_data *td,
					 unsigned long delay)
{
	struct delayed_work *dwork = &td->dispatch_work;

	mod_delayed_work(kthrotld_workqueue, dwork, delay);
	throtl_log(td, "schedule work. delay=%lu jiffies=%lu", delay, jiffies);
}

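/*
 * (Re)arm the dispatch work to fire at the earliest pending ->disptime,
 * or immediately if that time has already passed.  Called with queue
 * lock held.
 */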
static void throtl_schedule_next_dispatch(struct throtl_data *td)
{
	struct throtl_service_queue *sq = &td->service_queue;

	/* any pending children left? */
	if (!sq->nr_pending)
		return;

	update_min_dispatch_time(sq);

	if (time_before_eq(sq->first_pending_disptime, jiffies))
		throtl_schedule_delayed_work(td, 0);
	else
		throtl_schedule_delayed_work(td, sq->first_pending_disptime - jiffies);
}

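/*
 * Begin a fresh accounting slice for @rw: zero the dispatched byte/io
 * counters and arm a [jiffies, jiffies + throtl_slice) window.
 */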
static inline void throtl_start_new_slice(struct throtl_grp *tg, bool rw)
{
	tg->bytes_disp[rw] = 0;
	tg->io_disp[rw] = 0;
	tg->slice_start[rw] = jiffies;
	tg->slice_end[rw] = jiffies + throtl_slice;
	throtl_log_tg(tg, "[%c] new slice start=%lu end=%lu jiffies=%lu",
			rw == READ ? 'R' : 'W', tg->slice_start[rw],
			tg->slice_end[rw], jiffies);
}

static inline void throtl_set_slice_end(struct throtl_grp *tg, bool rw,
					unsigned long jiffy_end)
{
	tg->slice_end[rw] = roundup(jiffy_end, throtl_slice);
}

static inline void throtl_extend_slice(struct throtl_grp *tg, bool rw,
				       unsigned long jiffy_end)
{
	tg->slice_end[rw] = roundup(jiffy_end, throtl_slice);
	throtl_log_tg(tg, "[%c] extend slice start=%lu end=%lu jiffies=%lu",
			rw == READ ? 'R' : 'W', tg->slice_start[rw],
			tg->slice_end[rw], jiffies);
}

/* Determine if the previously allocated or extended slice is complete or not */
static bool throtl_slice_used(struct throtl_grp *tg, bool rw)
{
	if (time_in_range(jiffies, tg->slice_start[rw], tg->slice_end[rw]))
		return false;

	return true;
}

/* Trim the used slices and adjust slice start accordingly */
static inline void throtl_trim_slice(struct throtl_grp *tg, bool rw)
{
	unsigned long nr_slices, time_elapsed, io_trim;
	u64 bytes_trim, tmp;

	BUG_ON(time_before(tg->slice_end[rw], tg->slice_start[rw]));

	/*
	 * If bps are unlimited (-1), then time slices don't get
	 * renewed. Don't try to trim the slice if the slice is used. A new
	 * slice will start when appropriate.
	 */
	if (throtl_slice_used(tg, rw))
		return;

	/*
	 * A bio has been dispatched. Also adjust slice_end. It might happen
	 * that initially the cgroup limit was very low resulting in a high
	 * slice_end, but later the limit was bumped up and the bio was
	 * dispatched sooner, then we need to reduce slice_end. A high bogus
	 * slice_end is bad because it does not allow a new slice to start.
	 */

	throtl_set_slice_end(tg, rw, jiffies + throtl_slice);

	time_elapsed = jiffies - tg->slice_start[rw];

	nr_slices = time_elapsed / throtl_slice;

	if (!nr_slices)
		return;
	tmp = tg->bps[rw] * throtl_slice * nr_slices;
	do_div(tmp, HZ);
	bytes_trim = tmp;

	io_trim = (tg->iops[rw] * throtl_slice * nr_slices)/HZ;

	if (!bytes_trim && !io_trim)
		return;

	if (tg->bytes_disp[rw] >= bytes_trim)
		tg->bytes_disp[rw] -= bytes_trim;
	else
		tg->bytes_disp[rw] = 0;

	if (tg->io_disp[rw] >= io_trim)
		tg->io_disp[rw] -= io_trim;
	else
		tg->io_disp[rw] = 0;

	tg->slice_start[rw] += nr_slices * throtl_slice;

	throtl_log_tg(tg, "[%c] trim slice nr=%lu bytes=%llu io=%lu"
			" start=%lu end=%lu jiffies=%lu",
			rw == READ ? 'R' : 'W', nr_slices, bytes_trim, io_trim,
			tg->slice_start[rw], tg->slice_end[rw], jiffies);
}

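/*
 * Can one more bio be dispatched in the current slice without breaching
 * the iops limit?  If not, an approximate wait time is stored in @wait.
 * The budget scales with the (rounded up) elapsed slice time, e.g. with
 * HZ=1000 and iops=100, a slice that has run for one 100ms throtl_slice
 * allows 100 * 100 / 1000 = 10 bios.
 */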
static bool tg_with_in_iops_limit(struct throtl_grp *tg, struct bio *bio,
				  unsigned long *wait)
{
	bool rw = bio_data_dir(bio);
	unsigned int io_allowed;
	unsigned long jiffy_elapsed, jiffy_wait, jiffy_elapsed_rnd;
	u64 tmp;

	jiffy_elapsed = jiffy_elapsed_rnd = jiffies - tg->slice_start[rw];

	/* Slice has just started. Consider one slice interval */
	if (!jiffy_elapsed)
		jiffy_elapsed_rnd = throtl_slice;

	jiffy_elapsed_rnd = roundup(jiffy_elapsed_rnd, throtl_slice);

	/*
	 * jiffy_elapsed_rnd should not be a big value as the minimum iops
	 * can be 1. Then at max jiffy elapsed should be equivalent of 1
	 * second as we will allow dispatch after 1 second and after that
	 * the slice should have been trimmed.
	 */

	tmp = (u64)tg->iops[rw] * jiffy_elapsed_rnd;
	do_div(tmp, HZ);

	if (tmp > UINT_MAX)
		io_allowed = UINT_MAX;
	else
		io_allowed = tmp;

	if (tg->io_disp[rw] + 1 <= io_allowed) {
		if (wait)
			*wait = 0;
		return true;
	}

	/* Calc approx time to dispatch */
	jiffy_wait = ((tg->io_disp[rw] + 1) * HZ)/tg->iops[rw] + 1;

	if (jiffy_wait > jiffy_elapsed)
		jiffy_wait = jiffy_wait - jiffy_elapsed;
	else
		jiffy_wait = 1;

	if (wait)
		*wait = jiffy_wait;
	return false;
}

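/*
 * Same check for the bytes-per-second limit: @bio fits if the bytes
 * already dispatched plus bio->bi_size stay within
 * bps * rounded_elapsed / HZ, e.g. with HZ=1000 and bps=1048576 (1MB/s),
 * 100ms of slice buys 1048576 * 100 / 1000 = 104857 bytes.
 */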
static bool tg_with_in_bps_limit(struct throtl_grp *tg, struct bio *bio,
				 unsigned long *wait)
{
	bool rw = bio_data_dir(bio);
	u64 bytes_allowed, extra_bytes, tmp;
	unsigned long jiffy_elapsed, jiffy_wait, jiffy_elapsed_rnd;

	jiffy_elapsed = jiffy_elapsed_rnd = jiffies - tg->slice_start[rw];

	/* Slice has just started. Consider one slice interval */
	if (!jiffy_elapsed)
		jiffy_elapsed_rnd = throtl_slice;

	jiffy_elapsed_rnd = roundup(jiffy_elapsed_rnd, throtl_slice);

	tmp = tg->bps[rw] * jiffy_elapsed_rnd;
	do_div(tmp, HZ);
	bytes_allowed = tmp;

	if (tg->bytes_disp[rw] + bio->bi_size <= bytes_allowed) {
		if (wait)
			*wait = 0;
		return true;
	}

	/* Calc approx time to dispatch */
	extra_bytes = tg->bytes_disp[rw] + bio->bi_size - bytes_allowed;
	jiffy_wait = div64_u64(extra_bytes * HZ, tg->bps[rw]);

	if (!jiffy_wait)
		jiffy_wait = 1;

	/*
	 * This wait time is without taking into consideration the rounding
	 * up we did. Add that time also.
	 */
	jiffy_wait = jiffy_wait + (jiffy_elapsed_rnd - jiffy_elapsed);
	if (wait)
		*wait = jiffy_wait;
	return false;
}

static bool tg_no_rule_group(struct throtl_grp *tg, bool rw)
{
	if (tg->bps[rw] == -1 && tg->iops[rw] == -1)
		return true;
	return false;
}

/*
 * Returns whether one can dispatch a bio or not. Also returns the approx
 * number of jiffies to wait before this bio is within the IO rate and can
 * be dispatched.
 */
static bool tg_may_dispatch(struct throtl_grp *tg, struct bio *bio,
			    unsigned long *wait)
{
	bool rw = bio_data_dir(bio);
	unsigned long bps_wait = 0, iops_wait = 0, max_wait = 0;

	/*
	 * Currently the whole state machine of the group depends on the
	 * first bio queued in the group bio list. So one should not be
	 * calling this function with a different bio if there are other
	 * bios queued.
	 */
	BUG_ON(tg->service_queue.nr_queued[rw] &&
	       bio != bio_list_peek(&tg->service_queue.bio_lists[rw]));

	/* If tg->bps = -1, then BW is unlimited */
	if (tg->bps[rw] == -1 && tg->iops[rw] == -1) {
		if (wait)
			*wait = 0;
		return true;
	}

	/*
	 * If the previous slice expired, start a new one, otherwise
	 * renew/extend the existing slice to make sure it is at least
	 * throtl_slice interval long since now.
	 */
	if (throtl_slice_used(tg, rw))
		throtl_start_new_slice(tg, rw);
	else {
		if (time_before(tg->slice_end[rw], jiffies + throtl_slice))
			throtl_extend_slice(tg, rw, jiffies + throtl_slice);
	}

	if (tg_with_in_bps_limit(tg, bio, &bps_wait) &&
	    tg_with_in_iops_limit(tg, bio, &iops_wait)) {
		if (wait)
			*wait = 0;
		return true;
	}

	max_wait = max(bps_wait, iops_wait);

	if (wait)
		*wait = max_wait;

	if (time_before(tg->slice_end[rw], jiffies + max_wait))
		throtl_extend_slice(tg, rw, jiffies + max_wait);

	return false;
}

static void throtl_update_dispatch_stats(struct blkcg_gq *blkg, u64 bytes,
					 int rw)
{
	struct throtl_grp *tg = blkg_to_tg(blkg);
	struct tg_stats_cpu *stats_cpu;
	unsigned long flags;

	/* If per cpu stats are not allocated yet, don't do any accounting. */
	if (tg->stats_cpu == NULL)
		return;

	/*
	 * Disabling interrupts to provide mutual exclusion between two
	 * writes on the same cpu. It probably is not needed for 64bit. Not
	 * optimizing that case yet.
	 */
	local_irq_save(flags);

	stats_cpu = this_cpu_ptr(tg->stats_cpu);

	blkg_rwstat_add(&stats_cpu->serviced, rw, 1);
	blkg_rwstat_add(&stats_cpu->service_bytes, rw, bytes);

	local_irq_restore(flags);
}

static void throtl_charge_bio(struct throtl_grp *tg, struct bio *bio)
{
	bool rw = bio_data_dir(bio);

	/* Charge the bio to the group */
	tg->bytes_disp[rw] += bio->bi_size;
	tg->io_disp[rw]++;

	throtl_update_dispatch_stats(tg_to_blkg(tg), bio->bi_size, bio->bi_rw);
}

static void throtl_add_bio_tg(struct bio *bio, struct throtl_grp *tg)
{
	struct throtl_service_queue *sq = &tg->service_queue;
	bool rw = bio_data_dir(bio);

	/*
	 * If @tg doesn't currently have any bios queued in the same
	 * direction, queueing @bio can change when @tg should be
	 * dispatched.  Mark that @tg was empty.  This is automatically
	 * cleared on the next tg_update_disptime().
	 */
	if (!sq->nr_queued[rw])
		tg->flags |= THROTL_TG_WAS_EMPTY;

	bio_list_add(&sq->bio_lists[rw], bio);
	/* Take a bio reference on tg */
	blkg_get(tg_to_blkg(tg));
	sq->nr_queued[rw]++;
	tg->td->nr_queued[rw]++;
	throtl_enqueue_tg(tg);
}

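/*
 * Recompute when @tg may dispatch its next queued bio and requeue it in
 * the parent's pending tree at that new ->disptime.
 */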
static void tg_update_disptime(struct throtl_grp *tg)
{
	struct throtl_service_queue *sq = &tg->service_queue;
	unsigned long read_wait = -1, write_wait = -1, min_wait = -1, disptime;
	struct bio *bio;

	if ((bio = bio_list_peek(&sq->bio_lists[READ])))
		tg_may_dispatch(tg, bio, &read_wait);

	if ((bio = bio_list_peek(&sq->bio_lists[WRITE])))
		tg_may_dispatch(tg, bio, &write_wait);

	min_wait = min(read_wait, write_wait);
	disptime = jiffies + min_wait;

	/* Update dispatch time */
	throtl_dequeue_tg(tg);
	tg->disptime = disptime;
	throtl_enqueue_tg(tg);

	/* see throtl_add_bio_tg() */
	tg->flags &= ~THROTL_TG_WAS_EMPTY;
}

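/*
 * Pop the first bio queued on @tg in @rw direction, charge it to the
 * group's slice and pass it on to the parent service_queue, flagged
 * REQ_THROTTLED so that it is not throttled again on re-issue.
 */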
static void tg_dispatch_one_bio(struct throtl_grp *tg, bool rw)
{
	struct throtl_service_queue *sq = &tg->service_queue;
	struct bio *bio;

	bio = bio_list_pop(&sq->bio_lists[rw]);
	sq->nr_queued[rw]--;
	/* Drop bio reference on blkg */
	blkg_put(tg_to_blkg(tg));

	BUG_ON(tg->td->nr_queued[rw] <= 0);
	tg->td->nr_queued[rw]--;

	throtl_charge_bio(tg, bio);
	bio_list_add(&sq->parent_sq->bio_lists[rw], bio);
	bio->bi_rw |= REQ_THROTTLED;

	throtl_trim_slice(tg, rw);
}

static int throtl_dispatch_tg(struct throtl_grp *tg)
{
	struct throtl_service_queue *sq = &tg->service_queue;
	unsigned int nr_reads = 0, nr_writes = 0;
	unsigned int max_nr_reads = throtl_grp_quantum*3/4;
	unsigned int max_nr_writes = throtl_grp_quantum - max_nr_reads;
	struct bio *bio;

	/* Try to dispatch 75% READS and 25% WRITES */

	while ((bio = bio_list_peek(&sq->bio_lists[READ])) &&
	       tg_may_dispatch(tg, bio, NULL)) {

		tg_dispatch_one_bio(tg, bio_data_dir(bio));
		nr_reads++;

		if (nr_reads >= max_nr_reads)
			break;
	}

	while ((bio = bio_list_peek(&sq->bio_lists[WRITE])) &&
	       tg_may_dispatch(tg, bio, NULL)) {

		tg_dispatch_one_bio(tg, bio_data_dir(bio));
		nr_writes++;

		if (nr_writes >= max_nr_writes)
			break;
	}

	return nr_reads + nr_writes;
}

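/*
 * Dispatch from pending groups in ->disptime order, stopping at the first
 * group whose dispatch time lies in the future or once throtl_quantum
 * bios have been moved in this round.
 */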
static int throtl_select_dispatch(struct throtl_service_queue *parent_sq)
{
	unsigned int nr_disp = 0;

	while (1) {
		struct throtl_grp *tg = throtl_rb_first(parent_sq);
		struct throtl_service_queue *sq = &tg->service_queue;

		if (!tg)
			break;

		if (time_before(jiffies, tg->disptime))
			break;

		throtl_dequeue_tg(tg);

		nr_disp += throtl_dispatch_tg(tg);

		if (sq->nr_queued[0] || sq->nr_queued[1])
			tg_update_disptime(tg);

		if (nr_disp >= throtl_quantum)
			break;
	}

	return nr_disp;
}

/* work function to dispatch throttled bios */
void blk_throtl_dispatch_work_fn(struct work_struct *work)
{
	struct throtl_data *td = container_of(to_delayed_work(work),
					      struct throtl_data, dispatch_work);
	struct throtl_service_queue *sq = &td->service_queue;
	struct request_queue *q = td->queue;
	unsigned int nr_disp = 0;
	struct bio_list bio_list_on_stack;
	struct bio *bio;
	struct blk_plug plug;
	int rw;

	spin_lock_irq(q->queue_lock);

	bio_list_init(&bio_list_on_stack);

	throtl_log(td, "dispatch nr_queued=%u read=%u write=%u",
		   td->nr_queued[READ] + td->nr_queued[WRITE],
		   td->nr_queued[READ], td->nr_queued[WRITE]);

	nr_disp = throtl_select_dispatch(sq);

	if (nr_disp) {
		for (rw = READ; rw <= WRITE; rw++) {
			bio_list_merge(&bio_list_on_stack, &sq->bio_lists[rw]);
			bio_list_init(&sq->bio_lists[rw]);
		}
		throtl_log(td, "bios disp=%u", nr_disp);
	}

	throtl_schedule_next_dispatch(td);

	spin_unlock_irq(q->queue_lock);

	/*
	 * If we dispatched some requests, unplug the queue to make sure
	 * immediate dispatch
	 */
	if (nr_disp) {
		blk_start_plug(&plug);
		while ((bio = bio_list_pop(&bio_list_on_stack)))
			generic_make_request(bio);
		blk_finish_plug(&plug);
	}
}

static u64 tg_prfill_cpu_rwstat(struct seq_file *sf,
				struct blkg_policy_data *pd, int off)
{
	struct throtl_grp *tg = pd_to_tg(pd);
	struct blkg_rwstat rwstat = { }, tmp;
	int i, cpu;

	for_each_possible_cpu(cpu) {
		struct tg_stats_cpu *sc = per_cpu_ptr(tg->stats_cpu, cpu);

		tmp = blkg_rwstat_read((void *)sc + off);
		for (i = 0; i < BLKG_RWSTAT_NR; i++)
			rwstat.cnt[i] += tmp.cnt[i];
	}

	return __blkg_prfill_rwstat(sf, pd, &rwstat);
}

static int tg_print_cpu_rwstat(struct cgroup *cgrp, struct cftype *cft,
			       struct seq_file *sf)
{
	struct blkcg *blkcg = cgroup_to_blkcg(cgrp);

	blkcg_print_blkgs(sf, blkcg, tg_prfill_cpu_rwstat, &blkcg_policy_throtl,
			  cft->private, true);
	return 0;
}

static u64 tg_prfill_conf_u64(struct seq_file *sf, struct blkg_policy_data *pd,
			      int off)
{
	struct throtl_grp *tg = pd_to_tg(pd);
	u64 v = *(u64 *)((void *)tg + off);

	if (v == -1)
		return 0;
	return __blkg_prfill_u64(sf, pd, v);
}

static u64 tg_prfill_conf_uint(struct seq_file *sf, struct blkg_policy_data *pd,
			       int off)
{
	struct throtl_grp *tg = pd_to_tg(pd);
	unsigned int v = *(unsigned int *)((void *)tg + off);

	if (v == -1)
		return 0;
	return __blkg_prfill_u64(sf, pd, v);
}

static int tg_print_conf_u64(struct cgroup *cgrp, struct cftype *cft,
			     struct seq_file *sf)
{
	blkcg_print_blkgs(sf, cgroup_to_blkcg(cgrp), tg_prfill_conf_u64,
			  &blkcg_policy_throtl, cft->private, false);
	return 0;
}

static int tg_print_conf_uint(struct cgroup *cgrp, struct cftype *cft,
			      struct seq_file *sf)
{
	blkcg_print_blkgs(sf, cgroup_to_blkcg(cgrp), tg_prfill_conf_uint,
			  &blkcg_policy_throtl, cft->private, false);
	return 0;
}

static int tg_set_conf(struct cgroup *cgrp, struct cftype *cft, const char *buf,
		       bool is_u64)
{
	struct blkcg *blkcg = cgroup_to_blkcg(cgrp);
	struct blkg_conf_ctx ctx;
	struct throtl_grp *tg;
	struct throtl_data *td;
	int ret;

	ret = blkg_conf_prep(blkcg, &blkcg_policy_throtl, buf, &ctx);
	if (ret)
		return ret;

	tg = blkg_to_tg(ctx.blkg);
	td = ctx.blkg->q->td;

	if (!ctx.v)
		ctx.v = -1;

	if (is_u64)
		*(u64 *)((void *)tg + cft->private) = ctx.v;
	else
		*(unsigned int *)((void *)tg + cft->private) = ctx.v;

	throtl_log_tg(tg, "limit change rbps=%llu wbps=%llu riops=%u wiops=%u",
		      tg->bps[READ], tg->bps[WRITE],
		      tg->iops[READ], tg->iops[WRITE]);

	/*
	 * We're already holding queue_lock and know @tg is valid.  Let's
	 * apply the new config directly.
	 *
	 * Restart the slices for both READ and WRITES. It might happen
	 * that a group's limits are dropped suddenly and we don't want to
	 * account recently dispatched IO with the new low rate.
	 */
	throtl_start_new_slice(tg, 0);
	throtl_start_new_slice(tg, 1);

	if (tg->flags & THROTL_TG_PENDING) {
		tg_update_disptime(tg);
		throtl_schedule_next_dispatch(td);
	}

	blkg_conf_finish(&ctx);
	return 0;
}

static int tg_set_conf_u64(struct cgroup *cgrp, struct cftype *cft,
			   const char *buf)
{
	return tg_set_conf(cgrp, cft, buf, true);
}

static int tg_set_conf_uint(struct cgroup *cgrp, struct cftype *cft,
			    const char *buf)
{
	return tg_set_conf(cgrp, cft, buf, false);
}

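/*
 * cgroupfs files for the policy.  A limit is written as "major:minor
 * value"; e.g., assuming the target disk is device 8:16,
 *
 *	echo "8:16 1048576" > blkio.throttle.read_bps_device
 *
 * caps reads on that device at 1MB/s.  Writing 0 as the value removes
 * the limit (stored internally as -1, i.e. unlimited).
 */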
static struct cftype throtl_files[] = {
	{
		.name = "throttle.read_bps_device",
		.private = offsetof(struct throtl_grp, bps[READ]),
		.read_seq_string = tg_print_conf_u64,
		.write_string = tg_set_conf_u64,
		.max_write_len = 256,
	},
	{
		.name = "throttle.write_bps_device",
		.private = offsetof(struct throtl_grp, bps[WRITE]),
		.read_seq_string = tg_print_conf_u64,
		.write_string = tg_set_conf_u64,
		.max_write_len = 256,
	},
	{
		.name = "throttle.read_iops_device",
		.private = offsetof(struct throtl_grp, iops[READ]),
		.read_seq_string = tg_print_conf_uint,
		.write_string = tg_set_conf_uint,
		.max_write_len = 256,
	},
	{
		.name = "throttle.write_iops_device",
		.private = offsetof(struct throtl_grp, iops[WRITE]),
		.read_seq_string = tg_print_conf_uint,
		.write_string = tg_set_conf_uint,
		.max_write_len = 256,
	},
	{
		.name = "throttle.io_service_bytes",
		.private = offsetof(struct tg_stats_cpu, service_bytes),
		.read_seq_string = tg_print_cpu_rwstat,
	},
	{
		.name = "throttle.io_serviced",
		.private = offsetof(struct tg_stats_cpu, serviced),
		.read_seq_string = tg_print_cpu_rwstat,
	},
	{ }	/* terminate */
};

static void throtl_shutdown_wq(struct request_queue *q)
{
	struct throtl_data *td = q->td;

	cancel_delayed_work_sync(&td->dispatch_work);
}

static struct blkcg_policy blkcg_policy_throtl = {
	.pd_size		= sizeof(struct throtl_grp),
	.cftypes		= throtl_files,

	.pd_init_fn		= throtl_pd_init,
	.pd_exit_fn		= throtl_pd_exit,
	.pd_reset_stats_fn	= throtl_pd_reset_stats,
};

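/**
 * blk_throtl_bio - check if @bio needs to be throttled
 * @q: request_queue @bio is being submitted on
 * @bio: bio in question
 *
 * Returns %true if @bio was queued for delayed dispatch by the throttle
 * work item and the caller must not issue it, %false if it may be
 * dispatched immediately.
 */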
bool blk_throtl_bio(struct request_queue *q, struct bio *bio)
{
	struct throtl_data *td = q->td;
	struct throtl_grp *tg;
	struct throtl_service_queue *sq;
	bool rw = bio_data_dir(bio);
	struct blkcg *blkcg;
	bool throttled = false;

	if (bio->bi_rw & REQ_THROTTLED) {
		bio->bi_rw &= ~REQ_THROTTLED;
		goto out;
	}

	/*
	 * A throtl_grp pointer retrieved under rcu can be used to access
	 * basic fields like stats and io rates. If a group has no rules,
	 * just update the dispatch stats in a lockless manner and return.
	 */
	rcu_read_lock();
	blkcg = bio_blkcg(bio);
	tg = throtl_lookup_tg(td, blkcg);
	if (tg) {
		if (tg_no_rule_group(tg, rw)) {
			throtl_update_dispatch_stats(tg_to_blkg(tg),
						     bio->bi_size, bio->bi_rw);
			goto out_unlock_rcu;
		}
	}

	/*
	 * Either the group has not been allocated yet or it is not an
	 * unlimited IO group
	 */
	spin_lock_irq(q->queue_lock);
	tg = throtl_lookup_create_tg(td, blkcg);
	if (unlikely(!tg))
		goto out_unlock;

	sq = &tg->service_queue;

	/* throtl is FIFO - if other bios are already queued, should queue */
	if (sq->nr_queued[rw])
		goto queue_bio;

	/* Bio is within the rate limit of the group */
	if (tg_may_dispatch(tg, bio, NULL)) {
		throtl_charge_bio(tg, bio);

		/*
		 * We need to trim the slice even when bios are not being
		 * queued, otherwise it might happen that a bio is not queued
		 * for a long time and the slice keeps on extending and trim
		 * is not called for a long time. Now if limits are reduced
		 * suddenly we take into account all the IO dispatched so far
		 * at the new low rate and newly queued IO gets a really long
		 * dispatch time.
		 *
		 * So keep on trimming the slice even if the bio is not queued.
		 */
		throtl_trim_slice(tg, rw);
		goto out_unlock;
	}

queue_bio:
	throtl_log_tg(tg, "[%c] bio. bdisp=%llu sz=%u bps=%llu"
			" iodisp=%u iops=%u queued=%d/%d",
			rw == READ ? 'R' : 'W',
			tg->bytes_disp[rw], bio->bi_size, tg->bps[rw],
			tg->io_disp[rw], tg->iops[rw],
			sq->nr_queued[READ], sq->nr_queued[WRITE]);

	bio_associate_current(bio);
	throtl_add_bio_tg(bio, tg);
	throttled = true;

	/* update @tg's dispatch time if @tg was empty before @bio */
	if (tg->flags & THROTL_TG_WAS_EMPTY) {
		tg_update_disptime(tg);
		throtl_schedule_next_dispatch(td);
	}

out_unlock:
	spin_unlock_irq(q->queue_lock);
out_unlock_rcu:
	rcu_read_unlock();
out:
	return throttled;
}

/**
 * blk_throtl_drain - drain throttled bios
 * @q: request_queue to drain throttled bios for
 *
 * Dispatch all currently throttled bios on @q through ->make_request_fn().
 */
void blk_throtl_drain(struct request_queue *q)
	__releases(q->queue_lock) __acquires(q->queue_lock)
{
	struct throtl_data *td = q->td;
	struct throtl_service_queue *parent_sq = &td->service_queue;
	struct throtl_grp *tg;
	struct bio *bio;
	int rw;

	queue_lockdep_assert_held(q);

	while ((tg = throtl_rb_first(parent_sq))) {
		struct throtl_service_queue *sq = &tg->service_queue;

		throtl_dequeue_tg(tg);

		while ((bio = bio_list_peek(&sq->bio_lists[READ])))
			tg_dispatch_one_bio(tg, bio_data_dir(bio));
		while ((bio = bio_list_peek(&sq->bio_lists[WRITE])))
			tg_dispatch_one_bio(tg, bio_data_dir(bio));
	}
	spin_unlock_irq(q->queue_lock);

	/* all bios are now in td->service_queue, issue them */
	for (rw = READ; rw <= WRITE; rw++)
		while ((bio = bio_list_pop(&parent_sq->bio_lists[rw])))
			generic_make_request(bio);

	spin_lock_irq(q->queue_lock);
}

int blk_throtl_init(struct request_queue *q)
{
	struct throtl_data *td;
	int ret;

	td = kzalloc_node(sizeof(*td), GFP_KERNEL, q->node);
	if (!td)
		return -ENOMEM;

	INIT_DELAYED_WORK(&td->dispatch_work, blk_throtl_dispatch_work_fn);
	throtl_service_queue_init(&td->service_queue, NULL);

	q->td = td;
	td->queue = q;

	/* activate policy */
	ret = blkcg_activate_policy(q, &blkcg_policy_throtl);
	if (ret)
		kfree(td);
	return ret;
}

void blk_throtl_exit(struct request_queue *q)
{
	BUG_ON(!q->td);
	throtl_shutdown_wq(q);
	blkcg_deactivate_policy(q, &blkcg_policy_throtl);
	kfree(q->td);
}

static int __init throtl_init(void)
{
	kthrotld_workqueue = alloc_workqueue("kthrotld", WQ_MEM_RECLAIM, 0);
	if (!kthrotld_workqueue)
		panic("Failed to create kthrotld\n");

	return blkcg_policy_register(&blkcg_policy_throtl);
}

module_init(throtl_init);