/*
 * Interface for controlling IO bandwidth on a request queue
 * Copyright (C) 2010 Vivek Goyal <vgoyal@redhat.com>
 */
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/bio.h>
#include <linux/blktrace_api.h>
#include "blk-cgroup.h"
/* Max dispatch from a group in 1 round */
static int throtl_grp_quantum = 8;
/* Total max dispatch from all groups in one round */
static int throtl_quantum = 32;
/* Throttling is performed over a 100ms slice, after which the slice is renewed */
static unsigned long throtl_slice = HZ/10; /* 100 ms */
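/*
 * Note: with HZ=1000 this works out to 100 jiffies. All of the per-group
 * accounting below (bytes_disp, io_disp, slice_start/end) is kept per such
 * slice and trimmed or renewed as slices expire.
 */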
static struct blkcg_policy blkcg_policy_throtl;
/* A workqueue to queue throttle related work */
static struct workqueue_struct *kthrotld_workqueue;
struct throtl_service_queue {
struct rb_root pending_tree; /* RB tree of active tgs */
struct rb_node *first_pending; /* first node in the tree */
unsigned int nr_pending; /* # queued in the tree */
unsigned long first_pending_disptime; /* disptime of the first tg */
#define THROTL_SERVICE_QUEUE_INITIALIZER \
(struct throtl_service_queue){ .pending_tree = RB_ROOT }
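/*
 * Pending groups live in the rb tree sorted by their dispatch time
 * (tg->disptime); first_pending caches the leftmost node so the next group
 * to dispatch can be picked without rescanning the tree.
 */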
THROTL_TG_PENDING = 1 << 0, /* on parent's pending tree */
#define rb_entry_tg(node) rb_entry((node), struct throtl_grp, rb_node)
/* Per-cpu group stats */
/* total bytes transferred */
struct blkg_rwstat service_bytes;
/* total IOs serviced, post merge */
struct blkg_rwstat serviced;
/* must be the first member */
struct blkg_policy_data pd;
/* active throtl group service_queue member */
struct rb_node rb_node;
/* throtl_data this group belongs to */
struct throtl_data *td;
/*
 * Dispatch time in jiffies. This is the estimated time when the group
 * will unthrottle and be ready to dispatch more bios. It is used as the
 * key to sort active groups in the service tree.
 */
unsigned long disptime;
/* Two lists for READ and WRITE */
struct bio_list bio_lists[2];
/* Number of queued bios on READ and WRITE lists */
unsigned int nr_queued[2];
/* bytes per second rate limits */
/* Number of bytes dispatched in current slice */
uint64_t bytes_disp[2];
/* Number of bios dispatched in current slice */
unsigned int io_disp[2];
/* When did we start a new slice */
unsigned long slice_start[2];
unsigned long slice_end[2];
/* Per cpu stats pointer */
struct tg_stats_cpu __percpu *stats_cpu;
/* List of tgs waiting for per cpu stats memory to be allocated */
struct list_head stats_alloc_node;
/* service tree for active throtl groups */
struct throtl_service_queue service_queue;
struct request_queue *queue;
/* Total Number of queued bios on READ and WRITE lists */
unsigned int nr_queued[2];
/* number of total undestroyed groups */
unsigned int nr_undestroyed_grps;
/* Work for dispatching throttled bios */
struct delayed_work dispatch_work;
/* list and work item to allocate percpu group stats */
static DEFINE_SPINLOCK(tg_stats_alloc_lock);
static LIST_HEAD(tg_stats_alloc_list);
static void tg_stats_alloc_fn(struct work_struct *);
static DECLARE_DELAYED_WORK(tg_stats_alloc_work, tg_stats_alloc_fn);
static inline struct throtl_grp *pd_to_tg(struct blkg_policy_data *pd)
return pd ? container_of(pd, struct throtl_grp, pd) : NULL;
static inline struct throtl_grp *blkg_to_tg(struct blkcg_gq *blkg)
return pd_to_tg(blkg_to_pd(blkg, &blkcg_policy_throtl));
static inline struct blkcg_gq *tg_to_blkg(struct throtl_grp *tg)
return pd_to_blkg(&tg->pd);
static inline struct throtl_grp *td_root_tg(struct throtl_data *td)
return blkg_to_tg(td->queue->root_blkg);
#define throtl_log_tg(tg, fmt, args...) do { \
blkg_path(tg_to_blkg(tg), __pbuf, sizeof(__pbuf)); \
blk_add_trace_msg((tg)->td->queue, "throtl %s " fmt, __pbuf, ##args); \
#define throtl_log(td, fmt, args...) \
blk_add_trace_msg((td)->queue, "throtl " fmt, ##args)
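/*
 * Both logging macros above feed blk_add_trace_msg(), so their output shows
 * up in the blktrace stream for the queue (e.g. via blktrace/blkparse),
 * prefixed with "throtl".
 */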
/*
 * Worker for allocating per cpu stat for tgs. This is scheduled on the
 * system_wq once there are some groups on the alloc_list waiting for
 * allocation.
 */
static void tg_stats_alloc_fn(struct work_struct *work)
static struct tg_stats_cpu *stats_cpu; /* this fn is non-reentrant */
struct delayed_work *dwork = to_delayed_work(work);
stats_cpu = alloc_percpu(struct tg_stats_cpu);
/* allocation failed, try again after some time */
schedule_delayed_work(dwork, msecs_to_jiffies(10));
spin_lock_irq(&tg_stats_alloc_lock);
if (!list_empty(&tg_stats_alloc_list)) {
struct throtl_grp *tg = list_first_entry(&tg_stats_alloc_list,
swap(tg->stats_cpu, stats_cpu);
list_del_init(&tg->stats_alloc_node);
empty = list_empty(&tg_stats_alloc_list);
spin_unlock_irq(&tg_stats_alloc_lock);
static void throtl_pd_init(struct blkcg_gq *blkg)
struct throtl_grp *tg = blkg_to_tg(blkg);
RB_CLEAR_NODE(&tg->rb_node);
tg->td = blkg->q->td;
bio_list_init(&tg->bio_lists[0]);
bio_list_init(&tg->bio_lists[1]);
tg->iops[WRITE] = -1;
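/*
 * A bps or iops limit of -1 (all ones) is the "unlimited" sentinel used
 * throughout this file; see tg_no_rule_group() and tg_may_dispatch().
 */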
/*
 * Ugh... We need to perform per-cpu allocation for tg->stats_cpu
 * but percpu allocator can't be called from IO path. Queue tg on
 * tg_stats_alloc_list and allocate from work item.
 */
spin_lock_irqsave(&tg_stats_alloc_lock, flags);
list_add(&tg->stats_alloc_node, &tg_stats_alloc_list);
schedule_delayed_work(&tg_stats_alloc_work, 0);
spin_unlock_irqrestore(&tg_stats_alloc_lock, flags);
static void throtl_pd_exit(struct blkcg_gq *blkg)
struct throtl_grp *tg = blkg_to_tg(blkg);
spin_lock_irqsave(&tg_stats_alloc_lock, flags);
list_del_init(&tg->stats_alloc_node);
spin_unlock_irqrestore(&tg_stats_alloc_lock, flags);
free_percpu(tg->stats_cpu);
static void throtl_pd_reset_stats(struct blkcg_gq *blkg)
struct throtl_grp *tg = blkg_to_tg(blkg);
if (tg->stats_cpu == NULL)
for_each_possible_cpu(cpu) {
struct tg_stats_cpu *sc = per_cpu_ptr(tg->stats_cpu, cpu);
blkg_rwstat_reset(&sc->service_bytes);
blkg_rwstat_reset(&sc->serviced);
static struct throtl_grp *throtl_lookup_tg(struct throtl_data *td,
/*
 * This is the common case when there are no blkcgs. Avoid lookup
 * in this case.
 */
if (blkcg == &blkcg_root)
return td_root_tg(td);
return blkg_to_tg(blkg_lookup(blkcg, td->queue));
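/*
 * blkg_lookup() above is an RCU-protected lookup; callers of this helper
 * (see blk_throtl_bio()) are expected to hold rcu_read_lock().
 */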
static struct throtl_grp *throtl_lookup_create_tg(struct throtl_data *td,
struct request_queue *q = td->queue;
struct throtl_grp *tg = NULL;
/*
 * This is the common case when there are no blkcgs. Avoid lookup
 * in this case.
 */
if (blkcg == &blkcg_root) {
struct blkcg_gq *blkg;
blkg = blkg_lookup_create(blkcg, q);
/* if %NULL and @q is alive, fall back to root_tg */
tg = blkg_to_tg(blkg);
else if (!blk_queue_dying(q))
static struct throtl_grp *throtl_rb_first(struct throtl_service_queue *sq)
/* Service tree is empty */
if (!sq->first_pending)
sq->first_pending = rb_first(&sq->pending_tree);
if (sq->first_pending)
return rb_entry_tg(sq->first_pending);
static void rb_erase_init(struct rb_node *n, struct rb_root *root)
static void throtl_rb_erase(struct rb_node *n, struct throtl_service_queue *sq)
if (sq->first_pending == n)
sq->first_pending = NULL;
rb_erase_init(n, &sq->pending_tree);
static void update_min_dispatch_time(struct throtl_service_queue *sq)
struct throtl_grp *tg;
tg = throtl_rb_first(sq);
sq->first_pending_disptime = tg->disptime;
static void tg_service_queue_add(struct throtl_service_queue *sq,
struct throtl_grp *tg)
struct rb_node **node = &sq->pending_tree.rb_node;
struct rb_node *parent = NULL;
struct throtl_grp *__tg;
unsigned long key = tg->disptime;
while (*node != NULL) {
__tg = rb_entry_tg(parent);
if (time_before(key, __tg->disptime))
node = &parent->rb_left;
node = &parent->rb_right;
sq->first_pending = &tg->rb_node;
rb_link_node(&tg->rb_node, parent, node);
rb_insert_color(&tg->rb_node, &sq->pending_tree);
static void __throtl_enqueue_tg(struct throtl_service_queue *sq,
struct throtl_grp *tg)
tg_service_queue_add(sq, tg);
tg->flags |= THROTL_TG_PENDING;
static void throtl_enqueue_tg(struct throtl_service_queue *sq,
struct throtl_grp *tg)
if (!(tg->flags & THROTL_TG_PENDING))
__throtl_enqueue_tg(sq, tg);
static void __throtl_dequeue_tg(struct throtl_service_queue *sq,
struct throtl_grp *tg)
throtl_rb_erase(&tg->rb_node, sq);
tg->flags &= ~THROTL_TG_PENDING;
static void throtl_dequeue_tg(struct throtl_service_queue *sq,
struct throtl_grp *tg)
if (tg->flags & THROTL_TG_PENDING)
__throtl_dequeue_tg(sq, tg);
/* Call with queue lock held */
static void throtl_schedule_delayed_work(struct throtl_data *td,
struct delayed_work *dwork = &td->dispatch_work;
mod_delayed_work(kthrotld_workqueue, dwork, delay);
throtl_log(td, "schedule work. delay=%lu jiffies=%lu", delay, jiffies);
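/*
 * mod_delayed_work() re-arms the work's timer even if it is already
 * pending, so whichever delay was requested most recently takes effect.
 */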
static void throtl_schedule_next_dispatch(struct throtl_data *td)
struct throtl_service_queue *sq = &td->service_queue;
/* any pending children left? */
update_min_dispatch_time(sq);
if (time_before_eq(sq->first_pending_disptime, jiffies))
throtl_schedule_delayed_work(td, 0);
throtl_schedule_delayed_work(td, sq->first_pending_disptime - jiffies);
static inline void throtl_start_new_slice(struct throtl_grp *tg, bool rw)
tg->bytes_disp[rw] = 0;
tg->slice_start[rw] = jiffies;
tg->slice_end[rw] = jiffies + throtl_slice;
throtl_log_tg(tg, "[%c] new slice start=%lu end=%lu jiffies=%lu",
rw == READ ? 'R' : 'W', tg->slice_start[rw],
tg->slice_end[rw], jiffies);
static inline void throtl_set_slice_end(struct throtl_grp *tg, bool rw,
unsigned long jiffy_end)
tg->slice_end[rw] = roundup(jiffy_end, throtl_slice);
static inline void throtl_extend_slice(struct throtl_grp *tg, bool rw,
unsigned long jiffy_end)
tg->slice_end[rw] = roundup(jiffy_end, throtl_slice);
throtl_log_tg(tg, "[%c] extend slice start=%lu end=%lu jiffies=%lu",
rw == READ ? 'R' : 'W', tg->slice_start[rw],
tg->slice_end[rw], jiffies);
/* Determine if previously allocated or extended slice is complete or not */
static bool throtl_slice_used(struct throtl_grp *tg, bool rw)
if (time_in_range(jiffies, tg->slice_start[rw], tg->slice_end[rw]))
/* Trim the used slices and adjust slice start accordingly */
static inline void throtl_trim_slice(struct throtl_grp *tg, bool rw)
unsigned long nr_slices, time_elapsed, io_trim;
BUG_ON(time_before(tg->slice_end[rw], tg->slice_start[rw]));
/*
 * If bps are unlimited (-1), then the time slice doesn't get renewed.
 * Don't try to trim the slice if it has already been used up; a new
 * slice will start when appropriate.
 */
if (throtl_slice_used(tg, rw))
/*
 * A bio has been dispatched. Also adjust slice_end. It might happen
 * that initially the cgroup limit was very low resulting in a high
 * slice_end, but later the limit was bumped up and the bio was
 * dispatched sooner, then we need to reduce slice_end. A high bogus
 * slice_end is bad because it does not allow a new slice to start.
 */
throtl_set_slice_end(tg, rw, jiffies + throtl_slice);
time_elapsed = jiffies - tg->slice_start[rw];
nr_slices = time_elapsed / throtl_slice;
tmp = tg->bps[rw] * throtl_slice * nr_slices;
io_trim = (tg->iops[rw] * throtl_slice * nr_slices)/HZ;
if (!bytes_trim && !io_trim)
if (tg->bytes_disp[rw] >= bytes_trim)
tg->bytes_disp[rw] -= bytes_trim;
tg->bytes_disp[rw] = 0;
if (tg->io_disp[rw] >= io_trim)
tg->io_disp[rw] -= io_trim;
tg->slice_start[rw] += nr_slices * throtl_slice;
throtl_log_tg(tg, "[%c] trim slice nr=%lu bytes=%llu io=%lu"
" start=%lu end=%lu jiffies=%lu",
rw == READ ? 'R' : 'W', nr_slices, bytes_trim, io_trim,
tg->slice_start[rw], tg->slice_end[rw], jiffies);
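/*
 * Worked example: at bps = 1048576 (1 MiB/s) with 100ms slices, each fully
 * elapsed slice forgives bps * throtl_slice / HZ (about 102 KiB) worth of
 * bytes_disp, and slice_start is advanced by nr_slices * throtl_slice so a
 * group cannot bank unused budget from old slices.
 */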
static bool tg_with_in_iops_limit(struct throtl_grp *tg, struct bio *bio,
bool rw = bio_data_dir(bio);
unsigned int io_allowed;
unsigned long jiffy_elapsed, jiffy_wait, jiffy_elapsed_rnd;
jiffy_elapsed = jiffy_elapsed_rnd = jiffies - tg->slice_start[rw];
/* Slice has just started. Consider one slice interval */
jiffy_elapsed_rnd = throtl_slice;
jiffy_elapsed_rnd = roundup(jiffy_elapsed_rnd, throtl_slice);
/*
 * jiffy_elapsed_rnd should not be a big value: even with the minimum
 * iops of 1 we allow a dispatch after at most one second, and by then
 * the slice should have been trimmed.
 */
tmp = (u64)tg->iops[rw] * jiffy_elapsed_rnd;
io_allowed = UINT_MAX;
if (tg->io_disp[rw] + 1 <= io_allowed) {
/* Calc approx time to dispatch */
jiffy_wait = ((tg->io_disp[rw] + 1) * HZ)/tg->iops[rw] + 1;
if (jiffy_wait > jiffy_elapsed)
jiffy_wait = jiffy_wait - jiffy_elapsed;
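/*
 * Rough example: with iops = 10 and HZ = 1000, the second bio in a fresh
 * slice gets jiffy_wait = (2 * 1000) / 10 + 1 = 201 jiffies from the start
 * of the slice, minus whatever time has already elapsed.
 */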
static bool tg_with_in_bps_limit(struct throtl_grp *tg, struct bio *bio,
bool rw = bio_data_dir(bio);
u64 bytes_allowed, extra_bytes, tmp;
unsigned long jiffy_elapsed, jiffy_wait, jiffy_elapsed_rnd;
jiffy_elapsed = jiffy_elapsed_rnd = jiffies - tg->slice_start[rw];
/* Slice has just started. Consider one slice interval */
jiffy_elapsed_rnd = throtl_slice;
jiffy_elapsed_rnd = roundup(jiffy_elapsed_rnd, throtl_slice);
tmp = tg->bps[rw] * jiffy_elapsed_rnd;
if (tg->bytes_disp[rw] + bio->bi_size <= bytes_allowed) {
/* Calc approx time to dispatch */
extra_bytes = tg->bytes_disp[rw] + bio->bi_size - bytes_allowed;
jiffy_wait = div64_u64(extra_bytes * HZ, tg->bps[rw]);
/*
 * This wait time does not take into account the rounding up we did
 * above. Add that time as well.
 */
jiffy_wait = jiffy_wait + (jiffy_elapsed_rnd - jiffy_elapsed);
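/*
 * Rough example: at bps = 1 MiB/s, a bio that overshoots the current
 * allowance by 512 KiB yields jiffy_wait = extra_bytes * HZ / bps = HZ / 2,
 * i.e. about half a second, plus the rounding adjustment above.
 */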
static bool tg_no_rule_group(struct throtl_grp *tg, bool rw) {
if (tg->bps[rw] == -1 && tg->iops[rw] == -1)
/*
 * Returns whether one can dispatch a bio or not. Also returns the approx
 * number of jiffies to wait before this bio is within the IO rate and can
 * be dispatched.
 */
static bool tg_may_dispatch(struct throtl_grp *tg, struct bio *bio,
bool rw = bio_data_dir(bio);
unsigned long bps_wait = 0, iops_wait = 0, max_wait = 0;
/*
 * Currently the whole state machine of the group depends on the first
 * bio queued in the group's bio list. So one should not be calling
 * this function with a different bio if there are other bios queued.
 */
BUG_ON(tg->nr_queued[rw] && bio != bio_list_peek(&tg->bio_lists[rw]));
/* If both bps and iops are -1, then BW is unlimited */
if (tg->bps[rw] == -1 && tg->iops[rw] == -1) {
/*
 * If the previous slice expired, start a new one, otherwise renew/extend
 * the existing slice to make sure it is at least throtl_slice interval
 * long since now.
 */
if (throtl_slice_used(tg, rw))
throtl_start_new_slice(tg, rw);
if (time_before(tg->slice_end[rw], jiffies + throtl_slice))
throtl_extend_slice(tg, rw, jiffies + throtl_slice);
if (tg_with_in_bps_limit(tg, bio, &bps_wait) &&
tg_with_in_iops_limit(tg, bio, &iops_wait)) {
max_wait = max(bps_wait, iops_wait);
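/*
 * A bio may be dispatched only once both the bps and the iops budgets
 * allow it, so it has to wait for the longer of the two wait times.
 */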
if (time_before(tg->slice_end[rw], jiffies + max_wait))
throtl_extend_slice(tg, rw, jiffies + max_wait);
static void throtl_update_dispatch_stats(struct blkcg_gq *blkg, u64 bytes,
struct throtl_grp *tg = blkg_to_tg(blkg);
struct tg_stats_cpu *stats_cpu;
/* If per cpu stats are not allocated yet, don't do any accounting. */
if (tg->stats_cpu == NULL)
/*
 * Disabling interrupts to provide mutual exclusion between two
 * writes on same cpu. It probably is not needed for 64bit. Not
 * optimizing that case yet.
 */
local_irq_save(flags);
stats_cpu = this_cpu_ptr(tg->stats_cpu);
blkg_rwstat_add(&stats_cpu->serviced, rw, 1);
blkg_rwstat_add(&stats_cpu->service_bytes, rw, bytes);
local_irq_restore(flags);
static void throtl_charge_bio(struct throtl_grp *tg, struct bio *bio)
bool rw = bio_data_dir(bio);
/* Charge the bio to the group */
tg->bytes_disp[rw] += bio->bi_size;
throtl_update_dispatch_stats(tg_to_blkg(tg), bio->bi_size, bio->bi_rw);
static void throtl_add_bio_tg(struct throtl_service_queue *sq,
struct throtl_grp *tg, struct bio *bio)
bool rw = bio_data_dir(bio);
bio_list_add(&tg->bio_lists[rw], bio);
/* Take a bio reference on tg */
blkg_get(tg_to_blkg(tg));
tg->td->nr_queued[rw]++;
throtl_enqueue_tg(sq, tg);
static void tg_update_disptime(struct throtl_service_queue *sq,
struct throtl_grp *tg)
unsigned long read_wait = -1, write_wait = -1, min_wait = -1, disptime;
if ((bio = bio_list_peek(&tg->bio_lists[READ])))
tg_may_dispatch(tg, bio, &read_wait);
if ((bio = bio_list_peek(&tg->bio_lists[WRITE])))
tg_may_dispatch(tg, bio, &write_wait);
min_wait = min(read_wait, write_wait);
disptime = jiffies + min_wait;
/* Update dispatch time */
throtl_dequeue_tg(sq, tg);
tg->disptime = disptime;
throtl_enqueue_tg(sq, tg);
static void tg_dispatch_one_bio(struct throtl_grp *tg, bool rw,
bio = bio_list_pop(&tg->bio_lists[rw]);
/* Drop bio reference on blkg */
blkg_put(tg_to_blkg(tg));
BUG_ON(tg->td->nr_queued[rw] <= 0);
tg->td->nr_queued[rw]--;
throtl_charge_bio(tg, bio);
bio_list_add(bl, bio);
bio->bi_rw |= REQ_THROTTLED;
throtl_trim_slice(tg, rw);
static int throtl_dispatch_tg(struct throtl_grp *tg, struct bio_list *bl)
unsigned int nr_reads = 0, nr_writes = 0;
unsigned int max_nr_reads = throtl_grp_quantum*3/4;
unsigned int max_nr_writes = throtl_grp_quantum - max_nr_reads;
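/*
 * With the default throtl_grp_quantum of 8 this allows up to 6 reads and
 * 2 writes from a group per dispatch round (the 75%/25% split below).
 */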
/* Try to dispatch 75% READS and 25% WRITES */
while ((bio = bio_list_peek(&tg->bio_lists[READ])) &&
tg_may_dispatch(tg, bio, NULL)) {
tg_dispatch_one_bio(tg, bio_data_dir(bio), bl);
if (nr_reads >= max_nr_reads)
while ((bio = bio_list_peek(&tg->bio_lists[WRITE])) &&
tg_may_dispatch(tg, bio, NULL)) {
tg_dispatch_one_bio(tg, bio_data_dir(bio), bl);
if (nr_writes >= max_nr_writes)
return nr_reads + nr_writes;
static int throtl_select_dispatch(struct throtl_service_queue *sq,
unsigned int nr_disp = 0;
struct throtl_grp *tg;
tg = throtl_rb_first(sq);
if (time_before(jiffies, tg->disptime))
throtl_dequeue_tg(sq, tg);
nr_disp += throtl_dispatch_tg(tg, bl);
if (tg->nr_queued[0] || tg->nr_queued[1])
tg_update_disptime(sq, tg);
if (nr_disp >= throtl_quantum)
/* work function to dispatch throttled bios */
void blk_throtl_dispatch_work_fn(struct work_struct *work)
struct throtl_data *td = container_of(to_delayed_work(work),
struct throtl_data, dispatch_work);
struct request_queue *q = td->queue;
unsigned int nr_disp = 0;
struct bio_list bio_list_on_stack;
struct blk_plug plug;
spin_lock_irq(q->queue_lock);
bio_list_init(&bio_list_on_stack);
throtl_log(td, "dispatch nr_queued=%u read=%u write=%u",
td->nr_queued[READ] + td->nr_queued[WRITE],
td->nr_queued[READ], td->nr_queued[WRITE]);
nr_disp = throtl_select_dispatch(&td->service_queue, &bio_list_on_stack);
throtl_log(td, "bios disp=%u", nr_disp);
throtl_schedule_next_dispatch(td);
spin_unlock_irq(q->queue_lock);
/*
 * If we dispatched some requests, unplug the queue to make sure
 * immediate dispatch.
 */
blk_start_plug(&plug);
while ((bio = bio_list_pop(&bio_list_on_stack)))
generic_make_request(bio);
blk_finish_plug(&plug);
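/*
 * Note that the collected bios are resubmitted outside q->queue_lock:
 * generic_make_request() may itself need the queue lock and may block, so
 * the bios are batched on a stack list and issued under a blk_plug instead.
 */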
static u64 tg_prfill_cpu_rwstat(struct seq_file *sf,
struct blkg_policy_data *pd, int off)
struct throtl_grp *tg = pd_to_tg(pd);
struct blkg_rwstat rwstat = { }, tmp;
for_each_possible_cpu(cpu) {
struct tg_stats_cpu *sc = per_cpu_ptr(tg->stats_cpu, cpu);
tmp = blkg_rwstat_read((void *)sc + off);
for (i = 0; i < BLKG_RWSTAT_NR; i++)
rwstat.cnt[i] += tmp.cnt[i];
return __blkg_prfill_rwstat(sf, pd, &rwstat);
static int tg_print_cpu_rwstat(struct cgroup *cgrp, struct cftype *cft,
struct blkcg *blkcg = cgroup_to_blkcg(cgrp);
blkcg_print_blkgs(sf, blkcg, tg_prfill_cpu_rwstat, &blkcg_policy_throtl,
static u64 tg_prfill_conf_u64(struct seq_file *sf, struct blkg_policy_data *pd,
struct throtl_grp *tg = pd_to_tg(pd);
u64 v = *(u64 *)((void *)tg + off);
return __blkg_prfill_u64(sf, pd, v);
static u64 tg_prfill_conf_uint(struct seq_file *sf, struct blkg_policy_data *pd,
struct throtl_grp *tg = pd_to_tg(pd);
unsigned int v = *(unsigned int *)((void *)tg + off);
return __blkg_prfill_u64(sf, pd, v);
static int tg_print_conf_u64(struct cgroup *cgrp, struct cftype *cft,
blkcg_print_blkgs(sf, cgroup_to_blkcg(cgrp), tg_prfill_conf_u64,
&blkcg_policy_throtl, cft->private, false);
static int tg_print_conf_uint(struct cgroup *cgrp, struct cftype *cft,
blkcg_print_blkgs(sf, cgroup_to_blkcg(cgrp), tg_prfill_conf_uint,
&blkcg_policy_throtl, cft->private, false);
static int tg_set_conf(struct cgroup *cgrp, struct cftype *cft, const char *buf,
struct blkcg *blkcg = cgroup_to_blkcg(cgrp);
struct blkg_conf_ctx ctx;
struct throtl_grp *tg;
struct throtl_data *td;
ret = blkg_conf_prep(blkcg, &blkcg_policy_throtl, buf, &ctx);
tg = blkg_to_tg(ctx.blkg);
td = ctx.blkg->q->td;
*(u64 *)((void *)tg + cft->private) = ctx.v;
*(unsigned int *)((void *)tg + cft->private) = ctx.v;
throtl_log_tg(tg, "limit change rbps=%llu wbps=%llu riops=%u wiops=%u",
tg->bps[READ], tg->bps[WRITE],
tg->iops[READ], tg->iops[WRITE]);
/*
 * We're already holding queue_lock and know @tg is valid. Let's
 * apply the new config directly.
 *
 * Restart the slices for both READ and WRITE. It might happen that
 * a group's limits are dropped suddenly and we don't want to account
 * recently dispatched IO against the new low rate.
 */
throtl_start_new_slice(tg, 0);
throtl_start_new_slice(tg, 1);
if (tg->flags & THROTL_TG_PENDING) {
tg_update_disptime(&td->service_queue, tg);
throtl_schedule_next_dispatch(td);
blkg_conf_finish(&ctx);
static int tg_set_conf_u64(struct cgroup *cgrp, struct cftype *cft,
return tg_set_conf(cgrp, cft, buf, true);
static int tg_set_conf_uint(struct cgroup *cgrp, struct cftype *cft,
return tg_set_conf(cgrp, cft, buf, false);
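/*
 * The files below are created under the blkio cgroup controller (e.g.
 * blkio.throttle.read_bps_device). The limit files are typically written
 * as "<major>:<minor> <limit>"; blkg_conf_prep() above parses the device
 * number and resolves the corresponding blkg.
 */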
static struct cftype throtl_files[] = {
.name = "throttle.read_bps_device",
.private = offsetof(struct throtl_grp, bps[READ]),
.read_seq_string = tg_print_conf_u64,
.write_string = tg_set_conf_u64,
.max_write_len = 256,
.name = "throttle.write_bps_device",
.private = offsetof(struct throtl_grp, bps[WRITE]),
.read_seq_string = tg_print_conf_u64,
.write_string = tg_set_conf_u64,
.max_write_len = 256,
.name = "throttle.read_iops_device",
.private = offsetof(struct throtl_grp, iops[READ]),
.read_seq_string = tg_print_conf_uint,
.write_string = tg_set_conf_uint,
.max_write_len = 256,
.name = "throttle.write_iops_device",
.private = offsetof(struct throtl_grp, iops[WRITE]),
.read_seq_string = tg_print_conf_uint,
.write_string = tg_set_conf_uint,
.max_write_len = 256,
.name = "throttle.io_service_bytes",
.private = offsetof(struct tg_stats_cpu, service_bytes),
.read_seq_string = tg_print_cpu_rwstat,
.name = "throttle.io_serviced",
.private = offsetof(struct tg_stats_cpu, serviced),
.read_seq_string = tg_print_cpu_rwstat,
static void throtl_shutdown_wq(struct request_queue *q)
struct throtl_data *td = q->td;
cancel_delayed_work_sync(&td->dispatch_work);
static struct blkcg_policy blkcg_policy_throtl = {
.pd_size = sizeof(struct throtl_grp),
.cftypes = throtl_files,
.pd_init_fn = throtl_pd_init,
.pd_exit_fn = throtl_pd_exit,
.pd_reset_stats_fn = throtl_pd_reset_stats,
bool blk_throtl_bio(struct request_queue *q, struct bio *bio)
struct throtl_data *td = q->td;
struct throtl_grp *tg;
bool rw = bio_data_dir(bio), update_disptime = true;
struct blkcg *blkcg;
bool throttled = false;
if (bio->bi_rw & REQ_THROTTLED) {
bio->bi_rw &= ~REQ_THROTTLED;
/*
 * A throtl_grp pointer retrieved under rcu can be used to access
 * basic fields like stats and io rates. If a group has no rules,
 * just update the dispatch stats in a lockless manner and return.
 */
blkcg = bio_blkcg(bio);
tg = throtl_lookup_tg(td, blkcg);
if (tg_no_rule_group(tg, rw)) {
throtl_update_dispatch_stats(tg_to_blkg(tg),
bio->bi_size, bio->bi_rw);
goto out_unlock_rcu;
/*
 * Either the group has not been allocated yet or it is not an
 * unlimited IO group.
 */
spin_lock_irq(q->queue_lock);
tg = throtl_lookup_create_tg(td, blkcg);
if (tg->nr_queued[rw]) {
/*
 * There is already another bio queued in the same direction.
 * No need to update the dispatch time.
 */
update_disptime = false;
/* Bio is within the rate limit of the group */
if (tg_may_dispatch(tg, bio, NULL)) {
throtl_charge_bio(tg, bio);
/*
 * We need to trim the slice even when bios are not being queued,
 * otherwise it might happen that a bio is not queued for a long
 * time and the slice keeps on extending and trim is not called
 * for a long time. Now if limits are reduced suddenly we take
 * into account all the IO dispatched so far at the new low rate
 * and newly queued IO gets a really long dispatch time.
 *
 * So keep on trimming the slice even if a bio is not queued.
 */
throtl_trim_slice(tg, rw);
throtl_log_tg(tg, "[%c] bio. bdisp=%llu sz=%u bps=%llu"
" iodisp=%u iops=%u queued=%d/%d",
rw == READ ? 'R' : 'W',
tg->bytes_disp[rw], bio->bi_size, tg->bps[rw],
tg->io_disp[rw], tg->iops[rw],
tg->nr_queued[READ], tg->nr_queued[WRITE]);
bio_associate_current(bio);
throtl_add_bio_tg(&q->td->service_queue, tg, bio);
if (update_disptime) {
tg_update_disptime(&td->service_queue, tg);
throtl_schedule_next_dispatch(td);
spin_unlock_irq(q->queue_lock);
/**
 * blk_throtl_drain - drain throttled bios
 * @q: request_queue to drain throttled bios for
 *
 * Dispatch all currently throttled bios on @q through ->make_request_fn().
 */
void blk_throtl_drain(struct request_queue *q)
__releases(q->queue_lock) __acquires(q->queue_lock)
struct throtl_data *td = q->td;
struct throtl_service_queue *sq = &td->service_queue;
struct throtl_grp *tg;
queue_lockdep_assert_held(q);
while ((tg = throtl_rb_first(sq))) {
throtl_dequeue_tg(sq, tg);
while ((bio = bio_list_peek(&tg->bio_lists[READ])))
tg_dispatch_one_bio(tg, bio_data_dir(bio), &bl);
while ((bio = bio_list_peek(&tg->bio_lists[WRITE])))
tg_dispatch_one_bio(tg, bio_data_dir(bio), &bl);
spin_unlock_irq(q->queue_lock);
while ((bio = bio_list_pop(&bl)))
generic_make_request(bio);
spin_lock_irq(q->queue_lock);
int blk_throtl_init(struct request_queue *q)
struct throtl_data *td;
td = kzalloc_node(sizeof(*td), GFP_KERNEL, q->node);
td->service_queue = THROTL_SERVICE_QUEUE_INITIALIZER;
INIT_DELAYED_WORK(&td->dispatch_work, blk_throtl_dispatch_work_fn);
/* activate policy */
ret = blkcg_activate_policy(q, &blkcg_policy_throtl);
void blk_throtl_exit(struct request_queue *q)
throtl_shutdown_wq(q);
blkcg_deactivate_policy(q, &blkcg_policy_throtl);
static int __init throtl_init(void)
kthrotld_workqueue = alloc_workqueue("kthrotld", WQ_MEM_RECLAIM, 0);
if (!kthrotld_workqueue)
panic("Failed to create kthrotld\n");
return blkcg_policy_register(&blkcg_policy_throtl);
module_init(throtl_init);