2 * Real-Time Scheduling Class (mapped to the SCHED_FIFO and SCHED_RR
8 #include <linux/slab.h>
9 #include <linux/irq_work.h>
13 int sched_rr_timeslice = RR_TIMESLICE;
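/*
 * RR_TIMESLICE is normally defined as (100 * HZ / 1000), i.e. roughly a
 * 100 ms slice for SCHED_RR tasks. As an illustrative, hypothetical
 * userspace sketch, a task enters one of these policies via the POSIX API:
 *
 *	struct sched_param param = { .sched_priority = 10 };
 *	sched_setscheduler(0, SCHED_RR, &param);
 *
 * SCHED_FIFO tasks use the same priority range but are never time-sliced.
 */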
15 static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun);
17 struct rt_bandwidth def_rt_bandwidth;
19 static enum hrtimer_restart sched_rt_period_timer(struct hrtimer *timer)
21 struct rt_bandwidth *rt_b =
22 container_of(timer, struct rt_bandwidth, rt_period_timer);
26 raw_spin_lock(&rt_b->rt_runtime_lock);
28 overrun = hrtimer_forward_now(timer, rt_b->rt_period);
32 raw_spin_unlock(&rt_b->rt_runtime_lock);
33 idle = do_sched_rt_period_timer(rt_b, overrun);
34 raw_spin_lock(&rt_b->rt_runtime_lock);
37 rt_b->rt_period_active = 0;
38 raw_spin_unlock(&rt_b->rt_runtime_lock);
40 return idle ? HRTIMER_NORESTART : HRTIMER_RESTART;
43 void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime)
45 rt_b->rt_period = ns_to_ktime(period);
46 rt_b->rt_runtime = runtime;
48 raw_spin_lock_init(&rt_b->rt_runtime_lock);
50 hrtimer_init(&rt_b->rt_period_timer,
51 CLOCK_MONOTONIC, HRTIMER_MODE_REL);
52 rt_b->rt_period_timer.function = sched_rt_period_timer;
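/*
 * def_rt_bandwidth is set up from the global sysctl knobs, which usually
 * default to a 1 s period (kernel.sched_rt_period_us = 1000000) and a
 * 950 ms runtime (kernel.sched_rt_runtime_us = 950000): RT tasks may then
 * consume at most ~95% of each period before being throttled, leaving some
 * CPU time for normal tasks. These are tunable defaults, not fixed values.
 */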
55 static void start_rt_bandwidth(struct rt_bandwidth *rt_b)
57 if (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF)
60 raw_spin_lock(&rt_b->rt_runtime_lock);
61 if (!rt_b->rt_period_active) {
62 rt_b->rt_period_active = 1;
63 hrtimer_forward_now(&rt_b->rt_period_timer, rt_b->rt_period);
64 hrtimer_start_expires(&rt_b->rt_period_timer, HRTIMER_MODE_ABS_PINNED);
66 raw_spin_unlock(&rt_b->rt_runtime_lock);
69 #if defined(CONFIG_SMP) && defined(HAVE_RT_PUSH_IPI)
70 static void push_irq_work_func(struct irq_work *work);
73 void init_rt_rq(struct rt_rq *rt_rq)
75 struct rt_prio_array *array;
78 array = &rt_rq->active;
79 for (i = 0; i < MAX_RT_PRIO; i++) {
80 INIT_LIST_HEAD(array->queue + i);
81 __clear_bit(i, array->bitmap);
83 /* delimiter for bitsearch: */
84 __set_bit(MAX_RT_PRIO, array->bitmap);
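/*
 * The bitmap is declared one bit larger than MAX_RT_PRIO, and this
 * always-set delimiter bit guarantees that a priority search over an
 * otherwise empty array terminates at MAX_RT_PRIO, which callers treat
 * as "no RT task queued".
 */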
86 #if defined CONFIG_SMP
87 rt_rq->highest_prio.curr = MAX_RT_PRIO;
88 rt_rq->highest_prio.next = MAX_RT_PRIO;
89 rt_rq->rt_nr_migratory = 0;
90 rt_rq->overloaded = 0;
91 plist_head_init(&rt_rq->pushable_tasks);
93 #ifdef HAVE_RT_PUSH_IPI
94 rt_rq->push_flags = 0;
95 rt_rq->push_cpu = nr_cpu_ids;
96 raw_spin_lock_init(&rt_rq->push_lock);
97 init_irq_work(&rt_rq->push_work, push_irq_work_func);
99 #endif /* CONFIG_SMP */
100 /* We start in dequeued state, because no RT tasks are queued */
101 rt_rq->rt_queued = 0;
104 rt_rq->rt_throttled = 0;
105 rt_rq->rt_runtime = 0;
106 raw_spin_lock_init(&rt_rq->rt_runtime_lock);
109 #ifdef CONFIG_RT_GROUP_SCHED
110 static void destroy_rt_bandwidth(struct rt_bandwidth *rt_b)
112 hrtimer_cancel(&rt_b->rt_period_timer);
115 #define rt_entity_is_task(rt_se) (!(rt_se)->my_q)
117 static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
119 #ifdef CONFIG_SCHED_DEBUG
120 WARN_ON_ONCE(!rt_entity_is_task(rt_se));
122 return container_of(rt_se, struct task_struct, rt);
125 static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
130 static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
135 static inline struct rq *rq_of_rt_se(struct sched_rt_entity *rt_se)
137 struct rt_rq *rt_rq = rt_se->rt_rq;
142 void free_rt_sched_group(struct task_group *tg)
147 destroy_rt_bandwidth(&tg->rt_bandwidth);
149 for_each_possible_cpu(i) {
160 void init_tg_rt_entry(struct task_group *tg, struct rt_rq *rt_rq,
161 struct sched_rt_entity *rt_se, int cpu,
162 struct sched_rt_entity *parent)
164 struct rq *rq = cpu_rq(cpu);
166 rt_rq->highest_prio.curr = MAX_RT_PRIO;
167 rt_rq->rt_nr_boosted = 0;
171 tg->rt_rq[cpu] = rt_rq;
172 tg->rt_se[cpu] = rt_se;
178 rt_se->rt_rq = &rq->rt;
180 rt_se->rt_rq = parent->my_q;
183 rt_se->parent = parent;
184 INIT_LIST_HEAD(&rt_se->run_list);
187 int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
190 struct sched_rt_entity *rt_se;
193 tg->rt_rq = kzalloc(sizeof(rt_rq) * nr_cpu_ids, GFP_KERNEL);
196 tg->rt_se = kzalloc(sizeof(rt_se) * nr_cpu_ids, GFP_KERNEL);
200 init_rt_bandwidth(&tg->rt_bandwidth,
201 ktime_to_ns(def_rt_bandwidth.rt_period), 0);
203 for_each_possible_cpu(i) {
204 rt_rq = kzalloc_node(sizeof(struct rt_rq),
205 GFP_KERNEL, cpu_to_node(i));
209 rt_se = kzalloc_node(sizeof(struct sched_rt_entity),
210 GFP_KERNEL, cpu_to_node(i));
215 rt_rq->rt_runtime = tg->rt_bandwidth.rt_runtime;
216 init_tg_rt_entry(tg, rt_rq, rt_se, i, parent->rt_se[i]);
227 #else /* CONFIG_RT_GROUP_SCHED */
229 #define rt_entity_is_task(rt_se) (1)
231 static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
233 return container_of(rt_se, struct task_struct, rt);
236 static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
238 return container_of(rt_rq, struct rq, rt);
241 static inline struct rq *rq_of_rt_se(struct sched_rt_entity *rt_se)
243 struct task_struct *p = rt_task_of(rt_se);
248 static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
250 struct rq *rq = rq_of_rt_se(rt_se);
255 void free_rt_sched_group(struct task_group *tg) { }
257 int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
261 #endif /* CONFIG_RT_GROUP_SCHED */
265 static void pull_rt_task(struct rq *this_rq);
267 static inline bool need_pull_rt_task(struct rq *rq, struct task_struct *prev)
269 /* Try to pull RT tasks here if we lower this rq's prio */
270 return rq->rt.highest_prio.curr > prev->prio;
273 static inline int rt_overloaded(struct rq *rq)
275 return atomic_read(&rq->rd->rto_count);
278 static inline void rt_set_overload(struct rq *rq)
283 cpumask_set_cpu(rq->cpu, rq->rd->rto_mask);
285 * Make sure the mask is visible before we set
286 * the overload count. That is checked to determine
287 * if we should look at the mask. It would be a shame
288 * if we looked at the mask, but the mask was not
291 * Matched by the barrier in pull_rt_task().
294 atomic_inc(&rq->rd->rto_count);
297 static inline void rt_clear_overload(struct rq *rq)
302 /* the order here really doesn't matter */
303 atomic_dec(&rq->rd->rto_count);
304 cpumask_clear_cpu(rq->cpu, rq->rd->rto_mask);
307 static void update_rt_migration(struct rt_rq *rt_rq)
309 if (rt_rq->rt_nr_migratory && rt_rq->rt_nr_total > 1) {
310 if (!rt_rq->overloaded) {
311 rt_set_overload(rq_of_rt_rq(rt_rq));
312 rt_rq->overloaded = 1;
314 } else if (rt_rq->overloaded) {
315 rt_clear_overload(rq_of_rt_rq(rt_rq));
316 rt_rq->overloaded = 0;
320 static void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
322 struct task_struct *p;
324 if (!rt_entity_is_task(rt_se))
327 p = rt_task_of(rt_se);
328 rt_rq = &rq_of_rt_rq(rt_rq)->rt;
330 rt_rq->rt_nr_total++;
331 if (p->nr_cpus_allowed > 1)
332 rt_rq->rt_nr_migratory++;
334 update_rt_migration(rt_rq);
337 static void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
339 struct task_struct *p;
341 if (!rt_entity_is_task(rt_se))
344 p = rt_task_of(rt_se);
345 rt_rq = &rq_of_rt_rq(rt_rq)->rt;
347 rt_rq->rt_nr_total--;
348 if (p->nr_cpus_allowed > 1)
349 rt_rq->rt_nr_migratory--;
351 update_rt_migration(rt_rq);
354 static inline int has_pushable_tasks(struct rq *rq)
356 return !plist_head_empty(&rq->rt.pushable_tasks);
359 static DEFINE_PER_CPU(struct callback_head, rt_push_head);
360 static DEFINE_PER_CPU(struct callback_head, rt_pull_head);
362 static void push_rt_tasks(struct rq *);
363 static void pull_rt_task(struct rq *);
365 static inline void queue_push_tasks(struct rq *rq)
367 if (!has_pushable_tasks(rq))
370 queue_balance_callback(rq, &per_cpu(rt_push_head, rq->cpu), push_rt_tasks);
373 static inline void queue_pull_task(struct rq *rq)
375 queue_balance_callback(rq, &per_cpu(rt_pull_head, rq->cpu), pull_rt_task);
378 static void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
380 plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);
381 plist_node_init(&p->pushable_tasks, p->prio);
382 plist_add(&p->pushable_tasks, &rq->rt.pushable_tasks);
384 /* Update the highest prio pushable task */
385 if (p->prio < rq->rt.highest_prio.next)
386 rq->rt.highest_prio.next = p->prio;
389 static void dequeue_pushable_task(struct rq *rq, struct task_struct *p)
391 plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);
393 /* Update the new highest prio pushable task */
394 if (has_pushable_tasks(rq)) {
395 p = plist_first_entry(&rq->rt.pushable_tasks,
396 struct task_struct, pushable_tasks);
397 rq->rt.highest_prio.next = p->prio;
399 rq->rt.highest_prio.next = MAX_RT_PRIO;
404 static inline void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
408 static inline void dequeue_pushable_task(struct rq *rq, struct task_struct *p)
413 void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
418 void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
422 static inline bool need_pull_rt_task(struct rq *rq, struct task_struct *prev)
427 static inline void pull_rt_task(struct rq *this_rq)
431 static inline void queue_push_tasks(struct rq *rq)
434 #endif /* CONFIG_SMP */
436 static void enqueue_top_rt_rq(struct rt_rq *rt_rq);
437 static void dequeue_top_rt_rq(struct rt_rq *rt_rq);
439 static inline int on_rt_rq(struct sched_rt_entity *rt_se)
441 return !list_empty(&rt_se->run_list);
444 #ifdef CONFIG_RT_GROUP_SCHED
446 static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
451 return rt_rq->rt_runtime;
454 static inline u64 sched_rt_period(struct rt_rq *rt_rq)
456 return ktime_to_ns(rt_rq->tg->rt_bandwidth.rt_period);
459 typedef struct task_group *rt_rq_iter_t;
461 static inline struct task_group *next_task_group(struct task_group *tg)
464 tg = list_entry_rcu(tg->list.next,
465 typeof(struct task_group), list);
466 } while (&tg->list != &task_groups && task_group_is_autogroup(tg));
468 if (&tg->list == &task_groups)
474 #define for_each_rt_rq(rt_rq, iter, rq) \
475 for (iter = container_of(&task_groups, typeof(*iter), list); \
476 (iter = next_task_group(iter)) && \
477 (rt_rq = iter->rt_rq[cpu_of(rq)]);)
479 #define for_each_sched_rt_entity(rt_se) \
480 for (; rt_se; rt_se = rt_se->parent)
482 static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
487 static void enqueue_rt_entity(struct sched_rt_entity *rt_se, bool head);
488 static void dequeue_rt_entity(struct sched_rt_entity *rt_se);
490 static void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
492 struct task_struct *curr = rq_of_rt_rq(rt_rq)->curr;
493 struct rq *rq = rq_of_rt_rq(rt_rq);
494 struct sched_rt_entity *rt_se;
496 int cpu = cpu_of(rq);
498 rt_se = rt_rq->tg->rt_se[cpu];
500 if (rt_rq->rt_nr_running) {
502 enqueue_top_rt_rq(rt_rq);
503 else if (!on_rt_rq(rt_se))
504 enqueue_rt_entity(rt_se, false);
506 if (rt_rq->highest_prio.curr < curr->prio)
511 static void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
513 struct sched_rt_entity *rt_se;
514 int cpu = cpu_of(rq_of_rt_rq(rt_rq));
516 rt_se = rt_rq->tg->rt_se[cpu];
519 dequeue_top_rt_rq(rt_rq);
520 else if (on_rt_rq(rt_se))
521 dequeue_rt_entity(rt_se);
524 static inline int rt_rq_throttled(struct rt_rq *rt_rq)
526 return rt_rq->rt_throttled && !rt_rq->rt_nr_boosted;
529 static int rt_se_boosted(struct sched_rt_entity *rt_se)
531 struct rt_rq *rt_rq = group_rt_rq(rt_se);
532 struct task_struct *p;
535 return !!rt_rq->rt_nr_boosted;
537 p = rt_task_of(rt_se);
538 return p->prio != p->normal_prio;
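/*
 * "Boosted" here means priority inheritance: e.g. a SCHED_NORMAL task that
 * holds an rt_mutex a SCHED_FIFO waiter blocks on temporarily runs with the
 * waiter's priority, so p->prio != p->normal_prio. rt_nr_boosted counts such
 * entities so a throttled group with a PI-boosted holder is still runnable.
 */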
542 static inline const struct cpumask *sched_rt_period_mask(void)
544 return this_rq()->rd->span;
547 static inline const struct cpumask *sched_rt_period_mask(void)
549 return cpu_online_mask;
554 struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
556 return container_of(rt_b, struct task_group, rt_bandwidth)->rt_rq[cpu];
559 static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
561 return &rt_rq->tg->rt_bandwidth;
564 #else /* !CONFIG_RT_GROUP_SCHED */
566 static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
568 return rt_rq->rt_runtime;
571 static inline u64 sched_rt_period(struct rt_rq *rt_rq)
573 return ktime_to_ns(def_rt_bandwidth.rt_period);
576 typedef struct rt_rq *rt_rq_iter_t;
578 #define for_each_rt_rq(rt_rq, iter, rq) \
579 for ((void) iter, rt_rq = &rq->rt; rt_rq; rt_rq = NULL)
581 #define for_each_sched_rt_entity(rt_se) \
582 for (; rt_se; rt_se = NULL)
584 static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
589 static inline void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
591 struct rq *rq = rq_of_rt_rq(rt_rq);
593 if (!rt_rq->rt_nr_running)
596 enqueue_top_rt_rq(rt_rq);
600 static inline void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
602 dequeue_top_rt_rq(rt_rq);
605 static inline int rt_rq_throttled(struct rt_rq *rt_rq)
607 return rt_rq->rt_throttled;
610 static inline const struct cpumask *sched_rt_period_mask(void)
612 return cpu_online_mask;
616 struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
618 return &cpu_rq(cpu)->rt;
621 static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
623 return &def_rt_bandwidth;
626 #endif /* CONFIG_RT_GROUP_SCHED */
628 bool sched_rt_bandwidth_account(struct rt_rq *rt_rq)
630 struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
632 return (hrtimer_active(&rt_b->rt_period_timer) ||
633 rt_rq->rt_time < rt_b->rt_runtime);
638 * We ran out of runtime, see if we can borrow some from our neighbours.
640 static void do_balance_runtime(struct rt_rq *rt_rq)
642 struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
643 struct root_domain *rd = rq_of_rt_rq(rt_rq)->rd;
647 weight = cpumask_weight(rd->span);
649 raw_spin_lock(&rt_b->rt_runtime_lock);
650 rt_period = ktime_to_ns(rt_b->rt_period);
651 for_each_cpu(i, rd->span) {
652 struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
658 raw_spin_lock(&iter->rt_runtime_lock);
660 * Either all rqs have inf runtime and there's nothing to steal
661 * or __disable_runtime() below sets a specific rq to inf to
662 * indicate it's been disabled and disallow stealing.
664 if (iter->rt_runtime == RUNTIME_INF)
668 * From runqueues with spare time, take 1/n part of their
669 * spare time, but no more than our period.
671 diff = iter->rt_runtime - iter->rt_time;
673 diff = div_u64((u64)diff, weight);
674 if (rt_rq->rt_runtime + diff > rt_period)
675 diff = rt_period - rt_rq->rt_runtime;
676 iter->rt_runtime -= diff;
677 rt_rq->rt_runtime += diff;
678 if (rt_rq->rt_runtime == rt_period) {
679 raw_spin_unlock(&iter->rt_runtime_lock);
684 raw_spin_unlock(&iter->rt_runtime_lock);
686 raw_spin_unlock(&rt_b->rt_runtime_lock);
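/*
 * Illustrative example (hypothetical numbers): on a 4-CPU root domain
 * (weight = 4), a neighbour with rt_runtime = 950 ms but only rt_time =
 * 150 ms has 800 ms spare; we take 800/4 = 200 ms of it, and in any case
 * never let our own rt_runtime grow beyond rt_period.
 */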
690 * Ensure this RQ takes back all the runtime it lent to its neighbours.
692 static void __disable_runtime(struct rq *rq)
694 struct root_domain *rd = rq->rd;
698 if (unlikely(!scheduler_running))
701 for_each_rt_rq(rt_rq, iter, rq) {
702 struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
706 raw_spin_lock(&rt_b->rt_runtime_lock);
707 raw_spin_lock(&rt_rq->rt_runtime_lock);
709 * Either we're all inf and nobody needs to borrow, or we're
710 * already disabled and thus have nothing to do, or we have
711 * exactly the right amount of runtime to take out.
713 if (rt_rq->rt_runtime == RUNTIME_INF ||
714 rt_rq->rt_runtime == rt_b->rt_runtime)
716 raw_spin_unlock(&rt_rq->rt_runtime_lock);
719 * Calculate the difference between what we started out with
720 * and what we currently have; that's the amount of runtime
721 * we lent out and now have to reclaim.
723 want = rt_b->rt_runtime - rt_rq->rt_runtime;
726 * Greedy reclaim, take back as much as we can.
728 for_each_cpu(i, rd->span) {
729 struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
733 * Can't reclaim from ourselves or disabled runqueues.
735 if (iter == rt_rq || iter->rt_runtime == RUNTIME_INF)
738 raw_spin_lock(&iter->rt_runtime_lock);
740 diff = min_t(s64, iter->rt_runtime, want);
741 iter->rt_runtime -= diff;
744 iter->rt_runtime -= want;
747 raw_spin_unlock(&iter->rt_runtime_lock);
753 raw_spin_lock(&rt_rq->rt_runtime_lock);
755 * We cannot be left wanting - that would mean some runtime
756 * leaked out of the system.
761 * Disable all the borrow logic by pretending we have inf
762 * runtime - in which case borrowing doesn't make sense.
764 rt_rq->rt_runtime = RUNTIME_INF;
765 rt_rq->rt_throttled = 0;
766 raw_spin_unlock(&rt_rq->rt_runtime_lock);
767 raw_spin_unlock(&rt_b->rt_runtime_lock);
769 /* Make rt_rq available for pick_next_task() */
770 sched_rt_rq_enqueue(rt_rq);
774 static void __enable_runtime(struct rq *rq)
779 if (unlikely(!scheduler_running))
783 * Reset each runqueue's bandwidth settings
785 for_each_rt_rq(rt_rq, iter, rq) {
786 struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
788 raw_spin_lock(&rt_b->rt_runtime_lock);
789 raw_spin_lock(&rt_rq->rt_runtime_lock);
790 rt_rq->rt_runtime = rt_b->rt_runtime;
792 rt_rq->rt_throttled = 0;
793 raw_spin_unlock(&rt_rq->rt_runtime_lock);
794 raw_spin_unlock(&rt_b->rt_runtime_lock);
798 static void balance_runtime(struct rt_rq *rt_rq)
800 if (!sched_feat(RT_RUNTIME_SHARE))
803 if (rt_rq->rt_time > rt_rq->rt_runtime) {
804 raw_spin_unlock(&rt_rq->rt_runtime_lock);
805 do_balance_runtime(rt_rq);
806 raw_spin_lock(&rt_rq->rt_runtime_lock);
809 #else /* !CONFIG_SMP */
810 static inline void balance_runtime(struct rt_rq *rt_rq) {}
811 #endif /* CONFIG_SMP */
813 static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
815 int i, idle = 1, throttled = 0;
816 const struct cpumask *span;
818 span = sched_rt_period_mask();
819 #ifdef CONFIG_RT_GROUP_SCHED
821 * FIXME: isolated CPUs should really leave the root task group,
822 * whether they are isolcpus or were isolated via cpusets, lest
823 * the timer run on a CPU which does not service all runqueues,
824 * potentially leaving other CPUs indefinitely throttled. If
825 * isolation is really required, the user will turn the throttle
826 * off to kill the perturbations it causes anyway. Meanwhile,
827 * this maintains functionality for boot and/or troubleshooting.
829 if (rt_b == &root_task_group.rt_bandwidth)
830 span = cpu_online_mask;
832 for_each_cpu(i, span) {
834 struct rt_rq *rt_rq = sched_rt_period_rt_rq(rt_b, i);
835 struct rq *rq = rq_of_rt_rq(rt_rq);
837 raw_spin_lock(&rq->lock);
838 if (rt_rq->rt_time) {
841 raw_spin_lock(&rt_rq->rt_runtime_lock);
842 if (rt_rq->rt_throttled)
843 balance_runtime(rt_rq);
844 runtime = rt_rq->rt_runtime;
845 rt_rq->rt_time -= min(rt_rq->rt_time, overrun*runtime);
846 if (rt_rq->rt_throttled && rt_rq->rt_time < runtime) {
847 rt_rq->rt_throttled = 0;
851 * When we're idle and a woken (rt) task is
852 * throttled, check_preempt_curr() will set
853 * skip_update and the time between the wakeup
854 * and this unthrottle will get accounted as
857 if (rt_rq->rt_nr_running && rq->curr == rq->idle)
858 rq_clock_skip_update(rq, false);
860 if (rt_rq->rt_time || rt_rq->rt_nr_running)
862 raw_spin_unlock(&rt_rq->rt_runtime_lock);
863 } else if (rt_rq->rt_nr_running) {
865 if (!rt_rq_throttled(rt_rq))
868 if (rt_rq->rt_throttled)
872 sched_rt_rq_enqueue(rt_rq);
873 raw_spin_unlock(&rq->lock);
876 if (!throttled && (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF))
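/*
 * Replenishment sketch (hypothetical numbers): with runtime = 950 ms and
 * rt_time = 1000 ms at the period boundary (overrun = 1), rt_time drops to
 * 50 ms, which is below runtime, so the rt_rq is unthrottled and re-enqueued
 * via sched_rt_rq_enqueue() above.
 */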
882 static inline int rt_se_prio(struct sched_rt_entity *rt_se)
884 #ifdef CONFIG_RT_GROUP_SCHED
885 struct rt_rq *rt_rq = group_rt_rq(rt_se);
888 return rt_rq->highest_prio.curr;
891 return rt_task_of(rt_se)->prio;
894 static void dump_throttled_rt_tasks(struct rt_rq *rt_rq)
896 struct rt_prio_array *array = &rt_rq->active;
897 struct sched_rt_entity *rt_se;
900 char *end = buf + sizeof(buf);
903 pos += snprintf(pos, sizeof(buf),
904 "sched: RT throttling activated for rt_rq %p (cpu %d)\n",
905 rt_rq, cpu_of(rq_of_rt_rq(rt_rq)));
907 if (bitmap_empty(array->bitmap, MAX_RT_PRIO))
910 pos += snprintf(pos, end - pos, "potential CPU hogs:\n");
911 idx = sched_find_first_bit(array->bitmap);
912 while (idx < MAX_RT_PRIO) {
913 list_for_each_entry(rt_se, array->queue + idx, run_list) {
914 struct task_struct *p;
916 if (!rt_entity_is_task(rt_se))
919 p = rt_task_of(rt_se);
921 pos += snprintf(pos, end - pos, "\t%s (%d)\n",
924 idx = find_next_bit(array->bitmap, MAX_RT_PRIO, idx + 1);
927 #ifdef CONFIG_PANIC_ON_RT_THROTTLING
929 * Use pr_err() in the BUG() case since printk_sched() will
930 * not get flushed and deadlock is not a concern.
935 printk_deferred("%s", buf);
939 static int sched_rt_runtime_exceeded(struct rt_rq *rt_rq)
941 u64 runtime = sched_rt_runtime(rt_rq);
943 if (rt_rq->rt_throttled)
944 return rt_rq_throttled(rt_rq);
946 if (runtime >= sched_rt_period(rt_rq))
949 balance_runtime(rt_rq);
950 runtime = sched_rt_runtime(rt_rq);
951 if (runtime == RUNTIME_INF)
954 if (rt_rq->rt_time > runtime) {
955 struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
958 * Don't actually throttle groups that have no runtime assigned
959 * but accrue some time due to boosting.
961 if (likely(rt_b->rt_runtime)) {
962 static bool once = false;
964 rt_rq->rt_throttled = 1;
968 dump_throttled_rt_tasks(rt_rq);
972 * In case we did anyway, make it go away,
973 * replenishment is a joke, since it will replenish us
979 if (rt_rq_throttled(rt_rq)) {
980 sched_rt_rq_dequeue(rt_rq);
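/*
 * Throttling sketch (default settings assumed): once update_curr_rt() has
 * pushed rt_time past the 950 ms runtime within a 1 s period, rt_throttled
 * is set and the whole rt_rq is dequeued here, so lower scheduling classes
 * can run until the period timer replenishes the budget.
 */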
989 * Update the current task's runtime statistics. Skip current tasks that
990 * are not in our scheduling class.
992 static void update_curr_rt(struct rq *rq)
994 struct task_struct *curr = rq->curr;
995 struct sched_rt_entity *rt_se = &curr->rt;
998 if (curr->sched_class != &rt_sched_class)
1001 delta_exec = rq_clock_task(rq) - curr->se.exec_start;
1002 if (unlikely((s64)delta_exec <= 0))
1005 schedstat_set(curr->se.statistics.exec_max,
1006 max(curr->se.statistics.exec_max, delta_exec));
1008 curr->se.sum_exec_runtime += delta_exec;
1009 account_group_exec_runtime(curr, delta_exec);
1011 curr->se.exec_start = rq_clock_task(rq);
1012 cpuacct_charge(curr, delta_exec);
1014 sched_rt_avg_update(rq, delta_exec);
1016 if (!rt_bandwidth_enabled())
1019 for_each_sched_rt_entity(rt_se) {
1020 struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
1022 if (sched_rt_runtime(rt_rq) != RUNTIME_INF) {
1023 raw_spin_lock(&rt_rq->rt_runtime_lock);
1024 rt_rq->rt_time += delta_exec;
1025 if (sched_rt_runtime_exceeded(rt_rq))
1027 raw_spin_unlock(&rt_rq->rt_runtime_lock);
1033 dequeue_top_rt_rq(struct rt_rq *rt_rq)
1035 struct rq *rq = rq_of_rt_rq(rt_rq);
1037 BUG_ON(&rq->rt != rt_rq);
1039 if (!rt_rq->rt_queued)
1042 BUG_ON(!rq->nr_running);
1044 sub_nr_running(rq, rt_rq->rt_nr_running);
1045 rt_rq->rt_queued = 0;
1049 enqueue_top_rt_rq(struct rt_rq *rt_rq)
1051 struct rq *rq = rq_of_rt_rq(rt_rq);
1053 BUG_ON(&rq->rt != rt_rq);
1055 if (rt_rq->rt_queued)
1057 if (rt_rq_throttled(rt_rq) || !rt_rq->rt_nr_running)
1060 add_nr_running(rq, rt_rq->rt_nr_running);
1061 rt_rq->rt_queued = 1;
1064 #if defined CONFIG_SMP
1067 inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
1069 struct rq *rq = rq_of_rt_rq(rt_rq);
1071 #ifdef CONFIG_RT_GROUP_SCHED
1073 * Change rq's cpupri only if rt_rq is the top queue.
1075 if (&rq->rt != rt_rq)
1078 if (rq->online && prio < prev_prio)
1079 cpupri_set(&rq->rd->cpupri, rq->cpu, prio);
1083 dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
1085 struct rq *rq = rq_of_rt_rq(rt_rq);
1087 #ifdef CONFIG_RT_GROUP_SCHED
1089 * Change rq's cpupri only if rt_rq is the top queue.
1091 if (&rq->rt != rt_rq)
1094 if (rq->online && rt_rq->highest_prio.curr != prev_prio)
1095 cpupri_set(&rq->rd->cpupri, rq->cpu, rt_rq->highest_prio.curr);
1098 #else /* CONFIG_SMP */
1101 void inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) {}
1103 void dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) {}
1105 #endif /* CONFIG_SMP */
1107 #if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
1109 inc_rt_prio(struct rt_rq *rt_rq, int prio)
1111 int prev_prio = rt_rq->highest_prio.curr;
1113 if (prio < prev_prio)
1114 rt_rq->highest_prio.curr = prio;
1116 inc_rt_prio_smp(rt_rq, prio, prev_prio);
1120 dec_rt_prio(struct rt_rq *rt_rq, int prio)
1122 int prev_prio = rt_rq->highest_prio.curr;
1124 if (rt_rq->rt_nr_running) {
1126 WARN_ON(prio < prev_prio);
1129 * This may have been our highest task, and therefore
1130 * we may have some recomputation to do
1132 if (prio == prev_prio) {
1133 struct rt_prio_array *array = &rt_rq->active;
1135 rt_rq->highest_prio.curr =
1136 sched_find_first_bit(array->bitmap);
1140 rt_rq->highest_prio.curr = MAX_RT_PRIO;
1142 dec_rt_prio_smp(rt_rq, prio, prev_prio);
1147 static inline void inc_rt_prio(struct rt_rq *rt_rq, int prio) {}
1148 static inline void dec_rt_prio(struct rt_rq *rt_rq, int prio) {}
1150 #endif /* CONFIG_SMP || CONFIG_RT_GROUP_SCHED */
1152 #ifdef CONFIG_RT_GROUP_SCHED
1155 inc_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
1157 if (rt_se_boosted(rt_se))
1158 rt_rq->rt_nr_boosted++;
1161 start_rt_bandwidth(&rt_rq->tg->rt_bandwidth);
1165 dec_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
1167 if (rt_se_boosted(rt_se))
1168 rt_rq->rt_nr_boosted--;
1170 WARN_ON(!rt_rq->rt_nr_running && rt_rq->rt_nr_boosted);
1173 #else /* CONFIG_RT_GROUP_SCHED */
1176 inc_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
1178 start_rt_bandwidth(&def_rt_bandwidth);
1182 void dec_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) {}
1184 #endif /* CONFIG_RT_GROUP_SCHED */
1187 unsigned int rt_se_nr_running(struct sched_rt_entity *rt_se)
1189 struct rt_rq *group_rq = group_rt_rq(rt_se);
1192 return group_rq->rt_nr_running;
1198 void inc_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
1200 int prio = rt_se_prio(rt_se);
1202 WARN_ON(!rt_prio(prio));
1203 rt_rq->rt_nr_running += rt_se_nr_running(rt_se);
1205 inc_rt_prio(rt_rq, prio);
1206 inc_rt_migration(rt_se, rt_rq);
1207 inc_rt_group(rt_se, rt_rq);
1211 void dec_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
1213 WARN_ON(!rt_prio(rt_se_prio(rt_se)));
1214 WARN_ON(!rt_rq->rt_nr_running);
1215 rt_rq->rt_nr_running -= rt_se_nr_running(rt_se);
1217 dec_rt_prio(rt_rq, rt_se_prio(rt_se));
1218 dec_rt_migration(rt_se, rt_rq);
1219 dec_rt_group(rt_se, rt_rq);
1222 static void __enqueue_rt_entity(struct sched_rt_entity *rt_se, bool head)
1224 struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
1225 struct rt_prio_array *array = &rt_rq->active;
1226 struct rt_rq *group_rq = group_rt_rq(rt_se);
1227 struct list_head *queue = array->queue + rt_se_prio(rt_se);
1230 * Don't enqueue the group if it's throttled, or when empty.
1231 * The latter is a consequence of the former when a child group
1232 * gets throttled and the current group doesn't have any other
1235 if (group_rq && (rt_rq_throttled(group_rq) || !group_rq->rt_nr_running))
1239 list_add(&rt_se->run_list, queue);
1241 list_add_tail(&rt_se->run_list, queue);
1242 __set_bit(rt_se_prio(rt_se), array->bitmap);
1244 inc_rt_tasks(rt_se, rt_rq);
1247 static void __dequeue_rt_entity(struct sched_rt_entity *rt_se)
1249 struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
1250 struct rt_prio_array *array = &rt_rq->active;
1252 list_del_init(&rt_se->run_list);
1253 if (list_empty(array->queue + rt_se_prio(rt_se)))
1254 __clear_bit(rt_se_prio(rt_se), array->bitmap);
1256 dec_rt_tasks(rt_se, rt_rq);
1260 * Because the prio of an upper entry depends on the lower
1261 * entries, we must remove entries top-down.
1263 static void dequeue_rt_stack(struct sched_rt_entity *rt_se)
1265 struct sched_rt_entity *back = NULL;
1267 for_each_sched_rt_entity(rt_se) {
1272 dequeue_top_rt_rq(rt_rq_of_se(back));
1274 for (rt_se = back; rt_se; rt_se = rt_se->back) {
1275 if (on_rt_rq(rt_se))
1276 __dequeue_rt_entity(rt_se);
1280 static void enqueue_rt_entity(struct sched_rt_entity *rt_se, bool head)
1282 struct rq *rq = rq_of_rt_se(rt_se);
1284 dequeue_rt_stack(rt_se);
1285 for_each_sched_rt_entity(rt_se)
1286 __enqueue_rt_entity(rt_se, head);
1287 enqueue_top_rt_rq(&rq->rt);
1290 static void dequeue_rt_entity(struct sched_rt_entity *rt_se)
1292 struct rq *rq = rq_of_rt_se(rt_se);
1294 dequeue_rt_stack(rt_se);
1296 for_each_sched_rt_entity(rt_se) {
1297 struct rt_rq *rt_rq = group_rt_rq(rt_se);
1299 if (rt_rq && rt_rq->rt_nr_running)
1300 __enqueue_rt_entity(rt_se, false);
1302 enqueue_top_rt_rq(&rq->rt);
1306 * Adding/removing a task to/from a priority array:
1309 enqueue_task_rt(struct rq *rq, struct task_struct *p, int flags)
1311 struct sched_rt_entity *rt_se = &p->rt;
1313 if (flags & ENQUEUE_WAKEUP)
1316 enqueue_rt_entity(rt_se, flags & ENQUEUE_HEAD);
1317 walt_inc_cumulative_runnable_avg(rq, p);
1319 if (!task_current(rq, p) && p->nr_cpus_allowed > 1)
1320 enqueue_pushable_task(rq, p);
1323 static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int flags)
1325 struct sched_rt_entity *rt_se = &p->rt;
1328 dequeue_rt_entity(rt_se);
1329 walt_dec_cumulative_runnable_avg(rq, p);
1331 dequeue_pushable_task(rq, p);
1335 * Put the task at the head or the end of the run list without the overhead of
1336 * dequeue followed by enqueue.
1339 requeue_rt_entity(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se, int head)
1341 if (on_rt_rq(rt_se)) {
1342 struct rt_prio_array *array = &rt_rq->active;
1343 struct list_head *queue = array->queue + rt_se_prio(rt_se);
1346 list_move(&rt_se->run_list, queue);
1348 list_move_tail(&rt_se->run_list, queue);
1352 static void requeue_task_rt(struct rq *rq, struct task_struct *p, int head)
1354 struct sched_rt_entity *rt_se = &p->rt;
1355 struct rt_rq *rt_rq;
1357 for_each_sched_rt_entity(rt_se) {
1358 rt_rq = rt_rq_of_se(rt_se);
1359 requeue_rt_entity(rt_rq, rt_se, head);
1363 static void yield_task_rt(struct rq *rq)
1365 requeue_task_rt(rq, rq->curr, 0);
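/*
 * Note: sched_yield() from an RT task only requeues it at the tail of its
 * own priority list; it never yields the CPU to lower-priority tasks, which
 * is a common source of accidental busy-wait loops in SCHED_FIFO code.
 */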
1369 static int find_lowest_rq(struct task_struct *task);
1372 select_task_rq_rt(struct task_struct *p, int cpu, int sd_flag, int flags)
1374 struct task_struct *curr;
1377 /* For anything but wake ups, just return the task_cpu */
1378 if (sd_flag != SD_BALANCE_WAKE && sd_flag != SD_BALANCE_FORK)
1384 curr = READ_ONCE(rq->curr); /* unlocked access */
1387 * If the current task on @p's runqueue is an RT task, then
1388 * try to see if we can wake this RT task up on another
1389 * runqueue. Otherwise simply start this RT task
1390 * on its current runqueue.
1392 * We want to avoid overloading runqueues. If the woken
1393 * task is a higher priority, then it will stay on this CPU
1394 * and the lower prio task should be moved to another CPU.
1395 * Even though this will probably make the lower prio task
1396 * lose its cache, we do not want to bounce a higher-priority task
1397 * around just because it gave up its CPU, perhaps for a
1400 * For equal prio tasks, we just let the scheduler sort it out.
1402 * Otherwise, just let it ride on the affined RQ and the
1403 * post-schedule router will push the preempted task away
1405 * This test is optimistic; if we get it wrong, the load-balancer
1406 * will have to sort it out.
1408 if (curr && unlikely(rt_task(curr)) &&
1409 (curr->nr_cpus_allowed < 2 ||
1410 curr->prio <= p->prio)) {
1411 int target = find_lowest_rq(p);
1414 * Don't bother moving it if the destination CPU is
1415 * not running a lower priority task.
1418 p->prio < cpu_rq(target)->rt.highest_prio.curr)
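/*
 * Illustrative example (hypothetical priorities, lower value = higher prio):
 * curr runs at kernel prio 10 and the waking task p has prio 40; since
 * curr->prio <= p->prio we look for a CPU via find_lowest_rq(), and only
 * redirect p if that CPU's highest queued prio is numerically above 40,
 * i.e. it is running strictly lower-priority work.
 */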
1427 static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p)
1430 * Current can't be migrated, useless to reschedule,
1431 * let's hope p can move out.
1433 if (rq->curr->nr_cpus_allowed == 1 ||
1434 !cpupri_find(&rq->rd->cpupri, rq->curr, NULL))
1438 * p is migratable, so let's not schedule it and
1439 * see if it is pushed or pulled somewhere else.
1441 if (p->nr_cpus_allowed != 1
1442 && cpupri_find(&rq->rd->cpupri, p, NULL))
1446 * There appears to be other cpus that can accept
1447 * current and none to run 'p', so let's reschedule
1448 * to try and push current away:
1450 requeue_task_rt(rq, p, 1);
1454 #endif /* CONFIG_SMP */
1457 * Preempt the current task with a newly woken task if needed:
1459 static void check_preempt_curr_rt(struct rq *rq, struct task_struct *p, int flags)
1461 if (p->prio < rq->curr->prio) {
1470 * - the newly woken task is of equal priority to the current task
1471 * - the newly woken task is non-migratable while current is migratable
1472 * - current will be preempted on the next reschedule
1474 * we should check to see if current can readily move to a different
1475 * cpu. If so, we will reschedule to allow the push logic to try
1476 * to move current somewhere else, making room for our non-migratable
1479 if (p->prio == rq->curr->prio && !test_tsk_need_resched(rq->curr))
1480 check_preempt_equal_prio(rq, p);
1485 static void sched_rt_update_capacity_req(struct rq *rq)
1487 u64 total, used, age_stamp, avg;
1493 sched_avg_update(rq);
1495 * Since we're reading these variables without serialization make sure
1496 * we read them once before doing sanity checks on them.
1498 age_stamp = READ_ONCE(rq->age_stamp);
1499 avg = READ_ONCE(rq->rt_avg);
1500 delta = rq_clock(rq) - age_stamp;
1502 if (unlikely(delta < 0))
1505 total = sched_avg_period() + delta;
1507 used = div_u64(avg, total);
1508 if (unlikely(used > SCHED_CAPACITY_SCALE))
1509 used = SCHED_CAPACITY_SCALE;
1511 set_rt_cpu_capacity(rq->cpu, 1, (unsigned long)(used));
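/*
 * Rough sketch of the estimate: used ~= rt_avg / (sched_avg_period() + delta),
 * i.e. roughly the share of the recent averaging window consumed by RT
 * execution, clamped to SCHED_CAPACITY_SCALE (1024) and reported as this
 * CPU's RT capacity request.
 */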
1514 static inline void sched_rt_update_capacity_req(struct rq *rq)
1519 static struct sched_rt_entity *pick_next_rt_entity(struct rq *rq,
1520 struct rt_rq *rt_rq)
1522 struct rt_prio_array *array = &rt_rq->active;
1523 struct sched_rt_entity *next = NULL;
1524 struct list_head *queue;
1527 idx = sched_find_first_bit(array->bitmap);
1528 BUG_ON(idx >= MAX_RT_PRIO);
1530 queue = array->queue + idx;
1531 next = list_entry(queue->next, struct sched_rt_entity, run_list);
1536 static struct task_struct *_pick_next_task_rt(struct rq *rq)
1538 struct sched_rt_entity *rt_se;
1539 struct task_struct *p;
1540 struct rt_rq *rt_rq = &rq->rt;
1543 rt_se = pick_next_rt_entity(rq, rt_rq);
1545 rt_rq = group_rt_rq(rt_se);
1548 p = rt_task_of(rt_se);
1549 p->se.exec_start = rq_clock_task(rq);
1554 static struct task_struct *
1555 pick_next_task_rt(struct rq *rq, struct task_struct *prev)
1557 struct task_struct *p;
1558 struct rt_rq *rt_rq = &rq->rt;
1560 if (need_pull_rt_task(rq, prev)) {
1562 * This is OK, because current is on_cpu, which avoids it being
1563 * picked for load-balance and preemption/IRQs are still
1564 * disabled avoiding further scheduler activity on it and we're
1565 * being very careful to re-start the picking loop.
1567 lockdep_unpin_lock(&rq->lock);
1569 lockdep_pin_lock(&rq->lock);
1571 * pull_rt_task() can drop (and re-acquire) rq->lock; this
1572 * means a dl or stop task can slip in, in which case we need
1573 * to re-start task selection.
1575 if (unlikely((rq->stop && task_on_rq_queued(rq->stop)) ||
1576 rq->dl.dl_nr_running))
1581 * We may dequeue prev's rt_rq in put_prev_task().
1582 * So, we update time before rt_nr_running check.
1584 if (prev->sched_class == &rt_sched_class)
1587 if (!rt_rq->rt_queued) {
1589 * The next task to be picked on this rq will have a lower
1590 * priority than rt tasks so we can spend some time to update
1591 * the capacity used by rt tasks based on the last activity.
1592 * This value will then be used as an estimation of the next
1595 sched_rt_update_capacity_req(rq);
1599 put_prev_task(rq, prev);
1601 p = _pick_next_task_rt(rq);
1603 /* The running task is never eligible for pushing */
1604 dequeue_pushable_task(rq, p);
1606 queue_push_tasks(rq);
1611 static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
1616 * The previous task needs to be made eligible for pushing
1617 * if it is still active
1619 if (on_rt_rq(&p->rt) && p->nr_cpus_allowed > 1)
1620 enqueue_pushable_task(rq, p);
1625 /* Only try algorithms three times */
1626 #define RT_MAX_TRIES 3
1628 static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu)
1630 if (!task_running(rq, p) &&
1631 cpumask_test_cpu(cpu, tsk_cpus_allowed(p)))
1637 * Return the highest-priority pushable task on this rq that is suitable to
1638 * be executed on the cpu, or NULL if there is none.
1640 static struct task_struct *pick_highest_pushable_task(struct rq *rq, int cpu)
1642 struct plist_head *head = &rq->rt.pushable_tasks;
1643 struct task_struct *p;
1645 if (!has_pushable_tasks(rq))
1648 plist_for_each_entry(p, head, pushable_tasks) {
1649 if (pick_rt_task(rq, p, cpu))
1656 static DEFINE_PER_CPU(cpumask_var_t, local_cpu_mask);
1658 static int find_lowest_rq(struct task_struct *task)
1660 struct sched_domain *sd;
1661 struct cpumask *lowest_mask = this_cpu_cpumask_var_ptr(local_cpu_mask);
1662 int this_cpu = smp_processor_id();
1663 int cpu = task_cpu(task);
1665 /* Make sure the mask is initialized first */
1666 if (unlikely(!lowest_mask))
1669 if (task->nr_cpus_allowed == 1)
1670 return -1; /* No other targets possible */
1672 if (!cpupri_find(&task_rq(task)->rd->cpupri, task, lowest_mask))
1673 return -1; /* No targets found */
1676 * At this point we have built a mask of cpus representing the
1677 * lowest priority tasks in the system. Now we want to elect
1678 * the best one based on our affinity and topology.
1680 * We prioritize the last cpu that the task executed on since
1681 * it is most likely cache-hot in that location.
1683 if (cpumask_test_cpu(cpu, lowest_mask))
1687 * Otherwise, we consult the sched_domains span maps to figure
1688 * out which cpu is logically closest to our hot cache data.
1690 if (!cpumask_test_cpu(this_cpu, lowest_mask))
1691 this_cpu = -1; /* Skip this_cpu opt if not among lowest */
1694 for_each_domain(cpu, sd) {
1695 if (sd->flags & SD_WAKE_AFFINE) {
1699 * "this_cpu" is cheaper to preempt than a
1702 if (this_cpu != -1 &&
1703 cpumask_test_cpu(this_cpu, sched_domain_span(sd))) {
1708 best_cpu = cpumask_first_and(lowest_mask,
1709 sched_domain_span(sd));
1710 if (best_cpu < nr_cpu_ids) {
1719 * And finally, if there were no matches within the domains
1720 * just give the caller *something* to work with from the compatible
1726 cpu = cpumask_any(lowest_mask);
1727 if (cpu < nr_cpu_ids)
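/*
 * Selection order, in short: (1) the task's previous CPU if it is in the
 * lowest-priority mask (likely cache-hot), (2) this_cpu when it shares a
 * SD_WAKE_AFFINE domain with that CPU, (3) the first mask CPU inside each
 * successively larger sched_domain span, and finally (4) any CPU in the
 * mask, or -1 if none qualifies.
 */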
1732 /* Will lock the rq it finds */
1733 static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
1735 struct rq *lowest_rq = NULL;
1739 for (tries = 0; tries < RT_MAX_TRIES; tries++) {
1740 cpu = find_lowest_rq(task);
1742 if ((cpu == -1) || (cpu == rq->cpu))
1745 lowest_rq = cpu_rq(cpu);
1747 if (lowest_rq->rt.highest_prio.curr <= task->prio) {
1749 * Target rq has tasks of equal or higher priority,
1750 * retrying does not release any lock and is unlikely
1751 * to yield a different result.
1757 /* if the prio of this runqueue changed, try again */
1758 if (double_lock_balance(rq, lowest_rq)) {
1760 * We had to unlock the run queue. In
1761 * the meantime, the task could have
1762 * migrated already or had its affinity changed.
1763 * Also make sure that it wasn't scheduled on its rq.
1765 if (unlikely(task_rq(task) != rq ||
1766 !cpumask_test_cpu(lowest_rq->cpu,
1767 tsk_cpus_allowed(task)) ||
1768 task_running(rq, task) ||
1769 !task_on_rq_queued(task))) {
1771 double_unlock_balance(rq, lowest_rq);
1777 /* If this rq is still suitable use it. */
1778 if (lowest_rq->rt.highest_prio.curr > task->prio)
1782 double_unlock_balance(rq, lowest_rq);
1789 static struct task_struct *pick_next_pushable_task(struct rq *rq)
1791 struct task_struct *p;
1793 if (!has_pushable_tasks(rq))
1796 p = plist_first_entry(&rq->rt.pushable_tasks,
1797 struct task_struct, pushable_tasks);
1799 BUG_ON(rq->cpu != task_cpu(p));
1800 BUG_ON(task_current(rq, p));
1801 BUG_ON(p->nr_cpus_allowed <= 1);
1803 BUG_ON(!task_on_rq_queued(p));
1804 BUG_ON(!rt_task(p));
1810 * If the current CPU has more than one RT task, see if the
1811 * non-running task can migrate over to a CPU that is running a task
1812 * of lesser priority.
1814 static int push_rt_task(struct rq *rq)
1816 struct task_struct *next_task;
1817 struct rq *lowest_rq;
1820 if (!rq->rt.overloaded)
1823 next_task = pick_next_pushable_task(rq);
1828 if (unlikely(next_task == rq->curr)) {
1834 * It's possible that the next_task slipped in with a
1835 * higher priority than current. If that's the case,
1836 * just reschedule current.
1838 if (unlikely(next_task->prio < rq->curr->prio)) {
1843 /* We might release rq lock */
1844 get_task_struct(next_task);
1846 /* find_lock_lowest_rq locks the rq if found */
1847 lowest_rq = find_lock_lowest_rq(next_task, rq);
1849 struct task_struct *task;
1851 * find_lock_lowest_rq releases rq->lock
1852 * so it is possible that next_task has migrated.
1854 * We need to make sure that the task is still on the same
1855 * run-queue and is also still the next task eligible for
1858 task = pick_next_pushable_task(rq);
1859 if (task_cpu(next_task) == rq->cpu && task == next_task) {
1861 * The task hasn't migrated, and is still the next
1862 * eligible task, but we failed to find a run-queue
1863 * to push it to. Do not retry in this case, since
1864 * other cpus will pull from us when ready.
1870 /* No more tasks, just exit */
1874 * Something has shifted, try again.
1876 put_task_struct(next_task);
1881 deactivate_task(rq, next_task, 0);
1882 set_task_cpu(next_task, lowest_rq->cpu);
1883 activate_task(lowest_rq, next_task, 0);
1886 resched_curr(lowest_rq);
1888 double_unlock_balance(rq, lowest_rq);
1891 put_task_struct(next_task);
1896 static void push_rt_tasks(struct rq *rq)
1898 /* push_rt_task will return true if it moved an RT task */
1899 while (push_rt_task(rq))
1903 #ifdef HAVE_RT_PUSH_IPI
1905 * The search for the next cpu always starts at rq->cpu and ends
1906 * when we reach rq->cpu again. It will never return rq->cpu.
1907 * This returns the next cpu to check, or nr_cpu_ids if the loop
1910 * rq->rt.push_cpu holds the last cpu returned by this function,
1911 * or if this is the first instance, it must hold rq->cpu.
1913 static int rto_next_cpu(struct rq *rq)
1915 int prev_cpu = rq->rt.push_cpu;
1918 cpu = cpumask_next(prev_cpu, rq->rd->rto_mask);
1921 * If the previous cpu is less than the rq's CPU, then it already
1922 * passed the end of the mask, and has started from the beginning.
1923 * We end if the next CPU is greater or equal to rq's CPU.
1925 if (prev_cpu < rq->cpu) {
1929 } else if (cpu >= nr_cpu_ids) {
1931 * We passed the end of the mask, start at the beginning.
1932 * If the result is greater or equal to the rq's CPU, then
1933 * the loop is finished.
1935 cpu = cpumask_first(rq->rd->rto_mask);
1939 rq->rt.push_cpu = cpu;
1941 /* Return cpu to let the caller know if the loop is finished or not */
1945 static int find_next_push_cpu(struct rq *rq)
1951 cpu = rto_next_cpu(rq);
1952 if (cpu >= nr_cpu_ids)
1954 next_rq = cpu_rq(cpu);
1956 /* Make sure the next rq can push to this rq */
1957 if (next_rq->rt.highest_prio.next < rq->rt.highest_prio.curr)
1964 #define RT_PUSH_IPI_EXECUTING 1
1965 #define RT_PUSH_IPI_RESTART 2
1967 static void tell_cpu_to_push(struct rq *rq)
1971 if (rq->rt.push_flags & RT_PUSH_IPI_EXECUTING) {
1972 raw_spin_lock(&rq->rt.push_lock);
1973 /* Make sure it's still executing */
1974 if (rq->rt.push_flags & RT_PUSH_IPI_EXECUTING) {
1976 * Tell the IPI to restart the loop as things have
1977 * changed since it started.
1979 rq->rt.push_flags |= RT_PUSH_IPI_RESTART;
1980 raw_spin_unlock(&rq->rt.push_lock);
1983 raw_spin_unlock(&rq->rt.push_lock);
1986 /* When here, there's no IPI going around */
1988 rq->rt.push_cpu = rq->cpu;
1989 cpu = find_next_push_cpu(rq);
1990 if (cpu >= nr_cpu_ids)
1993 rq->rt.push_flags = RT_PUSH_IPI_EXECUTING;
1995 irq_work_queue_on(&rq->rt.push_work, cpu);
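/*
 * Sketch of the IPI chain (hypothetical CPUs): with rq->cpu = 1 and
 * rto_mask = {0, 3}, the first irq_work IPI goes to CPU 3; CPU 3 pushes what
 * it can and forwards the IPI to CPU 0, and after CPU 0 the scan wraps back
 * past CPU 1 and the chain stops. This keeps this CPU, which is about to run
 * lower-priority work, from taking every overloaded CPU's rq lock itself.
 */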
1998 /* Called from hardirq context */
1999 static void try_to_push_tasks(void *arg)
2001 struct rt_rq *rt_rq = arg;
2002 struct rq *rq, *src_rq;
2006 this_cpu = rt_rq->push_cpu;
2008 /* Paranoid check */
2009 BUG_ON(this_cpu != smp_processor_id());
2011 rq = cpu_rq(this_cpu);
2012 src_rq = rq_of_rt_rq(rt_rq);
2015 if (has_pushable_tasks(rq)) {
2016 raw_spin_lock(&rq->lock);
2018 raw_spin_unlock(&rq->lock);
2021 /* Pass the IPI to the next rt overloaded queue */
2022 raw_spin_lock(&rt_rq->push_lock);
2024 * If the source queue changed since the IPI went out,
2025 * we need to restart the search from that CPU again.
2027 if (rt_rq->push_flags & RT_PUSH_IPI_RESTART) {
2028 rt_rq->push_flags &= ~RT_PUSH_IPI_RESTART;
2029 rt_rq->push_cpu = src_rq->cpu;
2032 cpu = find_next_push_cpu(src_rq);
2034 if (cpu >= nr_cpu_ids)
2035 rt_rq->push_flags &= ~RT_PUSH_IPI_EXECUTING;
2036 raw_spin_unlock(&rt_rq->push_lock);
2038 if (cpu >= nr_cpu_ids)
2042 * It is possible that a restart caused this CPU to be
2043 * chosen again. Don't bother with an IPI, just see if we
2044 * have more to push.
2046 if (unlikely(cpu == rq->cpu))
2049 /* Try the next RT overloaded CPU */
2050 irq_work_queue_on(&rt_rq->push_work, cpu);
2053 static void push_irq_work_func(struct irq_work *work)
2055 struct rt_rq *rt_rq = container_of(work, struct rt_rq, push_work);
2057 try_to_push_tasks(rt_rq);
2059 #endif /* HAVE_RT_PUSH_IPI */
2061 static void pull_rt_task(struct rq *this_rq)
2063 int this_cpu = this_rq->cpu, cpu;
2064 bool resched = false;
2065 struct task_struct *p;
2068 if (likely(!rt_overloaded(this_rq)))
2072 * Match the barrier from rt_set_overload(); this guarantees that if we
2073 * see overloaded we must also see the rto_mask bit.
2077 #ifdef HAVE_RT_PUSH_IPI
2078 if (sched_feat(RT_PUSH_IPI)) {
2079 tell_cpu_to_push(this_rq);
2084 for_each_cpu(cpu, this_rq->rd->rto_mask) {
2085 if (this_cpu == cpu)
2088 src_rq = cpu_rq(cpu);
2091 * Don't bother taking the src_rq->lock if the next highest
2092 * task is known to be lower-priority than our current task.
2093 * This may look racy, but if this value is about to go
2094 * logically higher, the src_rq will push this task away.
2095 * And if it's going logically lower, we do not care
2097 if (src_rq->rt.highest_prio.next >=
2098 this_rq->rt.highest_prio.curr)
2102 * We can potentially drop this_rq's lock in
2103 * double_lock_balance, and another CPU could
2106 double_lock_balance(this_rq, src_rq);
2109 * We can only pull a task that is pushable
2110 * on its rq, and no others.
2112 p = pick_highest_pushable_task(src_rq, this_cpu);
2115 * Do we have an RT task that preempts
2116 * the to-be-scheduled task?
2118 if (p && (p->prio < this_rq->rt.highest_prio.curr)) {
2119 WARN_ON(p == src_rq->curr);
2120 WARN_ON(!task_on_rq_queued(p));
2123 * There's a chance that p is higher in priority
2124 * than what's currently running on its cpu.
2125 * This is just that p is waking up and hasn't
2126 * had a chance to schedule. We only pull
2127 * p if it is lower in priority than the
2128 * current task on the run queue
2130 if (p->prio < src_rq->curr->prio)
2135 deactivate_task(src_rq, p, 0);
2136 set_task_cpu(p, this_cpu);
2137 activate_task(this_rq, p, 0);
2139 * We continue with the search, just in
2140 * case there's an even higher prio task
2141 * in another runqueue. (low likelihood
2146 double_unlock_balance(this_rq, src_rq);
2150 resched_curr(this_rq);
2154 * If we are not running and we are not going to reschedule soon, we should
2155 * try to push tasks away now
2157 static void task_woken_rt(struct rq *rq, struct task_struct *p)
2159 if (!task_running(rq, p) &&
2160 !test_tsk_need_resched(rq->curr) &&
2161 p->nr_cpus_allowed > 1 &&
2162 (dl_task(rq->curr) || rt_task(rq->curr)) &&
2163 (rq->curr->nr_cpus_allowed < 2 ||
2164 rq->curr->prio <= p->prio))
2168 /* Assumes rq->lock is held */
2169 static void rq_online_rt(struct rq *rq)
2171 if (rq->rt.overloaded)
2172 rt_set_overload(rq);
2174 __enable_runtime(rq);
2176 cpupri_set(&rq->rd->cpupri, rq->cpu, rq->rt.highest_prio.curr);
2179 /* Assumes rq->lock is held */
2180 static void rq_offline_rt(struct rq *rq)
2182 if (rq->rt.overloaded)
2183 rt_clear_overload(rq);
2185 __disable_runtime(rq);
2187 cpupri_set(&rq->rd->cpupri, rq->cpu, CPUPRI_INVALID);
2191 * When switching from the rt queue, we bring ourselves to a position
2192 * where we might want to pull RT tasks from other runqueues.
2194 static void switched_from_rt(struct rq *rq, struct task_struct *p)
2197 * If there are other RT tasks then we will reschedule
2198 * and the scheduling of the other RT tasks will handle
2199 * the balancing. But if we are the last RT task
2200 * we may need to handle the pulling of RT tasks
2203 if (!task_on_rq_queued(p) || rq->rt.rt_nr_running)
2206 queue_pull_task(rq);
2209 void __init init_sched_rt_class(void)
2213 for_each_possible_cpu(i) {
2214 zalloc_cpumask_var_node(&per_cpu(local_cpu_mask, i),
2215 GFP_KERNEL, cpu_to_node(i));
2218 #endif /* CONFIG_SMP */
2221 * When switching a task to RT, we may overload the runqueue
2222 * with RT tasks. In this case we try to push them off to
2225 static void switched_to_rt(struct rq *rq, struct task_struct *p)
2228 * If we are already running, then there's nothing
2229 * that needs to be done. But if we are not running,
2230 * we may need to preempt the currently running task.
2231 * If that currently running task is also an RT task,
2232 * then see if we can move to another run queue.
2234 if (task_on_rq_queued(p) && rq->curr != p) {
2236 if (p->nr_cpus_allowed > 1 && rq->rt.overloaded)
2237 queue_push_tasks(rq);
2239 if (p->prio < rq->curr->prio)
2241 #endif /* CONFIG_SMP */
2246 * Priority of the task has changed. This may cause
2247 * us to initiate a push or pull.
2250 prio_changed_rt(struct rq *rq, struct task_struct *p, int oldprio)
2252 if (!task_on_rq_queued(p))
2255 if (rq->curr == p) {
2258 * If our priority decreases while running, we
2259 * may need to pull tasks to this runqueue.
2261 if (oldprio < p->prio)
2262 queue_pull_task(rq);
2265 * If there's a higher priority task waiting to run
2268 if (p->prio > rq->rt.highest_prio.curr)
2271 /* For UP simply resched on drop of prio */
2272 if (oldprio < p->prio)
2274 #endif /* CONFIG_SMP */
2277 * This task is not running, but if it is
2278 * higher in priority than the currently running task
2281 if (p->prio < rq->curr->prio)
2286 static void watchdog(struct rq *rq, struct task_struct *p)
2288 unsigned long soft, hard;
2290 /* max may change after cur was read, this will be fixed next tick */
2291 soft = task_rlimit(p, RLIMIT_RTTIME);
2292 hard = task_rlimit_max(p, RLIMIT_RTTIME);
2294 if (soft != RLIM_INFINITY) {
2297 if (p->rt.watchdog_stamp != jiffies) {
2299 p->rt.watchdog_stamp = jiffies;
2302 next = DIV_ROUND_UP(min(soft, hard), USEC_PER_SEC/HZ);
2303 if (p->rt.timeout > next)
2304 p->cputime_expires.sched_exp = p->se.sum_exec_runtime;
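/*
 * Worked example (assuming HZ=250 and a 500 ms RLIMIT_RTTIME soft limit):
 * USEC_PER_SEC/HZ = 4000 us per tick, so next = DIV_ROUND_UP(500000, 4000)
 * = 125 ticks; once p->rt.timeout exceeds that, sched_exp is armed and the
 * POSIX CPU timer path delivers the RLIMIT_RTTIME signal (SIGXCPU, and
 * eventually SIGKILL at the hard limit).
 */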
2308 static void task_tick_rt(struct rq *rq, struct task_struct *p, int queued)
2310 struct sched_rt_entity *rt_se = &p->rt;
2314 if (rq->rt.rt_nr_running)
2315 sched_rt_update_capacity_req(rq);
2320 * RR tasks need a special form of timeslice management.
2321 * FIFO tasks have no timeslices.
2323 if (p->policy != SCHED_RR)
2326 if (--p->rt.time_slice)
2329 p->rt.time_slice = sched_rr_timeslice;
2332 * Requeue to the end of the queue if we (and all of our ancestors) are not
2333 * the only element on the queue
2335 for_each_sched_rt_entity(rt_se) {
2336 if (rt_se->run_list.prev != rt_se->run_list.next) {
2337 requeue_task_rt(rq, p, 0);
2344 static void set_curr_task_rt(struct rq *rq)
2346 struct task_struct *p = rq->curr;
2348 p->se.exec_start = rq_clock_task(rq);
2350 /* The running task is never eligible for pushing */
2351 dequeue_pushable_task(rq, p);
2354 static unsigned int get_rr_interval_rt(struct rq *rq, struct task_struct *task)
2357 * Time slice is 0 for SCHED_FIFO tasks
2359 if (task->policy == SCHED_RR)
2360 return sched_rr_timeslice;
2365 const struct sched_class rt_sched_class = {
2366 .next = &fair_sched_class,
2367 .enqueue_task = enqueue_task_rt,
2368 .dequeue_task = dequeue_task_rt,
2369 .yield_task = yield_task_rt,
2371 .check_preempt_curr = check_preempt_curr_rt,
2373 .pick_next_task = pick_next_task_rt,
2374 .put_prev_task = put_prev_task_rt,
2377 .select_task_rq = select_task_rq_rt,
2379 .set_cpus_allowed = set_cpus_allowed_common,
2380 .rq_online = rq_online_rt,
2381 .rq_offline = rq_offline_rt,
2382 .task_woken = task_woken_rt,
2383 .switched_from = switched_from_rt,
2386 .set_curr_task = set_curr_task_rt,
2387 .task_tick = task_tick_rt,
2389 .get_rr_interval = get_rr_interval_rt,
2391 .prio_changed = prio_changed_rt,
2392 .switched_to = switched_to_rt,
2394 .update_curr = update_curr_rt,
2397 #ifdef CONFIG_SCHED_DEBUG
2398 extern void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq);
2400 void print_rt_stats(struct seq_file *m, int cpu)
2403 struct rt_rq *rt_rq;
2406 for_each_rt_rq(rt_rq, iter, cpu_rq(cpu))
2407 print_rt_rq(m, cpu, rt_rq);
2410 #endif /* CONFIG_SCHED_DEBUG */