2 * Real-Time Scheduling Class (mapped to the SCHED_FIFO and SCHED_RR policies)
8 #include <linux/slab.h>
10 static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun);
12 struct rt_bandwidth def_rt_bandwidth;
14 static enum hrtimer_restart sched_rt_period_timer(struct hrtimer *timer)
16 struct rt_bandwidth *rt_b =
17 container_of(timer, struct rt_bandwidth, rt_period_timer);
23 now = hrtimer_cb_get_time(timer);
24 overrun = hrtimer_forward(timer, now, rt_b->rt_period);
29 idle = do_sched_rt_period_timer(rt_b, overrun);
32 return idle ? HRTIMER_NORESTART : HRTIMER_RESTART;
35 void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime)
37 rt_b->rt_period = ns_to_ktime(period);
38 rt_b->rt_runtime = runtime;
40 raw_spin_lock_init(&rt_b->rt_runtime_lock);
42 hrtimer_init(&rt_b->rt_period_timer,
43 CLOCK_MONOTONIC, HRTIMER_MODE_REL);
44 rt_b->rt_period_timer.function = sched_rt_period_timer;
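/*
 * Illustrative usage sketch (the actual caller lives in the core scheduler;
 * the helper names and the 1s/0.95s defaults below are the usual values,
 * assumed here):
 *
 *	init_rt_bandwidth(&def_rt_bandwidth,
 *			  global_rt_period(), global_rt_runtime());
 *
 * i.e. a period of 1000000us and a runtime of 950000us, both expressed in
 * nanoseconds, so RT tasks may consume at most 95% of every period.
 */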
47 static void start_rt_bandwidth(struct rt_bandwidth *rt_b)
49 if (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF)
52 if (hrtimer_active(&rt_b->rt_period_timer))
55 raw_spin_lock(&rt_b->rt_runtime_lock);
56 start_bandwidth_timer(&rt_b->rt_period_timer, rt_b->rt_period);
57 raw_spin_unlock(&rt_b->rt_runtime_lock);
60 void init_rt_rq(struct rt_rq *rt_rq, struct rq *rq)
62 struct rt_prio_array *array;
65 array = &rt_rq->active;
66 for (i = 0; i < MAX_RT_PRIO; i++) {
67 INIT_LIST_HEAD(array->queue + i);
68 __clear_bit(i, array->bitmap);
70 /* delimiter for bitsearch: */
71 __set_bit(MAX_RT_PRIO, array->bitmap);
73 #if defined CONFIG_SMP
74 rt_rq->highest_prio.curr = MAX_RT_PRIO;
75 rt_rq->highest_prio.next = MAX_RT_PRIO;
76 rt_rq->rt_nr_migratory = 0;
77 rt_rq->overloaded = 0;
78 plist_head_init(&rt_rq->pushable_tasks);
82 rt_rq->rt_throttled = 0;
83 rt_rq->rt_runtime = 0;
84 raw_spin_lock_init(&rt_rq->rt_runtime_lock);
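/*
 * Sketch of the priority array initialised above: one list head per priority
 * level 0..MAX_RT_PRIO-1 plus one always-set delimiter bit, so a bit search
 * never runs off the end of the bitmap.  Finding the highest-priority
 * non-empty queue is then simply (as pick_next_rt_entity() does below):
 *
 *	idx = sched_find_first_bit(array->bitmap);
 *	if (idx < MAX_RT_PRIO)
 *		queue = array->queue + idx;
 */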
87 #ifdef CONFIG_RT_GROUP_SCHED
88 static void destroy_rt_bandwidth(struct rt_bandwidth *rt_b)
90 hrtimer_cancel(&rt_b->rt_period_timer);
93 #define rt_entity_is_task(rt_se) (!(rt_se)->my_q)
95 static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
97 #ifdef CONFIG_SCHED_DEBUG
98 WARN_ON_ONCE(!rt_entity_is_task(rt_se));
100 return container_of(rt_se, struct task_struct, rt);
103 static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
108 static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
113 void free_rt_sched_group(struct task_group *tg)
118 destroy_rt_bandwidth(&tg->rt_bandwidth);
120 for_each_possible_cpu(i) {
131 void init_tg_rt_entry(struct task_group *tg, struct rt_rq *rt_rq,
132 struct sched_rt_entity *rt_se, int cpu,
133 struct sched_rt_entity *parent)
135 struct rq *rq = cpu_rq(cpu);
137 rt_rq->highest_prio.curr = MAX_RT_PRIO;
138 rt_rq->rt_nr_boosted = 0;
142 tg->rt_rq[cpu] = rt_rq;
143 tg->rt_se[cpu] = rt_se;
149 rt_se->rt_rq = &rq->rt;
151 rt_se->rt_rq = parent->my_q;
154 rt_se->parent = parent;
155 INIT_LIST_HEAD(&rt_se->run_list);
158 int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
161 struct sched_rt_entity *rt_se;
164 tg->rt_rq = kzalloc(sizeof(rt_rq) * nr_cpu_ids, GFP_KERNEL);
167 tg->rt_se = kzalloc(sizeof(rt_se) * nr_cpu_ids, GFP_KERNEL);
171 init_rt_bandwidth(&tg->rt_bandwidth,
172 ktime_to_ns(def_rt_bandwidth.rt_period), 0);
174 for_each_possible_cpu(i) {
175 rt_rq = kzalloc_node(sizeof(struct rt_rq),
176 GFP_KERNEL, cpu_to_node(i));
180 rt_se = kzalloc_node(sizeof(struct sched_rt_entity),
181 GFP_KERNEL, cpu_to_node(i));
185 init_rt_rq(rt_rq, cpu_rq(i));
186 rt_rq->rt_runtime = tg->rt_bandwidth.rt_runtime;
187 init_tg_rt_entry(tg, rt_rq, rt_se, i, parent->rt_se[i]);
198 #else /* CONFIG_RT_GROUP_SCHED */
200 #define rt_entity_is_task(rt_se) (1)
202 static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
204 return container_of(rt_se, struct task_struct, rt);
207 static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
209 return container_of(rt_rq, struct rq, rt);
212 static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
214 struct task_struct *p = rt_task_of(rt_se);
215 struct rq *rq = task_rq(p);
220 void free_rt_sched_group(struct task_group *tg) { }
222 int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
226 #endif /* CONFIG_RT_GROUP_SCHED */
230 static inline int rt_overloaded(struct rq *rq)
232 return atomic_read(&rq->rd->rto_count);
235 static inline void rt_set_overload(struct rq *rq)
240 cpumask_set_cpu(rq->cpu, rq->rd->rto_mask);
242 * Make sure the mask is visible before we set
243 * the overload count. That is checked to determine
244 * if we should look at the mask. It would be a shame
245 * if we looked at the mask, but the mask was not updated yet.
249 atomic_inc(&rq->rd->rto_count);
252 static inline void rt_clear_overload(struct rq *rq)
257 /* the order here really doesn't matter */
258 atomic_dec(&rq->rd->rto_count);
259 cpumask_clear_cpu(rq->cpu, rq->rd->rto_mask);
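/*
 * The rto_count/rto_mask pair maintained above is consumed by the pull path:
 * a CPU about to run a lower-priority task first checks rt_overloaded() and,
 * only if the count is non-zero, walks rd->rto_mask looking for runqueues to
 * pull from (see pull_rt_task() below).  That is why rt_set_overload() must
 * make the mask visible before bumping the count.
 */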
262 static void update_rt_migration(struct rt_rq *rt_rq)
264 if (rt_rq->rt_nr_migratory && rt_rq->rt_nr_total > 1) {
265 if (!rt_rq->overloaded) {
266 rt_set_overload(rq_of_rt_rq(rt_rq));
267 rt_rq->overloaded = 1;
269 } else if (rt_rq->overloaded) {
270 rt_clear_overload(rq_of_rt_rq(rt_rq));
271 rt_rq->overloaded = 0;
275 static void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
277 struct task_struct *p;
279 if (!rt_entity_is_task(rt_se))
282 p = rt_task_of(rt_se);
283 rt_rq = &rq_of_rt_rq(rt_rq)->rt;
285 rt_rq->rt_nr_total++;
286 if (p->nr_cpus_allowed > 1)
287 rt_rq->rt_nr_migratory++;
289 update_rt_migration(rt_rq);
292 static void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
294 struct task_struct *p;
296 if (!rt_entity_is_task(rt_se))
299 p = rt_task_of(rt_se);
300 rt_rq = &rq_of_rt_rq(rt_rq)->rt;
302 rt_rq->rt_nr_total--;
303 if (p->nr_cpus_allowed > 1)
304 rt_rq->rt_nr_migratory--;
306 update_rt_migration(rt_rq);
309 static inline int has_pushable_tasks(struct rq *rq)
311 return !plist_head_empty(&rq->rt.pushable_tasks);
314 static void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
316 plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);
317 plist_node_init(&p->pushable_tasks, p->prio);
318 plist_add(&p->pushable_tasks, &rq->rt.pushable_tasks);
320 /* Update the highest prio pushable task */
321 if (p->prio < rq->rt.highest_prio.next)
322 rq->rt.highest_prio.next = p->prio;
325 static void dequeue_pushable_task(struct rq *rq, struct task_struct *p)
327 plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);
329 /* Update the new highest prio pushable task */
330 if (has_pushable_tasks(rq)) {
331 p = plist_first_entry(&rq->rt.pushable_tasks,
332 struct task_struct, pushable_tasks);
333 rq->rt.highest_prio.next = p->prio;
335 rq->rt.highest_prio.next = MAX_RT_PRIO;
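/*
 * pushable_tasks is a plist ordered by p->prio (lower value = higher
 * priority), so plist_first_entry() above always yields the best push
 * candidate.  Example (assumed prios): with pushable tasks of prio 10 and 40
 * queued, highest_prio.next is 10; remove the prio-10 task and it becomes 40,
 * and MAX_RT_PRIO once the list is empty.
 */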
340 static inline void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
344 static inline void dequeue_pushable_task(struct rq *rq, struct task_struct *p)
349 void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
354 void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
358 #endif /* CONFIG_SMP */
360 static inline int on_rt_rq(struct sched_rt_entity *rt_se)
362 return !list_empty(&rt_se->run_list);
365 #ifdef CONFIG_RT_GROUP_SCHED
367 static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
372 return rt_rq->rt_runtime;
375 static inline u64 sched_rt_period(struct rt_rq *rt_rq)
377 return ktime_to_ns(rt_rq->tg->rt_bandwidth.rt_period);
380 typedef struct task_group *rt_rq_iter_t;
382 static inline struct task_group *next_task_group(struct task_group *tg)
385 tg = list_entry_rcu(tg->list.next,
386 typeof(struct task_group), list);
387 } while (&tg->list != &task_groups && task_group_is_autogroup(tg));
389 if (&tg->list == &task_groups)
395 #define for_each_rt_rq(rt_rq, iter, rq) \
396 for (iter = container_of(&task_groups, typeof(*iter), list); \
397 (iter = next_task_group(iter)) && \
398 (rt_rq = iter->rt_rq[cpu_of(rq)]);)
400 static inline void list_add_leaf_rt_rq(struct rt_rq *rt_rq)
402 list_add_rcu(&rt_rq->leaf_rt_rq_list,
403 &rq_of_rt_rq(rt_rq)->leaf_rt_rq_list);
406 static inline void list_del_leaf_rt_rq(struct rt_rq *rt_rq)
408 list_del_rcu(&rt_rq->leaf_rt_rq_list);
411 #define for_each_leaf_rt_rq(rt_rq, rq) \
412 list_for_each_entry_rcu(rt_rq, &rq->leaf_rt_rq_list, leaf_rt_rq_list)
414 #define for_each_sched_rt_entity(rt_se) \
415 for (; rt_se; rt_se = rt_se->parent)
417 static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
422 static void enqueue_rt_entity(struct sched_rt_entity *rt_se, bool head);
423 static void dequeue_rt_entity(struct sched_rt_entity *rt_se);
425 static void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
427 struct task_struct *curr = rq_of_rt_rq(rt_rq)->curr;
428 struct sched_rt_entity *rt_se;
430 int cpu = cpu_of(rq_of_rt_rq(rt_rq));
432 rt_se = rt_rq->tg->rt_se[cpu];
434 if (rt_rq->rt_nr_running) {
435 if (rt_se && !on_rt_rq(rt_se))
436 enqueue_rt_entity(rt_se, false);
437 if (rt_rq->highest_prio.curr < curr->prio)
442 static void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
444 struct sched_rt_entity *rt_se;
445 int cpu = cpu_of(rq_of_rt_rq(rt_rq));
447 rt_se = rt_rq->tg->rt_se[cpu];
449 if (rt_se && on_rt_rq(rt_se))
450 dequeue_rt_entity(rt_se);
453 static inline int rt_rq_throttled(struct rt_rq *rt_rq)
455 return rt_rq->rt_throttled && !rt_rq->rt_nr_boosted;
458 static int rt_se_boosted(struct sched_rt_entity *rt_se)
460 struct rt_rq *rt_rq = group_rt_rq(rt_se);
461 struct task_struct *p;
464 return !!rt_rq->rt_nr_boosted;
466 p = rt_task_of(rt_se);
467 return p->prio != p->normal_prio;
471 static inline const struct cpumask *sched_rt_period_mask(void)
473 return cpu_rq(smp_processor_id())->rd->span;
476 static inline const struct cpumask *sched_rt_period_mask(void)
478 return cpu_online_mask;
483 struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
485 return container_of(rt_b, struct task_group, rt_bandwidth)->rt_rq[cpu];
488 static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
490 return &rt_rq->tg->rt_bandwidth;
493 #else /* !CONFIG_RT_GROUP_SCHED */
495 static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
497 return rt_rq->rt_runtime;
500 static inline u64 sched_rt_period(struct rt_rq *rt_rq)
502 return ktime_to_ns(def_rt_bandwidth.rt_period);
505 typedef struct rt_rq *rt_rq_iter_t;
507 #define for_each_rt_rq(rt_rq, iter, rq) \
508 for ((void) iter, rt_rq = &rq->rt; rt_rq; rt_rq = NULL)
510 static inline void list_add_leaf_rt_rq(struct rt_rq *rt_rq)
514 static inline void list_del_leaf_rt_rq(struct rt_rq *rt_rq)
518 #define for_each_leaf_rt_rq(rt_rq, rq) \
519 for (rt_rq = &rq->rt; rt_rq; rt_rq = NULL)
521 #define for_each_sched_rt_entity(rt_se) \
522 for (; rt_se; rt_se = NULL)
524 static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
529 static inline void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
531 if (rt_rq->rt_nr_running)
532 resched_task(rq_of_rt_rq(rt_rq)->curr);
535 static inline void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
539 static inline int rt_rq_throttled(struct rt_rq *rt_rq)
541 return rt_rq->rt_throttled;
544 static inline const struct cpumask *sched_rt_period_mask(void)
546 return cpu_online_mask;
550 struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
552 return &cpu_rq(cpu)->rt;
555 static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
557 return &def_rt_bandwidth;
560 #endif /* CONFIG_RT_GROUP_SCHED */
564 * We ran out of runtime; see if we can borrow some from our neighbours.
566 static int do_balance_runtime(struct rt_rq *rt_rq)
568 struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
569 struct root_domain *rd = cpu_rq(smp_processor_id())->rd;
570 int i, weight, more = 0;
573 weight = cpumask_weight(rd->span);
575 raw_spin_lock(&rt_b->rt_runtime_lock);
576 rt_period = ktime_to_ns(rt_b->rt_period);
577 for_each_cpu(i, rd->span) {
578 struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
584 raw_spin_lock(&iter->rt_runtime_lock);
586 * Either all rqs have inf runtime and there's nothing to steal
587 * or __disable_runtime() below sets a specific rq to inf to
588 * indicate it's been disabled and disallow stealing.
590 if (iter->rt_runtime == RUNTIME_INF)
594 * From runqueues with spare time, take 1/n part of their
595 * spare time, but no more than our period.
597 diff = iter->rt_runtime - iter->rt_time;
599 diff = div_u64((u64)diff, weight);
600 if (rt_rq->rt_runtime + diff > rt_period)
601 diff = rt_period - rt_rq->rt_runtime;
602 iter->rt_runtime -= diff;
603 rt_rq->rt_runtime += diff;
605 if (rt_rq->rt_runtime == rt_period) {
606 raw_spin_unlock(&iter->rt_runtime_lock);
611 raw_spin_unlock(&iter->rt_runtime_lock);
613 raw_spin_unlock(&rt_b->rt_runtime_lock);
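/*
 * Worked example of the borrowing above (numbers assumed): rt_period = 1s,
 * our rt_runtime = 950ms, and a 4-CPU root domain, so weight = 4.  A
 * neighbour with rt_runtime = 950ms and rt_time = 150ms has 800ms spare;
 * 1/weight of that is 200ms, but 950ms + 200ms would exceed rt_period, so
 * diff is clamped to 50ms.  We then hold rt_runtime == rt_period and break
 * out of the loop early.
 */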
619 * Ensure this RQ takes back all the runtime it lent to its neighbours.
621 static void __disable_runtime(struct rq *rq)
623 struct root_domain *rd = rq->rd;
627 if (unlikely(!scheduler_running))
630 for_each_rt_rq(rt_rq, iter, rq) {
631 struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
635 raw_spin_lock(&rt_b->rt_runtime_lock);
636 raw_spin_lock(&rt_rq->rt_runtime_lock);
638 * Either we're all inf and nobody needs to borrow, or we're
639 * already disabled and thus have nothing to do, or we have
640 * exactly the right amount of runtime to take out.
642 if (rt_rq->rt_runtime == RUNTIME_INF ||
643 rt_rq->rt_runtime == rt_b->rt_runtime)
645 raw_spin_unlock(&rt_rq->rt_runtime_lock);
648 * Calculate the difference between what we started out with
649 * and what we currently have; that's the amount of runtime
650 * we lent out and now have to reclaim.
652 want = rt_b->rt_runtime - rt_rq->rt_runtime;
655 * Greedy reclaim, take back as much as we can.
657 for_each_cpu(i, rd->span) {
658 struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
662 * Can't reclaim from ourselves or disabled runqueues.
664 if (iter == rt_rq || iter->rt_runtime == RUNTIME_INF)
667 raw_spin_lock(&iter->rt_runtime_lock);
669 diff = min_t(s64, iter->rt_runtime, want);
670 iter->rt_runtime -= diff;
673 iter->rt_runtime -= want;
676 raw_spin_unlock(&iter->rt_runtime_lock);
682 raw_spin_lock(&rt_rq->rt_runtime_lock);
684 * We cannot be left wanting - that would mean some runtime
685 * leaked out of the system.
690 * Disable all the borrow logic by pretending we have inf
691 * runtime - in which case borrowing doesn't make sense.
693 rt_rq->rt_runtime = RUNTIME_INF;
694 raw_spin_unlock(&rt_rq->rt_runtime_lock);
695 raw_spin_unlock(&rt_b->rt_runtime_lock);
699 static void disable_runtime(struct rq *rq)
703 raw_spin_lock_irqsave(&rq->lock, flags);
704 __disable_runtime(rq);
705 raw_spin_unlock_irqrestore(&rq->lock, flags);
708 static void __enable_runtime(struct rq *rq)
713 if (unlikely(!scheduler_running))
717 * Reset each runqueue's bandwidth settings
719 for_each_rt_rq(rt_rq, iter, rq) {
720 struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
722 raw_spin_lock(&rt_b->rt_runtime_lock);
723 raw_spin_lock(&rt_rq->rt_runtime_lock);
724 rt_rq->rt_runtime = rt_b->rt_runtime;
726 rt_rq->rt_throttled = 0;
727 raw_spin_unlock(&rt_rq->rt_runtime_lock);
728 raw_spin_unlock(&rt_b->rt_runtime_lock);
732 static void enable_runtime(struct rq *rq)
736 raw_spin_lock_irqsave(&rq->lock, flags);
737 __enable_runtime(rq);
738 raw_spin_unlock_irqrestore(&rq->lock, flags);
741 int update_runtime(struct notifier_block *nfb, unsigned long action, void *hcpu)
743 int cpu = (int)(long)hcpu;
746 case CPU_DOWN_PREPARE:
747 case CPU_DOWN_PREPARE_FROZEN:
748 disable_runtime(cpu_rq(cpu));
751 case CPU_DOWN_FAILED:
752 case CPU_DOWN_FAILED_FROZEN:
754 case CPU_ONLINE_FROZEN:
755 enable_runtime(cpu_rq(cpu));
763 static int balance_runtime(struct rt_rq *rt_rq)
767 if (!sched_feat(RT_RUNTIME_SHARE))
770 if (rt_rq->rt_time > rt_rq->rt_runtime) {
771 raw_spin_unlock(&rt_rq->rt_runtime_lock);
772 more = do_balance_runtime(rt_rq);
773 raw_spin_lock(&rt_rq->rt_runtime_lock);
778 #else /* !CONFIG_SMP */
779 static inline int balance_runtime(struct rt_rq *rt_rq)
783 #endif /* CONFIG_SMP */
785 static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
787 int i, idle = 1, throttled = 0;
788 const struct cpumask *span;
790 span = sched_rt_period_mask();
791 for_each_cpu(i, span) {
793 struct rt_rq *rt_rq = sched_rt_period_rt_rq(rt_b, i);
794 struct rq *rq = rq_of_rt_rq(rt_rq);
796 raw_spin_lock(&rq->lock);
797 if (rt_rq->rt_time) {
800 raw_spin_lock(&rt_rq->rt_runtime_lock);
801 if (rt_rq->rt_throttled)
802 balance_runtime(rt_rq);
803 runtime = rt_rq->rt_runtime;
804 rt_rq->rt_time -= min(rt_rq->rt_time, overrun*runtime);
805 if (rt_rq->rt_throttled && rt_rq->rt_time < runtime) {
806 rt_rq->rt_throttled = 0;
810 * Force a clock update if the CPU was idle,
811 * lest wakeup -> unthrottle time accumulate.
813 if (rt_rq->rt_nr_running && rq->curr == rq->idle)
814 rq->skip_clock_update = -1;
816 if (rt_rq->rt_time || rt_rq->rt_nr_running)
818 raw_spin_unlock(&rt_rq->rt_runtime_lock);
819 } else if (rt_rq->rt_nr_running) {
821 if (!rt_rq_throttled(rt_rq))
824 if (rt_rq->rt_throttled)
828 sched_rt_rq_enqueue(rt_rq);
829 raw_spin_unlock(&rq->lock);
832 if (!throttled && (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF))
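/*
 * Replenishment example (assumed numbers): with runtime = 950ms and an rt_rq
 * throttled after accumulating rt_time = 1.2s, a single period expiry
 * (overrun = 1) does rt_time -= min(1.2s, 950ms), leaving 250ms.  As that is
 * below runtime the throttle is cleared and the rt_rq is put back via
 * sched_rt_rq_enqueue() above.
 */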
838 static inline int rt_se_prio(struct sched_rt_entity *rt_se)
840 #ifdef CONFIG_RT_GROUP_SCHED
841 struct rt_rq *rt_rq = group_rt_rq(rt_se);
844 return rt_rq->highest_prio.curr;
847 return rt_task_of(rt_se)->prio;
850 static int sched_rt_runtime_exceeded(struct rt_rq *rt_rq)
852 u64 runtime = sched_rt_runtime(rt_rq);
854 if (rt_rq->rt_throttled)
855 return rt_rq_throttled(rt_rq);
857 if (runtime >= sched_rt_period(rt_rq))
860 balance_runtime(rt_rq);
861 runtime = sched_rt_runtime(rt_rq);
862 if (runtime == RUNTIME_INF)
865 if (rt_rq->rt_time > runtime) {
866 struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
869 * Don't actually throttle groups that have no runtime assigned
870 * but accrue some time due to boosting.
872 if (likely(rt_b->rt_runtime)) {
873 static bool once = false;
875 rt_rq->rt_throttled = 1;
879 printk_sched("sched: RT throttling activated\n");
883 * In case we did anyway, make it go away,
884 * replenishment is a joke, since it will replenish us with exactly 0 ns.
890 if (rt_rq_throttled(rt_rq)) {
891 sched_rt_rq_dequeue(rt_rq);
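/*
 * With the default 1s period and 950ms runtime assumed earlier, this is where
 * a runaway RT hog trips the "sched: RT throttling activated" message: once
 * rt_time exceeds the runtime within a period the rt_rq is dequeued, leaving
 * the remaining ~50ms of every second to non-RT tasks until the period timer
 * replenishes the budget.
 */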
900 * Update the current task's runtime statistics. Skip current tasks that
901 * are not in our scheduling class.
903 static void update_curr_rt(struct rq *rq)
905 struct task_struct *curr = rq->curr;
906 struct sched_rt_entity *rt_se = &curr->rt;
907 struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
910 if (curr->sched_class != &rt_sched_class)
913 delta_exec = rq->clock_task - curr->se.exec_start;
914 if (unlikely((s64)delta_exec < 0))
917 schedstat_set(curr->se.statistics.exec_max,
918 max(curr->se.statistics.exec_max, delta_exec));
920 curr->se.sum_exec_runtime += delta_exec;
921 account_group_exec_runtime(curr, delta_exec);
923 curr->se.exec_start = rq->clock_task;
924 cpuacct_charge(curr, delta_exec);
926 sched_rt_avg_update(rq, delta_exec);
928 if (!rt_bandwidth_enabled())
931 for_each_sched_rt_entity(rt_se) {
932 rt_rq = rt_rq_of_se(rt_se);
934 if (sched_rt_runtime(rt_rq) != RUNTIME_INF) {
935 raw_spin_lock(&rt_rq->rt_runtime_lock);
936 rt_rq->rt_time += delta_exec;
937 if (sched_rt_runtime_exceeded(rt_rq))
939 raw_spin_unlock(&rt_rq->rt_runtime_lock);
944 #if defined CONFIG_SMP
947 inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
949 struct rq *rq = rq_of_rt_rq(rt_rq);
951 if (rq->online && prio < prev_prio)
952 cpupri_set(&rq->rd->cpupri, rq->cpu, prio);
956 dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
958 struct rq *rq = rq_of_rt_rq(rt_rq);
960 if (rq->online && rt_rq->highest_prio.curr != prev_prio)
961 cpupri_set(&rq->rd->cpupri, rq->cpu, rt_rq->highest_prio.curr);
964 #else /* CONFIG_SMP */
967 void inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) {}
969 void dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) {}
971 #endif /* CONFIG_SMP */
973 #if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
975 inc_rt_prio(struct rt_rq *rt_rq, int prio)
977 int prev_prio = rt_rq->highest_prio.curr;
979 if (prio < prev_prio)
980 rt_rq->highest_prio.curr = prio;
982 inc_rt_prio_smp(rt_rq, prio, prev_prio);
986 dec_rt_prio(struct rt_rq *rt_rq, int prio)
988 int prev_prio = rt_rq->highest_prio.curr;
990 if (rt_rq->rt_nr_running) {
992 WARN_ON(prio < prev_prio);
995 * This may have been our highest task, and therefore
996 * we may have some recomputation to do
998 if (prio == prev_prio) {
999 struct rt_prio_array *array = &rt_rq->active;
1001 rt_rq->highest_prio.curr =
1002 sched_find_first_bit(array->bitmap);
1006 rt_rq->highest_prio.curr = MAX_RT_PRIO;
1008 dec_rt_prio_smp(rt_rq, prio, prev_prio);
1013 static inline void inc_rt_prio(struct rt_rq *rt_rq, int prio) {}
1014 static inline void dec_rt_prio(struct rt_rq *rt_rq, int prio) {}
1016 #endif /* CONFIG_SMP || CONFIG_RT_GROUP_SCHED */
1018 #ifdef CONFIG_RT_GROUP_SCHED
1021 inc_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
1023 if (rt_se_boosted(rt_se))
1024 rt_rq->rt_nr_boosted++;
1027 start_rt_bandwidth(&rt_rq->tg->rt_bandwidth);
1031 dec_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
1033 if (rt_se_boosted(rt_se))
1034 rt_rq->rt_nr_boosted--;
1036 WARN_ON(!rt_rq->rt_nr_running && rt_rq->rt_nr_boosted);
1039 #else /* CONFIG_RT_GROUP_SCHED */
1042 inc_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
1044 start_rt_bandwidth(&def_rt_bandwidth);
1048 void dec_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) {}
1050 #endif /* CONFIG_RT_GROUP_SCHED */
1053 void inc_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
1055 int prio = rt_se_prio(rt_se);
1057 WARN_ON(!rt_prio(prio));
1058 rt_rq->rt_nr_running++;
1060 inc_rt_prio(rt_rq, prio);
1061 inc_rt_migration(rt_se, rt_rq);
1062 inc_rt_group(rt_se, rt_rq);
1066 void dec_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
1068 WARN_ON(!rt_prio(rt_se_prio(rt_se)));
1069 WARN_ON(!rt_rq->rt_nr_running);
1070 rt_rq->rt_nr_running--;
1072 dec_rt_prio(rt_rq, rt_se_prio(rt_se));
1073 dec_rt_migration(rt_se, rt_rq);
1074 dec_rt_group(rt_se, rt_rq);
1077 static void __enqueue_rt_entity(struct sched_rt_entity *rt_se, bool head)
1079 struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
1080 struct rt_prio_array *array = &rt_rq->active;
1081 struct rt_rq *group_rq = group_rt_rq(rt_se);
1082 struct list_head *queue = array->queue + rt_se_prio(rt_se);
1085 * Don't enqueue the group if it's throttled, or when empty.
1086 * The latter is a consequence of the former when a child group
1087 * gets throttled and the current group doesn't have any other
1090 if (group_rq && (rt_rq_throttled(group_rq) || !group_rq->rt_nr_running))
1093 if (!rt_rq->rt_nr_running)
1094 list_add_leaf_rt_rq(rt_rq);
1097 list_add(&rt_se->run_list, queue);
1099 list_add_tail(&rt_se->run_list, queue);
1100 __set_bit(rt_se_prio(rt_se), array->bitmap);
1102 inc_rt_tasks(rt_se, rt_rq);
1105 static void __dequeue_rt_entity(struct sched_rt_entity *rt_se)
1107 struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
1108 struct rt_prio_array *array = &rt_rq->active;
1110 list_del_init(&rt_se->run_list);
1111 if (list_empty(array->queue + rt_se_prio(rt_se)))
1112 __clear_bit(rt_se_prio(rt_se), array->bitmap);
1114 dec_rt_tasks(rt_se, rt_rq);
1115 if (!rt_rq->rt_nr_running)
1116 list_del_leaf_rt_rq(rt_rq);
1120 * Because the prio of an upper entry depends on the lower
1121 * entries, we must remove entries top-down.
1123 static void dequeue_rt_stack(struct sched_rt_entity *rt_se)
1125 struct sched_rt_entity *back = NULL;
1127 for_each_sched_rt_entity(rt_se) {
1132 for (rt_se = back; rt_se; rt_se = rt_se->back) {
1133 if (on_rt_rq(rt_se))
1134 __dequeue_rt_entity(rt_se);
1138 static void enqueue_rt_entity(struct sched_rt_entity *rt_se, bool head)
1140 dequeue_rt_stack(rt_se);
1141 for_each_sched_rt_entity(rt_se)
1142 __enqueue_rt_entity(rt_se, head);
1145 static void dequeue_rt_entity(struct sched_rt_entity *rt_se)
1147 dequeue_rt_stack(rt_se);
1149 for_each_sched_rt_entity(rt_se) {
1150 struct rt_rq *rt_rq = group_rt_rq(rt_se);
1152 if (rt_rq && rt_rq->rt_nr_running)
1153 __enqueue_rt_entity(rt_se, false);
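/*
 * Sketch of the ordering above for an assumed two-level hierarchy (task T in
 * group A): dequeue_rt_stack() walks up via ->parent recording ->back
 * pointers, then dequeues top-down (A's entity first, then T), since an upper
 * entity's priority is derived from what is queued below it.  The enqueue and
 * dequeue paths then re-add the entities bottom-up, skipping group rt_rqs
 * that ended up empty or throttled.
 */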
1158 * Adding/removing a task to/from a priority array:
1161 enqueue_task_rt(struct rq *rq, struct task_struct *p, int flags)
1163 struct sched_rt_entity *rt_se = &p->rt;
1165 if (flags & ENQUEUE_WAKEUP)
1168 enqueue_rt_entity(rt_se, flags & ENQUEUE_HEAD);
1170 if (!task_current(rq, p) && p->nr_cpus_allowed > 1)
1171 enqueue_pushable_task(rq, p);
1176 static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int flags)
1178 struct sched_rt_entity *rt_se = &p->rt;
1181 dequeue_rt_entity(rt_se);
1183 dequeue_pushable_task(rq, p);
1189 * Put a task at the head or the end of the run list without the overhead of
1190 * dequeue followed by enqueue.
1193 requeue_rt_entity(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se, int head)
1195 if (on_rt_rq(rt_se)) {
1196 struct rt_prio_array *array = &rt_rq->active;
1197 struct list_head *queue = array->queue + rt_se_prio(rt_se);
1200 list_move(&rt_se->run_list, queue);
1202 list_move_tail(&rt_se->run_list, queue);
1206 static void requeue_task_rt(struct rq *rq, struct task_struct *p, int head)
1208 struct sched_rt_entity *rt_se = &p->rt;
1209 struct rt_rq *rt_rq;
1211 for_each_sched_rt_entity(rt_se) {
1212 rt_rq = rt_rq_of_se(rt_se);
1213 requeue_rt_entity(rt_rq, rt_se, head);
1217 static void yield_task_rt(struct rq *rq)
1219 requeue_task_rt(rq, rq->curr, 0);
1223 static int find_lowest_rq(struct task_struct *task);
1226 select_task_rq_rt(struct task_struct *p, int sd_flag, int flags)
1228 struct task_struct *curr;
1234 if (p->nr_cpus_allowed == 1)
1237 /* For anything but wake ups, just return the task_cpu */
1238 if (sd_flag != SD_BALANCE_WAKE && sd_flag != SD_BALANCE_FORK)
1244 curr = ACCESS_ONCE(rq->curr); /* unlocked access */
1247 * If the current task on @p's runqueue is an RT task, then
1248 * try to see if we can wake this RT task up on another
1249 * runqueue. Otherwise simply start this RT task
1250 * on its current runqueue.
1252 * We want to avoid overloading runqueues. If the woken
1253 * task is a higher priority, then it will stay on this CPU
1254 * and the lower prio task should be moved to another CPU.
1255 * Even though this will probably make the lower prio task
1256 * lose its cache, we do not want to bounce a higher task
1257 * around just because it gave up its CPU, perhaps for a
1260 * For equal prio tasks, we just let the scheduler sort it out.
1262 * Otherwise, just let it ride on the affined RQ and the
1263 * post-schedule router will push the preempted task away
1265 * This test is optimistic, if we get it wrong the load-balancer
1266 * will have to sort it out.
1268 if (curr && unlikely(rt_task(curr)) &&
1269 (curr->nr_cpus_allowed < 2 ||
1270 curr->prio <= p->prio) &&
1271 (p->nr_cpus_allowed > 1)) {
1272 int target = find_lowest_rq(p);
1283 static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p)
1285 if (rq->curr->nr_cpus_allowed == 1)
1288 if (p->nr_cpus_allowed != 1
1289 && cpupri_find(&rq->rd->cpupri, p, NULL))
1292 if (!cpupri_find(&rq->rd->cpupri, rq->curr, NULL))
1296 * There appear to be other cpus that can accept
1297 * current and none to run 'p', so let's reschedule
1298 * to try and push current away:
1300 requeue_task_rt(rq, p, 1);
1301 resched_task(rq->curr);
1304 #endif /* CONFIG_SMP */
1307 * Preempt the current task with a newly woken task if needed:
1309 static void check_preempt_curr_rt(struct rq *rq, struct task_struct *p, int flags)
1311 if (p->prio < rq->curr->prio) {
1312 resched_task(rq->curr);
1320 * - the newly woken task is of equal priority to the current task
1321 * - the newly woken task is non-migratable while current is migratable
1322 * - current will be preempted on the next reschedule
1324 * we should check to see if current can readily move to a different
1325 * cpu. If so, we will reschedule to allow the push logic to try
1326 * to move current somewhere else, making room for our non-migratable
1329 if (p->prio == rq->curr->prio && !test_tsk_need_resched(rq->curr))
1330 check_preempt_equal_prio(rq, p);
1334 static struct sched_rt_entity *pick_next_rt_entity(struct rq *rq,
1335 struct rt_rq *rt_rq)
1337 struct rt_prio_array *array = &rt_rq->active;
1338 struct sched_rt_entity *next = NULL;
1339 struct list_head *queue;
1342 idx = sched_find_first_bit(array->bitmap);
1343 BUG_ON(idx >= MAX_RT_PRIO);
1345 queue = array->queue + idx;
1346 next = list_entry(queue->next, struct sched_rt_entity, run_list);
1351 static struct task_struct *_pick_next_task_rt(struct rq *rq)
1353 struct sched_rt_entity *rt_se;
1354 struct task_struct *p;
1355 struct rt_rq *rt_rq;
1359 if (!rt_rq->rt_nr_running)
1362 if (rt_rq_throttled(rt_rq))
1366 rt_se = pick_next_rt_entity(rq, rt_rq);
1368 rt_rq = group_rt_rq(rt_se);
1371 p = rt_task_of(rt_se);
1372 p->se.exec_start = rq->clock_task;
1377 static struct task_struct *pick_next_task_rt(struct rq *rq)
1379 struct task_struct *p = _pick_next_task_rt(rq);
1381 /* The running task is never eligible for pushing */
1383 dequeue_pushable_task(rq, p);
1387 * We detect this state here so that we can avoid taking the RQ
1388 * lock again later if there is no need to push
1390 rq->post_schedule = has_pushable_tasks(rq);
1396 static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
1401 * The previous task needs to be made eligible for pushing
1402 * if it is still active
1404 if (on_rt_rq(&p->rt) && p->nr_cpus_allowed > 1)
1405 enqueue_pushable_task(rq, p);
1410 /* Only try algorithms three times */
1411 #define RT_MAX_TRIES 3
1413 static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu)
1415 if (!task_running(rq, p) &&
1416 (cpu < 0 || cpumask_test_cpu(cpu, tsk_cpus_allowed(p))) &&
1417 (p->nr_cpus_allowed > 1))
1422 /* Return the second highest RT task, NULL otherwise */
1423 static struct task_struct *pick_next_highest_task_rt(struct rq *rq, int cpu)
1425 struct task_struct *next = NULL;
1426 struct sched_rt_entity *rt_se;
1427 struct rt_prio_array *array;
1428 struct rt_rq *rt_rq;
1431 for_each_leaf_rt_rq(rt_rq, rq) {
1432 array = &rt_rq->active;
1433 idx = sched_find_first_bit(array->bitmap);
1435 if (idx >= MAX_RT_PRIO)
1437 if (next && next->prio <= idx)
1439 list_for_each_entry(rt_se, array->queue + idx, run_list) {
1440 struct task_struct *p;
1442 if (!rt_entity_is_task(rt_se))
1445 p = rt_task_of(rt_se);
1446 if (pick_rt_task(rq, p, cpu)) {
1452 idx = find_next_bit(array->bitmap, MAX_RT_PRIO, idx+1);
1460 static DEFINE_PER_CPU(cpumask_var_t, local_cpu_mask);
1462 static int find_lowest_rq(struct task_struct *task)
1464 struct sched_domain *sd;
1465 struct cpumask *lowest_mask = __get_cpu_var(local_cpu_mask);
1466 int this_cpu = smp_processor_id();
1467 int cpu = task_cpu(task);
1469 /* Make sure the mask is initialized first */
1470 if (unlikely(!lowest_mask))
1473 if (task->nr_cpus_allowed == 1)
1474 return -1; /* No other targets possible */
1476 if (!cpupri_find(&task_rq(task)->rd->cpupri, task, lowest_mask))
1477 return -1; /* No targets found */
1480 * At this point we have built a mask of cpus representing the
1481 * lowest priority tasks in the system. Now we want to elect
1482 * the best one based on our affinity and topology.
1484 * We prioritize the last cpu that the task executed on since
1485 * it is most likely cache-hot in that location.
1487 if (cpumask_test_cpu(cpu, lowest_mask))
1491 * Otherwise, we consult the sched_domains span maps to figure
1492 * out which cpu is logically closest to our hot cache data.
1494 if (!cpumask_test_cpu(this_cpu, lowest_mask))
1495 this_cpu = -1; /* Skip this_cpu opt if not among lowest */
1498 for_each_domain(cpu, sd) {
1499 if (sd->flags & SD_WAKE_AFFINE) {
1503 * "this_cpu" is cheaper to preempt than a
1506 if (this_cpu != -1 &&
1507 cpumask_test_cpu(this_cpu, sched_domain_span(sd))) {
1512 best_cpu = cpumask_first_and(lowest_mask,
1513 sched_domain_span(sd));
1514 if (best_cpu < nr_cpu_ids) {
1523 * And finally, if there were no matches within the domains
1524 * just give the caller *something* to work with from the compatible
1530 cpu = cpumask_any(lowest_mask);
1531 if (cpu < nr_cpu_ids)
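/*
 * Selection order above, as an assumed example: the task last ran on CPU 2
 * and cpupri reports CPUs {2, 5, 6} as running lower-priority work.  CPU 2 is
 * in the mask, so it wins immediately.  Were it not, the waking CPU would be
 * preferred within an SD_WAKE_AFFINE domain, then the first mask CPU inside
 * each successively larger domain, and finally cpumask_any() of the mask as a
 * last resort.
 */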
1536 /* Will lock the rq it finds */
1537 static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
1539 struct rq *lowest_rq = NULL;
1543 for (tries = 0; tries < RT_MAX_TRIES; tries++) {
1544 cpu = find_lowest_rq(task);
1546 if ((cpu == -1) || (cpu == rq->cpu))
1549 lowest_rq = cpu_rq(cpu);
1551 /* if the prio of this runqueue changed, try again */
1552 if (double_lock_balance(rq, lowest_rq)) {
1554 * We had to unlock the run queue. In
1555 * the meantime, the task could have
1556 * migrated already or had its affinity changed.
1557 * Also make sure that it wasn't scheduled on its rq.
1559 if (unlikely(task_rq(task) != rq ||
1560 !cpumask_test_cpu(lowest_rq->cpu,
1561 tsk_cpus_allowed(task)) ||
1562 task_running(rq, task) ||
1565 raw_spin_unlock(&lowest_rq->lock);
1571 /* If this rq is still suitable use it. */
1572 if (lowest_rq->rt.highest_prio.curr > task->prio)
1576 double_unlock_balance(rq, lowest_rq);
1583 static struct task_struct *pick_next_pushable_task(struct rq *rq)
1585 struct task_struct *p;
1587 if (!has_pushable_tasks(rq))
1590 p = plist_first_entry(&rq->rt.pushable_tasks,
1591 struct task_struct, pushable_tasks);
1593 BUG_ON(rq->cpu != task_cpu(p));
1594 BUG_ON(task_current(rq, p));
1595 BUG_ON(p->nr_cpus_allowed <= 1);
1598 BUG_ON(!rt_task(p));
1604 * If the current CPU has more than one RT task, see if the
1605 * non-running task can migrate over to a CPU that is running a task
1606 * of lesser priority.
1608 static int push_rt_task(struct rq *rq)
1610 struct task_struct *next_task;
1611 struct rq *lowest_rq;
1614 if (!rq->rt.overloaded)
1617 next_task = pick_next_pushable_task(rq);
1621 #ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
1622 if (unlikely(task_running(rq, next_task)))
1627 if (unlikely(next_task == rq->curr)) {
1633 * It's possible that the next_task slipped in with
1634 * higher priority than current. If that's the case,
1635 * just reschedule current.
1637 if (unlikely(next_task->prio < rq->curr->prio)) {
1638 resched_task(rq->curr);
1642 /* We might release rq lock */
1643 get_task_struct(next_task);
1645 /* find_lock_lowest_rq locks the rq if found */
1646 lowest_rq = find_lock_lowest_rq(next_task, rq);
1648 struct task_struct *task;
1650 * find_lock_lowest_rq releases rq->lock
1651 * so it is possible that next_task has migrated.
1653 * We need to make sure that the task is still on the same
1654 * run-queue and is also still the next task eligible for
1657 task = pick_next_pushable_task(rq);
1658 if (task_cpu(next_task) == rq->cpu && task == next_task) {
1660 * The task hasn't migrated, and is still the next
1661 * eligible task, but we failed to find a run-queue
1662 * to push it to. Do not retry in this case, since
1663 * other cpus will pull from us when ready.
1669 /* No more tasks, just exit */
1673 * Something has shifted, try again.
1675 put_task_struct(next_task);
1680 deactivate_task(rq, next_task, 0);
1681 set_task_cpu(next_task, lowest_rq->cpu);
1682 activate_task(lowest_rq, next_task, 0);
1685 resched_task(lowest_rq->curr);
1687 double_unlock_balance(rq, lowest_rq);
1690 put_task_struct(next_task);
1695 static void push_rt_tasks(struct rq *rq)
1697 /* push_rt_task will return true if it moved an RT */
1698 while (push_rt_task(rq))
1702 static int pull_rt_task(struct rq *this_rq)
1704 int this_cpu = this_rq->cpu, ret = 0, cpu;
1705 struct task_struct *p;
1708 if (likely(!rt_overloaded(this_rq)))
1711 for_each_cpu(cpu, this_rq->rd->rto_mask) {
1712 if (this_cpu == cpu)
1715 src_rq = cpu_rq(cpu);
1718 * Don't bother taking the src_rq->lock if the next highest
1719 * task is known to be lower-priority than our current task.
1720 * This may look racy, but if this value is about to go
1721 * logically higher, the src_rq will push this task away.
1722 * And if it's going logically lower, we do not care
1724 if (src_rq->rt.highest_prio.next >=
1725 this_rq->rt.highest_prio.curr)
1729 * We can potentially drop this_rq's lock in
1730 * double_lock_balance, and another CPU could
1733 double_lock_balance(this_rq, src_rq);
1736 * Are there still pullable RT tasks?
1738 if (src_rq->rt.rt_nr_running <= 1)
1741 p = pick_next_highest_task_rt(src_rq, this_cpu);
1744 * Do we have an RT task that preempts
1745 * the to-be-scheduled task?
1747 if (p && (p->prio < this_rq->rt.highest_prio.curr)) {
1748 WARN_ON(p == src_rq->curr);
1752 * There's a chance that p is higher in priority
1753 * than what's currently running on its cpu.
1754 * This is just that p is waking up and hasn't
1755 * had a chance to schedule. We only pull
1756 * p if it is lower in priority than the
1757 * current task on the run queue
1759 if (p->prio < src_rq->curr->prio)
1764 deactivate_task(src_rq, p, 0);
1765 set_task_cpu(p, this_cpu);
1766 activate_task(this_rq, p, 0);
1768 * We continue with the search, just in
1769 * case there's an even higher prio task
1770 * in another runqueue. (low likelihood
1775 double_unlock_balance(this_rq, src_rq);
1781 static void pre_schedule_rt(struct rq *rq, struct task_struct *prev)
1783 /* Try to pull RT tasks here if we lower this rq's prio */
1784 if (rq->rt.highest_prio.curr > prev->prio)
1788 static void post_schedule_rt(struct rq *rq)
1794 * If we are not running and we are not going to reschedule soon, we should
1795 * try to push tasks away now
1797 static void task_woken_rt(struct rq *rq, struct task_struct *p)
1799 if (!task_running(rq, p) &&
1800 !test_tsk_need_resched(rq->curr) &&
1801 has_pushable_tasks(rq) &&
1802 p->nr_cpus_allowed > 1 &&
1803 rt_task(rq->curr) &&
1804 (rq->curr->nr_cpus_allowed < 2 ||
1805 rq->curr->prio <= p->prio))
1809 static void set_cpus_allowed_rt(struct task_struct *p,
1810 const struct cpumask *new_mask)
1815 BUG_ON(!rt_task(p));
1820 weight = cpumask_weight(new_mask);
1823 * Only update if the process changes whether it
1824 * can migrate or not.
1826 if ((p->nr_cpus_allowed > 1) == (weight > 1))
1832 * The process used to be able to migrate OR it can now migrate
1835 if (!task_current(rq, p))
1836 dequeue_pushable_task(rq, p);
1837 BUG_ON(!rq->rt.rt_nr_migratory);
1838 rq->rt.rt_nr_migratory--;
1840 if (!task_current(rq, p))
1841 enqueue_pushable_task(rq, p);
1842 rq->rt.rt_nr_migratory++;
1845 update_rt_migration(&rq->rt);
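/*
 * Example of the bookkeeping above (assumed): a queued RT task pinned to one
 * CPU gains a 4-CPU affinity mask, so weight goes from 1 to 4.  The task
 * becomes migratable: it is added to pushable_tasks (unless it is current),
 * rt_nr_migratory is bumped, and update_rt_migration() may now mark this
 * runqueue overloaded so that other CPUs start pulling from it.
 */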
1848 /* Assumes rq->lock is held */
1849 static void rq_online_rt(struct rq *rq)
1851 if (rq->rt.overloaded)
1852 rt_set_overload(rq);
1854 __enable_runtime(rq);
1856 cpupri_set(&rq->rd->cpupri, rq->cpu, rq->rt.highest_prio.curr);
1859 /* Assumes rq->lock is held */
1860 static void rq_offline_rt(struct rq *rq)
1862 if (rq->rt.overloaded)
1863 rt_clear_overload(rq);
1865 __disable_runtime(rq);
1867 cpupri_set(&rq->rd->cpupri, rq->cpu, CPUPRI_INVALID);
1871 * When switching from the rt queue, we bring ourselves to a position
1872 * where we might want to pull RT tasks from other runqueues.
1874 static void switched_from_rt(struct rq *rq, struct task_struct *p)
1877 * If there are other RT tasks then we will reschedule
1878 * and the scheduling of the other RT tasks will handle
1879 * the balancing. But if we are the last RT task
1880 * we may need to handle the pulling of RT tasks
1883 if (p->on_rq && !rq->rt.rt_nr_running)
1887 void init_sched_rt_class(void)
1891 for_each_possible_cpu(i) {
1892 zalloc_cpumask_var_node(&per_cpu(local_cpu_mask, i),
1893 GFP_KERNEL, cpu_to_node(i));
1896 #endif /* CONFIG_SMP */
1899 * When switching a task to RT, we may overload the runqueue
1900 * with RT tasks. In this case we try to push them off to
1903 static void switched_to_rt(struct rq *rq, struct task_struct *p)
1905 int check_resched = 1;
1908 * If we are already running, then there's nothing
1909 * that needs to be done. But if we are not running
1910 * we may need to preempt the current running task.
1911 * If that current running task is also an RT task
1912 * then see if we can move to another run queue.
1914 if (p->on_rq && rq->curr != p) {
1916 if (rq->rt.overloaded && push_rt_task(rq) &&
1917 /* Don't resched if we changed runqueues */
1920 #endif /* CONFIG_SMP */
1921 if (check_resched && p->prio < rq->curr->prio)
1922 resched_task(rq->curr);
1927 * Priority of the task has changed. This may cause
1928 * us to initiate a push or pull.
1931 prio_changed_rt(struct rq *rq, struct task_struct *p, int oldprio)
1936 if (rq->curr == p) {
1939 * If our priority decreases while running, we
1940 * may need to pull tasks to this runqueue.
1942 if (oldprio < p->prio)
1945 * If there's a higher priority task waiting to run
1946 * then reschedule. Note, the above pull_rt_task
1947 * can release the rq lock and p could migrate.
1948 * Only reschedule if p is still on the same runqueue.
1950 if (p->prio > rq->rt.highest_prio.curr && rq->curr == p)
1953 /* For UP simply resched on drop of prio */
1954 if (oldprio < p->prio)
1956 #endif /* CONFIG_SMP */
1959 * This task is not running, but if it is
1960 * of higher priority than the current running task
1963 if (p->prio < rq->curr->prio)
1964 resched_task(rq->curr);
1968 static void watchdog(struct rq *rq, struct task_struct *p)
1970 unsigned long soft, hard;
1972 /* max may change after cur was read, this will be fixed next tick */
1973 soft = task_rlimit(p, RLIMIT_RTTIME);
1974 hard = task_rlimit_max(p, RLIMIT_RTTIME);
1976 if (soft != RLIM_INFINITY) {
1980 next = DIV_ROUND_UP(min(soft, hard), USEC_PER_SEC/HZ);
1981 if (p->rt.timeout > next)
1982 p->cputime_expires.sched_exp = p->se.sum_exec_runtime;
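/*
 * Worked example (assumed limits): with an RLIMIT_RTTIME soft limit of
 * 2000000us and HZ = 1000, next = DIV_ROUND_UP(2000000, 1000000/1000) = 2000
 * ticks.  Once p->rt.timeout, which grows by one for every tick the task
 * keeps running, passes that value, sched_exp is armed so the posix CPU
 * timer code can act on the limit.
 */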
1986 static void task_tick_rt(struct rq *rq, struct task_struct *p, int queued)
1993 * RR tasks need a special form of timeslice management.
1994 * FIFO tasks have no timeslices.
1996 if (p->policy != SCHED_RR)
1999 if (--p->rt.time_slice)
2002 p->rt.time_slice = RR_TIMESLICE;
2005 * Requeue to the end of the queue if we are not the only element
2008 if (p->rt.run_list.prev != p->rt.run_list.next) {
2009 requeue_task_rt(rq, p, 0);
2010 set_tsk_need_resched(p);
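/*
 * Round-robin example: with RR_TIMESLICE at its usual 100ms worth of ticks
 * (assumed HZ-dependent value), a SCHED_RR task sharing its priority level
 * with one sibling runs for roughly 100ms, has its slice refilled above, is
 * moved to the tail of its queue and marked for resched, so the sibling runs
 * next.  A SCHED_RR task alone at its priority keeps running with no requeue.
 */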
2014 static void set_curr_task_rt(struct rq *rq)
2016 struct task_struct *p = rq->curr;
2018 p->se.exec_start = rq->clock_task;
2020 /* The running task is never eligible for pushing */
2021 dequeue_pushable_task(rq, p);
2024 static unsigned int get_rr_interval_rt(struct rq *rq, struct task_struct *task)
2027 * Time slice is 0 for SCHED_FIFO tasks
2029 if (task->policy == SCHED_RR)
2030 return RR_TIMESLICE;
2035 const struct sched_class rt_sched_class = {
2036 .next = &fair_sched_class,
2037 .enqueue_task = enqueue_task_rt,
2038 .dequeue_task = dequeue_task_rt,
2039 .yield_task = yield_task_rt,
2041 .check_preempt_curr = check_preempt_curr_rt,
2043 .pick_next_task = pick_next_task_rt,
2044 .put_prev_task = put_prev_task_rt,
2047 .select_task_rq = select_task_rq_rt,
2049 .set_cpus_allowed = set_cpus_allowed_rt,
2050 .rq_online = rq_online_rt,
2051 .rq_offline = rq_offline_rt,
2052 .pre_schedule = pre_schedule_rt,
2053 .post_schedule = post_schedule_rt,
2054 .task_woken = task_woken_rt,
2055 .switched_from = switched_from_rt,
2058 .set_curr_task = set_curr_task_rt,
2059 .task_tick = task_tick_rt,
2061 .get_rr_interval = get_rr_interval_rt,
2063 .prio_changed = prio_changed_rt,
2064 .switched_to = switched_to_rt,
2067 #ifdef CONFIG_SCHED_DEBUG
2068 extern void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq);
2070 void print_rt_stats(struct seq_file *m, int cpu)
2073 struct rt_rq *rt_rq;
2076 for_each_rt_rq(rt_rq, iter, cpu_rq(cpu))
2077 print_rt_rq(m, cpu, rt_rq);
2080 #endif /* CONFIG_SCHED_DEBUG */