2 * Deadline Scheduling Class (SCHED_DEADLINE)
4 * Earliest Deadline First (EDF) + Constant Bandwidth Server (CBS).
6 * Tasks that periodically execute their instances for less than their
7 * runtime won't miss any of their deadlines.
8 * Tasks that are not periodic or sporadic or that try to execute more
9 * than their reserved bandwidth will be slowed down (and may potentially
10 * miss some of their deadlines), and won't affect any other task.
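 *
 * As an illustration (a userspace sketch, not code in this file): a task
 * asks for such a reservation via sched_setattr(), here assuming a syscall
 * wrapper like the one in Documentation/scheduler/sched-deadline.txt.
 * Times are in nanoseconds; this reserves 10ms of runtime every 100ms,
 * with the relative deadline equal to the period:
 *
 *	struct sched_attr attr = {
 *		.size		= sizeof(attr),
 *		.sched_policy	= SCHED_DEADLINE,
 *		.sched_runtime	= 10000000,
 *		.sched_deadline	= 100000000,
 *		.sched_period	= 100000000,
 *	};
 *
 *	if (sched_setattr(0, &attr, 0))
 *		perror("sched_setattr");
 *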
12 * Copyright (C) 2012 Dario Faggioli <raistlin@linux.it>,
13 * Juri Lelli <juri.lelli@gmail.com>,
14 * Michael Trimarchi <michael@amarulasolutions.com>,
15 * Fabio Checconi <fchecconi@gmail.com>
19 #include <linux/slab.h>
21 struct dl_bandwidth def_dl_bandwidth;
23 static inline struct task_struct *dl_task_of(struct sched_dl_entity *dl_se)
25 return container_of(dl_se, struct task_struct, dl);
28 static inline struct rq *rq_of_dl_rq(struct dl_rq *dl_rq)
30 return container_of(dl_rq, struct rq, dl);
33 static inline struct dl_rq *dl_rq_of_se(struct sched_dl_entity *dl_se)
35 struct task_struct *p = dl_task_of(dl_se);
36 struct rq *rq = task_rq(p);
41 static inline int on_dl_rq(struct sched_dl_entity *dl_se)
43 return !RB_EMPTY_NODE(&dl_se->rb_node);
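/*
 * Track an estimate of the total -deadline bandwidth on this rq:
 * avg_bw accumulates the dl_bw of entities activated here and drops
 * it again when they migrate away, die or leave SCHED_DEADLINE (see
 * the callers of these two helpers).
 */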
46 static void add_average_bw(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
48 u64 se_bw = dl_se->dl_bw;
50 dl_rq->avg_bw += se_bw;
53 static void clear_average_bw(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
55 u64 se_bw = dl_se->dl_bw;
57 dl_rq->avg_bw -= se_bw;
58 if (dl_rq->avg_bw < 0) {
59 dl_rq->avg_bw = 0;
64 static inline int is_leftmost(struct task_struct *p, struct dl_rq *dl_rq)
66 struct sched_dl_entity *dl_se = &p->dl;
68 return dl_rq->rb_leftmost == &dl_se->rb_node;
71 void init_dl_bandwidth(struct dl_bandwidth *dl_b, u64 period, u64 runtime)
73 raw_spin_lock_init(&dl_b->dl_runtime_lock);
74 dl_b->dl_period = period;
75 dl_b->dl_runtime = runtime;
78 void init_dl_bw(struct dl_bw *dl_b)
80 raw_spin_lock_init(&dl_b->lock);
81 raw_spin_lock(&def_dl_bandwidth.dl_runtime_lock);
82 if (global_rt_runtime() == RUNTIME_INF)
85 dl_b->bw = to_ratio(global_rt_period(), global_rt_runtime());
86 raw_spin_unlock(&def_dl_bandwidth.dl_runtime_lock);
90 void init_dl_rq(struct dl_rq *dl_rq)
92 dl_rq->rb_root = RB_ROOT;
95 /* zero means no -deadline tasks */
96 dl_rq->earliest_dl.curr = dl_rq->earliest_dl.next = 0;
98 dl_rq->dl_nr_migratory = 0;
99 dl_rq->overloaded = 0;
100 dl_rq->pushable_dl_tasks_root = RB_ROOT;
102 init_dl_bw(&dl_rq->dl_bw);
108 static inline int dl_overloaded(struct rq *rq)
110 return atomic_read(&rq->rd->dlo_count);
113 static inline void dl_set_overload(struct rq *rq)
118 cpumask_set_cpu(rq->cpu, rq->rd->dlo_mask);
120 * Must be visible before the overload count is
121 * set (as in sched_rt.c).
123 * Matched by the barrier in pull_dl_task().
126 atomic_inc(&rq->rd->dlo_count);
129 static inline void dl_clear_overload(struct rq *rq)
134 atomic_dec(&rq->rd->dlo_count);
135 cpumask_clear_cpu(rq->cpu, rq->rd->dlo_mask);
138 static void update_dl_migration(struct dl_rq *dl_rq)
140 if (dl_rq->dl_nr_migratory && dl_rq->dl_nr_running > 1) {
141 if (!dl_rq->overloaded) {
142 dl_set_overload(rq_of_dl_rq(dl_rq));
143 dl_rq->overloaded = 1;
145 } else if (dl_rq->overloaded) {
146 dl_clear_overload(rq_of_dl_rq(dl_rq));
147 dl_rq->overloaded = 0;
151 static void inc_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
153 struct task_struct *p = dl_task_of(dl_se);
155 if (p->nr_cpus_allowed > 1)
156 dl_rq->dl_nr_migratory++;
158 update_dl_migration(dl_rq);
161 static void dec_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
163 struct task_struct *p = dl_task_of(dl_se);
165 if (p->nr_cpus_allowed > 1)
166 dl_rq->dl_nr_migratory--;
168 update_dl_migration(dl_rq);
172 * The list of pushable -deadline tasks is not a plist, like in
173 * sched_rt.c; it is an rb-tree with tasks ordered by deadline.
175 static void enqueue_pushable_dl_task(struct rq *rq, struct task_struct *p)
177 struct dl_rq *dl_rq = &rq->dl;
178 struct rb_node **link = &dl_rq->pushable_dl_tasks_root.rb_node;
179 struct rb_node *parent = NULL;
180 struct task_struct *entry;
183 BUG_ON(!RB_EMPTY_NODE(&p->pushable_dl_tasks));
187 entry = rb_entry(parent, struct task_struct,
189 if (dl_entity_preempt(&p->dl, &entry->dl))
190 link = &parent->rb_left;
192 link = &parent->rb_right;
198 dl_rq->pushable_dl_tasks_leftmost = &p->pushable_dl_tasks;
200 rb_link_node(&p->pushable_dl_tasks, parent, link);
201 rb_insert_color(&p->pushable_dl_tasks, &dl_rq->pushable_dl_tasks_root);
204 static void dequeue_pushable_dl_task(struct rq *rq, struct task_struct *p)
206 struct dl_rq *dl_rq = &rq->dl;
208 if (RB_EMPTY_NODE(&p->pushable_dl_tasks))
211 if (dl_rq->pushable_dl_tasks_leftmost == &p->pushable_dl_tasks) {
212 struct rb_node *next_node;
214 next_node = rb_next(&p->pushable_dl_tasks);
215 dl_rq->pushable_dl_tasks_leftmost = next_node;
218 rb_erase(&p->pushable_dl_tasks, &dl_rq->pushable_dl_tasks_root);
219 RB_CLEAR_NODE(&p->pushable_dl_tasks);
222 static inline int has_pushable_dl_tasks(struct rq *rq)
224 return !RB_EMPTY_ROOT(&rq->dl.pushable_dl_tasks_root);
227 static int push_dl_task(struct rq *rq);
229 static inline bool need_pull_dl_task(struct rq *rq, struct task_struct *prev)
231 return dl_task(prev);
234 static DEFINE_PER_CPU(struct callback_head, dl_push_head);
235 static DEFINE_PER_CPU(struct callback_head, dl_pull_head);
237 static void push_dl_tasks(struct rq *);
238 static void pull_dl_task(struct rq *);
240 static inline void queue_push_tasks(struct rq *rq)
242 if (!has_pushable_dl_tasks(rq))
245 queue_balance_callback(rq, &per_cpu(dl_push_head, rq->cpu), push_dl_tasks);
248 static inline void queue_pull_task(struct rq *rq)
250 queue_balance_callback(rq, &per_cpu(dl_pull_head, rq->cpu), pull_dl_task);
253 static struct rq *find_lock_later_rq(struct task_struct *task, struct rq *rq);
255 static struct rq *dl_task_offline_migration(struct rq *rq, struct task_struct *p)
257 struct rq *later_rq = NULL;
258 bool fallback = false;
260 later_rq = find_lock_later_rq(p, rq);
266 * If we cannot preempt any rq, fall back to pick any
270 cpu = cpumask_any_and(cpu_active_mask, tsk_cpus_allowed(p));
271 if (cpu >= nr_cpu_ids) {
273 * Failed to find any suitable cpu.
274 * The task will never come back!
276 BUG_ON(dl_bandwidth_enabled());
279 * If admission control is disabled we
280 * try a little harder to let the task
283 cpu = cpumask_any(cpu_active_mask);
285 later_rq = cpu_rq(cpu);
286 double_lock_balance(rq, later_rq);
290 * By now the task is replenished and enqueued; migrate it.
292 deactivate_task(rq, p, 0);
293 set_task_cpu(p, later_rq->cpu);
294 activate_task(later_rq, p, 0);
297 resched_curr(later_rq);
299 double_unlock_balance(later_rq, rq);
307 void enqueue_pushable_dl_task(struct rq *rq, struct task_struct *p)
312 void dequeue_pushable_dl_task(struct rq *rq, struct task_struct *p)
317 void inc_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
322 void dec_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
326 static inline bool need_pull_dl_task(struct rq *rq, struct task_struct *prev)
331 static inline void pull_dl_task(struct rq *rq)
335 static inline void queue_push_tasks(struct rq *rq)
339 static inline void queue_pull_task(struct rq *rq)
342 #endif /* CONFIG_SMP */
344 static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags);
345 static void __dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags);
346 static void check_preempt_curr_dl(struct rq *rq, struct task_struct *p,
350 * We are being explicitly informed that a new instance is starting,
351 * and this means that:
352 * - the absolute deadline of the entity has to be placed at
353 * current time + relative deadline;
354 * - the runtime of the entity has to be set to the maximum value.
356 * The capability of specifying such an event is useful whenever a -deadline
357 * entity wants to (try to!) synchronize its behaviour with the scheduler's,
358 * and to (try to!) reconcile itself with its own scheduling parameters.
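 *
 * E.g. an entity with dl_runtime = 10ms and dl_deadline = 100ms that
 * starts a new instance at time t gets an absolute deadline of
 * t + 100ms and a full 10ms of budget, as set up below.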
361 static inline void setup_new_dl_entity(struct sched_dl_entity *dl_se,
362 struct sched_dl_entity *pi_se)
364 struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
365 struct rq *rq = rq_of_dl_rq(dl_rq);
367 WARN_ON(!dl_se->dl_new || dl_se->dl_throttled);
370 * We use the regular wall clock time to set deadlines in the
371 * future; in fact, we must consider execution overheads (time
372 * spent in hardirq context, etc.).
374 dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline;
375 dl_se->runtime = pi_se->dl_runtime;
380 * Pure Earliest Deadline First (EDF) scheduling does not deal with the
381 * possibility of an entity lasting more than what it declared, and thus
382 * exhausting its runtime.
384 * Here we are interested in making runtime overrun possible, but we do
385 * not want an entity that is misbehaving to affect the scheduling of all
387 * Therefore, a budgeting strategy called Constant Bandwidth Server (CBS)
388 * is used, in order to confine each entity within its own bandwidth.
390 * This function deals exactly with that, and ensures that when the runtime
391 * of an entity is replenished, its deadline is also postponed. That ensures
392 * the overrunning entity can't interfere with other entities in the system and
393 * can't make them miss their deadlines. Reasons why this kind of overrun
394 * could happen are, typically, an entity voluntarily trying to exceed its
395 * runtime, or simply underestimating it during sched_setattr().
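 *
 * A worked example, assuming dl_runtime = 5ms and dl_period = 20ms: an
 * entity found with runtime = -2ms (a 2ms overrun) gets, after one pass
 * of the loop below, deadline += 20ms and runtime = -2ms + 5ms = 3ms;
 * the overrun is paid for out of the next instance's budget while the
 * deadline moves a full period away.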
397 static void replenish_dl_entity(struct sched_dl_entity *dl_se,
398 struct sched_dl_entity *pi_se)
400 struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
401 struct rq *rq = rq_of_dl_rq(dl_rq);
403 BUG_ON(pi_se->dl_runtime <= 0);
406 * This could be the case for a !-dl task that is boosted.
407 * Just go with full inherited parameters.
409 if (dl_se->dl_deadline == 0) {
410 dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline;
411 dl_se->runtime = pi_se->dl_runtime;
415 * We keep moving the deadline away until we get some
416 * available runtime for the entity. This ensures correct
417 * handling of situations where the runtime overrun is
420 while (dl_se->runtime <= 0) {
421 dl_se->deadline += pi_se->dl_period;
422 dl_se->runtime += pi_se->dl_runtime;
426 * At this point, the deadline really should be "in
427 * the future" with respect to rq->clock. If it's
428 * not, we are, for some reason, lagging too much!
429 * Anyway, after having warned userspace about that,
430 * we still try to keep things running by
431 * resetting the deadline and the budget of the
434 if (dl_time_before(dl_se->deadline, rq_clock(rq))) {
435 printk_deferred_once("sched: DL replenish lagged too much\n");
436 dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline;
437 dl_se->runtime = pi_se->dl_runtime;
440 if (dl_se->dl_yielded)
441 dl_se->dl_yielded = 0;
442 if (dl_se->dl_throttled)
443 dl_se->dl_throttled = 0;
447 * Here we check if --at time t-- an entity (which is probably being
448 * [re]activated or, in general, enqueued) can use its remaining runtime
449 * and its current deadline _without_ exceeding the bandwidth it is
450 * assigned (function returns true if it can't). We are in fact applying
451 * one of the CBS rules: when a task wakes up, if the residual runtime
452 * over residual deadline fits within the allocated bandwidth, then we
453 * can keep the current (absolute) deadline and residual budget without
454 * disrupting the schedulability of the system. Otherwise, we should
455 * refill the runtime and set the deadline a period in the future,
456 * because keeping the current (absolute) deadline of the task would
457 * result in breaking guarantees promised to other tasks (refer to
458 * Documentation/scheduler/sched-deadline.txt for more information).
460 * This function returns true if:
462 * runtime / (deadline - t) > dl_runtime / dl_period ,
464 * IOW we can't recycle current parameters.
466 * Notice that the bandwidth check is done against the period. For a
467 * task with deadline equal to period, this is equivalent to using
468 * dl_deadline instead of dl_period in the equation above.
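 *
 * A numerical sketch: with dl_runtime = 5ms, dl_period = 20ms, leftover
 * runtime = 4ms and (deadline - t) = 10ms, the residual bandwidth is
 * 4/10 = 0.4 > 5/20 = 0.25, so the check fires and the parameters get
 * refilled; with (deadline - t) = 16ms or more, the current deadline
 * and runtime could be kept instead.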
470 static bool dl_entity_overflow(struct sched_dl_entity *dl_se,
471 struct sched_dl_entity *pi_se, u64 t)
476 * left and right are the two sides of the equation above,
477 * after a bit of shuffling to use multiplications instead
480 * Note that none of the time values involved in the two
481 * multiplications are absolute: dl_deadline and dl_runtime
482 * are the relative deadline and the maximum runtime of each
483 * instance, runtime is the runtime left for the last instance
484 * and (deadline - t), since t is rq->clock, is the time left
485 * to the (absolute) deadline. Even if overflowing the u64 type
486 * is very unlikely to occur in both cases, here we scale down
487 * as we want to avoid that risk at all. Scaling down by 10
488 * means that we reduce granularity to 1us. We are fine with it,
489 * since this is only a true/false check and, anyway, thinking
490 * of anything below microsecond resolution is actually fiction
491 * (but still we want to give the user that illusion >;).
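 *
 * For instance, with DL_SCALE = 10 a 5ms value (5000000ns) enters the
 * multiplication as 5000000 >> 10 = 4882, i.e. in roughly 1us units,
 * which comfortably keeps the product within 64 bits.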
493 left = (pi_se->dl_period >> DL_SCALE) * (dl_se->runtime >> DL_SCALE);
494 right = ((dl_se->deadline - t) >> DL_SCALE) *
495 (pi_se->dl_runtime >> DL_SCALE);
497 return dl_time_before(right, left);
501 * When a -deadline entity is queued back on the runqueue, its runtime and
502 * deadline might need updating.
504 * The policy here is that we update the deadline of the entity only if:
505 * - the current deadline is in the past,
506 * - using the remaining runtime with the current deadline would make
507 * the entity exceed its bandwidth.
509 static void update_dl_entity(struct sched_dl_entity *dl_se,
510 struct sched_dl_entity *pi_se)
512 struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
513 struct rq *rq = rq_of_dl_rq(dl_rq);
516 add_average_bw(dl_se, dl_rq);
519 * The arrival of a new instance needs special treatment, i.e.,
520 * the actual scheduling parameters have to be "renewed".
523 setup_new_dl_entity(dl_se, pi_se);
527 if (dl_time_before(dl_se->deadline, rq_clock(rq)) ||
528 dl_entity_overflow(dl_se, pi_se, rq_clock(rq))) {
529 dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline;
530 dl_se->runtime = pi_se->dl_runtime;
535 * If the entity depleted all its runtime, and if we want it to sleep
536 * while waiting for some new execution time to become available, we
537 * set the bandwidth enforcement timer to the replenishment instant
538 * and try to activate it.
540 * Notice that it is important for the caller to know if the timer
541 * actually started or not (i.e., the replenishment instant is in
542 * the future or in the past).
544 static int start_dl_timer(struct task_struct *p)
546 struct sched_dl_entity *dl_se = &p->dl;
547 struct hrtimer *timer = &dl_se->dl_timer;
548 struct rq *rq = task_rq(p);
552 lockdep_assert_held(&rq->lock);
555 * We want the timer to fire at the deadline, but considering
556 * that it is actually coming from rq->clock and not from
557 * hrtimer's time base reading.
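 *
 * In other words (a sketch, all values in ns):
 *
 *	act = deadline + (hrtimer_now - rq_clock)
 *
 * which rebases a deadline expressed on the rq clock onto the
 * hrtimer's time base before arming the absolute timer below.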
559 act = ns_to_ktime(dl_se->deadline);
560 now = hrtimer_cb_get_time(timer);
561 delta = ktime_to_ns(now) - rq_clock(rq);
562 act = ktime_add_ns(act, delta);
565 * If the expiry time already passed, e.g., because the value
566 * chosen as the deadline is too small, don't even try to
567 * start the timer in the past!
569 if (ktime_us_delta(act, now) < 0)
573 * !enqueued will guarantee another callback; even if one is already in
574 * progress. This ensures a balanced {get,put}_task_struct().
576 * The race against __run_hrtimer() clearing the enqueued state is
577 * harmless because we're holding task_rq()->lock, therefore the timer
578 * expiring after we've done the check will wait on its task_rq_lock()
579 * and observe our state.
581 if (!hrtimer_is_queued(timer)) {
583 hrtimer_start(timer, act, HRTIMER_MODE_ABS);
590 * This is the bandwidth enforcement timer callback. If here, we know
591 * a task is not on its dl_rq, since the fact that the timer was running
592 * means the task is throttled and needs a runtime replenishment.
594 * However, what we actually do depends on whether the task is active
595 * (it is on its rq) or has been removed from there by a call to
596 * dequeue_task_dl(). In the former case we must issue the runtime
597 * replenishment and add the task back to the dl_rq; in the latter, we just
598 * do nothing but clearing dl_throttled, so that runtime and deadline
599 * updating (and the queueing back to dl_rq) will be done by the
600 * next call to enqueue_task_dl().
602 static enum hrtimer_restart dl_task_timer(struct hrtimer *timer)
604 struct sched_dl_entity *dl_se = container_of(timer,
605 struct sched_dl_entity,
607 struct task_struct *p = dl_task_of(dl_se);
611 rq = task_rq_lock(p, &flags);
614 * The task might have changed its scheduling policy to something
615 * different from SCHED_DEADLINE (through switched_from_dl()).
618 __dl_clear_params(p);
623 * This is possible if switched_from_dl() raced against a running
624 * callback that took the above !dl_task() path and we've since then
625 * switched back into SCHED_DEADLINE.
627 * There's nothing to do except drop our task reference.
633 * The task might have been boosted by someone else and might be in the
634 * boosting/deboosting path; in that case it's not throttled.
636 if (dl_se->dl_boosted)
640 * Spurious timer due to start_dl_timer() race; or we already received
641 * a replenishment from rt_mutex_setprio().
643 if (!dl_se->dl_throttled)
650 * If the throttle happened during sched-out; like:
657 * __dequeue_task_dl()
660 * We can be both throttled and !queued. Replenish the counter
661 * but do not enqueue -- wait for our wakeup to do that.
663 if (!task_on_rq_queued(p)) {
664 replenish_dl_entity(dl_se, dl_se);
668 enqueue_task_dl(rq, p, ENQUEUE_REPLENISH);
669 if (dl_task(rq->curr))
670 check_preempt_curr_dl(rq, p, 0);
676 * Perform balancing operations here, after the replenishments. We
677 * cannot drop rq->lock before this, otherwise the assertion in
678 * start_dl_timer() about not missing updates is not true.
680 * If we find that the rq the task was on is no longer available, we
681 * need to select a new rq.
683 * XXX figure out if select_task_rq_dl() deals with offline cpus.
685 if (unlikely(!rq->online))
686 rq = dl_task_offline_migration(rq, p);
689 * Queueing this task back might have overloaded the rq; check if we need
690 * to kick someone away.
692 if (has_pushable_dl_tasks(rq)) {
694 * Nothing relies on rq->lock after this, so it's safe to drop
697 lockdep_unpin_lock(&rq->lock);
699 lockdep_pin_lock(&rq->lock);
704 task_rq_unlock(rq, p, &flags);
707 * This can free the task_struct, including this hrtimer; do not touch
708 * anything related to that after this.
712 return HRTIMER_NORESTART;
715 void init_dl_task_timer(struct sched_dl_entity *dl_se)
717 struct hrtimer *timer = &dl_se->dl_timer;
719 hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
720 timer->function = dl_task_timer;
724 int dl_runtime_exceeded(struct sched_dl_entity *dl_se)
726 return (dl_se->runtime <= 0);
729 extern bool sched_rt_bandwidth_account(struct rt_rq *rt_rq);
732 * Update the current task's runtime statistics (provided it is still
733 * a -deadline task and has not been removed from the dl_rq).
735 static void update_curr_dl(struct rq *rq)
737 struct task_struct *curr = rq->curr;
738 struct sched_dl_entity *dl_se = &curr->dl;
741 if (!dl_task(curr) || !on_dl_rq(dl_se))
745 * Consumed budget is computed considering the time as
746 * observed by schedulable tasks (excluding time spent
747 * in hardirq context, etc.). Deadlines are instead
748 * computed using hard walltime. This seems to be the more
749 * natural solution, but the full ramifications of this
750 * approach need further study.
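 *
 * E.g. if 3ms of task clock elapsed since exec_start, 3ms get
 * subtracted from the remaining runtime below; once the runtime drops
 * to zero or below, the entity is throttled until its replenishment
 * timer fires.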
752 delta_exec = rq_clock_task(rq) - curr->se.exec_start;
753 if (unlikely((s64)delta_exec <= 0))
756 schedstat_set(curr->se.statistics.exec_max,
757 max(curr->se.statistics.exec_max, delta_exec));
759 curr->se.sum_exec_runtime += delta_exec;
760 account_group_exec_runtime(curr, delta_exec);
762 curr->se.exec_start = rq_clock_task(rq);
763 cpuacct_charge(curr, delta_exec);
765 dl_se->runtime -= dl_se->dl_yielded ? 0 : delta_exec;
766 if (dl_runtime_exceeded(dl_se)) {
767 dl_se->dl_throttled = 1;
768 __dequeue_task_dl(rq, curr, 0);
769 if (unlikely(dl_se->dl_boosted || !start_dl_timer(curr)))
770 enqueue_task_dl(rq, curr, ENQUEUE_REPLENISH);
772 if (!is_leftmost(curr, &rq->dl))
777 * Because -- for now -- we share the rt bandwidth, we need to
778 * account our runtime there too, otherwise actual rt tasks
779 * would be able to exceed the shared quota.
781 * Account to the root rt group for now.
783 * The solution we're working towards is having the RT groups scheduled
784 * using deadline servers -- however there's a few nasties to figure
785 * out before that can happen.
787 if (rt_bandwidth_enabled()) {
788 struct rt_rq *rt_rq = &rq->rt;
790 raw_spin_lock(&rt_rq->rt_runtime_lock);
792 * We'll let actual RT tasks worry about the overflow here, we
793 * have our own CBS to keep us in line; only account when RT
794 * bandwidth is relevant.
796 if (sched_rt_bandwidth_account(rt_rq))
797 rt_rq->rt_time += delta_exec;
798 raw_spin_unlock(&rt_rq->rt_runtime_lock);
804 static struct task_struct *pick_next_earliest_dl_task(struct rq *rq, int cpu);
806 static inline u64 next_deadline(struct rq *rq)
808 struct task_struct *next = pick_next_earliest_dl_task(rq, rq->cpu);
810 if (next && dl_prio(next->prio))
811 return next->dl.deadline;
816 static void inc_dl_deadline(struct dl_rq *dl_rq, u64 deadline)
818 struct rq *rq = rq_of_dl_rq(dl_rq);
820 if (dl_rq->earliest_dl.curr == 0 ||
821 dl_time_before(deadline, dl_rq->earliest_dl.curr)) {
823 * If the dl_rq had no -deadline tasks, or if the new task
824 * has shorter deadline than the current one on dl_rq, we
825 * know that the previous earliest becomes our next earliest,
826 * as the new task becomes the earliest itself.
828 dl_rq->earliest_dl.next = dl_rq->earliest_dl.curr;
829 dl_rq->earliest_dl.curr = deadline;
830 cpudl_set(&rq->rd->cpudl, rq->cpu, deadline, 1);
831 } else if (dl_rq->earliest_dl.next == 0 ||
832 dl_time_before(deadline, dl_rq->earliest_dl.next)) {
834 * On the other hand, if the new -deadline task has a
835 * later deadline than the earliest one on dl_rq, but
836 * it is earlier than the next (if any), we must
837 * recompute the next-earliest.
839 dl_rq->earliest_dl.next = next_deadline(rq);
843 static void dec_dl_deadline(struct dl_rq *dl_rq, u64 deadline)
845 struct rq *rq = rq_of_dl_rq(dl_rq);
848 * Since we may have removed our earliest (and/or next earliest)
849 * task we must recompute them.
851 if (!dl_rq->dl_nr_running) {
852 dl_rq->earliest_dl.curr = 0;
853 dl_rq->earliest_dl.next = 0;
854 cpudl_set(&rq->rd->cpudl, rq->cpu, 0, 0);
856 struct rb_node *leftmost = dl_rq->rb_leftmost;
857 struct sched_dl_entity *entry;
859 entry = rb_entry(leftmost, struct sched_dl_entity, rb_node);
860 dl_rq->earliest_dl.curr = entry->deadline;
861 dl_rq->earliest_dl.next = next_deadline(rq);
862 cpudl_set(&rq->rd->cpudl, rq->cpu, entry->deadline, 1);
868 static inline void inc_dl_deadline(struct dl_rq *dl_rq, u64 deadline) {}
869 static inline void dec_dl_deadline(struct dl_rq *dl_rq, u64 deadline) {}
871 #endif /* CONFIG_SMP */
874 void inc_dl_tasks(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
876 int prio = dl_task_of(dl_se)->prio;
877 u64 deadline = dl_se->deadline;
879 WARN_ON(!dl_prio(prio));
880 dl_rq->dl_nr_running++;
881 add_nr_running(rq_of_dl_rq(dl_rq), 1);
883 inc_dl_deadline(dl_rq, deadline);
884 inc_dl_migration(dl_se, dl_rq);
888 void dec_dl_tasks(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
890 int prio = dl_task_of(dl_se)->prio;
892 WARN_ON(!dl_prio(prio));
893 WARN_ON(!dl_rq->dl_nr_running);
894 dl_rq->dl_nr_running--;
895 sub_nr_running(rq_of_dl_rq(dl_rq), 1);
897 dec_dl_deadline(dl_rq, dl_se->deadline);
898 dec_dl_migration(dl_se, dl_rq);
901 static void __enqueue_dl_entity(struct sched_dl_entity *dl_se)
903 struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
904 struct rb_node **link = &dl_rq->rb_root.rb_node;
905 struct rb_node *parent = NULL;
906 struct sched_dl_entity *entry;
909 BUG_ON(!RB_EMPTY_NODE(&dl_se->rb_node));
913 entry = rb_entry(parent, struct sched_dl_entity, rb_node);
914 if (dl_time_before(dl_se->deadline, entry->deadline))
915 link = &parent->rb_left;
917 link = &parent->rb_right;
923 dl_rq->rb_leftmost = &dl_se->rb_node;
925 rb_link_node(&dl_se->rb_node, parent, link);
926 rb_insert_color(&dl_se->rb_node, &dl_rq->rb_root);
928 inc_dl_tasks(dl_se, dl_rq);
931 static void __dequeue_dl_entity(struct sched_dl_entity *dl_se)
933 struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
935 if (RB_EMPTY_NODE(&dl_se->rb_node))
938 if (dl_rq->rb_leftmost == &dl_se->rb_node) {
939 struct rb_node *next_node;
941 next_node = rb_next(&dl_se->rb_node);
942 dl_rq->rb_leftmost = next_node;
945 rb_erase(&dl_se->rb_node, &dl_rq->rb_root);
946 RB_CLEAR_NODE(&dl_se->rb_node);
948 dec_dl_tasks(dl_se, dl_rq);
952 enqueue_dl_entity(struct sched_dl_entity *dl_se,
953 struct sched_dl_entity *pi_se, int flags)
955 BUG_ON(on_dl_rq(dl_se));
958 * If this is a wakeup or a new instance, the scheduling
959 * parameters of the task might need updating. Otherwise,
960 * we want a replenishment of its runtime.
962 if (dl_se->dl_new || flags & ENQUEUE_WAKEUP)
963 update_dl_entity(dl_se, pi_se);
964 else if (flags & ENQUEUE_REPLENISH)
965 replenish_dl_entity(dl_se, pi_se);
967 __enqueue_dl_entity(dl_se);
970 static void dequeue_dl_entity(struct sched_dl_entity *dl_se)
972 __dequeue_dl_entity(dl_se);
975 static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags)
977 struct task_struct *pi_task = rt_mutex_get_top_task(p);
978 struct sched_dl_entity *pi_se = &p->dl;
981 * Use the scheduling parameters of the top pi-waiter
982 * task if we have one and its (absolute) deadline is
983 * smaller than ours... otherwise we keep our runtime and
986 if (pi_task && p->dl.dl_boosted && dl_prio(pi_task->normal_prio)) {
987 pi_se = &pi_task->dl;
988 } else if (!dl_prio(p->normal_prio)) {
990 * Special case in which we have a !SCHED_DEADLINE task
991 * that is going to be deboosted, but exceeds its
992 * runtime while doing so. No point in replenishing
993 * it, as it's going to return to its original
994 * scheduling class after this.
996 BUG_ON(!p->dl.dl_boosted || flags != ENQUEUE_REPLENISH);
1001 * If p is throttled, we do nothing. In fact, if it exhausted
1002 * its budget it needs a replenishment and, since it now is on
1003 * its rq, the bandwidth timer callback (which clearly has not
1004 * run yet) will take care of this.
1006 if (p->dl.dl_throttled && !(flags & ENQUEUE_REPLENISH))
1009 enqueue_dl_entity(&p->dl, pi_se, flags);
1011 if (!task_current(rq, p) && p->nr_cpus_allowed > 1)
1012 enqueue_pushable_dl_task(rq, p);
1015 static void __dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags)
1017 dequeue_dl_entity(&p->dl);
1018 dequeue_pushable_dl_task(rq, p);
1021 static void dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags)
1024 __dequeue_task_dl(rq, p, flags);
1028 * Yield task semantics for -deadline tasks is:
1030 * get off from the CPU until our next instance, with
1031 * a new runtime. This is of little use now, since we
1032 * don't have a bandwidth reclaiming mechanism. Anyway,
1033 * bandwidth reclaiming is planned for the future, and
1034 * yield_task_dl will indicate that some spare budget
1035 * is available for other task instances to use.
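 *
 * A typical periodic job would thus be structured as (a userspace
 * sketch, error handling elided):
 *
 *	while (!done) {
 *		do_periodic_work();
 *		sched_yield();
 *	}
 *
 * with sched_yield() blocking the task until its next period begins.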
1037 static void yield_task_dl(struct rq *rq)
1039 struct task_struct *p = rq->curr;
1042 * We make the task go to sleep until its current deadline by
1043 * forcing its runtime to zero. This way, update_curr_dl() stops
1044 * it and the bandwidth timer will wake it up and will give it
1045 * new scheduling parameters (thanks to dl_yielded=1).
1047 if (p->dl.runtime > 0) {
1048 rq->curr->dl.dl_yielded = 1;
1051 update_rq_clock(rq);
1054 * Tell update_rq_clock() that we've just updated,
1055 * so we don't do microscopic update in schedule()
1056 * and double the fastpath cost.
1058 rq_clock_skip_update(rq, true);
1063 static int find_later_rq(struct task_struct *task);
1066 select_task_rq_dl(struct task_struct *p, int cpu, int sd_flag, int flags)
1068 struct task_struct *curr;
1071 if (sd_flag != SD_BALANCE_WAKE)
1077 curr = READ_ONCE(rq->curr); /* unlocked access */
1080 * If we are dealing with a -deadline task, we must
1081 * decide where to wake it up.
1082 * If it has a later deadline and the current task
1083 * on this rq can't move (provided the waking task
1084 * can!) we prefer to send it somewhere else. On the
1085 * other hand, if it has a shorter deadline, we
1086 * try to make it stay here, as it might be important.
1088 if (unlikely(dl_task(curr)) &&
1089 (curr->nr_cpus_allowed < 2 ||
1090 !dl_entity_preempt(&p->dl, &curr->dl)) &&
1091 (p->nr_cpus_allowed > 1)) {
1092 int target = find_later_rq(p);
1095 (dl_time_before(p->dl.deadline,
1096 cpu_rq(target)->dl.earliest_dl.curr) ||
1097 (cpu_rq(target)->dl.dl_nr_running == 0)))
1106 static void check_preempt_equal_dl(struct rq *rq, struct task_struct *p)
1109 * Current can't be migrated, useless to reschedule,
1110 * let's hope p can move out.
1112 if (rq->curr->nr_cpus_allowed == 1 ||
1113 cpudl_find(&rq->rd->cpudl, rq->curr, NULL) == -1)
1117 * p is migratable, so let's not schedule it and
1118 * see if it is pushed or pulled somewhere else.
1120 if (p->nr_cpus_allowed != 1 &&
1121 cpudl_find(&rq->rd->cpudl, p, NULL) != -1)
1127 #endif /* CONFIG_SMP */
1130 * Only called when both the current and waking task are -deadline
1133 static void check_preempt_curr_dl(struct rq *rq, struct task_struct *p,
1136 if (dl_entity_preempt(&p->dl, &rq->curr->dl)) {
1143 * In the unlikely case current and p have the same deadline
1144 * let us try to decide what's the best thing to do...
1146 if ((p->dl.deadline == rq->curr->dl.deadline) &&
1147 !test_tsk_need_resched(rq->curr))
1148 check_preempt_equal_dl(rq, p);
1149 #endif /* CONFIG_SMP */
1152 #ifdef CONFIG_SCHED_HRTICK
1153 static void start_hrtick_dl(struct rq *rq, struct task_struct *p)
1155 hrtick_start(rq, p->dl.runtime);
1157 #else /* !CONFIG_SCHED_HRTICK */
1158 static void start_hrtick_dl(struct rq *rq, struct task_struct *p)
1163 static struct sched_dl_entity *pick_next_dl_entity(struct rq *rq,
1164 struct dl_rq *dl_rq)
1166 struct rb_node *left = dl_rq->rb_leftmost;
1171 return rb_entry(left, struct sched_dl_entity, rb_node);
1174 struct task_struct *pick_next_task_dl(struct rq *rq, struct task_struct *prev)
1176 struct sched_dl_entity *dl_se;
1177 struct task_struct *p;
1178 struct dl_rq *dl_rq;
1182 if (need_pull_dl_task(rq, prev)) {
1184 * This is OK, because current is on_cpu, which avoids it being
1185 * picked for load-balance; preemption/IRQs are still
1186 * disabled, avoiding further scheduler activity on it, and we're
1187 * being very careful to re-start the picking loop.
1189 lockdep_unpin_lock(&rq->lock);
1191 lockdep_pin_lock(&rq->lock);
1193 * pull_dl_task() can drop (and re-acquire) rq->lock; this
1194 * means a stop task can slip in, in which case we need to
1195 * re-start task selection.
1197 if (rq->stop && task_on_rq_queued(rq->stop))
1202 * When prev is DL, we may throttle it in put_prev_task().
1203 * So, we update time before we check for dl_nr_running.
1205 if (prev->sched_class == &dl_sched_class)
1208 if (unlikely(!dl_rq->dl_nr_running))
1211 put_prev_task(rq, prev);
1213 dl_se = pick_next_dl_entity(rq, dl_rq);
1216 p = dl_task_of(dl_se);
1217 p->se.exec_start = rq_clock_task(rq);
1219 /* Running task will never be pushed. */
1220 dequeue_pushable_dl_task(rq, p);
1222 if (hrtick_enabled(rq))
1223 start_hrtick_dl(rq, p);
1225 queue_push_tasks(rq);
1230 static void put_prev_task_dl(struct rq *rq, struct task_struct *p)
1234 if (on_dl_rq(&p->dl) && p->nr_cpus_allowed > 1)
1235 enqueue_pushable_dl_task(rq, p);
1238 static void task_tick_dl(struct rq *rq, struct task_struct *p, int queued)
1243 * Even when we have runtime, update_curr_dl() might have resulted in us
1244 * not being the leftmost task anymore. In that case NEED_RESCHED will
1245 * be set and schedule() will start a new hrtick for the next task.
1247 if (hrtick_enabled(rq) && queued && p->dl.runtime > 0 &&
1248 is_leftmost(p, &rq->dl))
1249 start_hrtick_dl(rq, p);
1252 static void task_fork_dl(struct task_struct *p)
1255 * SCHED_DEADLINE tasks cannot fork and this is achieved through
1260 static void task_dead_dl(struct task_struct *p)
1262 struct dl_bw *dl_b = dl_bw_of(task_cpu(p));
1263 struct dl_rq *dl_rq = dl_rq_of_se(&p->dl);
1264 struct rq *rq = rq_of_dl_rq(dl_rq);
1267 * Since we are TASK_DEAD we won't slip out of the domain!
1269 raw_spin_lock_irq(&dl_b->lock);
1270 /* XXX we should retain the bw until 0-lag */
1271 dl_b->total_bw -= p->dl.dl_bw;
1272 raw_spin_unlock_irq(&dl_b->lock);
1274 clear_average_bw(&p->dl, &rq->dl);
1277 static void set_curr_task_dl(struct rq *rq)
1279 struct task_struct *p = rq->curr;
1281 p->se.exec_start = rq_clock_task(rq);
1283 /* You can't push away the running task */
1284 dequeue_pushable_dl_task(rq, p);
1289 /* Only try algorithms three times */
1290 #define DL_MAX_TRIES 3
1292 static int pick_dl_task(struct rq *rq, struct task_struct *p, int cpu)
1294 if (!task_running(rq, p) &&
1295 cpumask_test_cpu(cpu, tsk_cpus_allowed(p)))
1300 /* Returns the second earliest -deadline task, NULL otherwise */
1301 static struct task_struct *pick_next_earliest_dl_task(struct rq *rq, int cpu)
1303 struct rb_node *next_node = rq->dl.rb_leftmost;
1304 struct sched_dl_entity *dl_se;
1305 struct task_struct *p = NULL;
1308 next_node = rb_next(next_node);
1310 dl_se = rb_entry(next_node, struct sched_dl_entity, rb_node);
1311 p = dl_task_of(dl_se);
1313 if (pick_dl_task(rq, p, cpu))
1323 * Return the rq's earliest pushable task that is suitable to be executed
1324 * on the CPU, NULL otherwise:
1326 static struct task_struct *pick_earliest_pushable_dl_task(struct rq *rq, int cpu)
1328 struct rb_node *next_node = rq->dl.pushable_dl_tasks_leftmost;
1329 struct task_struct *p = NULL;
1331 if (!has_pushable_dl_tasks(rq))
1336 p = rb_entry(next_node, struct task_struct, pushable_dl_tasks);
1338 if (pick_dl_task(rq, p, cpu))
1341 next_node = rb_next(next_node);
1348 static DEFINE_PER_CPU(cpumask_var_t, local_cpu_mask_dl);
1350 static int find_later_rq(struct task_struct *task)
1352 struct sched_domain *sd;
1353 struct cpumask *later_mask = this_cpu_cpumask_var_ptr(local_cpu_mask_dl);
1354 int this_cpu = smp_processor_id();
1355 int best_cpu, cpu = task_cpu(task);
1357 /* Make sure the mask is initialized first */
1358 if (unlikely(!later_mask))
1361 if (task->nr_cpus_allowed == 1)
1365 * We have to consider system topology and task affinity
1366 * first, then we can look for a suitable cpu.
1368 best_cpu = cpudl_find(&task_rq(task)->rd->cpudl,
1374 * If we are here, some target has been found,
1375 * the most suitable of which is cached in best_cpu.
1376 * This is, among the runqueues where the current tasks
1377 * have later deadlines than this task's, the rq
1378 * with the latest possible one.
1380 * Now we check how well this matches with task's
1381 * affinity and system topology.
1383 * The last cpu where the task ran is our first
1384 * guess, since it is most likely cache-hot there.
1386 if (cpumask_test_cpu(cpu, later_mask))
1389 * Check if this_cpu is to be skipped (i.e., it is
1390 * not in the mask) or not.
1392 if (!cpumask_test_cpu(this_cpu, later_mask))
1396 for_each_domain(cpu, sd) {
1397 if (sd->flags & SD_WAKE_AFFINE) {
1400 * If possible, preempting this_cpu is
1401 * cheaper than migrating.
1403 if (this_cpu != -1 &&
1404 cpumask_test_cpu(this_cpu, sched_domain_span(sd))) {
1410 * Last chance: if best_cpu is valid and is
1411 * in the mask, that becomes our choice.
1413 if (best_cpu < nr_cpu_ids &&
1414 cpumask_test_cpu(best_cpu, sched_domain_span(sd))) {
1423 * At this point, all our guesses failed; we just return
1424 * 'something' and let the caller sort things out.
1429 cpu = cpumask_any(later_mask);
1430 if (cpu < nr_cpu_ids)
1436 /* Locks the rq it finds */
1437 static struct rq *find_lock_later_rq(struct task_struct *task, struct rq *rq)
1439 struct rq *later_rq = NULL;
1443 for (tries = 0; tries < DL_MAX_TRIES; tries++) {
1444 cpu = find_later_rq(task);
1446 if ((cpu == -1) || (cpu == rq->cpu))
1449 later_rq = cpu_rq(cpu);
1451 if (later_rq->dl.dl_nr_running &&
1452 !dl_time_before(task->dl.deadline,
1453 later_rq->dl.earliest_dl.curr)) {
1455 * Target rq has tasks of equal or earlier deadline,
1456 * retrying does not release any lock and is unlikely
1457 * to yield a different result.
1463 /* Retry if something changed. */
1464 if (double_lock_balance(rq, later_rq)) {
1465 if (unlikely(task_rq(task) != rq ||
1466 !cpumask_test_cpu(later_rq->cpu,
1467 &task->cpus_allowed) ||
1468 task_running(rq, task) ||
1469 !task_on_rq_queued(task))) {
1470 double_unlock_balance(rq, later_rq);
1477 * If the rq we found has no -deadline task, or
1478 * its earliest one has a later deadline than our
1479 * task, the rq is a good one.
1481 if (!later_rq->dl.dl_nr_running ||
1482 dl_time_before(task->dl.deadline,
1483 later_rq->dl.earliest_dl.curr))
1486 /* Otherwise we try again. */
1487 double_unlock_balance(rq, later_rq);
1494 static struct task_struct *pick_next_pushable_dl_task(struct rq *rq)
1496 struct task_struct *p;
1498 if (!has_pushable_dl_tasks(rq))
1501 p = rb_entry(rq->dl.pushable_dl_tasks_leftmost,
1502 struct task_struct, pushable_dl_tasks);
1504 BUG_ON(rq->cpu != task_cpu(p));
1505 BUG_ON(task_current(rq, p));
1506 BUG_ON(p->nr_cpus_allowed <= 1);
1508 BUG_ON(!task_on_rq_queued(p));
1509 BUG_ON(!dl_task(p));
1515 * See if the non-running -deadline tasks on this rq
1516 * can be sent to some other CPU where they can preempt
1517 * and start executing.
1519 static int push_dl_task(struct rq *rq)
1521 struct task_struct *next_task;
1522 struct rq *later_rq;
1525 if (!rq->dl.overloaded)
1528 next_task = pick_next_pushable_dl_task(rq);
1533 if (unlikely(next_task == rq->curr)) {
1539 * If next_task preempts rq->curr, and rq->curr
1540 * can move away, it makes sense to just reschedule
1541 * without going further in pushing next_task.
1543 if (dl_task(rq->curr) &&
1544 dl_time_before(next_task->dl.deadline, rq->curr->dl.deadline) &&
1545 rq->curr->nr_cpus_allowed > 1) {
1550 /* We might release rq lock */
1551 get_task_struct(next_task);
1553 /* Will lock the rq it'll find */
1554 later_rq = find_lock_later_rq(next_task, rq);
1556 struct task_struct *task;
1559 * We must check all this again, since
1560 * find_lock_later_rq releases rq->lock and it is
1561 * then possible that next_task has migrated.
1563 task = pick_next_pushable_dl_task(rq);
1564 if (task_cpu(next_task) == rq->cpu && task == next_task) {
1566 * The task is still there. We don't try
1567 * again; some other cpu will pull it when ready.
1576 put_task_struct(next_task);
1581 deactivate_task(rq, next_task, 0);
1582 clear_average_bw(&next_task->dl, &rq->dl);
1583 set_task_cpu(next_task, later_rq->cpu);
1584 add_average_bw(&next_task->dl, &later_rq->dl);
1585 activate_task(later_rq, next_task, 0);
1588 resched_curr(later_rq);
1590 double_unlock_balance(rq, later_rq);
1593 put_task_struct(next_task);
1598 static void push_dl_tasks(struct rq *rq)
1600 /* push_dl_task() will return true if it moved a -deadline task */
1601 while (push_dl_task(rq))
1605 static void pull_dl_task(struct rq *this_rq)
1607 int this_cpu = this_rq->cpu, cpu;
1608 struct task_struct *p;
1609 bool resched = false;
1611 u64 dmin = LONG_MAX;
1613 if (likely(!dl_overloaded(this_rq)))
1617 * Match the barrier from dl_set_overload(); this guarantees that if we
1618 * see overloaded we must also see the dlo_mask bit.
1622 for_each_cpu(cpu, this_rq->rd->dlo_mask) {
1623 if (this_cpu == cpu)
1626 src_rq = cpu_rq(cpu);
1629 * It looks racy, and it is! However, as in sched_rt.c,
1630 * we are fine with this.
1632 if (this_rq->dl.dl_nr_running &&
1633 dl_time_before(this_rq->dl.earliest_dl.curr,
1634 src_rq->dl.earliest_dl.next))
1637 /* Might drop this_rq->lock */
1638 double_lock_balance(this_rq, src_rq);
1641 * If there are no more pullable tasks on the
1642 * rq, we're done with it.
1644 if (src_rq->dl.dl_nr_running <= 1)
1647 p = pick_earliest_pushable_dl_task(src_rq, this_cpu);
1650 * We found a task to be pulled if:
1651 * - it preempts our current (if there's one),
1652 * - it will preempt the last one we pulled (if any).
1654 if (p && dl_time_before(p->dl.deadline, dmin) &&
1655 (!this_rq->dl.dl_nr_running ||
1656 dl_time_before(p->dl.deadline,
1657 this_rq->dl.earliest_dl.curr))) {
1658 WARN_ON(p == src_rq->curr);
1659 WARN_ON(!task_on_rq_queued(p));
1662 * Then we pull iff p actually has an earlier
1663 * deadline than the current task of its runqueue.
1665 if (dl_time_before(p->dl.deadline,
1666 src_rq->curr->dl.deadline))
1671 deactivate_task(src_rq, p, 0);
1672 clear_average_bw(&p->dl, &src_rq->dl);
1673 set_task_cpu(p, this_cpu);
1674 add_average_bw(&p->dl, &this_rq->dl);
1675 activate_task(this_rq, p, 0);
1676 dmin = p->dl.deadline;
1678 /* Is there any other task even earlier? */
1681 double_unlock_balance(this_rq, src_rq);
1685 resched_curr(this_rq);
1689 * Since the task is not running and a reschedule is not going to happen
1690 * anytime soon on its runqueue, we try pushing it away now.
1692 static void task_woken_dl(struct rq *rq, struct task_struct *p)
1694 if (!task_running(rq, p) &&
1695 !test_tsk_need_resched(rq->curr) &&
1696 p->nr_cpus_allowed > 1 &&
1697 dl_task(rq->curr) &&
1698 (rq->curr->nr_cpus_allowed < 2 ||
1699 !dl_entity_preempt(&p->dl, &rq->curr->dl))) {
1704 static void set_cpus_allowed_dl(struct task_struct *p,
1705 const struct cpumask *new_mask)
1707 struct root_domain *src_rd;
1710 BUG_ON(!dl_task(p));
1715 * Migrating a SCHED_DEADLINE task between exclusive
1716 * cpusets (different root_domains) entails a bandwidth
1717 * update. We already made space for us in the destination
1718 * domain (see cpuset_can_attach()).
1720 if (!cpumask_intersects(src_rd->span, new_mask)) {
1721 struct dl_bw *src_dl_b;
1723 src_dl_b = dl_bw_of(cpu_of(rq));
1725 * We now free resources of the root_domain we are migrating
1726 * off. In the worst case, sched_setattr() may temporarily fail
1727 * until we complete the update.
1729 raw_spin_lock(&src_dl_b->lock);
1730 __dl_clear(src_dl_b, p->dl.dl_bw);
1731 raw_spin_unlock(&src_dl_b->lock);
1734 set_cpus_allowed_common(p, new_mask);
1737 /* Assumes rq->lock is held */
1738 static void rq_online_dl(struct rq *rq)
1740 if (rq->dl.overloaded)
1741 dl_set_overload(rq);
1743 cpudl_set_freecpu(&rq->rd->cpudl, rq->cpu);
1744 if (rq->dl.dl_nr_running > 0)
1745 cpudl_set(&rq->rd->cpudl, rq->cpu, rq->dl.earliest_dl.curr, 1);
1748 /* Assumes rq->lock is held */
1749 static void rq_offline_dl(struct rq *rq)
1751 if (rq->dl.overloaded)
1752 dl_clear_overload(rq);
1754 cpudl_set(&rq->rd->cpudl, rq->cpu, 0, 0);
1755 cpudl_clear_freecpu(&rq->rd->cpudl, rq->cpu);
1758 void __init init_sched_dl_class(void)
1762 for_each_possible_cpu(i)
1763 zalloc_cpumask_var_node(&per_cpu(local_cpu_mask_dl, i),
1764 GFP_KERNEL, cpu_to_node(i));
1767 #endif /* CONFIG_SMP */
1769 static void switched_from_dl(struct rq *rq, struct task_struct *p)
1772 * Start the deadline timer; if we switch back to dl before this we'll
1773 * continue consuming our current CBS slice. If we stay outside of
1774 * SCHED_DEADLINE until the deadline passes, the timer will reset the
1777 if (!start_dl_timer(p))
1778 __dl_clear_params(p);
1780 clear_average_bw(&p->dl, &rq->dl);
1783 * Since this might be the only -deadline task on the rq,
1784 * this is the right place to try to pull some other one
1785 * from an overloaded cpu, if any.
1787 if (!task_on_rq_queued(p) || rq->dl.dl_nr_running)
1790 queue_pull_task(rq);
1794 * When switching to -deadline, we may overload the rq, then
1795 * we try to push someone off, if possible.
1797 static void switched_to_dl(struct rq *rq, struct task_struct *p)
1799 if (task_on_rq_queued(p) && rq->curr != p) {
1801 if (p->nr_cpus_allowed > 1 && rq->dl.overloaded)
1802 queue_push_tasks(rq);
1804 if (dl_task(rq->curr))
1805 check_preempt_curr_dl(rq, p, 0);
1813 * If the scheduling parameters of a -deadline task changed,
1814 * a push or pull operation might be needed.
1816 static void prio_changed_dl(struct rq *rq, struct task_struct *p,
1819 if (task_on_rq_queued(p) || rq->curr == p) {
1822 * This might be too much, but unfortunately
1823 * we don't have the old deadline value, and
1824 * we can't tell whether the task is increasing
1825 * or lowering its prio, so...
1827 if (!rq->dl.overloaded)
1828 queue_pull_task(rq);
1831 * If we now have an earlier deadline task than p,
1832 * then reschedule, provided p is still on this
1835 if (dl_time_before(rq->dl.earliest_dl.curr, p->dl.deadline))
1839 * Again, we don't know if p has an earlier
1840 * or later deadline, so let's blindly set a
1841 * (maybe not needed) rescheduling point.
1844 #endif /* CONFIG_SMP */
1846 switched_to_dl(rq, p);
1849 const struct sched_class dl_sched_class = {
1850 .next = &rt_sched_class,
1851 .enqueue_task = enqueue_task_dl,
1852 .dequeue_task = dequeue_task_dl,
1853 .yield_task = yield_task_dl,
1855 .check_preempt_curr = check_preempt_curr_dl,
1857 .pick_next_task = pick_next_task_dl,
1858 .put_prev_task = put_prev_task_dl,
1861 .select_task_rq = select_task_rq_dl,
1862 .set_cpus_allowed = set_cpus_allowed_dl,
1863 .rq_online = rq_online_dl,
1864 .rq_offline = rq_offline_dl,
1865 .task_woken = task_woken_dl,
1868 .set_curr_task = set_curr_task_dl,
1869 .task_tick = task_tick_dl,
1870 .task_fork = task_fork_dl,
1871 .task_dead = task_dead_dl,
1873 .prio_changed = prio_changed_dl,
1874 .switched_from = switched_from_dl,
1875 .switched_to = switched_to_dl,
1877 .update_curr = update_curr_dl,
1880 #ifdef CONFIG_SCHED_DEBUG
1881 extern void print_dl_rq(struct seq_file *m, int cpu, struct dl_rq *dl_rq);
1883 void print_dl_stats(struct seq_file *m, int cpu)
1885 print_dl_rq(m, cpu, &cpu_rq(cpu)->dl);
1887 #endif /* CONFIG_SCHED_DEBUG */