2 * Completely Fair Scheduling (CFS) Class (SCHED_NORMAL/SCHED_BATCH)
4 * Copyright (C) 2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
6 * Interactivity improvements by Mike Galbraith
7 * (C) 2007 Mike Galbraith <efault@gmx.de>
9 * Various enhancements by Dmitry Adamushko.
10 * (C) 2007 Dmitry Adamushko <dmitry.adamushko@gmail.com>
12 * Group scheduling enhancements by Srivatsa Vaddagiri
13 * Copyright IBM Corporation, 2007
14 * Author: Srivatsa Vaddagiri <vatsa@linux.vnet.ibm.com>
16 * Scaled math optimizations by Thomas Gleixner
17 * Copyright (C) 2007, Thomas Gleixner <tglx@linutronix.de>
19 * Adaptive scheduling granularity, math enhancements by Peter Zijlstra
20 * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
23 #include <linux/latencytop.h>
24 #include <linux/sched.h>
25 #include <linux/cpumask.h>
26 #include <linux/slab.h>
27 #include <linux/profile.h>
28 #include <linux/interrupt.h>
29 #include <linux/mempolicy.h>
30 #include <linux/migrate.h>
31 #include <linux/task_work.h>
33 #include <trace/events/sched.h>
38 * Targeted preemption latency for CPU-bound tasks:
39 * (default: 6ms * (1 + ilog(ncpus)), units: nanoseconds)
41 * NOTE: this latency value is not the same as the concept of
42 * 'timeslice length' - timeslices in CFS are of variable length
43 * and have no persistent notion like in traditional, time-slice
44 * based scheduling concepts.
46 * (to see the precise effective timeslice length of your workload,
47 * run vmstat and monitor the context-switches (cs) field)
49 unsigned int sysctl_sched_latency = 6000000ULL;
50 unsigned int normalized_sysctl_sched_latency = 6000000ULL;
53 * The initial- and re-scaling of tunables is configurable
54 * (default: SCHED_TUNABLESCALING_LOG = *(1+ilog(ncpus)))
57 * SCHED_TUNABLESCALING_NONE - unscaled, always *1
58 * SCHED_TUNABLESCALING_LOG - scaled logarithmically, *(1+ilog(ncpus))
59 * SCHED_TUNABLESCALING_LINEAR - scaled linearly, *ncpus
61 enum sched_tunable_scaling sysctl_sched_tunable_scaling
62 = SCHED_TUNABLESCALING_LOG;
65 * Minimal preemption granularity for CPU-bound tasks:
66 * (default: 0.75 msec * (1 + ilog(ncpus)), units: nanoseconds)
68 unsigned int sysctl_sched_min_granularity = 750000ULL;
69 unsigned int normalized_sysctl_sched_min_granularity = 750000ULL;
72 * sched_nr_latency is kept at sysctl_sched_latency / sysctl_sched_min_granularity
74 static unsigned int sched_nr_latency = 8;
77 * After fork, child runs first. If set to 0 (default) then
78 * parent will (try to) run first.
80 unsigned int sysctl_sched_child_runs_first __read_mostly;
83 * SCHED_OTHER wake-up granularity.
84 * (default: 1 msec * (1 + ilog(ncpus)), units: nanoseconds)
86 * This option delays the preemption effects of decoupled workloads
87 * and reduces their over-scheduling. Synchronous workloads will still
88 * have immediate wakeup/sleep latencies.
90 unsigned int sysctl_sched_wakeup_granularity = 1000000UL;
91 unsigned int normalized_sysctl_sched_wakeup_granularity = 1000000UL;
93 const_debug unsigned int sysctl_sched_migration_cost = 500000UL;
96 * The exponential sliding window over which load is averaged for shares
100 unsigned int __read_mostly sysctl_sched_shares_window = 10000000UL;
102 #ifdef CONFIG_CFS_BANDWIDTH
104 * Amount of runtime to allocate from global (tg) to local (per-cfs_rq) pool
105 * each time a cfs_rq requests quota.
107 * Note: if the slice exceeds the remaining runtime (either because it has
108 * been consumed or because the quota was specified to be smaller than the slice)
109 * we only ever issue the remaining available time.
111 * default: 5 msec, units: microseconds
113 unsigned int sysctl_sched_cfs_bandwidth_slice = 5000UL;
116 static inline void update_load_add(struct load_weight *lw, unsigned long inc)
122 static inline void update_load_sub(struct load_weight *lw, unsigned long dec)
128 static inline void update_load_set(struct load_weight *lw, unsigned long w)
135 * Increase the granularity value when there are more CPUs,
136 * because with more CPUs the 'effective latency' as visible
137 * to users decreases. But the relationship is not linear,
138 * so pick a second-best guess by going with the log2 of the number of CPUs.
141 * This idea comes from the SD scheduler of Con Kolivas:
143 static int get_update_sysctl_factor(void)
145 unsigned int cpus = min_t(int, num_online_cpus(), 8);
148 switch (sysctl_sched_tunable_scaling) {
149 case SCHED_TUNABLESCALING_NONE:
152 case SCHED_TUNABLESCALING_LINEAR:
155 case SCHED_TUNABLESCALING_LOG:
157 factor = 1 + ilog2(cpus);
164 static void update_sysctl(void)
166 unsigned int factor = get_update_sysctl_factor();
168 #define SET_SYSCTL(name) \
169 (sysctl_##name = (factor) * normalized_sysctl_##name)
170 SET_SYSCTL(sched_min_granularity);
171 SET_SYSCTL(sched_latency);
172 SET_SYSCTL(sched_wakeup_granularity);
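/*
 * Illustrative sketch (not part of the original file): a standalone
 * approximation of the LOG scaling applied above. With the default 6ms
 * latency, an 8-CPU machine gets factor = 1 + ilog2(8) = 4, i.e. a 24ms
 * effective latency and a 3ms minimum granularity. The helper names below
 * are hypothetical.
 */
static unsigned int example_ilog2(unsigned int n)
{
	unsigned int log = 0;

	while (n >>= 1)
		log++;
	return log;
}

static unsigned int example_scaled_tunable(unsigned int normalized_ns,
					   unsigned int online_cpus)
{
	/* the kernel caps the CPU count fed into the factor at 8 */
	unsigned int cpus = online_cpus < 8 ? online_cpus : 8;

	return (1 + example_ilog2(cpus)) * normalized_ns;
}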
176 void sched_init_granularity(void)
181 #define WMULT_CONST (~0U)
182 #define WMULT_SHIFT 32
184 static void __update_inv_weight(struct load_weight *lw)
188 if (likely(lw->inv_weight))
191 w = scale_load_down(lw->weight);
193 if (BITS_PER_LONG > 32 && unlikely(w >= WMULT_CONST))
195 else if (unlikely(!w))
196 lw->inv_weight = WMULT_CONST;
198 lw->inv_weight = WMULT_CONST / w;
202 * delta_exec * weight / lw.weight
204 * (delta_exec * (weight * lw->inv_weight)) >> WMULT_SHIFT
206 * Either weight := NICE_0_LOAD and lw \e prio_to_wmult[], in which case
207 * we're guaranteed shift stays positive because inv_weight is guaranteed to
208 * fit 32 bits, and NICE_0_LOAD gives another 10 bits; therefore shift >= 22.
210 * Or, weight <= lw.weight (because lw.weight is the runqueue weight), thus
211 * weight/lw.weight <= 1, and therefore our shift will also be positive.
213 static u64 __calc_delta(u64 delta_exec, unsigned long weight, struct load_weight *lw)
215 u64 fact = scale_load_down(weight);
216 int shift = WMULT_SHIFT;
218 __update_inv_weight(lw);
220 if (unlikely(fact >> 32)) {
227 /* hint to use a 32x32->64 mul */
228 fact = (u64)(u32)fact * lw->inv_weight;
235 return mul_u64_u32_shr(delta_exec, fact, shift);
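/*
 * Illustrative sketch (not part of the original file): the fixed-point trick
 * behind __calc_delta(), reduced to plain 64-bit userspace arithmetic. The
 * per-call division by lw->weight is replaced by one up-front division that
 * builds inv_weight = 2^32 / weight, after which only a multiply and a shift
 * are needed. This simplified version assumes the intermediate product fits
 * in 64 bits; the kernel version additionally rescales to avoid overflow.
 * Names below are hypothetical.
 */
#include <stdint.h>

static uint64_t example_calc_delta(uint64_t delta_exec, uint32_t weight,
				   uint32_t rq_weight)
{
	/* one division, analogous to __update_inv_weight() */
	uint64_t inv_weight = 0xffffffffULL / rq_weight;

	/*
	 * delta_exec * weight / rq_weight
	 *	~= (delta_exec * weight * inv_weight) >> 32
	 *
	 * e.g. delta_exec = 1000000ns, weight = 1024 (nice 0) and
	 * rq_weight = 2048 (two nice-0 tasks) yields ~500000ns.
	 */
	return (delta_exec * weight * inv_weight) >> 32;
}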
239 const struct sched_class fair_sched_class;
241 /**************************************************************
242 * CFS operations on generic schedulable entities:
245 #ifdef CONFIG_FAIR_GROUP_SCHED
247 /* cpu runqueue to which this cfs_rq is attached */
248 static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
253 /* An entity is a task if it doesn't "own" a runqueue */
254 #define entity_is_task(se) (!se->my_q)
256 static inline struct task_struct *task_of(struct sched_entity *se)
258 #ifdef CONFIG_SCHED_DEBUG
259 WARN_ON_ONCE(!entity_is_task(se));
261 return container_of(se, struct task_struct, se);
264 /* Walk up scheduling entities hierarchy */
265 #define for_each_sched_entity(se) \
266 for (; se; se = se->parent)
268 static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
273 /* runqueue on which this entity is (to be) queued */
274 static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
279 /* runqueue "owned" by this group */
280 static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
285 static void update_cfs_rq_blocked_load(struct cfs_rq *cfs_rq,
288 static inline void list_add_leaf_cfs_rq(struct cfs_rq *cfs_rq)
290 if (!cfs_rq->on_list) {
292 * Ensure we either appear before our parent (if already
293 * enqueued) or force our parent to appear after us when it is
294 * enqueued. The fact that we always enqueue bottom-up
295 * reduces this to two cases.
297 if (cfs_rq->tg->parent &&
298 cfs_rq->tg->parent->cfs_rq[cpu_of(rq_of(cfs_rq))]->on_list) {
299 list_add_rcu(&cfs_rq->leaf_cfs_rq_list,
300 &rq_of(cfs_rq)->leaf_cfs_rq_list);
302 list_add_tail_rcu(&cfs_rq->leaf_cfs_rq_list,
303 &rq_of(cfs_rq)->leaf_cfs_rq_list);
307 /* We should have no load, but we need to update last_decay. */
308 update_cfs_rq_blocked_load(cfs_rq, 0);
312 static inline void list_del_leaf_cfs_rq(struct cfs_rq *cfs_rq)
314 if (cfs_rq->on_list) {
315 list_del_rcu(&cfs_rq->leaf_cfs_rq_list);
320 /* Iterate through all leaf cfs_rq's on a runqueue */
321 #define for_each_leaf_cfs_rq(rq, cfs_rq) \
322 list_for_each_entry_rcu(cfs_rq, &rq->leaf_cfs_rq_list, leaf_cfs_rq_list)
324 /* Do the two (enqueued) entities belong to the same group? */
326 is_same_group(struct sched_entity *se, struct sched_entity *pse)
328 if (se->cfs_rq == pse->cfs_rq)
334 static inline struct sched_entity *parent_entity(struct sched_entity *se)
339 /* return depth at which a sched entity is present in the hierarchy */
340 static inline int depth_se(struct sched_entity *se)
344 for_each_sched_entity(se)
351 find_matching_se(struct sched_entity **se, struct sched_entity **pse)
353 int se_depth, pse_depth;
356 * The preemption test can be made between sibling entities that are in the
357 * same cfs_rq, i.e. that have a common parent. Walk up the hierarchy of
358 * both tasks until we find ancestors that are siblings of a common parent.
362 /* First walk up until both entities are at same depth */
363 se_depth = depth_se(*se);
364 pse_depth = depth_se(*pse);
366 while (se_depth > pse_depth) {
368 *se = parent_entity(*se);
371 while (pse_depth > se_depth) {
373 *pse = parent_entity(*pse);
376 while (!is_same_group(*se, *pse)) {
377 *se = parent_entity(*se);
378 *pse = parent_entity(*pse);
382 #else /* !CONFIG_FAIR_GROUP_SCHED */
384 static inline struct task_struct *task_of(struct sched_entity *se)
386 return container_of(se, struct task_struct, se);
389 static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
391 return container_of(cfs_rq, struct rq, cfs);
394 #define entity_is_task(se) 1
396 #define for_each_sched_entity(se) \
397 for (; se; se = NULL)
399 static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
401 return &task_rq(p)->cfs;
404 static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
406 struct task_struct *p = task_of(se);
407 struct rq *rq = task_rq(p);
412 /* runqueue "owned" by this group */
413 static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
418 static inline void list_add_leaf_cfs_rq(struct cfs_rq *cfs_rq)
422 static inline void list_del_leaf_cfs_rq(struct cfs_rq *cfs_rq)
426 #define for_each_leaf_cfs_rq(rq, cfs_rq) \
427 for (cfs_rq = &rq->cfs; cfs_rq; cfs_rq = NULL)
430 is_same_group(struct sched_entity *se, struct sched_entity *pse)
435 static inline struct sched_entity *parent_entity(struct sched_entity *se)
441 find_matching_se(struct sched_entity **se, struct sched_entity **pse)
445 #endif /* CONFIG_FAIR_GROUP_SCHED */
447 static __always_inline
448 void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec);
450 /**************************************************************
451 * Scheduling class tree data structure manipulation methods:
454 static inline u64 max_vruntime(u64 max_vruntime, u64 vruntime)
456 s64 delta = (s64)(vruntime - max_vruntime);
458 max_vruntime = vruntime;
463 static inline u64 min_vruntime(u64 min_vruntime, u64 vruntime)
465 s64 delta = (s64)(vruntime - min_vruntime);
467 min_vruntime = vruntime;
472 static inline int entity_before(struct sched_entity *a,
473 struct sched_entity *b)
475 return (s64)(a->vruntime - b->vruntime) < 0;
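/*
 * Illustrative sketch (not part of the original file): why the vruntime
 * helpers above compare via a signed difference rather than "a < b".
 * vruntime is an ever-increasing u64 that may eventually wrap; casting the
 * difference to s64 keeps the ordering correct as long as the two values
 * are within 2^63 of each other. Names below are hypothetical.
 */
#include <stdint.h>
#include <stdbool.h>

static bool example_vruntime_before(uint64_t a, uint64_t b)
{
	/*
	 * Near a wrap, a = 0xffffffffffffff00 and b = 0x100 still compare
	 * correctly: (int64_t)(a - b) is negative, so "a" is considered
	 * earlier even though a > b numerically.
	 */
	return (int64_t)(a - b) < 0;
}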
478 static void update_min_vruntime(struct cfs_rq *cfs_rq)
480 u64 vruntime = cfs_rq->min_vruntime;
483 vruntime = cfs_rq->curr->vruntime;
485 if (cfs_rq->rb_leftmost) {
486 struct sched_entity *se = rb_entry(cfs_rq->rb_leftmost,
491 vruntime = se->vruntime;
493 vruntime = min_vruntime(vruntime, se->vruntime);
496 /* ensure we never gain time by being placed backwards. */
497 cfs_rq->min_vruntime = max_vruntime(cfs_rq->min_vruntime, vruntime);
500 cfs_rq->min_vruntime_copy = cfs_rq->min_vruntime;
505 * Enqueue an entity into the rb-tree:
507 static void __enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
509 struct rb_node **link = &cfs_rq->tasks_timeline.rb_node;
510 struct rb_node *parent = NULL;
511 struct sched_entity *entry;
515 * Find the right place in the rbtree:
519 entry = rb_entry(parent, struct sched_entity, run_node);
521 * We don't care about collisions. Nodes with
522 * the same key stay together.
524 if (entity_before(se, entry)) {
525 link = &parent->rb_left;
527 link = &parent->rb_right;
533 * Maintain a cache of leftmost tree entries (it is frequently used):
537 cfs_rq->rb_leftmost = &se->run_node;
539 rb_link_node(&se->run_node, parent, link);
540 rb_insert_color(&se->run_node, &cfs_rq->tasks_timeline);
543 static void __dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
545 if (cfs_rq->rb_leftmost == &se->run_node) {
546 struct rb_node *next_node;
548 next_node = rb_next(&se->run_node);
549 cfs_rq->rb_leftmost = next_node;
552 rb_erase(&se->run_node, &cfs_rq->tasks_timeline);
555 struct sched_entity *__pick_first_entity(struct cfs_rq *cfs_rq)
557 struct rb_node *left = cfs_rq->rb_leftmost;
562 return rb_entry(left, struct sched_entity, run_node);
565 static struct sched_entity *__pick_next_entity(struct sched_entity *se)
567 struct rb_node *next = rb_next(&se->run_node);
572 return rb_entry(next, struct sched_entity, run_node);
575 #ifdef CONFIG_SCHED_DEBUG
576 struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq)
578 struct rb_node *last = rb_last(&cfs_rq->tasks_timeline);
583 return rb_entry(last, struct sched_entity, run_node);
586 /**************************************************************
587 * Scheduling class statistics methods:
590 int sched_proc_update_handler(struct ctl_table *table, int write,
591 void __user *buffer, size_t *lenp,
594 int ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
595 int factor = get_update_sysctl_factor();
600 sched_nr_latency = DIV_ROUND_UP(sysctl_sched_latency,
601 sysctl_sched_min_granularity);
603 #define WRT_SYSCTL(name) \
604 (normalized_sysctl_##name = sysctl_##name / (factor))
605 WRT_SYSCTL(sched_min_granularity);
606 WRT_SYSCTL(sched_latency);
607 WRT_SYSCTL(sched_wakeup_granularity);
617 static inline u64 calc_delta_fair(u64 delta, struct sched_entity *se)
619 if (unlikely(se->load.weight != NICE_0_LOAD))
620 delta = __calc_delta(delta, NICE_0_LOAD, &se->load);
626 * The idea is to set a period in which each task runs once.
628 * When there are too many tasks (more than sched_nr_latency) we have to stretch
629 * this period because otherwise the slices get too small.
631 * p = (nr <= nl) ? l : l*nr/nl
633 static u64 __sched_period(unsigned long nr_running)
635 u64 period = sysctl_sched_latency;
636 unsigned long nr_latency = sched_nr_latency;
638 if (unlikely(nr_running > nr_latency)) {
639 period = sysctl_sched_min_granularity;
640 period *= nr_running;
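/*
 * Illustrative sketch (not part of the original file): the period formula
 * p = (nr <= nl) ? l : l*nr/nl with the default l = 6ms and nl = 8, written
 * as a standalone helper. With 4 runnable tasks the period stays at 6ms;
 * with 16 tasks it stretches to 16 * 0.75ms = 12ms so no slice falls below
 * the minimum granularity. Names below are hypothetical.
 */
#include <stdint.h>

static uint64_t example_sched_period(unsigned long nr_running)
{
	const uint64_t latency = 6000000ULL;	/* sysctl_sched_latency */
	const uint64_t min_gran = 750000ULL;	/* sysctl_sched_min_granularity */
	const unsigned long nr_latency = 8;	/* latency / min_gran */

	if (nr_running <= nr_latency)
		return latency;
	return min_gran * nr_running;		/* l*nr/nl */
}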
647 * We calculate the wall-time slice from the period by taking a part
648 * proportional to the weight.
652 static u64 sched_slice(struct cfs_rq *cfs_rq, struct sched_entity *se)
654 u64 slice = __sched_period(cfs_rq->nr_running + !se->on_rq);
656 for_each_sched_entity(se) {
657 struct load_weight *load;
658 struct load_weight lw;
660 cfs_rq = cfs_rq_of(se);
661 load = &cfs_rq->load;
663 if (unlikely(!se->on_rq)) {
666 update_load_add(&lw, se->load.weight);
669 slice = __calc_delta(slice, se->load.weight, load);
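/*
 * Illustrative sketch (not part of the original file): the wall-time slice
 * is the period scaled by the entity's share of the runqueue weight,
 * slice = period * w_se / w_rq. Two nice-0 tasks (weight 1024 each) on a
 * 6ms period get ~3ms each, while a nice-0 task sharing the runqueue with
 * a nice-5 task (weight 335) gets ~4.5ms. Names below are hypothetical.
 */
#include <stdint.h>

static uint64_t example_sched_slice(uint64_t period_ns, uint32_t se_weight,
				    uint64_t rq_weight)
{
	/* assumes period_ns * se_weight fits in 64 bits (true for ns periods) */
	return period_ns * se_weight / rq_weight;
}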
675 * We calculate the vruntime slice of a to-be-inserted task.
679 static u64 sched_vslice(struct cfs_rq *cfs_rq, struct sched_entity *se)
681 return calc_delta_fair(sched_slice(cfs_rq, se), se);
685 static unsigned long task_h_load(struct task_struct *p);
687 static inline void __update_task_entity_contrib(struct sched_entity *se);
689 /* Give a new task initial runnable-average values so it carries full load right after fork */
690 void init_task_runnable_average(struct task_struct *p)
694 p->se.avg.decay_count = 0;
695 slice = sched_slice(task_cfs_rq(p), &p->se) >> 10;
696 p->se.avg.runnable_avg_sum = slice;
697 p->se.avg.runnable_avg_period = slice;
698 __update_task_entity_contrib(&p->se);
701 void init_task_runnable_average(struct task_struct *p)
707 * Update the current task's runtime statistics.
709 static void update_curr(struct cfs_rq *cfs_rq)
711 struct sched_entity *curr = cfs_rq->curr;
712 u64 now = rq_clock_task(rq_of(cfs_rq));
718 delta_exec = now - curr->exec_start;
719 if (unlikely((s64)delta_exec <= 0))
722 curr->exec_start = now;
724 schedstat_set(curr->statistics.exec_max,
725 max(delta_exec, curr->statistics.exec_max));
727 curr->sum_exec_runtime += delta_exec;
728 schedstat_add(cfs_rq, exec_clock, delta_exec);
730 curr->vruntime += calc_delta_fair(delta_exec, curr);
731 update_min_vruntime(cfs_rq);
733 if (entity_is_task(curr)) {
734 struct task_struct *curtask = task_of(curr);
736 trace_sched_stat_runtime(curtask, delta_exec, curr->vruntime);
737 cpuacct_charge(curtask, delta_exec);
738 account_group_exec_runtime(curtask, delta_exec);
741 account_cfs_rq_runtime(cfs_rq, delta_exec);
745 update_stats_wait_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
747 schedstat_set(se->statistics.wait_start, rq_clock(rq_of(cfs_rq)));
751 * Task is being enqueued - update stats:
753 static void update_stats_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
756 * Are we enqueueing a waiting task? (for current tasks
757 * a dequeue/enqueue event is a NOP)
759 if (se != cfs_rq->curr)
760 update_stats_wait_start(cfs_rq, se);
764 update_stats_wait_end(struct cfs_rq *cfs_rq, struct sched_entity *se)
766 schedstat_set(se->statistics.wait_max, max(se->statistics.wait_max,
767 rq_clock(rq_of(cfs_rq)) - se->statistics.wait_start));
768 schedstat_set(se->statistics.wait_count, se->statistics.wait_count + 1);
769 schedstat_set(se->statistics.wait_sum, se->statistics.wait_sum +
770 rq_clock(rq_of(cfs_rq)) - se->statistics.wait_start);
771 #ifdef CONFIG_SCHEDSTATS
772 if (entity_is_task(se)) {
773 trace_sched_stat_wait(task_of(se),
774 rq_clock(rq_of(cfs_rq)) - se->statistics.wait_start);
777 schedstat_set(se->statistics.wait_start, 0);
781 update_stats_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
784 * Mark the end of the wait period if dequeueing a waiting task:
787 if (se != cfs_rq->curr)
788 update_stats_wait_end(cfs_rq, se);
792 * We are picking a new current task - update its stats:
795 update_stats_curr_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
798 * We are starting a new run period:
800 se->exec_start = rq_clock_task(rq_of(cfs_rq));
803 /**************************************************
804 * Scheduling class queueing methods:
807 #ifdef CONFIG_NUMA_BALANCING
809 * Approximate time to scan a full NUMA task in ms. The task scan period is
810 * calculated based on the task's virtual memory size and
811 * numa_balancing_scan_size.
813 unsigned int sysctl_numa_balancing_scan_period_min = 1000;
814 unsigned int sysctl_numa_balancing_scan_period_max = 60000;
816 /* Portion of address space to scan in MB */
817 unsigned int sysctl_numa_balancing_scan_size = 256;
819 /* Scan @scan_size MB every @scan_period after an initial @scan_delay in ms */
820 unsigned int sysctl_numa_balancing_scan_delay = 1000;
822 static unsigned int task_nr_scan_windows(struct task_struct *p)
824 unsigned long rss = 0;
825 unsigned long nr_scan_pages;
828 * Calculations based on RSS as non-present and empty pages are skipped
829 * by the PTE scanner and NUMA hinting faults should be trapped based on resident pages.
832 nr_scan_pages = sysctl_numa_balancing_scan_size << (20 - PAGE_SHIFT);
833 rss = get_mm_rss(p->mm);
837 rss = round_up(rss, nr_scan_pages);
838 return rss / nr_scan_pages;
841 /* For sanity's sake, never scan more PTEs than MAX_SCAN_WINDOW MB/sec. */
842 #define MAX_SCAN_WINDOW 2560
844 static unsigned int task_scan_min(struct task_struct *p)
846 unsigned int scan, floor;
847 unsigned int windows = 1;
849 if (sysctl_numa_balancing_scan_size < MAX_SCAN_WINDOW)
850 windows = MAX_SCAN_WINDOW / sysctl_numa_balancing_scan_size;
851 floor = 1000 / windows;
853 scan = sysctl_numa_balancing_scan_period_min / task_nr_scan_windows(p);
854 return max_t(unsigned int, floor, scan);
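/*
 * Illustrative sketch (not part of the original file): how the minimum scan
 * period above scales with a task's RSS. With the defaults
 * (scan_period_min = 1000ms, scan_size = 256MB, MAX_SCAN_WINDOW = 2560MB/sec),
 * a task with 1GB resident needs 4 scan windows, so its minimum period is
 * 1000/4 = 250ms; the 2560MB/sec cap imposes a 100ms floor regardless of RSS.
 * This approximation skips the round-up of RSS done above; names below are
 * hypothetical.
 */
static unsigned int example_task_scan_min(unsigned long rss_mb)
{
	const unsigned int scan_period_min = 1000;	/* ms */
	const unsigned int scan_size = 256;		/* MB per window */
	const unsigned int max_scan_window = 2560;	/* MB per second */
	unsigned int windows, floor, scan;

	windows = rss_mb / scan_size;			/* full address-space passes */
	if (windows == 0)
		windows = 1;
	scan = scan_period_min / windows;

	floor = 1000 / (max_scan_window / scan_size);	/* never exceed the MB/sec cap */
	return scan > floor ? scan : floor;
}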
857 static unsigned int task_scan_max(struct task_struct *p)
859 unsigned int smin = task_scan_min(p);
862 /* Watch for min being lower than max due to floor calculations */
863 smax = sysctl_numa_balancing_scan_period_max / task_nr_scan_windows(p);
864 return max(smin, smax);
867 static void account_numa_enqueue(struct rq *rq, struct task_struct *p)
869 rq->nr_numa_running += (p->numa_preferred_nid != -1);
870 rq->nr_preferred_running += (p->numa_preferred_nid == task_node(p));
873 static void account_numa_dequeue(struct rq *rq, struct task_struct *p)
875 rq->nr_numa_running -= (p->numa_preferred_nid != -1);
876 rq->nr_preferred_running -= (p->numa_preferred_nid == task_node(p));
882 spinlock_t lock; /* nr_tasks, tasks */
885 struct list_head task_list;
888 nodemask_t active_nodes;
889 unsigned long total_faults;
891 * Faults_cpu is used to decide whether memory should move
892 * towards the CPU. As a consequence, these stats are weighted
893 * more by CPU use than by memory faults.
895 unsigned long *faults_cpu;
896 unsigned long faults[0];
899 /* Shared or private faults. */
900 #define NR_NUMA_HINT_FAULT_TYPES 2
902 /* Memory and CPU locality */
903 #define NR_NUMA_HINT_FAULT_STATS (NR_NUMA_HINT_FAULT_TYPES * 2)
905 /* Averaged statistics, and temporary buffers. */
906 #define NR_NUMA_HINT_FAULT_BUCKETS (NR_NUMA_HINT_FAULT_STATS * 2)
908 pid_t task_numa_group_id(struct task_struct *p)
910 return p->numa_group ? p->numa_group->gid : 0;
913 static inline int task_faults_idx(int nid, int priv)
915 return NR_NUMA_HINT_FAULT_TYPES * nid + priv;
918 static inline unsigned long task_faults(struct task_struct *p, int nid)
920 if (!p->numa_faults_memory)
923 return p->numa_faults_memory[task_faults_idx(nid, 0)] +
924 p->numa_faults_memory[task_faults_idx(nid, 1)];
927 static inline unsigned long group_faults(struct task_struct *p, int nid)
932 return p->numa_group->faults[task_faults_idx(nid, 0)] +
933 p->numa_group->faults[task_faults_idx(nid, 1)];
936 static inline unsigned long group_faults_cpu(struct numa_group *group, int nid)
938 return group->faults_cpu[task_faults_idx(nid, 0)] +
939 group->faults_cpu[task_faults_idx(nid, 1)];
943 * These return the fraction of accesses done by a particular task, or
944 * task group, on a particular numa node. The group weight is given a
945 * larger multiplier, in order to group tasks together that are almost
946 * evenly spread out between numa nodes.
948 static inline unsigned long task_weight(struct task_struct *p, int nid)
950 unsigned long total_faults;
952 if (!p->numa_faults_memory)
955 total_faults = p->total_numa_faults;
960 return 1000 * task_faults(p, nid) / total_faults;
963 static inline unsigned long group_weight(struct task_struct *p, int nid)
965 if (!p->numa_group || !p->numa_group->total_faults)
968 return 1000 * group_faults(p, nid) / p->numa_group->total_faults;
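/*
 * Illustrative sketch (not part of the original file): task_weight() and
 * group_weight() above both reduce to "1000 * faults on this node / total
 * faults". A task with 300 of its 400 recorded faults on node 1 therefore
 * has a node-1 weight of 750 out of 1000. Names below are hypothetical.
 */
static unsigned long example_numa_weight(unsigned long node_faults,
					 unsigned long total_faults)
{
	if (!total_faults)
		return 0;
	return 1000 * node_faults / total_faults;
}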
971 bool should_numa_migrate_memory(struct task_struct *p, struct page * page,
972 int src_nid, int dst_cpu)
974 struct numa_group *ng = p->numa_group;
975 int dst_nid = cpu_to_node(dst_cpu);
976 int last_cpupid, this_cpupid;
978 this_cpupid = cpu_pid_to_cpupid(dst_cpu, current->pid);
981 * Multi-stage node selection is used in conjunction with a periodic
982 * migration fault to build a temporal task<->page relation. By using
983 * a two-stage filter we remove short/unlikely relations.
985 * Using P(p) ~ n_p / n_t as per frequentist probability, we can equate
986 * a task's usage of a particular page (n_p) per total usage of this
987 * page (n_t) (in a given time-span) to a probability.
989 * Our periodic faults will sample this probability and getting the
990 * same result twice in a row, given these samples are fully
991 * independent, is then given by P(n)^2, provided our sample period
992 * is sufficiently short compared to the usage pattern.
994 * This quadratic squishes small probabilities, making it less likely we
995 * act on an unlikely task<->page relation.
997 last_cpupid = page_cpupid_xchg_last(page, this_cpupid);
998 if (!cpupid_pid_unset(last_cpupid) &&
999 cpupid_to_nid(last_cpupid) != dst_nid)
1002 /* Always allow migrate on private faults */
1003 if (cpupid_match_pid(p, last_cpupid))
1006 /* A shared fault, but p->numa_group has not been set up yet. */
1011 * Do not migrate if the destination is not a node that
1012 * is actively used by this numa group.
1014 if (!node_isset(dst_nid, ng->active_nodes))
1018 * Source is a node that is not actively used by this
1019 * numa group, while the destination is. Migrate.
1021 if (!node_isset(src_nid, ng->active_nodes))
1025 * Both source and destination are nodes in active
1026 * use by this numa group. Maximize memory bandwidth
1027 * by migrating from more heavily used nodes to less
1028 * heavily used ones, spreading the load around.
1029 * Use a 1/4 hysteresis to avoid spurious page movement.
1031 return group_faults(p, dst_nid) < (group_faults(p, src_nid) * 3 / 4);
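/*
 * Illustrative sketch (not part of the original file): the final hysteresis
 * check above in plain form. When both source and destination nodes are in
 * the group's active set, the page only moves if the destination node
 * accounts for less than 3/4 of the group faults the source node does,
 * spreading memory towards less heavily used nodes without flip-flopping on
 * small differences. Names below are hypothetical.
 */
#include <stdbool.h>

static bool example_spread_page(unsigned long dst_group_faults,
				unsigned long src_group_faults)
{
	/* e.g. dst = 600, src = 1000: 600 < 750, so the page may move */
	return dst_group_faults < src_group_faults * 3 / 4;
}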
1034 static unsigned long weighted_cpuload(const int cpu);
1035 static unsigned long source_load(int cpu, int type);
1036 static unsigned long target_load(int cpu, int type);
1037 static unsigned long power_of(int cpu);
1038 static long effective_load(struct task_group *tg, int cpu, long wl, long wg);
1040 /* Cached statistics for all CPUs within a node */
1042 unsigned long nr_running;
1045 /* Total compute capacity of CPUs on a node */
1046 unsigned long power;
1048 /* Approximate capacity in terms of runnable tasks on a node */
1049 unsigned long capacity;
1054 * XXX borrowed from update_sg_lb_stats
1056 static void update_numa_stats(struct numa_stats *ns, int nid)
1060 memset(ns, 0, sizeof(*ns));
1061 for_each_cpu(cpu, cpumask_of_node(nid)) {
1062 struct rq *rq = cpu_rq(cpu);
1064 ns->nr_running += rq->nr_running;
1065 ns->load += weighted_cpuload(cpu);
1066 ns->power += power_of(cpu);
1072 * If we raced with hotplug and there are no CPUs left in our mask
1073 * the @ns structure is NULL'ed and task_numa_compare() will
1074 * not find this node attractive.
1076 * We'll either bail at !has_capacity, or we'll detect a huge imbalance and bail there.
1082 ns->load = (ns->load * SCHED_POWER_SCALE) / ns->power;
1083 ns->capacity = DIV_ROUND_CLOSEST(ns->power, SCHED_POWER_SCALE);
1084 ns->has_capacity = (ns->nr_running < ns->capacity);
1087 struct task_numa_env {
1088 struct task_struct *p;
1090 int src_cpu, src_nid;
1091 int dst_cpu, dst_nid;
1093 struct numa_stats src_stats, dst_stats;
1097 struct task_struct *best_task;
1102 static void task_numa_assign(struct task_numa_env *env,
1103 struct task_struct *p, long imp)
1106 put_task_struct(env->best_task);
1111 env->best_imp = imp;
1112 env->best_cpu = env->dst_cpu;
1116 * This checks if the overall compute and NUMA accesses of the system would
1117 * be improved if the source task were migrated to the target dst_cpu, taking
1118 * into account that it might be best if the task running on the dst_cpu were
1119 * exchanged with the source task.
1121 static void task_numa_compare(struct task_numa_env *env,
1122 long taskimp, long groupimp)
1124 struct rq *src_rq = cpu_rq(env->src_cpu);
1125 struct rq *dst_rq = cpu_rq(env->dst_cpu);
1126 struct task_struct *cur;
1127 long dst_load, src_load;
1129 long imp = (groupimp > 0) ? groupimp : taskimp;
1132 cur = ACCESS_ONCE(dst_rq->curr);
1133 if (cur->pid == 0) /* idle */
1137 * "imp" is the fault differential for the source task between the
1138 * source and destination node. Calculate the total differential for
1139 * the source task and potential destination task. The more negative
1140 * the value is, the more remote accesses would be expected to
1141 * be incurred if the tasks were swapped.
1144 /* Skip this swap candidate if it cannot be moved to the source cpu */
1145 if (!cpumask_test_cpu(env->src_cpu, tsk_cpus_allowed(cur)))
1149 * If dst and source tasks are in the same NUMA group, or not
1150 * in any group then look only at task weights.
1152 if (cur->numa_group == env->p->numa_group) {
1153 imp = taskimp + task_weight(cur, env->src_nid) -
1154 task_weight(cur, env->dst_nid);
1156 * Add some hysteresis to prevent swapping the
1157 * tasks within a group over tiny differences.
1159 if (cur->numa_group)
1163 * Compare the group weights. If a task is all by
1164 * itself (not part of a group), use the task weight instead.
1167 if (env->p->numa_group)
1172 if (cur->numa_group)
1173 imp += group_weight(cur, env->src_nid) -
1174 group_weight(cur, env->dst_nid);
1176 imp += task_weight(cur, env->src_nid) -
1177 task_weight(cur, env->dst_nid);
1181 if (imp < env->best_imp)
1185 /* Is there capacity at our destination? */
1186 if (env->src_stats.has_capacity &&
1187 !env->dst_stats.has_capacity)
1193 /* Balance doesn't matter much if we're running a task per cpu */
1194 if (src_rq->nr_running == 1 && dst_rq->nr_running == 1)
1198 * In the overloaded case, try and keep the load balanced.
1201 dst_load = env->dst_stats.load;
1202 src_load = env->src_stats.load;
1204 /* XXX missing power terms */
1205 load = task_h_load(env->p);
1210 load = task_h_load(cur);
1215 /* make src_load the smaller */
1216 if (dst_load < src_load)
1217 swap(dst_load, src_load);
1219 if (src_load * env->imbalance_pct < dst_load * 100)
1223 task_numa_assign(env, cur, imp);
1228 static void task_numa_find_cpu(struct task_numa_env *env,
1229 long taskimp, long groupimp)
1233 for_each_cpu(cpu, cpumask_of_node(env->dst_nid)) {
1234 /* Skip this CPU if the source task cannot migrate */
1235 if (!cpumask_test_cpu(cpu, tsk_cpus_allowed(env->p)))
1239 task_numa_compare(env, taskimp, groupimp);
1243 static int task_numa_migrate(struct task_struct *p)
1245 struct task_numa_env env = {
1248 .src_cpu = task_cpu(p),
1249 .src_nid = task_node(p),
1251 .imbalance_pct = 112,
1257 struct sched_domain *sd;
1258 unsigned long taskweight, groupweight;
1260 long taskimp, groupimp;
1263 * Pick the lowest SD_NUMA domain, as that would have the smallest
1264 * imbalance and would be the first to start moving tasks about.
1266 * And we want to avoid any moving of tasks about, as that would create
1267 * random movement of tasks -- countering the numa conditions we're trying to satisfy here.
1271 sd = rcu_dereference(per_cpu(sd_numa, env.src_cpu));
1273 env.imbalance_pct = 100 + (sd->imbalance_pct - 100) / 2;
1277 * Cpusets can break the scheduler domain tree into smaller
1278 * balance domains, some of which do not cross NUMA boundaries.
1279 * Tasks that are "trapped" in such domains cannot be migrated
1280 * elsewhere, so there is no point in (re)trying.
1282 if (unlikely(!sd)) {
1283 p->numa_preferred_nid = task_node(p);
1287 taskweight = task_weight(p, env.src_nid);
1288 groupweight = group_weight(p, env.src_nid);
1289 update_numa_stats(&env.src_stats, env.src_nid);
1290 env.dst_nid = p->numa_preferred_nid;
1291 taskimp = task_weight(p, env.dst_nid) - taskweight;
1292 groupimp = group_weight(p, env.dst_nid) - groupweight;
1293 update_numa_stats(&env.dst_stats, env.dst_nid);
1295 /* If the preferred nid has capacity, try to use it. */
1296 if (env.dst_stats.has_capacity)
1297 task_numa_find_cpu(&env, taskimp, groupimp);
1299 /* No space available on the preferred nid. Look elsewhere. */
1300 if (env.best_cpu == -1) {
1301 for_each_online_node(nid) {
1302 if (nid == env.src_nid || nid == p->numa_preferred_nid)
1305 /* Only consider nodes where both task and groups benefit */
1306 taskimp = task_weight(p, nid) - taskweight;
1307 groupimp = group_weight(p, nid) - groupweight;
1308 if (taskimp < 0 && groupimp < 0)
1312 update_numa_stats(&env.dst_stats, env.dst_nid);
1313 task_numa_find_cpu(&env, taskimp, groupimp);
1317 /* No better CPU than the current one was found. */
1318 if (env.best_cpu == -1)
1321 sched_setnuma(p, env.dst_nid);
1324 * Reset the scan period if the task is being rescheduled on an
1325 * alternative node to recheck if the task is now properly placed.
1327 p->numa_scan_period = task_scan_min(p);
1329 if (env.best_task == NULL) {
1330 int ret = migrate_task_to(p, env.best_cpu);
1334 ret = migrate_swap(p, env.best_task);
1335 put_task_struct(env.best_task);
1339 /* Attempt to migrate a task to a CPU on the preferred node. */
1340 static void numa_migrate_preferred(struct task_struct *p)
1342 /* This task has no NUMA fault statistics yet */
1343 if (unlikely(p->numa_preferred_nid == -1 || !p->numa_faults_memory))
1346 /* Periodically retry migrating the task to the preferred node */
1347 p->numa_migrate_retry = jiffies + HZ;
1349 /* Success if task is already running on preferred CPU */
1350 if (task_node(p) == p->numa_preferred_nid)
1353 /* Otherwise, try migrate to a CPU on the preferred node */
1354 task_numa_migrate(p);
1358 * Find the nodes on which the workload is actively running. We do this by
1359 * tracking the nodes from which NUMA hinting faults are triggered. This can
1360 * be different from the set of nodes where the workload's memory is currently located.
1363 * The bitmask is used to make smarter decisions on when to do NUMA page
1364 * migrations. To prevent flip-flopping and excessive page migrations, nodes
1365 * are added when they cause over 6/16 of the maximum number of faults, but
1366 * only removed when they drop below 3/16.
1368 static void update_numa_active_node_mask(struct numa_group *numa_group)
1370 unsigned long faults, max_faults = 0;
1373 for_each_online_node(nid) {
1374 faults = group_faults_cpu(numa_group, nid);
1375 if (faults > max_faults)
1376 max_faults = faults;
1379 for_each_online_node(nid) {
1380 faults = group_faults_cpu(numa_group, nid);
1381 if (!node_isset(nid, numa_group->active_nodes)) {
1382 if (faults > max_faults * 6 / 16)
1383 node_set(nid, numa_group->active_nodes);
1384 } else if (faults < max_faults * 3 / 16)
1385 node_clear(nid, numa_group->active_nodes);
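/*
 * Illustrative sketch (not part of the original file): the 6/16 - 3/16
 * hysteresis used above, for a single node. With max_faults = 1600, a node
 * joins the active set once it generates more than 600 CPU faults and only
 * leaves again when it drops below 300, so a node hovering around 450 keeps
 * whatever state it already had. Names below are hypothetical.
 */
#include <stdbool.h>

static bool example_node_active(bool currently_active,
				unsigned long faults, unsigned long max_faults)
{
	if (!currently_active)
		return faults > max_faults * 6 / 16;	/* join threshold */
	return faults >= max_faults * 3 / 16;		/* leave threshold */
}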
1390 * When adapting the scan rate, the period is divided into NUMA_PERIOD_SLOTS
1391 * increments. The more local the fault statistics are, the higher the scan
1392 * period will be for the next scan window. If the local/remote ratio is below
1393 * NUMA_PERIOD_THRESHOLD (where the range of the ratio is 1..NUMA_PERIOD_SLOTS) the
1394 * scan period will decrease.
1396 #define NUMA_PERIOD_SLOTS 10
1397 #define NUMA_PERIOD_THRESHOLD 3
1400 * Increase the scan period (slow down scanning) if the majority of
1401 * our memory is already on our local node, or if the majority of
1402 * the page accesses are shared with other processes.
1403 * Otherwise, decrease the scan period.
1405 static void update_task_scan_period(struct task_struct *p,
1406 unsigned long shared, unsigned long private)
1408 unsigned int period_slot;
1412 unsigned long remote = p->numa_faults_locality[0];
1413 unsigned long local = p->numa_faults_locality[1];
1416 * If there were no recorded hinting faults then either the task is
1417 * completely idle or all activity is in areas that are not of interest
1418 * to automatic numa balancing. Scan slower.
1420 if (local + shared == 0) {
1421 p->numa_scan_period = min(p->numa_scan_period_max,
1422 p->numa_scan_period << 1);
1424 p->mm->numa_next_scan = jiffies +
1425 msecs_to_jiffies(p->numa_scan_period);
1431 * Prepare to scale scan period relative to the current period.
1432 * == NUMA_PERIOD_THRESHOLD scan period stays the same
1433 * < NUMA_PERIOD_THRESHOLD scan period decreases (scan faster)
1434 * >= NUMA_PERIOD_THRESHOLD scan period increases (scan slower)
1436 period_slot = DIV_ROUND_UP(p->numa_scan_period, NUMA_PERIOD_SLOTS);
1437 ratio = (local * NUMA_PERIOD_SLOTS) / (local + remote);
1438 if (ratio >= NUMA_PERIOD_THRESHOLD) {
1439 int slot = ratio - NUMA_PERIOD_THRESHOLD;
1442 diff = slot * period_slot;
1444 diff = -(NUMA_PERIOD_THRESHOLD - ratio) * period_slot;
1447 * Scale scan rate increases based on sharing. There is an
1448 * inverse relationship between the degree of sharing and
1449 * the adjustment made to the scanning period. Broadly
1450 * speaking the intent is that there is little point
1451 * scanning faster if shared accesses dominate as it may
1452 * simply bounce migrations uselessly
1454 ratio = DIV_ROUND_UP(private * NUMA_PERIOD_SLOTS, (private + shared));
1455 diff = (diff * ratio) / NUMA_PERIOD_SLOTS;
1458 p->numa_scan_period = clamp(p->numa_scan_period + diff,
1459 task_scan_min(p), task_scan_max(p));
1460 memset(p->numa_faults_locality, 0, sizeof(p->numa_faults_locality));
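/*
 * Illustrative sketch (not part of the original file): the arithmetic used
 * by update_task_scan_period() above, without the final clamping to the
 * per-task min/max. With a 1000ms period, 90% local faults (ratio 9 of 10
 * slots) and fully private accesses, diff = (9 - 3) * 100ms = +600ms, so
 * scanning slows down; with only 10% local faults, diff = -(3 - 1) * 100ms
 * = -200ms and scanning speeds up. Assumes at least one fault was recorded
 * in each pair of counters; names below are hypothetical.
 */
static long example_scan_period_diff(unsigned int period_ms,
				     unsigned long local, unsigned long remote,
				     unsigned long private, unsigned long shared)
{
	const int slots = 10;		/* NUMA_PERIOD_SLOTS */
	const int threshold = 3;	/* NUMA_PERIOD_THRESHOLD */
	unsigned int period_slot = (period_ms + slots - 1) / slots;
	int ratio = (int)(local * slots / (local + remote));
	long diff;

	if (ratio >= threshold) {
		int slot = ratio - threshold;

		if (!slot)
			slot = 1;
		diff = (long)slot * period_slot;		/* scan slower */
	} else {
		diff = -(long)(threshold - ratio) * period_slot; /* scan faster */
	}

	/* scale the change down as the share of shared accesses grows */
	ratio = (int)((private * slots + private + shared - 1) / (private + shared));
	return diff * ratio / slots;
}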
1464 * Get the fraction of time the task has been running since the last
1465 * NUMA placement cycle. The scheduler keeps similar statistics, but
1466 * decays those on a 32ms period, which is orders of magnitude off
1467 * from the dozens-of-seconds NUMA balancing period. Use the scheduler
1468 * stats only if the task is so new there are no NUMA statistics yet.
1470 static u64 numa_get_avg_runtime(struct task_struct *p, u64 *period)
1472 u64 runtime, delta, now;
1473 /* Use the start of this time slice to avoid calculations. */
1474 now = p->se.exec_start;
1475 runtime = p->se.sum_exec_runtime;
1477 if (p->last_task_numa_placement) {
1478 delta = runtime - p->last_sum_exec_runtime;
1479 *period = now - p->last_task_numa_placement;
1481 delta = p->se.avg.runnable_avg_sum;
1482 *period = p->se.avg.runnable_avg_period;
1485 p->last_sum_exec_runtime = runtime;
1486 p->last_task_numa_placement = now;
1491 static void task_numa_placement(struct task_struct *p)
1493 int seq, nid, max_nid = -1, max_group_nid = -1;
1494 unsigned long max_faults = 0, max_group_faults = 0;
1495 unsigned long fault_types[2] = { 0, 0 };
1496 unsigned long total_faults;
1497 u64 runtime, period;
1498 spinlock_t *group_lock = NULL;
1500 seq = ACCESS_ONCE(p->mm->numa_scan_seq);
1501 if (p->numa_scan_seq == seq)
1503 p->numa_scan_seq = seq;
1504 p->numa_scan_period_max = task_scan_max(p);
1506 total_faults = p->numa_faults_locality[0] +
1507 p->numa_faults_locality[1];
1508 runtime = numa_get_avg_runtime(p, &period);
1510 /* If the task is part of a group prevent parallel updates to group stats */
1511 if (p->numa_group) {
1512 group_lock = &p->numa_group->lock;
1513 spin_lock(group_lock);
1516 /* Find the node with the highest number of faults */
1517 for_each_online_node(nid) {
1518 unsigned long faults = 0, group_faults = 0;
1521 for (priv = 0; priv < NR_NUMA_HINT_FAULT_TYPES; priv++) {
1522 long diff, f_diff, f_weight;
1524 i = task_faults_idx(nid, priv);
1526 /* Decay existing window, copy faults since last scan */
1527 diff = p->numa_faults_buffer_memory[i] - p->numa_faults_memory[i] / 2;
1528 fault_types[priv] += p->numa_faults_buffer_memory[i];
1529 p->numa_faults_buffer_memory[i] = 0;
1532 * Normalize the CPU faults, so all tasks in a group
1533 * count according to CPU use, instead of by the raw
1534 * number of faults. Tasks with little runtime have
1535 * little overall impact on throughput, and thus their
1536 * faults are less important.
1538 f_weight = div64_u64(runtime << 16, period + 1);
1539 f_weight = (f_weight * p->numa_faults_buffer_cpu[i]) /
1541 f_diff = f_weight - p->numa_faults_cpu[i] / 2;
1542 p->numa_faults_buffer_cpu[i] = 0;
1544 p->numa_faults_memory[i] += diff;
1545 p->numa_faults_cpu[i] += f_diff;
1546 faults += p->numa_faults_memory[i];
1547 p->total_numa_faults += diff;
1548 if (p->numa_group) {
1549 /* safe because we can only change our own group */
1550 p->numa_group->faults[i] += diff;
1551 p->numa_group->faults_cpu[i] += f_diff;
1552 p->numa_group->total_faults += diff;
1553 group_faults += p->numa_group->faults[i];
1557 if (faults > max_faults) {
1558 max_faults = faults;
1562 if (group_faults > max_group_faults) {
1563 max_group_faults = group_faults;
1564 max_group_nid = nid;
1568 update_task_scan_period(p, fault_types[0], fault_types[1]);
1570 if (p->numa_group) {
1571 update_numa_active_node_mask(p->numa_group);
1573 * If the preferred task and group nids are different,
1574 * iterate over the nodes again to find the best place.
1576 if (max_nid != max_group_nid) {
1577 unsigned long weight, max_weight = 0;
1579 for_each_online_node(nid) {
1580 weight = task_weight(p, nid) + group_weight(p, nid);
1581 if (weight > max_weight) {
1582 max_weight = weight;
1588 spin_unlock(group_lock);
1591 /* Preferred node as the node with the most faults */
1592 if (max_faults && max_nid != p->numa_preferred_nid) {
1593 /* Update the preferred nid and migrate task if possible */
1594 sched_setnuma(p, max_nid);
1595 numa_migrate_preferred(p);
1599 static inline int get_numa_group(struct numa_group *grp)
1601 return atomic_inc_not_zero(&grp->refcount);
1604 static inline void put_numa_group(struct numa_group *grp)
1606 if (atomic_dec_and_test(&grp->refcount))
1607 kfree_rcu(grp, rcu);
1610 static void task_numa_group(struct task_struct *p, int cpupid, int flags,
1613 struct numa_group *grp, *my_grp;
1614 struct task_struct *tsk;
1616 int cpu = cpupid_to_cpu(cpupid);
1619 if (unlikely(!p->numa_group)) {
1620 unsigned int size = sizeof(struct numa_group) +
1621 4*nr_node_ids*sizeof(unsigned long);
1623 grp = kzalloc(size, GFP_KERNEL | __GFP_NOWARN);
1627 atomic_set(&grp->refcount, 1);
1628 spin_lock_init(&grp->lock);
1629 INIT_LIST_HEAD(&grp->task_list);
1631 /* Second half of the array tracks nids where faults happen */
1632 grp->faults_cpu = grp->faults + NR_NUMA_HINT_FAULT_TYPES *
1635 node_set(task_node(current), grp->active_nodes);
1637 for (i = 0; i < NR_NUMA_HINT_FAULT_STATS * nr_node_ids; i++)
1638 grp->faults[i] = p->numa_faults_memory[i];
1640 grp->total_faults = p->total_numa_faults;
1642 list_add(&p->numa_entry, &grp->task_list);
1644 rcu_assign_pointer(p->numa_group, grp);
1648 tsk = ACCESS_ONCE(cpu_rq(cpu)->curr);
1650 if (!cpupid_match_pid(tsk, cpupid))
1653 grp = rcu_dereference(tsk->numa_group);
1657 my_grp = p->numa_group;
1662 * Only join the other group if it's bigger; if we're the bigger group,
1663 * the other task will join us.
1665 if (my_grp->nr_tasks > grp->nr_tasks)
1669 * Tie-break on the grp address.
1671 if (my_grp->nr_tasks == grp->nr_tasks && my_grp > grp)
1674 /* Always join threads in the same process. */
1675 if (tsk->mm == current->mm)
1678 /* Simple filter to avoid false positives due to PID collisions */
1679 if (flags & TNF_SHARED)
1682 /* Update priv based on whether false sharing was detected */
1685 if (join && !get_numa_group(grp))
1693 double_lock(&my_grp->lock, &grp->lock);
1695 for (i = 0; i < NR_NUMA_HINT_FAULT_STATS * nr_node_ids; i++) {
1696 my_grp->faults[i] -= p->numa_faults_memory[i];
1697 grp->faults[i] += p->numa_faults_memory[i];
1699 my_grp->total_faults -= p->total_numa_faults;
1700 grp->total_faults += p->total_numa_faults;
1702 list_move(&p->numa_entry, &grp->task_list);
1706 spin_unlock(&my_grp->lock);
1707 spin_unlock(&grp->lock);
1709 rcu_assign_pointer(p->numa_group, grp);
1711 put_numa_group(my_grp);
1719 void task_numa_free(struct task_struct *p)
1721 struct numa_group *grp = p->numa_group;
1723 void *numa_faults = p->numa_faults_memory;
1726 spin_lock(&grp->lock);
1727 for (i = 0; i < NR_NUMA_HINT_FAULT_STATS * nr_node_ids; i++)
1728 grp->faults[i] -= p->numa_faults_memory[i];
1729 grp->total_faults -= p->total_numa_faults;
1731 list_del(&p->numa_entry);
1733 spin_unlock(&grp->lock);
1734 rcu_assign_pointer(p->numa_group, NULL);
1735 put_numa_group(grp);
1738 p->numa_faults_memory = NULL;
1739 p->numa_faults_buffer_memory = NULL;
1740 p->numa_faults_cpu = NULL;
1741 p->numa_faults_buffer_cpu = NULL;
1746 * Got a PROT_NONE fault for a page on @node.
1748 void task_numa_fault(int last_cpupid, int mem_node, int pages, int flags)
1750 struct task_struct *p = current;
1751 bool migrated = flags & TNF_MIGRATED;
1752 int cpu_node = task_node(current);
1755 if (!numabalancing_enabled)
1758 /* for example, ksmd faulting in a user's mm */
1762 /* Do not worry about placement if exiting */
1763 if (p->state == TASK_DEAD)
1766 /* Allocate buffer to track faults on a per-node basis */
1767 if (unlikely(!p->numa_faults_memory)) {
1768 int size = sizeof(*p->numa_faults_memory) *
1769 NR_NUMA_HINT_FAULT_BUCKETS * nr_node_ids;
1771 p->numa_faults_memory = kzalloc(size, GFP_KERNEL|__GFP_NOWARN);
1772 if (!p->numa_faults_memory)
1775 BUG_ON(p->numa_faults_buffer_memory);
1777 * The averaged statistics, shared & private, memory & cpu,
1778 * occupy the first half of the array. The second half of the
1779 * array is for current counters, which are averaged into the
1780 * first set by task_numa_placement.
1782 p->numa_faults_cpu = p->numa_faults_memory + (2 * nr_node_ids);
1783 p->numa_faults_buffer_memory = p->numa_faults_memory + (4 * nr_node_ids);
1784 p->numa_faults_buffer_cpu = p->numa_faults_memory + (6 * nr_node_ids);
1785 p->total_numa_faults = 0;
1786 memset(p->numa_faults_locality, 0, sizeof(p->numa_faults_locality));
1790 * First accesses are treated as private; otherwise, consider accesses
1791 * to be private if the accessing pid has not changed.
1793 if (unlikely(last_cpupid == (-1 & LAST_CPUPID_MASK))) {
1796 priv = cpupid_match_pid(p, last_cpupid);
1797 if (!priv && !(flags & TNF_NO_GROUP))
1798 task_numa_group(p, last_cpupid, flags, &priv);
1801 task_numa_placement(p);
1804 * Retry the task-to-preferred-node migration periodically, in case it
1805 * previously failed, or the scheduler moved us.
1807 if (time_after(jiffies, p->numa_migrate_retry))
1808 numa_migrate_preferred(p);
1811 p->numa_pages_migrated += pages;
1813 p->numa_faults_buffer_memory[task_faults_idx(mem_node, priv)] += pages;
1814 p->numa_faults_buffer_cpu[task_faults_idx(cpu_node, priv)] += pages;
1815 p->numa_faults_locality[!!(flags & TNF_FAULT_LOCAL)] += pages;
1818 static void reset_ptenuma_scan(struct task_struct *p)
1820 ACCESS_ONCE(p->mm->numa_scan_seq)++;
1821 p->mm->numa_scan_offset = 0;
1825 * The expensive part of numa migration is done from task_work context.
1826 * Triggered from task_tick_numa().
1828 void task_numa_work(struct callback_head *work)
1830 unsigned long migrate, next_scan, now = jiffies;
1831 struct task_struct *p = current;
1832 struct mm_struct *mm = p->mm;
1833 struct vm_area_struct *vma;
1834 unsigned long start, end;
1835 unsigned long nr_pte_updates = 0;
1838 WARN_ON_ONCE(p != container_of(work, struct task_struct, numa_work));
1840 work->next = work; /* protect against double add */
1842 * Who cares about NUMA placement when they're dying.
1844 * NOTE: make sure not to dereference p->mm before this check,
1845 * exit_task_work() happens _after_ exit_mm() so we could be called
1846 * without p->mm even though we still had it when we enqueued this work.
1849 if (p->flags & PF_EXITING)
1852 if (!mm->numa_next_scan) {
1853 mm->numa_next_scan = now +
1854 msecs_to_jiffies(sysctl_numa_balancing_scan_delay);
1858 * Enforce maximal scan/migration frequency.
1860 migrate = mm->numa_next_scan;
1861 if (time_before(now, migrate))
1864 if (p->numa_scan_period == 0) {
1865 p->numa_scan_period_max = task_scan_max(p);
1866 p->numa_scan_period = task_scan_min(p);
1869 next_scan = now + msecs_to_jiffies(p->numa_scan_period);
1870 if (cmpxchg(&mm->numa_next_scan, migrate, next_scan) != migrate)
1874 * Delay this task enough that another task of this mm will likely win
1875 * the next time around.
1877 p->node_stamp += 2 * TICK_NSEC;
1879 start = mm->numa_scan_offset;
1880 pages = sysctl_numa_balancing_scan_size;
1881 pages <<= 20 - PAGE_SHIFT; /* MB in pages */
1885 down_read(&mm->mmap_sem);
1886 vma = find_vma(mm, start);
1888 reset_ptenuma_scan(p);
1892 for (; vma; vma = vma->vm_next) {
1893 if (!vma_migratable(vma) || !vma_policy_mof(p, vma))
1897 * Shared library pages mapped by multiple processes are not
1898 * migrated as it is expected they are cache replicated. Avoid
1899 * hinting faults in read-only file-backed mappings or the vdso
1900 * as migrating the pages will be of marginal benefit.
1903 (vma->vm_file && (vma->vm_flags & (VM_READ|VM_WRITE)) == (VM_READ)))
1907 * Skip inaccessible VMAs to avoid any confusion between
1908 * PROT_NONE and NUMA hinting ptes
1910 if (!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)))
1914 start = max(start, vma->vm_start);
1915 end = ALIGN(start + (pages << PAGE_SHIFT), HPAGE_SIZE);
1916 end = min(end, vma->vm_end);
1917 nr_pte_updates += change_prot_numa(vma, start, end);
1920 * Scan sysctl_numa_balancing_scan_size but ensure that
1921 * at least one PTE is updated so that unused virtual
1922 * address space is quickly skipped.
1925 pages -= (end - start) >> PAGE_SHIFT;
1930 } while (end != vma->vm_end);
1935 * It is possible to reach the end of the VMA list but the last few
1936 * VMAs are not guaranteed to be migratable. If they are not, we
1937 * would find the !migratable VMA on the next scan but not reset the
1938 * scanner to the start, so check it now.
1941 mm->numa_scan_offset = start;
1943 reset_ptenuma_scan(p);
1944 up_read(&mm->mmap_sem);
1948 * Drive the periodic memory faults..
1950 void task_tick_numa(struct rq *rq, struct task_struct *curr)
1952 struct callback_head *work = &curr->numa_work;
1956 * We don't care about NUMA placement if we don't have memory.
1958 if (!curr->mm || (curr->flags & PF_EXITING) || work->next != work)
1962 * Using runtime rather than walltime has the dual advantage that
1963 * we (mostly) drive the selection from busy threads and that the
1964 * task needs to have done some actual work before we bother with NUMA placement.
1967 now = curr->se.sum_exec_runtime;
1968 period = (u64)curr->numa_scan_period * NSEC_PER_MSEC;
1970 if (now - curr->node_stamp > period) {
1971 if (!curr->node_stamp)
1972 curr->numa_scan_period = task_scan_min(curr);
1973 curr->node_stamp += period;
1975 if (!time_before(jiffies, curr->mm->numa_next_scan)) {
1976 init_task_work(work, task_numa_work); /* TODO: move this into sched_fork() */
1977 task_work_add(curr, work, true);
1982 static void task_tick_numa(struct rq *rq, struct task_struct *curr)
1986 static inline void account_numa_enqueue(struct rq *rq, struct task_struct *p)
1990 static inline void account_numa_dequeue(struct rq *rq, struct task_struct *p)
1993 #endif /* CONFIG_NUMA_BALANCING */
1996 account_entity_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
1998 update_load_add(&cfs_rq->load, se->load.weight);
1999 if (!parent_entity(se))
2000 update_load_add(&rq_of(cfs_rq)->load, se->load.weight);
2002 if (entity_is_task(se)) {
2003 struct rq *rq = rq_of(cfs_rq);
2005 account_numa_enqueue(rq, task_of(se));
2006 list_add(&se->group_node, &rq->cfs_tasks);
2009 cfs_rq->nr_running++;
2013 account_entity_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
2015 update_load_sub(&cfs_rq->load, se->load.weight);
2016 if (!parent_entity(se))
2017 update_load_sub(&rq_of(cfs_rq)->load, se->load.weight);
2018 if (entity_is_task(se)) {
2019 account_numa_dequeue(rq_of(cfs_rq), task_of(se));
2020 list_del_init(&se->group_node);
2022 cfs_rq->nr_running--;
2025 #ifdef CONFIG_FAIR_GROUP_SCHED
2027 static inline long calc_tg_weight(struct task_group *tg, struct cfs_rq *cfs_rq)
2032 * Use this CPU's actual weight instead of the last load_contribution
2033 * to gain a more accurate current total weight. See
2034 * update_cfs_rq_load_contribution().
2036 tg_weight = atomic_long_read(&tg->load_avg);
2037 tg_weight -= cfs_rq->tg_load_contrib;
2038 tg_weight += cfs_rq->load.weight;
2043 static long calc_cfs_shares(struct cfs_rq *cfs_rq, struct task_group *tg)
2045 long tg_weight, load, shares;
2047 tg_weight = calc_tg_weight(tg, cfs_rq);
2048 load = cfs_rq->load.weight;
2050 shares = (tg->shares * load);
2052 shares /= tg_weight;
2054 if (shares < MIN_SHARES)
2055 shares = MIN_SHARES;
2056 if (shares > tg->shares)
2057 shares = tg->shares;
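/*
 * Illustrative sketch (not part of the original file): the share calculation
 * above in isolation. A group configured with tg->shares = 1024 whose weight
 * is spread over two CPUs, 3072 on this CPU out of 4096 overall, gets
 * 1024 * 3072 / 4096 = 768 on this CPU, clamped to the [MIN_SHARES, shares]
 * range. Names below are hypothetical; MIN_SHARES is taken as 2 here.
 */
static long example_calc_cfs_shares(long tg_shares, long cfs_rq_load,
				    long tg_weight)
{
	const long min_shares = 2;		/* assumed MIN_SHARES */
	long shares = tg_shares * cfs_rq_load;

	if (tg_weight)
		shares /= tg_weight;

	if (shares < min_shares)
		shares = min_shares;
	if (shares > tg_shares)
		shares = tg_shares;
	return shares;
}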
2061 # else /* CONFIG_SMP */
2062 static inline long calc_cfs_shares(struct cfs_rq *cfs_rq, struct task_group *tg)
2066 # endif /* CONFIG_SMP */
2067 static void reweight_entity(struct cfs_rq *cfs_rq, struct sched_entity *se,
2068 unsigned long weight)
2071 /* commit outstanding execution time */
2072 if (cfs_rq->curr == se)
2073 update_curr(cfs_rq);
2074 account_entity_dequeue(cfs_rq, se);
2077 update_load_set(&se->load, weight);
2080 account_entity_enqueue(cfs_rq, se);
2083 static inline int throttled_hierarchy(struct cfs_rq *cfs_rq);
2085 static void update_cfs_shares(struct cfs_rq *cfs_rq)
2087 struct task_group *tg;
2088 struct sched_entity *se;
2092 se = tg->se[cpu_of(rq_of(cfs_rq))];
2093 if (!se || throttled_hierarchy(cfs_rq))
2096 if (likely(se->load.weight == tg->shares))
2099 shares = calc_cfs_shares(cfs_rq, tg);
2101 reweight_entity(cfs_rq_of(se), se, shares);
2103 #else /* CONFIG_FAIR_GROUP_SCHED */
2104 static inline void update_cfs_shares(struct cfs_rq *cfs_rq)
2107 #endif /* CONFIG_FAIR_GROUP_SCHED */
2111 * We choose a half-life close to 1 scheduling period.
2112 * Note: The tables below are dependent on this value.
2114 #define LOAD_AVG_PERIOD 32
2115 #define LOAD_AVG_MAX 47742 /* maximum possible load avg */
2116 #define LOAD_AVG_MAX_N 345 /* number of full periods to produce LOAD_AVG_MAX */
2118 /* Precomputed fixed inverse multiplies for multiplication by y^n */
2119 static const u32 runnable_avg_yN_inv[] = {
2120 0xffffffff, 0xfa83b2da, 0xf5257d14, 0xefe4b99a, 0xeac0c6e6, 0xe5b906e6,
2121 0xe0ccdeeb, 0xdbfbb796, 0xd744fcc9, 0xd2a81d91, 0xce248c14, 0xc9b9bd85,
2122 0xc5672a10, 0xc12c4cc9, 0xbd08a39e, 0xb8fbaf46, 0xb504f333, 0xb123f581,
2123 0xad583ee9, 0xa9a15ab4, 0xa5fed6a9, 0xa2704302, 0x9ef5325f, 0x9b8d39b9,
2124 0x9837f050, 0x94f4efa8, 0x91c3d373, 0x8ea4398a, 0x8b95c1e3, 0x88980e80,
2125 0x85aac367, 0x82cd8698,
2129 * Precomputed \Sum y^k { 1<=k<=n }. These are floor(true_value) to prevent
2130 * over-estimates when re-combining.
2132 static const u32 runnable_avg_yN_sum[] = {
2133 0, 1002, 1982, 2941, 3880, 4798, 5697, 6576, 7437, 8279, 9103,
2134 9909,10698,11470,12226,12966,13690,14398,15091,15769,16433,17082,
2135 17718,18340,18949,19545,20128,20698,21256,21802,22336,22859,23371,
2140 * val * y^n, where y^32 ~= 0.5 (~1 scheduling period)
2142 static __always_inline u64 decay_load(u64 val, u64 n)
2144 unsigned int local_n;
2148 else if (unlikely(n > LOAD_AVG_PERIOD * 63))
2151 /* after bounds checking we can collapse to 32-bit */
2155 * As y^PERIOD = 1/2, we can combine
2156 * y^n = 1/2^(n/PERIOD) * y^(n%PERIOD)
2157 * With a look-up table which covers y^n (n < PERIOD)
2159 * To achieve constant time decay_load.
2161 if (unlikely(local_n >= LOAD_AVG_PERIOD)) {
2162 val >>= local_n / LOAD_AVG_PERIOD;
2163 local_n %= LOAD_AVG_PERIOD;
2166 val *= runnable_avg_yN_inv[local_n];
2167 /* We don't use SRR here since we always want to round down. */
2172 * For updates fully spanning n periods, the contribution to runnable
2173 * average will be: \Sum 1024*y^n
2175 * We can compute this reasonably efficiently by combining:
2176 * y^PERIOD = 1/2 with precomputed \Sum 1024*y^n {for n <PERIOD}
2178 static u32 __compute_runnable_contrib(u64 n)
2182 if (likely(n <= LOAD_AVG_PERIOD))
2183 return runnable_avg_yN_sum[n];
2184 else if (unlikely(n >= LOAD_AVG_MAX_N))
2185 return LOAD_AVG_MAX;
2187 /* Compute \Sum y^n by combining precomputed values for y^i and partial sums \Sum y^j */
2189 contrib /= 2; /* y^LOAD_AVG_PERIOD = 1/2 */
2190 contrib += runnable_avg_yN_sum[LOAD_AVG_PERIOD];
2192 n -= LOAD_AVG_PERIOD;
2193 } while (n > LOAD_AVG_PERIOD);
2195 contrib = decay_load(contrib, n);
2196 return contrib + runnable_avg_yN_sum[n];
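/*
 * Illustrative sketch (not part of the original file): what the fixed-point
 * table machinery above computes, written with floating point for clarity.
 * y is chosen so that y^32 = 0.5, i.e. y = 0.5^(1/32) ~= 0.9786, so a load
 * contribution loses half its weight every 32 periods (~32ms). Names below
 * are hypothetical; link with -lm for pow().
 */
#include <math.h>

static double example_decay_load(double val, unsigned int periods)
{
	double y = pow(0.5, 1.0 / 32.0);	/* y^32 == 0.5 */

	/* e.g. val = 1024 decayed over 32 periods yields ~512 */
	return val * pow(y, periods);
}

/* Sum of the fully spanned periods: 1024*y + 1024*y^2 + ... + 1024*y^n */
static double example_runnable_contrib(unsigned int n)
{
	double y = pow(0.5, 1.0 / 32.0);

	/* geometric series: each additional period adds a smaller amount */
	return 1024.0 * y * (1.0 - pow(y, n)) / (1.0 - y);
}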
2200 * We can represent the historical contribution to runnable average as the
2201 * coefficients of a geometric series. To do this we sub-divide our runnable
2202 * history into segments of approximately 1ms (1024us); label the segment that
2203 * occurred N-ms ago p_N, with p_0 corresponding to the current period, e.g.
2205 * [<- 1024us ->|<- 1024us ->|<- 1024us ->| ...
2207 * (now) (~1ms ago) (~2ms ago)
2209 * Let u_i denote the fraction of p_i that the entity was runnable.
2211 * We then designate the fractions u_i as our co-efficients, yielding the
2212 * following representation of historical load:
2213 * u_0 + u_1*y + u_2*y^2 + u_3*y^3 + ...
2215 * We choose y based on the width of a reasonable scheduling period, fixing:
2218 * This means that the contribution to load ~32ms ago (u_32) will be weighted
2219 * approximately half as much as the contribution to load within the last ms (u_0).
2222 * When a period "rolls over" and we have new u_0`, multiplying the previous
2223 * sum again by y is sufficient to update:
2224 * load_avg = u_0` + y*(u_0 + u_1*y + u_2*y^2 + ... )
2225 * = u_0 + u_1*y + u_2*y^2 + ... [re-labeling u_i --> u_{i+1}]
2227 static __always_inline int __update_entity_runnable_avg(u64 now,
2228 struct sched_avg *sa,
2232 u32 runnable_contrib;
2233 int delta_w, decayed = 0;
2235 delta = now - sa->last_runnable_update;
2237 * This should only happen when time goes backwards, which it
2238 * unfortunately does during sched clock init when we swap over to TSC.
2240 if ((s64)delta < 0) {
2241 sa->last_runnable_update = now;
2246 * Use 1024ns as the unit of measurement since it's a reasonable
2247 * approximation of 1us and fast to compute.
2252 sa->last_runnable_update = now;
2254 /* delta_w is the amount already accumulated against our next period */
2255 delta_w = sa->runnable_avg_period % 1024;
2256 if (delta + delta_w >= 1024) {
2257 /* period roll-over */
2261 * Now that we know we're crossing a period boundary, figure
2262 * out how much from delta we need to complete the current
2263 * period and accrue it.
2265 delta_w = 1024 - delta_w;
2267 sa->runnable_avg_sum += delta_w;
2268 sa->runnable_avg_period += delta_w;
2272 /* Figure out how many additional periods this update spans */
2273 periods = delta / 1024;
2276 sa->runnable_avg_sum = decay_load(sa->runnable_avg_sum,
2278 sa->runnable_avg_period = decay_load(sa->runnable_avg_period,
2281 /* Efficiently calculate \sum (1..n_period) 1024*y^i */
2282 runnable_contrib = __compute_runnable_contrib(periods);
2284 sa->runnable_avg_sum += runnable_contrib;
2285 sa->runnable_avg_period += runnable_contrib;
2288 /* Remainder of delta accrued against u_0` */
2290 sa->runnable_avg_sum += delta;
2291 sa->runnable_avg_period += delta;
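/*
 * Worked example (illustrative): suppose 300 units (~us) already sit in the
 * current 1024-unit segment and the new delta is 3500 units.  The roll-over
 * path first closes the segment with 1024 - 300 = 724 units, leaving 2776;
 * that spans periods = 2 whole segments with 728 units left over.  Both sums
 * are decayed across the boundary, the two whole segments add
 * __compute_runnable_contrib(2) (~1024*y + 1024*y^2 ~= 1982) to the runnable
 * side when the entity was runnable, and the trailing 728 units accrue
 * against the new current period u_0`.
 */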
2296 /* Synchronize an entity's decay with its parenting cfs_rq.*/
2297 static inline u64 __synchronize_entity_decay(struct sched_entity *se)
2299 struct cfs_rq *cfs_rq = cfs_rq_of(se);
2300 u64 decays = atomic64_read(&cfs_rq->decay_counter);
2302 decays -= se->avg.decay_count;
2306 se->avg.load_avg_contrib = decay_load(se->avg.load_avg_contrib, decays);
2307 se->avg.decay_count = 0;
2312 #ifdef CONFIG_FAIR_GROUP_SCHED
2313 static inline void __update_cfs_rq_tg_load_contrib(struct cfs_rq *cfs_rq,
2316 struct task_group *tg = cfs_rq->tg;
2319 tg_contrib = cfs_rq->runnable_load_avg + cfs_rq->blocked_load_avg;
2320 tg_contrib -= cfs_rq->tg_load_contrib;
2322 if (force_update || abs(tg_contrib) > cfs_rq->tg_load_contrib / 8) {
2323 atomic_long_add(tg_contrib, &tg->load_avg);
2324 cfs_rq->tg_load_contrib += tg_contrib;
2329 * Aggregate cfs_rq runnable averages into an equivalent task_group
2330 * representation for computing load contributions.
2332 static inline void __update_tg_runnable_avg(struct sched_avg *sa,
2333 struct cfs_rq *cfs_rq)
2335 struct task_group *tg = cfs_rq->tg;
2338 /* The fraction of a cpu used by this cfs_rq */
2339 contrib = div_u64((u64)sa->runnable_avg_sum << NICE_0_SHIFT,
2340 sa->runnable_avg_period + 1);
2341 contrib -= cfs_rq->tg_runnable_contrib;
2343 if (abs(contrib) > cfs_rq->tg_runnable_contrib / 64) {
2344 atomic_add(contrib, &tg->runnable_avg);
2345 cfs_rq->tg_runnable_contrib += contrib;
2349 static inline void __update_group_entity_contrib(struct sched_entity *se)
2351 struct cfs_rq *cfs_rq = group_cfs_rq(se);
2352 struct task_group *tg = cfs_rq->tg;
2357 contrib = cfs_rq->tg_load_contrib * tg->shares;
2358 se->avg.load_avg_contrib = div_u64(contrib,
2359 atomic_long_read(&tg->load_avg) + 1);
2362 * For group entities we need to compute a correction term in the case
2363 * that they are consuming <1 cpu so that we would contribute the same
2364 * load as a task of equal weight.
2366 * Explicitly co-ordinating this measurement would be expensive, but
2367 * fortunately the sum of each cpu's contributions forms a usable
2368 * lower-bound on the true value.
2370 * Consider the aggregate of 2 contributions. Either they are disjoint
2371 * (and the sum represents the true value) or they overlap and we are
2372 * understating by the aggregate of their overlap.
2374 * Extending this to N cpus, for a given overlap, the maximum amount we
2375 * understate is then n_i(n_i+1)/2 * w_i where n_i is the number of
2376 * cpus that overlap for this interval and w_i is the interval width.
2378 * On a small machine, the first term is well-bounded, which bounds the
2379 * total error since w_i is a subset of the period. Whereas on a
2380 * larger machine, while this first term can be larger, if w_i is of
2381 * consequential size it is guaranteed to see n_i*w_i quickly converge to
2382 * our upper bound of 1-cpu.
2384 runnable_avg = atomic_read(&tg->runnable_avg);
2385 if (runnable_avg < NICE_0_LOAD) {
2386 se->avg.load_avg_contrib *= runnable_avg;
2387 se->avg.load_avg_contrib >>= NICE_0_SHIFT;
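/*
 * Worked example (illustrative): NICE_0_LOAD is effectively 1 << NICE_0_SHIFT
 * here, so a group whose queues are, summed over all cpus, runnable about a
 * quarter of the time has tg->runnable_avg ~= NICE_0_LOAD / 4 and the group
 * entity's load_avg_contrib computed above is scaled to ~25%, mirroring what
 * a single task of equal weight running 25% of the time would contribute.
 */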
2391 static inline void __update_cfs_rq_tg_load_contrib(struct cfs_rq *cfs_rq,
2392 int force_update) {}
2393 static inline void __update_tg_runnable_avg(struct sched_avg *sa,
2394 struct cfs_rq *cfs_rq) {}
2395 static inline void __update_group_entity_contrib(struct sched_entity *se) {}
2398 static inline void __update_task_entity_contrib(struct sched_entity *se)
2402 /* avoid overflowing a 32-bit type w/ SCHED_LOAD_SCALE */
2403 contrib = se->avg.runnable_avg_sum * scale_load_down(se->load.weight);
2404 contrib /= (se->avg.runnable_avg_period + 1);
2405 se->avg.load_avg_contrib = scale_load(contrib);
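/*
 * Worked example (illustrative): in scale_load_down() units a nice-0 task has
 * weight 1024, so one that has historically been runnable half of the time
 * (runnable_avg_sum ~= runnable_avg_period / 2) contributes ~512, while an
 * always-runnable task converges on the full 1024 (scaled back up by
 * scale_load() for the stored contribution).
 */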
2408 /* Compute the current contribution to load_avg by se, return any delta */
2409 static long __update_entity_load_avg_contrib(struct sched_entity *se)
2411 long old_contrib = se->avg.load_avg_contrib;
2413 if (entity_is_task(se)) {
2414 __update_task_entity_contrib(se);
2416 __update_tg_runnable_avg(&se->avg, group_cfs_rq(se));
2417 __update_group_entity_contrib(se);
2420 return se->avg.load_avg_contrib - old_contrib;
2423 static inline void subtract_blocked_load_contrib(struct cfs_rq *cfs_rq,
2426 if (likely(load_contrib < cfs_rq->blocked_load_avg))
2427 cfs_rq->blocked_load_avg -= load_contrib;
2429 cfs_rq->blocked_load_avg = 0;
2432 static inline u64 cfs_rq_clock_task(struct cfs_rq *cfs_rq);
2434 /* Update a sched_entity's runnable average */
2435 static inline void update_entity_load_avg(struct sched_entity *se,
2438 struct cfs_rq *cfs_rq = cfs_rq_of(se);
2443 * For a group entity we need to use the cfs_rq_clock_task() of its owned
2444 * cfs_rq, in case it is the parent of a throttled hierarchy.
2446 if (entity_is_task(se))
2447 now = cfs_rq_clock_task(cfs_rq);
2449 now = cfs_rq_clock_task(group_cfs_rq(se));
2451 if (!__update_entity_runnable_avg(now, &se->avg, se->on_rq))
2454 contrib_delta = __update_entity_load_avg_contrib(se);
2460 cfs_rq->runnable_load_avg += contrib_delta;
2462 subtract_blocked_load_contrib(cfs_rq, -contrib_delta);
2466 * Decay the load contributed by all blocked children and account this so that
2467 * their contribution may be appropriately discounted when they wake up.
2469 static void update_cfs_rq_blocked_load(struct cfs_rq *cfs_rq, int force_update)
2471 u64 now = cfs_rq_clock_task(cfs_rq) >> 20;
2474 decays = now - cfs_rq->last_decay;
2475 if (!decays && !force_update)
2478 if (atomic_long_read(&cfs_rq->removed_load)) {
2479 unsigned long removed_load;
2480 removed_load = atomic_long_xchg(&cfs_rq->removed_load, 0);
2481 subtract_blocked_load_contrib(cfs_rq, removed_load);
2485 cfs_rq->blocked_load_avg = decay_load(cfs_rq->blocked_load_avg,
2487 atomic64_add(decays, &cfs_rq->decay_counter);
2488 cfs_rq->last_decay = now;
2491 __update_cfs_rq_tg_load_contrib(cfs_rq, force_update);
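/*
 * Illustrative numbers: cfs_rq_clock_task() >> 20 counts ~1ms (2^20 ns)
 * windows, so ~4ms of unthrottled task time since last_decay gives decays ==
 * 4, scales blocked_load_avg by y^4 ~= 0.92 and advances decay_counter by 4,
 * letting entities that were asleep replay the identical decay later via
 * __synchronize_entity_decay().
 */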
2494 static inline void update_rq_runnable_avg(struct rq *rq, int runnable)
2496 __update_entity_runnable_avg(rq_clock_task(rq), &rq->avg, runnable);
2497 __update_tg_runnable_avg(&rq->avg, &rq->cfs);
2500 /* Add the load generated by se into cfs_rq's child load-average */
2501 static inline void enqueue_entity_load_avg(struct cfs_rq *cfs_rq,
2502 struct sched_entity *se,
2506 * We track migrations using entity decay_count <= 0, on a wake-up
2507 * migration we use a negative decay count to track the remote decays
2508 * accumulated while sleeping.
2510 * Newly forked tasks are enqueued with se->avg.decay_count == 0, they
2511 * are seen by enqueue_entity_load_avg() as a migration with an already
2512 * constructed load_avg_contrib.
2514 if (unlikely(se->avg.decay_count <= 0)) {
2515 se->avg.last_runnable_update = rq_clock_task(rq_of(cfs_rq));
2516 if (se->avg.decay_count) {
2518 * In a wake-up migration we have to approximate the
2519 * time sleeping. This is because we can't synchronize
2520 * clock_task between the two cpus, and it is not
2521 * guaranteed to be read-safe. Instead, we can
2522 * approximate this using our carried decays, which are
2523 * explicitly atomically readable.
2525 se->avg.last_runnable_update -= (-se->avg.decay_count)
2527 update_entity_load_avg(se, 0);
2528 /* Indicate that we're now synchronized and on-rq */
2529 se->avg.decay_count = 0;
2533 __synchronize_entity_decay(se);
2536 /* migrated tasks did not contribute to our blocked load */
2538 subtract_blocked_load_contrib(cfs_rq, se->avg.load_avg_contrib);
2539 update_entity_load_avg(se, 0);
2542 cfs_rq->runnable_load_avg += se->avg.load_avg_contrib;
2543 /* we force update consideration on load-balancer moves */
2544 update_cfs_rq_blocked_load(cfs_rq, !wakeup);
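/*
 * Illustrative numbers for the wake-up migration path above: a task that
 * slept through ~50 of its old cfs_rq's ~1ms decay windows arrives here with
 * decay_count == -50.  Rewinding last_runnable_update by roughly those 50
 * windows makes the next update_entity_load_avg() charge approximately the
 * slept time as decay, without requiring the two cpus' clock_task values to
 * be directly comparable.
 */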
2548 * Remove se's load from this cfs_rq child load-average, if the entity is
2549 * transitioning to a blocked state we track its projected decay using
2552 static inline void dequeue_entity_load_avg(struct cfs_rq *cfs_rq,
2553 struct sched_entity *se,
2556 update_entity_load_avg(se, 1);
2557 /* we force update consideration on load-balancer moves */
2558 update_cfs_rq_blocked_load(cfs_rq, !sleep);
2560 cfs_rq->runnable_load_avg -= se->avg.load_avg_contrib;
2562 cfs_rq->blocked_load_avg += se->avg.load_avg_contrib;
2563 se->avg.decay_count = atomic64_read(&cfs_rq->decay_counter);
2564 } /* migrations, e.g. sleep=0 leave decay_count == 0 */
2568 * Update the rq's load with the elapsed running time before entering
2569 * idle. If the last scheduled task is not a CFS task, idle_enter will
2570 * be the only way to update the runnable statistic.
2572 void idle_enter_fair(struct rq *this_rq)
2574 update_rq_runnable_avg(this_rq, 1);
2578 * Update the rq's load with the elapsed idle time before a task is
2579 * scheduled. If the newly scheduled task is not a CFS task, idle_exit will
2580 * be the only way to update the runnable statistic.
2582 void idle_exit_fair(struct rq *this_rq)
2584 update_rq_runnable_avg(this_rq, 0);
2588 static inline void update_entity_load_avg(struct sched_entity *se,
2589 int update_cfs_rq) {}
2590 static inline void update_rq_runnable_avg(struct rq *rq, int runnable) {}
2591 static inline void enqueue_entity_load_avg(struct cfs_rq *cfs_rq,
2592 struct sched_entity *se,
2594 static inline void dequeue_entity_load_avg(struct cfs_rq *cfs_rq,
2595 struct sched_entity *se,
2597 static inline void update_cfs_rq_blocked_load(struct cfs_rq *cfs_rq,
2598 int force_update) {}
2601 static void enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
2603 #ifdef CONFIG_SCHEDSTATS
2604 struct task_struct *tsk = NULL;
2606 if (entity_is_task(se))
2609 if (se->statistics.sleep_start) {
2610 u64 delta = rq_clock(rq_of(cfs_rq)) - se->statistics.sleep_start;
2615 if (unlikely(delta > se->statistics.sleep_max))
2616 se->statistics.sleep_max = delta;
2618 se->statistics.sleep_start = 0;
2619 se->statistics.sum_sleep_runtime += delta;
2622 account_scheduler_latency(tsk, delta >> 10, 1);
2623 trace_sched_stat_sleep(tsk, delta);
2626 if (se->statistics.block_start) {
2627 u64 delta = rq_clock(rq_of(cfs_rq)) - se->statistics.block_start;
2632 if (unlikely(delta > se->statistics.block_max))
2633 se->statistics.block_max = delta;
2635 se->statistics.block_start = 0;
2636 se->statistics.sum_sleep_runtime += delta;
2639 if (tsk->in_iowait) {
2640 se->statistics.iowait_sum += delta;
2641 se->statistics.iowait_count++;
2642 trace_sched_stat_iowait(tsk, delta);
2645 trace_sched_stat_blocked(tsk, delta);
2648 * Blocking time is in units of nanosecs, so shift by
2649 * 20 to get a milliseconds-range estimation of the
2650 * amount of time that the task spent sleeping:
2652 if (unlikely(prof_on == SLEEP_PROFILING)) {
2653 profile_hits(SLEEP_PROFILING,
2654 (void *)get_wchan(tsk),
2657 account_scheduler_latency(tsk, delta >> 10, 0);
2663 static void check_spread(struct cfs_rq *cfs_rq, struct sched_entity *se)
2665 #ifdef CONFIG_SCHED_DEBUG
2666 s64 d = se->vruntime - cfs_rq->min_vruntime;
2671 if (d > 3*sysctl_sched_latency)
2672 schedstat_inc(cfs_rq, nr_spread_over);
2677 place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
2679 u64 vruntime = cfs_rq->min_vruntime;
2682 * The 'current' period is already promised to the current tasks,
2683 * however the extra weight of the new task will slow them down a
2684 * little, place the new task so that it fits in the slot that
2685 * stays open at the end.
2687 if (initial && sched_feat(START_DEBIT))
2688 vruntime += sched_vslice(cfs_rq, se);
2690 /* sleeps up to a single latency don't count. */
2692 unsigned long thresh = sysctl_sched_latency;
2695 * Halve their sleep time's effect, to allow
2696 * for a gentler effect of sleepers:
2698 if (sched_feat(GENTLE_FAIR_SLEEPERS))
2704 /* ensure we never gain time by being placed backwards. */
2705 se->vruntime = max_vruntime(se->vruntime, vruntime);
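/*
 * Worked example (illustrative, default unscaled tunables): a task waking
 * from a long sleep is placed at min_vruntime minus ~3ms of vtime, since
 * GENTLE_FAIR_SLEEPERS halves the ~6ms sched_latency credit; the
 * max_vruntime() clamp above then ensures it can never end up behind where it
 * already was.  A freshly forked task with START_DEBIT instead starts one
 * vslice after min_vruntime, paying for its first slice up front.
 */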
2708 static void check_enqueue_throttle(struct cfs_rq *cfs_rq);
2711 enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
2714 * Update the normalized vruntime before updating min_vruntime
2715 * through calling update_curr().
2717 if (!(flags & ENQUEUE_WAKEUP) || (flags & ENQUEUE_WAKING))
2718 se->vruntime += cfs_rq->min_vruntime;
2721 * Update run-time statistics of the 'current'.
2723 update_curr(cfs_rq);
2724 enqueue_entity_load_avg(cfs_rq, se, flags & ENQUEUE_WAKEUP);
2725 account_entity_enqueue(cfs_rq, se);
2726 update_cfs_shares(cfs_rq);
2728 if (flags & ENQUEUE_WAKEUP) {
2729 place_entity(cfs_rq, se, 0);
2730 enqueue_sleeper(cfs_rq, se);
2733 update_stats_enqueue(cfs_rq, se);
2734 check_spread(cfs_rq, se);
2735 if (se != cfs_rq->curr)
2736 __enqueue_entity(cfs_rq, se);
2739 if (cfs_rq->nr_running == 1) {
2740 list_add_leaf_cfs_rq(cfs_rq);
2741 check_enqueue_throttle(cfs_rq);
2745 static void __clear_buddies_last(struct sched_entity *se)
2747 for_each_sched_entity(se) {
2748 struct cfs_rq *cfs_rq = cfs_rq_of(se);
2749 if (cfs_rq->last == se)
2750 cfs_rq->last = NULL;
2756 static void __clear_buddies_next(struct sched_entity *se)
2758 for_each_sched_entity(se) {
2759 struct cfs_rq *cfs_rq = cfs_rq_of(se);
2760 if (cfs_rq->next == se)
2761 cfs_rq->next = NULL;
2767 static void __clear_buddies_skip(struct sched_entity *se)
2769 for_each_sched_entity(se) {
2770 struct cfs_rq *cfs_rq = cfs_rq_of(se);
2771 if (cfs_rq->skip == se)
2772 cfs_rq->skip = NULL;
2778 static void clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se)
2780 if (cfs_rq->last == se)
2781 __clear_buddies_last(se);
2783 if (cfs_rq->next == se)
2784 __clear_buddies_next(se);
2786 if (cfs_rq->skip == se)
2787 __clear_buddies_skip(se);
2790 static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq);
2793 dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
2796 * Update run-time statistics of the 'current'.
2798 update_curr(cfs_rq);
2799 dequeue_entity_load_avg(cfs_rq, se, flags & DEQUEUE_SLEEP);
2801 update_stats_dequeue(cfs_rq, se);
2802 if (flags & DEQUEUE_SLEEP) {
2803 #ifdef CONFIG_SCHEDSTATS
2804 if (entity_is_task(se)) {
2805 struct task_struct *tsk = task_of(se);
2807 if (tsk->state & TASK_INTERRUPTIBLE)
2808 se->statistics.sleep_start = rq_clock(rq_of(cfs_rq));
2809 if (tsk->state & TASK_UNINTERRUPTIBLE)
2810 se->statistics.block_start = rq_clock(rq_of(cfs_rq));
2815 clear_buddies(cfs_rq, se);
2817 if (se != cfs_rq->curr)
2818 __dequeue_entity(cfs_rq, se);
2820 account_entity_dequeue(cfs_rq, se);
2823 * Normalize the entity after updating the min_vruntime because the
2824 * update can refer to the ->curr item and we need to reflect this
2825 * movement in our normalized position.
2827 if (!(flags & DEQUEUE_SLEEP))
2828 se->vruntime -= cfs_rq->min_vruntime;
2830 /* return excess runtime on last dequeue */
2831 return_cfs_rq_runtime(cfs_rq);
2833 update_min_vruntime(cfs_rq);
2834 update_cfs_shares(cfs_rq);
2838 * Preempt the current task with a newly woken task if needed:
2841 check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
2843 unsigned long ideal_runtime, delta_exec;
2844 struct sched_entity *se;
2847 ideal_runtime = sched_slice(cfs_rq, curr);
2848 delta_exec = curr->sum_exec_runtime - curr->prev_sum_exec_runtime;
2849 if (delta_exec > ideal_runtime) {
2850 resched_task(rq_of(cfs_rq)->curr);
2852 * The current task ran long enough, ensure it doesn't get
2853 * re-elected due to buddy favours.
2855 clear_buddies(cfs_rq, curr);
2860 * Ensure that a task that missed wakeup preemption by a
2861 * narrow margin doesn't have to wait for a full slice.
2862 * This also mitigates buddy induced latencies under load.
2864 if (delta_exec < sysctl_sched_min_granularity)
2867 se = __pick_first_entity(cfs_rq);
2868 delta = curr->vruntime - se->vruntime;
2873 if (delta > ideal_runtime)
2874 resched_task(rq_of(cfs_rq)->curr);
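/*
 * Worked example (illustrative, default unscaled tunables): with two runnable
 * nice-0 tasks sched_slice() is ~3ms, so curr is rescheduled once it has run
 * ~3ms beyond prev_sum_exec_runtime, or earlier if it has already run at
 * least sysctl_sched_min_granularity (~0.75ms) and leads the leftmost entity
 * by more than that ~3ms worth of vruntime.
 */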
2878 set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
2880 /* 'current' is not kept within the tree. */
2883 * Any task has to be enqueued before it gets to execute on
2884 * a CPU. So account for the time it spent waiting on the
2887 update_stats_wait_end(cfs_rq, se);
2888 __dequeue_entity(cfs_rq, se);
2891 update_stats_curr_start(cfs_rq, se);
2893 #ifdef CONFIG_SCHEDSTATS
2895 * Track our maximum slice length, if the CPU's load is at
2896 * least twice that of our own weight (i.e. don't track it
2897 * when there are only lesser-weight tasks around):
2899 if (rq_of(cfs_rq)->load.weight >= 2*se->load.weight) {
2900 se->statistics.slice_max = max(se->statistics.slice_max,
2901 se->sum_exec_runtime - se->prev_sum_exec_runtime);
2904 se->prev_sum_exec_runtime = se->sum_exec_runtime;
2908 wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se);
2911 * Pick the next process, keeping these things in mind, in this order:
2912 * 1) keep things fair between processes/task groups
2913 * 2) pick the "next" process, since someone really wants that to run
2914 * 3) pick the "last" process, for cache locality
2915 * 4) do not run the "skip" process, if something else is available
2917 static struct sched_entity *pick_next_entity(struct cfs_rq *cfs_rq)
2919 struct sched_entity *se = __pick_first_entity(cfs_rq);
2920 struct sched_entity *left = se;
2923 * Avoid running the skip buddy, if running something else can
2924 * be done without getting too unfair.
2926 if (cfs_rq->skip == se) {
2927 struct sched_entity *second = __pick_next_entity(se);
2928 if (second && wakeup_preempt_entity(second, left) < 1)
2933 * Prefer last buddy, try to return the CPU to a preempted task.
2935 if (cfs_rq->last && wakeup_preempt_entity(cfs_rq->last, left) < 1)
2939 * Someone really wants this to run. If it's not unfair, run it.
2941 if (cfs_rq->next && wakeup_preempt_entity(cfs_rq->next, left) < 1)
2944 clear_buddies(cfs_rq, se);
2949 static void check_cfs_rq_runtime(struct cfs_rq *cfs_rq);
2951 static void put_prev_entity(struct cfs_rq *cfs_rq, struct sched_entity *prev)
2954 * If still on the runqueue then deactivate_task()
2955 * was not called and update_curr() has to be done:
2958 update_curr(cfs_rq);
2960 /* throttle cfs_rqs exceeding runtime */
2961 check_cfs_rq_runtime(cfs_rq);
2963 check_spread(cfs_rq, prev);
2965 update_stats_wait_start(cfs_rq, prev);
2966 /* Put 'current' back into the tree. */
2967 __enqueue_entity(cfs_rq, prev);
2968 /* in !on_rq case, update occurred at dequeue */
2969 update_entity_load_avg(prev, 1);
2971 cfs_rq->curr = NULL;
2975 entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr, int queued)
2978 * Update run-time statistics of the 'current'.
2980 update_curr(cfs_rq);
2983 * Ensure that runnable average is periodically updated.
2985 update_entity_load_avg(curr, 1);
2986 update_cfs_rq_blocked_load(cfs_rq, 1);
2987 update_cfs_shares(cfs_rq);
2989 #ifdef CONFIG_SCHED_HRTICK
2991 * queued ticks are scheduled to match the slice, so don't bother
2992 * validating it and just reschedule.
2995 resched_task(rq_of(cfs_rq)->curr);
2999 * don't let the period tick interfere with the hrtick preemption
3001 if (!sched_feat(DOUBLE_TICK) &&
3002 hrtimer_active(&rq_of(cfs_rq)->hrtick_timer))
3006 if (cfs_rq->nr_running > 1)
3007 check_preempt_tick(cfs_rq, curr);
3011 /**************************************************
3012 * CFS bandwidth control machinery
3015 #ifdef CONFIG_CFS_BANDWIDTH
3017 #ifdef HAVE_JUMP_LABEL
3018 static struct static_key __cfs_bandwidth_used;
3020 static inline bool cfs_bandwidth_used(void)
3022 return static_key_false(&__cfs_bandwidth_used);
3025 void cfs_bandwidth_usage_inc(void)
3027 static_key_slow_inc(&__cfs_bandwidth_used);
3030 void cfs_bandwidth_usage_dec(void)
3032 static_key_slow_dec(&__cfs_bandwidth_used);
3034 #else /* HAVE_JUMP_LABEL */
3035 static bool cfs_bandwidth_used(void)
3040 void cfs_bandwidth_usage_inc(void) {}
3041 void cfs_bandwidth_usage_dec(void) {}
3042 #endif /* HAVE_JUMP_LABEL */
3045 * default period for cfs group bandwidth.
3046 * default: 0.1s, units: nanoseconds
3048 static inline u64 default_cfs_period(void)
3050 return 100000000ULL;
3053 static inline u64 sched_cfs_bandwidth_slice(void)
3055 return (u64)sysctl_sched_cfs_bandwidth_slice * NSEC_PER_USEC;
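/*
 * Worked example (illustrative): a task_group configured with quota = 25ms
 * against the default 100ms period may consume at most 25ms of cpu time per
 * period, group-wide.  Each cpu's cfs_rq draws from that global pool in
 * sched_cfs_bandwidth_slice() sized chunks (~5ms with the default sysctl), so
 * roughly five cfs_rqs can take a full slice before the pool runs dry and
 * further consumers are throttled until the period timer refills the quota.
 */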
3059 * Replenish runtime according to assigned quota and update expiration time.
3060 * We use sched_clock_cpu directly instead of rq->clock to avoid adding
3061 * additional synchronization around rq->lock.
3063 * requires cfs_b->lock
3065 void __refill_cfs_bandwidth_runtime(struct cfs_bandwidth *cfs_b)
3069 if (cfs_b->quota == RUNTIME_INF)
3072 now = sched_clock_cpu(smp_processor_id());
3073 cfs_b->runtime = cfs_b->quota;
3074 cfs_b->runtime_expires = now + ktime_to_ns(cfs_b->period);
3077 static inline struct cfs_bandwidth *tg_cfs_bandwidth(struct task_group *tg)
3079 return &tg->cfs_bandwidth;
3082 /* rq->clock_task normalized against any time this cfs_rq has spent throttled */
3083 static inline u64 cfs_rq_clock_task(struct cfs_rq *cfs_rq)
3085 if (unlikely(cfs_rq->throttle_count))
3086 return cfs_rq->throttled_clock_task;
3088 return rq_clock_task(rq_of(cfs_rq)) - cfs_rq->throttled_clock_task_time;
3091 /* returns 0 on failure to allocate runtime */
3092 static int assign_cfs_rq_runtime(struct cfs_rq *cfs_rq)
3094 struct task_group *tg = cfs_rq->tg;
3095 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(tg);
3096 u64 amount = 0, min_amount, expires;
3098 /* note: this is a positive sum as runtime_remaining <= 0 */
3099 min_amount = sched_cfs_bandwidth_slice() - cfs_rq->runtime_remaining;
3101 raw_spin_lock(&cfs_b->lock);
3102 if (cfs_b->quota == RUNTIME_INF)
3103 amount = min_amount;
3106 * If the bandwidth pool has become inactive, then at least one
3107 * period must have elapsed since the last consumption.
3108 * Refresh the global state and ensure bandwidth timer becomes
3111 if (!cfs_b->timer_active) {
3112 __refill_cfs_bandwidth_runtime(cfs_b);
3113 __start_cfs_bandwidth(cfs_b);
3116 if (cfs_b->runtime > 0) {
3117 amount = min(cfs_b->runtime, min_amount);
3118 cfs_b->runtime -= amount;
3122 expires = cfs_b->runtime_expires;
3123 raw_spin_unlock(&cfs_b->lock);
3125 cfs_rq->runtime_remaining += amount;
3127 * we may have advanced our local expiration to account for allowed
3128 * spread between our sched_clock and the one on which runtime was
3131 if ((s64)(expires - cfs_rq->runtime_expires) > 0)
3132 cfs_rq->runtime_expires = expires;
3134 return cfs_rq->runtime_remaining > 0;
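/*
 * Worked example (illustrative): a cfs_rq arriving here 0.3ms overdrawn
 * (runtime_remaining == -0.3ms) asks for slice + 0.3ms so that, if the pool
 * can satisfy it, it leaves with exactly one slice of headroom.  If only 2ms
 * remain globally it receives just those 2ms, keeps ~1.7ms of headroom, and
 * will be back for more (and possibly throttled) that much sooner.
 */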
3138 * Note: This depends on the synchronization provided by sched_clock and the
3139 * fact that rq->clock snapshots this value.
3141 static void expire_cfs_rq_runtime(struct cfs_rq *cfs_rq)
3143 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
3145 /* if the deadline is ahead of our clock, nothing to do */
3146 if (likely((s64)(rq_clock(rq_of(cfs_rq)) - cfs_rq->runtime_expires) < 0))
3149 if (cfs_rq->runtime_remaining < 0)
3153 * If the local deadline has passed we have to consider the
3154 * possibility that our sched_clock is 'fast' and the global deadline
3155 * has not truly expired.
3157 * Fortunately we can determine whether this is the case by checking
3158 * whether the global deadline has advanced.
3161 if ((s64)(cfs_rq->runtime_expires - cfs_b->runtime_expires) >= 0) {
3162 /* extend local deadline, drift is bounded above by 2 ticks */
3163 cfs_rq->runtime_expires += TICK_NSEC;
3165 /* global deadline is ahead, expiration has passed */
3166 cfs_rq->runtime_remaining = 0;
3170 static void __account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec)
3172 /* dock delta_exec before expiring quota (as it could span periods) */
3173 cfs_rq->runtime_remaining -= delta_exec;
3174 expire_cfs_rq_runtime(cfs_rq);
3176 if (likely(cfs_rq->runtime_remaining > 0))
3180 * if we're unable to extend our runtime we resched so that the active
3181 * hierarchy can be throttled
3183 if (!assign_cfs_rq_runtime(cfs_rq) && likely(cfs_rq->curr))
3184 resched_task(rq_of(cfs_rq)->curr);
3187 static __always_inline
3188 void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec)
3190 if (!cfs_bandwidth_used() || !cfs_rq->runtime_enabled)
3193 __account_cfs_rq_runtime(cfs_rq, delta_exec);
3196 static inline int cfs_rq_throttled(struct cfs_rq *cfs_rq)
3198 return cfs_bandwidth_used() && cfs_rq->throttled;
3201 /* check whether cfs_rq, or any parent, is throttled */
3202 static inline int throttled_hierarchy(struct cfs_rq *cfs_rq)
3204 return cfs_bandwidth_used() && cfs_rq->throttle_count;
3208 * Ensure that neither of the group entities corresponding to src_cpu or
3209 * dest_cpu are members of a throttled hierarchy when performing group
3210 * load-balance operations.
3212 static inline int throttled_lb_pair(struct task_group *tg,
3213 int src_cpu, int dest_cpu)
3215 struct cfs_rq *src_cfs_rq, *dest_cfs_rq;
3217 src_cfs_rq = tg->cfs_rq[src_cpu];
3218 dest_cfs_rq = tg->cfs_rq[dest_cpu];
3220 return throttled_hierarchy(src_cfs_rq) ||
3221 throttled_hierarchy(dest_cfs_rq);
3224 /* updated child weight may affect parent so we have to do this bottom up */
3225 static int tg_unthrottle_up(struct task_group *tg, void *data)
3227 struct rq *rq = data;
3228 struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)];
3230 cfs_rq->throttle_count--;
3232 if (!cfs_rq->throttle_count) {
3233 /* adjust cfs_rq_clock_task() */
3234 cfs_rq->throttled_clock_task_time += rq_clock_task(rq) -
3235 cfs_rq->throttled_clock_task;
3242 static int tg_throttle_down(struct task_group *tg, void *data)
3244 struct rq *rq = data;
3245 struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)];
3247 /* group is entering throttled state, stop time */
3248 if (!cfs_rq->throttle_count)
3249 cfs_rq->throttled_clock_task = rq_clock_task(rq);
3250 cfs_rq->throttle_count++;
3255 static void throttle_cfs_rq(struct cfs_rq *cfs_rq)
3257 struct rq *rq = rq_of(cfs_rq);
3258 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
3259 struct sched_entity *se;
3260 long task_delta, dequeue = 1;
3262 se = cfs_rq->tg->se[cpu_of(rq_of(cfs_rq))];
3264 /* freeze hierarchy runnable averages while throttled */
3266 walk_tg_tree_from(cfs_rq->tg, tg_throttle_down, tg_nop, (void *)rq);
3269 task_delta = cfs_rq->h_nr_running;
3270 for_each_sched_entity(se) {
3271 struct cfs_rq *qcfs_rq = cfs_rq_of(se);
3272 /* throttled entity or throttle-on-deactivate */
3277 dequeue_entity(qcfs_rq, se, DEQUEUE_SLEEP);
3278 qcfs_rq->h_nr_running -= task_delta;
3280 if (qcfs_rq->load.weight)
3285 rq->nr_running -= task_delta;
3287 cfs_rq->throttled = 1;
3288 cfs_rq->throttled_clock = rq_clock(rq);
3289 raw_spin_lock(&cfs_b->lock);
3290 list_add_tail_rcu(&cfs_rq->throttled_list, &cfs_b->throttled_cfs_rq);
3291 if (!cfs_b->timer_active)
3292 __start_cfs_bandwidth(cfs_b);
3293 raw_spin_unlock(&cfs_b->lock);
3296 void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
3298 struct rq *rq = rq_of(cfs_rq);
3299 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
3300 struct sched_entity *se;
3304 se = cfs_rq->tg->se[cpu_of(rq)];
3306 cfs_rq->throttled = 0;
3308 update_rq_clock(rq);
3310 raw_spin_lock(&cfs_b->lock);
3311 cfs_b->throttled_time += rq_clock(rq) - cfs_rq->throttled_clock;
3312 list_del_rcu(&cfs_rq->throttled_list);
3313 raw_spin_unlock(&cfs_b->lock);
3315 /* update hierarchical throttle state */
3316 walk_tg_tree_from(cfs_rq->tg, tg_nop, tg_unthrottle_up, (void *)rq);
3318 if (!cfs_rq->load.weight)
3321 task_delta = cfs_rq->h_nr_running;
3322 for_each_sched_entity(se) {
3326 cfs_rq = cfs_rq_of(se);
3328 enqueue_entity(cfs_rq, se, ENQUEUE_WAKEUP);
3329 cfs_rq->h_nr_running += task_delta;
3331 if (cfs_rq_throttled(cfs_rq))
3336 rq->nr_running += task_delta;
3338 /* determine whether we need to wake up potentially idle cpu */
3339 if (rq->curr == rq->idle && rq->cfs.nr_running)
3340 resched_task(rq->curr);
3343 static u64 distribute_cfs_runtime(struct cfs_bandwidth *cfs_b,
3344 u64 remaining, u64 expires)
3346 struct cfs_rq *cfs_rq;
3347 u64 runtime = remaining;
3350 list_for_each_entry_rcu(cfs_rq, &cfs_b->throttled_cfs_rq,
3352 struct rq *rq = rq_of(cfs_rq);
3354 raw_spin_lock(&rq->lock);
3355 if (!cfs_rq_throttled(cfs_rq))
3358 runtime = -cfs_rq->runtime_remaining + 1;
3359 if (runtime > remaining)
3360 runtime = remaining;
3361 remaining -= runtime;
3363 cfs_rq->runtime_remaining += runtime;
3364 cfs_rq->runtime_expires = expires;
3366 /* we check whether we're throttled above */
3367 if (cfs_rq->runtime_remaining > 0)
3368 unthrottle_cfs_rq(cfs_rq);
3371 raw_spin_unlock(&rq->lock);
3382 * Responsible for refilling a task_group's bandwidth and unthrottling its
3383 * cfs_rqs as appropriate. If there has been no activity within the last
3384 * period the timer is deactivated until scheduling resumes; cfs_b->idle is
3385 * used to track this state.
3387 static int do_sched_cfs_period_timer(struct cfs_bandwidth *cfs_b, int overrun)
3389 u64 runtime, runtime_expires;
3390 int idle = 1, throttled;
3392 raw_spin_lock(&cfs_b->lock);
3393 /* no need to continue the timer with no bandwidth constraint */
3394 if (cfs_b->quota == RUNTIME_INF)
3397 throttled = !list_empty(&cfs_b->throttled_cfs_rq);
3398 /* idle depends on !throttled (for the case of a large deficit) */
3399 idle = cfs_b->idle && !throttled;
3400 cfs_b->nr_periods += overrun;
3402 /* if we're going inactive then everything else can be deferred */
3407 * if we have relooped after returning idle once, we need to update our
3408 * status as actually running, so that other cpus doing
3409 * __start_cfs_bandwidth will stop trying to cancel us.
3411 cfs_b->timer_active = 1;
3413 __refill_cfs_bandwidth_runtime(cfs_b);
3416 /* mark as potentially idle for the upcoming period */
3421 /* account preceding periods in which throttling occurred */
3422 cfs_b->nr_throttled += overrun;
3425 * There are throttled entities so we must first use the new bandwidth
3426 * to unthrottle them before making it generally available. This
3427 * ensures that all existing debts will be paid before a new cfs_rq is
3430 runtime = cfs_b->runtime;
3431 runtime_expires = cfs_b->runtime_expires;
3435 * This check is repeated as we are holding onto the new bandwidth
3436 * while we unthrottle. This can potentially race with an unthrottled
3437 * group trying to acquire new bandwidth from the global pool.
3439 while (throttled && runtime > 0) {
3440 raw_spin_unlock(&cfs_b->lock);
3441 /* we can't nest cfs_b->lock while distributing bandwidth */
3442 runtime = distribute_cfs_runtime(cfs_b, runtime,
3444 raw_spin_lock(&cfs_b->lock);
3446 throttled = !list_empty(&cfs_b->throttled_cfs_rq);
3449 /* return (any) remaining runtime */
3450 cfs_b->runtime = runtime;
3452 * While we are ensured activity in the period following an
3453 * unthrottle, this also covers the case in which the new bandwidth is
3454 * insufficient to cover the existing bandwidth deficit. (Forcing the
3455 * timer to remain active while there are any throttled entities.)
3460 cfs_b->timer_active = 0;
3461 raw_spin_unlock(&cfs_b->lock);
3466 /* a cfs_rq won't donate quota below this amount */
3467 static const u64 min_cfs_rq_runtime = 1 * NSEC_PER_MSEC;
3468 /* minimum remaining period time to redistribute slack quota */
3469 static const u64 min_bandwidth_expiration = 2 * NSEC_PER_MSEC;
3470 /* how long we wait to gather additional slack before distributing */
3471 static const u64 cfs_bandwidth_slack_period = 5 * NSEC_PER_MSEC;
3474 * Are we near the end of the current quota period?
3476 * Requires cfs_b->lock for hrtimer_expires_remaining to be safe against the
3477 * hrtimer base being cleared by __hrtimer_start_range_ns. In the case of
3478 * migrate_hrtimers, base is never cleared, so we are fine.
3480 static int runtime_refresh_within(struct cfs_bandwidth *cfs_b, u64 min_expire)
3482 struct hrtimer *refresh_timer = &cfs_b->period_timer;
3485 /* if the call-back is running a quota refresh is already occurring */
3486 if (hrtimer_callback_running(refresh_timer))
3489 /* is a quota refresh about to occur? */
3490 remaining = ktime_to_ns(hrtimer_expires_remaining(refresh_timer));
3491 if (remaining < min_expire)
3497 static void start_cfs_slack_bandwidth(struct cfs_bandwidth *cfs_b)
3499 u64 min_left = cfs_bandwidth_slack_period + min_bandwidth_expiration;
3501 /* if there's a quota refresh soon don't bother with slack */
3502 if (runtime_refresh_within(cfs_b, min_left))
3505 start_bandwidth_timer(&cfs_b->slack_timer,
3506 ns_to_ktime(cfs_bandwidth_slack_period));
3509 /* we know any runtime found here is valid as update_curr() precedes return */
3510 static void __return_cfs_rq_runtime(struct cfs_rq *cfs_rq)
3512 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
3513 s64 slack_runtime = cfs_rq->runtime_remaining - min_cfs_rq_runtime;
3515 if (slack_runtime <= 0)
3518 raw_spin_lock(&cfs_b->lock);
3519 if (cfs_b->quota != RUNTIME_INF &&
3520 cfs_rq->runtime_expires == cfs_b->runtime_expires) {
3521 cfs_b->runtime += slack_runtime;
3523 /* we are under rq->lock, defer unthrottling using a timer */
3524 if (cfs_b->runtime > sched_cfs_bandwidth_slice() &&
3525 !list_empty(&cfs_b->throttled_cfs_rq))
3526 start_cfs_slack_bandwidth(cfs_b);
3528 raw_spin_unlock(&cfs_b->lock);
3530 /* even if it's not valid for return we don't want to try again */
3531 cfs_rq->runtime_remaining -= slack_runtime;
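/*
 * Worked example (illustrative): a cfs_rq going idle with 4ms of local
 * runtime left donates 4ms - min_cfs_rq_runtime = 3ms back to the global
 * pool and keeps 1ms for a quick wakeup.  If that pushes the pool above one
 * bandwidth slice while peers sit throttled, the slack timer is armed
 * cfs_bandwidth_slack_period (5ms) out to redistribute it rather than letting
 * it idle until the next period refresh.
 */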
3534 static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq)
3536 if (!cfs_bandwidth_used())
3539 if (!cfs_rq->runtime_enabled || cfs_rq->nr_running)
3542 __return_cfs_rq_runtime(cfs_rq);
3546 * This is done with a timer (instead of inline with bandwidth return) since
3547 * it's necessary to juggle rq->locks to unthrottle their respective cfs_rqs.
3549 static void do_sched_cfs_slack_timer(struct cfs_bandwidth *cfs_b)
3551 u64 runtime = 0, slice = sched_cfs_bandwidth_slice();
3554 /* confirm we're still not at a refresh boundary */
3555 raw_spin_lock(&cfs_b->lock);
3556 if (runtime_refresh_within(cfs_b, min_bandwidth_expiration)) {
3557 raw_spin_unlock(&cfs_b->lock);
3561 if (cfs_b->quota != RUNTIME_INF && cfs_b->runtime > slice) {
3562 runtime = cfs_b->runtime;
3565 expires = cfs_b->runtime_expires;
3566 raw_spin_unlock(&cfs_b->lock);
3571 runtime = distribute_cfs_runtime(cfs_b, runtime, expires);
3573 raw_spin_lock(&cfs_b->lock);
3574 if (expires == cfs_b->runtime_expires)
3575 cfs_b->runtime = runtime;
3576 raw_spin_unlock(&cfs_b->lock);
3580 * When a group wakes up we want to make sure that its quota is not already
3581 * expired/exceeded, otherwise it may be allowed to steal additional ticks of
3582 * runtime, as update_curr() throttling cannot trigger until it is on-rq.
3584 static void check_enqueue_throttle(struct cfs_rq *cfs_rq)
3586 if (!cfs_bandwidth_used())
3589 /* an active group must be handled by the update_curr()->put() path */
3590 if (!cfs_rq->runtime_enabled || cfs_rq->curr)
3593 /* ensure the group is not already throttled */
3594 if (cfs_rq_throttled(cfs_rq))
3597 /* update runtime allocation */
3598 account_cfs_rq_runtime(cfs_rq, 0);
3599 if (cfs_rq->runtime_remaining <= 0)
3600 throttle_cfs_rq(cfs_rq);
3603 /* conditionally throttle active cfs_rq's from put_prev_entity() */
3604 static void check_cfs_rq_runtime(struct cfs_rq *cfs_rq)
3606 if (!cfs_bandwidth_used())
3609 if (likely(!cfs_rq->runtime_enabled || cfs_rq->runtime_remaining > 0))
3613 * it's possible for a throttled entity to be forced into a running
3614 * state (e.g. set_curr_task), in this case we're finished.
3616 if (cfs_rq_throttled(cfs_rq))
3619 throttle_cfs_rq(cfs_rq);
3622 static enum hrtimer_restart sched_cfs_slack_timer(struct hrtimer *timer)
3624 struct cfs_bandwidth *cfs_b =
3625 container_of(timer, struct cfs_bandwidth, slack_timer);
3626 do_sched_cfs_slack_timer(cfs_b);
3628 return HRTIMER_NORESTART;
3631 static enum hrtimer_restart sched_cfs_period_timer(struct hrtimer *timer)
3633 struct cfs_bandwidth *cfs_b =
3634 container_of(timer, struct cfs_bandwidth, period_timer);
3640 now = hrtimer_cb_get_time(timer);
3641 overrun = hrtimer_forward(timer, now, cfs_b->period);
3646 idle = do_sched_cfs_period_timer(cfs_b, overrun);
3649 return idle ? HRTIMER_NORESTART : HRTIMER_RESTART;
3652 void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
3654 raw_spin_lock_init(&cfs_b->lock);
3656 cfs_b->quota = RUNTIME_INF;
3657 cfs_b->period = ns_to_ktime(default_cfs_period());
3659 INIT_LIST_HEAD(&cfs_b->throttled_cfs_rq);
3660 hrtimer_init(&cfs_b->period_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
3661 cfs_b->period_timer.function = sched_cfs_period_timer;
3662 hrtimer_init(&cfs_b->slack_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
3663 cfs_b->slack_timer.function = sched_cfs_slack_timer;
3666 static void init_cfs_rq_runtime(struct cfs_rq *cfs_rq)
3668 cfs_rq->runtime_enabled = 0;
3669 INIT_LIST_HEAD(&cfs_rq->throttled_list);
3672 /* requires cfs_b->lock, may release to reprogram timer */
3673 void __start_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
3676 * The timer may be active because we're trying to set a new bandwidth
3677 * period or because we're racing with the tear-down path
3678 * (timer_active==0 becomes visible before the hrtimer call-back
3679 * terminates). In either case we ensure that it's re-programmed
3681 while (unlikely(hrtimer_active(&cfs_b->period_timer)) &&
3682 hrtimer_try_to_cancel(&cfs_b->period_timer) < 0) {
3683 /* bounce the lock to allow do_sched_cfs_period_timer to run */
3684 raw_spin_unlock(&cfs_b->lock);
3686 raw_spin_lock(&cfs_b->lock);
3687 /* if someone else restarted the timer then we're done */
3688 if (cfs_b->timer_active)
3692 cfs_b->timer_active = 1;
3693 start_bandwidth_timer(&cfs_b->period_timer, cfs_b->period);
3696 static void destroy_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
3698 hrtimer_cancel(&cfs_b->period_timer);
3699 hrtimer_cancel(&cfs_b->slack_timer);
3702 static void __maybe_unused unthrottle_offline_cfs_rqs(struct rq *rq)
3704 struct cfs_rq *cfs_rq;
3706 for_each_leaf_cfs_rq(rq, cfs_rq) {
3707 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
3709 if (!cfs_rq->runtime_enabled)
3713 * clock_task is not advancing so we just need to make sure
3714 * there's some valid quota amount
3716 cfs_rq->runtime_remaining = cfs_b->quota;
3717 if (cfs_rq_throttled(cfs_rq))
3718 unthrottle_cfs_rq(cfs_rq);
3722 #else /* CONFIG_CFS_BANDWIDTH */
3723 static inline u64 cfs_rq_clock_task(struct cfs_rq *cfs_rq)
3725 return rq_clock_task(rq_of(cfs_rq));
3728 static void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec) {}
3729 static void check_cfs_rq_runtime(struct cfs_rq *cfs_rq) {}
3730 static void check_enqueue_throttle(struct cfs_rq *cfs_rq) {}
3731 static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq) {}
3733 static inline int cfs_rq_throttled(struct cfs_rq *cfs_rq)
3738 static inline int throttled_hierarchy(struct cfs_rq *cfs_rq)
3743 static inline int throttled_lb_pair(struct task_group *tg,
3744 int src_cpu, int dest_cpu)
3749 void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b) {}
3751 #ifdef CONFIG_FAIR_GROUP_SCHED
3752 static void init_cfs_rq_runtime(struct cfs_rq *cfs_rq) {}
3755 static inline struct cfs_bandwidth *tg_cfs_bandwidth(struct task_group *tg)
3759 static inline void destroy_cfs_bandwidth(struct cfs_bandwidth *cfs_b) {}
3760 static inline void unthrottle_offline_cfs_rqs(struct rq *rq) {}
3762 #endif /* CONFIG_CFS_BANDWIDTH */
3764 /**************************************************
3765 * CFS operations on tasks:
3768 #ifdef CONFIG_SCHED_HRTICK
3769 static void hrtick_start_fair(struct rq *rq, struct task_struct *p)
3771 struct sched_entity *se = &p->se;
3772 struct cfs_rq *cfs_rq = cfs_rq_of(se);
3774 WARN_ON(task_rq(p) != rq);
3776 if (cfs_rq->nr_running > 1) {
3777 u64 slice = sched_slice(cfs_rq, se);
3778 u64 ran = se->sum_exec_runtime - se->prev_sum_exec_runtime;
3779 s64 delta = slice - ran;
3788 * Don't schedule slices shorter than 10000ns, that just
3789 * doesn't make sense. Rely on vruntime for fairness.
3792 delta = max_t(s64, 10000LL, delta);
3794 hrtick_start(rq, delta);
3799 * called from enqueue/dequeue and updates the hrtick when the
3800 * current task is from our class and nr_running is low enough
3803 static void hrtick_update(struct rq *rq)
3805 struct task_struct *curr = rq->curr;
3807 if (!hrtick_enabled(rq) || curr->sched_class != &fair_sched_class)
3810 if (cfs_rq_of(&curr->se)->nr_running < sched_nr_latency)
3811 hrtick_start_fair(rq, curr);
3813 #else /* !CONFIG_SCHED_HRTICK */
3815 hrtick_start_fair(struct rq *rq, struct task_struct *p)
3819 static inline void hrtick_update(struct rq *rq)
3825 * The enqueue_task method is called before nr_running is
3826 * increased. Here we update the fair scheduling stats and
3827 * then put the task into the rbtree:
3830 enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
3832 struct cfs_rq *cfs_rq;
3833 struct sched_entity *se = &p->se;
3835 for_each_sched_entity(se) {
3838 cfs_rq = cfs_rq_of(se);
3839 enqueue_entity(cfs_rq, se, flags);
3842 * end evaluation on encountering a throttled cfs_rq
3844 * note: in the case of encountering a throttled cfs_rq we will
3845 * post the final h_nr_running increment below.
3847 if (cfs_rq_throttled(cfs_rq))
3849 cfs_rq->h_nr_running++;
3851 flags = ENQUEUE_WAKEUP;
3854 for_each_sched_entity(se) {
3855 cfs_rq = cfs_rq_of(se);
3856 cfs_rq->h_nr_running++;
3858 if (cfs_rq_throttled(cfs_rq))
3861 update_cfs_shares(cfs_rq);
3862 update_entity_load_avg(se, 1);
3866 update_rq_runnable_avg(rq, rq->nr_running);
3872 static void set_next_buddy(struct sched_entity *se);
3875 * The dequeue_task method is called before nr_running is
3876 * decreased. We remove the task from the rbtree and
3877 * update the fair scheduling stats:
3879 static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
3881 struct cfs_rq *cfs_rq;
3882 struct sched_entity *se = &p->se;
3883 int task_sleep = flags & DEQUEUE_SLEEP;
3885 for_each_sched_entity(se) {
3886 cfs_rq = cfs_rq_of(se);
3887 dequeue_entity(cfs_rq, se, flags);
3890 * end evaluation on encountering a throttled cfs_rq
3892 * note: in the case of encountering a throttled cfs_rq we will
3893 * post the final h_nr_running decrement below.
3895 if (cfs_rq_throttled(cfs_rq))
3897 cfs_rq->h_nr_running--;
3899 /* Don't dequeue parent if it has other entities besides us */
3900 if (cfs_rq->load.weight) {
3902 * Bias pick_next to pick a task from this cfs_rq, as
3903 * p is sleeping when it is within its sched_slice.
3905 if (task_sleep && parent_entity(se))
3906 set_next_buddy(parent_entity(se));
3908 /* avoid re-evaluating load for this entity */
3909 se = parent_entity(se);
3912 flags |= DEQUEUE_SLEEP;
3915 for_each_sched_entity(se) {
3916 cfs_rq = cfs_rq_of(se);
3917 cfs_rq->h_nr_running--;
3919 if (cfs_rq_throttled(cfs_rq))
3922 update_cfs_shares(cfs_rq);
3923 update_entity_load_avg(se, 1);
3928 update_rq_runnable_avg(rq, 1);
3934 /* Used instead of source_load when we know the type == 0 */
3935 static unsigned long weighted_cpuload(const int cpu)
3937 return cpu_rq(cpu)->cfs.runnable_load_avg;
3941 * Return a low guess at the load of a migration-source cpu weighted
3942 * according to the scheduling class and "nice" value.
3944 * We want to under-estimate the load of migration sources, to
3945 * balance conservatively.
3947 static unsigned long source_load(int cpu, int type)
3949 struct rq *rq = cpu_rq(cpu);
3950 unsigned long total = weighted_cpuload(cpu);
3952 if (type == 0 || !sched_feat(LB_BIAS))
3955 return min(rq->cpu_load[type-1], total);
3959 * Return a high guess at the load of a migration-target cpu weighted
3960 * according to the scheduling class and "nice" value.
3962 static unsigned long target_load(int cpu, int type)
3964 struct rq *rq = cpu_rq(cpu);
3965 unsigned long total = weighted_cpuload(cpu);
3967 if (type == 0 || !sched_feat(LB_BIAS))
3970 return max(rq->cpu_load[type-1], total);
3973 static unsigned long power_of(int cpu)
3975 return cpu_rq(cpu)->cpu_power;
3978 static unsigned long cpu_avg_load_per_task(int cpu)
3980 struct rq *rq = cpu_rq(cpu);
3981 unsigned long nr_running = ACCESS_ONCE(rq->nr_running);
3982 unsigned long load_avg = rq->cfs.runnable_load_avg;
3985 return load_avg / nr_running;
3990 static void record_wakee(struct task_struct *p)
3993 * Rough decay (wiping) for cost saving; don't worry
3994 * about the boundary, a really active task won't care
3997 if (jiffies > current->wakee_flip_decay_ts + HZ) {
3998 current->wakee_flips = 0;
3999 current->wakee_flip_decay_ts = jiffies;
4002 if (current->last_wakee != p) {
4003 current->last_wakee = p;
4004 current->wakee_flips++;
4008 static void task_waking_fair(struct task_struct *p)
4010 struct sched_entity *se = &p->se;
4011 struct cfs_rq *cfs_rq = cfs_rq_of(se);
4014 #ifndef CONFIG_64BIT
4015 u64 min_vruntime_copy;
4018 min_vruntime_copy = cfs_rq->min_vruntime_copy;
4020 min_vruntime = cfs_rq->min_vruntime;
4021 } while (min_vruntime != min_vruntime_copy);
4023 min_vruntime = cfs_rq->min_vruntime;
4026 se->vruntime -= min_vruntime;
4030 #ifdef CONFIG_FAIR_GROUP_SCHED
4032 * effective_load() calculates the load change as seen from the root_task_group
4034 * Adding load to a group doesn't make a group heavier, but can cause movement
4035 * of group shares between cpus. Assuming the shares were perfectly aligned one
4036 * can calculate the shift in shares.
4038 * Calculate the effective load difference if @wl is added (subtracted) to @tg
4039 * on this @cpu and results in a total addition (subtraction) of @wg to the
4040 * total group weight.
4042 * Given a runqueue weight distribution (rw_i) we can compute a shares
4043 * distribution (s_i) using:
4045 * s_i = rw_i / \Sum rw_j (1)
4047 * Suppose we have 4 CPUs and our @tg is a direct child of the root group and
4048 * has 7 equal weight tasks, distributed as below (rw_i), with the resulting
4049 * shares distribution (s_i):
4051 * rw_i = { 2, 4, 1, 0 }
4052 * s_i = { 2/7, 4/7, 1/7, 0 }
4054 * As per wake_affine() we're interested in the load of two CPUs (the CPU the
4055 * task used to run on and the CPU the waker is running on), we need to
4056 * compute the effect of waking a task on either CPU and, in case of a sync
4057 * wakeup, compute the effect of the current task going to sleep.
4059 * So for a change of @wl to the local @cpu with an overall group weight change
4060 * of @wl we can compute the new shares distribution (s'_i) using:
4062 * s'_i = (rw_i + @wl) / (@wg + \Sum rw_j) (2)
4064 * Suppose we're interested in CPUs 0 and 1, and want to compute the load
4065 * differences in waking a task to CPU 0. The additional task changes the
4066 * weight and shares distributions like:
4068 * rw'_i = { 3, 4, 1, 0 }
4069 * s'_i = { 3/8, 4/8, 1/8, 0 }
4071 * We can then compute the difference in effective weight by using:
4073 * dw_i = S * (s'_i - s_i) (3)
4075 * Where 'S' is the group weight as seen by its parent.
4077 * Therefore the effective change in loads on CPU 0 would be 5/56 (3/8 - 2/7)
4078 * times the weight of the group. The effect on CPU 1 would be -4/56 (4/8 -
4079 * 4/7) times the weight of the group.
4081 static long effective_load(struct task_group *tg, int cpu, long wl, long wg)
4083 struct sched_entity *se = tg->se[cpu];
4085 if (!tg->parent) /* the trivial, non-cgroup case */
4088 for_each_sched_entity(se) {
4094 * W = @wg + \Sum rw_j
4096 W = wg + calc_tg_weight(tg, se->my_q);
4101 w = se->my_q->load.weight + wl;
4104 * wl = S * s'_i; see (2)
4107 wl = (w * tg->shares) / W;
4112 * Per the above, wl is the new se->load.weight value; since
4113 * those are clipped to [MIN_SHARES, ...) do so now. See
4114 * calc_cfs_shares().
4116 if (wl < MIN_SHARES)
4120 * wl = dw_i = S * (s'_i - s_i); see (3)
4122 wl -= se->load.weight;
4125 * Recursively apply this logic to all parent groups to compute
4126 * the final effective load change on the root group. Since
4127 * only the @tg group gets extra weight, all parent groups can
4128 * only redistribute existing shares. @wl is the shift in shares
4129 * resulting from this level per the above.
4138 static long effective_load(struct task_group *tg, int cpu, long wl, long wg)
4145 static int wake_wide(struct task_struct *p)
4147 int factor = this_cpu_read(sd_llc_size);
4150 * Yeah, it's the switching frequency: it could mean many wakees or
4151 * rapid switching. Using the factor here just helps to automatically
4152 * adjust the degree of looseness, so a bigger node will lead to more pulling.
4154 if (p->wakee_flips > factor) {
4156 * the wakee is somewhat hot and needs a certain amount of cpu
4157 * resource, so if the waker is far hotter, prefer to leave the wakee alone.
4160 if (current->wakee_flips > (factor * p->wakee_flips))
4167 static int wake_affine(struct sched_domain *sd, struct task_struct *p, int sync)
4169 s64 this_load, load;
4170 int idx, this_cpu, prev_cpu;
4171 unsigned long tl_per_task;
4172 struct task_group *tg;
4173 unsigned long weight;
4177 * If we wake multiple tasks be careful to not bounce
4178 * ourselves around too much.
4184 this_cpu = smp_processor_id();
4185 prev_cpu = task_cpu(p);
4186 load = source_load(prev_cpu, idx);
4187 this_load = target_load(this_cpu, idx);
4190 * If sync wakeup then subtract the (maximum possible)
4191 * effect of the currently running task from the load
4192 * of the current CPU:
4195 tg = task_group(current);
4196 weight = current->se.load.weight;
4198 this_load += effective_load(tg, this_cpu, -weight, -weight);
4199 load += effective_load(tg, prev_cpu, 0, -weight);
4203 weight = p->se.load.weight;
4206 * In low-load situations, where prev_cpu is idle and this_cpu is idle
4207 * due to the sync cause above having dropped this_load to 0, we'll
4208 * always have an imbalance, but there's really nothing you can do
4209 * about that, so that's good too.
4211 * Otherwise check if either cpus are near enough in load to allow this
4212 * task to be woken on this_cpu.
4214 if (this_load > 0) {
4215 s64 this_eff_load, prev_eff_load;
4217 this_eff_load = 100;
4218 this_eff_load *= power_of(prev_cpu);
4219 this_eff_load *= this_load +
4220 effective_load(tg, this_cpu, weight, weight);
4222 prev_eff_load = 100 + (sd->imbalance_pct - 100) / 2;
4223 prev_eff_load *= power_of(this_cpu);
4224 prev_eff_load *= load + effective_load(tg, prev_cpu, 0, weight);
4226 balanced = this_eff_load <= prev_eff_load;
4231 * If the currently running task will sleep within
4232 * a reasonable amount of time then attract this newly
4235 if (sync && balanced)
4238 schedstat_inc(p, se.statistics.nr_wakeups_affine_attempts);
4239 tl_per_task = cpu_avg_load_per_task(this_cpu);
4242 (this_load <= load &&
4243 this_load + target_load(prev_cpu, idx) <= tl_per_task)) {
4245 * This domain has SD_WAKE_AFFINE and
4246 * p is cache cold in this domain, and
4247 * there is no bad imbalance.
4249 schedstat_inc(sd, ttwu_move_affine);
4250 schedstat_inc(p, se.statistics.nr_wakeups_affine);
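/*
 * Worked example (illustrative): with equal cpu power and a typical
 * imbalance_pct of 125, prev_eff_load above carries a 112/100 multiplier, so
 * the wakeup is treated as "balanced" onto this_cpu as long as the projected
 * load here (woken task included, sync waker excluded) is no more than ~12%
 * higher than the projected load remaining on prev_cpu.
 */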
4258 * find_idlest_group finds and returns the least busy CPU group within the
4261 static struct sched_group *
4262 find_idlest_group(struct sched_domain *sd, struct task_struct *p,
4263 int this_cpu, int sd_flag)
4265 struct sched_group *idlest = NULL, *group = sd->groups;
4266 unsigned long min_load = ULONG_MAX, this_load = 0;
4267 int load_idx = sd->forkexec_idx;
4268 int imbalance = 100 + (sd->imbalance_pct-100)/2;
4270 if (sd_flag & SD_BALANCE_WAKE)
4271 load_idx = sd->wake_idx;
4274 unsigned long load, avg_load;
4278 /* Skip over this group if it has no CPUs allowed */
4279 if (!cpumask_intersects(sched_group_cpus(group),
4280 tsk_cpus_allowed(p)))
4283 local_group = cpumask_test_cpu(this_cpu,
4284 sched_group_cpus(group));
4286 /* Tally up the load of all CPUs in the group */
4289 for_each_cpu(i, sched_group_cpus(group)) {
4290 /* Bias balancing toward cpus of our domain */
4292 load = source_load(i, load_idx);
4294 load = target_load(i, load_idx);
4299 /* Adjust by relative CPU power of the group */
4300 avg_load = (avg_load * SCHED_POWER_SCALE) / group->sgp->power;
4303 this_load = avg_load;
4304 } else if (avg_load < min_load) {
4305 min_load = avg_load;
4308 } while (group = group->next, group != sd->groups);
4310 if (!idlest || 100*this_load < imbalance*min_load)
4316 * find_idlest_cpu - find the idlest cpu among the cpus in group.
4319 find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu)
4321 unsigned long load, min_load = ULONG_MAX;
4325 /* Traverse only the allowed CPUs */
4326 for_each_cpu_and(i, sched_group_cpus(group), tsk_cpus_allowed(p)) {
4327 load = weighted_cpuload(i);
4329 if (load < min_load || (load == min_load && i == this_cpu)) {
4339 * Try and locate an idle CPU in the sched_domain.
4341 static int select_idle_sibling(struct task_struct *p, int target)
4343 struct sched_domain *sd;
4344 struct sched_group *sg;
4345 int i = task_cpu(p);
4347 if (idle_cpu(target))
4351 * If the previous cpu is cache affine and idle, don't be stupid.
4353 if (i != target && cpus_share_cache(i, target) && idle_cpu(i))
4357 * Otherwise, iterate the domains and find an eligible idle cpu.
4359 sd = rcu_dereference(per_cpu(sd_llc, target));
4360 for_each_lower_domain(sd) {
4363 if (!cpumask_intersects(sched_group_cpus(sg),
4364 tsk_cpus_allowed(p)))
4367 for_each_cpu(i, sched_group_cpus(sg)) {
4368 if (i == target || !idle_cpu(i))
4372 target = cpumask_first_and(sched_group_cpus(sg),
4373 tsk_cpus_allowed(p));
4377 } while (sg != sd->groups);
4384 * sched_balance_self: balance the current task (running on cpu) in domains
4385 * that have the 'flag' flag set. In practice, this is SD_BALANCE_FORK and
4388 * Balance, i.e. select the least loaded group.
4390 * Returns the target CPU number, or the same CPU if no balancing is needed.
4392 * preempt must be disabled.
4395 select_task_rq_fair(struct task_struct *p, int prev_cpu, int sd_flag, int wake_flags)
4397 struct sched_domain *tmp, *affine_sd = NULL, *sd = NULL;
4398 int cpu = smp_processor_id();
4400 int want_affine = 0;
4401 int sync = wake_flags & WF_SYNC;
4403 if (p->nr_cpus_allowed == 1)
4406 if (sd_flag & SD_BALANCE_WAKE) {
4407 if (cpumask_test_cpu(cpu, tsk_cpus_allowed(p)))
4413 for_each_domain(cpu, tmp) {
4414 if (!(tmp->flags & SD_LOAD_BALANCE))
4418 * If both cpu and prev_cpu are part of this domain,
4419 * cpu is a valid SD_WAKE_AFFINE target.
4421 if (want_affine && (tmp->flags & SD_WAKE_AFFINE) &&
4422 cpumask_test_cpu(prev_cpu, sched_domain_span(tmp))) {
4427 if (tmp->flags & sd_flag)
4432 if (cpu != prev_cpu && wake_affine(affine_sd, p, sync))
4435 new_cpu = select_idle_sibling(p, prev_cpu);
4440 struct sched_group *group;
4443 if (!(sd->flags & sd_flag)) {
4448 group = find_idlest_group(sd, p, cpu, sd_flag);
4454 new_cpu = find_idlest_cpu(group, p, cpu);
4455 if (new_cpu == -1 || new_cpu == cpu) {
4456 /* Now try balancing at a lower domain level of cpu */
4461 /* Now try balancing at a lower domain level of new_cpu */
4463 weight = sd->span_weight;
4465 for_each_domain(cpu, tmp) {
4466 if (weight <= tmp->span_weight)
4468 if (tmp->flags & sd_flag)
4471 /* while loop will break here if sd == NULL */
4480 * Called immediately before a task is migrated to a new cpu; task_cpu(p) and
4481 * cfs_rq_of(p) references at time of call are still valid and identify the
4482 * previous cpu. However, the caller only guarantees p->pi_lock is held; no
4483 * other assumptions, including the state of rq->lock, should be made.
4486 migrate_task_rq_fair(struct task_struct *p, int next_cpu)
4488 struct sched_entity *se = &p->se;
4489 struct cfs_rq *cfs_rq = cfs_rq_of(se);
4492 * Load tracking: accumulate removed load so that it can be processed
4493 * when we next update owning cfs_rq under rq->lock. Tasks contribute
4494 * to blocked load iff they have a positive decay-count. It can never
4495 * be negative here since on-rq tasks have decay-count == 0.
4497 if (se->avg.decay_count) {
4498 se->avg.decay_count = -__synchronize_entity_decay(se);
4499 atomic_long_add(se->avg.load_avg_contrib,
4500 &cfs_rq->removed_load);
4503 #endif /* CONFIG_SMP */
4505 static unsigned long
4506 wakeup_gran(struct sched_entity *curr, struct sched_entity *se)
4508 unsigned long gran = sysctl_sched_wakeup_granularity;
4511 * Since it's curr that is running now, convert the gran from real-time
4512 * to virtual-time in se's units.
4514 * By using 'se' instead of 'curr' we penalize light tasks, so
4515 * they get preempted easier. That is, if 'se' < 'curr' then
4516 * the resulting gran will be larger, therefore penalizing the
4517 * lighter, if otoh 'se' > 'curr' then the resulting gran will
4518 * be smaller, again penalizing the lighter task.
4520 * This is especially important for buddies when the leftmost
4521 * task is higher priority than the buddy.
4523 return calc_delta_fair(gran, se);
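
/*
 * Purely illustrative sketch (not kernel code): calc_delta_fair() above
 * scales the granularity by roughly NICE_0_LOAD / se->load.weight, so a
 * heavier 'se' sees a smaller virtual-time granularity (and therefore
 * preempts more easily) while a lighter 'se' sees a larger one. The helper
 * name and the 1024 constant below are assumptions made for this example.
 */
static unsigned long wakeup_gran_sketch(unsigned long gran,
					unsigned long weight)
{
	const unsigned long nice_0_load = 1024;	/* default task weight */

	/* e.g. a task at twice the default weight sees half the granularity */
	return gran * nice_0_load / weight;
}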
4527 * Should 'se' preempt 'curr'.
4541 wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se)
4543 s64 gran, vdiff = curr->vruntime - se->vruntime;
4548 gran = wakeup_gran(curr, se);
4555 static void set_last_buddy(struct sched_entity *se)
4557 if (entity_is_task(se) && unlikely(task_of(se)->policy == SCHED_IDLE))
4560 for_each_sched_entity(se)
4561 cfs_rq_of(se)->last = se;
4564 static void set_next_buddy(struct sched_entity *se)
4566 if (entity_is_task(se) && unlikely(task_of(se)->policy == SCHED_IDLE))
4569 for_each_sched_entity(se)
4570 cfs_rq_of(se)->next = se;
4573 static void set_skip_buddy(struct sched_entity *se)
4575 for_each_sched_entity(se)
4576 cfs_rq_of(se)->skip = se;
4580 * Preempt the current task with a newly woken task if needed:
4582 static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_flags)
4584 struct task_struct *curr = rq->curr;
4585 struct sched_entity *se = &curr->se, *pse = &p->se;
4586 struct cfs_rq *cfs_rq = task_cfs_rq(curr);
4587 int scale = cfs_rq->nr_running >= sched_nr_latency;
4588 int next_buddy_marked = 0;
4590 if (unlikely(se == pse))
4594 * This is possible from callers such as move_task(), in which we
4595 * unconditionally call check_preempt_curr() after an enqueue (which may have
4596 * led to a throttle). This both saves work and prevents false
4597 * next-buddy nomination below.
4599 if (unlikely(throttled_hierarchy(cfs_rq_of(pse))))
4602 if (sched_feat(NEXT_BUDDY) && scale && !(wake_flags & WF_FORK)) {
4603 set_next_buddy(pse);
4604 next_buddy_marked = 1;
4608 * We can come here with TIF_NEED_RESCHED already set from new task
4611 * Note: this also catches the edge-case of curr being in a throttled
4612 * group (e.g. via set_curr_task), since update_curr() (in the
4613 * enqueue of curr) will have resulted in resched being set. This
4614 * prevents us from potentially nominating it as a false LAST_BUDDY
4617 if (test_tsk_need_resched(curr))
4620 /* Idle tasks are by definition preempted by non-idle tasks. */
4621 if (unlikely(curr->policy == SCHED_IDLE) &&
4622 likely(p->policy != SCHED_IDLE))
4626 * Batch and idle tasks do not preempt non-idle tasks (their preemption
4627 * is driven by the tick):
4629 if (unlikely(p->policy != SCHED_NORMAL) || !sched_feat(WAKEUP_PREEMPTION))
4632 find_matching_se(&se, &pse);
4633 update_curr(cfs_rq_of(se));
4635 if (wakeup_preempt_entity(se, pse) == 1) {
4637 * Bias pick_next to pick the sched entity that is
4638 * triggering this preemption.
4640 if (!next_buddy_marked)
4641 set_next_buddy(pse);
4650 * Only set the backward buddy when the current task is still
4651 * on the rq. This can happen when a wakeup gets interleaved
4652 * with schedule on the ->pre_schedule() or idle_balance()
4653 * point, either of which can drop the rq lock.
4655 * Also, during early boot the idle thread is in the fair class,
4656 * for obvious reasons it's a bad idea to schedule back to it.
4658 if (unlikely(!se->on_rq || curr == rq->idle))
4661 if (sched_feat(LAST_BUDDY) && scale && entity_is_task(se))
4665 static struct task_struct *pick_next_task_fair(struct rq *rq)
4667 struct task_struct *p;
4668 struct cfs_rq *cfs_rq = &rq->cfs;
4669 struct sched_entity *se;
4671 if (!cfs_rq->nr_running)
4675 se = pick_next_entity(cfs_rq);
4676 set_next_entity(cfs_rq, se);
4677 cfs_rq = group_cfs_rq(se);
4681 if (hrtick_enabled(rq))
4682 hrtick_start_fair(rq, p);
4688 * Account for a descheduled task:
4690 static void put_prev_task_fair(struct rq *rq, struct task_struct *prev)
4692 struct sched_entity *se = &prev->se;
4693 struct cfs_rq *cfs_rq;
4695 for_each_sched_entity(se) {
4696 cfs_rq = cfs_rq_of(se);
4697 put_prev_entity(cfs_rq, se);
4702 * sched_yield() is very simple
4704 * The magic of dealing with the ->skip buddy is in pick_next_entity.
4706 static void yield_task_fair(struct rq *rq)
4708 struct task_struct *curr = rq->curr;
4709 struct cfs_rq *cfs_rq = task_cfs_rq(curr);
4710 struct sched_entity *se = &curr->se;
4713 * Are we the only task in the tree?
4715 if (unlikely(rq->nr_running == 1))
4718 clear_buddies(cfs_rq, se);
4720 if (curr->policy != SCHED_BATCH) {
4721 update_rq_clock(rq);
4723 * Update run-time statistics of the 'current'.
4725 update_curr(cfs_rq);
4727 * Tell update_rq_clock() that we've just updated,
4728 * so we don't do microscopic update in schedule()
4729 * and double the fastpath cost.
4731 rq->skip_clock_update = 1;
4737 static bool yield_to_task_fair(struct rq *rq, struct task_struct *p, bool preempt)
4739 struct sched_entity *se = &p->se;
4741 /* throttled hierarchies are not runnable */
4742 if (!se->on_rq || throttled_hierarchy(cfs_rq_of(se)))
4745 /* Tell the scheduler that we'd really like pse to run next. */
4748 yield_task_fair(rq);
4754 /**************************************************
4755 * Fair scheduling class load-balancing methods.
4759 * The purpose of load-balancing is to achieve the same basic fairness the
4760 * per-cpu scheduler provides, namely provide a proportional amount of compute
4761 * time to each task. This is expressed in the following equation:
4763 * W_i,n/P_i == W_j,n/P_j for all i,j (1)
4765 * Where W_i,n is the n-th weight average for cpu i. The instantaneous weight
4766 * W_i,0 is defined as:
4768 * W_i,0 = \Sum_j w_i,j (2)
4770 * Where w_i,j is the weight of the j-th runnable task on cpu i. This weight
4771 * is derived from the nice value as per prio_to_weight[].
4773 * The weight average is an exponential decay average of the instantaneous weight:
4776 * W'_i,n = (2^n - 1) / 2^n * W_i,n + 1 / 2^n * W_i,0 (3)
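 *
 * For illustration, with n = 2 the old average keeps a weight of 3/4 and the
 * instantaneous weight contributes 1/4, so an average of 1024 combined with
 * an instantaneous weight of 2048 gives W' = 3/4 * 1024 + 1/4 * 2048 = 1280.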
4778 * P_i is the cpu power (or compute capacity) of cpu i, typically it is the
4779 * fraction of 'recent' time available for SCHED_OTHER task execution. But it
4780 * can also include other factors [XXX].
4782 * To achieve this balance we define a measure of imbalance which follows
4783 * directly from (1):
4785 * imb_i,j = max{ avg(W/P), W_i/P_i } - min{ avg(W/P), W_j/P_j } (4)
4787 * We then move tasks around to minimize the imbalance. In the continuous
4788 * function space it is obvious this converges; in the discrete case we get
4789 * a few fun cases generally called infeasible weight scenarios.
4792 * - infeasible weights;
4793 * - local vs global optima in the discrete case. ]
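 *
 * As a small example of (4): with three cpus whose W/P ratios are 6, 4 and 2
 * the average is 4, so between the first and the last cpu
 *
 *   imb = max{ 4, 6 } - min{ 4, 2 } = 4
 *
 * and moving weight from the first cpu towards the last shrinks it.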
4798 * In order to solve the imbalance equation (4), and avoid the obvious O(n^2)
4799 * for all i,j solution, we create a tree of cpus that follows the hardware
4800 * topology where each level pairs two lower groups (or better). This results
4801 * in O(log n) layers. Furthermore we reduce the number of cpus going up the
4802 * tree to only the first of the previous level and we decrease the frequency
4803 * of load-balance at each level inversely proportional to the number of cpus in the groups. This yields:
4809 *   \Sum_{i = 0}^{log_2 n}  1/2^i * n/2^i * 2^i  =  O(n)                (5)
4811 *
4812 * where the sum runs over all levels, 1/2^i is the balance frequency at
4814 * level i, n/2^i the number of cpus doing load-balance and 2^i the group size.
4816 * Coupled with a limit on how many tasks we can migrate every balance pass,
4817 * this makes (5) the runtime complexity of the balancer.
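 *
 * For instance, with n = 8 the per-level terms of (5) are n/2^i = 8, 4, 2
 * and 1, which sum to 15 < 2n, illustrating the O(n) bound.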
4819 * An important property here is that each CPU is still (indirectly) connected
4820 * to every other cpu in at most O(log n) steps:
4822 * The adjacency matrix of the resulting graph is given by:
4825 * A_i,j = \Union_{k = 0}^{log_2 n} (i % 2^k == 0) && i / 2^(k+1) == j / 2^(k+1) (6)
4828 * And you'll find that:
4830 * A^(log_2 n)_i,j != 0 for all i,j (7)
4832 * Showing there's indeed a path between every cpu in at most O(log n) steps.
4833 * The task movement gives a factor of O(m), giving a convergence complexity
4836 * O(nm log n), n := nr_cpus, m := nr_tasks (8)
4841 * In order to avoid CPUs going idle while there's still work to do, new idle
4842 * balancing is more aggressive and has the newly idle cpu iterate up the domain
4843 * tree itself instead of relying on other CPUs to bring it work.
4845 * This adds some complexity to both (5) and (8) but it reduces the total idle
4853 * Cgroups make a horror show out of (2), instead of a simple sum we get:
4856 * W_i,0 = \Sum_j \Prod_k w_k * s_k,i / S_k                              (9)
4861 * where  s_k,i = \Sum_j w_i,j,k  and  S_k = \Sum_i s_k,i               (10)
4863 * w_i,j,k is the weight of the j-th runnable task in the k-th cgroup on cpu i.
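 *
 * As a single-level example: a group with shares w_k = 1024 that has one
 * quarter of its runnable weight on cpu i (s_k,i / S_k = 1/4) is worth
 * 1024 * 1/4 = 256 towards W_i,0 there.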
4865 * The big problem is S_k, it's a global sum needed to compute a local (W_i)
4868 * [XXX write more on how we solve this.. _after_ merging pjt's patches that
4869 * rewrite all of this once again.]
4872 static unsigned long __read_mostly max_load_balance_interval = HZ/10;
4874 enum fbq_type { regular, remote, all };
4876 #define LBF_ALL_PINNED 0x01
4877 #define LBF_NEED_BREAK 0x02
4878 #define LBF_DST_PINNED 0x04
4879 #define LBF_SOME_PINNED 0x08
4882 struct sched_domain *sd;
4890 struct cpumask *dst_grpmask;
4892 enum cpu_idle_type idle;
4894 /* The set of CPUs under consideration for load-balancing */
4895 struct cpumask *cpus;
4900 unsigned int loop_break;
4901 unsigned int loop_max;
4903 enum fbq_type fbq_type;
4907 * move_task - move a task from one runqueue to another runqueue.
4908 * Both runqueues must be locked.
4910 static void move_task(struct task_struct *p, struct lb_env *env)
4912 deactivate_task(env->src_rq, p, 0);
4913 set_task_cpu(p, env->dst_cpu);
4914 activate_task(env->dst_rq, p, 0);
4915 check_preempt_curr(env->dst_rq, p, 0);
4919 * Is this task likely cache-hot:
4922 task_hot(struct task_struct *p, u64 now, struct sched_domain *sd)
4926 if (p->sched_class != &fair_sched_class)
4929 if (unlikely(p->policy == SCHED_IDLE))
4933 * Buddy candidates are cache hot:
4935 if (sched_feat(CACHE_HOT_BUDDY) && this_rq()->nr_running &&
4936 (&p->se == cfs_rq_of(&p->se)->next ||
4937 &p->se == cfs_rq_of(&p->se)->last))
4940 if (sysctl_sched_migration_cost == -1)
4942 if (sysctl_sched_migration_cost == 0)
4945 delta = now - p->se.exec_start;
4947 return delta < (s64)sysctl_sched_migration_cost;
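
/*
 * In other words: a migration cost of -1 marks every fair task cache-hot and
 * 0 marks none; otherwise, with the default cost of 0.5 msec, a task that
 * last ran within the previous half millisecond of 'now' is treated as hot.
 */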
4950 #ifdef CONFIG_NUMA_BALANCING
4951 /* Returns true if the destination node has incurred more faults */
4952 static bool migrate_improves_locality(struct task_struct *p, struct lb_env *env)
4954 int src_nid, dst_nid;
4956 if (!sched_feat(NUMA_FAVOUR_HIGHER) || !p->numa_faults_memory ||
4957 !(env->sd->flags & SD_NUMA)) {
4961 src_nid = cpu_to_node(env->src_cpu);
4962 dst_nid = cpu_to_node(env->dst_cpu);
4964 if (src_nid == dst_nid)
4967 /* Always encourage migration to the preferred node. */
4968 if (dst_nid == p->numa_preferred_nid)
4971 /* If both task and group weight improve, this move is a winner. */
4972 if (task_weight(p, dst_nid) > task_weight(p, src_nid) &&
4973 group_weight(p, dst_nid) > group_weight(p, src_nid))
4980 static bool migrate_degrades_locality(struct task_struct *p, struct lb_env *env)
4982 int src_nid, dst_nid;
4984 if (!sched_feat(NUMA) || !sched_feat(NUMA_RESIST_LOWER))
4987 if (!p->numa_faults_memory || !(env->sd->flags & SD_NUMA))
4990 src_nid = cpu_to_node(env->src_cpu);
4991 dst_nid = cpu_to_node(env->dst_cpu);
4993 if (src_nid == dst_nid)
4996 /* Migrating away from the preferred node is always bad. */
4997 if (src_nid == p->numa_preferred_nid)
5000 /* If either task or group weight get worse, don't do it. */
5001 if (task_weight(p, dst_nid) < task_weight(p, src_nid) ||
5002 group_weight(p, dst_nid) < group_weight(p, src_nid))
5009 static inline bool migrate_improves_locality(struct task_struct *p,
5015 static inline bool migrate_degrades_locality(struct task_struct *p,
5023 * can_migrate_task - may task p from runqueue rq be migrated to this_cpu?
5026 int can_migrate_task(struct task_struct *p, struct lb_env *env)
5028 int tsk_cache_hot = 0;
5030 * We do not migrate tasks that are:
5031 * 1) throttled_lb_pair, or
5032 * 2) cannot be migrated to this CPU due to cpus_allowed, or
5033 * 3) running (obviously), or
5034 * 4) are cache-hot on their current CPU.
5036 if (throttled_lb_pair(task_group(p), env->src_cpu, env->dst_cpu))
5039 if (!cpumask_test_cpu(env->dst_cpu, tsk_cpus_allowed(p))) {
5042 schedstat_inc(p, se.statistics.nr_failed_migrations_affine);
5044 env->flags |= LBF_SOME_PINNED;
5047 * Remember if this task can be migrated to any other cpu in
5048 * our sched_group. We may want to revisit it if we couldn't
5049 * meet load balance goals by pulling other tasks on src_cpu.
5051 * Also avoid computing new_dst_cpu if we have already computed
5052 * one in the current iteration.
5054 if (!env->dst_grpmask || (env->flags & LBF_DST_PINNED))
5057 /* Prevent re-selecting dst_cpu via env's cpus */
5058 for_each_cpu_and(cpu, env->dst_grpmask, env->cpus) {
5059 if (cpumask_test_cpu(cpu, tsk_cpus_allowed(p))) {
5060 env->flags |= LBF_DST_PINNED;
5061 env->new_dst_cpu = cpu;
5069 /* Record that we found at least one task that could run on dst_cpu */
5070 env->flags &= ~LBF_ALL_PINNED;
5072 if (task_running(env->src_rq, p)) {
5073 schedstat_inc(p, se.statistics.nr_failed_migrations_running);
5078 * Aggressive migration if:
5079 * 1) destination numa is preferred
5080 * 2) task is cache cold, or
5081 * 3) too many balance attempts have failed.
5083 tsk_cache_hot = task_hot(p, rq_clock_task(env->src_rq), env->sd);
5085 tsk_cache_hot = migrate_degrades_locality(p, env);
5087 if (migrate_improves_locality(p, env)) {
5088 #ifdef CONFIG_SCHEDSTATS
5089 if (tsk_cache_hot) {
5090 schedstat_inc(env->sd, lb_hot_gained[env->idle]);
5091 schedstat_inc(p, se.statistics.nr_forced_migrations);
5097 if (!tsk_cache_hot ||
5098 env->sd->nr_balance_failed > env->sd->cache_nice_tries) {
5100 if (tsk_cache_hot) {
5101 schedstat_inc(env->sd, lb_hot_gained[env->idle]);
5102 schedstat_inc(p, se.statistics.nr_forced_migrations);
5108 schedstat_inc(p, se.statistics.nr_failed_migrations_hot);
5113 * move_one_task tries to move exactly one task from busiest to this_rq, as
5114 * part of active balancing operations within "domain".
5115 * Returns 1 if successful and 0 otherwise.
5117 * Called with both runqueues locked.
5119 static int move_one_task(struct lb_env *env)
5121 struct task_struct *p, *n;
5123 list_for_each_entry_safe(p, n, &env->src_rq->cfs_tasks, se.group_node) {
5124 if (!can_migrate_task(p, env))
5129 * Right now, this is only the second place move_task()
5130 * is called, so we can safely collect move_task()
5131 * stats here rather than inside move_task().
5133 schedstat_inc(env->sd, lb_gained[env->idle]);
5139 static const unsigned int sched_nr_migrate_break = 32;
5142 * move_tasks tries to move up to imbalance weighted load from busiest to
5143 * this_rq, as part of a balancing operation within domain "sd".
5144 * Returns 1 if successful and 0 otherwise.
5146 * Called with both runqueues locked.
5148 static int move_tasks(struct lb_env *env)
5150 struct list_head *tasks = &env->src_rq->cfs_tasks;
5151 struct task_struct *p;
5155 if (env->imbalance <= 0)
5158 while (!list_empty(tasks)) {
5159 p = list_first_entry(tasks, struct task_struct, se.group_node);
5162 /* We've more or less seen every task there is, call it quits */
5163 if (env->loop > env->loop_max)
5166 /* take a breather every nr_migrate tasks */
5167 if (env->loop > env->loop_break) {
5168 env->loop_break += sched_nr_migrate_break;
5169 env->flags |= LBF_NEED_BREAK;
5173 if (!can_migrate_task(p, env))
5176 load = task_h_load(p);
5178 if (sched_feat(LB_MIN) && load < 16 && !env->sd->nr_balance_failed)
5181 if ((load / 2) > env->imbalance)
5186 env->imbalance -= load;
5188 #ifdef CONFIG_PREEMPT
5190 * NEWIDLE balancing is a source of latency, so preemptible
5191 * kernels will stop after the first task is pulled to minimize
5192 * the critical section.
5194 if (env->idle == CPU_NEWLY_IDLE)
5199 * We only want to steal up to the prescribed amount of
5202 if (env->imbalance <= 0)
5207 list_move_tail(&p->se.group_node, tasks);
5211 * Right now, this is one of only two places move_task() is called,
5212 * so we can safely collect move_task() stats here rather than
5213 * inside move_task().
5215 schedstat_add(env->sd, lb_gained[env->idle], pulled);
5220 #ifdef CONFIG_FAIR_GROUP_SCHED
5222 * update tg->load_weight by folding this cpu's load_avg
5224 static void __update_blocked_averages_cpu(struct task_group *tg, int cpu)
5226 struct sched_entity *se = tg->se[cpu];
5227 struct cfs_rq *cfs_rq = tg->cfs_rq[cpu];
5229 /* throttled entities do not contribute to load */
5230 if (throttled_hierarchy(cfs_rq))
5233 update_cfs_rq_blocked_load(cfs_rq, 1);
5236 update_entity_load_avg(se, 1);
5238 * We pivot on our runnable average having decayed to zero for
5239 * list removal. This generally implies that all our children
5240 * have also been removed (modulo rounding error or bandwidth
5241 * control); however, such cases are rare and we can fix these
5244 * TODO: fix up out-of-order children on enqueue.
5246 if (!se->avg.runnable_avg_sum && !cfs_rq->nr_running)
5247 list_del_leaf_cfs_rq(cfs_rq);
5249 struct rq *rq = rq_of(cfs_rq);
5250 update_rq_runnable_avg(rq, rq->nr_running);
5254 static void update_blocked_averages(int cpu)
5256 struct rq *rq = cpu_rq(cpu);
5257 struct cfs_rq *cfs_rq;
5258 unsigned long flags;
5260 raw_spin_lock_irqsave(&rq->lock, flags);
5261 update_rq_clock(rq);
5263 * Iterates the task_group tree in a bottom up fashion, see
5264 * list_add_leaf_cfs_rq() for details.
5266 for_each_leaf_cfs_rq(rq, cfs_rq) {
5268 * Note: We may want to consider periodically releasing
5269 * rq->lock around these updates so that creating many task
5270 * groups does not result in continually extending hold time.
5272 __update_blocked_averages_cpu(cfs_rq->tg, rq->cpu);
5275 raw_spin_unlock_irqrestore(&rq->lock, flags);
5279 * Compute the hierarchical load factor for cfs_rq and all its ascendants.
5280 * This needs to be done in a top-down fashion because the load of a child
5281 * group is a fraction of its parent's load.
5283 static void update_cfs_rq_h_load(struct cfs_rq *cfs_rq)
5285 struct rq *rq = rq_of(cfs_rq);
5286 struct sched_entity *se = cfs_rq->tg->se[cpu_of(rq)];
5287 unsigned long now = jiffies;
5290 if (cfs_rq->last_h_load_update == now)
5293 cfs_rq->h_load_next = NULL;
5294 for_each_sched_entity(se) {
5295 cfs_rq = cfs_rq_of(se);
5296 cfs_rq->h_load_next = se;
5297 if (cfs_rq->last_h_load_update == now)
5302 cfs_rq->h_load = cfs_rq->runnable_load_avg;
5303 cfs_rq->last_h_load_update = now;
5306 while ((se = cfs_rq->h_load_next) != NULL) {
5307 load = cfs_rq->h_load;
5308 load = div64_ul(load * se->avg.load_avg_contrib,
5309 cfs_rq->runnable_load_avg + 1);
5310 cfs_rq = group_cfs_rq(se);
5311 cfs_rq->h_load = load;
5312 cfs_rq->last_h_load_update = now;
5316 static unsigned long task_h_load(struct task_struct *p)
5318 struct cfs_rq *cfs_rq = task_cfs_rq(p);
5320 update_cfs_rq_h_load(cfs_rq);
5321 return div64_ul(p->se.avg.load_avg_contrib * cfs_rq->h_load,
5322 cfs_rq->runnable_load_avg + 1);
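
/*
 * Worked example with made-up numbers: a group entity contributing 512 of
 * its parent's runnable load of 1024 gives the group cfs_rq an h_load of
 * 1024 * 512 / (1024 + 1) ~= 511; a task contributing 256 of that group's
 * runnable load of 512 then gets task_h_load() = 256 * 511 / (512 + 1) ~= 255,
 * i.e. roughly a quarter of the root-level load, as expected.
 */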
5325 static inline void update_blocked_averages(int cpu)
5329 static unsigned long task_h_load(struct task_struct *p)
5331 return p->se.avg.load_avg_contrib;
5335 /********** Helpers for find_busiest_group ************************/
5337 * sg_lb_stats - stats of a sched_group required for load_balancing
5339 struct sg_lb_stats {
5340 unsigned long avg_load; /* Avg load across the CPUs of the group */
5341 unsigned long group_load; /* Total load over the CPUs of the group */
5342 unsigned long sum_weighted_load; /* Weighted load of group's tasks */
5343 unsigned long load_per_task;
5344 unsigned long group_power;
5345 unsigned int sum_nr_running; /* Nr tasks running in the group */
5346 unsigned int group_capacity;
5347 unsigned int idle_cpus;
5348 unsigned int group_weight;
5349 int group_imb; /* Is there an imbalance in the group ? */
5350 int group_has_capacity; /* Is there extra capacity in the group? */
5351 #ifdef CONFIG_NUMA_BALANCING
5352 unsigned int nr_numa_running;
5353 unsigned int nr_preferred_running;
5358 * sd_lb_stats - Structure to store the statistics of a sched_domain
5359 * during load balancing.
5361 struct sd_lb_stats {
5362 struct sched_group *busiest; /* Busiest group in this sd */
5363 struct sched_group *local; /* Local group in this sd */
5364 unsigned long total_load; /* Total load of all groups in sd */
5365 unsigned long total_pwr; /* Total power of all groups in sd */
5366 unsigned long avg_load; /* Average load across all groups in sd */
5368 struct sg_lb_stats busiest_stat;/* Statistics of the busiest group */
5369 struct sg_lb_stats local_stat; /* Statistics of the local group */
5372 static inline void init_sd_lb_stats(struct sd_lb_stats *sds)
5375 * Skimp on the clearing to avoid duplicate work. We can avoid clearing
5376 * local_stat because update_sg_lb_stats() does a full clear/assignment.
5377 * We must however clear busiest_stat::avg_load because
5378 * update_sd_pick_busiest() reads this before assignment.
5380 *sds = (struct sd_lb_stats){
5392 * get_sd_load_idx - Obtain the load index for a given sched domain.
5393 * @sd: The sched_domain whose load_idx is to be obtained.
5394 * @idle: The idle status of the CPU for whose sd load_idx is obtained.
5396 * Return: The load index.
5398 static inline int get_sd_load_idx(struct sched_domain *sd,
5399 enum cpu_idle_type idle)
5405 load_idx = sd->busy_idx;
5408 case CPU_NEWLY_IDLE:
5409 load_idx = sd->newidle_idx;
5412 load_idx = sd->idle_idx;
5419 static unsigned long default_scale_freq_power(struct sched_domain *sd, int cpu)
5421 return SCHED_POWER_SCALE;
5424 unsigned long __weak arch_scale_freq_power(struct sched_domain *sd, int cpu)
5426 return default_scale_freq_power(sd, cpu);
5429 static unsigned long default_scale_smt_power(struct sched_domain *sd, int cpu)
5431 unsigned long weight = sd->span_weight;
5432 unsigned long smt_gain = sd->smt_gain;
5439 unsigned long __weak arch_scale_smt_power(struct sched_domain *sd, int cpu)
5441 return default_scale_smt_power(sd, cpu);
5444 static unsigned long scale_rt_power(int cpu)
5446 struct rq *rq = cpu_rq(cpu);
5447 u64 total, available, age_stamp, avg;
5450 * Since we're reading these variables without serialization make sure
5451 * we read them once before doing sanity checks on them.
5453 age_stamp = ACCESS_ONCE(rq->age_stamp);
5454 avg = ACCESS_ONCE(rq->rt_avg);
5456 total = sched_avg_period() + (rq_clock(rq) - age_stamp);
5458 if (unlikely(total < avg)) {
5459 /* Ensures that power won't end up being negative */
5462 available = total - avg;
5465 if (unlikely((s64)total < SCHED_POWER_SCALE))
5466 total = SCHED_POWER_SCALE;
5468 total >>= SCHED_POWER_SHIFT;
5470 return div_u64(available, total);
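
/*
 * E.g. if rt (and irq) time accounts for about a quarter of the averaging
 * period, available/total works out to roughly 3/4 * SCHED_POWER_SCALE ~= 768,
 * i.e. only three quarters of this cpu is left for fair tasks.
 */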
5473 static void update_cpu_power(struct sched_domain *sd, int cpu)
5475 unsigned long weight = sd->span_weight;
5476 unsigned long power = SCHED_POWER_SCALE;
5477 struct sched_group *sdg = sd->groups;
5479 if ((sd->flags & SD_SHARE_CPUPOWER) && weight > 1) {
5480 if (sched_feat(ARCH_POWER))
5481 power *= arch_scale_smt_power(sd, cpu);
5483 power *= default_scale_smt_power(sd, cpu);
5485 power >>= SCHED_POWER_SHIFT;
5488 sdg->sgp->power_orig = power;
5490 if (sched_feat(ARCH_POWER))
5491 power *= arch_scale_freq_power(sd, cpu);
5493 power *= default_scale_freq_power(sd, cpu);
5495 power >>= SCHED_POWER_SHIFT;
5497 power *= scale_rt_power(cpu);
5498 power >>= SCHED_POWER_SHIFT;
5503 cpu_rq(cpu)->cpu_power = power;
5504 sdg->sgp->power = power;
5507 void update_group_power(struct sched_domain *sd, int cpu)
5509 struct sched_domain *child = sd->child;
5510 struct sched_group *group, *sdg = sd->groups;
5511 unsigned long power, power_orig;
5512 unsigned long interval;
5514 interval = msecs_to_jiffies(sd->balance_interval);
5515 interval = clamp(interval, 1UL, max_load_balance_interval);
5516 sdg->sgp->next_update = jiffies + interval;
5519 update_cpu_power(sd, cpu);
5523 power_orig = power = 0;
5525 if (child->flags & SD_OVERLAP) {
5527 * SD_OVERLAP domains cannot assume that child groups
5528 * span the current group.
5531 for_each_cpu(cpu, sched_group_cpus(sdg)) {
5532 struct sched_group_power *sgp;
5533 struct rq *rq = cpu_rq(cpu);
5536 * build_sched_domains() -> init_sched_groups_power()
5537 * gets here before we've attached the domains to the
5540 * Use power_of(), which is set irrespective of domains
5541 * in update_cpu_power().
5543 * This avoids power/power_orig from being 0 and
5544 * causing divide-by-zero issues on boot.
5546 * Runtime updates will correct power_orig.
5548 if (unlikely(!rq->sd)) {
5549 power_orig += power_of(cpu);
5550 power += power_of(cpu);
5554 sgp = rq->sd->groups->sgp;
5555 power_orig += sgp->power_orig;
5556 power += sgp->power;
5560 * !SD_OVERLAP domains can assume that child groups
5561 * span the current group.
5564 group = child->groups;
5566 power_orig += group->sgp->power_orig;
5567 power += group->sgp->power;
5568 group = group->next;
5569 } while (group != child->groups);
5572 sdg->sgp->power_orig = power_orig;
5573 sdg->sgp->power = power;
5577 * Try and fix up capacity for tiny siblings, this is needed when
5578 * things like SD_ASYM_PACKING need f_b_g to select another sibling
5579 * which on its own isn't powerful enough.
5581 * See update_sd_pick_busiest() and check_asym_packing().
5584 fix_small_capacity(struct sched_domain *sd, struct sched_group *group)
5587 * Only siblings can have significantly less than SCHED_POWER_SCALE
5589 if (!(sd->flags & SD_SHARE_CPUPOWER))
5593 * If ~90% of the cpu_power is still there, we're good.
5595 if (group->sgp->power * 32 > group->sgp->power_orig * 29)
5602 * Group imbalance indicates (and tries to solve) the problem where balancing
5603 * groups is inadequate due to tsk_cpus_allowed() constraints.
5605 * Imagine a situation of two groups of 4 cpus each and 4 tasks each with a
5606 * cpumask covering 1 cpu of the first group and 3 cpus of the second group.
5609 * { 0 1 2 3 } { 4 5 6 7 }
5612 * If we were to balance group-wise we'd place two tasks in the first group and
5613 * two tasks in the second group. Clearly this is undesired as it will overload
5614 * cpu 3 and leave one of the cpus in the second group unused.
5616 * The current solution to this issue is detecting the skew in the first group
5617 * by noticing the lower domain failed to reach balance and had difficulty
5618 * moving tasks due to affinity constraints.
5620 * When this is so detected, this group becomes a candidate for busiest; see
5621 * update_sd_pick_busiest(). And calculate_imbalance() and
5622 * find_busiest_group() avoid some of the usual balance conditions to allow it
5623 * to create an effective group imbalance.
5625 * This is a somewhat tricky proposition since the next run might not find the
5626 * group imbalance and decide the groups need to be balanced again. A most
5627 * subtle and fragile situation.
5630 static inline int sg_imbalanced(struct sched_group *group)
5632 return group->sgp->imbalance;
5636 * Compute the group capacity.
5638 * Avoid the issue where N*frac(smt_power) >= 1 creates 'phantom' cores by
5639 * first dividing out the smt factor and computing the actual number of cores
5640 * and limit power unit capacity with that.
5642 static inline int sg_capacity(struct lb_env *env, struct sched_group *group)
5644 unsigned int capacity, smt, cpus;
5645 unsigned int power, power_orig;
5647 power = group->sgp->power;
5648 power_orig = group->sgp->power_orig;
5649 cpus = group->group_weight;
5651 /* smt := ceil(cpus / power), assumes: 1 < smt_power < 2 */
5652 smt = DIV_ROUND_UP(SCHED_POWER_SCALE * cpus, power_orig);
5653 capacity = cpus / smt; /* cores */
5655 capacity = min_t(unsigned, capacity, DIV_ROUND_CLOSEST(power, SCHED_POWER_SCALE));
5657 capacity = fix_small_capacity(env->sd, group);
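
/*
 * Illustrative example, assuming the default ~15% smt_gain: a group of
 * 8 SMT siblings (4 cores) with per-cpu power_orig of ~589 has a group
 * power_orig of ~4712. Rounding that against SCHED_POWER_SCALE would
 * suggest 5 'phantom' cores, whereas smt = DIV_ROUND_UP(8 * 1024, 4712) = 2
 * and capacity = 8 / 2 = 4 real cores.
 */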
5663 * update_sg_lb_stats - Update sched_group's statistics for load balancing.
5664 * @env: The load balancing environment.
5665 * @group: sched_group whose statistics are to be updated.
5666 * @load_idx: Load index of sched_domain of this_cpu for load calc.
5667 * @local_group: Does group contain this_cpu.
5668 * @sgs: variable to hold the statistics for this group.
5670 static inline void update_sg_lb_stats(struct lb_env *env,
5671 struct sched_group *group, int load_idx,
5672 int local_group, struct sg_lb_stats *sgs)
5677 memset(sgs, 0, sizeof(*sgs));
5679 for_each_cpu_and(i, sched_group_cpus(group), env->cpus) {
5680 struct rq *rq = cpu_rq(i);
5682 /* Bias balancing toward cpus of our domain */
5684 load = target_load(i, load_idx);
5686 load = source_load(i, load_idx);
5688 sgs->group_load += load;
5689 sgs->sum_nr_running += rq->nr_running;
5690 #ifdef CONFIG_NUMA_BALANCING
5691 sgs->nr_numa_running += rq->nr_numa_running;
5692 sgs->nr_preferred_running += rq->nr_preferred_running;
5694 sgs->sum_weighted_load += weighted_cpuload(i);
5699 /* Adjust by relative CPU power of the group */
5700 sgs->group_power = group->sgp->power;
5701 sgs->avg_load = (sgs->group_load*SCHED_POWER_SCALE) / sgs->group_power;
5703 if (sgs->sum_nr_running)
5704 sgs->load_per_task = sgs->sum_weighted_load / sgs->sum_nr_running;
5706 sgs->group_weight = group->group_weight;
5708 sgs->group_imb = sg_imbalanced(group);
5709 sgs->group_capacity = sg_capacity(env, group);
5711 if (sgs->group_capacity > sgs->sum_nr_running)
5712 sgs->group_has_capacity = 1;
5716 * update_sd_pick_busiest - return 1 on busiest group
5717 * @env: The load balancing environment.
5718 * @sds: sched_domain statistics
5719 * @sg: sched_group candidate to be checked for being the busiest
5720 * @sgs: sched_group statistics
5722 * Determine if @sg is a busier group than the previously selected
5725 * Return: %true if @sg is a busier group than the previously selected
5726 * busiest group. %false otherwise.
5728 static bool update_sd_pick_busiest(struct lb_env *env,
5729 struct sd_lb_stats *sds,
5730 struct sched_group *sg,
5731 struct sg_lb_stats *sgs)
5733 if (sgs->avg_load <= sds->busiest_stat.avg_load)
5736 if (sgs->sum_nr_running > sgs->group_capacity)
5743 * ASYM_PACKING needs to move all the work to the lowest
5744 * numbered CPUs in the group, therefore mark all groups
5745 * higher than ourself as busy.
5747 if ((env->sd->flags & SD_ASYM_PACKING) && sgs->sum_nr_running &&
5748 env->dst_cpu < group_first_cpu(sg)) {
5752 if (group_first_cpu(sds->busiest) > group_first_cpu(sg))
5759 #ifdef CONFIG_NUMA_BALANCING
5760 static inline enum fbq_type fbq_classify_group(struct sg_lb_stats *sgs)
5762 if (sgs->sum_nr_running > sgs->nr_numa_running)
5764 if (sgs->sum_nr_running > sgs->nr_preferred_running)
5769 static inline enum fbq_type fbq_classify_rq(struct rq *rq)
5771 if (rq->nr_running > rq->nr_numa_running)
5773 if (rq->nr_running > rq->nr_preferred_running)
5778 static inline enum fbq_type fbq_classify_group(struct sg_lb_stats *sgs)
5783 static inline enum fbq_type fbq_classify_rq(struct rq *rq)
5787 #endif /* CONFIG_NUMA_BALANCING */
5790 * update_sd_lb_stats - Update sched_domain's statistics for load balancing.
5791 * @env: The load balancing environment.
5792 * @sds: variable to hold the statistics for this sched_domain.
5794 static inline void update_sd_lb_stats(struct lb_env *env, struct sd_lb_stats *sds)
5796 struct sched_domain *child = env->sd->child;
5797 struct sched_group *sg = env->sd->groups;
5798 struct sg_lb_stats tmp_sgs;
5799 int load_idx, prefer_sibling = 0;
5801 if (child && child->flags & SD_PREFER_SIBLING)
5804 load_idx = get_sd_load_idx(env->sd, env->idle);
5807 struct sg_lb_stats *sgs = &tmp_sgs;
5810 local_group = cpumask_test_cpu(env->dst_cpu, sched_group_cpus(sg));
5813 sgs = &sds->local_stat;
5815 if (env->idle != CPU_NEWLY_IDLE ||
5816 time_after_eq(jiffies, sg->sgp->next_update))
5817 update_group_power(env->sd, env->dst_cpu);
5820 update_sg_lb_stats(env, sg, load_idx, local_group, sgs);
5826 * In case the child domain prefers tasks go to siblings
5827 * first, lower the sg capacity to one so that we'll try
5828 * and move all the excess tasks away. We lower the capacity
5829 * of a group only if the local group has the capacity to fit
5830 * these excess tasks, i.e. nr_running < group_capacity. The
5831 * extra check prevents the case where you always pull from the
5832 * heaviest group when it is already under-utilized (possible
5833 * when a large weight task outweighs the tasks on the system).
5835 if (prefer_sibling && sds->local &&
5836 sds->local_stat.group_has_capacity)
5837 sgs->group_capacity = min(sgs->group_capacity, 1U);
5839 if (update_sd_pick_busiest(env, sds, sg, sgs)) {
5841 sds->busiest_stat = *sgs;
5845 /* Now, start updating sd_lb_stats */
5846 sds->total_load += sgs->group_load;
5847 sds->total_pwr += sgs->group_power;
5850 } while (sg != env->sd->groups);
5852 if (env->sd->flags & SD_NUMA)
5853 env->fbq_type = fbq_classify_group(&sds->busiest_stat);
5857 * check_asym_packing - Check to see if the group is packed into the
5860 * This is primarily intended to be used at the sibling level. Some
5861 * cores like POWER7 prefer to use lower numbered SMT threads. In the
5862 * case of POWER7, it can move to lower SMT modes only when higher
5863 * threads are idle. When in lower SMT modes, the threads will
5864 * perform better since they share less core resources. Hence when we
5865 * have idle threads, we want them to be the higher ones.
5867 * This packing function is run on idle threads. It checks to see if
5868 * the busiest CPU in this domain (core in the P7 case) has a higher
5869 * CPU number than the packing function is being run on. Here we are
5870 * assuming a lower CPU number will be equivalent to a lower SMT thread
5873 * Return: 1 when packing is required and a task should be moved to
5874 * this CPU. The amount of the imbalance is returned in env->imbalance.
5876 * @env: The load balancing environment.
5877 * @sds: Statistics of the sched_domain which is to be packed
5879 static int check_asym_packing(struct lb_env *env, struct sd_lb_stats *sds)
5883 if (!(env->sd->flags & SD_ASYM_PACKING))
5889 busiest_cpu = group_first_cpu(sds->busiest);
5890 if (env->dst_cpu > busiest_cpu)
5893 env->imbalance = DIV_ROUND_CLOSEST(
5894 sds->busiest_stat.avg_load * sds->busiest_stat.group_power,
5901 * fix_small_imbalance - Calculate the minor imbalance that exists
5902 * amongst the groups of a sched_domain, during
5904 * @env: The load balancing environment.
5905 * @sds: Statistics of the sched_domain whose imbalance is to be calculated.
5908 void fix_small_imbalance(struct lb_env *env, struct sd_lb_stats *sds)
5910 unsigned long tmp, pwr_now = 0, pwr_move = 0;
5911 unsigned int imbn = 2;
5912 unsigned long scaled_busy_load_per_task;
5913 struct sg_lb_stats *local, *busiest;
5915 local = &sds->local_stat;
5916 busiest = &sds->busiest_stat;
5918 if (!local->sum_nr_running)
5919 local->load_per_task = cpu_avg_load_per_task(env->dst_cpu);
5920 else if (busiest->load_per_task > local->load_per_task)
5923 scaled_busy_load_per_task =
5924 (busiest->load_per_task * SCHED_POWER_SCALE) /
5925 busiest->group_power;
5927 if (busiest->avg_load + scaled_busy_load_per_task >=
5928 local->avg_load + (scaled_busy_load_per_task * imbn)) {
5929 env->imbalance = busiest->load_per_task;
5934 * OK, we don't have enough imbalance to justify moving tasks;
5935 * however, we may be able to increase total CPU power used by
5939 pwr_now += busiest->group_power *
5940 min(busiest->load_per_task, busiest->avg_load);
5941 pwr_now += local->group_power *
5942 min(local->load_per_task, local->avg_load);
5943 pwr_now /= SCHED_POWER_SCALE;
5945 /* Amount of load we'd subtract */
5946 tmp = (busiest->load_per_task * SCHED_POWER_SCALE) /
5947 busiest->group_power;
5948 if (busiest->avg_load > tmp) {
5949 pwr_move += busiest->group_power *
5950 min(busiest->load_per_task,
5951 busiest->avg_load - tmp);
5954 /* Amount of load we'd add */
5955 if (busiest->avg_load * busiest->group_power <
5956 busiest->load_per_task * SCHED_POWER_SCALE) {
5957 tmp = (busiest->avg_load * busiest->group_power) /
5960 tmp = (busiest->load_per_task * SCHED_POWER_SCALE) /
5963 pwr_move += local->group_power *
5964 min(local->load_per_task, local->avg_load + tmp);
5965 pwr_move /= SCHED_POWER_SCALE;
5967 /* Move if we gain throughput */
5968 if (pwr_move > pwr_now)
5969 env->imbalance = busiest->load_per_task;
5973 * calculate_imbalance - Calculate the amount of imbalance present within the
5974 * groups of a given sched_domain during load balance.
5975 * @env: load balance environment
5976 * @sds: statistics of the sched_domain whose imbalance is to be calculated.
5978 static inline void calculate_imbalance(struct lb_env *env, struct sd_lb_stats *sds)
5980 unsigned long max_pull, load_above_capacity = ~0UL;
5981 struct sg_lb_stats *local, *busiest;
5983 local = &sds->local_stat;
5984 busiest = &sds->busiest_stat;
5986 if (busiest->group_imb) {
5988 * In the group_imb case we cannot rely on group-wide averages
5989 * to ensure cpu-load equilibrium, look at wider averages. XXX
5991 busiest->load_per_task =
5992 min(busiest->load_per_task, sds->avg_load);
5996 * In the presence of smp nice balancing, certain scenarios can have
5997 * max load less than avg load (as we skip the groups at or below
5998 * its cpu_power while calculating max_load).
6000 if (busiest->avg_load <= sds->avg_load ||
6001 local->avg_load >= sds->avg_load) {
6003 return fix_small_imbalance(env, sds);
6006 if (!busiest->group_imb) {
6008 * Don't want to pull so many tasks that a group would go idle.
6009 * Except of course for the group_imb case, since then we might
6010 * have to drop below capacity to reach cpu-load equilibrium.
6012 load_above_capacity =
6013 (busiest->sum_nr_running - busiest->group_capacity);
6015 load_above_capacity *= (SCHED_LOAD_SCALE * SCHED_POWER_SCALE);
6016 load_above_capacity /= busiest->group_power;
6020 * We're trying to get all the cpus to the average_load, so we don't
6021 * want to push ourselves above the average load, nor do we wish to
6022 * reduce the max loaded cpu below the average load. At the same time,
6023 * we also don't want to reduce the group load below the group capacity
6024 * (so that we can implement power-savings policies etc). Thus we look
6025 * for the minimum possible imbalance.
6027 max_pull = min(busiest->avg_load - sds->avg_load, load_above_capacity);
6029 /* How much load to actually move to equalise the imbalance */
6030 env->imbalance = min(
6031 max_pull * busiest->group_power,
6032 (sds->avg_load - local->avg_load) * local->group_power
6033 ) / SCHED_POWER_SCALE;
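
	/*
	 * E.g. with group powers equal to SCHED_POWER_SCALE, a busiest
	 * avg_load of 1536, a local avg_load of 512 and a domain average of
	 * 1024 (and ample load above capacity), max_pull is 512 and the
	 * resulting imbalance is min(512, 512) = 512: just enough to bring
	 * both groups to the domain average.
	 */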
6036 * if *imbalance is less than the average load per runnable task
6037 * there is no guarantee that any tasks will be moved so we'll have
6038 * a think about bumping its value to force at least one task to be
6041 if (env->imbalance < busiest->load_per_task)
6042 return fix_small_imbalance(env, sds);
6045 /******* find_busiest_group() helpers end here *********************/
6048 * find_busiest_group - Returns the busiest group within the sched_domain
6049 * if there is an imbalance. If there isn't an imbalance, and
6050 * the user has opted for power-savings, it returns a group whose
6051 * CPUs can be put to idle by rebalancing those tasks elsewhere, if
6052 * such a group exists.
6054 * Also calculates the amount of weighted load which should be moved
6055 * to restore balance.
6057 * @env: The load balancing environment.
6059 * Return: - The busiest group if imbalance exists.
6060 * - If no imbalance and user has opted for power-savings balance,
6061 * return the least loaded group whose CPUs can be
6062 * put to idle by rebalancing its tasks onto our group.
6064 static struct sched_group *find_busiest_group(struct lb_env *env)
6066 struct sg_lb_stats *local, *busiest;
6067 struct sd_lb_stats sds;
6069 init_sd_lb_stats(&sds);
6072 * Compute the various statistics relevant for load balancing at
6075 update_sd_lb_stats(env, &sds);
6076 local = &sds.local_stat;
6077 busiest = &sds.busiest_stat;
6079 if ((env->idle == CPU_IDLE || env->idle == CPU_NEWLY_IDLE) &&
6080 check_asym_packing(env, &sds))
6083 /* There is no busy sibling group to pull tasks from */
6084 if (!sds.busiest || busiest->sum_nr_running == 0)
6087 sds.avg_load = (SCHED_POWER_SCALE * sds.total_load) / sds.total_pwr;
6090 * If the busiest group is imbalanced the below checks don't
6091 * work because they assume all things are equal, which typically
6092 * isn't true due to cpus_allowed constraints and the like.
6094 if (busiest->group_imb)
6097 /* SD_BALANCE_NEWIDLE trumps SMP nice when underutilized */
6098 if (env->idle == CPU_NEWLY_IDLE && local->group_has_capacity &&
6099 !busiest->group_has_capacity)
6103 * If the local group is more busy than the selected busiest group
6104 * don't try and pull any tasks.
6106 if (local->avg_load >= busiest->avg_load)
6110 * Don't pull any tasks if this group is already above the domain
6113 if (local->avg_load >= sds.avg_load)
6116 if (env->idle == CPU_IDLE) {
6118 * This cpu is idle. If the busiest group doesn't
6119 * have more tasks than the number of available cpus and
6120 * there is no imbalance between this and the busiest group
6121 * wrt idle cpus, it is balanced.
6123 if ((local->idle_cpus < busiest->idle_cpus) &&
6124 busiest->sum_nr_running <= busiest->group_weight)
6128 * In the CPU_NEWLY_IDLE, CPU_NOT_IDLE cases, use
6129 * imbalance_pct to be conservative.
6131 if (100 * busiest->avg_load <=
6132 env->sd->imbalance_pct * local->avg_load)
6137 /* Looks like there is an imbalance. Compute it */
6138 calculate_imbalance(env, &sds);
6147 * find_busiest_queue - find the busiest runqueue among the cpus in group.
6149 static struct rq *find_busiest_queue(struct lb_env *env,
6150 struct sched_group *group)
6152 struct rq *busiest = NULL, *rq;
6153 unsigned long busiest_load = 0, busiest_power = 1;
6156 for_each_cpu_and(i, sched_group_cpus(group), env->cpus) {
6157 unsigned long power, capacity, wl;
6161 rt = fbq_classify_rq(rq);
6164 * We classify groups/runqueues into three groups:
6165 * - regular: there are !numa tasks
6166 * - remote: there are numa tasks that run on the 'wrong' node
6167 * - all: there is no distinction
6169 * In order to avoid migrating ideally placed numa tasks,
6170 * ignore those when there are better options.
6172 * If we ignore the actual busiest queue to migrate another
6173 * task, the next balance pass can still reduce the busiest
6174 * queue by moving tasks around inside the node.
6176 * If we cannot move enough load due to this classification
6177 * the next pass will adjust the group classification and
6178 * allow migration of more tasks.
6180 * Both cases only affect the total convergence complexity.
6182 if (rt > env->fbq_type)
6185 power = power_of(i);
6186 capacity = DIV_ROUND_CLOSEST(power, SCHED_POWER_SCALE);
6188 capacity = fix_small_capacity(env->sd, group);
6190 wl = weighted_cpuload(i);
6193 * When comparing with imbalance, use weighted_cpuload()
6194 * which is not scaled with the cpu power.
6196 if (capacity && rq->nr_running == 1 && wl > env->imbalance)
6200 * For the load comparisons with the other cpus, consider
6201 * the weighted_cpuload() scaled with the cpu power, so that
6202 * the load can be moved away from the cpu that is potentially
6203 * running at a lower capacity.
6205 * Thus we're looking for max(wl_i / power_i), crosswise
6206 * multiplication to rid ourselves of the division works out
6207 * to: wl_i * power_j > wl_j * power_i; where j is our
6210 if (wl * busiest_power > busiest_load * power) {
6212 busiest_power = power;
6221 * Max backoff if we encounter pinned tasks. Pretty arbitrary value, but
6222 * it does not matter much so long as it is large enough.
6224 #define MAX_PINNED_INTERVAL 512
6226 /* Working cpumask for load_balance and load_balance_newidle. */
6227 DEFINE_PER_CPU(cpumask_var_t, load_balance_mask);
6229 static int need_active_balance(struct lb_env *env)
6231 struct sched_domain *sd = env->sd;
6233 if (env->idle == CPU_NEWLY_IDLE) {
6236 * ASYM_PACKING needs to force migrate tasks from busy but
6237 * higher numbered CPUs in order to pack all tasks in the
6238 * lowest numbered CPUs.
6240 if ((sd->flags & SD_ASYM_PACKING) && env->src_cpu > env->dst_cpu)
6244 return unlikely(sd->nr_balance_failed > sd->cache_nice_tries+2);
6247 static int active_load_balance_cpu_stop(void *data);
6249 static int should_we_balance(struct lb_env *env)
6251 struct sched_group *sg = env->sd->groups;
6252 struct cpumask *sg_cpus, *sg_mask;
6253 int cpu, balance_cpu = -1;
6256 * In the newly idle case, we will allow all the cpus
6257 * to do the newly idle load balance.
6259 if (env->idle == CPU_NEWLY_IDLE)
6262 sg_cpus = sched_group_cpus(sg);
6263 sg_mask = sched_group_mask(sg);
6264 /* Try to find first idle cpu */
6265 for_each_cpu_and(cpu, sg_cpus, env->cpus) {
6266 if (!cpumask_test_cpu(cpu, sg_mask) || !idle_cpu(cpu))
6273 if (balance_cpu == -1)
6274 balance_cpu = group_balance_cpu(sg);
6277 * First idle cpu or the first cpu (busiest) in this sched group
6278 * is eligible for doing load balancing at this and above domains.
6280 return balance_cpu == env->dst_cpu;
6284 * Check this_cpu to ensure it is balanced within domain. Attempt to move
6285 * tasks if there is an imbalance.
6287 static int load_balance(int this_cpu, struct rq *this_rq,
6288 struct sched_domain *sd, enum cpu_idle_type idle,
6289 int *continue_balancing)
6291 int ld_moved, cur_ld_moved, active_balance = 0;
6292 struct sched_domain *sd_parent = sd->parent;
6293 struct sched_group *group;
6295 unsigned long flags;
6296 struct cpumask *cpus = __get_cpu_var(load_balance_mask);
6298 struct lb_env env = {
6300 .dst_cpu = this_cpu,
6302 .dst_grpmask = sched_group_cpus(sd->groups),
6304 .loop_break = sched_nr_migrate_break,
6310 * For NEWLY_IDLE load_balancing, we don't need to consider
6311 * other cpus in our group
6313 if (idle == CPU_NEWLY_IDLE)
6314 env.dst_grpmask = NULL;
6316 cpumask_copy(cpus, cpu_active_mask);
6318 schedstat_inc(sd, lb_count[idle]);
6321 if (!should_we_balance(&env)) {
6322 *continue_balancing = 0;
6326 group = find_busiest_group(&env);
6328 schedstat_inc(sd, lb_nobusyg[idle]);
6332 busiest = find_busiest_queue(&env, group);
6334 schedstat_inc(sd, lb_nobusyq[idle]);
6338 BUG_ON(busiest == env.dst_rq);
6340 schedstat_add(sd, lb_imbalance[idle], env.imbalance);
6343 if (busiest->nr_running > 1) {
6345 * Attempt to move tasks. If find_busiest_group has found
6346 * an imbalance but busiest->nr_running <= 1, the group is
6347 * still unbalanced. ld_moved simply stays zero, so it is
6348 * correctly treated as an imbalance.
6350 env.flags |= LBF_ALL_PINNED;
6351 env.src_cpu = busiest->cpu;
6352 env.src_rq = busiest;
6353 env.loop_max = min(sysctl_sched_nr_migrate, busiest->nr_running);
6356 local_irq_save(flags);
6357 double_rq_lock(env.dst_rq, busiest);
6360 * cur_ld_moved - load moved in current iteration
6361 * ld_moved - cumulative load moved across iterations
6363 cur_ld_moved = move_tasks(&env);
6364 ld_moved += cur_ld_moved;
6365 double_rq_unlock(env.dst_rq, busiest);
6366 local_irq_restore(flags);
6369 * some other cpu did the load balance for us.
6371 if (cur_ld_moved && env.dst_cpu != smp_processor_id())
6372 resched_cpu(env.dst_cpu);
6374 if (env.flags & LBF_NEED_BREAK) {
6375 env.flags &= ~LBF_NEED_BREAK;
6380 * Revisit (affine) tasks on src_cpu that couldn't be moved to
6381 * us and move them to an alternate dst_cpu in our sched_group
6382 * where they can run. The upper limit on how many times we
6384 * iterate on the same src_cpu depends on the number of cpus in our
6386 * This changes load balance semantics a bit on who can move
6387 * load to a given_cpu. In addition to the given_cpu itself
6388 * (or an ilb_cpu acting on its behalf where given_cpu is
6389 * nohz-idle), we now have balance_cpu in a position to move
6390 * load to given_cpu. In rare situations, this may cause
6391 * conflicts (balance_cpu and given_cpu/ilb_cpu deciding
6392 * _independently_ and at the _same_ time to move some load to
6393 * given_cpu) causing excess load to be moved to given_cpu.
6394 * This however should not happen so much in practice and
6395 * moreover subsequent load balance cycles should correct the
6396 * excess load moved.
6398 if ((env.flags & LBF_DST_PINNED) && env.imbalance > 0) {
6400 /* Prevent to re-select dst_cpu via env's cpus */
6401 cpumask_clear_cpu(env.dst_cpu, env.cpus);
6403 env.dst_rq = cpu_rq(env.new_dst_cpu);
6404 env.dst_cpu = env.new_dst_cpu;
6405 env.flags &= ~LBF_DST_PINNED;
6407 env.loop_break = sched_nr_migrate_break;
6410 * Go back to "more_balance" rather than "redo" since we
6411 * need to continue with same src_cpu.
6417 * We failed to reach balance because of affinity.
6420 int *group_imbalance = &sd_parent->groups->sgp->imbalance;
6422 if ((env.flags & LBF_SOME_PINNED) && env.imbalance > 0) {
6423 *group_imbalance = 1;
6424 } else if (*group_imbalance)
6425 *group_imbalance = 0;
6428 /* All tasks on this runqueue were pinned by CPU affinity */
6429 if (unlikely(env.flags & LBF_ALL_PINNED)) {
6430 cpumask_clear_cpu(cpu_of(busiest), cpus);
6431 if (!cpumask_empty(cpus)) {
6433 env.loop_break = sched_nr_migrate_break;
6441 schedstat_inc(sd, lb_failed[idle]);
6443 * Increment the failure counter only on periodic balance.
6444 * We do not want newidle balance, which can be very
6445 * frequent, to pollute the failure counter causing
6446 * excessive cache_hot migrations and active balances.
6448 if (idle != CPU_NEWLY_IDLE)
6449 sd->nr_balance_failed++;
6451 if (need_active_balance(&env)) {
6452 raw_spin_lock_irqsave(&busiest->lock, flags);
6454 /* don't kick the active_load_balance_cpu_stop,
6455 * if the curr task on busiest cpu can't be
6458 if (!cpumask_test_cpu(this_cpu,
6459 tsk_cpus_allowed(busiest->curr))) {
6460 raw_spin_unlock_irqrestore(&busiest->lock,
6462 env.flags |= LBF_ALL_PINNED;
6463 goto out_one_pinned;
6467 * ->active_balance synchronizes accesses to
6468 * ->active_balance_work. Once set, it's cleared
6469 * only after active load balance is finished.
6471 if (!busiest->active_balance) {
6472 busiest->active_balance = 1;
6473 busiest->push_cpu = this_cpu;
6476 raw_spin_unlock_irqrestore(&busiest->lock, flags);
6478 if (active_balance) {
6479 stop_one_cpu_nowait(cpu_of(busiest),
6480 active_load_balance_cpu_stop, busiest,
6481 &busiest->active_balance_work);
6485 * We've kicked active balancing, reset the failure
6488 sd->nr_balance_failed = sd->cache_nice_tries+1;
6491 sd->nr_balance_failed = 0;
6493 if (likely(!active_balance)) {
6494 /* We were unbalanced, so reset the balancing interval */
6495 sd->balance_interval = sd->min_interval;
6498 * If we've begun active balancing, start to back off. This
6499 * case may not be covered by the all_pinned logic if there
6500 * is only 1 task on the busy runqueue (because we don't call
6503 if (sd->balance_interval < sd->max_interval)
6504 sd->balance_interval *= 2;
6510 schedstat_inc(sd, lb_balanced[idle]);
6512 sd->nr_balance_failed = 0;
6515 /* tune up the balancing interval */
6516 if (((env.flags & LBF_ALL_PINNED) &&
6517 sd->balance_interval < MAX_PINNED_INTERVAL) ||
6518 (sd->balance_interval < sd->max_interval))
6519 sd->balance_interval *= 2;
6527 * idle_balance is called by schedule() if this_cpu is about to become
6528 * idle. Attempts to pull tasks from other CPUs.
6530 void idle_balance(int this_cpu, struct rq *this_rq)
6532 struct sched_domain *sd;
6533 int pulled_task = 0;
6534 unsigned long next_balance = jiffies + HZ;
6537 this_rq->idle_stamp = rq_clock(this_rq);
6539 if (this_rq->avg_idle < sysctl_sched_migration_cost)
6543 * Drop the rq->lock, but keep IRQ/preempt disabled.
6545 raw_spin_unlock(&this_rq->lock);
6547 update_blocked_averages(this_cpu);
6549 for_each_domain(this_cpu, sd) {
6550 unsigned long interval;
6551 int continue_balancing = 1;
6552 u64 t0, domain_cost;
6554 if (!(sd->flags & SD_LOAD_BALANCE))
6557 if (this_rq->avg_idle < curr_cost + sd->max_newidle_lb_cost)
6560 if (sd->flags & SD_BALANCE_NEWIDLE) {
6561 t0 = sched_clock_cpu(this_cpu);
6563 /* If we've pulled tasks over stop searching: */
6564 pulled_task = load_balance(this_cpu, this_rq,
6566 &continue_balancing);
6568 domain_cost = sched_clock_cpu(this_cpu) - t0;
6569 if (domain_cost > sd->max_newidle_lb_cost)
6570 sd->max_newidle_lb_cost = domain_cost;
6572 curr_cost += domain_cost;
6575 interval = msecs_to_jiffies(sd->balance_interval);
6576 if (time_after(next_balance, sd->last_balance + interval))
6577 next_balance = sd->last_balance + interval;
6579 this_rq->idle_stamp = 0;
6585 raw_spin_lock(&this_rq->lock);
6587 if (pulled_task || time_after(jiffies, this_rq->next_balance)) {
6589 * We are going idle. next_balance may be set based on
6590 * a busy processor. So reset next_balance.
6592 this_rq->next_balance = next_balance;
6595 if (curr_cost > this_rq->max_idle_balance_cost)
6596 this_rq->max_idle_balance_cost = curr_cost;
6600 * active_load_balance_cpu_stop is run by cpu stopper. It pushes
6601 * running tasks off the busiest CPU onto idle CPUs. It requires at
6602 * least 1 task to be running on each physical CPU where possible, and
6603 * avoids physical / logical imbalances.
6605 static int active_load_balance_cpu_stop(void *data)
6607 struct rq *busiest_rq = data;
6608 int busiest_cpu = cpu_of(busiest_rq);
6609 int target_cpu = busiest_rq->push_cpu;
6610 struct rq *target_rq = cpu_rq(target_cpu);
6611 struct sched_domain *sd;
6613 raw_spin_lock_irq(&busiest_rq->lock);
6615 /* make sure the requested cpu hasn't gone down in the meantime */
6616 if (unlikely(busiest_cpu != smp_processor_id() ||
6617 !busiest_rq->active_balance))
6620 /* Is there any task to move? */
6621 if (busiest_rq->nr_running <= 1)
6625 * This condition is "impossible"; if it occurs
6626 * we need to fix it. Originally reported by
6627 * Bjorn Helgaas on a 128-cpu setup.
6629 BUG_ON(busiest_rq == target_rq);
6631 /* move a task from busiest_rq to target_rq */
6632 double_lock_balance(busiest_rq, target_rq);
6634 /* Search for an sd spanning us and the target CPU. */
6636 for_each_domain(target_cpu, sd) {
6637 if ((sd->flags & SD_LOAD_BALANCE) &&
6638 cpumask_test_cpu(busiest_cpu, sched_domain_span(sd)))
6643 struct lb_env env = {
6645 .dst_cpu = target_cpu,
6646 .dst_rq = target_rq,
6647 .src_cpu = busiest_rq->cpu,
6648 .src_rq = busiest_rq,
6652 schedstat_inc(sd, alb_count);
6654 if (move_one_task(&env))
6655 schedstat_inc(sd, alb_pushed);
6657 schedstat_inc(sd, alb_failed);
6660 double_unlock_balance(busiest_rq, target_rq);
6662 busiest_rq->active_balance = 0;
6663 raw_spin_unlock_irq(&busiest_rq->lock);
6667 #ifdef CONFIG_NO_HZ_COMMON
6669 * idle load balancing details
6670 * - When one of the busy CPUs notices that there may be an idle rebalancing
6671 * needed, they will kick the idle load balancer, which then does idle
6672 * load balancing for all the idle CPUs.
6675 cpumask_var_t idle_cpus_mask;
6677 unsigned long next_balance; /* in jiffy units */
6678 } nohz ____cacheline_aligned;
6680 static inline int find_new_ilb(void)
6682 int ilb = cpumask_first(nohz.idle_cpus_mask);
6684 if (ilb < nr_cpu_ids && idle_cpu(ilb))
6691 * Kick a CPU to do the nohz balancing, if it is time for it. We pick the
6692 * nohz_load_balancer CPU (if there is one), otherwise fall back to any idle
6693 * CPU (if there is one).
6695 static void nohz_balancer_kick(void)
6699 nohz.next_balance++;
6701 ilb_cpu = find_new_ilb();
6703 if (ilb_cpu >= nr_cpu_ids)
6706 if (test_and_set_bit(NOHZ_BALANCE_KICK, nohz_flags(ilb_cpu)))
6709 * Use smp_send_reschedule() instead of resched_cpu().
6710 * This way we generate a sched IPI on the target cpu which
6711 * is idle. And the softirq performing nohz idle load balance
6712 * will be run before returning from the IPI.
6714 smp_send_reschedule(ilb_cpu);
6718 static inline void nohz_balance_exit_idle(int cpu)
6720 if (unlikely(test_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu)))) {
6721 cpumask_clear_cpu(cpu, nohz.idle_cpus_mask);
6722 atomic_dec(&nohz.nr_cpus);
6723 clear_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu));
6727 static inline void set_cpu_sd_state_busy(void)
6729 struct sched_domain *sd;
6730 int cpu = smp_processor_id();
6733 sd = rcu_dereference(per_cpu(sd_busy, cpu));
6735 if (!sd || !sd->nohz_idle)
6739 atomic_inc(&sd->groups->sgp->nr_busy_cpus);
6744 void set_cpu_sd_state_idle(void)
6746 struct sched_domain *sd;
6747 int cpu = smp_processor_id();
6750 sd = rcu_dereference(per_cpu(sd_busy, cpu));
6752 if (!sd || sd->nohz_idle)
6756 atomic_dec(&sd->groups->sgp->nr_busy_cpus);
6762 * This routine will record that the cpu is going idle with tick stopped.
6763 * This info will be used in performing idle load balancing in the future.
6765 void nohz_balance_enter_idle(int cpu)
6768 * If this cpu is going down, then nothing needs to be done.
6770 if (!cpu_active(cpu))
6773 if (test_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu)))
6776 cpumask_set_cpu(cpu, nohz.idle_cpus_mask);
6777 atomic_inc(&nohz.nr_cpus);
6778 set_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu));
static int sched_ilb_notifier(struct notifier_block *nfb,
			      unsigned long action, void *hcpu)
{
	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_DYING:
		nohz_balance_exit_idle(smp_processor_id());
		return NOTIFY_OK;
	default:
		return NOTIFY_DONE;
	}
}
#endif
static DEFINE_SPINLOCK(balancing);

/*
 * Scale the max load_balance interval with the number of CPUs in the system.
 * This trades load-balance latency on larger machines for less cross talk.
 */
void update_max_interval(void)
{
	max_load_balance_interval = HZ*num_online_cpus()/10;
}
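/*
 * Illustrative example (assuming HZ=1000 and 8 online CPUs): the cap works
 * out to 1000 * 8 / 10 = 800 jiffies, so no sched domain's balance interval
 * is ever stretched beyond roughly 800ms on such a machine.
 */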
/*
 * It checks each scheduling domain to see if it is due to be balanced,
 * and initiates a balancing operation if so.
 *
 * Balancing parameters are set up in init_sched_domains.
 */
static void rebalance_domains(struct rq *rq, enum cpu_idle_type idle)
{
	int continue_balancing = 1;
	int cpu = rq->cpu;
	unsigned long interval;
	struct sched_domain *sd;
	/* Earliest time when we have to do rebalance again */
	unsigned long next_balance = jiffies + 60*HZ;
	int update_next_balance = 0;
	int need_serialize, need_decay = 0;
	u64 max_cost = 0;

	update_blocked_averages(cpu);

	rcu_read_lock();
	for_each_domain(cpu, sd) {
		/*
		 * Decay the newidle max times here because this is a regular
		 * visit to all the domains. Decay ~1% per second.
		 */
		if (time_after(jiffies, sd->next_decay_max_lb_cost)) {
			sd->max_newidle_lb_cost =
				(sd->max_newidle_lb_cost * 253) / 256;
			sd->next_decay_max_lb_cost = jiffies + HZ;
			need_decay = 1;
		}
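		/*
		 * Illustrative numbers: 253/256 ~= 0.988, so each decay pass
		 * (at most one per second) trims slightly over 1% off the
		 * recorded cost, e.g. 1000000ns becomes ~988281ns.
		 */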
		max_cost += sd->max_newidle_lb_cost;

		if (!(sd->flags & SD_LOAD_BALANCE))
			continue;

		/*
		 * Stop the load balance at this level. There is another
		 * CPU in our sched group which is doing load balancing more
		 * actively.
		 */
		if (!continue_balancing) {
			if (need_decay)
				continue;
			break;
		}

		interval = sd->balance_interval;
		if (idle != CPU_IDLE)
			interval *= sd->busy_factor;

		/* scale ms to jiffies */
		interval = msecs_to_jiffies(interval);
		interval = clamp(interval, 1UL, max_load_balance_interval);

		need_serialize = sd->flags & SD_SERIALIZE;

		if (need_serialize) {
			if (!spin_trylock(&balancing))
				goto out;
		}

		if (time_after_eq(jiffies, sd->last_balance + interval)) {
			if (load_balance(cpu, rq, sd, idle, &continue_balancing)) {
				/*
				 * The LBF_DST_PINNED logic could have changed
				 * env->dst_cpu, so we can't know our idle
				 * state even if we migrated tasks. Update it.
				 */
				idle = idle_cpu(cpu) ? CPU_IDLE : CPU_NOT_IDLE;
			}
			sd->last_balance = jiffies;
		}
		if (need_serialize)
			spin_unlock(&balancing);
out:
		if (time_after(next_balance, sd->last_balance + interval)) {
			next_balance = sd->last_balance + interval;
			update_next_balance = 1;
		}
	}
	if (need_decay) {
		/*
		 * Ensure the rq-wide value also decays but keep it at a
		 * reasonable floor to avoid funnies with rq->avg_idle.
		 */
		rq->max_idle_balance_cost =
			max((u64)sysctl_sched_migration_cost, max_cost);
	}
	rcu_read_unlock();

	/*
	 * next_balance will be updated only when there is a need.
	 * When the cpu is attached to null domain for ex, it will not be
	 * updated.
	 */
	if (likely(update_next_balance))
		rq->next_balance = next_balance;
}
#ifdef CONFIG_NO_HZ_COMMON
/*
 * In CONFIG_NO_HZ_COMMON case, the idle balance kickee will do the
 * rebalancing for all the cpus for whom scheduler ticks are stopped.
 */
static void nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle)
{
	int this_cpu = this_rq->cpu;
	struct rq *rq;
	int balance_cpu;

	if (idle != CPU_IDLE ||
	    !test_bit(NOHZ_BALANCE_KICK, nohz_flags(this_cpu)))
		goto end;

	for_each_cpu(balance_cpu, nohz.idle_cpus_mask) {
		if (balance_cpu == this_cpu || !idle_cpu(balance_cpu))
			continue;

		/*
		 * If this cpu gets work to do, stop the load balancing
		 * work being done for other cpus. Next load
		 * balancing owner will pick it up.
		 */
		if (need_resched())
			break;

		rq = cpu_rq(balance_cpu);

		raw_spin_lock_irq(&rq->lock);
		update_rq_clock(rq);
		update_idle_cpu_load(rq);
		raw_spin_unlock_irq(&rq->lock);

		rebalance_domains(rq, CPU_IDLE);

		if (time_after(this_rq->next_balance, rq->next_balance))
			this_rq->next_balance = rq->next_balance;
	}
	nohz.next_balance = this_rq->next_balance;
end:
	clear_bit(NOHZ_BALANCE_KICK, nohz_flags(this_cpu));
}
/*
 * Current heuristic for kicking the idle load balancer in the presence
 * of an idle CPU in the system.
 *   - This rq has more than one task.
 *   - At any scheduler domain level, this cpu's scheduler group has multiple
 *     busy cpu's exceeding the group's power.
 *   - For SD_ASYM_PACKING, if the lower numbered cpu's in the scheduler
 *     domain span are idle.
 */
static inline int nohz_kick_needed(struct rq *rq)
{
	unsigned long now = jiffies;
	struct sched_domain *sd;
	struct sched_group_power *sgp;
	int nr_busy, cpu = rq->cpu;

	if (unlikely(rq->idle_balance))
		return 0;

	/*
	 * We may be recently in ticked or tickless idle mode. At the first
	 * busy tick after returning from idle, we will update the busy stats.
	 */
	set_cpu_sd_state_busy();
	nohz_balance_exit_idle(cpu);

	/*
	 * None are in tickless mode and hence no need for NOHZ idle load
	 * balancing.
	 */
	if (likely(!atomic_read(&nohz.nr_cpus)))
		return 0;

	if (time_before(now, nohz.next_balance))
		return 0;

	if (rq->nr_running >= 2)
		goto need_kick;

	rcu_read_lock();
	sd = rcu_dereference(per_cpu(sd_busy, cpu));
	if (sd) {
		sgp = sd->groups->sgp;
		nr_busy = atomic_read(&sgp->nr_busy_cpus);

		if (nr_busy > 1)
			goto need_kick_unlock;
	}

	sd = rcu_dereference(per_cpu(sd_asym, cpu));

	if (sd && (cpumask_first_and(nohz.idle_cpus_mask,
				     sched_domain_span(sd)) < cpu))
		goto need_kick_unlock;

	rcu_read_unlock();
	return 0;

need_kick_unlock:
	rcu_read_unlock();
need_kick:
	return 1;
}
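/*
 * Illustrative reading of the heuristic above: two runnable tasks on this rq
 * are always enough to kick the idle load balancer; a single runnable task
 * only kicks it when the local sched group already has more than one busy
 * CPU, or when an SD_ASYM_PACKING domain has a lower-numbered CPU sitting
 * idle.
 */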
#else
static void nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle) { }
#endif
/*
 * run_rebalance_domains is triggered when needed from the scheduler tick.
 * Also triggered for nohz idle balancing (with nohz_balancing_kick set).
 */
static void run_rebalance_domains(struct softirq_action *h)
{
	struct rq *this_rq = this_rq();
	enum cpu_idle_type idle = this_rq->idle_balance ?
						CPU_IDLE : CPU_NOT_IDLE;

	rebalance_domains(this_rq, idle);

	/*
	 * If this cpu has a pending nohz_balance_kick, then do the
	 * balancing on behalf of the other idle cpus whose ticks are
	 * stopped.
	 */
	nohz_idle_balance(this_rq, idle);
}
static inline int on_null_domain(struct rq *rq)
{
	return !rcu_dereference_sched(rq->sd);
}
/*
 * Trigger the SCHED_SOFTIRQ if it is time to do periodic load balancing.
 */
void trigger_load_balance(struct rq *rq)
{
	/* Don't need to rebalance while attached to NULL domain */
	if (unlikely(on_null_domain(rq)))
		return;

	if (time_after_eq(jiffies, rq->next_balance))
		raise_softirq(SCHED_SOFTIRQ);
#ifdef CONFIG_NO_HZ_COMMON
	if (nohz_kick_needed(rq))
		nohz_balancer_kick();
#endif
}
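/*
 * Note that the tick path only raises SCHED_SOFTIRQ here; the actual
 * balancing work runs later in softirq context via run_rebalance_domains(),
 * which keeps the tick handler itself cheap.
 */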
static void rq_online_fair(struct rq *rq)
{
	update_sysctl();
}

static void rq_offline_fair(struct rq *rq)
{
	update_sysctl();

	/* Ensure any throttled groups are reachable by pick_next_task */
	unthrottle_offline_cfs_rqs(rq);
}

#endif /* CONFIG_SMP */
/*
 * scheduler tick hitting a task of our scheduling class:
 */
static void task_tick_fair(struct rq *rq, struct task_struct *curr, int queued)
{
	struct cfs_rq *cfs_rq;
	struct sched_entity *se = &curr->se;

	for_each_sched_entity(se) {
		cfs_rq = cfs_rq_of(se);
		entity_tick(cfs_rq, se, queued);
	}

	if (numabalancing_enabled)
		task_tick_numa(rq, curr);

	update_rq_runnable_avg(rq, 1);
}
/*
 * called on fork with the child task as argument from the parent's context
 *  - child not yet on the tasklist
 *  - preemption disabled
 */
static void task_fork_fair(struct task_struct *p)
{
	struct cfs_rq *cfs_rq;
	struct sched_entity *se = &p->se, *curr;
	int this_cpu = smp_processor_id();
	struct rq *rq = this_rq();
	unsigned long flags;

	raw_spin_lock_irqsave(&rq->lock, flags);

	update_rq_clock(rq);

	cfs_rq = task_cfs_rq(current);
	curr = cfs_rq->curr;

	/*
	 * Not only the cpu but also the task_group of the parent might have
	 * been changed after parent->se.parent,cfs_rq were copied to
	 * child->se.parent,cfs_rq. So call __set_task_cpu() to make those
	 * of child point to valid ones.
	 */
	rcu_read_lock();
	__set_task_cpu(p, this_cpu);
	rcu_read_unlock();

	update_curr(cfs_rq);

	if (curr)
		se->vruntime = curr->vruntime;
	place_entity(cfs_rq, se, 1);

	if (sysctl_sched_child_runs_first && curr && entity_before(curr, se)) {
		/*
		 * Upon rescheduling, sched_class::put_prev_task() will place
		 * 'current' within the tree based on its new key value.
		 */
		swap(curr->vruntime, se->vruntime);
		resched_task(rq->curr);
	}

	se->vruntime -= cfs_rq->min_vruntime;

	raw_spin_unlock_irqrestore(&rq->lock, flags);
}
/*
 * Priority of the task has changed. Check to see if we preempt
 * the current task.
 */
static void
prio_changed_fair(struct rq *rq, struct task_struct *p, int oldprio)
{
	if (!p->se.on_rq)
		return;

	/*
	 * Reschedule if we are currently running on this runqueue and
	 * our priority decreased, or if we are not currently running on
	 * this runqueue and our priority is higher than the current's.
	 */
	if (rq->curr == p) {
		if (p->prio > oldprio)
			resched_task(rq->curr);
	} else
		check_preempt_curr(rq, p, 0);
}
static void switched_from_fair(struct rq *rq, struct task_struct *p)
{
	struct sched_entity *se = &p->se;
	struct cfs_rq *cfs_rq = cfs_rq_of(se);

	/*
	 * Ensure the task's vruntime is normalized, so that when it is
	 * switched back to the fair class the enqueue_entity(.flags=0) will
	 * do the right thing.
	 *
	 * If it was on_rq, then the dequeue_entity(.flags=0) will already
	 * have normalized the vruntime, if it was !on_rq, then only when
	 * the task is sleeping will it still have non-normalized vruntime.
	 */
	if (!se->on_rq && p->state != TASK_RUNNING) {
		/*
		 * Fix up our vruntime so that the current sleep doesn't
		 * cause 'unlimited' sleep bonus.
		 */
		place_entity(cfs_rq, se, 0);
		se->vruntime -= cfs_rq->min_vruntime;
	}
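	/*
	 * Illustrative example: "normalized" means relative to
	 * cfs_rq->min_vruntime. With min_vruntime = 1000000ns and
	 * se->vruntime = 1000500ns, the stored value becomes 500ns, so the
	 * task can later be enqueued on a cfs_rq with a different
	 * min_vruntime without gaining or losing service.
	 */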
#ifdef CONFIG_SMP
	/*
	 * Remove our load from contribution when we leave sched_fair
	 * and ensure we don't carry in an old decay_count if we
	 * switch back.
	 */
	if (se->avg.decay_count) {
		__synchronize_entity_decay(se);
		subtract_blocked_load_contrib(cfs_rq, se->avg.load_avg_contrib);
	}
#endif
}
/*
 * We switched to the sched_fair class.
 */
static void switched_to_fair(struct rq *rq, struct task_struct *p)
{
	if (!p->se.on_rq)
		return;

	/*
	 * We were most likely switched from sched_rt, so
	 * kick off the schedule if running, otherwise just see
	 * if we can still preempt the current task.
	 */
	if (rq->curr == p)
		resched_task(rq->curr);
	else
		check_preempt_curr(rq, p, 0);
}
/* Account for a task changing its policy or group.
 *
 * This routine is mostly called to set cfs_rq->curr field when a task
 * migrates between groups/classes.
 */
static void set_curr_task_fair(struct rq *rq)
{
	struct sched_entity *se = &rq->curr->se;

	for_each_sched_entity(se) {
		struct cfs_rq *cfs_rq = cfs_rq_of(se);

		set_next_entity(cfs_rq, se);
		/* ensure bandwidth has been allocated on our new cfs_rq */
		account_cfs_rq_runtime(cfs_rq, 0);
	}
}
void init_cfs_rq(struct cfs_rq *cfs_rq)
{
	cfs_rq->tasks_timeline = RB_ROOT;
	/* start just short of the u64 wrap so vruntime overflow handling gets exercised early */
	cfs_rq->min_vruntime = (u64)(-(1LL << 20));
#ifndef CONFIG_64BIT
	cfs_rq->min_vruntime_copy = cfs_rq->min_vruntime;
#endif
#ifdef CONFIG_SMP
	atomic64_set(&cfs_rq->decay_counter, 1);
	atomic_long_set(&cfs_rq->removed_load, 0);
#endif
}
#ifdef CONFIG_FAIR_GROUP_SCHED
static void task_move_group_fair(struct task_struct *p, int on_rq)
{
	struct cfs_rq *cfs_rq;
	/*
	 * If the task was not on the rq at the time of this cgroup movement
	 * it must have been asleep, sleeping tasks keep their ->vruntime
	 * absolute on their old rq until wakeup (needed for the fair sleeper
	 * bonus in place_entity()).
	 *
	 * If it was on the rq, we've just 'preempted' it, which does convert
	 * ->vruntime to a relative base.
	 *
	 * Make sure both cases convert their relative position when migrating
	 * to another cgroup's rq. This does somewhat interfere with the
	 * fair sleeper stuff for the first placement, but who cares.
	 */
	/*
	 * When !on_rq, vruntime of the task has usually NOT been normalized.
	 * But there are some cases where it has already been normalized:
	 *
	 * - Moving a forked child which is waiting for being woken up by
	 *   wake_up_new_task().
	 * - Moving a task which has been woken up by try_to_wake_up() and
	 *   waiting for actually being woken up by sched_ttwu_pending().
	 *
	 * To prevent boost or penalty in the new cfs_rq caused by delta
	 * min_vruntime between the two cfs_rqs, we skip vruntime adjustment.
	 */
	if (!on_rq && (!p->se.sum_exec_runtime || p->state == TASK_WAKING))
		on_rq = 1;

	if (!on_rq)
		p->se.vruntime -= cfs_rq_of(&p->se)->min_vruntime;
	set_task_rq(p, task_cpu(p));
	if (!on_rq) {
		cfs_rq = cfs_rq_of(&p->se);
		p->se.vruntime += cfs_rq->min_vruntime;
#ifdef CONFIG_SMP
		/*
		 * migrate_task_rq_fair() will have removed our previous
		 * contribution, but we must synchronize for ongoing future
		 * decay.
		 */
		p->se.avg.decay_count = atomic64_read(&cfs_rq->decay_counter);
		cfs_rq->blocked_load_avg += p->se.avg.load_avg_contrib;
#endif
	}
}
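/*
 * Illustrative example for the normalization above: a sleeping task moved
 * from a cfs_rq with min_vruntime = 100ms to one with min_vruntime = 40ms
 * would otherwise keep an absolute vruntime ~60ms to the right of the new
 * queue's minimum (or to the left, in the opposite case); subtracting the
 * old min_vruntime and adding the new one keeps its relative position
 * unchanged.
 */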
void free_fair_sched_group(struct task_group *tg)
{
	int i;

	destroy_cfs_bandwidth(tg_cfs_bandwidth(tg));

	for_each_possible_cpu(i) {
		if (tg->cfs_rq)
			kfree(tg->cfs_rq[i]);
		if (tg->se)
			kfree(tg->se[i]);
	}

	kfree(tg->cfs_rq);
	kfree(tg->se);
}
int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
{
	struct cfs_rq *cfs_rq;
	struct sched_entity *se;
	int i;

	tg->cfs_rq = kzalloc(sizeof(cfs_rq) * nr_cpu_ids, GFP_KERNEL);
	if (!tg->cfs_rq)
		goto err;
	tg->se = kzalloc(sizeof(se) * nr_cpu_ids, GFP_KERNEL);
	if (!tg->se)
		goto err;

	tg->shares = NICE_0_LOAD;

	init_cfs_bandwidth(tg_cfs_bandwidth(tg));

	for_each_possible_cpu(i) {
		cfs_rq = kzalloc_node(sizeof(struct cfs_rq),
				      GFP_KERNEL, cpu_to_node(i));
		if (!cfs_rq)
			goto err;

		se = kzalloc_node(sizeof(struct sched_entity),
				  GFP_KERNEL, cpu_to_node(i));
		if (!se)
			goto err_free_rq;

		init_cfs_rq(cfs_rq);
		init_tg_cfs_entry(tg, cfs_rq, se, i, parent->se[i]);
	}

	return 1;

err_free_rq:
	kfree(cfs_rq);
err:
	return 0;
}
void unregister_fair_sched_group(struct task_group *tg, int cpu)
{
	struct rq *rq = cpu_rq(cpu);
	unsigned long flags;

	/*
	 * Only empty task groups can be destroyed; so we can speculatively
	 * check on_list without danger of it being re-added.
	 */
	if (!tg->cfs_rq[cpu]->on_list)
		return;

	raw_spin_lock_irqsave(&rq->lock, flags);
	list_del_leaf_cfs_rq(tg->cfs_rq[cpu]);
	raw_spin_unlock_irqrestore(&rq->lock, flags);
}
void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq,
			struct sched_entity *se, int cpu,
			struct sched_entity *parent)
{
	struct rq *rq = cpu_rq(cpu);

	cfs_rq->tg = tg;
	cfs_rq->rq = rq;
	init_cfs_rq_runtime(cfs_rq);

	tg->cfs_rq[cpu] = cfs_rq;
	tg->se[cpu] = se;

	/* se could be NULL for root_task_group */
	if (!se)
		return;

	if (!parent)
		se->cfs_rq = &rq->cfs;
	else
		se->cfs_rq = parent->my_q;

	se->my_q = cfs_rq;
	/* guarantee group entities always have weight */
	update_load_set(&se->load, NICE_0_LOAD);
	se->parent = parent;
}
static DEFINE_MUTEX(shares_mutex);

int sched_group_set_shares(struct task_group *tg, unsigned long shares)
{
	int i;
	unsigned long flags;

	/*
	 * We can't change the weight of the root cgroup.
	 */
	if (!tg->se[0])
		return -EINVAL;

	shares = clamp(shares, scale_load(MIN_SHARES), scale_load(MAX_SHARES));

	mutex_lock(&shares_mutex);
	if (tg->shares == shares)
		goto done;

	tg->shares = shares;
	for_each_possible_cpu(i) {
		struct rq *rq = cpu_rq(i);
		struct sched_entity *se;

		se = tg->se[i];
		/* Propagate contribution to hierarchy */
		raw_spin_lock_irqsave(&rq->lock, flags);

		/* Possible calls to update_curr() need rq clock */
		update_rq_clock(rq);
		for_each_sched_entity(se)
			update_cfs_shares(group_cfs_rq(se));
		raw_spin_unlock_irqrestore(&rq->lock, flags);
	}

done:
	mutex_unlock(&shares_mutex);
	return 0;
}
#else /* CONFIG_FAIR_GROUP_SCHED */

void free_fair_sched_group(struct task_group *tg) { }

int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
{
	return 1;
}

void unregister_fair_sched_group(struct task_group *tg, int cpu) { }

#endif /* CONFIG_FAIR_GROUP_SCHED */
static unsigned int get_rr_interval_fair(struct rq *rq, struct task_struct *task)
{
	struct sched_entity *se = &task->se;
	unsigned int rr_interval = 0;

	/*
	 * Time slice is 0 for SCHED_OTHER tasks that are on an otherwise
	 * idle runqueue:
	 */
	if (rq->cfs.load.weight)
		rr_interval = NS_TO_JIFFIES(sched_slice(cfs_rq_of(se), se));

	return rr_interval;
}
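/*
 * Illustrative example (assuming unscaled defaults): sched_slice() hands out
 * a weight-proportional share of the latency target, so a nice-0 task
 * sharing the CPU with two other nice-0 tasks gets roughly 6ms / 3 = 2ms,
 * which NS_TO_JIFFIES() then truncates to whole ticks (2 jiffies at HZ=1000,
 * 0 at HZ=100).
 */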
/*
 * All the scheduling class methods:
 */
const struct sched_class fair_sched_class = {
	.next			= &idle_sched_class,
	.enqueue_task		= enqueue_task_fair,
	.dequeue_task		= dequeue_task_fair,
	.yield_task		= yield_task_fair,
	.yield_to_task		= yield_to_task_fair,

	.check_preempt_curr	= check_preempt_wakeup,

	.pick_next_task		= pick_next_task_fair,
	.put_prev_task		= put_prev_task_fair,

#ifdef CONFIG_SMP
	.select_task_rq		= select_task_rq_fair,
	.migrate_task_rq	= migrate_task_rq_fair,

	.rq_online		= rq_online_fair,
	.rq_offline		= rq_offline_fair,

	.task_waking		= task_waking_fair,
#endif

	.set_curr_task		= set_curr_task_fair,
	.task_tick		= task_tick_fair,
	.task_fork		= task_fork_fair,

	.prio_changed		= prio_changed_fair,
	.switched_from		= switched_from_fair,
	.switched_to		= switched_to_fair,

	.get_rr_interval	= get_rr_interval_fair,

#ifdef CONFIG_FAIR_GROUP_SCHED
	.task_move_group	= task_move_group_fair,
#endif
};
#ifdef CONFIG_SCHED_DEBUG
void print_cfs_stats(struct seq_file *m, int cpu)
{
	struct cfs_rq *cfs_rq;

	rcu_read_lock();
	for_each_leaf_cfs_rq(cpu_rq(cpu), cfs_rq)
		print_cfs_rq(m, cpu, cfs_rq);
	rcu_read_unlock();
}
#endif
__init void init_sched_fair_class(void)
{
#ifdef CONFIG_SMP
	open_softirq(SCHED_SOFTIRQ, run_rebalance_domains);

#ifdef CONFIG_NO_HZ_COMMON
	nohz.next_balance = jiffies;
	zalloc_cpumask_var(&nohz.idle_cpus_mask, GFP_NOWAIT);
	cpu_notifier(sched_ilb_notifier, 0);
#endif
#endif /* CONFIG_SMP */
}