#include <linux/cgroup.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/percpu.h>
#include <linux/printk.h>
#include <linux/reciprocal_div.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

#include "sched.h"

unsigned int sysctl_sched_cfs_boost __read_mostly;
/*
 * System energy normalization constants
 */
static struct target_nrg {
        unsigned long min_power;
        unsigned long max_power;
        struct reciprocal_value rdiv;
} schedtune_target_nrg;
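/*
 * The rdiv field caches a struct reciprocal_value for the normalization
 * range (max_power - min_power), so that schedtune_normalize_energy()
 * below can scale an energy delta with multiplies and shifts instead of
 * a division on a hot path. Both power bounds are filled in by
 * schedtune_init_late() once the energy model data is available.
 */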
/* Performance Boost region (B) threshold params */
static int perf_boost_idx;

/* Performance Constraint region (C) threshold params */
static int perf_constrain_idx;
/*
 * Performance-Energy (P-E) Space thresholds constants
 */
struct threshold_params {
        int nrg_gain;
        int cap_gain;
};

/*
 * System specific P-E space thresholds constants
 */
static struct threshold_params
threshold_gains[] = {
        { 0, 4 }, /* >=  0% */
        { 0, 4 }, /* >= 10% */
        { 1, 4 }, /* >= 20% */
        { 2, 4 }, /* >= 30% */
        { 3, 4 }, /* >= 40% */
        { 4, 3 }, /* >= 50% */
        { 4, 2 }, /* >= 60% */
        { 4, 1 }, /* >= 70% */
        { 4, 0 }, /* >= 80% */
        { 4, 0 }  /* >= 90% */
};
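/*
 * Indexing example: a boost of 25% selects perf_boost_idx = 25 / 10 = 2,
 * i.e. the { 1, 4 } entry above. In the Performance Boost region this
 * accepts a schedule candidate only when
 *
 *    cap_delta / nrg_delta >= cap_gain / nrg_gain = 4 / 1
 *
 * that is, at least four units of capacity gained per unit of extra
 * energy. Toward the >= 80% entry the required ratio drops to 0 / 4 = 0,
 * at which point any capacity gain justifies the energy increase.
 */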
static int
__schedtune_accept_deltas(int nrg_delta, int cap_delta,
                          int perf_boost_idx, int perf_constrain_idx)
{
        int payoff = -INT_MAX;

        /* Performance Boost (B) region */
        if (nrg_delta > 0 && cap_delta > 0) {
                /*
                 * Evaluate "Performance Boost" vs "Energy Increase"
                 * payoff criteria:
                 *    cap_delta / nrg_delta >= cap_gain / nrg_gain
                 * which, since nrg_delta and nrg_gain are both positive,
                 * is equivalent to:
                 *    cap_delta * nrg_gain >= nrg_delta * cap_gain
                 */
                payoff  = cap_delta * threshold_gains[perf_boost_idx].nrg_gain;
                payoff -= nrg_delta * threshold_gains[perf_boost_idx].cap_gain;

                return payoff;
        }

        /* Performance Constraint (C) region */
        if (nrg_delta < 0 && cap_delta < 0) {
                /*
                 * Evaluate "Performance Constraint" vs "Energy Savings"
                 * payoff criteria:
                 *    cap_delta / nrg_delta <= cap_gain / nrg_gain
                 * which, since nrg_delta is negative, flips into:
                 *    cap_delta * nrg_gain >= nrg_delta * cap_gain
                 */
                payoff  = cap_delta * threshold_gains[perf_constrain_idx].nrg_gain;
                payoff -= nrg_delta * threshold_gains[perf_constrain_idx].cap_gain;

                return payoff;
        }

        /* Default: reject schedule candidate */
        return payoff;
}
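/*
 * Worked example, assuming perf_boost_idx = 5 (the { 4, 3 } entry):
 * a candidate with cap_delta = 100 and nrg_delta = 60 yields
 *
 *    payoff = 100 * 4 - 60 * 3 = 220 > 0   => accept
 *
 * while cap_delta = 100 and nrg_delta = 200 yields
 *
 *    payoff = 100 * 4 - 200 * 3 = -200 < 0 => reject
 *
 * It is the sign of the payoff, not its magnitude, that callers are
 * expected to act upon.
 */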
#ifdef CONFIG_CGROUP_SCHEDTUNE

/*
 * EAS scheduler tunables for task groups.
 */

/* SchedTune tunables for a group of tasks */
struct schedtune {
        /* SchedTune CGroup subsystem */
        struct cgroup_subsys_state css;

        /* Boost group allocated ID */
        int idx;

        /* Boost value for tasks on that SchedTune CGroup */
        int boost;

        /* Performance Boost (B) region threshold params */
        int perf_boost_idx;

        /* Performance Constraint (C) region threshold params */
        int perf_constrain_idx;
};
static inline struct schedtune *css_st(struct cgroup_subsys_state *css)
{
        return css ? container_of(css, struct schedtune, css) : NULL;
}

static inline struct schedtune *task_schedtune(struct task_struct *tsk)
{
        return css_st(task_css(tsk, schedtune_cgrp_id));
}

static inline struct schedtune *parent_st(struct schedtune *st)
{
        return css_st(st->css.parent);
}
/*
 * SchedTune root control group
 * The root control group is used to define a system-wide boosting tuning,
 * which is applied to all tasks in the system.
 * Task specific boost tuning could be specified by creating and
 * configuring a child control group under the root one.
 * By default, system-wide boosting is disabled, i.e. no boosting is applied
 * to tasks which do not belong to a child control group.
 */
static struct schedtune
root_schedtune = {
        .boost              = 0,
        .perf_boost_idx     = 0,
        .perf_constrain_idx = 0,
};
static int
schedtune_accept_deltas(int nrg_delta, int cap_delta,
                        struct task_struct *task)
{
        struct schedtune *ct;
        int perf_boost_idx;
        int perf_constrain_idx;

        /* Optimal (O) region */
        if (nrg_delta < 0 && cap_delta > 0)
                return INT_MAX;

        /* Suboptimal (S) region */
        if (nrg_delta > 0 && cap_delta < 0)
                return -INT_MAX;

        /* Get task specific perf Boost/Constraints indexes */
        rcu_read_lock();
        ct = task_schedtune(task);
        perf_boost_idx = ct->perf_boost_idx;
        perf_constrain_idx = ct->perf_constrain_idx;
        rcu_read_unlock();

        return __schedtune_accept_deltas(nrg_delta, cap_delta,
                        perf_boost_idx, perf_constrain_idx);
}
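/*
 * Pictorially, the four regions of the Performance-Energy space
 * evaluated by the two functions above:
 *
 *                      cap_delta > 0
 *                           ^
 *          Optimal (O)      |      Boost (B)
 *        always accepted    |  payoff threshold
 *                           |
 *   nrg_delta < 0 <---------+---------> nrg_delta > 0
 *                           |
 *        Constraint (C)     |   Suboptimal (S)
 *       payoff threshold    |  always rejected
 *                           v
 *                      cap_delta < 0
 */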
/*
 * Maximum number of boost groups to support
 * When per-task boosting is used we still allow only a limited number of
 * boost groups for two main reasons:
 * 1. on a real system we usually have only a few classes of workloads which
 *    make sense to boost with different values (e.g. background vs foreground
 *    tasks, interactive vs low-priority tasks)
 * 2. a limited number allows for a simpler and more memory/time efficient
 *    implementation especially for the computation of the per-CPU boost
 *    value
 */
#define BOOSTGROUPS_COUNT 4

/* Array of configured boostgroups */
static struct schedtune *allocated_group[BOOSTGROUPS_COUNT] = {
        &root_schedtune,
        NULL,
};
/* SchedTune boost groups
 * Keep track of all the boost groups which impact a CPU, for example when a
 * CPU has two RUNNABLE tasks belonging to two different boost groups and thus
 * likely with different boost values.
 * Since on each system we expect only a limited number of boost groups, here
 * we use a simple array to keep track of the metrics required to compute the
 * maximum per-CPU boosting value.
 */
struct boost_groups {
        /* Maximum boost value for all RUNNABLE tasks on a CPU */
        int boost_max;
        struct {
                /* The boost for tasks on that boost group */
                int boost;
                /* Count of RUNNABLE tasks on that boost group */
                int tasks;
        } group[BOOSTGROUPS_COUNT];
};

/* Boost groups affecting each CPU in the system */
DEFINE_PER_CPU(struct boost_groups, cpu_boost_groups);
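/*
 * Example: if a CPU has one RUNNABLE task from a group with boost 10 and
 * one from a group with boost 30, the CPU runs with boost_max = 30; once
 * the second task is dequeued, schedtune_cpu_update() drops boost_max
 * back to 10 (or to the root group's boost if no boosted task is left).
 */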
static void
schedtune_cpu_update(int cpu)
{
        struct boost_groups *bg;
        int boost_max;
        int idx;

        bg = &per_cpu(cpu_boost_groups, cpu);

        /* The root boost group is always active */
        boost_max = bg->group[0].boost;
        for (idx = 1; idx < BOOSTGROUPS_COUNT; ++idx) {
                /*
                 * A boost group affects a CPU only if it has
                 * RUNNABLE tasks on that CPU
                 */
                if (bg->group[idx].tasks == 0)
                        continue;
                boost_max = max(boost_max, bg->group[idx].boost);
        }

        bg->boost_max = boost_max;
}
static int
schedtune_boostgroup_update(int idx, int boost)
{
        struct boost_groups *bg;
        int cur_boost_max;
        int old_boost;
        int cpu;

        /* Update per CPU boost groups */
        for_each_possible_cpu(cpu) {
                bg = &per_cpu(cpu_boost_groups, cpu);

                /*
                 * Keep track of current boost values to compute the per CPU
                 * maximum only when it has been affected by the new value of
                 * the updated boost group
                 */
                cur_boost_max = bg->boost_max;
                old_boost = bg->group[idx].boost;

                /* Update the boost value of this boost group */
                bg->group[idx].boost = boost;

                /* Check if this update increases the current max */
                if (boost > cur_boost_max && bg->group[idx].tasks) {
                        bg->boost_max = boost;
                        continue;
                }

                /* Check if this update has decreased the current max */
                if (cur_boost_max == old_boost && old_boost > boost)
                        schedtune_cpu_update(cpu);
        }

        return 0;
}
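/*
 * Design note: the two checks above keep the common case cheap. A boost
 * increase on an active group updates boost_max directly; the full
 * per-group scan in schedtune_cpu_update() is needed only when the group
 * being lowered was the one defining the current maximum.
 */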
static void
schedtune_tasks_update(struct task_struct *p, int cpu, int idx, int task_count)
{
        struct boost_groups *bg;
        int tasks;

        bg = &per_cpu(cpu_boost_groups, cpu);

        /* Update boosted tasks count while avoiding to make it negative */
        if (task_count < 0 && bg->group[idx].tasks <= -task_count)
                bg->group[idx].tasks = 0;
        else
                bg->group[idx].tasks += task_count;

        /* Boost group activation or deactivation on that RQ */
        tasks = bg->group[idx].tasks;
        if (tasks == 1 || tasks == 0)
                schedtune_cpu_update(cpu);
}
/*
 * NOTE: This function must be called while holding the lock on the CPU RQ
 */
void schedtune_enqueue_task(struct task_struct *p, int cpu)
{
        struct schedtune *st;
        int idx;

        /*
         * When a task is marked PF_EXITING by do_exit() it's going to be
         * dequeued and enqueued multiple times in the exit path.
         * Thus we avoid any further update, since we do not want to change
         * CPU boosting while the task is exiting.
         */
        if (p->flags & PF_EXITING)
                return;

        /* Get task boost group */
        rcu_read_lock();
        st = task_schedtune(p);
        idx = st->idx;
        rcu_read_unlock();

        schedtune_tasks_update(p, cpu, idx, 1);
}
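/*
 * task_schedtune() dereferences the task's css pointer, which is
 * protected by RCU; the rcu_read_lock() section above guarantees the
 * boost group is not freed while its index is being sampled.
 */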
/*
 * NOTE: This function must be called while holding the lock on the CPU RQ
 */
void schedtune_dequeue_task(struct task_struct *p, int cpu)
{
        struct schedtune *st;
        int idx;

        /*
         * When a task is marked PF_EXITING by do_exit() it's going to be
         * dequeued and enqueued multiple times in the exit path.
         * Thus we avoid any further update, since we do not want to change
         * CPU boosting while the task is exiting.
         * The last dequeue is done by the cgroup exit() callback.
         */
        if (p->flags & PF_EXITING)
                return;

        /* Get task boost group */
        rcu_read_lock();
        st = task_schedtune(p);
        idx = st->idx;
        rcu_read_unlock();

        schedtune_tasks_update(p, cpu, idx, -1);
}
int schedtune_cpu_boost(int cpu)
{
        struct boost_groups *bg;

        bg = &per_cpu(cpu_boost_groups, cpu);
        return bg->boost_max;
}
int schedtune_task_boost(struct task_struct *p)
{
        struct schedtune *st;
        int task_boost;

        /* Get task boost value */
        rcu_read_lock();
        st = task_schedtune(p);
        task_boost = st->boost;
        rcu_read_unlock();

        return task_boost;
}
static u64
boost_read(struct cgroup_subsys_state *css, struct cftype *cft)
{
        struct schedtune *st = css_st(css);

        return st->boost;
}
static int
boost_write(struct cgroup_subsys_state *css, struct cftype *cft,
            u64 boost)
{
        struct schedtune *st = css_st(css);

        /* boost is unsigned, so only the upper bound needs checking */
        if (boost > 100)
                return -EINVAL;

        st->boost = boost;
        if (css == &root_schedtune.css)
                sysctl_sched_cfs_boost = boost;

        /* Update CPU boost */
        schedtune_boostgroup_update(st->idx, st->boost);

        return 0;
}
static struct cftype files[] = {
        {
                .name = "boost",
                .read_u64 = boost_read,
                .write_u64 = boost_write,
        },
        { }     /* terminate */
};
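/*
 * Userspace usage sketch, assuming the subsystem is registered as
 * "schedtune" and mounted at /sys/fs/cgroup/stune (the mount point is a
 * convention, not something this file enforces):
 *
 *    mkdir /sys/fs/cgroup/stune/foreground
 *    echo 60 > /sys/fs/cgroup/stune/foreground/schedtune.boost
 *    echo $TASK_PID > /sys/fs/cgroup/stune/foreground/tasks
 *
 * Tasks in the "foreground" group are then boosted by 60%, while tasks
 * left in the root group keep the system-wide boost value.
 */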
static int
schedtune_boostgroup_init(struct schedtune *st)
{
        struct boost_groups *bg;
        int cpu;

        /* Keep track of allocated boost groups */
        allocated_group[st->idx] = st;

        /* Initialize the per CPU boost groups */
        for_each_possible_cpu(cpu) {
                bg = &per_cpu(cpu_boost_groups, cpu);
                bg->group[st->idx].boost = 0;
                bg->group[st->idx].tasks = 0;
        }

        return 0;
}
static int
schedtune_init(void)
{
        struct boost_groups *bg;
        int cpu;

        /* Initialize the per CPU boost groups */
        for_each_possible_cpu(cpu) {
                bg = &per_cpu(cpu_boost_groups, cpu);
                memset(bg, 0, sizeof(struct boost_groups));
        }

        pr_info("schedtune: configured to support %d boost groups\n",
                BOOSTGROUPS_COUNT);

        return 0;
}
static struct cgroup_subsys_state *
schedtune_css_alloc(struct cgroup_subsys_state *parent_css)
{
        struct schedtune *st;
        int idx;

        if (!parent_css) {
                schedtune_init();
                return &root_schedtune.css;
        }

        /* Allow only single level hierarchies */
        if (parent_css != &root_schedtune.css) {
                pr_err("Nested SchedTune boosting groups not allowed\n");
                return ERR_PTR(-EINVAL);
        }

        /* Allow only a limited number of boosting groups */
        for (idx = 1; idx < BOOSTGROUPS_COUNT; ++idx)
                if (!allocated_group[idx])
                        break;
        if (idx == BOOSTGROUPS_COUNT) {
                pr_err("Trying to create more than %d SchedTune boosting groups\n",
                       BOOSTGROUPS_COUNT);
                return ERR_PTR(-ENOSPC);
        }

        st = kzalloc(sizeof(*st), GFP_KERNEL);
        if (!st)
                goto out;

        /* Initialize per CPUs boost group support */
        st->idx = idx;
        if (schedtune_boostgroup_init(st))
                goto release;

        return &st->css;

release:
        kfree(st);
out:
        return ERR_PTR(-ENOMEM);
}
static void
schedtune_boostgroup_release(struct schedtune *st)
{
        /* Reset this boost group */
        schedtune_boostgroup_update(st->idx, 0);

        /* Free the boost group slot for reuse */
        allocated_group[st->idx] = NULL;
}
static void
schedtune_css_free(struct cgroup_subsys_state *css)
{
        struct schedtune *st = css_st(css);

        schedtune_boostgroup_release(st);
        kfree(st);
}
struct cgroup_subsys schedtune_cgrp_subsys = {
        .css_alloc      = schedtune_css_alloc,
        .css_free       = schedtune_css_free,
        .legacy_cftypes = files,
        .early_init     = 1,
};
#else /* CONFIG_CGROUP_SCHEDTUNE */

static int
schedtune_accept_deltas(int nrg_delta, int cap_delta,
                        struct task_struct *task)
{
        /* Optimal (O) region */
        if (nrg_delta < 0 && cap_delta > 0)
                return INT_MAX;

        /* Suboptimal (S) region */
        if (nrg_delta > 0 && cap_delta < 0)
                return -INT_MAX;

        return __schedtune_accept_deltas(nrg_delta, cap_delta,
                        perf_boost_idx, perf_constrain_idx);
}

#endif /* CONFIG_CGROUP_SCHEDTUNE */
int
sysctl_sched_cfs_boost_handler(struct ctl_table *table, int write,
                               void __user *buffer, size_t *lenp,
                               loff_t *ppos)
{
        int ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);

        if (ret || !write)
                return ret;

        /*
         * Update threshold params for the Performance Boost (B) and
         * Performance Constraint (C) regions. A boost of exactly 100
         * (or 0, for the constraint index) would otherwise index one
         * past the end of threshold_gains[], so clamp to the last entry.
         */
        perf_boost_idx = min_t(int, sysctl_sched_cfs_boost / 10,
                               ARRAY_SIZE(threshold_gains) - 1);
        perf_constrain_idx = min_t(int, (100 - sysctl_sched_cfs_boost) / 10,
                                   ARRAY_SIZE(threshold_gains) - 1);

        return 0;
}
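/*
 * Example: writing 60 to the sysctl selects perf_boost_idx = 6 (gains
 * { 4, 2 }) and perf_constrain_idx = 4 (gains { 3, 4 }). The Boost
 * region then accepts candidates gaining at least 2/4 = 0.5 units of
 * capacity per unit of extra energy, while the Constraint region accepts
 * candidates losing at most 4/3 units of capacity per unit of energy
 * saved.
 */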
/*
 * System energy normalization
 * Returns the normalized value, in the range [0..SCHED_LOAD_SCALE],
 * corresponding to the specified energy variation.
 */
int
schedtune_normalize_energy(int energy_diff)
{
        u32 normalized_nrg;

#ifdef CONFIG_SCHED_DEBUG
        int max_delta;

        /* Check for boundaries */
        max_delta  = schedtune_target_nrg.max_power;
        max_delta -= schedtune_target_nrg.min_power;
        WARN_ON(abs(energy_diff) >= max_delta);
#endif

        /* Do scaling using positive numbers to increase the range */
        normalized_nrg = (energy_diff < 0) ? -energy_diff : energy_diff;

        /* Scale by energy magnitude */
        normalized_nrg <<= SCHED_LOAD_SHIFT;

        /* Normalize on max energy for target platform */
        normalized_nrg = reciprocal_divide(
                        normalized_nrg, schedtune_target_nrg.rdiv);

        return (energy_diff < 0) ? -normalized_nrg : normalized_nrg;
}
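/*
 * Numeric example (values made up for illustration; real bounds come
 * from the EM data): with min_power = 50 and max_power = 1050 the
 * normalization range is 1000. For SCHED_LOAD_SHIFT = 10, an
 * energy_diff of 250 becomes
 *
 *    (250 << 10) / 1000 = 256000 / 1000 = 256
 *
 * i.e. a quarter of SCHED_LOAD_SCALE (1024). reciprocal_divide()
 * computes this using the constants precomputed in
 * schedtune_target_nrg.rdiv, avoiding a division here.
 */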
#ifdef CONFIG_SCHED_DEBUG
static void
schedtune_test_nrg(unsigned long delta_pwr)
{
        unsigned long test_delta_pwr;
        unsigned long test_norm_pwr;
        int idx;

        /*
         * Check normalization constants using some constant system
         * energy values
         */
        pr_info("schedtune: verify normalization constants...\n");
        for (idx = 0; idx < 6; ++idx) {
                test_delta_pwr = delta_pwr >> idx;

                /* Normalize on max energy for target platform */
                test_norm_pwr = reciprocal_divide(
                        test_delta_pwr << SCHED_LOAD_SHIFT,
                        schedtune_target_nrg.rdiv);

                pr_info("schedtune: max_pwr/2^%d: %4lu => norm_pwr: %5lu\n",
                        idx, test_delta_pwr, test_norm_pwr);
        }
}
#else
#define schedtune_test_nrg(delta_pwr)
#endif
/*
 * Compute the min/max power consumption of a cluster and all its CPUs
 */
static void
schedtune_add_cluster_nrg(
                struct sched_domain *sd,
                struct sched_group *sg,
                struct target_nrg *ste)
{
        struct sched_domain *sd2;
        struct sched_group *sg2;

        struct cpumask *cluster_cpus;
        char str[32];

        unsigned long min_pwr;
        unsigned long max_pwr;
        int cpu;

        /* Get Cluster energy using EM data for the first CPU */
        cluster_cpus = sched_group_cpus(sg);
        snprintf(str, 32, "CLUSTER[%*pbl]",
                 cpumask_pr_args(cluster_cpus));

        min_pwr = sg->sge->idle_states[sg->sge->nr_idle_states - 1].power;
        max_pwr = sg->sge->cap_states[sg->sge->nr_cap_states - 1].power;
        pr_info("schedtune: %-17s min_pwr: %5lu max_pwr: %5lu\n",
                str, min_pwr, max_pwr);

        /*
         * Keep track of this cluster's energy in the computation of the
         * overall system energy
         */
        ste->min_power += min_pwr;
        ste->max_power += max_pwr;

        /* Get CPU energy using EM data for each CPU in the group */
        for_each_cpu(cpu, cluster_cpus) {
                /* Get a SD view for the specific CPU */
                for_each_domain(cpu, sd2) {
                        /* Get the CPU group */
                        sg2 = sd2->groups;
                        min_pwr = sg2->sge->idle_states[sg2->sge->nr_idle_states - 1].power;
                        max_pwr = sg2->sge->cap_states[sg2->sge->nr_cap_states - 1].power;

                        ste->min_power += min_pwr;
                        ste->max_power += max_pwr;

                        snprintf(str, 32, "CPU[%d]", cpu);
                        pr_info("schedtune: %-17s min_pwr: %5lu max_pwr: %5lu\n",
                                str, min_pwr, max_pwr);

                        /*
                         * Assume we have EM data only at the CPU and
                         * the upper CLUSTER level
                         */
                        BUG_ON(!cpumask_equal(
                                sched_group_cpus(sg),
                                sched_group_cpus(sd2->parent->groups)
                                ));

                        break;
                }
        }
}
/*
 * Initialize the constants required to compute normalized energy.
 * The values of these constants depend on the EM data for the specific
 * target system and topology.
 * Thus, this function is expected to be called by the code
 * that binds the EM to the topology information.
 */
static int
schedtune_init_late(void)
{
        struct target_nrg *ste = &schedtune_target_nrg;
        unsigned long delta_pwr = 0;
        struct sched_domain *sd;
        struct sched_group *sg;

        pr_info("schedtune: init normalization constants...\n");
        ste->max_power = 0;
        ste->min_power = 0;

        rcu_read_lock();

        /*
         * When EAS is in use, we always have a pointer to the highest SD
         * which provides EM data.
         */
        sd = rcu_dereference(per_cpu(sd_ea, cpumask_first(cpu_online_mask)));
        if (!sd) {
                pr_info("schedtune: no energy model data\n");
                goto nodata;
        }

        sg = sd->groups;
        do {
                schedtune_add_cluster_nrg(sd, sg, ste);
        } while (sg = sg->next, sg != sd->groups);

        rcu_read_unlock();

        pr_info("schedtune: %-17s min_pwr: %5lu max_pwr: %5lu\n",
                "SYSTEM", ste->min_power, ste->max_power);

        /* Compute normalization constants */
        delta_pwr = ste->max_power - ste->min_power;
        ste->rdiv = reciprocal_value(delta_pwr);
        pr_info("schedtune: using normalization constants mul: %u sh1: %u sh2: %u\n",
                ste->rdiv.m, ste->rdiv.sh1, ste->rdiv.sh2);

        schedtune_test_nrg(delta_pwr);
        return 0;

nodata:
        rcu_read_unlock();
        return -EINVAL;
}
late_initcall(schedtune_init_late);