config CPU_FREQ_GOV_SCHED
bool "'sched' cpufreq governor"
depends on CPU_FREQ
+ depends on SMP
select CPU_FREQ_GOV_COMMON
help
'sched' - this governor scales cpu frequency from the
#define for_each_possible_sd_level(level) \
for (level = 0; level < NR_SD_LEVELS; level++)
+#ifdef CONFIG_SMP
+
extern struct sched_group_energy *sge_array[NR_CPUS][NR_SD_LEVELS];
void init_sched_energy_costs(void);
+#else
+
+#define init_sched_energy_costs() do { } while (0)
+
+#endif /* CONFIG_SMP */
+
#endif
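
The header stub above is the usual pattern for making a facility optional: SMP builds see the real declaration, UP builds get a do-while(0) macro, and callers never need their own #ifdef. A tiny userspace stand-in (illustrative only, not kernel code) compiles both ways:

/* stub-demo.c -- build with and without -DCONFIG_SMP to see both paths. */
#include <stdio.h>

#ifdef CONFIG_SMP
static void init_sched_energy_costs(void)
{
	puts("building the energy model");	/* stand-in for the real work */
}
#else
/* Same trick as energy.h above: the call compiles away on UP builds. */
#define init_sched_energy_costs() do { } while (0)
#endif

int main(void)
{
	init_sched_energy_costs();	/* no #ifdef needed at the call site */
	return 0;
}

Build with gcc -DCONFIG_SMP stub-demo.c or plain gcc stub-demo.c; the call site is identical either way.
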
__entry->cpu_scale_factor)
);
+#ifdef CONFIG_SMP
+
/*
* Tracepoint for accounting sched averages for tasks.
*/
__entry->payoff, __entry->region)
);
+#endif /* CONFIG_SMP */
+
#endif /* _TRACE_SCHED_H */
/* This part must be outside protection */
config SCHED_TUNE
bool "Boosting for CFS tasks (EXPERIMENTAL)"
+ depends on SMP
help
This option enables the system-wide support for task boosting.
When this support is enabled a new sysctl interface is exposed to
endif
obj-y += core.o loadavg.o clock.o cputime.o
-obj-y += idle_task.o fair.o rt.o deadline.o stop_task.o energy.o
+obj-y += idle_task.o fair.o rt.o deadline.o stop_task.o
obj-y += wait.o completion.o idle.o
-obj-$(CONFIG_SMP) += cpupri.o cpudeadline.o
+obj-$(CONFIG_SMP) += cpupri.o cpudeadline.o energy.o
obj-$(CONFIG_SCHED_AUTOGROUP) += auto_group.o
obj-$(CONFIG_SCHEDSTATS) += stats.o
obj-$(CONFIG_SCHED_DEBUG) += debug.o
}
#endif
+#ifdef CONFIG_SMP
+static bool cpu_overutilized(int cpu);
static inline unsigned long boosted_cpu_util(int cpu);
+#else
+#define boosted_cpu_util(cpu) cpu_util(cpu)
+#endif
+#ifdef CONFIG_SMP
static void update_capacity_of(int cpu)
{
unsigned long req_cap;
req_cap = req_cap * SCHED_CAPACITY_SCALE / capacity_orig_of(cpu);
set_cfs_cpu_capacity(cpu, true, req_cap);
}
-
-static bool cpu_overutilized(int cpu);
+#endif
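
The excerpt elides the line that seeds req_cap (in EAS trees it is typically derived from boosted_cpu_util(), though that line is not visible here). What remains is a plain rescale of a utilization figure into the 0..SCHED_CAPACITY_SCALE range relative to the CPU's original capacity, as this stand-alone calculation with made-up numbers shows:

/* capacity-demo.c -- the rescaling applied to req_cap, with toy values. */
#include <stdio.h>

#define SCHED_CAPACITY_SCALE	1024UL	/* same constant as the kernel */

int main(void)
{
	unsigned long util = 300;	/* assumed utilization of the CPU */
	unsigned long cap_orig = 512;	/* original capacity of a little CPU */
	unsigned long req_cap = util * SCHED_CAPACITY_SCALE / cap_orig;

	/* 300 out of 512 becomes 600 out of 1024: same fraction, full scale. */
	printf("requested capacity: %lu/%lu\n", req_cap, SCHED_CAPACITY_SCALE);
	return 0;
}
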
/*
* The enqueue_task method is called before nr_running is
{
struct cfs_rq *cfs_rq;
struct sched_entity *se = &p->se;
+#ifdef CONFIG_SMP
int task_new = flags & ENQUEUE_WAKEUP_NEW;
int task_wakeup = flags & ENQUEUE_WAKEUP;
+#endif
for_each_sched_entity(se) {
if (se->on_rq)
update_cfs_shares(cfs_rq);
}
- if (!se) {
+ if (!se)
add_nr_running(rq, 1);
+
+#ifdef CONFIG_SMP
+
+ if (!se) {
if (!task_new && !rq->rd->overutilized &&
cpu_overutilized(rq->cpu))
rq->rd->overutilized = true;
if (task_new || task_wakeup)
update_capacity_of(cpu_of(rq));
}
+#endif /* CONFIG_SMP */
+
hrtick_update(rq);
}
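
cpu_overutilized() itself is not part of this excerpt. As a hedged sketch only, with the margin value and the exact comparison assumed from other EAS trees rather than taken from this patch, the check is normally utilization versus capacity with some headroom:

/* overutilized-demo.c -- the shape of the check, with toy helpers. */
#include <stdbool.h>
#include <stdio.h>

/* Toy stand-ins for the scheduler's per-CPU figures (assumed values). */
static unsigned long capacity_of(int cpu) { (void)cpu; return 1024; }
static unsigned long cpu_util(int cpu)    { (void)cpu; return 900;  }

/* Assumed ~1.25x headroom: overutilized once util passes ~80% of capacity. */
static bool cpu_overutilized(int cpu)
{
	unsigned long margin = 1280;

	return capacity_of(cpu) * 1024 < cpu_util(cpu) * margin;
}

int main(void)
{
	printf("cpu0 overutilized: %d\n", cpu_overutilized(0));
	return 0;
}
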
update_cfs_shares(cfs_rq);
}
- if (!se) {
+ if (!se)
sub_nr_running(rq, 1);
+
+#ifdef CONFIG_SMP
+
+ if (!se) {
schedtune_dequeue_task(p, cpu_of(rq));
/*
set_cfs_cpu_capacity(cpu_of(rq), false, 0);
}
}
+
+#endif /* CONFIG_SMP */
+
hrtick_update(rq);
}
{
remove_entity_load_avg(&p->se);
}
+#else
+#define task_fits_max(p, cpu) true
#endif /* CONFIG_SMP */
static unsigned long
if (static_branch_unlikely(&sched_numa_balancing))
task_tick_numa(rq, curr);
+#ifdef CONFIG_SMP
if (!rq->rd->overutilized && cpu_overutilized(task_cpu(curr)))
rq->rd->overutilized = true;
rq->misfit_task = !task_fits_max(curr, rq->cpu);
+#endif
}
/*
#ifdef CONFIG_SMP
+extern void init_max_cpu_capacity(struct max_cpu_capacity *mcc);
extern void update_group_capacity(struct sched_domain *sd, int cpu);
extern void trigger_load_balance(struct rq *rq);
extern void init_entity_runnable_average(struct sched_entity *se);
-extern void init_max_cpu_capacity(struct max_cpu_capacity *mcc);
-
static inline void __add_nr_running(struct rq *rq, unsigned count)
{
unsigned prev_nr = rq->nr_running;