*/
TRACE_EVENT(sched_boost_cpu,
- TP_PROTO(int cpu, unsigned long util, unsigned long margin),
+ TP_PROTO(int cpu, unsigned long util, long margin),
TP_ARGS(cpu, util, margin),
TP_STRUCT__entry(
__field( int, cpu )
__field( unsigned long, util )
- __field( unsigned long, margin )
+ __field( long, margin )
),
TP_fast_assign(
__entry->margin = margin;
),
- TP_printk("cpu=%d util=%lu margin=%lu",
+ TP_printk("cpu=%d util=%lu margin=%ld",
__entry->cpu,
__entry->util,
__entry->margin)
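With margin now signed, a negative boost shows up directly in the trace output; the %ld conversion above prints the sign. An illustrative line (values are hypothetical):

    sched_boost_cpu: cpu=2 util=320 margin=-29

The task-level events below carry the same change from unsigned to signed fields.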
TRACE_EVENT(sched_tune_tasks_update,
TP_PROTO(struct task_struct *tsk, int cpu, int tasks, int idx,
- unsigned int boost, unsigned int max_boost),
+ int boost, int max_boost),
TP_ARGS(tsk, cpu, tasks, idx, boost, max_boost),
__field( int, cpu )
__field( int, tasks )
__field( int, idx )
- __field( unsigned int, boost )
- __field( unsigned int, max_boost )
+ __field( int, boost )
+ __field( int, max_boost )
),
TP_fast_assign(
),
TP_printk("pid=%d comm=%s "
- "cpu=%d tasks=%d idx=%d boost=%u max_boost=%u",
+ "cpu=%d tasks=%d idx=%d boost=%d max_boost=%d",
__entry->pid, __entry->comm,
__entry->cpu, __entry->tasks, __entry->idx,
__entry->boost, __entry->max_boost)
*/
TRACE_EVENT(sched_boost_task,
- TP_PROTO(struct task_struct *tsk, unsigned long util, unsigned long margin),
+ TP_PROTO(struct task_struct *tsk, unsigned long util, long margin),
TP_ARGS(tsk, util, margin),
__array( char, comm, TASK_COMM_LEN )
__field( pid_t, pid )
__field( unsigned long, util )
- __field( unsigned long, margin )
+ __field( long, margin )
),
__entry->margin = margin;
),
- TP_printk("comm=%s pid=%d util=%lu margin=%lu",
+ TP_printk("comm=%s pid=%d util=%lu margin=%ld",
__entry->comm, __entry->pid,
__entry->util,
__entry->margin)
#ifdef CONFIG_SCHED_TUNE
-static unsigned long
-schedtune_margin(unsigned long signal, unsigned long boost)
+static long
+schedtune_margin(unsigned long signal, long boost)
{
- unsigned long long margin = 0;
+ long long margin = 0;
/*
* Signal proportional compensation (SPC)
*
* The Boost (B) value is used to compute a Margin (M) which is
* proportional to the complement of the original Signal (S):
- * M = B * (SCHED_LOAD_SCALE - S)
+ * M = B * (SCHED_LOAD_SCALE - S), if B is positive
+ * M = B * S, if B is negative
* The obtained M could be used by the caller to "boost" S.
*/
- margin = SCHED_LOAD_SCALE - signal;
- margin *= boost;
-
+ if (boost >= 0) {
+ margin = SCHED_LOAD_SCALE - signal;
+ margin *= boost;
+ } else {
+ margin = -signal * boost;
+ }
/*
* Fast integer division by constant:
* Constant : (C) = 100
margin *= 1311;
margin >>= 17;
+ if (boost < 0)
+ margin *= -1;
return margin;
}
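The two-step "margin *= 1311; margin >>= 17;" implements the divide-by-100 in the SPC formula without an integer division: 1311 is round(2^17 / 100), so x * 1311 >> 17 approximates x / 100 to well within the 0.1% precision the comment targets over the 0..SCHED_LOAD_SCALE input range. A minimal userspace sketch of the signed math (a standalone re-implementation for illustration only, assuming SCHED_LOAD_SCALE is 1024):

    #include <stdio.h>

    #define SCHED_LOAD_SCALE 1024UL	/* assumed value of the kernel constant */

    /* Standalone re-implementation of schedtune_margin() for illustration */
    static long sketch_margin(unsigned long signal, long boost)
    {
    	long long margin;

    	if (boost >= 0) {
    		margin = SCHED_LOAD_SCALE - signal;
    		margin *= boost;
    	} else {
    		margin = -(long long)signal * boost;	/* positive here */
    	}

    	/* Fast divide-by-100: round(2^17 / 100) = 1311 */
    	margin *= 1311;
    	margin >>= 17;

    	if (boost < 0)
    		margin = -margin;

    	return margin;
    }

    int main(void)
    {
    	/* boost=+50, signal=512: 50 * (1024 - 512) / 100 = 256 */
    	printf("%ld\n", sketch_margin(512, 50));
    	/* boost=-50, signal=512: -50 * 512 / 100 = -256 */
    	printf("%ld\n", sketch_margin(512, -50));
    	return 0;
    }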
-static inline unsigned int
+static inline int
schedtune_cpu_margin(unsigned long util, int cpu)
{
- unsigned int boost;
+ int boost;
#ifdef CONFIG_CGROUP_SCHEDTUNE
boost = schedtune_cpu_boost(cpu);
return schedtune_margin(util, boost);
}
-static inline unsigned long
+static inline long
schedtune_task_margin(struct task_struct *task)
{
- unsigned int boost;
+ int boost;
unsigned long util;
- unsigned long margin;
+ long margin;
#ifdef CONFIG_CGROUP_SCHEDTUNE
boost = schedtune_task_boost(task);
#else /* CONFIG_SCHED_TUNE */
-static inline unsigned int
+static inline int
schedtune_cpu_margin(unsigned long util, int cpu)
{
return 0;
}
-static inline unsigned int
+static inline long
schedtune_task_margin(struct task_struct *task)
{
return 0;
boosted_cpu_util(int cpu)
{
unsigned long util = cpu_util(cpu);
- unsigned long margin = schedtune_cpu_margin(util, cpu);
+ long margin = schedtune_cpu_margin(util, cpu);
trace_sched_boost_cpu(cpu, util, margin);
boosted_task_util(struct task_struct *task)
{
unsigned long util = task_util(task);
- unsigned long margin = schedtune_task_margin(task);
+ long margin = schedtune_task_margin(task);
trace_sched_boost_task(task, util, margin);
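The return statements of both helpers are elided from this hunk; in context they combine the two values, and with a signed margin the result can now fall below the raw utilization instead of only exceeding it. A sketch of that combination, assuming the elided return statement is "util + margin":

    /*
     * Illustration only: assumes the elided returns are "util + margin".
     * For negative boosts, schedtune_margin() keeps |margin| <= util,
     * so the unsigned sum cannot wrap.
     */
    static unsigned long sketch_boosted_util(unsigned long util, long margin)
    {
    	return util + margin;	/* margin may be negative */
    }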
*/
struct boost_groups {
/* Maximum boost value for all RUNNABLE tasks on a CPU */
- unsigned boost_max;
+ int boost_max;
+ /* Whether this CPU is currently idle */
+ bool idle;
struct {
/* The boost for tasks on that boost group */
- unsigned boost;
+ int boost;
/* Count of RUNNABLE tasks on that boost group */
unsigned tasks;
} group[BOOSTGROUPS_COUNT];
schedtune_cpu_update(int cpu)
{
struct boost_groups *bg;
- unsigned boost_max;
+ int boost_max;
int idx;
bg = &per_cpu(cpu_boost_groups, cpu);
*/
if (bg->group[idx].tasks == 0)
continue;
+
boost_max = max(boost_max, bg->group[idx].boost);
}
-
+ /*
+ * Ensure boost_max is non-negative when all cgroup boost values
+ * are negative. This avoids under-accounting of CPU capacity, which
+ * could otherwise cause task stacking and frequency spikes.
+ */
+ boost_max = max(boost_max, 0);
bg->boost_max = boost_max;
}
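A quick worked example of the clamp: if the only boost groups with runnable tasks on a CPU carry boosts of -30 and -10, the max() across groups leaves boost_max at -10, and the final max(boost_max, 0) raises it to 0. The CPU-level capacity request is therefore never shrunk below the real utilization, while negative boosts still take effect per task through schedtune_task_margin(), which is not clamped.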
return task_boost;
}
-static u64
+static s64
boost_read(struct cgroup_subsys_state *css, struct cftype *cft)
{
struct schedtune *st = css_st(css);
static int
boost_write(struct cgroup_subsys_state *css, struct cftype *cft,
- u64 boost)
+ s64 boost)
{
struct schedtune *st = css_st(css);
+ unsigned threshold_idx;
+ int boost_pct;
- if (boost < 0 || boost > 100)
+ if (boost < -100 || boost > 100)
return -EINVAL;
st->boost = boost;
static struct cftype files[] = {
{
.name = "boost",
- .read_u64 = boost_read,
- .write_u64 = boost_write,
+ .read_s64 = boost_read,
+ .write_s64 = boost_write,
},
{ } /* terminate */
};
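Switching the cftype handlers from read_u64/write_u64 to read_s64/write_s64 is what lets the cgroup core parse and print a leading minus sign; with the u64 handlers, a string such as "-10" would fail to parse and be rejected before boost_write() ever ran. A hypothetical userspace usage sketch (the /dev/stune mount point and group name are illustrative; the file name follows from the "schedtune" controller prefix and the .name = "boost" entry above):

    #include <stdio.h>

    int main(void)
    {
    	/* Illustrative path: a schedtune cgroup mounted at /dev/stune */
    	FILE *f = fopen("/dev/stune/background/schedtune.boost", "w");

    	if (!f)
    		return 1;
    	/* Accepted now that the range check allows [-100, 100] */
    	fprintf(f, "%d\n", -10);
    	fclose(f);
    	return 0;
    }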