u64 cputime_speedadj_timestamp;
struct cpufreq_policy *policy;
struct cpufreq_frequency_table *freq_table;
+ spinlock_t target_freq_lock; /* protects target freq */
unsigned int target_freq;
unsigned int floor_freq;
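+ /* Cache policy->max so GOV_LIMITS can detect when the max limit is raised */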
+ unsigned int max_freq;
u64 floor_validate_time;
u64 hispeed_validate_time;
struct rw_semaphore enable_sem;
static spinlock_t speedchange_cpumask_lock;
static struct mutex gov_lock;
+/* Target load. Lower values result in higher CPU speeds. */
#define DEFAULT_TARGET_LOAD 90
static unsigned int default_target_loads[] = {DEFAULT_TARGET_LOAD};
struct cpufreq_interactive_tunables {
int usage_count;
-
/* Hi speed to bump to from lo speed when load burst (default max) */
unsigned int hispeed_freq;
-
/* Go to hi speed when CPU load at or above this value. */
#define DEFAULT_GO_HISPEED_LOAD 99
unsigned long go_hispeed_load;
-
/* Target load. Lower values result in higher CPU speeds. */
spinlock_t target_loads_lock;
unsigned int *target_loads;
int ntarget_loads;
-
/*
* The minimum amount of time to spend at a frequency before we can ramp
* down.
*/
#define DEFAULT_MIN_SAMPLE_TIME (80 * USEC_PER_MSEC)
unsigned long min_sample_time;
-
/*
* The sample rate of the timer used to increase frequency
*/
unsigned long timer_rate;
-
/*
* Wait this long before raising speed above hispeed, by default a
* single timer interval.
*/
spinlock_t above_hispeed_delay_lock;
unsigned int *above_hispeed_delay;
int nabove_hispeed_delay;
-
/* Non-zero means indefinite speed boost active */
int boost_val;
- /* Duration of a boost pulse in usecs */
+ /* Duration of a boost pulse in usecs */
int boostpulse_duration_val;
/* End time of boost pulse in ktime converted to usecs */
u64 boostpulse_endtime;
-
/*
* Max additional time to wait in idle, beyond timer_rate, at speeds
* above minimum before wakeup to reduce speed, or -1 if unnecessary.
*/
#define DEFAULT_TIMER_SLACK (4 * DEFAULT_TIMER_RATE)
int timer_slack_val;
-
bool io_is_busy;
};
spin_lock_irqsave(&pcpu->load_lock, flags);
pcpu->time_in_idle =
get_cpu_idle_time(smp_processor_id(),
- &pcpu->time_in_idle_timestamp,
- tunables->io_is_busy);
+ &pcpu->time_in_idle_timestamp,
+ tunables->io_is_busy);
pcpu->cputime_speedadj = 0;
pcpu->cputime_speedadj_timestamp = pcpu->time_in_idle_timestamp;
expires = jiffies + usecs_to_jiffies(tunables->timer_rate);
mod_timer_pinned(&pcpu->cpu_timer, expires);
if (tunables->timer_slack_val >= 0 &&
- pcpu->target_freq > pcpu->policy->min) {
+ pcpu->target_freq > pcpu->policy->min) {
expires += usecs_to_jiffies(tunables->timer_slack_val);
mod_timer_pinned(&pcpu->cpu_slack_timer, expires);
}
* function.
*/
static void cpufreq_interactive_timer_start(
- struct cpufreq_interactive_tunables *tunables, int cpu)
+ struct cpufreq_interactive_tunables *tunables, int cpu)
{
struct cpufreq_interactive_cpuinfo *pcpu = &per_cpu(cpuinfo, cpu);
- unsigned long expires = jiffies + usecs_to_jiffies(tunables->timer_rate);
+ unsigned long expires = jiffies +
+ usecs_to_jiffies(tunables->timer_rate);
unsigned long flags;
pcpu->cpu_timer.expires = expires;
add_timer_on(&pcpu->cpu_timer, cpu);
if (tunables->timer_slack_val >= 0 &&
- pcpu->target_freq > pcpu->policy->min) {
+ pcpu->target_freq > pcpu->policy->min) {
expires += usecs_to_jiffies(tunables->timer_slack_val);
pcpu->cpu_slack_timer.expires = expires;
add_timer_on(&pcpu->cpu_slack_timer, cpu);
}
static unsigned int freq_to_above_hispeed_delay(
- struct cpufreq_interactive_tunables *tunables,
- unsigned int freq)
+ struct cpufreq_interactive_tunables *tunables,
+ unsigned int freq)
{
int i;
unsigned int ret;
return ret;
}
-static unsigned int freq_to_targetload(struct cpufreq_interactive_tunables
- *tunables, unsigned int freq)
+static unsigned int freq_to_targetload(
+ struct cpufreq_interactive_tunables *tunables, unsigned int freq)
{
int i;
unsigned int ret;
spin_lock_irqsave(&tunables->target_loads_lock, flags);
for (i = 0; i < tunables->ntarget_loads - 1 &&
- freq >= tunables->target_loads[i+1]; i += 2)
+ freq >= tunables->target_loads[i+1]; i += 2)
;
ret = tunables->target_loads[i];
if (WARN_ON_ONCE(!delta_time))
goto rearm;
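+ /* target_freq is also written from the boost path and GOV_LIMITS; guard it here */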
+ spin_lock_irqsave(&pcpu->target_freq_lock, flags);
do_div(cputime_speedadj, delta_time);
loadadjfreq = (unsigned int)cputime_speedadj * 100;
cpu_load = loadadjfreq / pcpu->target_freq;
trace_cpufreq_interactive_notyet(
data, cpu_load, pcpu->target_freq,
pcpu->policy->cur, new_freq);
+ spin_unlock_irqrestore(&pcpu->target_freq_lock, flags);
goto rearm;
}
if (cpufreq_frequency_table_target(pcpu->policy, pcpu->freq_table,
new_freq, CPUFREQ_RELATION_L,
- &index))
+ &index)) {
+ spin_unlock_irqrestore(&pcpu->target_freq_lock, flags);
goto rearm;
+ }
new_freq = pcpu->freq_table[index].frequency;
trace_cpufreq_interactive_notyet(
data, cpu_load, pcpu->target_freq,
pcpu->policy->cur, new_freq);
+ spin_unlock_irqrestore(&pcpu->target_freq_lock, flags);
goto rearm;
}
}
trace_cpufreq_interactive_already(
data, cpu_load, pcpu->target_freq,
pcpu->policy->cur, new_freq);
+ spin_unlock_irqrestore(&pcpu->target_freq_lock, flags);
goto rearm_if_notmax;
}
pcpu->policy->cur, new_freq);
pcpu->target_freq = new_freq;
+ spin_unlock_irqrestore(&pcpu->target_freq_lock, flags);
spin_lock_irqsave(&speedchange_cpumask_lock, flags);
cpumask_set_cpu(data, &speedchange_cpumask);
spin_unlock_irqrestore(&speedchange_cpumask_lock, flags);
{
int i;
int anyboost = 0;
- unsigned long flags;
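+ /* flags[0]: IRQ state for speedchange_cpumask_lock; flags[1]: for the nested per-CPU target_freq_lock */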
+ unsigned long flags[2];
struct cpufreq_interactive_cpuinfo *pcpu;
struct cpufreq_interactive_tunables *tunables;
- spin_lock_irqsave(&speedchange_cpumask_lock, flags);
+ spin_lock_irqsave(&speedchange_cpumask_lock, flags[0]);
for_each_online_cpu(i) {
pcpu = &per_cpu(cpuinfo, i);
tunables = pcpu->policy->governor_data;
+ spin_lock_irqsave(&pcpu->target_freq_lock, flags[1]);
if (pcpu->target_freq < tunables->hispeed_freq) {
pcpu->target_freq = tunables->hispeed_freq;
cpumask_set_cpu(i, &speedchange_cpumask);
pcpu->floor_freq = tunables->hispeed_freq;
pcpu->floor_validate_time = ktime_to_us(ktime_get());
+ spin_unlock_irqrestore(&pcpu->target_freq_lock, flags[1]);
}
- spin_unlock_irqrestore(&speedchange_cpumask_lock, flags);
+ spin_unlock_irqrestore(&speedchange_cpumask_lock, flags[0]);
if (anyboost)
wake_up_process(speedchange_task);
return ERR_PTR(err);
}
-static ssize_t show_target_loads(struct cpufreq_interactive_tunables *tunables,
- char *buf)
+static ssize_t show_target_loads(
+ struct cpufreq_interactive_tunables *tunables,
+ char *buf)
{
int i;
ssize_t ret = 0;
ret += sprintf(buf + ret, "%u%s", tunables->target_loads[i],
i & 0x1 ? ":" : " ");
- ret += sprintf(buf + ret, "\n");
+ sprintf(buf + ret - 1, "\n");
spin_unlock_irqrestore(&tunables->target_loads_lock, flags);
return ret;
}
-static ssize_t store_target_loads(struct cpufreq_interactive_tunables *tunables,
- const char *buf, size_t count)
+static ssize_t store_target_loads(
+ struct cpufreq_interactive_tunables *tunables,
+ const char *buf, size_t count)
{
int ntokens;
unsigned int *new_target_loads = NULL;
return count;
}
-static ssize_t show_above_hispeed_delay(struct cpufreq_interactive_tunables
- *tunables, char *buf)
+static ssize_t show_above_hispeed_delay(
+ struct cpufreq_interactive_tunables *tunables, char *buf)
{
int i;
ssize_t ret = 0;
tunables->above_hispeed_delay[i],
i & 0x1 ? ":" : " ");
- ret += sprintf(buf + ret, "\n");
+ sprintf(buf + ret - 1, "\n");
spin_unlock_irqrestore(&tunables->above_hispeed_delay_lock, flags);
return ret;
}
-static ssize_t store_above_hispeed_delay(struct cpufreq_interactive_tunables
- *tunables, const char *buf, size_t count)
+static ssize_t store_above_hispeed_delay(
+ struct cpufreq_interactive_tunables *tunables,
+ const char *buf, size_t count)
{
int ntokens;
unsigned int *new_above_hispeed_delay = NULL;
trace_cpufreq_interactive_boost("on");
cpufreq_interactive_boost();
} else {
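+ /* Boost disabled: end any in-progress boost pulse now */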
+ tunables->boostpulse_endtime = ktime_to_us(ktime_get());
trace_cpufreq_interactive_unboost("off");
}
show_store_gov_pol_sys(boostpulse_duration);
show_store_gov_pol_sys(io_is_busy);
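+/* Helpers that define read/write (0644) sysfs attributes for the global (gov_sys) and per-policy (gov_pol) interfaces */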
+#define gov_sys_attr_rw(_name) \
+static struct global_attr _name##_gov_sys = \
+__ATTR(_name, 0644, show_##_name##_gov_sys, store_##_name##_gov_sys)
+
+#define gov_pol_attr_rw(_name) \
+static struct freq_attr _name##_gov_pol = \
+__ATTR(_name, 0644, show_##_name##_gov_pol, store_##_name##_gov_pol)
+
+#define gov_sys_pol_attr_rw(_name) \
+ gov_sys_attr_rw(_name); \
+ gov_pol_attr_rw(_name)
+
gov_sys_pol_attr_rw(target_loads);
gov_sys_pol_attr_rw(above_hispeed_delay);
gov_sys_pol_attr_rw(hispeed_freq);
struct cpufreq_interactive_cpuinfo *pcpu;
struct cpufreq_frequency_table *freq_table;
struct cpufreq_interactive_tunables *tunables;
+ unsigned long flags;
if (have_governor_per_policy())
tunables = policy->governor_data;
return -ENOMEM;
}
- rc = sysfs_create_group(get_governor_parent_kobj(policy),
- get_sysfs_attr());
- if (rc) {
- kfree(tunables);
- return rc;
- }
-
tunables->usage_count = 1;
tunables->above_hispeed_delay = default_above_hispeed_delay;
tunables->nabove_hispeed_delay =
spin_lock_init(&tunables->target_loads_lock);
spin_lock_init(&tunables->above_hispeed_delay_lock);
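+ /* Publish tunables before creating the sysfs group so show/store handlers see valid data */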
+ policy->governor_data = tunables;
+ if (!have_governor_per_policy())
+ common_tunables = tunables;
+
+ rc = sysfs_create_group(get_governor_parent_kobj(policy),
+ get_sysfs_attr());
+ if (rc) {
+ kfree(tunables);
+ policy->governor_data = NULL;
+ if (!have_governor_per_policy())
+ common_tunables = NULL;
+ return rc;
+ }
+
if (!policy->governor->initialized) {
idle_notifier_register(&cpufreq_interactive_idle_nb);
cpufreq_register_notifier(&cpufreq_notifier_block,
CPUFREQ_TRANSITION_NOTIFIER);
}
- policy->governor_data = tunables;
- if (!have_governor_per_policy())
- common_tunables = tunables;
-
break;
case CPUFREQ_GOV_POLICY_EXIT:
ktime_to_us(ktime_get());
pcpu->hispeed_validate_time =
pcpu->floor_validate_time;
+ pcpu->max_freq = policy->max;
down_write(&pcpu->enable_sem);
+ del_timer_sync(&pcpu->cpu_timer);
+ del_timer_sync(&pcpu->cpu_slack_timer);
cpufreq_interactive_timer_start(tunables, j);
pcpu->governor_enabled = 1;
up_write(&pcpu->enable_sem);
for_each_cpu(j, policy->cpus) {
pcpu = &per_cpu(cpuinfo, j);
- /* hold write semaphore to avoid race */
- down_write(&pcpu->enable_sem);
+ down_read(&pcpu->enable_sem);
if (pcpu->governor_enabled == 0) {
- up_write(&pcpu->enable_sem);
+ up_read(&pcpu->enable_sem);
continue;
}
- /* update target_freq firstly */
+ spin_lock_irqsave(&pcpu->target_freq_lock, flags);
if (policy->max < pcpu->target_freq)
pcpu->target_freq = policy->max;
else if (policy->min > pcpu->target_freq)
pcpu->target_freq = policy->min;
- /* Reschedule timer.
+ spin_unlock_irqrestore(&pcpu->target_freq_lock, flags);
+ up_read(&pcpu->enable_sem);
+
+ /* Reschedule timer only if policy->max is raised.
* Delete the timers, else the timer callback may
* return without re-arming the timer when it fails to
* acquire the semaphore. This race may cause the timer
* to stop unexpectedly.
*/
- del_timer_sync(&pcpu->cpu_timer);
- del_timer_sync(&pcpu->cpu_slack_timer);
- cpufreq_interactive_timer_start(tunables, j);
- up_write(&pcpu->enable_sem);
+
+ if (policy->max > pcpu->max_freq) {
+ down_write(&pcpu->enable_sem);
+ del_timer_sync(&pcpu->cpu_timer);
+ del_timer_sync(&pcpu->cpu_slack_timer);
+ cpufreq_interactive_timer_start(tunables, j);
+ up_write(&pcpu->enable_sem);
+ }
+
+ pcpu->max_freq = policy->max;
}
break;
}
init_timer(&pcpu->cpu_slack_timer);
pcpu->cpu_slack_timer.function = cpufreq_interactive_nop_timer;
spin_lock_init(&pcpu->load_lock);
+ spin_lock_init(&pcpu->target_freq_lock);
init_rwsem(&pcpu->enable_sem);
}