Merge branch 'linux-linaro-lsk' into linux-linaro-lsk-android
[firefly-linux-kernel-4.4.55.git] / drivers / cpufreq / cpufreq_interactive.c
index a494fbd7dbe818e3add477e03c19ad59e52641bb..b7539fdd0004c2a15c1a644a0f283c6a7184ae66 100644 (file)
@@ -45,8 +45,10 @@ struct cpufreq_interactive_cpuinfo {
        u64 cputime_speedadj_timestamp;
        struct cpufreq_policy *policy;
        struct cpufreq_frequency_table *freq_table;
+       spinlock_t target_freq_lock; /* protects target_freq */
        unsigned int target_freq;
        unsigned int floor_freq;
+       unsigned int max_freq; /* cached policy->max */
        u64 floor_validate_time;
        u64 hispeed_validate_time;
        struct rw_semaphore enable_sem;
@@ -61,6 +63,7 @@ static cpumask_t speedchange_cpumask;
 static spinlock_t speedchange_cpumask_lock;
 static struct mutex gov_lock;
 
+/* Target load.  Lower values result in higher CPU speeds. */
 #define DEFAULT_TARGET_LOAD 90
 static unsigned int default_target_loads[] = {DEFAULT_TARGET_LOAD};
 
@@ -71,31 +74,25 @@ static unsigned int default_above_hispeed_delay[] = {
 
 struct cpufreq_interactive_tunables {
        int usage_count;
-
        /* Hi speed to bump to from lo speed when load burst (default max) */
        unsigned int hispeed_freq;
-
        /* Go to hi speed when CPU load at or above this value. */
 #define DEFAULT_GO_HISPEED_LOAD 99
        unsigned long go_hispeed_load;
-
        /* Target load. Lower values result in higher CPU speeds. */
        spinlock_t target_loads_lock;
        unsigned int *target_loads;
        int ntarget_loads;
-
        /*
         * The minimum amount of time to spend at a frequency before we can ramp
         * down.
         */
 #define DEFAULT_MIN_SAMPLE_TIME (80 * USEC_PER_MSEC)
        unsigned long min_sample_time;
-
        /*
         * The sample rate of the timer used to increase frequency
         */
        unsigned long timer_rate;
-
        /*
         * Wait this long before raising speed above hispeed, by default a
         * single timer interval.
@@ -103,21 +100,18 @@ struct cpufreq_interactive_tunables {
        spinlock_t above_hispeed_delay_lock;
        unsigned int *above_hispeed_delay;
        int nabove_hispeed_delay;
-
        /* Non-zero means indefinite speed boost active */
        int boost_val;
-       /* Duration of a boost pulse in usecs */
+       /* Duration of a boost pulse in usecs */
        int boostpulse_duration_val;
        /* End time of boost pulse in ktime converted to usecs */
        u64 boostpulse_endtime;
-
        /*
         * Max additional time to wait in idle, beyond timer_rate, at speeds
         * above minimum before wakeup to reduce speed, or -1 if unnecessary.
         */
 #define DEFAULT_TIMER_SLACK (4 * DEFAULT_TIMER_RATE)
        int timer_slack_val;
-
        bool io_is_busy;
 };
 
@@ -137,15 +131,15 @@ static void cpufreq_interactive_timer_resched(
        spin_lock_irqsave(&pcpu->load_lock, flags);
        pcpu->time_in_idle =
                get_cpu_idle_time(smp_processor_id(),
-                                    &pcpu->time_in_idle_timestamp,
-                                    tunables->io_is_busy);
+                                 &pcpu->time_in_idle_timestamp,
+                                 tunables->io_is_busy);
        pcpu->cputime_speedadj = 0;
        pcpu->cputime_speedadj_timestamp = pcpu->time_in_idle_timestamp;
        expires = jiffies + usecs_to_jiffies(tunables->timer_rate);
        mod_timer_pinned(&pcpu->cpu_timer, expires);
 
        if (tunables->timer_slack_val >= 0 &&
-                       pcpu->target_freq > pcpu->policy->min) {
+           pcpu->target_freq > pcpu->policy->min) {
                expires += usecs_to_jiffies(tunables->timer_slack_val);
                mod_timer_pinned(&pcpu->cpu_slack_timer, expires);
        }
@@ -158,16 +152,17 @@ static void cpufreq_interactive_timer_resched(
  * function.
  */
 static void cpufreq_interactive_timer_start(
-               struct cpufreq_interactive_tunables *tunables, int cpu)
+       struct cpufreq_interactive_tunables *tunables, int cpu)
 {
        struct cpufreq_interactive_cpuinfo *pcpu = &per_cpu(cpuinfo, cpu);
-       unsigned long expires = jiffies + usecs_to_jiffies(tunables->timer_rate);
+       unsigned long expires = jiffies +
+               usecs_to_jiffies(tunables->timer_rate);
        unsigned long flags;
 
        pcpu->cpu_timer.expires = expires;
        add_timer_on(&pcpu->cpu_timer, cpu);
        if (tunables->timer_slack_val >= 0 &&
-                       pcpu->target_freq > pcpu->policy->min) {
+           pcpu->target_freq > pcpu->policy->min) {
                expires += usecs_to_jiffies(tunables->timer_slack_val);
                pcpu->cpu_slack_timer.expires = expires;
                add_timer_on(&pcpu->cpu_slack_timer, cpu);
@@ -183,8 +178,8 @@ static void cpufreq_interactive_timer_start(
 }
 
 static unsigned int freq_to_above_hispeed_delay(
-               struct cpufreq_interactive_tunables *tunables,
-               unsigned int freq)
+       struct cpufreq_interactive_tunables *tunables,
+       unsigned int freq)
 {
        int i;
        unsigned int ret;
@@ -201,8 +196,8 @@ static unsigned int freq_to_above_hispeed_delay(
        return ret;
 }
 
-static unsigned int freq_to_targetload(struct cpufreq_interactive_tunables
-               *tunables, unsigned int freq)
+static unsigned int freq_to_targetload(
+       struct cpufreq_interactive_tunables *tunables, unsigned int freq)
 {
        int i;
        unsigned int ret;
@@ -211,7 +206,7 @@ static unsigned int freq_to_targetload(struct cpufreq_interactive_tunables
        spin_lock_irqsave(&tunables->target_loads_lock, flags);
 
        for (i = 0; i < tunables->ntarget_loads - 1 &&
-                       freq >= tunables->target_loads[i+1]; i += 2)
+                   freq >= tunables->target_loads[i+1]; i += 2)
                ;
 
        ret = tunables->target_loads[i];
@@ -366,6 +361,7 @@ static void cpufreq_interactive_timer(unsigned long data)
        if (WARN_ON_ONCE(!delta_time))
                goto rearm;
 
+       spin_lock_irqsave(&pcpu->target_freq_lock, flags);
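+       /* target_freq is read and may be rewritten below, so hold
+        * target_freq_lock until the new target is settled. */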
        do_div(cputime_speedadj, delta_time);
        loadadjfreq = (unsigned int)cputime_speedadj * 100;
        cpu_load = loadadjfreq / pcpu->target_freq;
@@ -391,6 +387,7 @@ static void cpufreq_interactive_timer(unsigned long data)
                trace_cpufreq_interactive_notyet(
                        data, cpu_load, pcpu->target_freq,
                        pcpu->policy->cur, new_freq);
+               spin_unlock_irqrestore(&pcpu->target_freq_lock, flags);
                goto rearm;
        }
 
@@ -398,8 +395,10 @@ static void cpufreq_interactive_timer(unsigned long data)
 
        if (cpufreq_frequency_table_target(pcpu->policy, pcpu->freq_table,
                                           new_freq, CPUFREQ_RELATION_L,
-                                          &index))
+                                          &index)) {
+               spin_unlock_irqrestore(&pcpu->target_freq_lock, flags);
                goto rearm;
+       }
 
        new_freq = pcpu->freq_table[index].frequency;
 
@@ -413,6 +412,7 @@ static void cpufreq_interactive_timer(unsigned long data)
                        trace_cpufreq_interactive_notyet(
                                data, cpu_load, pcpu->target_freq,
                                pcpu->policy->cur, new_freq);
+                       spin_unlock_irqrestore(&pcpu->target_freq_lock, flags);
                        goto rearm;
                }
        }
@@ -434,6 +434,7 @@ static void cpufreq_interactive_timer(unsigned long data)
                trace_cpufreq_interactive_already(
                        data, cpu_load, pcpu->target_freq,
                        pcpu->policy->cur, new_freq);
+               spin_unlock_irqrestore(&pcpu->target_freq_lock, flags);
                goto rearm_if_notmax;
        }
 
@@ -441,6 +442,7 @@ static void cpufreq_interactive_timer(unsigned long data)
                                         pcpu->policy->cur, new_freq);
 
        pcpu->target_freq = new_freq;
+       spin_unlock_irqrestore(&pcpu->target_freq_lock, flags);
        spin_lock_irqsave(&speedchange_cpumask_lock, flags);
        cpumask_set_cpu(data, &speedchange_cpumask);
        spin_unlock_irqrestore(&speedchange_cpumask_lock, flags);
@@ -584,16 +586,17 @@ static void cpufreq_interactive_boost(void)
 {
        int i;
        int anyboost = 0;
-       unsigned long flags;
+       unsigned long flags[2]; /* [0]: speedchange_cpumask_lock, [1]: target_freq_lock */
        struct cpufreq_interactive_cpuinfo *pcpu;
        struct cpufreq_interactive_tunables *tunables;
 
-       spin_lock_irqsave(&speedchange_cpumask_lock, flags);
+       spin_lock_irqsave(&speedchange_cpumask_lock, flags[0]);
 
        for_each_online_cpu(i) {
                pcpu = &per_cpu(cpuinfo, i);
                tunables = pcpu->policy->governor_data;
 
+               spin_lock_irqsave(&pcpu->target_freq_lock, flags[1]);
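+               /* Serialize with the per-CPU timer, which updates
+                * target_freq under the same lock. */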
                if (pcpu->target_freq < tunables->hispeed_freq) {
                        pcpu->target_freq = tunables->hispeed_freq;
                        cpumask_set_cpu(i, &speedchange_cpumask);
@@ -609,9 +612,10 @@ static void cpufreq_interactive_boost(void)
 
                pcpu->floor_freq = tunables->hispeed_freq;
                pcpu->floor_validate_time = ktime_to_us(ktime_get());
+               spin_unlock_irqrestore(&pcpu->target_freq_lock, flags[1]);
        }
 
-       spin_unlock_irqrestore(&speedchange_cpumask_lock, flags);
+       spin_unlock_irqrestore(&speedchange_cpumask_lock, flags[0]);
 
        if (anyboost)
                wake_up_process(speedchange_task);
@@ -706,8 +710,9 @@ err:
        return ERR_PTR(err);
 }
 
-static ssize_t show_target_loads(struct cpufreq_interactive_tunables *tunables,
-               char *buf)
+static ssize_t show_target_loads(
+       struct cpufreq_interactive_tunables *tunables,
+       char *buf)
 {
        int i;
        ssize_t ret = 0;
@@ -719,13 +724,14 @@ static ssize_t show_target_loads(struct cpufreq_interactive_tunables *tunables,
                ret += sprintf(buf + ret, "%u%s", tunables->target_loads[i],
                               i & 0x1 ? ":" : " ");
 
-       ret += sprintf(buf + ret, "\n");
+       sprintf(buf + ret - 1, "\n");
        spin_unlock_irqrestore(&tunables->target_loads_lock, flags);
        return ret;
 }
 
-static ssize_t store_target_loads(struct cpufreq_interactive_tunables *tunables,
-               const char *buf, size_t count)
+static ssize_t store_target_loads(
+       struct cpufreq_interactive_tunables *tunables,
+       const char *buf, size_t count)
 {
        int ntokens;
        unsigned int *new_target_loads = NULL;
@@ -744,8 +750,8 @@ static ssize_t store_target_loads(struct cpufreq_interactive_tunables *tunables,
        return count;
 }
 
-static ssize_t show_above_hispeed_delay(struct cpufreq_interactive_tunables
-               *tunables, char *buf)
+static ssize_t show_above_hispeed_delay(
+       struct cpufreq_interactive_tunables *tunables, char *buf)
 {
        int i;
        ssize_t ret = 0;
@@ -758,13 +764,14 @@ static ssize_t show_above_hispeed_delay(struct cpufreq_interactive_tunables
                               tunables->above_hispeed_delay[i],
                               i & 0x1 ? ":" : " ");
 
-       ret += sprintf(buf + ret, "\n");
+       sprintf(buf + ret - 1, "\n");
        spin_unlock_irqrestore(&tunables->above_hispeed_delay_lock, flags);
        return ret;
 }
 
-static ssize_t store_above_hispeed_delay(struct cpufreq_interactive_tunables
-               *tunables, const char *buf, size_t count)
+static ssize_t store_above_hispeed_delay(
+       struct cpufreq_interactive_tunables *tunables,
+       const char *buf, size_t count)
 {
        int ntokens;
        unsigned int *new_above_hispeed_delay = NULL;
@@ -902,6 +909,7 @@ static ssize_t store_boost(struct cpufreq_interactive_tunables *tunables,
                trace_cpufreq_interactive_boost("on");
                cpufreq_interactive_boost();
        } else {
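+               /* Boost disabled: end any active boost pulse as well. */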
+               tunables->boostpulse_endtime = ktime_to_us(ktime_get());
                trace_cpufreq_interactive_unboost("off");
        }
 
@@ -1012,6 +1020,18 @@ store_gov_pol_sys(boostpulse);
 show_store_gov_pol_sys(boostpulse_duration);
 show_store_gov_pol_sys(io_is_busy);
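+/*
+ * Generate read/write sysfs attributes for both the global (gov_sys) and
+ * per-policy (gov_pol) interfaces.
+ */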
 
+#define gov_sys_attr_rw(_name)                                         \
+static struct global_attr _name##_gov_sys =                            \
+__ATTR(_name, 0644, show_##_name##_gov_sys, store_##_name##_gov_sys)
+
+#define gov_pol_attr_rw(_name)                                         \
+static struct freq_attr _name##_gov_pol =                              \
+__ATTR(_name, 0644, show_##_name##_gov_pol, store_##_name##_gov_pol)
+
+#define gov_sys_pol_attr_rw(_name)                                     \
+       gov_sys_attr_rw(_name);                                         \
+       gov_pol_attr_rw(_name)
+
 gov_sys_pol_attr_rw(target_loads);
 gov_sys_pol_attr_rw(above_hispeed_delay);
 gov_sys_pol_attr_rw(hispeed_freq);
@@ -1107,6 +1127,7 @@ static int cpufreq_governor_interactive(struct cpufreq_policy *policy,
        struct cpufreq_interactive_cpuinfo *pcpu;
        struct cpufreq_frequency_table *freq_table;
        struct cpufreq_interactive_tunables *tunables;
+       unsigned long flags;
 
        if (have_governor_per_policy())
                tunables = policy->governor_data;
@@ -1131,13 +1152,6 @@ static int cpufreq_governor_interactive(struct cpufreq_policy *policy,
                        return -ENOMEM;
                }
 
-               rc = sysfs_create_group(get_governor_parent_kobj(policy),
-                               get_sysfs_attr());
-               if (rc) {
-                       kfree(tunables);
-                       return rc;
-               }
-
                tunables->usage_count = 1;
                tunables->above_hispeed_delay = default_above_hispeed_delay;
                tunables->nabove_hispeed_delay =
@@ -1153,16 +1167,26 @@ static int cpufreq_governor_interactive(struct cpufreq_policy *policy,
                spin_lock_init(&tunables->target_loads_lock);
                spin_lock_init(&tunables->above_hispeed_delay_lock);
 
+               policy->governor_data = tunables;
+               if (!have_governor_per_policy())
+                       common_tunables = tunables;
+
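+               /* The sysfs show/store handlers dereference governor_data
+                * (or common_tunables), so set them before the attribute
+                * group becomes visible. */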
+               rc = sysfs_create_group(get_governor_parent_kobj(policy),
+                               get_sysfs_attr());
+               if (rc) {
+                       kfree(tunables);
+                       policy->governor_data = NULL;
+                       if (!have_governor_per_policy())
+                               common_tunables = NULL;
+                       return rc;
+               }
+
                if (!policy->governor->initialized) {
                        idle_notifier_register(&cpufreq_interactive_idle_nb);
                        cpufreq_register_notifier(&cpufreq_notifier_block,
                                        CPUFREQ_TRANSITION_NOTIFIER);
                }
 
-               policy->governor_data = tunables;
-               if (!have_governor_per_policy())
-                       common_tunables = tunables;
-
                break;
 
        case CPUFREQ_GOV_POLICY_EXIT:
@@ -1199,7 +1223,10 @@ static int cpufreq_governor_interactive(struct cpufreq_policy *policy,
                                ktime_to_us(ktime_get());
                        pcpu->hispeed_validate_time =
                                pcpu->floor_validate_time;
+                       pcpu->max_freq = policy->max;
                        down_write(&pcpu->enable_sem);
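+                       /* Ensure no timer is still pending before
+                        * cpufreq_interactive_timer_start() re-arms it
+                        * with add_timer_on(). */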
+                       del_timer_sync(&pcpu->cpu_timer);
+                       del_timer_sync(&pcpu->cpu_slack_timer);
                        cpufreq_interactive_timer_start(tunables, j);
                        pcpu->governor_enabled = 1;
                        up_write(&pcpu->enable_sem);
@@ -1232,29 +1259,37 @@ static int cpufreq_governor_interactive(struct cpufreq_policy *policy,
                for_each_cpu(j, policy->cpus) {
                        pcpu = &per_cpu(cpuinfo, j);
 
-                       /* hold write semaphore to avoid race */
-                       down_write(&pcpu->enable_sem);
+                       down_read(&pcpu->enable_sem);
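+                       /* A read hold of enable_sem is enough here:
+                        * target_freq is adjusted under target_freq_lock. */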
                        if (pcpu->governor_enabled == 0) {
-                               up_write(&pcpu->enable_sem);
+                               up_read(&pcpu->enable_sem);
                                continue;
                        }
 
-                       /* update target_freq firstly */
+                       spin_lock_irqsave(&pcpu->target_freq_lock, flags);
                        if (policy->max < pcpu->target_freq)
                                pcpu->target_freq = policy->max;
                        else if (policy->min > pcpu->target_freq)
                                pcpu->target_freq = policy->min;
 
-                       /* Reschedule timer.
+                       spin_unlock_irqrestore(&pcpu->target_freq_lock, flags);
+                       up_read(&pcpu->enable_sem);
+
+                       /* Reschedule timer only if policy->max is raised.
                         * Delete the timers, else the timer callback may
                         * return without re-arming them when it fails to
                         * acquire the semaphore. This race may cause the
                         * timer to stop unexpectedly.
                         */
-                       del_timer_sync(&pcpu->cpu_timer);
-                       del_timer_sync(&pcpu->cpu_slack_timer);
-                       cpufreq_interactive_timer_start(tunables, j);
-                       up_write(&pcpu->enable_sem);
+
+                       if (policy->max > pcpu->max_freq) {
+                               down_write(&pcpu->enable_sem);
+                               del_timer_sync(&pcpu->cpu_timer);
+                               del_timer_sync(&pcpu->cpu_slack_timer);
+                               cpufreq_interactive_timer_start(tunables, j);
+                               up_write(&pcpu->enable_sem);
+                       }
+
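+                       /* Remember the current limit so the next GOV_LIMITS
+                        * call can tell whether policy->max was raised. */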
+                       pcpu->max_freq = policy->max;
                }
                break;
        }
@@ -1290,6 +1325,7 @@ static int __init cpufreq_interactive_init(void)
                init_timer(&pcpu->cpu_slack_timer);
                pcpu->cpu_slack_timer.function = cpufreq_interactive_nop_timer;
                spin_lock_init(&pcpu->load_lock);
+               spin_lock_init(&pcpu->target_freq_lock);
                init_rwsem(&pcpu->enable_sem);
        }