Merge branch 'linux-linaro-lsk' into linux-linaro-lsk-android
[firefly-linux-kernel-4.4.55.git] drivers/cpufreq/cpufreq_interactive.c
index 9f63bd1e4feee26b697ae8deb76bf5fb1ea817cb..b7539fdd0004c2a15c1a644a0f283c6a7184ae66 100644
@@ -30,8 +30,7 @@
 #include <linux/workqueue.h>
 #include <linux/kthread.h>
 #include <linux/slab.h>
-#include <linux/kernel_stat.h>
-#include <asm/cputime.h>
+#include "cpufreq_governor.h"
 
 #define CREATE_TRACE_POINTS
 #include <trace/events/cpufreq_interactive.h>
@@ -46,8 +45,10 @@ struct cpufreq_interactive_cpuinfo {
        u64 cputime_speedadj_timestamp;
        struct cpufreq_policy *policy;
        struct cpufreq_frequency_table *freq_table;
+       spinlock_t target_freq_lock; /* protects target freq */
        unsigned int target_freq;
        unsigned int floor_freq;
+       unsigned int max_freq;
        u64 floor_validate_time;
        u64 hispeed_validate_time;
        struct rw_semaphore enable_sem;
@@ -119,44 +120,6 @@ struct cpufreq_interactive_tunables *common_tunables;
 
 static struct attribute_group *get_sysfs_attr(void);
 
-static inline cputime64_t get_cpu_idle_time_jiffy(unsigned int cpu,
-                                                 cputime64_t *wall)
-{
-       u64 idle_time;
-       u64 cur_wall_time;
-       u64 busy_time;
-
-       cur_wall_time = jiffies64_to_cputime64(get_jiffies_64());
-
-       busy_time  = kcpustat_cpu(cpu).cpustat[CPUTIME_USER];
-       busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SYSTEM];
-       busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_IRQ];
-       busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SOFTIRQ];
-       busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_STEAL];
-       busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_NICE];
-
-       idle_time = cur_wall_time - busy_time;
-       if (wall)
-               *wall = jiffies_to_usecs(cur_wall_time);
-
-       return jiffies_to_usecs(idle_time);
-}
-
-static inline cputime64_t get_cpu_idle_time(
-       unsigned int cpu,
-       cputime64_t *wall,
-       bool io_is_busy)
-{
-       u64 idle_time = get_cpu_idle_time_us(cpu, wall);
-
-       if (idle_time == -1ULL)
-               idle_time = get_cpu_idle_time_jiffy(cpu, wall);
-       else if (!io_is_busy)
-               idle_time += get_cpu_iowait_time_us(cpu, wall);
-
-       return idle_time;
-}
-
 static void cpufreq_interactive_timer_resched(
        struct cpufreq_interactive_cpuinfo *pcpu)
 {
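
The two idle-time helpers removed in the hunk above duplicated logic that the governor now pulls in through "cpufreq_governor.h"; the shared get_cpu_idle_time() declared there is expected to behave the same way. A minimal sketch of that expected behaviour, mirroring the deleted lines (the fallback and iowait handling are taken from the removed code, not from the new header):

u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy)
{
        /* Prefer the precise NO_HZ idle accounting when it is available. */
        u64 idle_time = get_cpu_idle_time_us(cpu, wall);

        if (idle_time == -1ULL)
                /* No NO_HZ bookkeeping: fall back to the jiffies/kcpustat
                 * sum the deleted get_cpu_idle_time_jiffy() computed. */
                idle_time = get_cpu_idle_time_jiffy(cpu, wall);
        else if (!io_busy)
                /* Count iowait as idle unless io_is_busy asks otherwise. */
                idle_time += get_cpu_iowait_time_us(cpu, wall);

        return idle_time;
}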
@@ -348,13 +311,13 @@ static u64 update_load(int cpu)
                pcpu->policy->governor_data;
        u64 now;
        u64 now_idle;
-       unsigned int delta_idle;
-       unsigned int delta_time;
+       u64 delta_idle;
+       u64 delta_time;
        u64 active_time;
 
        now_idle = get_cpu_idle_time(cpu, &now, tunables->io_is_busy);
-       delta_idle = (unsigned int)(now_idle - pcpu->time_in_idle);
-       delta_time = (unsigned int)(now - pcpu->time_in_idle_timestamp);
+       delta_idle = (now_idle - pcpu->time_in_idle);
+       delta_time = (now - pcpu->time_in_idle_timestamp);
 
        if (delta_time <= delta_idle)
                active_time = 0;
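
Widening delta_idle and delta_time to u64 matters because these are microsecond counters: casting the difference to unsigned int wraps after 2^32 µs ≈ 4295 s ≈ 71.6 minutes, so a CPU that stays idle longer than that would produce a bogus load sample. A quick illustration with made-up values:

u64 delta_time_us = 5ULL * 3600 * 1000000;             /* 5 h between samples */
unsigned int truncated = (unsigned int)delta_time_us;  /* old behaviour       */
/* truncated == 18,000,000,000 mod 2^32 == 820,130,816 us (~13.7 min), so
 * active_time and cputime_speedadj would be computed against a wildly
 * wrong interval. */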
@@ -398,6 +361,7 @@ static void cpufreq_interactive_timer(unsigned long data)
        if (WARN_ON_ONCE(!delta_time))
                goto rearm;
 
+       spin_lock_irqsave(&pcpu->target_freq_lock, flags);
        do_div(cputime_speedadj, delta_time);
        loadadjfreq = (unsigned int)cputime_speedadj * 100;
        cpu_load = loadadjfreq / pcpu->target_freq;
@@ -423,6 +387,7 @@ static void cpufreq_interactive_timer(unsigned long data)
                trace_cpufreq_interactive_notyet(
                        data, cpu_load, pcpu->target_freq,
                        pcpu->policy->cur, new_freq);
+               spin_unlock_irqrestore(&pcpu->target_freq_lock, flags);
                goto rearm;
        }
 
@@ -430,8 +395,10 @@ static void cpufreq_interactive_timer(unsigned long data)
 
        if (cpufreq_frequency_table_target(pcpu->policy, pcpu->freq_table,
                                           new_freq, CPUFREQ_RELATION_L,
-                                          &index))
+                                          &index)) {
+               spin_unlock_irqrestore(&pcpu->target_freq_lock, flags);
                goto rearm;
+       }
 
        new_freq = pcpu->freq_table[index].frequency;
 
@@ -445,6 +412,7 @@ static void cpufreq_interactive_timer(unsigned long data)
                        trace_cpufreq_interactive_notyet(
                                data, cpu_load, pcpu->target_freq,
                                pcpu->policy->cur, new_freq);
+                       spin_unlock_irqrestore(&pcpu->target_freq_lock, flags);
                        goto rearm;
                }
        }
@@ -466,6 +434,7 @@ static void cpufreq_interactive_timer(unsigned long data)
                trace_cpufreq_interactive_already(
                        data, cpu_load, pcpu->target_freq,
                        pcpu->policy->cur, new_freq);
+               spin_unlock_irqrestore(&pcpu->target_freq_lock, flags);
                goto rearm_if_notmax;
        }
 
@@ -473,6 +442,7 @@ static void cpufreq_interactive_timer(unsigned long data)
                                         pcpu->policy->cur, new_freq);
 
        pcpu->target_freq = new_freq;
+       spin_unlock_irqrestore(&pcpu->target_freq_lock, flags);
        spin_lock_irqsave(&speedchange_cpumask_lock, flags);
        cpumask_set_cpu(data, &speedchange_cpumask);
        spin_unlock_irqrestore(&speedchange_cpumask_lock, flags);
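
Taken together, the additions to this timer callback follow one pattern: pcpu->target_freq is only read or written while the new per-cpu target_freq_lock is held, the lock is dropped on every early-exit path, and it is released before the global speedchange_cpumask_lock is taken, so the two locks never nest here. A condensed sketch of the resulting shape (the bail-out predicates are placeholders, not real identifiers):

spin_lock_irqsave(&pcpu->target_freq_lock, flags);
/* ... derive cpu_load and new_freq from the load sample ... */

if (too_soon_to_change || no_table_entry || below_recent_floor) {
        spin_unlock_irqrestore(&pcpu->target_freq_lock, flags); /* every bail-out */
        goto rearm;
}

pcpu->target_freq = new_freq;
spin_unlock_irqrestore(&pcpu->target_freq_lock, flags);

/* only after dropping the per-cpu lock is the global one acquired */
spin_lock_irqsave(&speedchange_cpumask_lock, flags);
cpumask_set_cpu(data, &speedchange_cpumask);
spin_unlock_irqrestore(&speedchange_cpumask_lock, flags);
wake_up_process(speedchange_task);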
@@ -616,16 +586,17 @@ static void cpufreq_interactive_boost(void)
 {
        int i;
        int anyboost = 0;
-       unsigned long flags;
+       unsigned long flags[2];
        struct cpufreq_interactive_cpuinfo *pcpu;
        struct cpufreq_interactive_tunables *tunables;
 
-       spin_lock_irqsave(&speedchange_cpumask_lock, flags);
+       spin_lock_irqsave(&speedchange_cpumask_lock, flags[0]);
 
        for_each_online_cpu(i) {
                pcpu = &per_cpu(cpuinfo, i);
                tunables = pcpu->policy->governor_data;
 
+               spin_lock_irqsave(&pcpu->target_freq_lock, flags[1]);
                if (pcpu->target_freq < tunables->hispeed_freq) {
                        pcpu->target_freq = tunables->hispeed_freq;
                        cpumask_set_cpu(i, &speedchange_cpumask);
@@ -641,9 +612,10 @@ static void cpufreq_interactive_boost(void)
 
                pcpu->floor_freq = tunables->hispeed_freq;
                pcpu->floor_validate_time = ktime_to_us(ktime_get());
+               spin_unlock_irqrestore(&pcpu->target_freq_lock, flags[1]);
        }
 
-       spin_unlock_irqrestore(&speedchange_cpumask_lock, flags);
+       spin_unlock_irqrestore(&speedchange_cpumask_lock, flags[0]);
 
        if (anyboost)
                wake_up_process(speedchange_task);
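
The switch from a single flags variable to flags[2] is required by the nesting: each spin_lock_irqsave() saves the caller's interrupt state into its flags argument, so the inner target_freq_lock acquisition must not overwrite the state that the outer speedchange_cpumask_lock release has to restore. The pattern in miniature (lock names are illustrative):

unsigned long flags[2];

spin_lock_irqsave(&outer_lock, flags[0]);       /* saves IRQ state A           */
spin_lock_irqsave(&inner_lock, flags[1]);       /* saved separately, A intact  */
/* ... update the per-cpu fields ... */
spin_unlock_irqrestore(&inner_lock, flags[1]);
spin_unlock_irqrestore(&outer_lock, flags[0]);  /* correctly restores state A  */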
@@ -937,6 +909,7 @@ static ssize_t store_boost(struct cpufreq_interactive_tunables *tunables,
                trace_cpufreq_interactive_boost("on");
                cpufreq_interactive_boost();
        } else {
+               tunables->boostpulse_endtime = ktime_to_us(ktime_get());
                trace_cpufreq_interactive_unboost("off");
        }
 
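Stamping boostpulse_endtime with the current time when boost is written to 0 makes any in-flight boost pulse expire immediately; without this, a pulse issued just before the unboost would keep the speed floor at hispeed_freq until its original deadline. The timer path presumably evaluates the boost state along these lines (a sketch of the governor's general design, not code shown in this patch):

u64 now = ktime_to_us(ktime_get());
bool boosted = tunables->boost_val ||
               now < tunables->boostpulse_endtime;    /* pulse still live? */

if (boosted && new_freq < tunables->hispeed_freq)
        new_freq = tunables->hispeed_freq;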
@@ -1154,6 +1127,7 @@ static int cpufreq_governor_interactive(struct cpufreq_policy *policy,
        struct cpufreq_interactive_cpuinfo *pcpu;
        struct cpufreq_frequency_table *freq_table;
        struct cpufreq_interactive_tunables *tunables;
+       unsigned long flags;
 
        if (have_governor_per_policy())
                tunables = policy->governor_data;
@@ -1178,13 +1152,6 @@ static int cpufreq_governor_interactive(struct cpufreq_policy *policy,
                        return -ENOMEM;
                }
 
-               rc = sysfs_create_group(get_governor_parent_kobj(policy),
-                               get_sysfs_attr());
-               if (rc) {
-                       kfree(tunables);
-                       return rc;
-               }
-
                tunables->usage_count = 1;
                tunables->above_hispeed_delay = default_above_hispeed_delay;
                tunables->nabove_hispeed_delay =
@@ -1200,16 +1167,26 @@ static int cpufreq_governor_interactive(struct cpufreq_policy *policy,
                spin_lock_init(&tunables->target_loads_lock);
                spin_lock_init(&tunables->above_hispeed_delay_lock);
 
+               policy->governor_data = tunables;
+               if (!have_governor_per_policy())
+                       common_tunables = tunables;
+
+               rc = sysfs_create_group(get_governor_parent_kobj(policy),
+                               get_sysfs_attr());
+               if (rc) {
+                       kfree(tunables);
+                       policy->governor_data = NULL;
+                       if (!have_governor_per_policy())
+                               common_tunables = NULL;
+                       return rc;
+               }
+
                if (!policy->governor->initialized) {
                        idle_notifier_register(&cpufreq_interactive_idle_nb);
                        cpufreq_register_notifier(&cpufreq_notifier_block,
                                        CPUFREQ_TRANSITION_NOTIFIER);
                }
 
-               policy->governor_data = tunables;
-               if (!have_governor_per_policy())
-                       common_tunables = tunables;
-
                break;
 
        case CPUFREQ_GOV_POLICY_EXIT:
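
The reordering in the GOV_POLICY_INIT path above closes a small race: the attribute files become live the instant sysfs_create_group() succeeds, and their handlers look the tunables up through policy->governor_data (or common_tunables), so those pointers must already be valid, and must be cleared again if group creation fails. A hedged sketch of the kind of accessor that could otherwise dereference NULL (the wrapper's name and signature are illustrative, not lifted from this file):

static ssize_t show_hispeed_freq_gov_pol(struct cpufreq_policy *policy,
                                         char *buf)
{
        /* With the old ordering this could run before
         * "policy->governor_data = tunables;" had executed. */
        struct cpufreq_interactive_tunables *tunables = policy->governor_data;

        return sprintf(buf, "%u\n", tunables->hispeed_freq);
}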
@@ -1246,6 +1223,7 @@ static int cpufreq_governor_interactive(struct cpufreq_policy *policy,
                                ktime_to_us(ktime_get());
                        pcpu->hispeed_validate_time =
                                pcpu->floor_validate_time;
+                       pcpu->max_freq = policy->max;
                        down_write(&pcpu->enable_sem);
                        del_timer_sync(&pcpu->cpu_timer);
                        del_timer_sync(&pcpu->cpu_slack_timer);
@@ -1281,29 +1259,37 @@ static int cpufreq_governor_interactive(struct cpufreq_policy *policy,
                for_each_cpu(j, policy->cpus) {
                        pcpu = &per_cpu(cpuinfo, j);
 
-                       /* hold write semaphore to avoid race */
-                       down_write(&pcpu->enable_sem);
+                       down_read(&pcpu->enable_sem);
                        if (pcpu->governor_enabled == 0) {
-                               up_write(&pcpu->enable_sem);
+                               up_read(&pcpu->enable_sem);
                                continue;
                        }
 
-                       /* update target_freq firstly */
+                       spin_lock_irqsave(&pcpu->target_freq_lock, flags);
                        if (policy->max < pcpu->target_freq)
                                pcpu->target_freq = policy->max;
                        else if (policy->min > pcpu->target_freq)
                                pcpu->target_freq = policy->min;
 
-                       /* Reschedule timer.
+                       spin_unlock_irqrestore(&pcpu->target_freq_lock, flags);
+                       up_read(&pcpu->enable_sem);
+
+                       /* Reschedule timer only if policy->max is raised.
                         * Delete the timers, else the timer callback may
                         * return without re-arming the timer when it fails
                         * to acquire the semaphore. This race may cause the
                         * timer to stop unexpectedly.
                         */
-                       del_timer_sync(&pcpu->cpu_timer);
-                       del_timer_sync(&pcpu->cpu_slack_timer);
-                       cpufreq_interactive_timer_start(tunables, j);
-                       up_write(&pcpu->enable_sem);
+
+                       if (policy->max > pcpu->max_freq) {
+                               down_write(&pcpu->enable_sem);
+                               del_timer_sync(&pcpu->cpu_timer);
+                               del_timer_sync(&pcpu->cpu_slack_timer);
+                               cpufreq_interactive_timer_start(tunables, j);
+                               up_write(&pcpu->enable_sem);
+                       }
+
+                       pcpu->max_freq = policy->max;
                }
                break;
        }
@@ -1339,6 +1325,7 @@ static int __init cpufreq_interactive_init(void)
                init_timer(&pcpu->cpu_slack_timer);
                pcpu->cpu_slack_timer.function = cpufreq_interactive_nop_timer;
                spin_lock_init(&pcpu->load_lock);
+               spin_lock_init(&pcpu->target_freq_lock);
                init_rwsem(&pcpu->enable_sem);
        }