cpuquiet: Updated balanced governor to use the runnable threads patch
author    Sai Charan Gurrappadi <sgurrappadi@nvidia.com>
          Tue, 19 Jun 2012 01:22:33 +0000 (18:22 -0700)
committer Huang, Tao <huangtao@rock-chips.com>
          Mon, 18 May 2015 08:07:06 +0000 (16:07 +0800)
The balanced governor now looks at the average number of runnable
threads when deciding whether to bring cores online or take them
offline. Its parameters have also been updated to reflect a similar
patch for autohotplug.
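
For reference, avg_nr_running() (added by the runnable threads patch) reports
the average number of runnable threads in the kernel's FSHIFT fixed-point
format, while nr_run_thresholds stores its per-target values with NR_FSHIFT
(2) fractional bits, so an entry of 9 means 2.25 threads and the hysteresis
of 2 means half a thread. The standalone sketch below mirrors the governor's
threshold walk in userspace to show how the on-line CPU target is chosen and
how the hysteresis keeps a noisy load from flapping between targets; FSHIFT
here follows the kernel's load-average shift of 11, and cpus_wanted() and the
sample values are illustrative only, not part of the patch.

/*
 * Userspace sketch of the balanced governor's runnable-threads check.
 * In the kernel, avg_nr_running() supplies the value that the samples
 * below fake.
 */
#include <limits.h>
#include <stdio.h>

#define FSHIFT          11      /* kernel load-average fixed point */
#define NR_FSHIFT       2       /* thresholds carry 2 fractional bits */
#define ARRAY_SIZE(a)   (sizeof(a) / sizeof((a)[0]))

static unsigned int nr_run_thresholds[] = {
/*      1,  2,  3,  4 - on-line cpus target */
        5,  9, 10, UINT_MAX     /* avg run threads * 4 (e.g., 9 = 2.25 threads) */
};
static unsigned int nr_run_hysteresis = 2;      /* 0.5 thread */
static unsigned int nr_run_last;

/* Map an average runnable-thread count (FSHIFT fixed point) to a CPU target. */
static unsigned int cpus_wanted(unsigned int avg_nr_run)
{
        unsigned int nr_run;

        for (nr_run = 1; nr_run < ARRAY_SIZE(nr_run_thresholds); nr_run++) {
                unsigned int nr_threshold = nr_run_thresholds[nr_run - 1];

                /* Hysteresis: require extra load before climbing past the
                   previous target. */
                if (nr_run_last <= nr_run)
                        nr_threshold += nr_run_hysteresis;
                if (avg_nr_run <= (nr_threshold << (FSHIFT - NR_FSHIFT)))
                        break;
        }
        nr_run_last = nr_run;
        return nr_run;
}

int main(void)
{
        /* Average runnable threads, scaled by 1 << FSHIFT. */
        unsigned int samples[] = {
                (unsigned int)(2.6 * (1 << FSHIFT)),
                (unsigned int)(2.9 * (1 << FSHIFT)),
                (unsigned int)(2.6 * (1 << FSHIFT)),    /* same load as sample one */
        };
        unsigned int i;

        /* Prints targets 2, 3, 3: hysteresis holds the last sample at 3 CPUs. */
        for (i = 0; i < ARRAY_SIZE(samples); i++)
                printf("avg %.2f threads -> %u CPUs\n",
                       samples[i] / (double)(1 << FSHIFT), cpus_wanted(samples[i]));
        return 0;
}

In balanced_speed_balance() the chosen target is then compared against
num_online_cpus(): a target below the current on-line count pushes the
decision towards CPU_SPEED_SKEWED (remove a core), and a target merely equal
to it towards CPU_SPEED_BIASED (hold, do not add one).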

Change-Id: I8dac26659ba43d95a68830c6cc268591a7f03f80
Signed-off-by: Sai Charan Gurrappadi <sgurrappadi@nvidia.com>
Reviewed-on: http://git-master/r/111282
Reviewed-by: Automatic_Commit_Validation_User
Reviewed-by: Peter De Schrijver <pdeschrijver@nvidia.com>
Reviewed-by: Peter Boonstoppel <pboonstoppel@nvidia.com>
GVS: Gerrit_Virtual_Submit
Reviewed-by: Aleksandr Frid <afrid@nvidia.com>
Reviewed-by: Yu-Huan Hsu <yhsu@nvidia.com>
Rebase-Id: Raeb86f51e0e21475ee9b96f97ce95f00883dac11

drivers/cpuquiet/governors/balanced.c

index 270bbf5d1f2897177a1f3d793e7df140f47aa255..968344e220d909a281cff554812ba8ed8994458c 100644
@@ -72,12 +72,13 @@ struct balanced_attribute {
 }
 
 /* configurable parameters */
-static unsigned long balance_level = 75;
+static unsigned long balance_level = 60;
 static unsigned long idle_bottom_freq;
 static unsigned long idle_top_freq;
 static unsigned long up_delay;
 static unsigned long down_delay;
-
+static unsigned long last_change_time;
+static unsigned long load_sample_rate = 20; // msec
 static struct workqueue_struct *balanced_wq;
 static struct delayed_work balanced_work;
 static BALANCED_STATE balanced_state;
@@ -106,7 +107,7 @@ static void calculate_load_timer(unsigned long data)
                do_div(idle_time, elapsed_time);
                *load = 100 - idle_time;
        }
-       mod_timer(&load_timer, jiffies + msecs_to_jiffies(100));
+       mod_timer(&load_timer, jiffies + msecs_to_jiffies(load_sample_rate));
 }
 
 static void start_load_timer(void)
@@ -183,6 +184,14 @@ static unsigned int count_slow_cpus(unsigned int limit)
        return cnt;
 }
 
+#define NR_FSHIFT      2
+static unsigned int nr_run_thresholds[] = {
+/*      1,  2,  3,  4 - on-line cpus target */
+       5,  9, 10, UINT_MAX /* avg run threads * 4 (e.g., 9 = 2.25 threads) */
+};
+static unsigned int nr_run_hysteresis = 2;     /* 0.5 thread */
+static unsigned int nr_run_last;
+
 static CPU_SPEED_BALANCE balanced_speed_balance(void)
 {
        unsigned long highest_speed = cpu_highest_speed();
@@ -190,14 +199,27 @@ static CPU_SPEED_BALANCE balanced_speed_balance(void)
        unsigned long skewed_speed = balanced_speed / 2;
        unsigned int nr_cpus = num_online_cpus();
        unsigned int max_cpus = pm_qos_request(PM_QOS_MAX_ONLINE_CPUS) ? : 4;
+       unsigned int avg_nr_run = avg_nr_running();
+       unsigned int nr_run;
 
        /* balanced: freq targets for all CPUs are above 50% of highest speed
           biased: freq target for at least one CPU is below 50% threshold
           skewed: freq targets for at least 2 CPUs are below 25% threshold */
-       if (count_slow_cpus(skewed_speed) >= 2 || nr_cpus > max_cpus)
+       for (nr_run = 1; nr_run < ARRAY_SIZE(nr_run_thresholds); nr_run++) {
+               unsigned int nr_threshold = nr_run_thresholds[nr_run - 1];
+               if (nr_run_last <= nr_run)
+                       nr_threshold += nr_run_hysteresis;
+               if (avg_nr_run <= (nr_threshold << (FSHIFT - NR_FSHIFT)))
+                       break;
+       }
+       nr_run_last = nr_run;
+
+       if (count_slow_cpus(skewed_speed) >= 2 || nr_cpus > max_cpus ||
+               nr_run < nr_cpus)
                return CPU_SPEED_SKEWED;
 
-       if (count_slow_cpus(balanced_speed) >= 1 || nr_cpus == max_cpus)
+       if (count_slow_cpus(balanced_speed) >= 1 || nr_cpus == max_cpus ||
+               nr_run <= nr_cpus)
                return CPU_SPEED_BIASED;
 
        return CPU_SPEED_BALANCED;
@@ -207,6 +229,8 @@ static void balanced_work_func(struct work_struct *work)
 {
        bool up = false;
        unsigned int cpu = nr_cpu_ids;
+       unsigned long now = jiffies;
+
        CPU_SPEED_BALANCE balance;
 
        switch (balanced_state) {
@@ -217,7 +241,7 @@ static void balanced_work_func(struct work_struct *work)
                if (cpu < nr_cpu_ids) {
                        up = false;
                        queue_delayed_work(balanced_wq,
-                                                &balanced_work, down_delay);
+                                                &balanced_work, up_delay);
                } else
                        stop_load_timer();
                break;
@@ -250,7 +274,11 @@ static void balanced_work_func(struct work_struct *work)
                       __func__, balanced_state);
        }
 
+       if (!up && ((now - last_change_time) < down_delay))
+               cpu = nr_cpu_ids;
+
        if (cpu < nr_cpu_ids) {
+               last_change_time = now;
                if (up)
                        cpuquiet_wake_cpu(cpu);
                else
@@ -294,7 +322,7 @@ static int balanced_cpufreq_transition(struct notifier_block *nb,
                        if (cpu_freq <= idle_bottom_freq) {
                                balanced_state = DOWN;
                                queue_delayed_work(balanced_wq,
-                                       &balanced_work, down_delay);
+                                       &balanced_work, up_delay);
                                start_load_timer();
                        }
                        break;
@@ -357,6 +385,7 @@ BALANCED_ATTRIBUTE(idle_bottom_freq, 0644);
 BALANCED_ATTRIBUTE(idle_top_freq, 0644);
 BALANCED_ATTRIBUTE(up_delay, 0644);
 BALANCED_ATTRIBUTE(down_delay, 0644);
+BALANCED_ATTRIBUTE(load_sample_rate, 0644);
 
 static struct attribute *balanced_attributes[] = {
        &balance_level_attr.attr,
@@ -419,6 +448,7 @@ static int balanced_start(void)
 {
        int err, count;
        struct cpufreq_frequency_table *table;
+       struct cpufreq_freqs initial_freq;
 
        err = balanced_sysfs();
        if (err)
@@ -431,8 +461,8 @@ static int balanced_start(void)
 
        INIT_DELAYED_WORK(&balanced_work, balanced_work_func);
 
-       up_delay = msecs_to_jiffies(1000);
-       down_delay = msecs_to_jiffies(2000);
+       up_delay = msecs_to_jiffies(100);
+       down_delay = msecs_to_jiffies(500);
 
        table = cpufreq_frequency_get_table(0);
        for (count = 0; table[count].frequency != CPUFREQ_TABLE_END; count++)
@@ -447,6 +477,11 @@ static int balanced_start(void)
        init_timer(&load_timer);
        load_timer.function = calculate_load_timer;
 
+       /*FIXME: Kick start the state machine by faking a freq notification*/
+       initial_freq.new = cpufreq_get(0);
+       if (initial_freq.new != 0)
+               balanced_cpufreq_transition(NULL, CPUFREQ_RESUMECHANGE,
+                                               &initial_freq);
        return 0;
 }
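
The other behavioural change in balanced_work_func() is that taking a core
offline is now rate-limited: the offline decision is dropped unless at least
down_delay jiffies have passed since last_change_time, which is only
refreshed when a CPU actually changes state. A minimal sketch of that guard,
with a hypothetical offline_rate_limited() helper standing in for the inline
check:

#include <stdbool.h>

/* Hypothetical stand-in for the governor's bookkeeping. */
static unsigned long last_change_time;  /* tick of the last hotplug action */

/*
 * Mirror of the new guard: an offline request is suppressed while fewer
 * than down_delay ticks have elapsed since the last hotplug action.
 * Unsigned subtraction keeps the comparison valid across counter wrap.
 */
static bool offline_rate_limited(unsigned long now, unsigned long down_delay)
{
        return (now - last_change_time) < down_delay;
}

When the guard fires, the patch resets cpu to nr_cpu_ids so neither the wake
nor the quiesce path runs; bring-up requests are not delayed this way, only
take-downs.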