Merge branch 'pm-cpufreq'
author Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Sat, 7 Nov 2015 00:30:49 +0000 (01:30 +0100)
committer Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Sat, 7 Nov 2015 00:30:49 +0000 (01:30 +0100)
* pm-cpufreq:
  cpufreq: s5pv210-cpufreq: fix wrong do_div() usage
  MAINTAINERS: update for intel P-state driver
  cpufreq: governor: Quit work-handlers early if governor is stopped
  intel_pstate: decrease number of "HWP enabled" messages
  cpufreq: arm_big_little: fix frequency check when bL switcher is active

MAINTAINERS
drivers/cpufreq/arm_big_little.c
drivers/cpufreq/cpufreq_governor.c
drivers/cpufreq/intel_pstate.c
drivers/cpufreq/s5pv210-cpufreq.c

index caddb49b207dcc278557d6df04f3fccf08c0a6c6..08a1378c70eddbab380cac98bce4588f13ae07e2 100644 (file)
@@ -5440,7 +5440,8 @@ S:        Supported
 F:     drivers/idle/intel_idle.c
 
 INTEL PSTATE DRIVER
-M:     Kristen Carlson Accardi <kristen@linux.intel.com>
+M:     Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>
+M:     Len Brown <lenb@kernel.org>
 L:     linux-pm@vger.kernel.org
 S:     Supported
 F:     drivers/cpufreq/intel_pstate.c
index f1e42f8ce0fcc75a5e67ae4e5365d098d83cfc32..c5d256caa664a63731e0cb7db6f5b00e31c750d8 100644 (file)
@@ -149,6 +149,19 @@ bL_cpufreq_set_rate(u32 cpu, u32 old_cluster, u32 new_cluster, u32 rate)
                        __func__, cpu, old_cluster, new_cluster, new_rate);
 
        ret = clk_set_rate(clk[new_cluster], new_rate * 1000);
+       if (!ret) {
+               /*
+                * FIXME: clk_set_rate hasn't returned an error here however it
+                * may be that clk_change_rate failed due to hardware or
+                * firmware issues and wasn't able to report that due to the
+                * current design of the clk core layer. To work around this
+                * problem we will read back the clock rate and check it is
+                * correct. This needs to be removed once clk core is fixed.
+                */
+               if (clk_get_rate(clk[new_cluster]) != new_rate * 1000)
+                       ret = -EIO;
+       }
+
        if (WARN_ON(ret)) {
                pr_err("clk_set_rate failed: %d, new cluster: %d\n", ret,
                                new_cluster);
@@ -189,15 +202,6 @@ bL_cpufreq_set_rate(u32 cpu, u32 old_cluster, u32 new_cluster, u32 rate)
                mutex_unlock(&cluster_lock[old_cluster]);
        }
 
-       /*
-        * FIXME: clk_set_rate has to handle the case where clk_change_rate
-        * can fail due to hardware or firmware issues. Until the clk core
-        * layer is fixed, we can check here. In most of the cases we will
-        * be reading only the cached value anyway. This needs to  be removed
-        * once clk core is fixed.
-        */
-       if (bL_cpufreq_get_rate(cpu) != new_rate)
-               return -EIO;
        return 0;
 }
 
index 11258c4c1b175be37827c201a8d7a422a5d3eda3..b260576ddb129c9bf485a12a954d0b1db30e08c5 100644 (file)
@@ -171,10 +171,6 @@ void gov_queue_work(struct dbs_data *dbs_data, struct cpufreq_policy *policy,
 {
        int i;
 
-       mutex_lock(&cpufreq_governor_lock);
-       if (!policy->governor_enabled)
-               goto out_unlock;
-
        if (!all_cpus) {
                /*
                 * Use raw_smp_processor_id() to avoid preemptible warnings.
@@ -188,9 +184,6 @@ void gov_queue_work(struct dbs_data *dbs_data, struct cpufreq_policy *policy,
                for_each_cpu(i, policy->cpus)
                        __gov_queue_work(i, dbs_data, delay);
        }
-
-out_unlock:
-       mutex_unlock(&cpufreq_governor_lock);
 }
 EXPORT_SYMBOL_GPL(gov_queue_work);
 
@@ -229,13 +222,24 @@ static void dbs_timer(struct work_struct *work)
        struct cpu_dbs_info *cdbs = container_of(work, struct cpu_dbs_info,
                                                 dwork.work);
        struct cpu_common_dbs_info *shared = cdbs->shared;
-       struct cpufreq_policy *policy = shared->policy;
-       struct dbs_data *dbs_data = policy->governor_data;
+       struct cpufreq_policy *policy;
+       struct dbs_data *dbs_data;
        unsigned int sampling_rate, delay;
        bool modify_all = true;
 
        mutex_lock(&shared->timer_mutex);
 
+       policy = shared->policy;
+
+       /*
+        * Governor might already be disabled and there is no point continuing
+        * with the work-handler.
+        */
+       if (!policy)
+               goto unlock;
+
+       dbs_data = policy->governor_data;
+
        if (dbs_data->cdata->governor == GOV_CONSERVATIVE) {
                struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
 
@@ -252,6 +256,7 @@ static void dbs_timer(struct work_struct *work)
        delay = dbs_data->cdata->gov_dbs_timer(cdbs, dbs_data, modify_all);
        gov_queue_work(dbs_data, policy, delay, modify_all);
 
+unlock:
        mutex_unlock(&shared->timer_mutex);
 }
 
@@ -478,9 +483,17 @@ static int cpufreq_governor_stop(struct cpufreq_policy *policy,
        if (!shared || !shared->policy)
                return -EBUSY;
 
+       /*
+        * Work-handler must see this updated, as it should not proceed any
+        * further after governor is disabled. And so timer_mutex is taken while
+        * updating this value.
+        */
+       mutex_lock(&shared->timer_mutex);
+       shared->policy = NULL;
+       mutex_unlock(&shared->timer_mutex);
+
        gov_cancel_work(dbs_data, policy);
 
-       shared->policy = NULL;
        mutex_destroy(&shared->timer_mutex);
        return 0;
 }
index 93a3c635ea2741fa4296c013b564ec6a7c27b827..2e31d097def6b884262295e0d46d0faa6418bd81 100644 (file)
@@ -684,8 +684,6 @@ static void __init intel_pstate_sysfs_expose_params(void)
 
 static void intel_pstate_hwp_enable(struct cpudata *cpudata)
 {
-       pr_info("intel_pstate: HWP enabled\n");
-
        wrmsrl_on_cpu(cpudata->cpu, MSR_PM_ENABLE, 0x1);
 }
 
@@ -1557,8 +1555,10 @@ static int __init intel_pstate_init(void)
        if (!all_cpu_data)
                return -ENOMEM;
 
-       if (static_cpu_has_safe(X86_FEATURE_HWP) && !no_hwp)
+       if (static_cpu_has_safe(X86_FEATURE_HWP) && !no_hwp) {
+               pr_info("intel_pstate: HWP enabled\n");
                hwp_active++;
+       }
 
        if (!hwp_active && hwp_only)
                goto out;
@@ -1593,8 +1593,10 @@ static int __init intel_pstate_setup(char *str)
 
        if (!strcmp(str, "disable"))
                no_load = 1;
-       if (!strcmp(str, "no_hwp"))
+       if (!strcmp(str, "no_hwp")) {
+               pr_info("intel_pstate: HWP disabled\n");
                no_hwp = 1;
+       }
        if (!strcmp(str, "force"))
                force_load = 1;
        if (!strcmp(str, "hwp_only"))
index 9e231f52150c404ebd92e6d74ea6a24b5642576a..051a8a8224cd7ade8b846e62fc27d0733ba8f73b 100644 (file)
@@ -212,11 +212,11 @@ static void s5pv210_set_refresh(enum s5pv210_dmc_port ch, unsigned long freq)
        /* Find current DRAM frequency */
        tmp = s5pv210_dram_conf[ch].freq;
 
-       do_div(tmp, freq);
+       tmp /= freq;
 
        tmp1 = s5pv210_dram_conf[ch].refresh;
 
-       do_div(tmp1, tmp);
+       tmp1 /= tmp;
 
        __raw_writel(tmp1, reg);
 }