sched: hmp: Fix build breakage when not using CONFIG_SCHED_HMP
Author:     Chris Redpath <Chris.Redpath@arm.com>
AuthorDate: Fri, 22 Nov 2013 13:19:18 +0000
Commit:     Jon Medhurst <tixy@linaro.org>
CommitDate: Fri, 22 Nov 2013 14:15:38 +0000
hmp_variable_scale_convert() was called without guards in
__update_entity_runnable_avg(), which breaks the build when
CONFIG_SCHED_HMP is not set. Guard the HMP scaling helpers and the
call site with #ifdef CONFIG_SCHED_HMP.

Signed-off-by: Chris Redpath <chris.redpath@arm.com>
Signed-off-by: Mark Brown <broonie@linaro.org>
Signed-off-by: Jon Medhurst <tixy@linaro.org>
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index c7d808ee0a367072f39d846d578c37557b4c0fba..8a4a02740f0a9cb4a0f8c54c74673bfbf001b7f9 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -1210,6 +1210,7 @@ static u32 __compute_runnable_contrib(u64 n)
        return contrib + runnable_avg_yN_sum[n];
 }
 
+#ifdef CONFIG_SCHED_HMP
 #define HMP_VARIABLE_SCALE_SHIFT 16ULL
 struct hmp_global_attr {
        struct attribute attr;
@@ -1291,6 +1292,7 @@ struct cpufreq_extents {
 
 static struct cpufreq_extents freq_scale[CONFIG_NR_CPUS];
 #endif /* CONFIG_HMP_FREQUENCY_INVARIANT_SCALE */
+#endif /* CONFIG_SCHED_HMP */
 
 /* We can represent the historical contribution to runnable average as the
  * coefficients of a geometric series.  To do this we sub-divide our runnable
@@ -1336,8 +1338,9 @@ static __always_inline int __update_entity_runnable_avg(u64 now,
 #endif /* CONFIG_HMP_FREQUENCY_INVARIANT_SCALE */
 
        delta = now - sa->last_runnable_update;
-
+#ifdef CONFIG_SCHED_HMP
        delta = hmp_variable_scale_convert(delta);
+#endif
        /*
         * This should only happen when time goes backwards, which it
         * unfortunately does during sched clock init when we swap over to TSC.
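
Editor's note: below is a minimal, self-contained sketch of the CONFIG_SCHED_HMP
guard pattern this commit applies. It is not the kernel code: the u64 typedef,
the placeholder scaling body and the main() driver are assumptions for
illustration only; the helper name, HMP_VARIABLE_SCALE_SHIFT and the guarded
call site come from the patch above. Build with -DCONFIG_SCHED_HMP to enable
the HMP path; without it, the helper and its call site compile away entirely.

    /* Standalone sketch of the guard pattern, not the kernel implementation. */
    #include <stdint.h>
    #include <stdio.h>

    typedef uint64_t u64;

    #ifdef CONFIG_SCHED_HMP
    #define HMP_VARIABLE_SCALE_SHIFT 16ULL

    /* Placeholder body: scale delta by a Q16 fixed-point factor of 1.0. */
    static u64 hmp_variable_scale_convert(u64 delta)
    {
            u64 factor = 1ULL << HMP_VARIABLE_SCALE_SHIFT;

            return (delta * factor) >> HMP_VARIABLE_SCALE_SHIFT;
    }
    #endif /* CONFIG_SCHED_HMP */

    static u64 update_runnable_delta(u64 now, u64 last_update)
    {
            u64 delta = now - last_update;

    #ifdef CONFIG_SCHED_HMP
            /* Only reference the helper when it is actually built. */
            delta = hmp_variable_scale_convert(delta);
    #endif
            return delta;
    }

    int main(void)
    {
            printf("delta = %llu\n",
                   (unsigned long long)update_runnable_delta(2048, 1024));
            return 0;
    }

An alternative this commit does not take would be an #else branch defining
hmp_variable_scale_convert() as a pass-through, which keeps the call site free
of #ifdefs; guarding the call site as done here keeps all HMP code compiled out
when the option is disabled.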