sched: Fix domain iteration
[firefly-linux-kernel-4.4.55.git] / kernel / sched / fair.c
index 940e6d17cf96a333fd7ea0c4543e2578effd4638..54cbaa4e7b37c571463017ddbc663c01f2604d59 100644 (file)
@@ -2703,7 +2703,7 @@ select_task_rq_fair(struct task_struct *p, int sd_flag, int wake_flags)
        int want_sd = 1;
        int sync = wake_flags & WF_SYNC;
 
-       if (p->rt.nr_cpus_allowed == 1)
+       if (p->nr_cpus_allowed == 1)
                return prev_cpu;
 
        if (sd_flag & SD_BALANCE_WAKE) {
@@ -3503,15 +3503,22 @@ unsigned long __weak arch_scale_smt_power(struct sched_domain *sd, int cpu)
 unsigned long scale_rt_power(int cpu)
 {
        struct rq *rq = cpu_rq(cpu);
-       u64 total, available;
+       u64 total, available, age_stamp, avg;
 
-       total = sched_avg_period() + (rq->clock - rq->age_stamp);
+       /*
+        * Since we're reading these variables without serialization make sure
+        * we read them once before doing sanity checks on them.
+        */
+       age_stamp = ACCESS_ONCE(rq->age_stamp);
+       avg = ACCESS_ONCE(rq->rt_avg);
+
+       total = sched_avg_period() + (rq->clock - age_stamp);
 
-       if (unlikely(total < rq->rt_avg)) {
+       if (unlikely(total < avg)) {
                /* Ensures that power won't end up being negative */
                available = 0;
        } else {
-               available = total - rq->rt_avg;
+               available = total - avg;
        }
 
        if (unlikely((s64)total < SCHED_POWER_SCALE))
@@ -3574,11 +3581,26 @@ void update_group_power(struct sched_domain *sd, int cpu)
 
        power = 0;
 
-       group = child->groups;
-       do {
-               power += group->sgp->power;
-               group = group->next;
-       } while (group != child->groups);
+       if (child->flags & SD_OVERLAP) {
+               /*
+                * SD_OVERLAP domains cannot assume that child groups
+                * span the current group.
+                */
+
+               for_each_cpu(cpu, sched_group_cpus(sdg))
+                       power += power_of(cpu);
+       } else  {
+               /*
+                * !SD_OVERLAP domains can assume that child groups
+                * span the current group.
+                */
+
+               group = child->groups;
+               do {
+                       power += group->sgp->power;
+                       group = group->next;
+               } while (group != child->groups);
+       }
 
        sdg->sgp->power = power;
 }
@@ -3630,7 +3652,7 @@ static inline void update_sg_lb_stats(struct lb_env *env,
        int i;
 
        if (local_group)
-               balance_cpu = group_first_cpu(group);
+               balance_cpu = group_balance_cpu(group);
 
        /* Tally up the load of all CPUs in the group */
        max_cpu_load = 0;
@@ -3645,7 +3667,8 @@ static inline void update_sg_lb_stats(struct lb_env *env,
 
                /* Bias balancing toward cpus of our domain */
                if (local_group) {
-                       if (idle_cpu(i) && !first_idle_cpu) {
+                       if (idle_cpu(i) && !first_idle_cpu &&
+                                       cpumask_test_cpu(i, sched_group_mask(group))) {
                                first_idle_cpu = 1;
                                balance_cpu = i;
                        }