Merge branch 'linus' into sched/core
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index b4da534f4b8cb7708ca873ec3491dad7edc76759..806d1b227a21060aac100994a8992266c00b59b5 100644
@@ -1240,6 +1240,7 @@ static int wake_affine(struct sched_domain *sd, struct task_struct *p, int sync)
         * effect of the currently running task from the load
         * of the current CPU:
         */
+       rcu_read_lock();
        if (sync) {
                tg = task_group(current);
                weight = current->se.load.weight;
@@ -1275,6 +1276,7 @@ static int wake_affine(struct sched_domain *sd, struct task_struct *p, int sync)
                balanced = this_eff_load <= prev_eff_load;
        } else
                balanced = true;
+       rcu_read_unlock();
 
        /*
         * If the currently running task will sleep within
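
The two hunks above wrap wake_affine()'s task_group() lookups in an RCU read-side critical section. task_group() returns a cgroup-backed struct task_group that can be freed via RCU once the task migrates to another group, so every dereference of it must sit between rcu_read_lock() and rcu_read_unlock(). A condensed sketch of the protected region, reusing effective_load() and the variable names from the surrounding function (a kernel-context fragment, not standalone code; the real unlock comes only after the balanced computation):

	rcu_read_lock();			/* pin tg against RCU freeing */
	if (sync) {
		tg = task_group(current);	/* RCU-protected lookup */
		weight = current->se.load.weight;
		/* subtract current's contribution from this CPU's load */
		this_load += effective_load(tg, this_cpu, -weight, -weight);
		load += effective_load(tg, prev_cpu, 0, -weight);
	}
	rcu_read_unlock();			/* tg must not be used past here */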
@@ -2425,14 +2427,14 @@ static inline void update_sg_lb_stats(struct sched_domain *sd,
         * domains. In the newly idle case, we will allow all the cpu's
         * to do the newly idle load balance.
         */
-       if (idle != CPU_NEWLY_IDLE && local_group &&
-           balance_cpu != this_cpu) {
-               *balance = 0;
-               return;
+       if (idle != CPU_NEWLY_IDLE && local_group) {
+               if (balance_cpu != this_cpu) {
+                       *balance = 0;
+                       return;
+               }
+               update_group_power(sd, this_cpu);
        }
 
-       update_group_power(sd, this_cpu);
-
        /* Adjust by relative CPU power of the group */
        sgs->avg_load = (sgs->group_load * SCHED_LOAD_SCALE) / group->cpu_power;
 
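
The reshaped hunk above narrows update_group_power() to the CPU that owns the update: outside the newly idle path, only the designated balance_cpu of the local group carries on (all other CPUs set *balance = 0 and return early), and that same CPU is now the only one refreshing group->cpu_power, instead of every CPU recomputing it unconditionally on each pass. The avg_load line then scales raw group load by relative CPU power; a worked example of that arithmetic with hypothetical numbers (SCHED_LOAD_SCALE is 1024 in kernels of this vintage):

	#include <stdio.h>

	#define SCHED_LOAD_SCALE 1024UL

	int main(void)
	{
		unsigned long group_load = 2048;	/* summed weighted load (hypothetical) */
		unsigned long cpu_power  = 2048;	/* e.g. two full-power CPUs */

		/* same computation as update_sg_lb_stats() */
		unsigned long avg_load = group_load * SCHED_LOAD_SCALE / cpu_power;

		printf("avg_load = %lu\n", avg_load);	/* prints 1024 */
		return 0;
	}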
@@ -3596,6 +3598,7 @@ static void nohz_idle_balance(int this_cpu, enum cpu_idle_type idle)
                }
 
                raw_spin_lock_irq(&this_rq->lock);
+               update_rq_clock(this_rq);
                update_cpu_load(this_rq);
                raw_spin_unlock_irq(&this_rq->lock);
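
The final hunk refreshes the runqueue clock before the load update: update_cpu_load() consults rq->clock (via sched_avg_update() in kernels of this era) to age its averages, and on this nohz balancing path the clock can be stale because no tick has updated it. Calling update_rq_clock() under the same rq->lock, as sketched below with explanatory comments, keeps the aging interval accurate:

	raw_spin_lock_irq(&this_rq->lock);
	update_rq_clock(this_rq);	/* bring rq->clock up to date first */
	update_cpu_load(this_rq);	/* ages averages against rq->clock */
	raw_spin_unlock_irq(&this_rq->lock);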