diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 1ca2cd44d64afed367d643251d9539f5a28b3e0b..5c9e67923b7cfd7826903c17322c3f0c55de5d74 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -1106,6 +1106,8 @@ static void
 check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
 {
        unsigned long ideal_runtime, delta_exec;
+       struct sched_entity *se;
+       s64 delta;
 
        ideal_runtime = sched_slice(cfs_rq, curr);
        delta_exec = curr->sum_exec_runtime - curr->prev_sum_exec_runtime;
@@ -1127,16 +1129,14 @@ check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
        if (delta_exec < sysctl_sched_min_granularity)
                return;
 
-       if (cfs_rq->nr_running > 1) {
-               struct sched_entity *se = __pick_first_entity(cfs_rq);
-               s64 delta = curr->vruntime - se->vruntime;
+       se = __pick_first_entity(cfs_rq);
+       delta = curr->vruntime - se->vruntime;
 
-               if (delta < 0)
-                       return;
+       if (delta < 0)
+               return;
 
-               if (delta > ideal_runtime)
-                       resched_task(rq_of(cfs_rq)->curr);
-       }
+       if (delta > ideal_runtime)
+               resched_task(rq_of(cfs_rq)->curr);
 }
 
 static void
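
For readability, this is how the check_preempt_tick() body reads once the two hunks above are applied. It is assembled purely from the lines visible in this diff; the unchanged code between the hunks is elided, and nothing below is new:

static void
check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
{
        unsigned long ideal_runtime, delta_exec;
        struct sched_entity *se;
        s64 delta;

        ideal_runtime = sched_slice(cfs_rq, curr);
        delta_exec = curr->sum_exec_runtime - curr->prev_sum_exec_runtime;

        /* ... unchanged lines between the two hunks, not shown in this diff ... */

        if (delta_exec < sysctl_sched_min_granularity)
                return;

        se = __pick_first_entity(cfs_rq);
        delta = curr->vruntime - se->vruntime;

        if (delta < 0)
                return;

        if (delta > ideal_runtime)
                resched_task(rq_of(cfs_rq)->curr);
}
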
@@ -2183,7 +2183,7 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p,
 
                /* Skip over this group if it has no CPUs allowed */
                if (!cpumask_intersects(sched_group_cpus(group),
-                                       &p->cpus_allowed))
+                                       tsk_cpus_allowed(p)))
                        continue;
 
                local_group = cpumask_test_cpu(this_cpu,
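
Every conversion away from &p->cpus_allowed in this file goes through the tsk_cpus_allowed() accessor. For context only (this definition is not part of the diff), in kernels of this vintage it is a thin wrapper in include/linux/sched.h along these lines:

/* Future-safe accessor for the task's cpus_allowed mask. */
#define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)

Since the accessor still yields a pointer to the same cpumask, the cpumask_test_cpu(), cpumask_intersects() and for_each_cpu_and() call sites below change in spelling only, not in behaviour.
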
@@ -2229,7 +2229,7 @@ find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu)
        int i;
 
        /* Traverse only the allowed CPUs */
-       for_each_cpu_and(i, sched_group_cpus(group), &p->cpus_allowed) {
+       for_each_cpu_and(i, sched_group_cpus(group), tsk_cpus_allowed(p)) {
                load = weighted_cpuload(i);
 
                if (load < min_load || (load == min_load && i == this_cpu)) {
@@ -2273,7 +2273,7 @@ static int select_idle_sibling(struct task_struct *p, int target)
                if (!(sd->flags & SD_SHARE_PKG_RESOURCES))
                        break;
 
-               for_each_cpu_and(i, sched_domain_span(sd), &p->cpus_allowed) {
+               for_each_cpu_and(i, sched_domain_span(sd), tsk_cpus_allowed(p)) {
                        if (idle_cpu(i)) {
                                target = i;
                                break;
@@ -2316,7 +2316,7 @@ select_task_rq_fair(struct task_struct *p, int sd_flag, int wake_flags)
        int sync = wake_flags & WF_SYNC;
 
        if (sd_flag & SD_BALANCE_WAKE) {
-               if (cpumask_test_cpu(cpu, &p->cpus_allowed))
+               if (cpumask_test_cpu(cpu, tsk_cpus_allowed(p)))
                        want_affine = 1;
                new_cpu = prev_cpu;
        }
@@ -2697,7 +2697,7 @@ int can_migrate_task(struct task_struct *p, struct rq *rq, int this_cpu,
         * 2) cannot be migrated to this CPU due to cpus_allowed, or
         * 3) are cache-hot on their current CPU.
         */
-       if (!cpumask_test_cpu(this_cpu, &p->cpus_allowed)) {
+       if (!cpumask_test_cpu(this_cpu, tsk_cpus_allowed(p))) {
                schedstat_inc(p, se.statistics.nr_failed_migrations_affine);
                return 0;
        }
@@ -4087,7 +4087,7 @@ redo:
                         * moved to this_cpu
                         */
                        if (!cpumask_test_cpu(this_cpu,
-                                             &busiest->curr->cpus_allowed)) {
+                                       tsk_cpus_allowed(busiest->curr))) {
                                raw_spin_unlock_irqrestore(&busiest->lock,
                                                            flags);
                                all_pinned = 1;
@@ -4269,22 +4269,6 @@ out_unlock:
 }
 
 #ifdef CONFIG_NO_HZ
-
-static DEFINE_PER_CPU(struct call_single_data, remote_sched_softirq_cb);
-
-static void trigger_sched_softirq(void *data)
-{
-       raise_softirq_irqoff(SCHED_SOFTIRQ);
-}
-
-static inline void init_sched_softirq_csd(struct call_single_data *csd)
-{
-       csd->func = trigger_sched_softirq;
-       csd->info = NULL;
-       csd->flags = 0;
-       csd->priv = 0;
-}
-
 /*
  * idle load balancing details
  * - One of the idle CPUs nominates itself as idle load_balancer, while
@@ -4450,11 +4434,16 @@ static void nohz_balancer_kick(int cpu)
        }
 
        if (!cpu_rq(ilb_cpu)->nohz_balance_kick) {
-               struct call_single_data *cp;
-
                cpu_rq(ilb_cpu)->nohz_balance_kick = 1;
-               cp = &per_cpu(remote_sched_softirq_cb, cpu);
-               __smp_call_function_single(ilb_cpu, cp, 0);
+
+               smp_mb();
+               /*
+                * Use smp_send_reschedule() instead of resched_cpu().
+                * This way we generate a sched IPI on the target cpu which
+                * is idle. And the softirq performing nohz idle load balance
+                * will be run before returning from the IPI.
+                */
+               smp_send_reschedule(ilb_cpu);
        }
        return;
 }
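
The comment added above describes the receive side only in prose. As a rough sketch of the intended flow, assuming the IPI handler in kernel/sched.c checks the per-rq nohz_balance_kick flag on an idle CPU (the helper name got_nohz_idle_kick() and the stripped-down scheduler_ipi() below are illustrative assumptions, not lines taken from this tree), the kick is consumed roughly like this:

/* Illustrative sketch only -- not part of this diff. */
static inline bool got_nohz_idle_kick(void)
{
        int cpu = smp_processor_id();

        /* Only the nominated, still-idle balancer CPU should react. */
        return idle_cpu(cpu) && cpu_rq(cpu)->nohz_balance_kick;
}

void scheduler_ipi(void)
{
        /* Heavily simplified: the real handler also handles remote wakeups. */
        irq_enter();
        if (got_nohz_idle_kick() && !need_resched())
                /* Runs run_rebalance_domains() before the CPU returns to idle. */
                raise_softirq_irqoff(SCHED_SOFTIRQ);
        irq_exit();
}
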
@@ -4687,7 +4676,7 @@ static inline int nohz_kick_needed(struct rq *rq, int cpu)
        if (time_before(now, nohz.next_balance))
                return 0;
 
-       if (rq->idle_at_tick)
+       if (idle_cpu(cpu))
                return 0;
 
        first_pick_cpu = atomic_read(&nohz.first_pick_cpu);
@@ -4723,7 +4712,7 @@ static void run_rebalance_domains(struct softirq_action *h)
 {
        int this_cpu = smp_processor_id();
        struct rq *this_rq = cpu_rq(this_cpu);
-       enum cpu_idle_type idle = this_rq->idle_at_tick ?
+       enum cpu_idle_type idle = this_rq->idle_balance ?
                                                CPU_IDLE : CPU_NOT_IDLE;
 
        rebalance_domains(this_cpu, idle);