diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index f721b52acd8d1bd1c4e547bc1f049983ff61920d..47ceac9e8552f309930c9f9af4d4131fd18ca8f3 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -161,7 +161,7 @@ static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
        return &rt_rq->tg->rt_bandwidth;
 }
 
-#else
+#else /* !CONFIG_RT_GROUP_SCHED */
 
 static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
 {
@@ -226,48 +226,10 @@ static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
        return &def_rt_bandwidth;
 }
 
-#endif
-
-static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
-{
-       int i, idle = 1;
-       cpumask_t span;
-
-       if (rt_b->rt_runtime == RUNTIME_INF)
-               return 1;
-
-       span = sched_rt_period_mask();
-       for_each_cpu_mask(i, span) {
-               int enqueue = 0;
-               struct rt_rq *rt_rq = sched_rt_period_rt_rq(rt_b, i);
-               struct rq *rq = rq_of_rt_rq(rt_rq);
-
-               spin_lock(&rq->lock);
-               if (rt_rq->rt_time) {
-                       u64 runtime;
-
-                       spin_lock(&rt_rq->rt_runtime_lock);
-                       runtime = rt_rq->rt_runtime;
-                       rt_rq->rt_time -= min(rt_rq->rt_time, overrun*runtime);
-                       if (rt_rq->rt_throttled && rt_rq->rt_time < runtime) {
-                               rt_rq->rt_throttled = 0;
-                               enqueue = 1;
-                       }
-                       if (rt_rq->rt_time || rt_rq->rt_nr_running)
-                               idle = 0;
-                       spin_unlock(&rt_rq->rt_runtime_lock);
-               }
-
-               if (enqueue)
-                       sched_rt_rq_enqueue(rt_rq);
-               spin_unlock(&rq->lock);
-       }
-
-       return idle;
-}
+#endif /* CONFIG_RT_GROUP_SCHED */
 
 #ifdef CONFIG_SMP
-static int balance_runtime(struct rt_rq *rt_rq)
+static int do_balance_runtime(struct rt_rq *rt_rq)
 {
        struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
        struct root_domain *rd = cpu_rq(smp_processor_id())->rd;
@@ -374,7 +336,6 @@ static void disable_runtime(struct rq *rq)
 
 static void __enable_runtime(struct rq *rq)
 {
-       struct root_domain *rd = rq->rd;
        struct rt_rq *rt_rq;
 
        if (unlikely(!scheduler_running))
@@ -401,7 +362,65 @@ static void enable_runtime(struct rq *rq)
        spin_unlock_irqrestore(&rq->lock, flags);
 }
 
-#endif
+static int balance_runtime(struct rt_rq *rt_rq)
+{
+       int more = 0;
+
+       if (rt_rq->rt_time > rt_rq->rt_runtime) {
+               spin_unlock(&rt_rq->rt_runtime_lock);
+               more = do_balance_runtime(rt_rq);
+               spin_lock(&rt_rq->rt_runtime_lock);
+       }
+
+       return more;
+}
+#else /* !CONFIG_SMP */
+static inline int balance_runtime(struct rt_rq *rt_rq)
+{
+       return 0;
+}
+#endif /* CONFIG_SMP */
+
+static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
+{
+       int i, idle = 1;
+       cpumask_t span;
+
+       if (rt_b->rt_runtime == RUNTIME_INF)
+               return 1;
+
+       span = sched_rt_period_mask();
+       for_each_cpu_mask(i, span) {
+               int enqueue = 0;
+               struct rt_rq *rt_rq = sched_rt_period_rt_rq(rt_b, i);
+               struct rq *rq = rq_of_rt_rq(rt_rq);
+
+               spin_lock(&rq->lock);
+               if (rt_rq->rt_time) {
+                       u64 runtime;
+
+                       spin_lock(&rt_rq->rt_runtime_lock);
+                       if (rt_rq->rt_throttled)
+                               balance_runtime(rt_rq);
+                       runtime = rt_rq->rt_runtime;
+                       rt_rq->rt_time -= min(rt_rq->rt_time, overrun*runtime);
+                       if (rt_rq->rt_throttled && rt_rq->rt_time < runtime) {
+                               rt_rq->rt_throttled = 0;
+                               enqueue = 1;
+                       }
+                       if (rt_rq->rt_time || rt_rq->rt_nr_running)
+                               idle = 0;
+                       spin_unlock(&rt_rq->rt_runtime_lock);
+               } else if (rt_rq->rt_nr_running)
+                       idle = 0;
+
+               if (enqueue)
+                       sched_rt_rq_enqueue(rt_rq);
+               spin_unlock(&rq->lock);
+       }
+
+       return idle;
+}
 
 static inline int rt_se_prio(struct sched_rt_entity *rt_se)
 {
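The hunk above moves do_sched_rt_period_timer() below the runtime-balancing code so the period timer can call balance_runtime() on a throttled queue before doing its accounting. The accounting itself is unchanged from the deleted copy: each elapsed period refunds up to overrun * rt_runtime of consumed time, and a throttled queue is unthrottled once rt_time drops back under one period's budget. A minimal standalone sketch of that arithmetic, using hypothetical toy types rather than the kernel's structures:

#include <stdio.h>
#include <stdint.h>

/* Toy model of one rt_rq's bandwidth state (hypothetical, not kernel code). */
struct toy_rt_rq {
	uint64_t rt_time;      /* runtime consumed so far     */
	uint64_t rt_runtime;   /* budget available per period */
	int      rt_throttled; /* queue currently throttled?  */
};

/*
 * Mirrors the per-queue accounting in do_sched_rt_period_timer():
 * each elapsed period refunds up to overrun * rt_runtime of consumed
 * time, and the queue is unthrottled once it is back under budget.
 */
static void toy_period_timer(struct toy_rt_rq *rt_rq, int overrun)
{
	uint64_t decr = (uint64_t)overrun * rt_rq->rt_runtime;

	if (decr > rt_rq->rt_time)	/* min(rt_time, overrun*runtime) */
		decr = rt_rq->rt_time;
	rt_rq->rt_time -= decr;

	if (rt_rq->rt_throttled && rt_rq->rt_time < rt_rq->rt_runtime)
		rt_rq->rt_throttled = 0;	/* kernel would re-enqueue here */
}

int main(void)
{
	/* 950 units consumed against a 100-unit budget: throttled for a while. */
	struct toy_rt_rq rq = { .rt_time = 950, .rt_runtime = 100, .rt_throttled = 1 };
	int period;

	for (period = 1; rq.rt_throttled; period++) {
		toy_period_timer(&rq, 1);
		printf("period %d: rt_time=%llu throttled=%d\n",
		       period, (unsigned long long)rq.rt_time, rq.rt_throttled);
	}
	return 0;
}

With those numbers the queue is re-enabled after the ninth period, once rt_time (50) falls below rt_runtime (100).
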
@@ -428,17 +447,10 @@ static int sched_rt_runtime_exceeded(struct rt_rq *rt_rq)
        if (sched_rt_runtime(rt_rq) >= sched_rt_period(rt_rq))
                return 0;
 
-#ifdef CONFIG_SMP
-       if (rt_rq->rt_time > runtime) {
-               spin_unlock(&rt_rq->rt_runtime_lock);
-               balance_runtime(rt_rq);
-               spin_lock(&rt_rq->rt_runtime_lock);
-
-               runtime = sched_rt_runtime(rt_rq);
-               if (runtime == RUNTIME_INF)
-                       return 0;
-       }
-#endif
+       balance_runtime(rt_rq);
+       runtime = sched_rt_runtime(rt_rq);
+       if (runtime == RUNTIME_INF)
+               return 0;
 
        if (rt_rq->rt_time > runtime) {
                rt_rq->rt_throttled = 1;
@@ -571,14 +583,20 @@ void dec_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
 #endif
 }
 
-static void enqueue_rt_entity(struct sched_rt_entity *rt_se)
+static void __enqueue_rt_entity(struct sched_rt_entity *rt_se)
 {
        struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
        struct rt_prio_array *array = &rt_rq->active;
        struct rt_rq *group_rq = group_rt_rq(rt_se);
        struct list_head *queue = array->queue + rt_se_prio(rt_se);
 
-       if (group_rq && rt_rq_throttled(group_rq))
+       /*
+        * Don't enqueue the group if it's throttled, or when empty.
+        * The latter is a consequence of the former when a child group
+        * gets throttled and the current group doesn't have any other
+        * active members.
+        */
+       if (group_rq && (rt_rq_throttled(group_rq) || !group_rq->rt_nr_running))
                return;
 
        if (rt_se->nr_cpus_allowed == 1)
@@ -591,7 +609,7 @@ static void enqueue_rt_entity(struct sched_rt_entity *rt_se)
        inc_rt_tasks(rt_se, rt_rq);
 }
 
-static void dequeue_rt_entity(struct sched_rt_entity *rt_se)
+static void __dequeue_rt_entity(struct sched_rt_entity *rt_se)
 {
        struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
        struct rt_prio_array *array = &rt_rq->active;
@@ -607,11 +625,10 @@ static void dequeue_rt_entity(struct sched_rt_entity *rt_se)
  * Because the prio of an upper entry depends on the lower
  * entries, we must remove entries top-down.
  */
-static void dequeue_rt_stack(struct task_struct *p)
+static void dequeue_rt_stack(struct sched_rt_entity *rt_se)
 {
-       struct sched_rt_entity *rt_se, *back = NULL;
+       struct sched_rt_entity *back = NULL;
 
-       rt_se = &p->rt;
        for_each_sched_rt_entity(rt_se) {
                rt_se->back = back;
                back = rt_se;
@@ -619,7 +636,26 @@ static void dequeue_rt_stack(struct task_struct *p)
 
        for (rt_se = back; rt_se; rt_se = rt_se->back) {
                if (on_rt_rq(rt_se))
-                       dequeue_rt_entity(rt_se);
+                       __dequeue_rt_entity(rt_se);
+       }
+}
+
+static void enqueue_rt_entity(struct sched_rt_entity *rt_se)
+{
+       dequeue_rt_stack(rt_se);
+       for_each_sched_rt_entity(rt_se)
+               __enqueue_rt_entity(rt_se);
+}
+
+static void dequeue_rt_entity(struct sched_rt_entity *rt_se)
+{
+       dequeue_rt_stack(rt_se);
+
+       for_each_sched_rt_entity(rt_se) {
+               struct rt_rq *rt_rq = group_rt_rq(rt_se);
+
+               if (rt_rq && rt_rq->rt_nr_running)
+                       __enqueue_rt_entity(rt_se);
        }
 }
 
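Taken together, the two hunks above restructure enqueueing around dequeue_rt_stack(): the entity hierarchy is only linked bottom-up, so one pass records back pointers and a second pass follows them to dequeue entries top-down, after which enqueue_rt_entity() rebuilds the stack bottom-up and dequeue_rt_entity() re-enqueues only groups that still have runnable entities. A minimal standalone sketch of that two-pass traversal, with a hypothetical toy node type rather than sched_rt_entity:

#include <stdio.h>

/* Hypothetical entity carrying only an upward link, like sched_rt_entity. */
struct toy_entity {
	const char *name;
	struct toy_entity *parent; /* bottom-up link, the direction the chain is walked */
	struct toy_entity *back;   /* filled in to allow a top-down pass */
};

/*
 * Same idea as dequeue_rt_stack(): record back pointers while walking
 * bottom-up, then follow them to visit (here: print) entries top-down.
 */
static void visit_top_down(struct toy_entity *leaf)
{
	struct toy_entity *e, *back = NULL;

	for (e = leaf; e; e = e->parent) {
		e->back = back;
		back = e;
	}
	for (e = back; e; e = e->back)
		printf("dequeue %s\n", e->name);
}

int main(void)
{
	struct toy_entity root  = { "root",  NULL,   NULL };
	struct toy_entity group = { "group", &root,  NULL };
	struct toy_entity task  = { "task",  &group, NULL };

	visit_top_down(&task);	/* prints root, group, task */
	return 0;
}

The root-most entity comes out first, which is the top-down order the comment above dequeue_rt_stack() requires.
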
@@ -633,32 +669,19 @@ static void enqueue_task_rt(struct rq *rq, struct task_struct *p, int wakeup)
        if (wakeup)
                rt_se->timeout = 0;
 
-       dequeue_rt_stack(p);
+       enqueue_rt_entity(rt_se);
 
-       /*
-        * enqueue everybody, bottom - up.
-        */
-       for_each_sched_rt_entity(rt_se)
-               enqueue_rt_entity(rt_se);
+       inc_cpu_load(rq, p->se.load.weight);
 }
 
 static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int sleep)
 {
        struct sched_rt_entity *rt_se = &p->rt;
-       struct rt_rq *rt_rq;
 
        update_curr_rt(rq);
+       dequeue_rt_entity(rt_se);
 
-       dequeue_rt_stack(p);
-
-       /*
-        * re-enqueue all non-empty rt_rq entities.
-        */
-       for_each_sched_rt_entity(rt_se) {
-               rt_rq = group_rt_rq(rt_se);
-               if (rt_rq && rt_rq->rt_nr_running)
-                       enqueue_rt_entity(rt_se);
-       }
+       dec_cpu_load(rq, p->se.load.weight);
 }
 
 /*
@@ -670,8 +693,11 @@ void requeue_rt_entity(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se)
 {
        struct rt_prio_array *array = &rt_rq->active;
 
-       list_del_init(&rt_se->run_list);
-       list_add_tail(&rt_se->run_list, array->queue + rt_se_prio(rt_se));
+       if (on_rt_rq(rt_se)) {
+               list_del_init(&rt_se->run_list);
+               list_add_tail(&rt_se->run_list,
+                             array->queue + rt_se_prio(rt_se));
+       }
 }
 
 static void requeue_task_rt(struct rq *rq, struct task_struct *p)
@@ -1433,3 +1459,17 @@ static const struct sched_class rt_sched_class = {
        .prio_changed           = prio_changed_rt,
        .switched_to            = switched_to_rt,
 };
+
+#ifdef CONFIG_SCHED_DEBUG
+extern void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq);
+
+static void print_rt_stats(struct seq_file *m, int cpu)
+{
+       struct rt_rq *rt_rq;
+
+       rcu_read_lock();
+       for_each_leaf_rt_rq(rt_rq, cpu_rq(cpu))
+               print_rt_rq(m, cpu, rt_rq);
+       rcu_read_unlock();
+}
+#endif /* CONFIG_SCHED_DEBUG */