sched/cputime: Fix clock_nanosleep()/clock_gettime() inconsistency
[firefly-linux-kernel-4.4.55.git] kernel/sched/core.c
index 44999505e1bf6894bdcdb48531a49e5a07b88ac5..24beb9bb4c3e228ac17e8b931f37c44091987d18 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -2474,44 +2474,6 @@ DEFINE_PER_CPU(struct kernel_cpustat, kernel_cpustat);
 EXPORT_PER_CPU_SYMBOL(kstat);
 EXPORT_PER_CPU_SYMBOL(kernel_cpustat);
 
-/*
- * Return any ns on the sched_clock that have not yet been accounted in
- * @p in case that task is currently running.
- *
- * Called with task_rq_lock() held on @rq.
- */
-static u64 do_task_delta_exec(struct task_struct *p, struct rq *rq)
-{
-       u64 ns = 0;
-
-       /*
-        * Must be ->curr _and_ ->on_rq.  If dequeued, we would
-        * project cycles that may never be accounted to this
-        * thread, breaking clock_gettime().
-        */
-       if (task_current(rq, p) && task_on_rq_queued(p)) {
-               update_rq_clock(rq);
-               ns = rq_clock_task(rq) - p->se.exec_start;
-               if ((s64)ns < 0)
-                       ns = 0;
-       }
-
-       return ns;
-}
-
-unsigned long long task_delta_exec(struct task_struct *p)
-{
-       unsigned long flags;
-       struct rq *rq;
-       u64 ns = 0;
-
-       rq = task_rq_lock(p, &flags);
-       ns = do_task_delta_exec(p, rq);
-       task_rq_unlock(rq, p, &flags);
-
-       return ns;
-}
-
 /*
  * Return accounted runtime for the task.
  * In case the task is currently running, return the runtime plus current's
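
The helpers removed above computed a task's runtime as sum_exec_runtime plus a
projected, not-yet-accounted delta. Any reader that does not apply the same
projection (for example a path that sums bare sum_exec_runtime) sees a smaller
value, so two reads of the same CPU clock could disagree and appear to go
backwards, which is the clock_nanosleep()/clock_gettime() inconsistency named
in the subject. A minimal userspace sketch of the symptom (an illustration,
not the test case referenced by the upstream commit; build with gcc -pthread):

#include <pthread.h>
#include <stdio.h>
#include <time.h>

static void *burn(void *arg)
{
	volatile unsigned long spin = 0;

	(void)arg;
	for (;;)			/* keep the process CPU clock advancing */
		spin++;
	return NULL;
}

int main(void)
{
	pthread_t t;
	struct timespec target, now;

	pthread_create(&t, NULL, burn, NULL);

	clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &target);
	target.tv_nsec += 100 * 1000 * 1000;	/* +100ms of CPU time */
	if (target.tv_nsec >= 1000000000L) {
		target.tv_nsec -= 1000000000L;
		target.tv_sec++;
	}

	/* Absolute sleep on the process CPU clock, then re-read it. */
	clock_nanosleep(CLOCK_PROCESS_CPUTIME_ID, TIMER_ABSTIME, &target, NULL);
	clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &now);

	if (now.tv_sec < target.tv_sec ||
	    (now.tv_sec == target.tv_sec && now.tv_nsec < target.tv_nsec))
		printf("inconsistent: clock_gettime() is behind the sleep target\n");
	return 0;
}

On an affected kernel the final check could occasionally fail; with this patch
both paths read the same fully-accounted sum_exec_runtime.
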
@@ -2521,7 +2483,7 @@ unsigned long long task_sched_runtime(struct task_struct *p)
 {
        unsigned long flags;
        struct rq *rq;
-       u64 ns = 0;
+       u64 ns;
 
 #if defined(CONFIG_64BIT) && defined(CONFIG_SMP)
        /*
@@ -2540,7 +2502,16 @@ unsigned long long task_sched_runtime(struct task_struct *p)
 #endif
 
        rq = task_rq_lock(p, &flags);
-       ns = p->se.sum_exec_runtime + do_task_delta_exec(p, rq);
+       /*
+        * Must be ->curr _and_ ->on_rq.  If dequeued, we would
+        * project cycles that may never be accounted to this
+        * thread, breaking clock_gettime().
+        */
+       if (task_current(rq, p) && task_on_rq_queued(p)) {
+               update_rq_clock(rq);
+               p->sched_class->update_curr(rq);
+       }
+       ns = p->se.sum_exec_runtime;
        task_rq_unlock(rq, p, &flags);
 
        return ns;
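
Rather than projecting a delta at read time, the code now asks the task's
scheduling class to fold any pending runtime into sum_exec_runtime before
reading it. For reference, a simplified sketch of what the fair-class hook
behind ->update_curr() does (abbreviated from update_curr() in
kernel/sched/fair.c; vruntime, load and cgroup bookkeeping omitted):

static void update_curr(struct cfs_rq *cfs_rq)
{
	struct sched_entity *curr = cfs_rq->curr;
	u64 now = rq_clock_task(rq_of(cfs_rq));
	s64 delta_exec = now - curr->exec_start;

	if (unlikely(delta_exec <= 0))
		return;

	curr->exec_start = now;			/* consume the pending delta */
	curr->sum_exec_runtime += delta_exec;	/* ...and account it */
}

Because the delta is consumed (exec_start is advanced) at the same moment it
is accounted, every reader of sum_exec_runtime sees the same monotonic value.
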
@@ -2951,6 +2922,47 @@ asmlinkage __visible void __sched notrace preempt_schedule(void)
 }
 NOKPROBE_SYMBOL(preempt_schedule);
 EXPORT_SYMBOL(preempt_schedule);
+
+#ifdef CONFIG_CONTEXT_TRACKING
+/**
+ * preempt_schedule_context - preempt_schedule called by tracing
+ *
+ * The tracing infrastructure uses preempt_enable_notrace to prevent
+ * recursion and to avoid tracing the preempt-enable performed by the
+ * tracing infrastructure itself. But as tracing can happen in code that
+ * has just come from userspace, or is about to enter it, a preempt
+ * enable can occur before user_exit() is called. The scheduler would
+ * then run while context tracking still thinks the CPU is in user mode.
+ *
+ * To prevent this, preempt_enable_notrace uses this function instead
+ * of preempt_schedule() to exit user context, if needed, before
+ * calling the scheduler.
+ */
+asmlinkage __visible void __sched notrace preempt_schedule_context(void)
+{
+       enum ctx_state prev_ctx;
+
+       if (likely(!preemptible()))
+               return;
+
+       do {
+               __preempt_count_add(PREEMPT_ACTIVE);
+               /*
+                * Needs preempt disabled in case user_exit() is traced
+                * and the tracer calls preempt_enable_notrace() causing
+                * an infinite recursion.
+                */
+               prev_ctx = exception_enter();
+               __schedule();
+               exception_exit(prev_ctx);
+
+               __preempt_count_sub(PREEMPT_ACTIVE);
+               barrier();
+       } while (need_resched());
+}
+EXPORT_SYMBOL_GPL(preempt_schedule_context);
+#endif /* CONFIG_CONTEXT_TRACKING */
+
 #endif /* CONFIG_PREEMPT */
 
 /*
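
The exception_enter()/exception_exit() pair is what lets this variant leave
and re-enter user context around __schedule(). Roughly, simplified from
include/linux/context_tracking.h of this era:

static inline enum ctx_state exception_enter(void)
{
	enum ctx_state prev_ctx;

	if (!context_tracking_is_enabled())
		return 0;

	prev_ctx = this_cpu_read(context_tracking.state);
	context_tracking_user_exit();	/* leave user context if we were in it */

	return prev_ctx;
}

static inline void exception_exit(enum ctx_state prev_ctx)
{
	if (context_tracking_is_enabled() && prev_ctx == IN_USER)
		context_tracking_user_enter();	/* restore the saved state */
}
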
@@ -6327,6 +6339,10 @@ static void sched_init_numa(void)
                if (!sched_debug())
                        break;
        }
+
+       if (!level)
+               return;
+
        /*
         * 'level' contains the number of unique distances, excluding the
         * identity distance node_distance(i,i).
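
As a worked example of what 'level' counts: on a two-node machine whose
node_distance() table contains only 10 (local) and 20 (remote), the single
unique remote distance gives level == 1. On a machine with no remote nodes
nothing is found and level stays 0; the early return added above treats that
as "no NUMA topology to build" instead of falling through to the setup code
below with an empty distance table.
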
@@ -7403,8 +7419,12 @@ void sched_move_task(struct task_struct *tsk)
        if (unlikely(running))
                put_prev_task(rq, tsk);
 
-       tg = container_of(task_css_check(tsk, cpu_cgrp_id,
-                               lockdep_is_held(&tsk->sighand->siglock)),
+       /*
+        * All callers are synchronized by task_rq_lock(); we do not rely on
+        * RCU, which would be pointless here. Thus, we pass "true" to
+        * task_css_check() to prevent lockdep warnings.
+        */
+       tg = container_of(task_css_check(tsk, cpu_cgrp_id, true),
                          struct task_group, css);
        tg = autogroup_task_group(tsk, tg);
        tsk->sched_task_group = tg;
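
The "true" is the caller-supplied lockdep condition. Roughly, simplified from
include/linux/cgroup.h of this era:

#define task_css_check(task, subsys_id, __c)				\
	task_css_set_check((task), (__c))->subsys[(subsys_id)]

#define task_css_set_check(task, __c)					\
	rcu_dereference_check((task)->cgroups,				\
		lockdep_is_held(&cgroup_mutex) || (__c))

The extra condition is OR'ed into rcu_dereference_check(), so passing true
satisfies the checker unconditionally; the actual serialization here comes
from task_rq_lock(), as the comment notes.
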
@@ -7833,6 +7853,11 @@ static void cpu_cgroup_css_offline(struct cgroup_subsys_state *css)
        sched_offline_group(tg);
 }
 
+static void cpu_cgroup_fork(struct task_struct *task)
+{
+       sched_move_task(task);
+}
+
 static int cpu_cgroup_can_attach(struct cgroup_subsys_state *css,
                                 struct cgroup_taskset *tset)
 {
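
For context on the new callback: a child copies ->sched_task_group from its
parent early in fork, so a concurrent cgroup migration of the parent can
leave the child pointing at a stale (in the worst case already freed)
task_group before its first enqueue. Re-running sched_move_task() from the
cgroup core's ->fork() callback re-derives the group from the css the child
actually ended up in, closing that window.
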
@@ -8205,6 +8230,7 @@ struct cgroup_subsys cpu_cgrp_subsys = {
        .css_free       = cpu_cgroup_css_free,
        .css_online     = cpu_cgroup_css_online,
        .css_offline    = cpu_cgroup_css_offline,
+       .fork           = cpu_cgroup_fork,
        .can_attach     = cpu_cgroup_can_attach,
        .attach         = cpu_cgroup_attach,
        .exit           = cpu_cgroup_exit,
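
A note on when the new .fork hook fires: the cgroup core invokes subsystem
fork callbacks from cgroup_post_fork(), late in copy_process(), after the
child has been attached to its css_set. Sketched (simplified from
kernel/cgroup.c of this era):

void cgroup_post_fork(struct task_struct *child)
{
	struct cgroup_subsys *ss;
	int i;

	/* ... attach child to its css_set ... */
	for_each_subsys(ss, i)
		if (ss->fork)
			ss->fork(child);	/* -> cpu_cgroup_fork(child) */
}

So by the time cpu_cgroup_fork() runs, the child's css reflects any cgroup
migration that raced with the fork, and sched_move_task() lands it in the
correct task_group.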