DEBUG: sched: add tracepoint for RD overutilized
author    Patrick Bellasi <patrick.bellasi@arm.com>
          Wed, 10 Feb 2016 09:24:36 +0000 (09:24 +0000)
committer Amit Pundir <amit.pundir@linaro.org>
          Wed, 14 Sep 2016 09:32:22 +0000 (15:02 +0530)
Signed-off-by: Patrick Bellasi <patrick.bellasi@arm.com>
include/trace/events/sched.h
kernel/sched/fair.c

diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h
index fa1b3df836bc8c704253dd148b56fa93cabb2652..c50310a7fd6d7b096762329dbdb31cb1edb51b5d 100644
@@ -937,6 +937,26 @@ TRACE_EVENT(sched_tune_filter,
                __entry->payoff, __entry->region)
 );
 
+/*
+ * Tracepoint for system overutilized flag
+ */
+TRACE_EVENT(sched_overutilized,
+
+       TP_PROTO(bool overutilized),
+
+       TP_ARGS(overutilized),
+
+       TP_STRUCT__entry(
+               __field( bool,  overutilized    )
+       ),
+
+       TP_fast_assign(
+               __entry->overutilized   = overutilized;
+       ),
+
+       TP_printk("overutilized=%d",
+               __entry->overutilized ? 1 : 0)
+);
 #ifdef CONFIG_SCHED_WALT
 struct rq;
 
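[Note, not part of the patch: since TRACE_SYSTEM is "sched" in this header, the new event appears in tracefs as events/sched/sched_overutilized, and each hit is rendered by the TP_printk() above as "overutilized=0" or "overutilized=1". Below is a minimal userspace sketch that enables the event and filters its lines out of trace_pipe; it assumes tracefs is mounted at /sys/kernel/tracing, while older setups expose the same files under /sys/kernel/debug/tracing.]

#include <stdio.h>
#include <string.h>

int main(void)
{
	/* Assumption: tracefs mounted at /sys/kernel/tracing. */
	FILE *f = fopen("/sys/kernel/tracing/events/sched/"
			"sched_overutilized/enable", "w");

	if (!f) {
		perror("enable");
		return 1;
	}
	fputs("1", f);
	fclose(f);

	/* trace_pipe blocks until new events arrive. */
	f = fopen("/sys/kernel/tracing/trace_pipe", "r");
	if (!f) {
		perror("trace_pipe");
		return 1;
	}

	char buf[512];

	while (fgets(buf, sizeof(buf), f))
		if (strstr(buf, "sched_overutilized:"))
			fputs(buf, stdout);

	fclose(f);
	return 0;
}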
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 781e7676df8955eb9dc9070645d10b7c0feb9a57..9139e153671a499495983331ba929d4e052534f5 100644
@@ -4258,8 +4258,10 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
        if (!se) {
                walt_inc_cumulative_runnable_avg(rq, p);
                if (!task_new && !rq->rd->overutilized &&
-                   cpu_overutilized(rq->cpu))
+                   cpu_overutilized(rq->cpu)) {
                        rq->rd->overutilized = true;
+                       trace_sched_overutilized(true);
+               }
 
                /*
                 * We want to potentially trigger a freq switch
@@ -7524,12 +7526,17 @@ next_group:
                        env->dst_rq->rd->overload = overload;
 
                /* Update over-utilization (tipping point, U >= 0) indicator */
-               if (env->dst_rq->rd->overutilized != overutilized)
+               if (env->dst_rq->rd->overutilized != overutilized) {
                        env->dst_rq->rd->overutilized = overutilized;
+                       trace_sched_overutilized(overutilized);
+               }
        } else {
-               if (!env->dst_rq->rd->overutilized && overutilized)
+               if (!env->dst_rq->rd->overutilized && overutilized) {
                        env->dst_rq->rd->overutilized = true;
+                       trace_sched_overutilized(true);
+               }
        }
+
 }
 
 /**
@@ -8969,8 +8976,10 @@ static void task_tick_fair(struct rq *rq, struct task_struct *curr, int queued)
                task_tick_numa(rq, curr);
 
 #ifdef CONFIG_SMP
-       if (!rq->rd->overutilized && cpu_overutilized(task_cpu(curr)))
+       if (!rq->rd->overutilized && cpu_overutilized(task_cpu(curr))) {
                rq->rd->overutilized = true;
+               trace_sched_overutilized(true);
+       }
 
        rq->misfit_task = !task_fits_max(curr, rq->cpu);
 #endif
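
[Side note, a sketch rather than part of the patch: all three call sites repeat the same "set rd->overutilized, then emit the event" pair. trace_sched_overutilized() is the stub that TRACE_EVENT() generates for this event (a static-branch no-op while the event is disabled), so a hypothetical helper could keep the flag and the tracepoint from drifting apart:]

/* Hypothetical helper, not in this patch. */
static inline void set_rd_overutilized(struct root_domain *rd, bool status)
{
	rd->overutilized = status;
	trace_sched_overutilized(status);
}

[Each of the three hunks above would then collapse to a single set_rd_overutilized(rq->rd, true) or set_rd_overutilized(env->dst_rq->rd, overutilized) call.]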