sched: Add ftrace events for entity load-tracking
author Morten Rasmussen <Morten.Rasmussen@arm.com>
Fri, 14 Sep 2012 13:38:15 +0000 (14:38 +0100)
committer Jon Medhurst <tixy@linaro.org>
Wed, 17 Jul 2013 10:12:25 +0000 (11:12 +0100)
Adds ftrace events for key variables related to per-entity load-tracking
to help debug scheduler behaviour. Allows tracing of the task load
contribution, the runnable (runqueue residency) ratio of both tasks and
runqueues, the runqueue runnable load, and the task CPU usage ratio.
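
The new events are added to the existing "sched" trace subsystem, so
they can be enabled at runtime through tracefs. For example, assuming
tracefs is mounted at the usual /sys/kernel/debug/tracing:

  echo 1 > /sys/kernel/debug/tracing/events/sched/sched_rq_runnable_ratio/enable
  cat /sys/kernel/debug/tracing/trace_pipe

(and similarly for the other four events). This should produce trace
lines of the form below, with illustrative values:

  <idle>-0     [001]   123.456789: sched_rq_runnable_ratio: cpu=1 ratio=768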

Signed-off-by: Morten Rasmussen <Morten.Rasmussen@arm.com>
include/trace/events/sched.h
kernel/sched/fair.c

diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h
index e5586caff67a973c962a3ed6bf43d4cc01664083..496445daa54e0a092bab6bb844c4ba6fb08f407f 100644
--- a/include/trace/events/sched.h
+++ b/include/trace/events/sched.h
@@ -430,6 +430,131 @@ TRACE_EVENT(sched_pi_setprio,
                        __entry->oldprio, __entry->newprio)
 );
 
+/*
+ * Tracepoint for showing tracked task load contribution.
+ */
+TRACE_EVENT(sched_task_load_contrib,
+
+       TP_PROTO(struct task_struct *tsk, unsigned long load_contrib),
+
+       TP_ARGS(tsk, load_contrib),
+
+       TP_STRUCT__entry(
+               __array(char, comm, TASK_COMM_LEN)
+               __field(pid_t, pid)
+               __field(unsigned long, load_contrib)
+       ),
+
+       TP_fast_assign(
+               memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
+               __entry->pid            = tsk->pid;
+               __entry->load_contrib   = load_contrib;
+       ),
+
+       TP_printk("comm=%s pid=%d load_contrib=%lu",
+                       __entry->comm, __entry->pid,
+                       __entry->load_contrib)
+);
+
+/*
+ * Tracepoint for showing tracked task runnable ratio [0..1023].
+ */
+TRACE_EVENT(sched_task_runnable_ratio,
+
+       TP_PROTO(struct task_struct *tsk, unsigned long ratio),
+
+       TP_ARGS(tsk, ratio),
+
+       TP_STRUCT__entry(
+               __array(char, comm, TASK_COMM_LEN)
+               __field(pid_t, pid)
+               __field(unsigned long, ratio)
+       ),
+
+       TP_fast_assign(
+               memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
+               __entry->pid   = tsk->pid;
+               __entry->ratio = ratio;
+       ),
+
+       TP_printk("comm=%s pid=%d ratio=%lu",
+                       __entry->comm, __entry->pid,
+                       __entry->ratio)
+);
+
+/*
+ * Tracepoint for showing tracked rq runnable ratio [0..1023].
+ */
+TRACE_EVENT(sched_rq_runnable_ratio,
+
+       TP_PROTO(int cpu, unsigned long ratio),
+
+       TP_ARGS(cpu, ratio),
+
+       TP_STRUCT__entry(
+               __field(int, cpu)
+               __field(unsigned long, ratio)
+       ),
+
+       TP_fast_assign(
+               __entry->cpu   = cpu;
+               __entry->ratio = ratio;
+       ),
+
+       TP_printk("cpu=%d ratio=%lu",
+                       __entry->cpu,
+                       __entry->ratio)
+);
+
+/*
+ * Tracepoint for showing tracked rq runnable load (cfs runnable_load_avg).
+ */
+TRACE_EVENT(sched_rq_runnable_load,
+
+       TP_PROTO(int cpu, u64 load),
+
+       TP_ARGS(cpu, load),
+
+       TP_STRUCT__entry(
+               __field(int, cpu)
+               __field(u64, load)
+       ),
+
+       TP_fast_assign(
+               __entry->cpu  = cpu;
+               __entry->load = load;
+       ),
+
+       TP_printk("cpu=%d load=%llu",
+                       __entry->cpu,
+                       __entry->load)
+);
+
+/*
+ * Tracepoint for showing tracked task cpu usage ratio [0..1023].
+ */
+TRACE_EVENT(sched_task_usage_ratio,
+
+       TP_PROTO(struct task_struct *tsk, unsigned long ratio),
+
+       TP_ARGS(tsk, ratio),
+
+       TP_STRUCT__entry(
+               __array(char, comm, TASK_COMM_LEN)
+               __field(pid_t, pid)
+               __field(unsigned long, ratio)
+       ),
+
+       TP_fast_assign(
+               memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
+               __entry->pid   = tsk->pid;
+               __entry->ratio = ratio;
+       ),
+
+       TP_printk("comm=%s pid=%d ratio=%lu",
+                       __entry->comm, __entry->pid,
+                       __entry->ratio)
+);
 #endif /* _TRACE_SCHED_H */
 
 /* This part must be outside protection */
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index a6b451d9cd3633c223c671e2919449a82f0d4037..928268df35427bbedad2322b6c09adf3a3b11f24 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -1428,9 +1428,11 @@ static inline void __update_task_entity_contrib(struct sched_entity *se)
        contrib = se->avg.runnable_avg_sum * scale_load_down(se->load.weight);
        contrib /= (se->avg.runnable_avg_period + 1);
        se->avg.load_avg_contrib = scale_load(contrib);
+       trace_sched_task_load_contrib(task_of(se), se->avg.load_avg_contrib);
        contrib = se->avg.runnable_avg_sum * scale_load_down(NICE_0_LOAD);
        contrib /= (se->avg.runnable_avg_period + 1);
        se->avg.load_avg_ratio = scale_load(contrib);
+       trace_sched_task_runnable_ratio(task_of(se), se->avg.load_avg_ratio);
 }
 
 /* Compute the current contribution to load_avg by se, return any delta */
@@ -1521,9 +1523,16 @@ static void update_cfs_rq_blocked_load(struct cfs_rq *cfs_rq, int force_update)
 
 static inline void update_rq_runnable_avg(struct rq *rq, int runnable)
 {
+       u32 contrib;
        __update_entity_runnable_avg(rq->clock_task, &rq->avg, runnable,
                                     runnable);
        __update_tg_runnable_avg(&rq->avg, &rq->cfs);
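+       /* Report the rq runnable time as a [0..1023] ratio, mirroring the
+        * per-task ratio computed in __update_task_entity_contrib(). */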
+       contrib = rq->avg.runnable_avg_sum * scale_load_down(1024);
+       contrib /= (rq->avg.runnable_avg_period + 1);
+       trace_sched_rq_runnable_ratio(cpu_of(rq), scale_load(contrib));
+       trace_sched_rq_runnable_load(cpu_of(rq), rq->cfs.runnable_load_avg);
 }
 
 /* Add the load generated by se into cfs_rq's child load-average */