sched: Add HMP task migration ftrace event
author Morten Rasmussen <Morten.Rasmussen@arm.com>
Fri, 14 Sep 2012 13:38:16 +0000 (14:38 +0100)
committer Jon Medhurst <tixy@linaro.org>
Wed, 17 Jul 2013 10:12:25 +0000 (11:12 +0100)
Adds an ftrace event for tracing task migrations performed by the
HMP-optimized scheduler.

Signed-off-by: Morten Rasmussen <Morten.Rasmussen@arm.com>
include/trace/events/sched.h
kernel/sched/fair.c

diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h
index 496445daa54e0a092bab6bb844c4ba6fb08f407f..203e8e9933ba56dd354e4528ea02072aa225e398 100644
--- a/include/trace/events/sched.h
+++ b/include/trace/events/sched.h
@@ -555,6 +555,34 @@ TRACE_EVENT(sched_task_usage_ratio,
                        __entry->comm, __entry->pid,
                        __entry->ratio)
 );
+
+/*
+ * Tracepoint for HMP (CONFIG_SCHED_HMP) task migrations.
+ */
+TRACE_EVENT(sched_hmp_migrate,
+
+       TP_PROTO(struct task_struct *tsk, int dest, int force),
+
+       TP_ARGS(tsk, dest, force),
+
+       TP_STRUCT__entry(
+               __array(char, comm, TASK_COMM_LEN)
+               __field(pid_t, pid)
+               __field(int,  dest)
+               __field(int,  force)
+       ),
+
+       TP_fast_assign(
+               memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
+               __entry->pid   = tsk->pid;
+               __entry->dest  = dest;
+               __entry->force = force;
+       ),
+
+       TP_printk("comm=%s pid=%d dest=%d force=%d",
+                       __entry->comm, __entry->pid,
+                       __entry->dest, __entry->force)
+);
 #endif /* _TRACE_SCHED_H */
 
 /* This part must be outside protection */
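The TRACE_EVENT() above generates a trace_sched_hmp_migrate() helper for the
scheduler to call and exposes the event through the standard ftrace interface
(events/sched/sched_hmp_migrate/ in tracefs). As a hypothetical consumer
sketch, not part of this patch, a module could attach a probe to the event
and count forced migrations; this assumes CONFIG_SCHED_HMP is enabled and
that the tracepoint can be attached from module context on kernels of this
era, where probes take a void *data first argument:

	/*
	 * Hypothetical sketch (not part of this patch): count forced HMP
	 * migrations via a probe. register_trace_sched_hmp_migrate() is
	 * generated by the TRACE_EVENT() definition above; attaching from
	 * a module is assumed to work on this kernel.
	 */
	#include <linux/module.h>
	#include <linux/atomic.h>
	#include <linux/sched.h>
	#include <trace/events/sched.h>

	static atomic_t hmp_forced;	/* migrations seen with force == 1 */

	static void probe_hmp_migrate(void *data, struct task_struct *tsk,
				      int dest, int force)
	{
		if (force)
			atomic_inc(&hmp_forced);
	}

	static int __init hmp_trace_init(void)
	{
		return register_trace_sched_hmp_migrate(probe_hmp_migrate, NULL);
	}

	static void __exit hmp_trace_exit(void)
	{
		unregister_trace_sched_hmp_migrate(probe_hmp_migrate, NULL);
		tracepoint_synchronize_unregister();
		pr_info("hmp: %d forced migrations\n", atomic_read(&hmp_forced));
	}

	module_init(hmp_trace_init);
	module_exit(hmp_trace_exit);
	MODULE_LICENSE("GPL");

Alternatively, the event can simply be enabled by writing 1 to its tracefs
enable file, with each migration producing a line in the TP_printk() format
above. The scheduler-side call sites are added in kernel/sched/fair.c below.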
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 928268df35427bbedad2322b6c09adf3a3b11f24..ecbeb90adcff50c507263882b75c63d322e880ad 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -3570,10 +3570,16 @@ unlock:
        rcu_read_unlock();
 
 #ifdef CONFIG_SCHED_HMP
-       if (hmp_up_migration(prev_cpu, &p->se))
-               return hmp_select_faster_cpu(p, prev_cpu);
-       if (hmp_down_migration(prev_cpu, &p->se))
-               return hmp_select_slower_cpu(p, prev_cpu);
+       if (hmp_up_migration(prev_cpu, &p->se)) {
+               new_cpu = hmp_select_faster_cpu(p, prev_cpu);
+               trace_sched_hmp_migrate(p, new_cpu, 0);
+               return new_cpu;
+       }
+       if (hmp_down_migration(prev_cpu, &p->se)) {
+               new_cpu = hmp_select_slower_cpu(p, prev_cpu);
+               trace_sched_hmp_migrate(p, new_cpu, 0);
+               return new_cpu;
+       }
        /* Make sure that the task stays in its previous hmp domain */
        if (!cpumask_test_cpu(new_cpu, &hmp_cpu_domain(prev_cpu)->cpus))
                return prev_cpu;
@@ -6074,6 +6080,7 @@ static void hmp_force_up_migration(int this_cpu)
                                target->push_cpu = hmp_select_faster_cpu(p, cpu);
                                target->migrate_task = p;
                                force = 1;
+                               trace_sched_hmp_migrate(p, target->push_cpu, 1);
                        }
                }
                raw_spin_unlock_irqrestore(&target->lock, flags);
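
The two trace calls in select_task_rq_fair() pass force=0, since they record
a destination chosen on the normal wakeup path, while the call in
hmp_force_up_migration() passes force=1, marking a migration imposed on an
already-running task. As illustration only (the task name, PID, CPU, and
timestamps are made up, and the line prefix depends on trace options), the
two cases would appear in the trace roughly as:

	mytask-1234  [001]  100.000001: sched_hmp_migrate: comm=mytask pid=1234 dest=2 force=0
	mytask-1234  [000]  100.050000: sched_hmp_migrate: comm=mytask pid=1234 dest=3 force=1

Filtering on the force field (for example via the event's tracefs filter
file) then separates forced up-migrations from ordinary wakeup placement.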