5 #include "util/evlist.h"
6 #include "util/cache.h"
7 #include "util/evsel.h"
8 #include "util/symbol.h"
9 #include "util/thread.h"
10 #include "util/header.h"
11 #include "util/session.h"
12 #include "util/tool.h"
13 #include "util/cloexec.h"
15 #include "util/parse-options.h"
16 #include "util/trace-event.h"
18 #include "util/debug.h"
20 #include <sys/prctl.h>
21 #include <sys/resource.h>
23 #include <semaphore.h>
26 #include <api/fs/fs.h>
28 #define PR_SET_NAME 15 /* Set process name */
32 #define MAX_PID 1024000
41 unsigned long nr_events;
42 unsigned long curr_event;
43 struct sched_atom **atoms;
54 enum sched_event_type {
58 SCHED_EVENT_MIGRATION,
62 enum sched_event_type type;
68 struct task_desc *wakee;
71 #define TASK_STATE_TO_CHAR_STR "RSDTtZXxKWP"
81 struct list_head list;
82 enum thread_state state;
90 struct list_head work_list;
91 struct thread *thread;
100 typedef int (*sort_fn_t)(struct work_atoms *, struct work_atoms *);
104 struct trace_sched_handler {
105 int (*switch_event)(struct perf_sched *sched, struct perf_evsel *evsel,
106 struct perf_sample *sample, struct machine *machine);
108 int (*runtime_event)(struct perf_sched *sched, struct perf_evsel *evsel,
109 struct perf_sample *sample, struct machine *machine);
111 int (*wakeup_event)(struct perf_sched *sched, struct perf_evsel *evsel,
112 struct perf_sample *sample, struct machine *machine);
114 /* PERF_RECORD_FORK event, not sched_process_fork tracepoint */
115 int (*fork_event)(struct perf_sched *sched, union perf_event *event,
116 struct machine *machine);
118 int (*migrate_task_event)(struct perf_sched *sched,
119 struct perf_evsel *evsel,
120 struct perf_sample *sample,
121 struct machine *machine);
125 struct perf_tool tool;
126 const char *sort_order;
127 unsigned long nr_tasks;
128 struct task_desc **pid_to_task;
129 struct task_desc **tasks;
130 const struct trace_sched_handler *tp_handler;
131 pthread_mutex_t start_work_mutex;
132 pthread_mutex_t work_done_wait_mutex;
135 * Track the current task - that way we can know whether there are any
136 * weird events, such as a task being switched away that is not current.
139 u32 curr_pid[MAX_CPUS];
140 struct thread *curr_thread[MAX_CPUS];
141 char next_shortname1;
142 char next_shortname2;
143 unsigned int replay_repeat;
144 unsigned long nr_run_events;
145 unsigned long nr_sleep_events;
146 unsigned long nr_wakeup_events;
147 unsigned long nr_sleep_corrections;
148 unsigned long nr_run_events_optimized;
149 unsigned long targetless_wakeups;
150 unsigned long multitarget_wakeups;
151 unsigned long nr_runs;
152 unsigned long nr_timestamps;
153 unsigned long nr_unordered_timestamps;
154 unsigned long nr_context_switch_bugs;
155 unsigned long nr_events;
156 unsigned long nr_lost_chunks;
157 unsigned long nr_lost_events;
158 u64 run_measurement_overhead;
159 u64 sleep_measurement_overhead;
162 u64 runavg_cpu_usage;
163 u64 parent_cpu_usage;
164 u64 runavg_parent_cpu_usage;
170 u64 cpu_last_switched[MAX_CPUS];
171 struct rb_root atom_root, sorted_atom_root;
172 struct list_head sort_list, cmp_pid;
175 static u64 get_nsecs(void)
179 clock_gettime(CLOCK_MONOTONIC, &ts);
181 return ts.tv_sec * 1000000000ULL + ts.tv_nsec;
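/*
 * Spin until roughly @nsecs of wall-clock time have passed, compensating
 * for the calibrated run-measurement overhead.
 */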
184 static void burn_nsecs(struct perf_sched *sched, u64 nsecs)
186 u64 T0 = get_nsecs(), T1;
190 } while (T1 + sched->run_measurement_overhead < T0 + nsecs);
193 static void sleep_nsecs(u64 nsecs)
197 ts.tv_nsec = nsecs % 1000000000;
198 ts.tv_sec = nsecs / 1000000000;
200 nanosleep(&ts, NULL);
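/*
 * The cheapest of ten zero-length burn_nsecs() calls is taken as the
 * fixed cost of measuring a run, and is compensated for in burn_nsecs().
 */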
203 static void calibrate_run_measurement_overhead(struct perf_sched *sched)
205 u64 T0, T1, delta, min_delta = 1000000000ULL;
208 for (i = 0; i < 10; i++) {
210 burn_nsecs(sched, 0);
213 min_delta = min(min_delta, delta);
215 sched->run_measurement_overhead = min_delta;
217 printf("run measurement overhead: %" PRIu64 " nsecs\n", min_delta);
220 static void calibrate_sleep_measurement_overhead(struct perf_sched *sched)
222 u64 T0, T1, delta, min_delta = 1000000000ULL;
225 for (i = 0; i < 10; i++) {
230 min_delta = min(min_delta, delta);
233 sched->sleep_measurement_overhead = min_delta;
235 printf("sleep measurement overhead: %" PRIu64 " nsecs\n", min_delta);
238 static struct sched_atom *
239 get_new_event(struct task_desc *task, u64 timestamp)
241 struct sched_atom *event = zalloc(sizeof(*event));
242 unsigned long idx = task->nr_events;
245 event->timestamp = timestamp;
249 size = sizeof(struct sched_atom *) * task->nr_events;
250 task->atoms = realloc(task->atoms, size);
251 BUG_ON(!task->atoms);
253 task->atoms[idx] = event;
258 static struct sched_atom *last_event(struct task_desc *task)
260 if (!task->nr_events)
263 return task->atoms[task->nr_events - 1];
266 static void add_sched_event_run(struct perf_sched *sched, struct task_desc *task,
267 u64 timestamp, u64 duration)
269 struct sched_atom *event, *curr_event = last_event(task);
272 * optimize an existing RUN event by merging this one into it:
275 if (curr_event && curr_event->type == SCHED_EVENT_RUN) {
276 sched->nr_run_events_optimized++;
277 curr_event->duration += duration;
281 event = get_new_event(task, timestamp);
283 event->type = SCHED_EVENT_RUN;
284 event->duration = duration;
286 sched->nr_run_events++;
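/*
 * Record a wakeup atom on the waker. If the wakee's last atom is a
 * SLEEP, pair the two through a shared semaphore so the replayed
 * wakeup actually unblocks the replayed sleep.
 */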
289 static void add_sched_event_wakeup(struct perf_sched *sched, struct task_desc *task,
290 u64 timestamp, struct task_desc *wakee)
292 struct sched_atom *event, *wakee_event;
294 event = get_new_event(task, timestamp);
295 event->type = SCHED_EVENT_WAKEUP;
296 event->wakee = wakee;
298 wakee_event = last_event(wakee);
299 if (!wakee_event || wakee_event->type != SCHED_EVENT_SLEEP) {
300 sched->targetless_wakeups++;
303 if (wakee_event->wait_sem) {
304 sched->multitarget_wakeups++;
308 wakee_event->wait_sem = zalloc(sizeof(*wakee_event->wait_sem));
309 sem_init(wakee_event->wait_sem, 0, 0);
310 wakee_event->specific_wait = 1;
311 event->wait_sem = wakee_event->wait_sem;
313 sched->nr_wakeup_events++;
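/* Record a SLEEP atom; the captured task state is currently unused. */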
316 static void add_sched_event_sleep(struct perf_sched *sched, struct task_desc *task,
317 u64 timestamp, u64 task_state __maybe_unused)
319 struct sched_atom *event = get_new_event(task, timestamp);
321 event->type = SCHED_EVENT_SLEEP;
323 sched->nr_sleep_events++;
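/*
 * Look up (or create) the replay descriptor for a PID. The pid_to_task
 * table is sized from kernel/pid_max on first use and grown if a
 * larger PID shows up.
 */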
326 static struct task_desc *register_pid(struct perf_sched *sched,
327 unsigned long pid, const char *comm)
329 struct task_desc *task;
332 if (sched->pid_to_task == NULL) {
333 if (sysctl__read_int("kernel/pid_max", &pid_max) < 0)
335 BUG_ON((sched->pid_to_task = calloc(pid_max, sizeof(struct task_desc *))) == NULL);
337 if (pid >= (unsigned long)pid_max) {
338 BUG_ON((sched->pid_to_task = realloc(sched->pid_to_task, (pid + 1) *
339 sizeof(struct task_desc *))) == NULL);
340 while (pid >= (unsigned long)pid_max)
341 sched->pid_to_task[pid_max++] = NULL;
344 task = sched->pid_to_task[pid];
349 task = zalloc(sizeof(*task));
351 task->nr = sched->nr_tasks;
352 strcpy(task->comm, comm);
354 * every task starts in a sleeping state - this gets ignored
355 * if there's no wakeup pointing to this sleep state:
357 add_sched_event_sleep(sched, task, 0, 0);
359 sched->pid_to_task[pid] = task;
361 sched->tasks = realloc(sched->tasks, sched->nr_tasks * sizeof(struct task_desc *));
362 BUG_ON(!sched->tasks);
363 sched->tasks[task->nr] = task;
366 printf("registered task #%ld, PID %ld (%s)\n", sched->nr_tasks, pid, comm);
372 static void print_task_traces(struct perf_sched *sched)
374 struct task_desc *task;
377 for (i = 0; i < sched->nr_tasks; i++) {
378 task = sched->tasks[i];
379 printf("task %6ld (%20s:%10ld), nr_events: %ld\n",
380 task->nr, task->comm, task->pid, task->nr_events);
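/*
 * Wire a time-0 wakeup from each task to the next (wrapping around),
 * so that every replay thread is eventually released.
 */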
384 static void add_cross_task_wakeups(struct perf_sched *sched)
386 struct task_desc *task1, *task2;
389 for (i = 0; i < sched->nr_tasks; i++) {
390 task1 = sched->tasks[i];
392 if (j == sched->nr_tasks)
394 task2 = sched->tasks[j];
395 add_sched_event_wakeup(sched, task1, 0, task2);
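/*
 * Replay a single atom: burn CPU for RUN, wait on the pairing
 * semaphore for SLEEP, post it for WAKEUP; MIGRATION needs no
 * replay action.
 */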
399 static void perf_sched__process_event(struct perf_sched *sched,
400 struct sched_atom *atom)
404 switch (atom->type) {
405 case SCHED_EVENT_RUN:
406 burn_nsecs(sched, atom->duration);
408 case SCHED_EVENT_SLEEP:
410 ret = sem_wait(atom->wait_sem);
413 case SCHED_EVENT_WAKEUP:
415 ret = sem_post(atom->wait_sem);
418 case SCHED_EVENT_MIGRATION:
425 static u64 get_cpu_usage_nsec_parent(void)
431 err = getrusage(RUSAGE_SELF, &ru);
434 sum = ru.ru_utime.tv_sec*1e9 + ru.ru_utime.tv_usec*1e3;
435 sum += ru.ru_stime.tv_sec*1e9 + ru.ru_stime.tv_usec*1e3;
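/*
 * Open a software task-clock counter used to measure how much CPU
 * time a replay thread consumes.
 */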
440 static int self_open_counters(void)
442 struct perf_event_attr attr;
443 char sbuf[STRERR_BUFSIZE];
446 memset(&attr, 0, sizeof(attr));
448 attr.type = PERF_TYPE_SOFTWARE;
449 attr.config = PERF_COUNT_SW_TASK_CLOCK;
451 fd = sys_perf_event_open(&attr, 0, -1, -1,
452 perf_event_open_cloexec_flag());
455 pr_err("Error: sys_perf_event_open() syscall returned "
456 "with %d (%s)\n", fd,
457 strerror_r(errno, sbuf, sizeof(sbuf)));
461 static u64 get_cpu_usage_nsec_self(int fd)
466 ret = read(fd, &runtime, sizeof(runtime));
467 BUG_ON(ret != sizeof(runtime));
472 struct sched_thread_parms {
473 struct task_desc *task;
474 struct perf_sched *sched;
478 static void *thread_func(void *ctx)
480 struct sched_thread_parms *parms = ctx;
481 struct task_desc *this_task = parms->task;
482 struct perf_sched *sched = parms->sched;
483 u64 cpu_usage_0, cpu_usage_1;
484 unsigned long i, ret;
490 sprintf(comm2, ":%s", this_task->comm);
491 prctl(PR_SET_NAME, comm2);
495 ret = sem_post(&this_task->ready_for_work);
497 ret = pthread_mutex_lock(&sched->start_work_mutex);
499 ret = pthread_mutex_unlock(&sched->start_work_mutex);
502 cpu_usage_0 = get_cpu_usage_nsec_self(fd);
504 for (i = 0; i < this_task->nr_events; i++) {
505 this_task->curr_event = i;
506 perf_sched__process_event(sched, this_task->atoms[i]);
509 cpu_usage_1 = get_cpu_usage_nsec_self(fd);
510 this_task->cpu_usage = cpu_usage_1 - cpu_usage_0;
511 ret = sem_post(&this_task->work_done_sem);
514 ret = pthread_mutex_lock(&sched->work_done_wait_mutex);
516 ret = pthread_mutex_unlock(&sched->work_done_wait_mutex);
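/*
 * Spawn one worker thread per recorded task. Both mutexes are taken
 * up front, so every worker blocks in thread_func() until
 * wait_for_tasks() releases them.
 */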
522 static void create_tasks(struct perf_sched *sched)
524 struct task_desc *task;
529 err = pthread_attr_init(&attr);
531 err = pthread_attr_setstacksize(&attr,
532 (size_t) max(16 * 1024, PTHREAD_STACK_MIN));
534 err = pthread_mutex_lock(&sched->start_work_mutex);
536 err = pthread_mutex_lock(&sched->work_done_wait_mutex);
538 for (i = 0; i < sched->nr_tasks; i++) {
539 struct sched_thread_parms *parms = malloc(sizeof(*parms));
540 BUG_ON(parms == NULL);
541 parms->task = task = sched->tasks[i];
542 parms->sched = sched;
543 parms->fd = self_open_counters();
544 sem_init(&task->sleep_sem, 0, 0);
545 sem_init(&task->ready_for_work, 0, 0);
546 sem_init(&task->work_done_sem, 0, 0);
547 task->curr_event = 0;
548 err = pthread_create(&task->thread, &attr, thread_func, parms);
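/*
 * Drive one measurement pass: release the workers, wait for all of
 * them to complete, and fold the per-task CPU usage into the decaying
 * run averages.
 */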
553 static void wait_for_tasks(struct perf_sched *sched)
555 u64 cpu_usage_0, cpu_usage_1;
556 struct task_desc *task;
557 unsigned long i, ret;
559 sched->start_time = get_nsecs();
560 sched->cpu_usage = 0;
561 pthread_mutex_unlock(&sched->work_done_wait_mutex);
563 for (i = 0; i < sched->nr_tasks; i++) {
564 task = sched->tasks[i];
565 ret = sem_wait(&task->ready_for_work);
567 sem_init(&task->ready_for_work, 0, 0);
569 ret = pthread_mutex_lock(&sched->work_done_wait_mutex);
572 cpu_usage_0 = get_cpu_usage_nsec_parent();
574 pthread_mutex_unlock(&sched->start_work_mutex);
576 for (i = 0; i < sched->nr_tasks; i++) {
577 task = sched->tasks[i];
578 ret = sem_wait(&task->work_done_sem);
580 sem_init(&task->work_done_sem, 0, 0);
581 sched->cpu_usage += task->cpu_usage;
585 cpu_usage_1 = get_cpu_usage_nsec_parent();
586 if (!sched->runavg_cpu_usage)
587 sched->runavg_cpu_usage = sched->cpu_usage;
588 sched->runavg_cpu_usage = (sched->runavg_cpu_usage * 9 + sched->cpu_usage) / 10;
590 sched->parent_cpu_usage = cpu_usage_1 - cpu_usage_0;
591 if (!sched->runavg_parent_cpu_usage)
592 sched->runavg_parent_cpu_usage = sched->parent_cpu_usage;
593 sched->runavg_parent_cpu_usage = (sched->runavg_parent_cpu_usage * 9 +
594 sched->parent_cpu_usage)/10;
596 ret = pthread_mutex_lock(&sched->start_work_mutex);
599 for (i = 0; i < sched->nr_tasks; i++) {
600 task = sched->tasks[i];
601 sem_init(&task->sleep_sem, 0, 0);
602 task->curr_event = 0;
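/*
 * Execute a single replay iteration and print its runtime,
 * fluctuation and CPU-usage statistics.
 */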
606 static void run_one_test(struct perf_sched *sched)
608 u64 T0, T1, delta, avg_delta, fluct;
611 wait_for_tasks(sched);
615 sched->sum_runtime += delta;
618 avg_delta = sched->sum_runtime / sched->nr_runs;
619 if (delta < avg_delta)
620 fluct = avg_delta - delta;
622 fluct = delta - avg_delta;
623 sched->sum_fluct += fluct;
625 sched->run_avg = delta;
626 sched->run_avg = (sched->run_avg * 9 + delta) / 10;
628 printf("#%-3ld: %0.3f, ", sched->nr_runs, (double)delta / 1000000.0);
630 printf("ravg: %0.2f, ", (double)sched->run_avg / 1e6);
632 printf("cpu: %0.2f / %0.2f",
633 (double)sched->cpu_usage / 1e6, (double)sched->runavg_cpu_usage / 1e6);
637 * rusage statistics are collected by the parent; they are less
638 * accurate than the sched->sum_exec_runtime based statistics:
640 printf(" [%0.2f / %0.2f]",
641 (double)sched->parent_cpu_usage/1e6,
642 (double)sched->runavg_parent_cpu_usage/1e6);
647 if (sched->nr_sleep_corrections)
648 printf(" (%ld sleep corrections)\n", sched->nr_sleep_corrections);
649 sched->nr_sleep_corrections = 0;
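/* Sanity-check the calibration by timing a 1 ms burn and a 1 ms sleep. */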
652 static void test_calibrations(struct perf_sched *sched)
657 burn_nsecs(sched, 1e6);
660 printf("the run test took %" PRIu64 " nsecs\n", T1 - T0);
666 printf("the sleep test took %" PRIu64 " nsecs\n", T1 - T0);
670 replay_wakeup_event(struct perf_sched *sched,
671 struct perf_evsel *evsel, struct perf_sample *sample,
672 struct machine *machine __maybe_unused)
674 const char *comm = perf_evsel__strval(evsel, sample, "comm");
675 const u32 pid = perf_evsel__intval(evsel, sample, "pid");
676 struct task_desc *waker, *wakee;
679 printf("sched_wakeup event %p\n", evsel);
681 printf(" ... pid %d woke up %s/%d\n", sample->tid, comm, pid);
684 waker = register_pid(sched, sample->tid, "<unknown>");
685 wakee = register_pid(sched, pid, comm);
687 add_sched_event_wakeup(sched, waker, sample->time, wakee);
691 static int replay_switch_event(struct perf_sched *sched,
692 struct perf_evsel *evsel,
693 struct perf_sample *sample,
694 struct machine *machine __maybe_unused)
696 const char *prev_comm = perf_evsel__strval(evsel, sample, "prev_comm"),
697 *next_comm = perf_evsel__strval(evsel, sample, "next_comm");
698 const u32 prev_pid = perf_evsel__intval(evsel, sample, "prev_pid"),
699 next_pid = perf_evsel__intval(evsel, sample, "next_pid");
700 const u64 prev_state = perf_evsel__intval(evsel, sample, "prev_state");
701 struct task_desc *prev, __maybe_unused *next;
702 u64 timestamp0, timestamp = sample->time;
703 int cpu = sample->cpu;
707 printf("sched_switch event %p\n", evsel);
709 if (cpu >= MAX_CPUS || cpu < 0)
712 timestamp0 = sched->cpu_last_switched[cpu];
714 delta = timestamp - timestamp0;
719 pr_err("hm, delta: %" PRIu64 " < 0 ?\n", delta);
723 pr_debug(" ... switch from %s/%d to %s/%d [ran %" PRIu64 " nsecs]\n",
724 prev_comm, prev_pid, next_comm, next_pid, delta);
726 prev = register_pid(sched, prev_pid, prev_comm);
727 next = register_pid(sched, next_pid, next_comm);
729 sched->cpu_last_switched[cpu] = timestamp;
731 add_sched_event_run(sched, prev, timestamp, delta);
732 add_sched_event_sleep(sched, prev, timestamp, prev_state);
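/*
 * PERF_RECORD_FORK handler for replay: make sure both parent and
 * child are registered as replayable tasks.
 */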
737 static int replay_fork_event(struct perf_sched *sched,
738 union perf_event *event,
739 struct machine *machine)
741 struct thread *child, *parent;
743 child = machine__findnew_thread(machine, event->fork.pid,
745 parent = machine__findnew_thread(machine, event->fork.ppid,
748 if (child == NULL || parent == NULL) {
749 pr_debug("thread does not exist on fork event: child %p, parent %p\n",
755 printf("fork event\n");
756 printf("... parent: %s/%d\n", thread__comm_str(parent), parent->tid);
757 printf("... child: %s/%d\n", thread__comm_str(child), child->tid);
760 register_pid(sched, parent->tid, thread__comm_str(parent));
761 register_pid(sched, child->tid, thread__comm_str(child));
765 struct sort_dimension {
768 struct list_head list;
772 thread_lat_cmp(struct list_head *list, struct work_atoms *l, struct work_atoms *r)
774 struct sort_dimension *sort;
777 BUG_ON(list_empty(list));
779 list_for_each_entry(sort, list, list) {
780 ret = sort->cmp(l, r);
788 static struct work_atoms *
789 thread_atoms_search(struct rb_root *root, struct thread *thread,
790 struct list_head *sort_list)
792 struct rb_node *node = root->rb_node;
793 struct work_atoms key = { .thread = thread };
796 struct work_atoms *atoms;
799 atoms = container_of(node, struct work_atoms, node);
801 cmp = thread_lat_cmp(sort_list, &key, atoms);
803 node = node->rb_left;
805 node = node->rb_right;
807 BUG_ON(thread != atoms->thread);
815 __thread_latency_insert(struct rb_root *root, struct work_atoms *data,
816 struct list_head *sort_list)
818 struct rb_node **new = &(root->rb_node), *parent = NULL;
821 struct work_atoms *this;
824 this = container_of(*new, struct work_atoms, node);
827 cmp = thread_lat_cmp(sort_list, data, this);
830 new = &((*new)->rb_left);
832 new = &((*new)->rb_right);
835 rb_link_node(&data->node, parent, new);
836 rb_insert_color(&data->node, root);
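/*
 * Create an empty work_atoms bucket for a thread and insert it into
 * the latency tree, keyed by pid.
 */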
839 static int thread_atoms_insert(struct perf_sched *sched, struct thread *thread)
841 struct work_atoms *atoms = zalloc(sizeof(*atoms));
843 pr_err("No memory at %s\n", __func__);
847 atoms->thread = thread__get(thread);
848 INIT_LIST_HEAD(&atoms->work_list);
849 __thread_latency_insert(&sched->atom_root, atoms, &sched->cmp_pid);
853 static char sched_out_state(u64 prev_state)
855 const char *str = TASK_STATE_TO_CHAR_STR;
857 return str[prev_state];
861 add_sched_out_event(struct work_atoms *atoms,
865 struct work_atom *atom = zalloc(sizeof(*atom));
867 pr_err("Non memory at %s", __func__);
871 atom->sched_out_time = timestamp;
873 if (run_state == 'R') {
874 atom->state = THREAD_WAIT_CPU;
875 atom->wake_up_time = atom->sched_out_time;
878 list_add_tail(&atom->list, &atoms->work_list);
883 add_runtime_event(struct work_atoms *atoms, u64 delta,
884 u64 timestamp __maybe_unused)
886 struct work_atom *atom;
888 BUG_ON(list_empty(&atoms->work_list));
890 atom = list_entry(atoms->work_list.prev, struct work_atom, list);
892 atom->runtime += delta;
893 atoms->total_runtime += delta;
897 add_sched_in_event(struct work_atoms *atoms, u64 timestamp)
899 struct work_atom *atom;
902 if (list_empty(&atoms->work_list))
905 atom = list_entry(atoms->work_list.prev, struct work_atom, list);
907 if (atom->state != THREAD_WAIT_CPU)
910 if (timestamp < atom->wake_up_time) {
911 atom->state = THREAD_IGNORE;
915 atom->state = THREAD_SCHED_IN;
916 atom->sched_in_time = timestamp;
918 delta = atom->sched_in_time - atom->wake_up_time;
919 atoms->total_lat += delta;
920 if (delta > atoms->max_lat) {
921 atoms->max_lat = delta;
922 atoms->max_lat_at = timestamp;
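/*
 * sched_switch handler for latency: close the outgoing thread's atom
 * and open one for the incoming thread, crediting its wait time.
 */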
927 static int latency_switch_event(struct perf_sched *sched,
928 struct perf_evsel *evsel,
929 struct perf_sample *sample,
930 struct machine *machine)
932 const u32 prev_pid = perf_evsel__intval(evsel, sample, "prev_pid"),
933 next_pid = perf_evsel__intval(evsel, sample, "next_pid");
934 const u64 prev_state = perf_evsel__intval(evsel, sample, "prev_state");
935 struct work_atoms *out_events, *in_events;
936 struct thread *sched_out, *sched_in;
937 u64 timestamp0, timestamp = sample->time;
938 int cpu = sample->cpu;
941 BUG_ON(cpu >= MAX_CPUS || cpu < 0);
943 timestamp0 = sched->cpu_last_switched[cpu];
944 sched->cpu_last_switched[cpu] = timestamp;
946 delta = timestamp - timestamp0;
951 pr_err("hm, delta: %" PRIu64 " < 0 ?\n", delta);
955 sched_out = machine__findnew_thread(machine, -1, prev_pid);
956 sched_in = machine__findnew_thread(machine, -1, next_pid);
958 out_events = thread_atoms_search(&sched->atom_root, sched_out, &sched->cmp_pid);
960 if (thread_atoms_insert(sched, sched_out))
962 out_events = thread_atoms_search(&sched->atom_root, sched_out, &sched->cmp_pid);
964 pr_err("out-event: Internal tree error");
968 if (add_sched_out_event(out_events, sched_out_state(prev_state), timestamp))
971 in_events = thread_atoms_search(&sched->atom_root, sched_in, &sched->cmp_pid);
973 if (thread_atoms_insert(sched, sched_in))
975 in_events = thread_atoms_search(&sched->atom_root, sched_in, &sched->cmp_pid);
977 pr_err("in-event: Internal tree error");
981 * A task came in that we have not heard about yet;
982 * add an initial atom in the runnable state:
984 if (add_sched_out_event(in_events, 'R', timestamp))
987 add_sched_in_event(in_events, timestamp);
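/*
 * sched_stat_runtime handler: attribute the reported runtime to the
 * thread's current atom.
 */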
992 static int latency_runtime_event(struct perf_sched *sched,
993 struct perf_evsel *evsel,
994 struct perf_sample *sample,
995 struct machine *machine)
997 const u32 pid = perf_evsel__intval(evsel, sample, "pid");
998 const u64 runtime = perf_evsel__intval(evsel, sample, "runtime");
999 struct thread *thread = machine__findnew_thread(machine, -1, pid);
1000 struct work_atoms *atoms = thread_atoms_search(&sched->atom_root, thread, &sched->cmp_pid);
1001 u64 timestamp = sample->time;
1002 int cpu = sample->cpu;
1004 BUG_ON(cpu >= MAX_CPUS || cpu < 0);
1006 if (thread_atoms_insert(sched, thread))
1008 atoms = thread_atoms_search(&sched->atom_root, thread, &sched->cmp_pid);
1010 pr_err("in-event: Internal tree error");
1013 if (add_sched_out_event(atoms, 'R', timestamp))
1017 add_runtime_event(atoms, runtime, timestamp);
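/*
 * sched_wakeup handler: stamp the wakeup time on the thread's pending
 * atom so the next sched-in can compute the wakeup latency.
 */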
1021 static int latency_wakeup_event(struct perf_sched *sched,
1022 struct perf_evsel *evsel,
1023 struct perf_sample *sample,
1024 struct machine *machine)
1026 const u32 pid = perf_evsel__intval(evsel, sample, "pid");
1027 struct work_atoms *atoms;
1028 struct work_atom *atom;
1029 struct thread *wakee;
1030 u64 timestamp = sample->time;
1032 wakee = machine__findnew_thread(machine, -1, pid);
1033 atoms = thread_atoms_search(&sched->atom_root, wakee, &sched->cmp_pid);
1035 if (thread_atoms_insert(sched, wakee))
1037 atoms = thread_atoms_search(&sched->atom_root, wakee, &sched->cmp_pid);
1039 pr_err("wakeup-event: Internal tree error");
1042 if (add_sched_out_event(atoms, 'S', timestamp))
1046 BUG_ON(list_empty(&atoms->work_list));
1048 atom = list_entry(atoms->work_list.prev, struct work_atom, list);
1051 * A wakeup event is not guaranteed to fire only while the task is
1052 * off the run queue; it may also fire while the task is already on
1053 * the run queue and merely change ->state to TASK_RUNNING. In that
1054 * case we must not set ->wake_up_time, since the task never stopped
1055 * being runnable.
1057 * You WILL be missing events if you've recorded only
1058 * one CPU, or are only looking at one, so don't
1059 * skip in this case.
1061 if (sched->profile_cpu == -1 && atom->state != THREAD_SLEEPING)
1064 sched->nr_timestamps++;
1065 if (atom->sched_out_time > timestamp) {
1066 sched->nr_unordered_timestamps++;
1070 atom->state = THREAD_WAIT_CPU;
1071 atom->wake_up_time = timestamp;
1075 static int latency_migrate_task_event(struct perf_sched *sched,
1076 struct perf_evsel *evsel,
1077 struct perf_sample *sample,
1078 struct machine *machine)
1080 const u32 pid = perf_evsel__intval(evsel, sample, "pid");
1081 u64 timestamp = sample->time;
1082 struct work_atoms *atoms;
1083 struct work_atom *atom;
1084 struct thread *migrant;
1087 * Only need to worry about migration when profiling one CPU.
1089 if (sched->profile_cpu == -1)
1092 migrant = machine__findnew_thread(machine, -1, pid);
1093 atoms = thread_atoms_search(&sched->atom_root, migrant, &sched->cmp_pid);
1095 if (thread_atoms_insert(sched, migrant))
1097 register_pid(sched, migrant->tid, thread__comm_str(migrant));
1098 atoms = thread_atoms_search(&sched->atom_root, migrant, &sched->cmp_pid);
1100 pr_err("migration-event: Internal tree error");
1103 if (add_sched_out_event(atoms, 'R', timestamp))
1107 BUG_ON(list_empty(&atoms->work_list));
1109 atom = list_entry(atoms->work_list.prev, struct work_atom, list);
1110 atom->sched_in_time = atom->sched_out_time = atom->wake_up_time = timestamp;
1112 sched->nr_timestamps++;
1114 if (atom->sched_out_time > timestamp)
1115 sched->nr_unordered_timestamps++;
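/* Print one row of the latency table for a thread's accumulated atoms. */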
1120 static void output_lat_thread(struct perf_sched *sched, struct work_atoms *work_list)
1126 if (!work_list->nb_atoms)
1129 * Ignore idle threads:
1131 if (!strcmp(thread__comm_str(work_list->thread), "swapper"))
1134 sched->all_runtime += work_list->total_runtime;
1135 sched->all_count += work_list->nb_atoms;
1137 ret = printf(" %s:%d ", thread__comm_str(work_list->thread), work_list->thread->tid);
1139 for (i = 0; i < 24 - ret; i++)
1142 avg = work_list->total_lat / work_list->nb_atoms;
1144 printf("|%11.3f ms |%9" PRIu64 " | avg:%9.3f ms | max:%9.3f ms | max at: %13.6f s\n",
1145 (double)work_list->total_runtime / 1e6,
1146 work_list->nb_atoms, (double)avg / 1e6,
1147 (double)work_list->max_lat / 1e6,
1148 (double)work_list->max_lat_at / 1e9);
1151 static int pid_cmp(struct work_atoms *l, struct work_atoms *r)
1153 if (l->thread->tid < r->thread->tid)
1155 if (l->thread->tid > r->thread->tid)
1161 static int avg_cmp(struct work_atoms *l, struct work_atoms *r)
1171 avgl = l->total_lat / l->nb_atoms;
1172 avgr = r->total_lat / r->nb_atoms;
1182 static int max_cmp(struct work_atoms *l, struct work_atoms *r)
1184 if (l->max_lat < r->max_lat)
1186 if (l->max_lat > r->max_lat)
1192 static int switch_cmp(struct work_atoms *l, struct work_atoms *r)
1194 if (l->nb_atoms < r->nb_atoms)
1196 if (l->nb_atoms > r->nb_atoms)
1202 static int runtime_cmp(struct work_atoms *l, struct work_atoms *r)
1204 if (l->total_runtime < r->total_runtime)
1206 if (l->total_runtime > r->total_runtime)
1212 static int sort_dimension__add(const char *tok, struct list_head *list)
1215 static struct sort_dimension avg_sort_dimension = {
1219 static struct sort_dimension max_sort_dimension = {
1223 static struct sort_dimension pid_sort_dimension = {
1227 static struct sort_dimension runtime_sort_dimension = {
1231 static struct sort_dimension switch_sort_dimension = {
1235 struct sort_dimension *available_sorts[] = {
1236 &pid_sort_dimension,
1237 &avg_sort_dimension,
1238 &max_sort_dimension,
1239 &switch_sort_dimension,
1240 &runtime_sort_dimension,
1243 for (i = 0; i < ARRAY_SIZE(available_sorts); i++) {
1244 if (!strcmp(available_sorts[i]->name, tok)) {
1245 list_add_tail(&available_sorts[i]->list, list);
1254 static void perf_sched__sort_lat(struct perf_sched *sched)
1256 struct rb_node *node;
1259 struct work_atoms *data;
1260 node = rb_first(&sched->atom_root);
1264 rb_erase(node, &sched->atom_root);
1265 data = rb_entry(node, struct work_atoms, node);
1266 __thread_latency_insert(&sched->sorted_atom_root, data, &sched->sort_list);
1270 static int process_sched_wakeup_event(struct perf_tool *tool,
1271 struct perf_evsel *evsel,
1272 struct perf_sample *sample,
1273 struct machine *machine)
1275 struct perf_sched *sched = container_of(tool, struct perf_sched, tool);
1277 if (sched->tp_handler->wakeup_event)
1278 return sched->tp_handler->wakeup_event(sched, evsel, sample, machine);
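/*
 * 'perf sched map' switch handler: assign each thread a two-character
 * shortname and print one column per CPU on every context switch.
 */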
1283 static int map_switch_event(struct perf_sched *sched, struct perf_evsel *evsel,
1284 struct perf_sample *sample, struct machine *machine)
1286 const u32 next_pid = perf_evsel__intval(evsel, sample, "next_pid");
1287 struct thread *sched_in;
1289 u64 timestamp0, timestamp = sample->time;
1291 int cpu, this_cpu = sample->cpu;
1293 BUG_ON(this_cpu >= MAX_CPUS || this_cpu < 0);
1295 if (this_cpu > sched->max_cpu)
1296 sched->max_cpu = this_cpu;
1298 timestamp0 = sched->cpu_last_switched[this_cpu];
1299 sched->cpu_last_switched[this_cpu] = timestamp;
1301 delta = timestamp - timestamp0;
1306 pr_err("hm, delta: %" PRIu64 " < 0 ?\n", delta);
1310 sched_in = machine__findnew_thread(machine, -1, next_pid);
1312 sched->curr_thread[this_cpu] = sched_in;
1317 if (!sched_in->shortname[0]) {
1318 if (!strcmp(thread__comm_str(sched_in), "swapper")) {
1320 * Don't allocate a letter-number for swapper:0
1321 * as a shortname. Instead, we use '.' for it.
1323 sched_in->shortname[0] = '.';
1324 sched_in->shortname[1] = ' ';
1326 sched_in->shortname[0] = sched->next_shortname1;
1327 sched_in->shortname[1] = sched->next_shortname2;
1329 if (sched->next_shortname1 < 'Z') {
1330 sched->next_shortname1++;
1332 sched->next_shortname1 = 'A';
1333 if (sched->next_shortname2 < '9')
1334 sched->next_shortname2++;
1336 sched->next_shortname2 = '0';
1342 for (cpu = 0; cpu <= sched->max_cpu; cpu++) {
1343 if (cpu != this_cpu)
1348 if (sched->curr_thread[cpu])
1349 printf("%2s ", sched->curr_thread[cpu]->shortname);
1354 printf(" %12.6f secs ", (double)timestamp/1e9);
1355 if (new_shortname) {
1356 printf("%s => %s:%d\n",
1357 sched_in->shortname, thread__comm_str(sched_in), sched_in->tid);
1365 static int process_sched_switch_event(struct perf_tool *tool,
1366 struct perf_evsel *evsel,
1367 struct perf_sample *sample,
1368 struct machine *machine)
1370 struct perf_sched *sched = container_of(tool, struct perf_sched, tool);
1371 int this_cpu = sample->cpu, err = 0;
1372 u32 prev_pid = perf_evsel__intval(evsel, sample, "prev_pid"),
1373 next_pid = perf_evsel__intval(evsel, sample, "next_pid");
1375 if (sched->curr_pid[this_cpu] != (u32)-1) {
1377 * Are we trying to switch away a PID that is not current?
1380 if (sched->curr_pid[this_cpu] != prev_pid)
1381 sched->nr_context_switch_bugs++;
1384 if (sched->tp_handler->switch_event)
1385 err = sched->tp_handler->switch_event(sched, evsel, sample, machine);
1387 sched->curr_pid[this_cpu] = next_pid;
1391 static int process_sched_runtime_event(struct perf_tool *tool,
1392 struct perf_evsel *evsel,
1393 struct perf_sample *sample,
1394 struct machine *machine)
1396 struct perf_sched *sched = container_of(tool, struct perf_sched, tool);
1398 if (sched->tp_handler->runtime_event)
1399 return sched->tp_handler->runtime_event(sched, evsel, sample, machine);
1404 static int perf_sched__process_fork_event(struct perf_tool *tool,
1405 union perf_event *event,
1406 struct perf_sample *sample,
1407 struct machine *machine)
1409 struct perf_sched *sched = container_of(tool, struct perf_sched, tool);
1411 /* run the fork event through the perf machinery */
1412 perf_event__process_fork(tool, event, sample, machine);
1414 /* and then run additional processing needed for this command */
1415 if (sched->tp_handler->fork_event)
1416 return sched->tp_handler->fork_event(sched, event, machine);
1421 static int process_sched_migrate_task_event(struct perf_tool *tool,
1422 struct perf_evsel *evsel,
1423 struct perf_sample *sample,
1424 struct machine *machine)
1426 struct perf_sched *sched = container_of(tool, struct perf_sched, tool);
1428 if (sched->tp_handler->migrate_task_event)
1429 return sched->tp_handler->migrate_task_event(sched, evsel, sample, machine);
1434 typedef int (*tracepoint_handler)(struct perf_tool *tool,
1435 struct perf_evsel *evsel,
1436 struct perf_sample *sample,
1437 struct machine *machine);
1439 static int perf_sched__process_tracepoint_sample(struct perf_tool *tool __maybe_unused,
1440 union perf_event *event __maybe_unused,
1441 struct perf_sample *sample,
1442 struct perf_evsel *evsel,
1443 struct machine *machine)
1447 if (evsel->handler != NULL) {
1448 tracepoint_handler f = evsel->handler;
1449 err = f(tool, evsel, sample, machine);
1455 static int perf_sched__read_events(struct perf_sched *sched)
1457 const struct perf_evsel_str_handler handlers[] = {
1458 { "sched:sched_switch", process_sched_switch_event, },
1459 { "sched:sched_stat_runtime", process_sched_runtime_event, },
1460 { "sched:sched_wakeup", process_sched_wakeup_event, },
1461 { "sched:sched_wakeup_new", process_sched_wakeup_event, },
1462 { "sched:sched_migrate_task", process_sched_migrate_task_event, },
1464 struct perf_session *session;
1465 struct perf_data_file file = {
1467 .mode = PERF_DATA_MODE_READ,
1471 session = perf_session__new(&file, false, &sched->tool);
1472 if (session == NULL) {
1473 pr_debug("No Memory for session\n");
1477 symbol__init(&session->header.env);
1479 if (perf_session__set_tracepoints_handlers(session, handlers))
1482 if (perf_session__has_traces(session, "record -R")) {
1483 int err = perf_session__process_events(session);
1485 pr_err("Failed to process events, error %d", err);
1489 sched->nr_events = session->evlist->stats.nr_events[0];
1490 sched->nr_lost_events = session->evlist->stats.total_lost;
1491 sched->nr_lost_chunks = session->evlist->stats.nr_events[PERF_RECORD_LOST];
1496 perf_session__delete(session);
1500 static void print_bad_events(struct perf_sched *sched)
1502 if (sched->nr_unordered_timestamps && sched->nr_timestamps) {
1503 printf(" INFO: %.3f%% unordered timestamps (%ld out of %ld)\n",
1504 (double)sched->nr_unordered_timestamps/(double)sched->nr_timestamps*100.0,
1505 sched->nr_unordered_timestamps, sched->nr_timestamps);
1507 if (sched->nr_lost_events && sched->nr_events) {
1508 printf(" INFO: %.3f%% lost events (%ld out of %ld, in %ld chunks)\n",
1509 (double)sched->nr_lost_events/(double)sched->nr_events * 100.0,
1510 sched->nr_lost_events, sched->nr_events, sched->nr_lost_chunks);
1512 if (sched->nr_context_switch_bugs && sched->nr_timestamps) {
1513 printf(" INFO: %.3f%% context switch bugs (%ld out of %ld)",
1514 (double)sched->nr_context_switch_bugs/(double)sched->nr_timestamps*100.0,
1515 sched->nr_context_switch_bugs, sched->nr_timestamps);
1516 if (sched->nr_lost_events)
1517 printf(" (due to lost events?)");
1522 static int perf_sched__lat(struct perf_sched *sched)
1524 struct rb_node *next;
1528 if (perf_sched__read_events(sched))
1531 perf_sched__sort_lat(sched);
1533 printf("\n -----------------------------------------------------------------------------------------------------------------\n");
1534 printf(" Task | Runtime ms | Switches | Average delay ms | Maximum delay ms | Maximum delay at |\n");
1535 printf(" -----------------------------------------------------------------------------------------------------------------\n");
1537 next = rb_first(&sched->sorted_atom_root);
1540 struct work_atoms *work_list;
1542 work_list = rb_entry(next, struct work_atoms, node);
1543 output_lat_thread(sched, work_list);
1544 next = rb_next(next);
1545 thread__zput(work_list->thread);
1548 printf(" -----------------------------------------------------------------------------------------------------------------\n");
1549 printf(" TOTAL: |%11.3f ms |%9" PRIu64 " |\n",
1550 (double)sched->all_runtime / 1e6, sched->all_count);
1552 printf(" ---------------------------------------------------\n");
1554 print_bad_events(sched);
1560 static int perf_sched__map(struct perf_sched *sched)
1562 sched->max_cpu = sysconf(_SC_NPROCESSORS_CONF);
1565 if (perf_sched__read_events(sched))
1567 print_bad_events(sched);
1571 static int perf_sched__replay(struct perf_sched *sched)
1575 calibrate_run_measurement_overhead(sched);
1576 calibrate_sleep_measurement_overhead(sched);
1578 test_calibrations(sched);
1580 if (perf_sched__read_events(sched))
1583 printf("nr_run_events: %ld\n", sched->nr_run_events);
1584 printf("nr_sleep_events: %ld\n", sched->nr_sleep_events);
1585 printf("nr_wakeup_events: %ld\n", sched->nr_wakeup_events);
1587 if (sched->targetless_wakeups)
1588 printf("target-less wakeups: %ld\n", sched->targetless_wakeups);
1589 if (sched->multitarget_wakeups)
1590 printf("multi-target wakeups: %ld\n", sched->multitarget_wakeups);
1591 if (sched->nr_run_events_optimized)
1592 printf("run atoms optimized: %ld\n",
1593 sched->nr_run_events_optimized);
1595 print_task_traces(sched);
1596 add_cross_task_wakeups(sched);
1598 create_tasks(sched);
1599 printf("------------------------------------------------------------\n");
1600 for (i = 0; i < sched->replay_repeat; i++)
1601 run_one_test(sched);
1606 static void setup_sorting(struct perf_sched *sched, const struct option *options,
1607 const char * const usage_msg[])
1609 char *tmp, *tok, *str = strdup(sched->sort_order);
1611 for (tok = strtok_r(str, ", ", &tmp);
1612 tok; tok = strtok_r(NULL, ", ", &tmp)) {
1613 if (sort_dimension__add(tok, &sched->sort_list) < 0) {
1614 error("Unknown --sort key: `%s'", tok);
1615 usage_with_options(usage_msg, options);
1621 sort_dimension__add("pid", &sched->cmp_pid);
1624 static int __cmd_record(int argc, const char **argv)
1626 unsigned int rec_argc, i, j;
1627 const char **rec_argv;
1628 const char * const record_args[] = {
1634 "-e", "sched:sched_switch",
1635 "-e", "sched:sched_stat_wait",
1636 "-e", "sched:sched_stat_sleep",
1637 "-e", "sched:sched_stat_iowait",
1638 "-e", "sched:sched_stat_runtime",
1639 "-e", "sched:sched_process_fork",
1640 "-e", "sched:sched_wakeup",
1641 "-e", "sched:sched_wakeup_new",
1642 "-e", "sched:sched_migrate_task",
1645 rec_argc = ARRAY_SIZE(record_args) + argc - 1;
1646 rec_argv = calloc(rec_argc + 1, sizeof(char *));
1648 if (rec_argv == NULL)
1651 for (i = 0; i < ARRAY_SIZE(record_args); i++)
1652 rec_argv[i] = strdup(record_args[i]);
1654 for (j = 1; j < (unsigned int)argc; j++, i++)
1655 rec_argv[i] = argv[j];
1657 BUG_ON(i != rec_argc);
1659 return cmd_record(i, rec_argv, NULL);
1662 int cmd_sched(int argc, const char **argv, const char *prefix __maybe_unused)
1664 const char default_sort_order[] = "avg, max, switch, runtime";
1665 struct perf_sched sched = {
1667 .sample = perf_sched__process_tracepoint_sample,
1668 .comm = perf_event__process_comm,
1669 .lost = perf_event__process_lost,
1670 .fork = perf_sched__process_fork_event,
1671 .ordered_events = true,
1673 .cmp_pid = LIST_HEAD_INIT(sched.cmp_pid),
1674 .sort_list = LIST_HEAD_INIT(sched.sort_list),
1675 .start_work_mutex = PTHREAD_MUTEX_INITIALIZER,
1676 .work_done_wait_mutex = PTHREAD_MUTEX_INITIALIZER,
1677 .sort_order = default_sort_order,
1678 .replay_repeat = 10,
1680 .next_shortname1 = 'A',
1681 .next_shortname2 = '0',
1683 const struct option latency_options[] = {
1684 OPT_STRING('s', "sort", &sched.sort_order, "key[,key2...]",
1685 "sort by key(s): runtime, switch, avg, max"),
1686 OPT_INCR('v', "verbose", &verbose,
1687 "be more verbose (show symbol address, etc)"),
1688 OPT_INTEGER('C', "CPU", &sched.profile_cpu,
1689 "CPU to profile on"),
1690 OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
1691 "dump raw trace in ASCII"),
1694 const struct option replay_options[] = {
1695 OPT_UINTEGER('r', "repeat", &sched.replay_repeat,
1696 "repeat the workload replay N times (-1: infinite)"),
1697 OPT_INCR('v', "verbose", &verbose,
1698 "be more verbose (show symbol address, etc)"),
1699 OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
1700 "dump raw trace in ASCII"),
1703 const struct option sched_options[] = {
1704 OPT_STRING('i', "input", &input_name, "file",
1706 OPT_INCR('v', "verbose", &verbose,
1707 "be more verbose (show symbol address, etc)"),
1708 OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
1709 "dump raw trace in ASCII"),
1712 const char * const latency_usage[] = {
1713 "perf sched latency [<options>]",
1716 const char * const replay_usage[] = {
1717 "perf sched replay [<options>]",
1720 const char *const sched_subcommands[] = { "record", "latency", "map",
1721 "replay", "script", NULL };
1722 const char *sched_usage[] = {
1726 struct trace_sched_handler lat_ops = {
1727 .wakeup_event = latency_wakeup_event,
1728 .switch_event = latency_switch_event,
1729 .runtime_event = latency_runtime_event,
1730 .migrate_task_event = latency_migrate_task_event,
1732 struct trace_sched_handler map_ops = {
1733 .switch_event = map_switch_event,
1735 struct trace_sched_handler replay_ops = {
1736 .wakeup_event = replay_wakeup_event,
1737 .switch_event = replay_switch_event,
1738 .fork_event = replay_fork_event,
1742 for (i = 0; i < ARRAY_SIZE(sched.curr_pid); i++)
1743 sched.curr_pid[i] = -1;
1745 argc = parse_options_subcommand(argc, argv, sched_options, sched_subcommands,
1746 sched_usage, PARSE_OPT_STOP_AT_NON_OPTION);
1748 usage_with_options(sched_usage, sched_options);
1751 * Aliased to 'perf script' for now:
1753 if (!strcmp(argv[0], "script"))
1754 return cmd_script(argc, argv, prefix);
1756 if (!strncmp(argv[0], "rec", 3)) {
1757 return __cmd_record(argc, argv);
1758 } else if (!strncmp(argv[0], "lat", 3)) {
1759 sched.tp_handler = &lat_ops;
1761 argc = parse_options(argc, argv, latency_options, latency_usage, 0);
1763 usage_with_options(latency_usage, latency_options);
1765 setup_sorting(&sched, latency_options, latency_usage);
1766 return perf_sched__lat(&sched);
1767 } else if (!strcmp(argv[0], "map")) {
1768 sched.tp_handler = &map_ops;
1769 setup_sorting(&sched, latency_options, latency_usage);
1770 return perf_sched__map(&sched);
1771 } else if (!strncmp(argv[0], "rep", 3)) {
1772 sched.tp_handler = &replay_ops;
1774 argc = parse_options(argc, argv, replay_options, replay_usage, 0);
1776 usage_with_options(replay_usage, replay_options);
1778 return perf_sched__replay(&sched);
1780 usage_with_options(sched_usage, sched_options);