5 #include "util/evlist.h"
6 #include "util/cache.h"
7 #include "util/evsel.h"
8 #include "util/symbol.h"
9 #include "util/thread.h"
10 #include "util/header.h"
11 #include "util/session.h"
12 #include "util/tool.h"
13 #include "util/cloexec.h"
15 #include "util/parse-options.h"
16 #include "util/trace-event.h"
18 #include "util/debug.h"
20 #include <sys/prctl.h>
21 #include <sys/resource.h>
23 #include <semaphore.h>
27 #define PR_SET_NAME 15 /* Set process name */
40 unsigned long nr_events;
41 unsigned long curr_event;
42 struct sched_atom **atoms;
53 enum sched_event_type {
57 SCHED_EVENT_MIGRATION,
61 enum sched_event_type type;
67 struct task_desc *wakee;
70 #define TASK_STATE_TO_CHAR_STR "RSDTtZXxKWP"
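/*
 * One character per scheduler state, in the kernel's usual encoding:
 * R (running/runnable), S (interruptible sleep), D (uninterruptible
 * sleep), T (stopped), t (tracing stop), Z (zombie), X/x (dead),
 * K (wakekill), W (waking), P (parked).
 */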
80 struct list_head list;
81 enum thread_state state;
89 struct list_head work_list;
90 struct thread *thread;
99 typedef int (*sort_fn_t)(struct work_atoms *, struct work_atoms *);
103 struct trace_sched_handler {
104 int (*switch_event)(struct perf_sched *sched, struct perf_evsel *evsel,
105 struct perf_sample *sample, struct machine *machine);
107 int (*runtime_event)(struct perf_sched *sched, struct perf_evsel *evsel,
108 struct perf_sample *sample, struct machine *machine);
110 int (*wakeup_event)(struct perf_sched *sched, struct perf_evsel *evsel,
111 struct perf_sample *sample, struct machine *machine);
113 /* PERF_RECORD_FORK event, not sched_process_fork tracepoint */
114 int (*fork_event)(struct perf_sched *sched, union perf_event *event,
115 struct machine *machine);
117 int (*migrate_task_event)(struct perf_sched *sched,
118 struct perf_evsel *evsel,
119 struct perf_sample *sample,
120 struct machine *machine);
124 struct perf_tool tool;
125 const char *sort_order;
126 unsigned long nr_tasks;
127 struct task_desc *pid_to_task[MAX_PID];
128 struct task_desc **tasks;
129 const struct trace_sched_handler *tp_handler;
130 pthread_mutex_t start_work_mutex;
131 pthread_mutex_t work_done_wait_mutex;
	 * Track the current task - that way we can know whether there are
	 * any weird events, such as a task being switched away that is not
	 * the current one.
138 u32 curr_pid[MAX_CPUS];
139 struct thread *curr_thread[MAX_CPUS];
140 char next_shortname1;
141 char next_shortname2;
142 unsigned int replay_repeat;
143 unsigned long nr_run_events;
144 unsigned long nr_sleep_events;
145 unsigned long nr_wakeup_events;
146 unsigned long nr_sleep_corrections;
147 unsigned long nr_run_events_optimized;
148 unsigned long targetless_wakeups;
149 unsigned long multitarget_wakeups;
150 unsigned long nr_runs;
151 unsigned long nr_timestamps;
152 unsigned long nr_unordered_timestamps;
153 unsigned long nr_context_switch_bugs;
154 unsigned long nr_events;
155 unsigned long nr_lost_chunks;
156 unsigned long nr_lost_events;
157 u64 run_measurement_overhead;
158 u64 sleep_measurement_overhead;
161 u64 runavg_cpu_usage;
162 u64 parent_cpu_usage;
163 u64 runavg_parent_cpu_usage;
169 u64 cpu_last_switched[MAX_CPUS];
170 struct rb_root atom_root, sorted_atom_root;
171 struct list_head sort_list, cmp_pid;
174 static u64 get_nsecs(void)
178 clock_gettime(CLOCK_MONOTONIC, &ts);
180 return ts.tv_sec * 1000000000ULL + ts.tv_nsec;
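/*
 * Burn roughly @nsecs of CPU time by spinning on the monotonic clock,
 * compensating for the measured overhead of the timing loop itself.
 */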
183 static void burn_nsecs(struct perf_sched *sched, u64 nsecs)
185 u64 T0 = get_nsecs(), T1;
189 } while (T1 + sched->run_measurement_overhead < T0 + nsecs);
192 static void sleep_nsecs(u64 nsecs)
	ts.tv_nsec = nsecs % 1000000000ULL;
	ts.tv_sec = nsecs / 1000000000ULL;
199 nanosleep(&ts, NULL);
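/*
 * Estimate the fixed cost of one burn_nsecs(0) call by taking the
 * minimum over ten trials - the minimum is the least noisy sample.
 */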
202 static void calibrate_run_measurement_overhead(struct perf_sched *sched)
204 u64 T0, T1, delta, min_delta = 1000000000ULL;
207 for (i = 0; i < 10; i++) {
209 burn_nsecs(sched, 0);
212 min_delta = min(min_delta, delta);
214 sched->run_measurement_overhead = min_delta;
216 printf("run measurement overhead: %" PRIu64 " nsecs\n", min_delta);
219 static void calibrate_sleep_measurement_overhead(struct perf_sched *sched)
221 u64 T0, T1, delta, min_delta = 1000000000ULL;
224 for (i = 0; i < 10; i++) {
229 min_delta = min(min_delta, delta);
232 sched->sleep_measurement_overhead = min_delta;
234 printf("sleep measurement overhead: %" PRIu64 " nsecs\n", min_delta);
237 static struct sched_atom *
238 get_new_event(struct task_desc *task, u64 timestamp)
240 struct sched_atom *event = zalloc(sizeof(*event));
241 unsigned long idx = task->nr_events;
244 event->timestamp = timestamp;
248 size = sizeof(struct sched_atom *) * task->nr_events;
249 task->atoms = realloc(task->atoms, size);
250 BUG_ON(!task->atoms);
252 task->atoms[idx] = event;
257 static struct sched_atom *last_event(struct task_desc *task)
259 if (!task->nr_events)
262 return task->atoms[task->nr_events - 1];
265 static void add_sched_event_run(struct perf_sched *sched, struct task_desc *task,
266 u64 timestamp, u64 duration)
268 struct sched_atom *event, *curr_event = last_event(task);
	 * optimize an existing RUN event by merging this one
	 * into it:
274 if (curr_event && curr_event->type == SCHED_EVENT_RUN) {
275 sched->nr_run_events_optimized++;
276 curr_event->duration += duration;
280 event = get_new_event(task, timestamp);
282 event->type = SCHED_EVENT_RUN;
283 event->duration = duration;
285 sched->nr_run_events++;
288 static void add_sched_event_wakeup(struct perf_sched *sched, struct task_desc *task,
289 u64 timestamp, struct task_desc *wakee)
291 struct sched_atom *event, *wakee_event;
293 event = get_new_event(task, timestamp);
294 event->type = SCHED_EVENT_WAKEUP;
295 event->wakee = wakee;
297 wakee_event = last_event(wakee);
298 if (!wakee_event || wakee_event->type != SCHED_EVENT_SLEEP) {
299 sched->targetless_wakeups++;
302 if (wakee_event->wait_sem) {
303 sched->multitarget_wakeups++;
307 wakee_event->wait_sem = zalloc(sizeof(*wakee_event->wait_sem));
308 sem_init(wakee_event->wait_sem, 0, 0);
309 wakee_event->specific_wait = 1;
310 event->wait_sem = wakee_event->wait_sem;
312 sched->nr_wakeup_events++;
315 static void add_sched_event_sleep(struct perf_sched *sched, struct task_desc *task,
316 u64 timestamp, u64 task_state __maybe_unused)
318 struct sched_atom *event = get_new_event(task, timestamp);
320 event->type = SCHED_EVENT_SLEEP;
322 sched->nr_sleep_events++;
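/*
 * Look up the task_desc for @pid, allocating and registering a new one
 * (starting in the sleeping state) on first sight of the PID.
 */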
325 static struct task_desc *register_pid(struct perf_sched *sched,
326 unsigned long pid, const char *comm)
328 struct task_desc *task;
330 BUG_ON(pid >= MAX_PID);
332 task = sched->pid_to_task[pid];
337 task = zalloc(sizeof(*task));
339 task->nr = sched->nr_tasks;
340 strcpy(task->comm, comm);
	 * every task starts in the sleeping state - this sleep event gets
	 * ignored if there's no wakeup pointing to it:
345 add_sched_event_sleep(sched, task, 0, 0);
347 sched->pid_to_task[pid] = task;
	sched->tasks = realloc(sched->tasks, sched->nr_tasks * sizeof(struct task_desc *));
350 BUG_ON(!sched->tasks);
351 sched->tasks[task->nr] = task;
354 printf("registered task #%ld, PID %ld (%s)\n", sched->nr_tasks, pid, comm);
360 static void print_task_traces(struct perf_sched *sched)
362 struct task_desc *task;
365 for (i = 0; i < sched->nr_tasks; i++) {
366 task = sched->tasks[i];
367 printf("task %6ld (%20s:%10ld), nr_events: %ld\n",
368 task->nr, task->comm, task->pid, task->nr_events);
372 static void add_cross_task_wakeups(struct perf_sched *sched)
374 struct task_desc *task1, *task2;
377 for (i = 0; i < sched->nr_tasks; i++) {
378 task1 = sched->tasks[i];
380 if (j == sched->nr_tasks)
382 task2 = sched->tasks[j];
383 add_sched_event_wakeup(sched, task1, 0, task2);
387 static void perf_sched__process_event(struct perf_sched *sched,
388 struct sched_atom *atom)
392 switch (atom->type) {
393 case SCHED_EVENT_RUN:
394 burn_nsecs(sched, atom->duration);
396 case SCHED_EVENT_SLEEP:
398 ret = sem_wait(atom->wait_sem);
401 case SCHED_EVENT_WAKEUP:
403 ret = sem_post(atom->wait_sem);
406 case SCHED_EVENT_MIGRATION:
413 static u64 get_cpu_usage_nsec_parent(void)
419 err = getrusage(RUSAGE_SELF, &ru);
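	/* convert rusage's sec/usec pairs to nanoseconds */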
422 sum = ru.ru_utime.tv_sec*1e9 + ru.ru_utime.tv_usec*1e3;
423 sum += ru.ru_stime.tv_sec*1e9 + ru.ru_stime.tv_usec*1e3;
428 static int self_open_counters(void)
430 struct perf_event_attr attr;
431 char sbuf[STRERR_BUFSIZE];
434 memset(&attr, 0, sizeof(attr));
436 attr.type = PERF_TYPE_SOFTWARE;
437 attr.config = PERF_COUNT_SW_TASK_CLOCK;
439 fd = sys_perf_event_open(&attr, 0, -1, -1,
440 perf_event_open_cloexec_flag());
443 pr_err("Error: sys_perf_event_open() syscall returned "
444 "with %d (%s)\n", fd,
445 strerror_r(errno, sbuf, sizeof(sbuf)));
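/*
 * Read the task-clock counter opened by self_open_counters(); the
 * counter value is this thread's CPU time in nanoseconds.
 */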
449 static u64 get_cpu_usage_nsec_self(int fd)
454 ret = read(fd, &runtime, sizeof(runtime));
455 BUG_ON(ret != sizeof(runtime));
460 struct sched_thread_parms {
461 struct task_desc *task;
462 struct perf_sched *sched;
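/*
 * Per-task replay worker: signal readiness, wait for the parent to
 * release start_work_mutex, replay this task's atoms while measuring
 * CPU usage, then report completion via work_done_sem.
 */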
465 static void *thread_func(void *ctx)
467 struct sched_thread_parms *parms = ctx;
468 struct task_desc *this_task = parms->task;
469 struct perf_sched *sched = parms->sched;
470 u64 cpu_usage_0, cpu_usage_1;
471 unsigned long i, ret;
477 sprintf(comm2, ":%s", this_task->comm);
478 prctl(PR_SET_NAME, comm2);
479 fd = self_open_counters();
483 ret = sem_post(&this_task->ready_for_work);
485 ret = pthread_mutex_lock(&sched->start_work_mutex);
487 ret = pthread_mutex_unlock(&sched->start_work_mutex);
490 cpu_usage_0 = get_cpu_usage_nsec_self(fd);
492 for (i = 0; i < this_task->nr_events; i++) {
493 this_task->curr_event = i;
494 perf_sched__process_event(sched, this_task->atoms[i]);
497 cpu_usage_1 = get_cpu_usage_nsec_self(fd);
498 this_task->cpu_usage = cpu_usage_1 - cpu_usage_0;
499 ret = sem_post(&this_task->work_done_sem);
502 ret = pthread_mutex_lock(&sched->work_done_wait_mutex);
504 ret = pthread_mutex_unlock(&sched->work_done_wait_mutex);
510 static void create_tasks(struct perf_sched *sched)
512 struct task_desc *task;
517 err = pthread_attr_init(&attr);
519 err = pthread_attr_setstacksize(&attr,
520 (size_t) max(16 * 1024, PTHREAD_STACK_MIN));
522 err = pthread_mutex_lock(&sched->start_work_mutex);
524 err = pthread_mutex_lock(&sched->work_done_wait_mutex);
526 for (i = 0; i < sched->nr_tasks; i++) {
527 struct sched_thread_parms *parms = malloc(sizeof(*parms));
528 BUG_ON(parms == NULL);
529 parms->task = task = sched->tasks[i];
530 parms->sched = sched;
531 sem_init(&task->sleep_sem, 0, 0);
532 sem_init(&task->ready_for_work, 0, 0);
533 sem_init(&task->work_done_sem, 0, 0);
534 task->curr_event = 0;
535 err = pthread_create(&task->thread, &attr, thread_func, parms);
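/*
 * One replay iteration: the parent releases the two mutexes in turn to
 * let the workers run one pass, collects every work_done_sem, then
 * re-arms the semaphores for the next iteration.
 */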
540 static void wait_for_tasks(struct perf_sched *sched)
542 u64 cpu_usage_0, cpu_usage_1;
543 struct task_desc *task;
544 unsigned long i, ret;
546 sched->start_time = get_nsecs();
547 sched->cpu_usage = 0;
548 pthread_mutex_unlock(&sched->work_done_wait_mutex);
550 for (i = 0; i < sched->nr_tasks; i++) {
551 task = sched->tasks[i];
552 ret = sem_wait(&task->ready_for_work);
554 sem_init(&task->ready_for_work, 0, 0);
556 ret = pthread_mutex_lock(&sched->work_done_wait_mutex);
559 cpu_usage_0 = get_cpu_usage_nsec_parent();
561 pthread_mutex_unlock(&sched->start_work_mutex);
563 for (i = 0; i < sched->nr_tasks; i++) {
564 task = sched->tasks[i];
565 ret = sem_wait(&task->work_done_sem);
567 sem_init(&task->work_done_sem, 0, 0);
568 sched->cpu_usage += task->cpu_usage;
572 cpu_usage_1 = get_cpu_usage_nsec_parent();
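	/*
	 * Decaying average: keep 90% of the old value, mix in 10% of the
	 * new sample (e.g. an average of 100 and a sample of 200 yield
	 * (100 * 9 + 200) / 10 = 110).
	 */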
573 if (!sched->runavg_cpu_usage)
574 sched->runavg_cpu_usage = sched->cpu_usage;
575 sched->runavg_cpu_usage = (sched->runavg_cpu_usage * 9 + sched->cpu_usage) / 10;
577 sched->parent_cpu_usage = cpu_usage_1 - cpu_usage_0;
578 if (!sched->runavg_parent_cpu_usage)
579 sched->runavg_parent_cpu_usage = sched->parent_cpu_usage;
580 sched->runavg_parent_cpu_usage = (sched->runavg_parent_cpu_usage * 9 +
581 sched->parent_cpu_usage)/10;
583 ret = pthread_mutex_lock(&sched->start_work_mutex);
586 for (i = 0; i < sched->nr_tasks; i++) {
587 task = sched->tasks[i];
588 sem_init(&task->sleep_sem, 0, 0);
589 task->curr_event = 0;
593 static void run_one_test(struct perf_sched *sched)
595 u64 T0, T1, delta, avg_delta, fluct;
598 wait_for_tasks(sched);
602 sched->sum_runtime += delta;
605 avg_delta = sched->sum_runtime / sched->nr_runs;
606 if (delta < avg_delta)
607 fluct = avg_delta - delta;
609 fluct = delta - avg_delta;
610 sched->sum_fluct += fluct;
	if (!sched->run_avg)
		sched->run_avg = delta;
613 sched->run_avg = (sched->run_avg * 9 + delta) / 10;
615 printf("#%-3ld: %0.3f, ", sched->nr_runs, (double)delta / 1000000.0);
617 printf("ravg: %0.2f, ", (double)sched->run_avg / 1e6);
619 printf("cpu: %0.2f / %0.2f",
620 (double)sched->cpu_usage / 1e6, (double)sched->runavg_cpu_usage / 1e6);
	 * rusage statistics are collected by the parent; these are less
	 * accurate than the sched->sum_exec_runtime based statistics:
627 printf(" [%0.2f / %0.2f]",
628 (double)sched->parent_cpu_usage/1e6,
629 (double)sched->runavg_parent_cpu_usage/1e6);
634 if (sched->nr_sleep_corrections)
635 printf(" (%ld sleep corrections)\n", sched->nr_sleep_corrections);
636 sched->nr_sleep_corrections = 0;
639 static void test_calibrations(struct perf_sched *sched)
644 burn_nsecs(sched, 1e6);
647 printf("the run test took %" PRIu64 " nsecs\n", T1 - T0);
653 printf("the sleep test took %" PRIu64 " nsecs\n", T1 - T0);
657 replay_wakeup_event(struct perf_sched *sched,
658 struct perf_evsel *evsel, struct perf_sample *sample,
659 struct machine *machine __maybe_unused)
661 const char *comm = perf_evsel__strval(evsel, sample, "comm");
662 const u32 pid = perf_evsel__intval(evsel, sample, "pid");
663 struct task_desc *waker, *wakee;
666 printf("sched_wakeup event %p\n", evsel);
668 printf(" ... pid %d woke up %s/%d\n", sample->tid, comm, pid);
671 waker = register_pid(sched, sample->tid, "<unknown>");
672 wakee = register_pid(sched, pid, comm);
674 add_sched_event_wakeup(sched, waker, sample->time, wakee);
678 static int replay_switch_event(struct perf_sched *sched,
679 struct perf_evsel *evsel,
680 struct perf_sample *sample,
681 struct machine *machine __maybe_unused)
683 const char *prev_comm = perf_evsel__strval(evsel, sample, "prev_comm"),
684 *next_comm = perf_evsel__strval(evsel, sample, "next_comm");
685 const u32 prev_pid = perf_evsel__intval(evsel, sample, "prev_pid"),
686 next_pid = perf_evsel__intval(evsel, sample, "next_pid");
687 const u64 prev_state = perf_evsel__intval(evsel, sample, "prev_state");
688 struct task_desc *prev, __maybe_unused *next;
689 u64 timestamp0, timestamp = sample->time;
690 int cpu = sample->cpu;
694 printf("sched_switch event %p\n", evsel);
696 if (cpu >= MAX_CPUS || cpu < 0)
699 timestamp0 = sched->cpu_last_switched[cpu];
701 delta = timestamp - timestamp0;
706 pr_err("hm, delta: %" PRIu64 " < 0 ?\n", delta);
710 pr_debug(" ... switch from %s/%d to %s/%d [ran %" PRIu64 " nsecs]\n",
711 prev_comm, prev_pid, next_comm, next_pid, delta);
713 prev = register_pid(sched, prev_pid, prev_comm);
714 next = register_pid(sched, next_pid, next_comm);
716 sched->cpu_last_switched[cpu] = timestamp;
718 add_sched_event_run(sched, prev, timestamp, delta);
719 add_sched_event_sleep(sched, prev, timestamp, prev_state);
724 static int replay_fork_event(struct perf_sched *sched,
725 union perf_event *event,
726 struct machine *machine)
728 struct thread *child, *parent;
730 child = machine__findnew_thread(machine, event->fork.pid,
732 parent = machine__findnew_thread(machine, event->fork.ppid,
735 if (child == NULL || parent == NULL) {
736 pr_debug("thread does not exist on fork event: child %p, parent %p\n",
742 printf("fork event\n");
743 printf("... parent: %s/%d\n", thread__comm_str(parent), parent->tid);
744 printf("... child: %s/%d\n", thread__comm_str(child), child->tid);
747 register_pid(sched, parent->tid, thread__comm_str(parent));
748 register_pid(sched, child->tid, thread__comm_str(child));
752 struct sort_dimension {
755 struct list_head list;
759 thread_lat_cmp(struct list_head *list, struct work_atoms *l, struct work_atoms *r)
761 struct sort_dimension *sort;
764 BUG_ON(list_empty(list));
766 list_for_each_entry(sort, list, list) {
767 ret = sort->cmp(l, r);
775 static struct work_atoms *
776 thread_atoms_search(struct rb_root *root, struct thread *thread,
777 struct list_head *sort_list)
779 struct rb_node *node = root->rb_node;
780 struct work_atoms key = { .thread = thread };
783 struct work_atoms *atoms;
786 atoms = container_of(node, struct work_atoms, node);
788 cmp = thread_lat_cmp(sort_list, &key, atoms);
790 node = node->rb_left;
792 node = node->rb_right;
794 BUG_ON(thread != atoms->thread);
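/*
 * Standard rbtree insertion of a work_atoms node, ordered by the
 * currently active sort keys.
 */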
802 __thread_latency_insert(struct rb_root *root, struct work_atoms *data,
803 struct list_head *sort_list)
805 struct rb_node **new = &(root->rb_node), *parent = NULL;
808 struct work_atoms *this;
811 this = container_of(*new, struct work_atoms, node);
814 cmp = thread_lat_cmp(sort_list, data, this);
817 new = &((*new)->rb_left);
819 new = &((*new)->rb_right);
822 rb_link_node(&data->node, parent, new);
823 rb_insert_color(&data->node, root);
826 static int thread_atoms_insert(struct perf_sched *sched, struct thread *thread)
828 struct work_atoms *atoms = zalloc(sizeof(*atoms));
830 pr_err("No memory at %s\n", __func__);
834 atoms->thread = thread;
835 INIT_LIST_HEAD(&atoms->work_list);
836 __thread_latency_insert(&sched->atom_root, atoms, &sched->cmp_pid);
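/*
 * Map the tracepoint's prev_state value to a state character. Note that
 * this assumes prev_state is a direct index into TASK_STATE_TO_CHAR_STR.
 */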
840 static char sched_out_state(u64 prev_state)
842 const char *str = TASK_STATE_TO_CHAR_STR;
844 return str[prev_state];
848 add_sched_out_event(struct work_atoms *atoms,
852 struct work_atom *atom = zalloc(sizeof(*atom));
854 pr_err("Non memory at %s", __func__);
858 atom->sched_out_time = timestamp;
860 if (run_state == 'R') {
861 atom->state = THREAD_WAIT_CPU;
862 atom->wake_up_time = atom->sched_out_time;
865 list_add_tail(&atom->list, &atoms->work_list);
870 add_runtime_event(struct work_atoms *atoms, u64 delta,
871 u64 timestamp __maybe_unused)
873 struct work_atom *atom;
875 BUG_ON(list_empty(&atoms->work_list));
877 atom = list_entry(atoms->work_list.prev, struct work_atom, list);
879 atom->runtime += delta;
880 atoms->total_runtime += delta;
884 add_sched_in_event(struct work_atoms *atoms, u64 timestamp)
886 struct work_atom *atom;
889 if (list_empty(&atoms->work_list))
892 atom = list_entry(atoms->work_list.prev, struct work_atom, list);
894 if (atom->state != THREAD_WAIT_CPU)
897 if (timestamp < atom->wake_up_time) {
898 atom->state = THREAD_IGNORE;
902 atom->state = THREAD_SCHED_IN;
903 atom->sched_in_time = timestamp;
905 delta = atom->sched_in_time - atom->wake_up_time;
906 atoms->total_lat += delta;
907 if (delta > atoms->max_lat) {
908 atoms->max_lat = delta;
909 atoms->max_lat_at = timestamp;
914 static int latency_switch_event(struct perf_sched *sched,
915 struct perf_evsel *evsel,
916 struct perf_sample *sample,
917 struct machine *machine)
919 const u32 prev_pid = perf_evsel__intval(evsel, sample, "prev_pid"),
920 next_pid = perf_evsel__intval(evsel, sample, "next_pid");
921 const u64 prev_state = perf_evsel__intval(evsel, sample, "prev_state");
922 struct work_atoms *out_events, *in_events;
923 struct thread *sched_out, *sched_in;
924 u64 timestamp0, timestamp = sample->time;
925 int cpu = sample->cpu;
928 BUG_ON(cpu >= MAX_CPUS || cpu < 0);
930 timestamp0 = sched->cpu_last_switched[cpu];
931 sched->cpu_last_switched[cpu] = timestamp;
933 delta = timestamp - timestamp0;
938 pr_err("hm, delta: %" PRIu64 " < 0 ?\n", delta);
942 sched_out = machine__findnew_thread(machine, -1, prev_pid);
943 sched_in = machine__findnew_thread(machine, -1, next_pid);
945 out_events = thread_atoms_search(&sched->atom_root, sched_out, &sched->cmp_pid);
947 if (thread_atoms_insert(sched, sched_out))
949 out_events = thread_atoms_search(&sched->atom_root, sched_out, &sched->cmp_pid);
951 pr_err("out-event: Internal tree error");
955 if (add_sched_out_event(out_events, sched_out_state(prev_state), timestamp))
958 in_events = thread_atoms_search(&sched->atom_root, sched_in, &sched->cmp_pid);
960 if (thread_atoms_insert(sched, sched_in))
962 in_events = thread_atoms_search(&sched->atom_root, sched_in, &sched->cmp_pid);
964 pr_err("in-event: Internal tree error");
	 * Task came in that we have not heard about yet,
	 * add in an initial atom in runnable state:
971 if (add_sched_out_event(in_events, 'R', timestamp))
974 add_sched_in_event(in_events, timestamp);
979 static int latency_runtime_event(struct perf_sched *sched,
980 struct perf_evsel *evsel,
981 struct perf_sample *sample,
982 struct machine *machine)
984 const u32 pid = perf_evsel__intval(evsel, sample, "pid");
985 const u64 runtime = perf_evsel__intval(evsel, sample, "runtime");
986 struct thread *thread = machine__findnew_thread(machine, -1, pid);
987 struct work_atoms *atoms = thread_atoms_search(&sched->atom_root, thread, &sched->cmp_pid);
988 u64 timestamp = sample->time;
989 int cpu = sample->cpu;
991 BUG_ON(cpu >= MAX_CPUS || cpu < 0);
993 if (thread_atoms_insert(sched, thread))
995 atoms = thread_atoms_search(&sched->atom_root, thread, &sched->cmp_pid);
997 pr_err("in-event: Internal tree error");
1000 if (add_sched_out_event(atoms, 'R', timestamp))
1004 add_runtime_event(atoms, runtime, timestamp);
1008 static int latency_wakeup_event(struct perf_sched *sched,
1009 struct perf_evsel *evsel,
1010 struct perf_sample *sample,
1011 struct machine *machine)
1013 const u32 pid = perf_evsel__intval(evsel, sample, "pid");
1014 struct work_atoms *atoms;
1015 struct work_atom *atom;
1016 struct thread *wakee;
1017 u64 timestamp = sample->time;
1019 wakee = machine__findnew_thread(machine, -1, pid);
1020 atoms = thread_atoms_search(&sched->atom_root, wakee, &sched->cmp_pid);
1022 if (thread_atoms_insert(sched, wakee))
1024 atoms = thread_atoms_search(&sched->atom_root, wakee, &sched->cmp_pid);
1026 pr_err("wakeup-event: Internal tree error");
1029 if (add_sched_out_event(atoms, 'S', timestamp))
1033 BUG_ON(list_empty(&atoms->work_list));
1035 atom = list_entry(atoms->work_list.prev, struct work_atom, list);
	 * A wakeup event is not guaranteed to arrive while the task is off
	 * the run queue; it may also fire while the task is still on the
	 * run queue, merely changing ->state to TASK_RUNNING. So do not
	 * set ->wake_up_time when waking a task that is already runnable.
	 *
	 * You WILL be missing events if you've recorded only
	 * one CPU, or are looking at only one, so don't
	 * skip in this case.
1048 if (sched->profile_cpu == -1 && atom->state != THREAD_SLEEPING)
1051 sched->nr_timestamps++;
1052 if (atom->sched_out_time > timestamp) {
1053 sched->nr_unordered_timestamps++;
1057 atom->state = THREAD_WAIT_CPU;
1058 atom->wake_up_time = timestamp;
1062 static int latency_migrate_task_event(struct perf_sched *sched,
1063 struct perf_evsel *evsel,
1064 struct perf_sample *sample,
1065 struct machine *machine)
1067 const u32 pid = perf_evsel__intval(evsel, sample, "pid");
1068 u64 timestamp = sample->time;
1069 struct work_atoms *atoms;
1070 struct work_atom *atom;
1071 struct thread *migrant;
1074 * Only need to worry about migration when profiling one CPU.
1076 if (sched->profile_cpu == -1)
1079 migrant = machine__findnew_thread(machine, -1, pid);
1080 atoms = thread_atoms_search(&sched->atom_root, migrant, &sched->cmp_pid);
1082 if (thread_atoms_insert(sched, migrant))
1084 register_pid(sched, migrant->tid, thread__comm_str(migrant));
1085 atoms = thread_atoms_search(&sched->atom_root, migrant, &sched->cmp_pid);
1087 pr_err("migration-event: Internal tree error");
1090 if (add_sched_out_event(atoms, 'R', timestamp))
1094 BUG_ON(list_empty(&atoms->work_list));
1096 atom = list_entry(atoms->work_list.prev, struct work_atom, list);
1097 atom->sched_in_time = atom->sched_out_time = atom->wake_up_time = timestamp;
1099 sched->nr_timestamps++;
1101 if (atom->sched_out_time > timestamp)
1102 sched->nr_unordered_timestamps++;
1107 static void output_lat_thread(struct perf_sched *sched, struct work_atoms *work_list)
1113 if (!work_list->nb_atoms)
1116 * Ignore idle threads:
1118 if (!strcmp(thread__comm_str(work_list->thread), "swapper"))
1121 sched->all_runtime += work_list->total_runtime;
1122 sched->all_count += work_list->nb_atoms;
1124 ret = printf(" %s:%d ", thread__comm_str(work_list->thread), work_list->thread->tid);
1126 for (i = 0; i < 24 - ret; i++)
1129 avg = work_list->total_lat / work_list->nb_atoms;
1131 printf("|%11.3f ms |%9" PRIu64 " | avg:%9.3f ms | max:%9.3f ms | max at: %13.6f s\n",
1132 (double)work_list->total_runtime / 1e6,
1133 work_list->nb_atoms, (double)avg / 1e6,
1134 (double)work_list->max_lat / 1e6,
1135 (double)work_list->max_lat_at / 1e9);
1138 static int pid_cmp(struct work_atoms *l, struct work_atoms *r)
1140 if (l->thread->tid < r->thread->tid)
1142 if (l->thread->tid > r->thread->tid)
1148 static int avg_cmp(struct work_atoms *l, struct work_atoms *r)
1158 avgl = l->total_lat / l->nb_atoms;
1159 avgr = r->total_lat / r->nb_atoms;
1169 static int max_cmp(struct work_atoms *l, struct work_atoms *r)
1171 if (l->max_lat < r->max_lat)
1173 if (l->max_lat > r->max_lat)
1179 static int switch_cmp(struct work_atoms *l, struct work_atoms *r)
1181 if (l->nb_atoms < r->nb_atoms)
1183 if (l->nb_atoms > r->nb_atoms)
1189 static int runtime_cmp(struct work_atoms *l, struct work_atoms *r)
1191 if (l->total_runtime < r->total_runtime)
1193 if (l->total_runtime > r->total_runtime)
1199 static int sort_dimension__add(const char *tok, struct list_head *list)
1202 static struct sort_dimension avg_sort_dimension = {
1206 static struct sort_dimension max_sort_dimension = {
1210 static struct sort_dimension pid_sort_dimension = {
1214 static struct sort_dimension runtime_sort_dimension = {
1218 static struct sort_dimension switch_sort_dimension = {
1222 struct sort_dimension *available_sorts[] = {
1223 &pid_sort_dimension,
1224 &avg_sort_dimension,
1225 &max_sort_dimension,
1226 &switch_sort_dimension,
1227 &runtime_sort_dimension,
1230 for (i = 0; i < ARRAY_SIZE(available_sorts); i++) {
1231 if (!strcmp(available_sorts[i]->name, tok)) {
1232 list_add_tail(&available_sorts[i]->list, list);
1241 static void perf_sched__sort_lat(struct perf_sched *sched)
1243 struct rb_node *node;
1246 struct work_atoms *data;
1247 node = rb_first(&sched->atom_root);
1251 rb_erase(node, &sched->atom_root);
1252 data = rb_entry(node, struct work_atoms, node);
1253 __thread_latency_insert(&sched->sorted_atom_root, data, &sched->sort_list);
1257 static int process_sched_wakeup_event(struct perf_tool *tool,
1258 struct perf_evsel *evsel,
1259 struct perf_sample *sample,
1260 struct machine *machine)
1262 struct perf_sched *sched = container_of(tool, struct perf_sched, tool);
1264 if (sched->tp_handler->wakeup_event)
1265 return sched->tp_handler->wakeup_event(sched, evsel, sample, machine);
1270 static int map_switch_event(struct perf_sched *sched, struct perf_evsel *evsel,
1271 struct perf_sample *sample, struct machine *machine)
1273 const u32 next_pid = perf_evsel__intval(evsel, sample, "next_pid");
1274 struct thread *sched_in;
1276 u64 timestamp0, timestamp = sample->time;
1278 int cpu, this_cpu = sample->cpu;
1280 BUG_ON(this_cpu >= MAX_CPUS || this_cpu < 0);
1282 if (this_cpu > sched->max_cpu)
1283 sched->max_cpu = this_cpu;
1285 timestamp0 = sched->cpu_last_switched[this_cpu];
1286 sched->cpu_last_switched[this_cpu] = timestamp;
1288 delta = timestamp - timestamp0;
1293 pr_err("hm, delta: %" PRIu64 " < 0 ?\n", delta);
1297 sched_in = machine__findnew_thread(machine, -1, next_pid);
1299 sched->curr_thread[this_cpu] = sched_in;
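	/*
	 * Assign each newly seen thread a two-character shortname, cycling
	 * through A0, B0, ... Z0, then A1, B1, ... ('.' is reserved for
	 * swapper).
	 */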
1304 if (!sched_in->shortname[0]) {
1305 if (!strcmp(thread__comm_str(sched_in), "swapper")) {
1307 * Don't allocate a letter-number for swapper:0
1308 * as a shortname. Instead, we use '.' for it.
1310 sched_in->shortname[0] = '.';
1311 sched_in->shortname[1] = ' ';
1313 sched_in->shortname[0] = sched->next_shortname1;
1314 sched_in->shortname[1] = sched->next_shortname2;
1316 if (sched->next_shortname1 < 'Z') {
1317 sched->next_shortname1++;
1319 sched->next_shortname1 = 'A';
1320 if (sched->next_shortname2 < '9')
1321 sched->next_shortname2++;
1323 sched->next_shortname2 = '0';
1329 for (cpu = 0; cpu <= sched->max_cpu; cpu++) {
1330 if (cpu != this_cpu)
1335 if (sched->curr_thread[cpu])
1336 printf("%2s ", sched->curr_thread[cpu]->shortname);
1341 printf(" %12.6f secs ", (double)timestamp/1e9);
1342 if (new_shortname) {
1343 printf("%s => %s:%d\n",
1344 sched_in->shortname, thread__comm_str(sched_in), sched_in->tid);
1352 static int process_sched_switch_event(struct perf_tool *tool,
1353 struct perf_evsel *evsel,
1354 struct perf_sample *sample,
1355 struct machine *machine)
1357 struct perf_sched *sched = container_of(tool, struct perf_sched, tool);
1358 int this_cpu = sample->cpu, err = 0;
1359 u32 prev_pid = perf_evsel__intval(evsel, sample, "prev_pid"),
1360 next_pid = perf_evsel__intval(evsel, sample, "next_pid");
1362 if (sched->curr_pid[this_cpu] != (u32)-1) {
		 * Are we trying to switch away a PID that is
		 * not current?
1367 if (sched->curr_pid[this_cpu] != prev_pid)
1368 sched->nr_context_switch_bugs++;
1371 if (sched->tp_handler->switch_event)
1372 err = sched->tp_handler->switch_event(sched, evsel, sample, machine);
1374 sched->curr_pid[this_cpu] = next_pid;
1378 static int process_sched_runtime_event(struct perf_tool *tool,
1379 struct perf_evsel *evsel,
1380 struct perf_sample *sample,
1381 struct machine *machine)
1383 struct perf_sched *sched = container_of(tool, struct perf_sched, tool);
1385 if (sched->tp_handler->runtime_event)
1386 return sched->tp_handler->runtime_event(sched, evsel, sample, machine);
1391 static int perf_sched__process_fork_event(struct perf_tool *tool,
1392 union perf_event *event,
1393 struct perf_sample *sample,
1394 struct machine *machine)
1396 struct perf_sched *sched = container_of(tool, struct perf_sched, tool);
	/* run the fork event through the perf machinery */
1399 perf_event__process_fork(tool, event, sample, machine);
1401 /* and then run additional processing needed for this command */
1402 if (sched->tp_handler->fork_event)
1403 return sched->tp_handler->fork_event(sched, event, machine);
1408 static int process_sched_migrate_task_event(struct perf_tool *tool,
1409 struct perf_evsel *evsel,
1410 struct perf_sample *sample,
1411 struct machine *machine)
1413 struct perf_sched *sched = container_of(tool, struct perf_sched, tool);
1415 if (sched->tp_handler->migrate_task_event)
1416 return sched->tp_handler->migrate_task_event(sched, evsel, sample, machine);
1421 typedef int (*tracepoint_handler)(struct perf_tool *tool,
1422 struct perf_evsel *evsel,
1423 struct perf_sample *sample,
1424 struct machine *machine);
1426 static int perf_sched__process_tracepoint_sample(struct perf_tool *tool __maybe_unused,
1427 union perf_event *event __maybe_unused,
1428 struct perf_sample *sample,
1429 struct perf_evsel *evsel,
1430 struct machine *machine)
1434 evsel->hists.stats.total_period += sample->period;
1435 hists__inc_nr_samples(&evsel->hists, true);
1437 if (evsel->handler != NULL) {
1438 tracepoint_handler f = evsel->handler;
1439 err = f(tool, evsel, sample, machine);
1445 static int perf_sched__read_events(struct perf_sched *sched,
1446 struct perf_session **psession)
1448 const struct perf_evsel_str_handler handlers[] = {
1449 { "sched:sched_switch", process_sched_switch_event, },
1450 { "sched:sched_stat_runtime", process_sched_runtime_event, },
1451 { "sched:sched_wakeup", process_sched_wakeup_event, },
1452 { "sched:sched_wakeup_new", process_sched_wakeup_event, },
1453 { "sched:sched_migrate_task", process_sched_migrate_task_event, },
1455 struct perf_session *session;
1456 struct perf_data_file file = {
1458 .mode = PERF_DATA_MODE_READ,
1461 session = perf_session__new(&file, false, &sched->tool);
1462 if (session == NULL) {
1463 pr_debug("No Memory for session\n");
1467 symbol__init(&session->header.env);
1469 if (perf_session__set_tracepoints_handlers(session, handlers))
1472 if (perf_session__has_traces(session, "record -R")) {
1473 int err = perf_session__process_events(session, &sched->tool);
1475 pr_err("Failed to process events, error %d", err);
1479 sched->nr_events = session->stats.nr_events[0];
1480 sched->nr_lost_events = session->stats.total_lost;
1481 sched->nr_lost_chunks = session->stats.nr_events[PERF_RECORD_LOST];
1485 *psession = session;
1487 perf_session__delete(session);
1492 perf_session__delete(session);
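/*
 * Summarize anomalies seen while processing: unordered timestamps,
 * lost events/chunks, and apparent context-switch inconsistencies.
 */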
1496 static void print_bad_events(struct perf_sched *sched)
1498 if (sched->nr_unordered_timestamps && sched->nr_timestamps) {
1499 printf(" INFO: %.3f%% unordered timestamps (%ld out of %ld)\n",
1500 (double)sched->nr_unordered_timestamps/(double)sched->nr_timestamps*100.0,
1501 sched->nr_unordered_timestamps, sched->nr_timestamps);
1503 if (sched->nr_lost_events && sched->nr_events) {
1504 printf(" INFO: %.3f%% lost events (%ld out of %ld, in %ld chunks)\n",
1505 (double)sched->nr_lost_events/(double)sched->nr_events * 100.0,
1506 sched->nr_lost_events, sched->nr_events, sched->nr_lost_chunks);
1508 if (sched->nr_context_switch_bugs && sched->nr_timestamps) {
1509 printf(" INFO: %.3f%% context switch bugs (%ld out of %ld)",
1510 (double)sched->nr_context_switch_bugs/(double)sched->nr_timestamps*100.0,
1511 sched->nr_context_switch_bugs, sched->nr_timestamps);
1512 if (sched->nr_lost_events)
1513 printf(" (due to lost events?)");
1518 static int perf_sched__lat(struct perf_sched *sched)
1520 struct rb_node *next;
1521 struct perf_session *session;
1525 /* save session -- references to threads are held in work_list */
1526 if (perf_sched__read_events(sched, &session))
1529 perf_sched__sort_lat(sched);
1531 printf("\n -----------------------------------------------------------------------------------------------------------------\n");
1532 printf(" Task | Runtime ms | Switches | Average delay ms | Maximum delay ms | Maximum delay at |\n");
1533 printf(" -----------------------------------------------------------------------------------------------------------------\n");
1535 next = rb_first(&sched->sorted_atom_root);
1538 struct work_atoms *work_list;
1540 work_list = rb_entry(next, struct work_atoms, node);
1541 output_lat_thread(sched, work_list);
1542 next = rb_next(next);
1545 printf(" -----------------------------------------------------------------------------------------------------------------\n");
1546 printf(" TOTAL: |%11.3f ms |%9" PRIu64 " |\n",
1547 (double)sched->all_runtime / 1e6, sched->all_count);
1549 printf(" ---------------------------------------------------\n");
1551 print_bad_events(sched);
1554 perf_session__delete(session);
1558 static int perf_sched__map(struct perf_sched *sched)
1560 sched->max_cpu = sysconf(_SC_NPROCESSORS_CONF);
1563 if (perf_sched__read_events(sched, NULL))
1565 print_bad_events(sched);
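/*
 * Replay pipeline: calibrate measurement overhead, read the recorded
 * events into per-task atom lists, spawn one thread per task, then run
 * the workload replay_repeat times.
 */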
1569 static int perf_sched__replay(struct perf_sched *sched)
1573 calibrate_run_measurement_overhead(sched);
1574 calibrate_sleep_measurement_overhead(sched);
1576 test_calibrations(sched);
1578 if (perf_sched__read_events(sched, NULL))
1581 printf("nr_run_events: %ld\n", sched->nr_run_events);
1582 printf("nr_sleep_events: %ld\n", sched->nr_sleep_events);
1583 printf("nr_wakeup_events: %ld\n", sched->nr_wakeup_events);
1585 if (sched->targetless_wakeups)
1586 printf("target-less wakeups: %ld\n", sched->targetless_wakeups);
1587 if (sched->multitarget_wakeups)
1588 printf("multi-target wakeups: %ld\n", sched->multitarget_wakeups);
1589 if (sched->nr_run_events_optimized)
1590 printf("run atoms optimized: %ld\n",
1591 sched->nr_run_events_optimized);
1593 print_task_traces(sched);
1594 add_cross_task_wakeups(sched);
1596 create_tasks(sched);
1597 printf("------------------------------------------------------------\n");
1598 for (i = 0; i < sched->replay_repeat; i++)
1599 run_one_test(sched);
1604 static void setup_sorting(struct perf_sched *sched, const struct option *options,
1605 const char * const usage_msg[])
1607 char *tmp, *tok, *str = strdup(sched->sort_order);
1609 for (tok = strtok_r(str, ", ", &tmp);
1610 tok; tok = strtok_r(NULL, ", ", &tmp)) {
1611 if (sort_dimension__add(tok, &sched->sort_list) < 0) {
1612 error("Unknown --sort key: `%s'", tok);
1613 usage_with_options(usage_msg, options);
1619 sort_dimension__add("pid", &sched->cmp_pid);
1622 static int __cmd_record(int argc, const char **argv)
1624 unsigned int rec_argc, i, j;
1625 const char **rec_argv;
1626 const char * const record_args[] = {
1632 "-e", "sched:sched_switch",
1633 "-e", "sched:sched_stat_wait",
1634 "-e", "sched:sched_stat_sleep",
1635 "-e", "sched:sched_stat_iowait",
1636 "-e", "sched:sched_stat_runtime",
1637 "-e", "sched:sched_process_fork",
1638 "-e", "sched:sched_wakeup",
1639 "-e", "sched:sched_wakeup_new",
1640 "-e", "sched:sched_migrate_task",
1643 rec_argc = ARRAY_SIZE(record_args) + argc - 1;
1644 rec_argv = calloc(rec_argc + 1, sizeof(char *));
1646 if (rec_argv == NULL)
1649 for (i = 0; i < ARRAY_SIZE(record_args); i++)
1650 rec_argv[i] = strdup(record_args[i]);
1652 for (j = 1; j < (unsigned int)argc; j++, i++)
1653 rec_argv[i] = argv[j];
1655 BUG_ON(i != rec_argc);
1657 return cmd_record(i, rec_argv, NULL);
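/*
 * Example invocations (illustrative, not exhaustive):
 *
 *   perf sched record -- sleep 1    # record scheduling events
 *   perf sched latency --sort max   # per-task latency table
 *   perf sched map                  # per-CPU context-switch map
 *   perf sched replay -r 10         # re-run the recorded workload
 */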
1660 int cmd_sched(int argc, const char **argv, const char *prefix __maybe_unused)
1662 const char default_sort_order[] = "avg, max, switch, runtime";
1663 struct perf_sched sched = {
1665 .sample = perf_sched__process_tracepoint_sample,
1666 .comm = perf_event__process_comm,
1667 .lost = perf_event__process_lost,
1668 .fork = perf_sched__process_fork_event,
1669 .ordered_events = true,
1671 .cmp_pid = LIST_HEAD_INIT(sched.cmp_pid),
1672 .sort_list = LIST_HEAD_INIT(sched.sort_list),
1673 .start_work_mutex = PTHREAD_MUTEX_INITIALIZER,
1674 .work_done_wait_mutex = PTHREAD_MUTEX_INITIALIZER,
1675 .sort_order = default_sort_order,
1676 .replay_repeat = 10,
1678 .next_shortname1 = 'A',
1679 .next_shortname2 = '0',
1681 const struct option latency_options[] = {
1682 OPT_STRING('s', "sort", &sched.sort_order, "key[,key2...]",
1683 "sort by key(s): runtime, switch, avg, max"),
1684 OPT_INCR('v', "verbose", &verbose,
1685 "be more verbose (show symbol address, etc)"),
1686 OPT_INTEGER('C', "CPU", &sched.profile_cpu,
1687 "CPU to profile on"),
1688 OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
1689 "dump raw trace in ASCII"),
1692 const struct option replay_options[] = {
1693 OPT_UINTEGER('r', "repeat", &sched.replay_repeat,
1694 "repeat the workload replay N times (-1: infinite)"),
1695 OPT_INCR('v', "verbose", &verbose,
1696 "be more verbose (show symbol address, etc)"),
1697 OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
1698 "dump raw trace in ASCII"),
1701 const struct option sched_options[] = {
1702 OPT_STRING('i', "input", &input_name, "file",
1704 OPT_INCR('v', "verbose", &verbose,
1705 "be more verbose (show symbol address, etc)"),
1706 OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
1707 "dump raw trace in ASCII"),
1710 const char * const latency_usage[] = {
1711 "perf sched latency [<options>]",
1714 const char * const replay_usage[] = {
1715 "perf sched replay [<options>]",
1718 const char *const sched_subcommands[] = { "record", "latency", "map",
1719 "replay", "script", NULL };
1720 const char *sched_usage[] = {
1724 struct trace_sched_handler lat_ops = {
1725 .wakeup_event = latency_wakeup_event,
1726 .switch_event = latency_switch_event,
1727 .runtime_event = latency_runtime_event,
1728 .migrate_task_event = latency_migrate_task_event,
1730 struct trace_sched_handler map_ops = {
1731 .switch_event = map_switch_event,
1733 struct trace_sched_handler replay_ops = {
1734 .wakeup_event = replay_wakeup_event,
1735 .switch_event = replay_switch_event,
1736 .fork_event = replay_fork_event,
1740 for (i = 0; i < ARRAY_SIZE(sched.curr_pid); i++)
1741 sched.curr_pid[i] = -1;
1743 argc = parse_options_subcommand(argc, argv, sched_options, sched_subcommands,
1744 sched_usage, PARSE_OPT_STOP_AT_NON_OPTION);
1746 usage_with_options(sched_usage, sched_options);
1749 * Aliased to 'perf script' for now:
1751 if (!strcmp(argv[0], "script"))
1752 return cmd_script(argc, argv, prefix);
1754 if (!strncmp(argv[0], "rec", 3)) {
1755 return __cmd_record(argc, argv);
1756 } else if (!strncmp(argv[0], "lat", 3)) {
1757 sched.tp_handler = &lat_ops;
1759 argc = parse_options(argc, argv, latency_options, latency_usage, 0);
1761 usage_with_options(latency_usage, latency_options);
1763 setup_sorting(&sched, latency_options, latency_usage);
1764 return perf_sched__lat(&sched);
1765 } else if (!strcmp(argv[0], "map")) {
1766 sched.tp_handler = &map_ops;
1767 setup_sorting(&sched, latency_options, latency_usage);
1768 return perf_sched__map(&sched);
1769 } else if (!strncmp(argv[0], "rep", 3)) {
1770 sched.tp_handler = &replay_ops;
1772 argc = parse_options(argc, argv, replay_options, replay_usage, 0);
1774 usage_with_options(replay_usage, replay_options);
1776 return perf_sched__replay(&sched);
1778 usage_with_options(sched_usage, sched_options);