/*
 * kernel/sched_debug.c
 *
 * Print the CFS rbtree
 *
 * Copyright(C) 2007, Red Hat, Inc., Ingo Molnar
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/proc_fs.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/kallsyms.h>
#include <linux/utsname.h>
/*
 * This allows printing both to /proc/sched_debug and
 * to the console
 */
#define SEQ_printf(m, x...)			\
 do {						\
	if (m)					\
		seq_printf(m, x);		\
	else					\
		printk(x);			\
 } while (0)
/*
 * Ease the printing of nsec fields:
 */
static long long nsec_high(unsigned long long nsec)
{
	if ((long long)nsec < 0) {
		nsec = -nsec;
		do_div(nsec, 1000000);
		return -nsec;
	}
	do_div(nsec, 1000000);

	return nsec;
}

static unsigned long nsec_low(unsigned long long nsec)
{
	if ((long long)nsec < 0)
		nsec = -nsec;

	return do_div(nsec, 1000000);
}
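/*
 * SPLIT_NS() expands into two printf arguments: the whole-millisecond
 * part and the nanosecond remainder, for use with a "%Ld.%06ld"-style
 * format string.
 */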
#define SPLIT_NS(x) nsec_high(x), nsec_low(x)
#ifdef CONFIG_FAIR_GROUP_SCHED
static void print_cfs_group_stats(struct seq_file *m, int cpu,
		struct task_group *tg)
{
	struct sched_entity *se = tg->se[cpu];
	if (!se)
		return;

#define P(F) \
	SEQ_printf(m, "  .%-30s: %lld\n", #F, (long long)F)
#define PN(F) \
	SEQ_printf(m, "  .%-30s: %lld.%06ld\n", #F, SPLIT_NS((long long)F))

	PN(se->exec_start);
	PN(se->vruntime);
	PN(se->sum_exec_runtime);
#ifdef CONFIG_SCHEDSTATS
	PN(se->wait_start);
	PN(se->sleep_start);
	PN(se->block_start);
	PN(se->sleep_max);
	PN(se->block_max);
	PN(se->exec_max);
	PN(se->slice_max);
	PN(se->wait_max);
	PN(se->wait_sum);
	P(se->wait_count);
#endif
	P(se->load.weight);
#undef PN
#undef P
}
#endif
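/*
 * Print one row of the "runnable tasks" table: an "R" marker for the
 * task currently running on this runqueue, then comm, PID, vruntime,
 * total context switches and priority, plus schedstats timings when
 * CONFIG_SCHEDSTATS is enabled.
 */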
static void
print_task(struct seq_file *m, struct rq *rq, struct task_struct *p)
{
	if (rq->curr == p)
		SEQ_printf(m, "R");
	else
		SEQ_printf(m, " ");

	SEQ_printf(m, "%15s %5d %9Ld.%06ld %9Ld %5d ",
		p->comm, p->pid,
		SPLIT_NS(p->se.vruntime),
		(long long)(p->nvcsw + p->nivcsw),
		p->prio);
#ifdef CONFIG_SCHEDSTATS
	SEQ_printf(m, "%9Ld.%06ld %9Ld.%06ld %9Ld.%06ld",
		SPLIT_NS(p->se.vruntime),
		SPLIT_NS(p->se.sum_exec_runtime),
		SPLIT_NS(p->se.sum_sleep_runtime));
#else
	SEQ_printf(m, "%15Ld %15Ld %15Ld.%06ld %15Ld.%06ld %15Ld.%06ld",
		0LL, 0LL, 0LL, 0L, 0LL, 0L, 0LL, 0L);
#endif

#ifdef CONFIG_CGROUP_SCHED
	{
		char path[64];

		cgroup_path(task_group(p)->css.cgroup, path, sizeof(path));
		SEQ_printf(m, " %s", path);
	}
#endif

	SEQ_printf(m, "\n");
}
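/*
 * Walk every thread in the system, under tasklist_lock, and print the
 * ones that are queued on this CPU's runqueue.
 */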
static void print_rq(struct seq_file *m, struct rq *rq, int rq_cpu)
{
	struct task_struct *g, *p;
	unsigned long flags;

	SEQ_printf(m,
	"\nrunnable tasks:\n"
	"            task   PID         tree-key  switches  prio"
	"     exec-runtime         sum-exec        sum-sleep\n"
	"------------------------------------------------------"
	"----------------------------------------------------\n");

	read_lock_irqsave(&tasklist_lock, flags);

	do_each_thread(g, p) {
		if (!p->se.on_rq || task_cpu(p) != rq_cpu)
			continue;

		print_task(m, rq, p);
	} while_each_thread(g, p);

	read_unlock_irqrestore(&tasklist_lock, flags);
}
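/*
 * Resolve a task group to its cgroup filesystem path; used below to
 * label per-group cfs_rq/rt_rq sections.
 */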
#if defined(CONFIG_CGROUP_SCHED) && \
	(defined(CONFIG_FAIR_GROUP_SCHED) || defined(CONFIG_RT_GROUP_SCHED))
static void task_group_path(struct task_group *tg, char *buf, int buflen)
{
	/* may be NULL if the underlying cgroup isn't fully-created yet */
	if (!tg->css.cgroup) {
		buf[0] = '\0';
		return;
	}
	cgroup_path(tg->css.cgroup, buf, buflen);
}
#endif
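/*
 * Dump one CFS runqueue: exec clock, the vruntime spread of the rbtree
 * (leftmost to rightmost entity), load and nr_running, plus group
 * scheduling details when configured. The rq lock is held only around
 * the rbtree sampling.
 */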
void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
{
	s64 MIN_vruntime = -1, min_vruntime, max_vruntime = -1,
		spread, rq0_min_vruntime, spread0;
	struct rq *rq = &per_cpu(runqueues, cpu);
	struct sched_entity *last;
	unsigned long flags;

#if defined(CONFIG_CGROUP_SCHED) && defined(CONFIG_FAIR_GROUP_SCHED)
	char path[128];
	struct task_group *tg = cfs_rq->tg;

	task_group_path(tg, path, sizeof(path));

	SEQ_printf(m, "\ncfs_rq[%d]:%s\n", cpu, path);
#elif defined(CONFIG_USER_SCHED) && defined(CONFIG_FAIR_GROUP_SCHED)
	{
		uid_t uid = cfs_rq->tg->uid;
		SEQ_printf(m, "\ncfs_rq[%d] for UID: %u\n", cpu, uid);
	}
#else
	SEQ_printf(m, "\ncfs_rq[%d]:\n", cpu);
#endif
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "exec_clock",
			SPLIT_NS(cfs_rq->exec_clock));

	spin_lock_irqsave(&rq->lock, flags);
	if (cfs_rq->rb_leftmost)
		MIN_vruntime = (__pick_next_entity(cfs_rq))->vruntime;
	last = __pick_last_entity(cfs_rq);
	if (last)
		max_vruntime = last->vruntime;
	min_vruntime = cfs_rq->min_vruntime;
	rq0_min_vruntime = per_cpu(runqueues, 0).cfs.min_vruntime;
	spin_unlock_irqrestore(&rq->lock, flags);
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "MIN_vruntime",
			SPLIT_NS(MIN_vruntime));
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "min_vruntime",
			SPLIT_NS(min_vruntime));
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "max_vruntime",
			SPLIT_NS(max_vruntime));
	spread = max_vruntime - MIN_vruntime;
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "spread",
			SPLIT_NS(spread));
	spread0 = min_vruntime - rq0_min_vruntime;
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "spread0",
			SPLIT_NS(spread0));
	SEQ_printf(m, "  .%-30s: %ld\n", "nr_running", cfs_rq->nr_running);
	SEQ_printf(m, "  .%-30s: %ld\n", "load", cfs_rq->load.weight);

	SEQ_printf(m, "  .%-30s: %d\n", "nr_spread_over",
			cfs_rq->nr_spread_over);
#ifdef CONFIG_FAIR_GROUP_SCHED
#ifdef CONFIG_SMP
	SEQ_printf(m, "  .%-30s: %lu\n", "shares", cfs_rq->shares);
#endif
	print_cfs_group_stats(m, cpu, cfs_rq->tg);
#endif
}
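/*
 * Dump one RT runqueue: task count, throttling state and the rt_time /
 * rt_runtime bandwidth accounting.
 */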
void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq)
{
#if defined(CONFIG_CGROUP_SCHED) && defined(CONFIG_RT_GROUP_SCHED)
	char path[128];
	struct task_group *tg = rt_rq->tg;

	task_group_path(tg, path, sizeof(path));

	SEQ_printf(m, "\nrt_rq[%d]:%s\n", cpu, path);
#else
	SEQ_printf(m, "\nrt_rq[%d]:\n", cpu);
#endif

#define P(x) \
	SEQ_printf(m, "  .%-30s: %Ld\n", #x, (long long)(rt_rq->x))
#define PN(x) \
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", #x, SPLIT_NS(rt_rq->x))

	P(rt_nr_running);
	P(rt_throttled);
	PN(rt_time);
	PN(rt_runtime);

#undef PN
#undef P
}
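/*
 * Per-CPU section of the output: generic runqueue counters and load,
 * optional schedstats counters, then the CFS, RT and runnable-task
 * dumps for this CPU.
 */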
static void print_cpu(struct seq_file *m, int cpu)
{
	struct rq *rq = &per_cpu(runqueues, cpu);

#ifdef CONFIG_X86
	{
		unsigned int freq = cpu_khz ? : 1;

		SEQ_printf(m, "\ncpu#%d, %u.%03u MHz\n",
			   cpu, freq / 1000, (freq % 1000));
	}
#else
	SEQ_printf(m, "\ncpu#%d\n", cpu);
#endif

#define P(x) \
	SEQ_printf(m, "  .%-30s: %Ld\n", #x, (long long)(rq->x))
#define PN(x) \
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", #x, SPLIT_NS(rq->x))

	P(nr_running);
	SEQ_printf(m, "  .%-30s: %lu\n", "load",
		   rq->load.weight);
	P(nr_switches);
	P(nr_load_updates);
	P(nr_uninterruptible);
	SEQ_printf(m, "  .%-30s: %lu\n", "jiffies", jiffies);
	PN(next_balance);
	P(curr->pid);
	PN(clock);
	P(cpu_load[0]);
	P(cpu_load[1]);
	P(cpu_load[2]);
	P(cpu_load[3]);
	P(cpu_load[4]);
#undef P
#undef PN

#ifdef CONFIG_SCHEDSTATS
#define P(n) SEQ_printf(m, "  .%-30s: %d\n", #n, rq->n);

	P(yld_exp_empty);
	P(yld_act_empty);
	P(yld_both_empty);
	P(yld_count);

	P(sched_switch);
	P(sched_count);
	P(sched_goidle);

	P(ttwu_count);
	P(ttwu_local);

	P(bkl_count);
#undef P
#endif

	print_cfs_stats(m, cpu);
	print_rt_stats(m, cpu);

	print_rq(m, rq, cpu);
}
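/*
 * Top-level seq_file handler. With a NULL seq_file, SEQ_printf() falls
 * back to printk(), which is how sysrq_sched_debug_show() below reuses
 * this function for console output.
 */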
static int sched_debug_show(struct seq_file *m, void *v)
{
	u64 now = ktime_to_ns(ktime_get());
	int cpu;

	SEQ_printf(m, "Sched Debug Version: v0.08, %s %.*s\n",
		init_utsname()->release,
		(int)strcspn(init_utsname()->version, " "),
		init_utsname()->version);

	SEQ_printf(m, "now at %Lu.%06ld msecs\n", SPLIT_NS(now));

#define P(x) \
	SEQ_printf(m, "  .%-40s: %Ld\n", #x, (long long)(x))
#define PN(x) \
	SEQ_printf(m, "  .%-40s: %Ld.%06ld\n", #x, SPLIT_NS(x))
	PN(sysctl_sched_latency);
	PN(sysctl_sched_min_granularity);
	PN(sysctl_sched_wakeup_granularity);
	PN(sysctl_sched_child_runs_first);
	P(sysctl_sched_features);
#undef PN
#undef P

	for_each_online_cpu(cpu)
		print_cpu(m, cpu);

	SEQ_printf(m, "\n");

	return 0;
}
static void sysrq_sched_debug_show(void)
{
	sched_debug_show(NULL, NULL);
}
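/* Standard single_open() plumbing exposing sched_debug_show() via procfs. */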
static int sched_debug_open(struct inode *inode, struct file *filp)
{
	return single_open(filp, sched_debug_show, NULL);
}
static const struct file_operations sched_debug_fops = {
	.open		= sched_debug_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
static int __init init_sched_debug_procfs(void)
{
	struct proc_dir_entry *pe;

	pe = proc_create("sched_debug", 0444, NULL, &sched_debug_fops);
	if (!pe)
		return -ENOMEM;
	return 0;
}

__initcall(init_sched_debug_procfs);
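/*
 * Back-end of /proc/<pid>/sched: dump one task's scheduling state and,
 * with CONFIG_SCHEDSTATS, its migration and wakeup counters.
 */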
void proc_sched_show_task(struct task_struct *p, struct seq_file *m)
{
	unsigned long nr_switches;
	unsigned long flags;
	int num_threads = 1;

	if (lock_task_sighand(p, &flags)) {
		num_threads = atomic_read(&p->signal->count);
		unlock_task_sighand(p, &flags);
	}

	SEQ_printf(m, "%s (%d, #threads: %d)\n", p->comm, p->pid, num_threads);
	SEQ_printf(m,
		"---------------------------------------------------------\n");
#define __P(F) \
	SEQ_printf(m, "%-35s:%21Ld\n", #F, (long long)F)
#define P(F) \
	SEQ_printf(m, "%-35s:%21Ld\n", #F, (long long)p->F)
#define __PN(F) \
	SEQ_printf(m, "%-35s:%14Ld.%06ld\n", #F, SPLIT_NS((long long)F))
#define PN(F) \
	SEQ_printf(m, "%-35s:%14Ld.%06ld\n", #F, SPLIT_NS((long long)p->F))

	PN(se.exec_start);
	PN(se.vruntime);
	PN(se.sum_exec_runtime);
	PN(se.avg_overlap);

	nr_switches = p->nvcsw + p->nivcsw;

#ifdef CONFIG_SCHEDSTATS
	PN(se.wait_start);
	PN(se.sleep_start);
	PN(se.block_start);
	PN(se.sleep_max);
	PN(se.block_max);
	PN(se.exec_max);
	PN(se.slice_max);
	PN(se.wait_max);
	PN(se.wait_sum);
	P(se.wait_count);
	P(sched_info.bkl_count);
	P(se.nr_migrations);
	P(se.nr_migrations_cold);
	P(se.nr_failed_migrations_affine);
	P(se.nr_failed_migrations_running);
	P(se.nr_failed_migrations_hot);
	P(se.nr_forced_migrations);
	P(se.nr_forced2_migrations);
	P(se.nr_wakeups);
	P(se.nr_wakeups_sync);
	P(se.nr_wakeups_migrate);
	P(se.nr_wakeups_local);
	P(se.nr_wakeups_remote);
	P(se.nr_wakeups_affine);
	P(se.nr_wakeups_affine_attempts);
	P(se.nr_wakeups_passive);
	P(se.nr_wakeups_idle);

	{
		u64 avg_atom, avg_per_cpu;

		avg_atom = p->se.sum_exec_runtime;
		if (nr_switches)
			do_div(avg_atom, nr_switches);
		else
			avg_atom = -1LL;

		avg_per_cpu = p->se.sum_exec_runtime;
		if (p->se.nr_migrations) {
			avg_per_cpu = div64_u64(avg_per_cpu,
						p->se.nr_migrations);
		} else {
			avg_per_cpu = -1LL;
		}

		__PN(avg_atom);
		__PN(avg_per_cpu);
	}
#endif
	__P(nr_switches);
	SEQ_printf(m, "%-35s:%21Ld\n",
		   "nr_voluntary_switches", (long long)p->nvcsw);
	SEQ_printf(m, "%-35s:%21Ld\n",
		   "nr_involuntary_switches", (long long)p->nivcsw);

	P(se.load.weight);
	P(policy);
	P(prio);
#undef PN
#undef __PN
#undef P
#undef __P

	{
		unsigned int this_cpu = raw_smp_processor_id();
		u64 t0, t1;

		t0 = cpu_clock(this_cpu);
		t1 = cpu_clock(this_cpu);
		SEQ_printf(m, "%-35s:%21Ld\n",
			   "clock-delta", (long long)(t1-t0));
	}
}
void proc_sched_set_task(struct task_struct *p)
{
#ifdef CONFIG_SCHEDSTATS
	p->se.wait_max				= 0;
	p->se.wait_sum				= 0;
	p->se.wait_count			= 0;
	p->se.sleep_max				= 0;
	p->se.sum_sleep_runtime			= 0;
	p->se.block_max				= 0;
	p->se.exec_max				= 0;
	p->se.slice_max				= 0;
	p->se.nr_migrations			= 0;
	p->se.nr_migrations_cold		= 0;
	p->se.nr_failed_migrations_affine	= 0;
	p->se.nr_failed_migrations_running	= 0;
	p->se.nr_failed_migrations_hot		= 0;
	p->se.nr_forced_migrations		= 0;
	p->se.nr_forced2_migrations		= 0;
	p->se.nr_wakeups			= 0;
	p->se.nr_wakeups_sync			= 0;
	p->se.nr_wakeups_migrate		= 0;
	p->se.nr_wakeups_local			= 0;
	p->se.nr_wakeups_remote			= 0;
	p->se.nr_wakeups_affine			= 0;
	p->se.nr_wakeups_affine_attempts	= 0;
	p->se.nr_wakeups_passive		= 0;
	p->se.nr_wakeups_idle			= 0;
	p->sched_info.bkl_count			= 0;
#endif
	p->se.sum_exec_runtime			= 0;
	p->se.prev_sum_exec_runtime		= 0;
	p->nvcsw				= 0;
	p->nivcsw				= 0;
}