2 #include <linux/slab.h>
4 #include <linux/seq_file.h>
5 #include <linux/proc_fs.h>
/*
 * bump this up when changing the output format or the meaning of an existing
 * format, so that tools can adapt (or abort)
 */
13 #define SCHEDSTAT_VERSION 15
16 static inline void show_easstat(struct seq_file *seq, struct eas_stats *stats)
18 /* eas-specific runqueue stats */
19 seq_printf(seq, "eas %llu %llu %llu %llu %llu %llu ",
20 stats->sis_attempts, stats->sis_idle, stats->sis_cache_affine,
21 stats->sis_suff_cap, stats->sis_idle_cpu, stats->sis_count);
23 seq_printf(seq, "%llu %llu %llu %llu %llu %llu %llu ",
24 stats->secb_attempts, stats->secb_sync, stats->secb_idle_bt,
25 stats->secb_insuff_cap, stats->secb_no_nrg_sav,
26 stats->secb_nrg_sav, stats->secb_count);
28 seq_printf(seq, "%llu %llu %llu %llu %llu ",
29 stats->fbt_attempts, stats->fbt_no_cpu, stats->fbt_no_sd,
30 stats->fbt_pref_idle, stats->fbt_count);
32 seq_printf(seq, "%llu %llu\n",
33 stats->cas_attempts, stats->cas_count);
37 static int show_schedstat(struct seq_file *seq, void *v)
42 seq_printf(seq, "version %d\n", SCHEDSTAT_VERSION);
43 seq_printf(seq, "timestamp %lu\n", jiffies);
47 struct sched_domain *sd;
50 cpu = (unsigned long)(v - 2);
53 /* runqueue-specific stats */
55 "cpu%d %u 0 %u %u %u %u %llu %llu %lu",
57 rq->sched_count, rq->sched_goidle,
58 rq->ttwu_count, rq->ttwu_local,
60 rq->rq_sched_info.run_delay, rq->rq_sched_info.pcount);
62 seq_printf(seq, "\n");
65 show_easstat(seq, &rq->eas_stats);
67 /* domain-specific stats */
69 for_each_domain(cpu, sd) {
70 enum cpu_idle_type itype;
72 seq_printf(seq, "domain%d %*pb", dcount++,
73 cpumask_pr_args(sched_domain_span(sd)));
74 for (itype = CPU_IDLE; itype < CPU_MAX_IDLE_TYPES;
76 seq_printf(seq, " %u %u %u %u %u %u %u %u",
78 sd->lb_balanced[itype],
80 sd->lb_imbalance[itype],
82 sd->lb_hot_gained[itype],
83 sd->lb_nobusyq[itype],
84 sd->lb_nobusyg[itype]);
87 " %u %u %u %u %u %u %u %u %u %u %u %u\n",
88 sd->alb_count, sd->alb_failed, sd->alb_pushed,
89 sd->sbe_count, sd->sbe_balanced, sd->sbe_pushed,
90 sd->sbf_count, sd->sbf_balanced, sd->sbf_pushed,
91 sd->ttwu_wake_remote, sd->ttwu_move_affine,
92 sd->ttwu_move_balance);
94 show_easstat(seq, &sd->eas_stats);
/*
 * This iterator needs some explanation.
 * It returns 1 for the header position.
 * This means 2 is cpu 0.
 * In a hotplugged system some cpus, including cpu 0, may be missing so we have
 * to use cpumask_* to iterate over the cpus.
 */
109 static void *schedstat_start(struct seq_file *file, loff_t *offset)
111 unsigned long n = *offset;
119 n = cpumask_next(n - 1, cpu_online_mask);
121 n = cpumask_first(cpu_online_mask);
126 return (void *)(unsigned long)(n + 2);
130 static void *schedstat_next(struct seq_file *file, void *data, loff_t *offset)
133 return schedstat_start(file, offset);
136 static void schedstat_stop(struct seq_file *file, void *data)
140 static const struct seq_operations schedstat_sops = {
141 .start = schedstat_start,
142 .next = schedstat_next,
143 .stop = schedstat_stop,
144 .show = show_schedstat,
147 static int schedstat_open(struct inode *inode, struct file *file)
149 return seq_open(file, &schedstat_sops);
152 static const struct file_operations proc_schedstat_operations = {
153 .open = schedstat_open,
156 .release = seq_release,
159 static int __init proc_schedstat_init(void)
161 proc_create("schedstat", 0, NULL, &proc_schedstat_operations);
164 subsys_initcall(proc_schedstat_init);