/*
 * Workqueue statistical tracer.
 *
 * Copyright (C) 2008 Frederic Weisbecker <fweisbec@gmail.com>
 *
 */


#include <trace/events/workqueue.h>
#include <linux/list.h>
#include <linux/percpu.h>
#include "trace_stat.h"
#include "trace.h"
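
/*
 * The counters gathered here are reported through the trace_stat
 * framework; with the usual debugfs mount this is
 * /sys/kernel/debug/tracing/trace_stat/workqueues. Illustrative
 * output (the values are made up), matching the format strings in
 * workqueue_stat_show() and workqueue_stat_headers() below:
 *
 * # CPU  INSERTED  EXECUTED   NAME
 * # |      |         |          |
 *
 *   0   4565       4565       events/0
 *   0      0          0       cpuset
 */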

/* A cpu workqueue thread */
struct cpu_workqueue_stats {
	struct list_head	list;
	/* Whether this is the first entry on its cpu's list (cpu headers) */
	bool			first_entry;
	int			cpu;
	pid_t			pid;
	/* Insertions can come from interrupt or user context: keep it atomic */
	atomic_t		inserted;
	/*
	 * Doesn't need to be atomic: works are serialized by a single
	 * workqueue thread on a single CPU.
	 */
	unsigned int		executed;
};

/* List of workqueue threads on one cpu */
struct workqueue_global_stats {
	struct list_head	list;
	spinlock_t		lock;
};

/*
 * No global lock is needed: this is allocated before any workqueue is
 * created, and never freed.
 */
static DEFINE_PER_CPU(struct workqueue_global_stats, all_workqueue_stat);
#define workqueue_cpu_stat(cpu)	(&per_cpu(all_workqueue_stat, cpu))
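
/*
 * Locking scheme, as implemented below: each per-cpu list is protected
 * by the lock in its workqueue_global_stats entry, always taken with
 * spin_lock_irqsave() because the insertion probe can fire from
 * interrupt context.
 */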

/* Insertion of a work */
static void
probe_workqueue_insertion(struct task_struct *wq_thread,
			  struct work_struct *work)
{
	/* A cpu workqueue thread only executes on one cpu */
	int cpu = cpumask_first(&wq_thread->cpus_allowed);
	struct cpu_workqueue_stats *node;
	unsigned long flags;

	spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
	list_for_each_entry(node, &workqueue_cpu_stat(cpu)->list, list) {
		if (node->pid == wq_thread->pid) {
			atomic_inc(&node->inserted);
			goto found;
		}
	}
	pr_debug("trace_workqueue: entry not found\n");
found:
	spin_unlock_irqrestore(&workqueue_cpu_stat(cpu)->lock, flags);
}

/* Execution of a work */
static void
probe_workqueue_execution(struct task_struct *wq_thread,
			  struct work_struct *work)
{
	int cpu = cpumask_first(&wq_thread->cpus_allowed);
	struct cpu_workqueue_stats *node;
	unsigned long flags;

	spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
	list_for_each_entry(node, &workqueue_cpu_stat(cpu)->list, list) {
		if (node->pid == wq_thread->pid) {
			node->executed++;
			goto found;
		}
	}
	pr_debug("trace_workqueue: entry not found\n");
found:
	spin_unlock_irqrestore(&workqueue_cpu_stat(cpu)->lock, flags);
}

/* Creation of a cpu workqueue thread */
static void probe_workqueue_creation(struct task_struct *wq_thread, int cpu)
{
	struct cpu_workqueue_stats *cws;
	unsigned long flags;

	WARN_ON(cpu < 0);

	/* Workqueues are sometimes created in atomic context */
	cws = kzalloc(sizeof(struct cpu_workqueue_stats), GFP_ATOMIC);
	if (!cws) {
		pr_warning("trace_workqueue: not enough memory\n");
		return;
	}
	INIT_LIST_HEAD(&cws->list);
	cws->cpu = cpu;
	cws->pid = wq_thread->pid;

	spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
	if (list_empty(&workqueue_cpu_stat(cpu)->list))
		cws->first_entry = true;
	list_add_tail(&cws->list, &workqueue_cpu_stat(cpu)->list);
	spin_unlock_irqrestore(&workqueue_cpu_stat(cpu)->lock, flags);
}

/* Destruction of a cpu workqueue thread */
static void probe_workqueue_destruction(struct task_struct *wq_thread)
{
	/* A workqueue thread only executes on one cpu */
	int cpu = cpumask_first(&wq_thread->cpus_allowed);
	struct cpu_workqueue_stats *node, *next;
	unsigned long flags;

	spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
	list_for_each_entry_safe(node, next, &workqueue_cpu_stat(cpu)->list,
								list) {
		if (node->pid == wq_thread->pid) {
			list_del(&node->list);
			kfree(node);
			goto found;
		}
	}

	pr_debug("trace_workqueue: couldn't find workqueue to destroy\n");
found:
	spin_unlock_irqrestore(&workqueue_cpu_stat(cpu)->lock, flags);
}
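
/*
 * Note: the stats node is freed together with the workqueue thread, so
 * counters for a destroyed cpu workqueue disappear from the output.
 */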

static struct cpu_workqueue_stats *workqueue_stat_start_cpu(int cpu)
{
	unsigned long flags;
	struct cpu_workqueue_stats *ret = NULL;

	spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);

	if (!list_empty(&workqueue_cpu_stat(cpu)->list))
		ret = list_entry(workqueue_cpu_stat(cpu)->list.next,
				 struct cpu_workqueue_stats, list);

	spin_unlock_irqrestore(&workqueue_cpu_stat(cpu)->lock, flags);

	return ret;
}

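/*
 * Iterator callbacks for the trace_stat framework: stat_start() returns
 * the first entry (here, the first node of the first non-empty per-cpu
 * list), stat_next() walks to the following entry, crossing over to the
 * next possible cpu when a list is exhausted, and stat_show() prints
 * one entry per line.
 */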
static void *workqueue_stat_start(struct tracer_stat *trace)
{
	int cpu;
	void *ret = NULL;

	for_each_possible_cpu(cpu) {
		ret = workqueue_stat_start_cpu(cpu);
		if (ret)
			return ret;
	}
	return NULL;
}

static void *workqueue_stat_next(void *prev, int idx)
{
	struct cpu_workqueue_stats *prev_cws = prev;
	int cpu = prev_cws->cpu;
	unsigned long flags;
	void *ret = NULL;

	spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
	if (list_is_last(&prev_cws->list, &workqueue_cpu_stat(cpu)->list)) {
		spin_unlock_irqrestore(&workqueue_cpu_stat(cpu)->lock, flags);
		do {
			cpu = cpumask_next(cpu, cpu_possible_mask);
			if (cpu >= nr_cpu_ids)
				return NULL;
		} while (!(ret = workqueue_stat_start_cpu(cpu)));
		return ret;
	}
	spin_unlock_irqrestore(&workqueue_cpu_stat(cpu)->lock, flags);

	return list_entry(prev_cws->list.next, struct cpu_workqueue_stats,
			  list);
}

static int workqueue_stat_show(struct seq_file *s, void *p)
{
	struct cpu_workqueue_stats *cws = p;
	unsigned long flags;
	int cpu = cws->cpu;
	struct pid *pid;
	struct task_struct *tsk;

	spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
	if (&cws->list == workqueue_cpu_stat(cpu)->list.next)
		seq_printf(s, "\n");
	spin_unlock_irqrestore(&workqueue_cpu_stat(cpu)->lock, flags);

	pid = find_get_pid(cws->pid);
	if (pid) {
		tsk = get_pid_task(pid, PIDTYPE_PID);
		if (tsk) {
			seq_printf(s, "%3d %6d     %6u       %s\n", cws->cpu,
				   atomic_read(&cws->inserted), cws->executed,
				   tsk->comm);
			put_task_struct(tsk);
		}
		put_pid(pid);
	}

	return 0;
}

static int workqueue_stat_headers(struct seq_file *s)
{
	seq_printf(s, "# CPU  INSERTED  EXECUTED   NAME\n");
	seq_printf(s, "# |      |         |          |\n");
	return 0;
}

struct tracer_stat workqueue_stats __read_mostly = {
	.name = "workqueues",
	.stat_start = workqueue_stat_start,
	.stat_next = workqueue_stat_next,
	.stat_show = workqueue_stat_show,
	.stat_headers = workqueue_stat_headers
};
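
/*
 * register_stat_tracer() below hooks this descriptor into the
 * trace_stat framework, which creates the "workqueues" file under
 * trace_stat/ in the tracing directory and drives the
 * stat_start/stat_next/stat_show callbacks when the file is read.
 */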
int __init stat_workqueue_init(void)
{
	if (register_stat_tracer(&workqueue_stats)) {
		pr_warning("Unable to register workqueue stat tracer\n");
		return 1;
	}

	return 0;
}
fs_initcall(stat_workqueue_init);
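
/*
 * fs_initcall() runs after the early initcalls, so by the time the
 * stat file is registered, trace_workqueue_early_init() below has
 * already hooked the tracepoints and initialized the per-cpu lists.
 */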

/*
 * Workqueues are created very early, just after pre-smp initcalls.
 * So we must register our tracepoints at this stage.
 */
int __init trace_workqueue_early_init(void)
{
	int ret, cpu;

	ret = register_trace_workqueue_insertion(probe_workqueue_insertion);
	if (ret)
		goto out;

	ret = register_trace_workqueue_execution(probe_workqueue_execution);
	if (ret)
		goto no_insertion;

	ret = register_trace_workqueue_creation(probe_workqueue_creation);
	if (ret)
		goto no_execution;

	ret = register_trace_workqueue_destruction(probe_workqueue_destruction);
	if (ret)
		goto no_creation;

	for_each_possible_cpu(cpu) {
		spin_lock_init(&workqueue_cpu_stat(cpu)->lock);
		INIT_LIST_HEAD(&workqueue_cpu_stat(cpu)->list);
	}

	return 0;

no_creation:
	unregister_trace_workqueue_creation(probe_workqueue_creation);
no_execution:
	unregister_trace_workqueue_execution(probe_workqueue_execution);
no_insertion:
	unregister_trace_workqueue_insertion(probe_workqueue_insertion);
out:
	pr_warning("trace_workqueue: unable to trace workqueues\n");

	return 1;
}
early_initcall(trace_workqueue_early_init);