/*
 * trace event based perf event profiling/tracing
 *
 * Copyright (C) 2009 Red Hat Inc, Peter Zijlstra
 * Copyright (C) 2009-2010 Frederic Weisbecker <fweisbec@gmail.com>
 */

#include <linux/module.h>
#include <linux/kprobes.h>
#include "trace.h"

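/*
 * One per-CPU scratch buffer for each perf recursion context (task,
 * softirq, hardirq, NMI), shared by all trace events that perf records.
 */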
static char __percpu *perf_trace_buf[PERF_NR_CONTEXTS];

/*
 * Force it to be aligned to unsigned long to avoid misaligned access
 * surprises.
 */
typedef typeof(unsigned long [PERF_MAX_TRACE_SIZE / sizeof(unsigned long)])
        perf_trace_t;

/* Count the events in use (per event id, not per instance) */
static int total_ref_count;

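/*
 * Decide whether the calling task is allowed to attach a perf event to
 * this trace event; raw tracepoint payloads are only handed out to
 * sufficiently privileged callers.
 */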
static int perf_trace_event_perm(struct trace_event_call *tp_event,
                                 struct perf_event *p_event)
{
        if (tp_event->perf_perm) {
                int ret = tp_event->perf_perm(tp_event, p_event);
                if (ret)
                        return ret;
        }

        /*
         * We checked and allowed to create parent,
         * allow children without checking.
         */
        if (p_event->parent)
                return 0;

        /*
         * It's ok to check current process (owner) permissions in here,
         * because code below is called only via perf_event_open syscall.
         */

        /* The ftrace function trace is allowed only for root. */
        if (ftrace_event_is_function(tp_event)) {
                if (perf_paranoid_tracepoint_raw() && !capable(CAP_SYS_ADMIN))
                        return -EPERM;

                /*
                 * We don't allow user space callchains for the function trace
                 * event, due to issues with page faults while tracing the page
                 * fault handler and its overall trickiness.
                 */
                if (!p_event->attr.exclude_callchain_user)
                        return -EINVAL;

                /*
                 * Same reason to disable user stack dump as for user space
                 * callchains above.
                 */
                if (p_event->attr.sample_type & PERF_SAMPLE_STACK_USER)
                        return -EINVAL;
        }

        /* No tracing, just counting, so no obvious leak. */
        if (!(p_event->attr.sample_type & PERF_SAMPLE_RAW))
                return 0;

        /* Some events are ok to be traced by non-root users... */
        if (p_event->attach_state == PERF_ATTACH_TASK) {
                if (tp_event->flags & TRACE_EVENT_FL_CAP_ANY)
                        return 0;
        }

        /*
         * ...otherwise raw tracepoint data can be a severe data leak,
         * only allow root to have these.
         */
        if (perf_paranoid_tracepoint_raw() && !capable(CAP_SYS_ADMIN))
                return -EPERM;

        return 0;
}

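/*
 * First user of an event id: allocate the per-CPU hlist of attached perf
 * events and, if this is the first trace event used by perf at all, the
 * shared per-context scratch buffers. Subsequent users only take a
 * reference.
 */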
static int perf_trace_event_reg(struct trace_event_call *tp_event,
                                struct perf_event *p_event)
{
        struct hlist_head __percpu *list;
        int ret = -ENOMEM;
        int cpu;

        p_event->tp_event = tp_event;
        if (tp_event->perf_refcount++ > 0)
                return 0;

        list = alloc_percpu(struct hlist_head);
        if (!list)
                goto fail;

        for_each_possible_cpu(cpu)
                INIT_HLIST_HEAD(per_cpu_ptr(list, cpu));

        tp_event->perf_events = list;

        if (!total_ref_count) {
                char __percpu *buf;
                int i;

                for (i = 0; i < PERF_NR_CONTEXTS; i++) {
                        buf = (char __percpu *)alloc_percpu(perf_trace_t);
                        if (!buf)
                                goto fail;

                        perf_trace_buf[i] = buf;
                }
        }

        ret = tp_event->class->reg(tp_event, TRACE_REG_PERF_REGISTER, NULL);
        if (ret)
                goto fail;

        total_ref_count++;
        return 0;

fail:
        if (!total_ref_count) {
                int i;

                for (i = 0; i < PERF_NR_CONTEXTS; i++) {
                        free_percpu(perf_trace_buf[i]);
                        perf_trace_buf[i] = NULL;
                }
        }

        if (!--tp_event->perf_refcount) {
                free_percpu(tp_event->perf_events);
                tp_event->perf_events = NULL;
        }

        return ret;
}

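/*
 * Tear down in reverse order: unregister the tracepoint callback, wait
 * for in-flight probes to finish, then free the per-CPU lists and, once
 * the last user is gone, the shared scratch buffers.
 */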
static void perf_trace_event_unreg(struct perf_event *p_event)
{
        struct trace_event_call *tp_event = p_event->tp_event;
        int i;

        if (--tp_event->perf_refcount > 0)
                goto out;

        tp_event->class->reg(tp_event, TRACE_REG_PERF_UNREGISTER, NULL);

        /*
         * Ensure our callback won't be called anymore. The buffers
         * will be freed after that.
         */
        tracepoint_synchronize_unregister();

        free_percpu(tp_event->perf_events);
        tp_event->perf_events = NULL;

        if (!--total_ref_count) {
                for (i = 0; i < PERF_NR_CONTEXTS; i++) {
                        free_percpu(perf_trace_buf[i]);
                        perf_trace_buf[i] = NULL;
                }
        }
out:
        module_put(tp_event->mod);
}

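/*
 * OPEN and CLOSE are forwarded to the event class so that classes such as
 * the function trace event below can set up and tear down per-event state.
 */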
static int perf_trace_event_open(struct perf_event *p_event)
{
        struct trace_event_call *tp_event = p_event->tp_event;
        return tp_event->class->reg(tp_event, TRACE_REG_PERF_OPEN, p_event);
}

static void perf_trace_event_close(struct perf_event *p_event)
{
        struct trace_event_call *tp_event = p_event->tp_event;
        tp_event->class->reg(tp_event, TRACE_REG_PERF_CLOSE, p_event);
}

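/*
 * Full setup of one perf event: permission check, then the (ref-counted)
 * registration of the trace event, then the per-event OPEN callback; an
 * OPEN failure rolls the registration back.
 */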
static int perf_trace_event_init(struct trace_event_call *tp_event,
                                 struct perf_event *p_event)
{
        int ret;

        ret = perf_trace_event_perm(tp_event, p_event);
        if (ret)
                return ret;

        ret = perf_trace_event_reg(tp_event, p_event);
        if (ret)
                return ret;

        ret = perf_trace_event_open(p_event);
        if (ret) {
                perf_trace_event_unreg(p_event);
                return ret;
        }

        return 0;
}

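/*
 * Entry point from the perf core: find the trace event whose id matches
 * attr.config under event_mutex, pin its module and initialize it.
 */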
int perf_trace_init(struct perf_event *p_event)
{
        struct trace_event_call *tp_event;
        u64 event_id = p_event->attr.config;
        int ret = -EINVAL;

        mutex_lock(&event_mutex);
        list_for_each_entry(tp_event, &ftrace_events, list) {
                if (tp_event->event.type == event_id &&
                    tp_event->class && tp_event->class->reg &&
                    try_module_get(tp_event->mod)) {
                        ret = perf_trace_event_init(tp_event, p_event);
                        if (ret)
                                module_put(tp_event->mod);
                        break;
                }
        }
        mutex_unlock(&event_mutex);

        return ret;
}

void perf_trace_destroy(struct perf_event *p_event)
{
        mutex_lock(&event_mutex);
        perf_trace_event_close(p_event);
        perf_trace_event_unreg(p_event);
        mutex_unlock(&event_mutex);
}

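/*
 * The event is being scheduled in on this CPU: hook it into the per-CPU
 * hlist that the tracepoint callback walks.
 */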
int perf_trace_add(struct perf_event *p_event, int flags)
{
        struct trace_event_call *tp_event = p_event->tp_event;
        struct hlist_head __percpu *pcpu_list;
        struct hlist_head *list;

        pcpu_list = tp_event->perf_events;
        if (WARN_ON_ONCE(!pcpu_list))
                return -EINVAL;

        if (!(flags & PERF_EF_START))
                p_event->hw.state = PERF_HES_STOPPED;

        list = this_cpu_ptr(pcpu_list);
        hlist_add_head_rcu(&p_event->hlist_entry, list);

        return tp_event->class->reg(tp_event, TRACE_REG_PERF_ADD, p_event);
}

void perf_trace_del(struct perf_event *p_event, int flags)
{
        struct trace_event_call *tp_event = p_event->tp_event;
        hlist_del_rcu(&p_event->hlist_entry);
        tp_event->class->reg(tp_event, TRACE_REG_PERF_DEL, p_event);
}

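/*
 * Reserve the raw-data buffer of the current recursion context and
 * pre-fill the common trace_entry header. Returns NULL when the record is
 * too large or when we are already recursing in this context, in which
 * case the caller must drop the sample.
 */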
void *perf_trace_buf_prepare(int size, unsigned short type,
                             struct pt_regs **regs, int *rctxp)
{
        struct trace_entry *entry;
        unsigned long flags;
        char *raw_data;
        int pc;

        BUILD_BUG_ON(PERF_MAX_TRACE_SIZE % sizeof(unsigned long));

        if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE,
                      "perf buffer not large enough"))
                return NULL;

        pc = preempt_count();

        *rctxp = perf_swevent_get_recursion_context();
        if (*rctxp < 0)
                return NULL;

        if (regs)
                *regs = this_cpu_ptr(&__perf_regs[*rctxp]);
        raw_data = this_cpu_ptr(perf_trace_buf[*rctxp]);

        /* zero the dead bytes from align to not leak stack to user */
        memset(&raw_data[size - sizeof(u64)], 0, sizeof(u64));

        entry = (struct trace_entry *)raw_data;
        local_save_flags(flags);
        tracing_generic_entry_update(entry, flags, pc);
        entry->type = type;

        return raw_data;
}
EXPORT_SYMBOL_GPL(perf_trace_buf_prepare);
NOKPROBE_SYMBOL(perf_trace_buf_prepare);

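/*
 * Roughly how callers pair the two halves (an illustrative sketch, not the
 * generated tracepoint handler verbatim; "size" and "head" stand for the
 * caller's record size and its this_cpu perf_events list):
 *
 *      entry = perf_trace_buf_prepare(size, event_call->event.type,
 *                                     &regs, &rctx);
 *      if (!entry)
 *              return;
 *      ... fill in the event-specific fields of *entry ...
 *      perf_trace_buf_submit(entry, size, rctx, 0, 1, regs, head, NULL);
 */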
#ifdef CONFIG_FUNCTION_TRACER
static void
perf_ftrace_function_call(unsigned long ip, unsigned long parent_ip,
                          struct ftrace_ops *ops, struct pt_regs *pt_regs)
{
        struct ftrace_entry *entry;
        struct hlist_head *head;
        struct pt_regs regs;
        int rctx;

        head = this_cpu_ptr(event_function.perf_events);
        if (hlist_empty(head))
                return;

#define ENTRY_SIZE (ALIGN(sizeof(struct ftrace_entry) + sizeof(u32), \
                    sizeof(u64)) - sizeof(u32))

        BUILD_BUG_ON(ENTRY_SIZE > PERF_MAX_TRACE_SIZE);

        perf_fetch_caller_regs(&regs);

        entry = perf_trace_buf_prepare(ENTRY_SIZE, TRACE_FN, NULL, &rctx);
        if (!entry)
                return;

        entry->ip = ip;
        entry->parent_ip = parent_ip;
        perf_trace_buf_submit(entry, ENTRY_SIZE, rctx, 0,
                              1, &regs, head, NULL);

#undef ENTRY_SIZE
}

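/*
 * Each perf function-trace event carries its own ftrace_ops, so events can
 * be registered, filtered, enabled and disabled independently of each other.
 */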
static int perf_ftrace_function_register(struct perf_event *event)
{
        struct ftrace_ops *ops = &event->ftrace_ops;

        ops->flags |= FTRACE_OPS_FL_CONTROL;
        ops->func = perf_ftrace_function_call;
        return register_ftrace_function(ops);
}

static int perf_ftrace_function_unregister(struct perf_event *event)
{
        struct ftrace_ops *ops = &event->ftrace_ops;
        int ret = unregister_ftrace_function(ops);

        ftrace_free_filter(ops);
        return ret;
}

static void perf_ftrace_function_enable(struct perf_event *event)
{
        ftrace_function_local_enable(&event->ftrace_ops);
}

static void perf_ftrace_function_disable(struct perf_event *event)
{
        ftrace_function_local_disable(&event->ftrace_ops);
}

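/*
 * reg() callback for the function trace event: TRACE_REG_REGISTER and
 * TRACE_REG_UNREGISTER (the non-perf path) need no work here; OPEN/CLOSE
 * register and unregister the per-event ftrace_ops, and ADD/DEL enable or
 * disable it on the local CPU.
 */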
int perf_ftrace_event_register(struct trace_event_call *call,
                               enum trace_reg type, void *data)
{
        switch (type) {
        case TRACE_REG_REGISTER:
        case TRACE_REG_UNREGISTER:
                return 0;
        case TRACE_REG_PERF_REGISTER:
        case TRACE_REG_PERF_UNREGISTER:
                return 0;
        case TRACE_REG_PERF_OPEN:
                return perf_ftrace_function_register(data);
        case TRACE_REG_PERF_CLOSE:
                return perf_ftrace_function_unregister(data);
        case TRACE_REG_PERF_ADD:
                perf_ftrace_function_enable(data);
                return 0;
        case TRACE_REG_PERF_DEL:
                perf_ftrace_function_disable(data);
                return 0;
        }

        return -EINVAL;
}
#endif /* CONFIG_FUNCTION_TRACER */