/*
 * Copyright (C) 2008 Red Hat Inc, Steven Rostedt <srostedt@redhat.com>
 *
 *  - Added format output of fields of the trace point.
 *    This was based off of work by Tom Zanussi <tzanussi@gmail.com>.
 */

#include <linux/workqueue.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/ctype.h>
#include <linux/slab.h>
#include <linux/delay.h>

#include <asm/setup.h>

#include "trace_output.h"

#define TRACE_SYSTEM "TRACE_SYSTEM"

DEFINE_MUTEX(event_mutex);

DEFINE_MUTEX(event_storage_mutex);
EXPORT_SYMBOL_GPL(event_storage_mutex);

char event_storage[EVENT_STORAGE_SIZE];
EXPORT_SYMBOL_GPL(event_storage);

LIST_HEAD(ftrace_events);
static LIST_HEAD(ftrace_common_fields);

#define GFP_TRACE (GFP_KERNEL | __GFP_ZERO)

static struct kmem_cache *field_cachep;
static struct kmem_cache *file_cachep;

#define SYSTEM_FL_FREE_NAME        (1 << 31)

static inline int system_refcount(struct event_subsystem *system)
{
        return system->ref_count & ~SYSTEM_FL_FREE_NAME;
}

static int system_refcount_inc(struct event_subsystem *system)
{
        return (system->ref_count++) & ~SYSTEM_FL_FREE_NAME;
}

static int system_refcount_dec(struct event_subsystem *system)
{
        return (--system->ref_count) & ~SYSTEM_FL_FREE_NAME;
}

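/*
 * Illustrative note (not in the original source): SYSTEM_FL_FREE_NAME
 * lives in the top bit of ref_count, so the helpers above mask it out
 * to recover the true count. A subsystem whose name was dynamically
 * allocated might carry:
 *
 *	system->ref_count = 1 | SYSTEM_FL_FREE_NAME;
 *
 * system_refcount() then reports 1, while the flag bit survives every
 * inc/dec and is checked again at free time to decide whether
 * system->name must be kfree()d.
 */
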
/* Double loops, do not use break, only goto's work */
#define do_for_each_event_file(tr, file)			\
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {	\
		list_for_each_entry(file, &tr->events, list)

#define do_for_each_event_file_safe(tr, file)			\
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {	\
		struct ftrace_event_file *___n;			\
		list_for_each_entry_safe(file, ___n, &tr->events, list)

#define while_for_each_event_file()		\
	}

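/*
 * Usage sketch (illustrative only; some_condition() is hypothetical):
 * the macros above open a double loop, so they must be closed with
 * while_for_each_event_file(), and an early exit needs a goto, since
 * break would only leave the inner loop.
 */
#if 0
	struct trace_array *tr;
	struct ftrace_event_file *file;

	do_for_each_event_file(tr, file) {
		if (some_condition(file))
			goto out;	/* break would not exit both loops */
	} while_for_each_event_file();
 out:
	;
#endif
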
static struct list_head *
trace_get_fields(struct ftrace_event_call *event_call)
{
        if (!event_call->class->get_fields)
                return &event_call->class->fields;
        return event_call->class->get_fields(event_call);
}

static struct ftrace_event_field *
__find_event_field(struct list_head *head, char *name)
{
        struct ftrace_event_field *field;

        list_for_each_entry(field, head, link) {
                if (!strcmp(field->name, name))
                        return field;
        }

        return NULL;
}

struct ftrace_event_field *
trace_find_event_field(struct ftrace_event_call *call, char *name)
{
        struct ftrace_event_field *field;
        struct list_head *head;

        field = __find_event_field(&ftrace_common_fields, name);
        if (field)
                return field;

        head = trace_get_fields(call);
        return __find_event_field(head, name);
}

static int __trace_define_field(struct list_head *head, const char *type,
                                const char *name, int offset, int size,
                                int is_signed, int filter_type)
{
        struct ftrace_event_field *field;

        field = kmem_cache_alloc(field_cachep, GFP_TRACE);
        if (!field)
                return -ENOMEM;

        field->name = name;
        field->type = type;

        if (filter_type == FILTER_OTHER)
                field->filter_type = filter_assign_type(type);
        else
                field->filter_type = filter_type;

        field->offset = offset;
        field->size = size;
        field->is_signed = is_signed;

        list_add(&field->link, head);

        return 0;
}

int trace_define_field(struct ftrace_event_call *call, const char *type,
                       const char *name, int offset, int size, int is_signed,
                       int filter_type)
{
        struct list_head *head;

        if (WARN_ON(!call->class))
                return 0;

        head = trace_get_fields(call);
        return __trace_define_field(head, type, name, offset, size,
                                    is_signed, filter_type);
}
EXPORT_SYMBOL_GPL(trace_define_field);

#define __common_field(type, item)					\
	ret = __trace_define_field(&ftrace_common_fields, #type,	\
				   "common_" #item,			\
				   offsetof(typeof(ent), item),		\
				   sizeof(ent.item),			\
				   is_signed_type(type), FILTER_OTHER);	\
	if (ret)							\
		return ret;

static int trace_define_common_fields(void)
{
        int ret;
        struct trace_entry ent;

        __common_field(unsigned short, type);
        __common_field(unsigned char, flags);
        __common_field(unsigned char, preempt_count);
        __common_field(int, pid);

        return ret;
}

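/*
 * Illustration (values are typical, shown only as an example): the
 * common fields defined above are what appear at the top of every
 * event's "format" file, e.g.:
 *
 *	field:unsigned short common_type;	offset:0;	size:2;	signed:0;
 *	field:unsigned char common_flags;	offset:2;	size:1;	signed:0;
 *	field:unsigned char common_preempt_count;	offset:3;	size:1;	signed:0;
 *	field:int common_pid;	offset:4;	size:4;	signed:1;
 */
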
static void trace_destroy_fields(struct ftrace_event_call *call)
{
        struct ftrace_event_field *field, *next;
        struct list_head *head;

        head = trace_get_fields(call);
        list_for_each_entry_safe(field, next, head, link) {
                list_del(&field->link);
                kmem_cache_free(field_cachep, field);
        }
}

int trace_event_raw_init(struct ftrace_event_call *call)
{
        int id;

        id = register_ftrace_event(&call->event);
        if (!id)
                return -ENODEV;

        return 0;
}
EXPORT_SYMBOL_GPL(trace_event_raw_init);

int ftrace_event_reg(struct ftrace_event_call *call,
                     enum trace_reg type, void *data)
{
        struct ftrace_event_file *file = data;

        switch (type) {
        case TRACE_REG_REGISTER:
                return tracepoint_probe_register(call->name,
                                                 call->class->probe,
                                                 file);
        case TRACE_REG_UNREGISTER:
                tracepoint_probe_unregister(call->name,
                                            call->class->probe,
                                            file);
                return 0;

#ifdef CONFIG_PERF_EVENTS
        case TRACE_REG_PERF_REGISTER:
                return tracepoint_probe_register(call->name,
                                                 call->class->perf_probe,
                                                 call);
        case TRACE_REG_PERF_UNREGISTER:
                tracepoint_probe_unregister(call->name,
                                            call->class->perf_probe,
                                            call);
                return 0;
        case TRACE_REG_PERF_OPEN:
        case TRACE_REG_PERF_CLOSE:
        case TRACE_REG_PERF_ADD:
        case TRACE_REG_PERF_DEL:
                return 0;
#endif
        }
        return 0;
}
EXPORT_SYMBOL_GPL(ftrace_event_reg);

void trace_event_enable_cmd_record(bool enable)
{
        struct ftrace_event_file *file;
        struct trace_array *tr;

        mutex_lock(&event_mutex);
        do_for_each_event_file(tr, file) {

                if (!(file->flags & FTRACE_EVENT_FL_ENABLED))
                        continue;

                if (enable) {
                        tracing_start_cmdline_record();
                        set_bit(FTRACE_EVENT_FL_RECORDED_CMD_BIT, &file->flags);
                } else {
                        tracing_stop_cmdline_record();
                        clear_bit(FTRACE_EVENT_FL_RECORDED_CMD_BIT, &file->flags);
                }
        } while_for_each_event_file();
        mutex_unlock(&event_mutex);
}

static int __ftrace_event_enable_disable(struct ftrace_event_file *file,
                                         int enable, int soft_disable)
{
        struct ftrace_event_call *call = file->event_call;
        int ret = 0;
        int disable;

        switch (enable) {
        case 0:
                /*
                 * When soft_disable is set and enable is cleared, the sm_ref
                 * reference counter is decremented. If it reaches 0, we want
                 * to clear the SOFT_DISABLED flag but leave the event in the
                 * state that it was. That is, if the event was enabled and
                 * SOFT_DISABLED isn't set, then do nothing. But if SOFT_DISABLED
                 * is set we do not want the event to be enabled before we
                 * clear the bit.
                 *
                 * When soft_disable is not set but the SOFT_MODE flag is,
                 * we do nothing. Do not disable the tracepoint, otherwise
                 * "soft enable"s (clearing the SOFT_DISABLED bit) won't work.
                 */
                if (soft_disable) {
                        if (atomic_dec_return(&file->sm_ref) > 0)
                                break;
                        disable = file->flags & FTRACE_EVENT_FL_SOFT_DISABLED;
                        clear_bit(FTRACE_EVENT_FL_SOFT_MODE_BIT, &file->flags);
                } else
                        disable = !(file->flags & FTRACE_EVENT_FL_SOFT_MODE);

                if (disable && (file->flags & FTRACE_EVENT_FL_ENABLED)) {
                        clear_bit(FTRACE_EVENT_FL_ENABLED_BIT, &file->flags);
                        if (file->flags & FTRACE_EVENT_FL_RECORDED_CMD) {
                                tracing_stop_cmdline_record();
                                clear_bit(FTRACE_EVENT_FL_RECORDED_CMD_BIT, &file->flags);
                        }
                        call->class->reg(call, TRACE_REG_UNREGISTER, file);
                }
                /* If in SOFT_MODE, just set the SOFT_DISABLE_BIT */
                if (file->flags & FTRACE_EVENT_FL_SOFT_MODE)
                        set_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &file->flags);
                else
                        clear_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &file->flags);
                break;
        case 1:
                /*
                 * When soft_disable is set and enable is set, we want to
                 * register the tracepoint for the event, but leave the event
                 * as is. That means, if the event was already enabled, we do
                 * nothing (but set SOFT_MODE). If the event is disabled, we
                 * set SOFT_DISABLED before enabling the event tracepoint, so
                 * it still seems to be disabled.
                 */
                if (!soft_disable)
                        clear_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &file->flags);
                else {
                        if (atomic_inc_return(&file->sm_ref) > 1)
                                break;
                        set_bit(FTRACE_EVENT_FL_SOFT_MODE_BIT, &file->flags);
                }

                if (!(file->flags & FTRACE_EVENT_FL_ENABLED)) {

                        /* Keep the event disabled, when going to SOFT_MODE. */
                        if (soft_disable)
                                set_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &file->flags);

                        if (trace_flags & TRACE_ITER_RECORD_CMD) {
                                tracing_start_cmdline_record();
                                set_bit(FTRACE_EVENT_FL_RECORDED_CMD_BIT, &file->flags);
                        }
                        ret = call->class->reg(call, TRACE_REG_REGISTER, file);
                        if (ret) {
                                tracing_stop_cmdline_record();
                                pr_info("event trace: Could not enable event "
                                        "%s\n", call->name);
                                break;
                        }
                        set_bit(FTRACE_EVENT_FL_ENABLED_BIT, &file->flags);

                        /* WAS_ENABLED gets set but never cleared. */
                        call->flags |= TRACE_EVENT_FL_WAS_ENABLED;
                }
                break;
        }

        return ret;
}

static int ftrace_event_enable_disable(struct ftrace_event_file *file,
                                       int enable)
{
        return __ftrace_event_enable_disable(file, enable, 0);
}

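/*
 * Sketch of the two entry points (illustrative): a write to an event's
 * "enable" file performs a hard enable,
 *
 *	__ftrace_event_enable_disable(file, 1, 0);
 *
 * while a trigger such as the enable_event function probe further down
 * performs a soft enable, which registers the tracepoint but leaves
 * SOFT_DISABLED set, so nothing is recorded until the probe clears it:
 *
 *	__ftrace_event_enable_disable(file, 1, 1);
 */
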
static void ftrace_clear_events(struct trace_array *tr)
{
        struct ftrace_event_file *file;

        mutex_lock(&event_mutex);
        list_for_each_entry(file, &tr->events, list) {
                ftrace_event_enable_disable(file, 0);
        }
        mutex_unlock(&event_mutex);
}

static void __put_system(struct event_subsystem *system)
{
        struct event_filter *filter = system->filter;

        WARN_ON_ONCE(system_refcount(system) == 0);
        if (system_refcount_dec(system))
                return;

        list_del(&system->list);

        if (filter) {
                kfree(filter->filter_string);
                kfree(filter);
        }
        if (system->ref_count & SYSTEM_FL_FREE_NAME)
                kfree(system->name);
        kfree(system);
}

static void __get_system(struct event_subsystem *system)
{
        WARN_ON_ONCE(system_refcount(system) == 0);
        system_refcount_inc(system);
}

static void __get_system_dir(struct ftrace_subsystem_dir *dir)
{
        WARN_ON_ONCE(dir->ref_count == 0);
        dir->ref_count++;
        __get_system(dir->subsystem);
}

static void __put_system_dir(struct ftrace_subsystem_dir *dir)
{
        WARN_ON_ONCE(dir->ref_count == 0);
        /* If the subsystem is about to be freed, the dir must be too */
        WARN_ON_ONCE(system_refcount(dir->subsystem) == 1 && dir->ref_count != 1);

        __put_system(dir->subsystem);
        if (!--dir->ref_count)
                kfree(dir);
}

static void put_system(struct ftrace_subsystem_dir *dir)
{
        mutex_lock(&event_mutex);
        __put_system_dir(dir);
        mutex_unlock(&event_mutex);
}

static void remove_subsystem(struct ftrace_subsystem_dir *dir)
{
        if (!dir)
                return;

        if (!--dir->nr_events) {
                debugfs_remove_recursive(dir->entry);
                list_del(&dir->list);
                __put_system_dir(dir);
        }
}

static void *event_file_data(struct file *filp)
{
        return ACCESS_ONCE(file_inode(filp)->i_private);
}

static void remove_event_file_dir(struct ftrace_event_file *file)
{
        struct dentry *dir = file->dir;
        struct dentry *child;

        if (dir) {
                spin_lock(&dir->d_lock);        /* probably unneeded */
                list_for_each_entry(child, &dir->d_subdirs, d_u.d_child) {
                        if (child->d_inode)     /* probably unneeded */
                                child->d_inode->i_private = NULL;
                }
                spin_unlock(&dir->d_lock);

                debugfs_remove_recursive(dir);
        }

        list_del(&file->list);
        remove_subsystem(file->system);
        kmem_cache_free(file_cachep, file);
}

/*
 * __ftrace_set_clr_event(NULL, NULL, NULL, set) will set/unset all events.
 */
static int
__ftrace_set_clr_event_nolock(struct trace_array *tr, const char *match,
                              const char *sub, const char *event, int set)
{
        struct ftrace_event_file *file;
        struct ftrace_event_call *call;
        int ret = -EINVAL;

        list_for_each_entry(file, &tr->events, list) {

                call = file->event_call;

                if (!call->name || !call->class || !call->class->reg)
                        continue;

                if (call->flags & TRACE_EVENT_FL_IGNORE_ENABLE)
                        continue;

                if (match &&
                    strcmp(match, call->name) != 0 &&
                    strcmp(match, call->class->system) != 0)
                        continue;

                if (sub && strcmp(sub, call->class->system) != 0)
                        continue;

                if (event && strcmp(event, call->name) != 0)
                        continue;

                ftrace_event_enable_disable(file, set);

                ret = 0;
        }

        return ret;
}

static int __ftrace_set_clr_event(struct trace_array *tr, const char *match,
                                  const char *sub, const char *event, int set)
{
        int ret;

        mutex_lock(&event_mutex);
        ret = __ftrace_set_clr_event_nolock(tr, match, sub, event, set);
        mutex_unlock(&event_mutex);

        return ret;
}

static int ftrace_set_clr_event(struct trace_array *tr, char *buf, int set)
{
        char *event = NULL, *sub = NULL, *match;

        /*
         * The buf format can be <subsystem>:<event-name>
         *  *:<event-name> means any event by that name.
         *  :<event-name> is the same.
         *
         *  <subsystem>:* means all events in that subsystem
         *  <subsystem>: means the same.
         *
         *  <name> (no ':') means all events in a subsystem with
         *  the name <name> or any event that matches <name>
         */

        match = strsep(&buf, ":");
        if (buf) {
                sub = match;
                event = buf;
                match = NULL;

                if (!strlen(sub) || strcmp(sub, "*") == 0)
                        sub = NULL;
                if (!strlen(event) || strcmp(event, "*") == 0)
                        event = NULL;
        }

        return __ftrace_set_clr_event(tr, match, sub, event, set);
}

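/*
 * Examples of strings parsed above, as written to the "set_event" file
 * (path assumes the usual debugfs mount; shown for illustration):
 *
 *	echo sched:sched_switch > /sys/kernel/debug/tracing/set_event
 *	echo 'sched:' > set_event		# all events in "sched"
 *	echo '*:sched_switch' > set_event	# any system, this name
 *	echo '!sched:sched_switch' > set_event	# '!' clears the event
 *						# (see ftrace_event_write())
 */
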
/**
 * trace_set_clr_event - enable or disable an event
 * @system: system name to match (NULL for any system)
 * @event: event name to match (NULL for all events, within system)
 * @set: 1 to enable, 0 to disable
 *
 * This is a way for other parts of the kernel to enable or disable
 * a specific trace event.
 *
 * Returns 0 on success, -EINVAL if the parameters do not match any
 * registered events.
 */
int trace_set_clr_event(const char *system, const char *event, int set)
{
        struct trace_array *tr = top_trace_array();

        return __ftrace_set_clr_event(tr, NULL, system, event, set);
}
EXPORT_SYMBOL_GPL(trace_set_clr_event);

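/*
 * In-kernel usage sketch (hypothetical caller, for illustration only):
 */
#if 0
	int err;

	err = trace_set_clr_event("sched", "sched_switch", 1);
	if (err)
		pr_warn("could not enable sched_switch: %d\n", err);

	/* trace for a while, then turn it back off */
	trace_set_clr_event("sched", "sched_switch", 0);
#endif
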
/* 128 should be much more than enough */
#define EVENT_BUF_SIZE          127

static ssize_t
ftrace_event_write(struct file *file, const char __user *ubuf,
                   size_t cnt, loff_t *ppos)
{
        struct trace_parser parser;
        struct seq_file *m = file->private_data;
        struct trace_array *tr = m->private;
        ssize_t read, ret;

        if (!cnt)
                return 0;

        ret = tracing_update_buffers();
        if (ret < 0)
                return ret;

        if (trace_parser_get_init(&parser, EVENT_BUF_SIZE + 1))
                return -ENOMEM;

        read = trace_get_user(&parser, ubuf, cnt, ppos);

        if (read >= 0 && trace_parser_loaded((&parser))) {
                int set = 1;

                if (*parser.buffer == '!')
                        set = 0;

                parser.buffer[parser.idx] = 0;

                ret = ftrace_set_clr_event(tr, parser.buffer + !set, set);
                if (ret)
                        goto out_put;
        }

        ret = read;

 out_put:
        trace_parser_put(&parser);

        return ret;
}

static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
        struct ftrace_event_file *file = v;
        struct ftrace_event_call *call;
        struct trace_array *tr = m->private;

        (*pos)++;

        list_for_each_entry_continue(file, &tr->events, list) {
                call = file->event_call;
                /*
                 * The ftrace subsystem is for showing formats only.
                 * They can not be enabled or disabled via the event files.
                 */
                if (call->class && call->class->reg)
                        return file;
        }

        return NULL;
}

static void *t_start(struct seq_file *m, loff_t *pos)
{
        struct ftrace_event_file *file;
        struct trace_array *tr = m->private;
        loff_t l;

        mutex_lock(&event_mutex);

        file = list_entry(&tr->events, struct ftrace_event_file, list);
        for (l = 0; l <= *pos; ) {
                file = t_next(m, file, &l);
                if (!file)
                        break;
        }

        return file;
}

static void *
s_next(struct seq_file *m, void *v, loff_t *pos)
{
        struct ftrace_event_file *file = v;
        struct trace_array *tr = m->private;

        (*pos)++;

        list_for_each_entry_continue(file, &tr->events, list) {
                if (file->flags & FTRACE_EVENT_FL_ENABLED)
                        return file;
        }

        return NULL;
}

static void *s_start(struct seq_file *m, loff_t *pos)
{
        struct ftrace_event_file *file;
        struct trace_array *tr = m->private;
        loff_t l;

        mutex_lock(&event_mutex);

        file = list_entry(&tr->events, struct ftrace_event_file, list);
        for (l = 0; l <= *pos; ) {
                file = s_next(m, file, &l);
                if (!file)
                        break;
        }

        return file;
}

static int t_show(struct seq_file *m, void *v)
{
        struct ftrace_event_file *file = v;
        struct ftrace_event_call *call = file->event_call;

        if (strcmp(call->class->system, TRACE_SYSTEM) != 0)
                seq_printf(m, "%s:", call->class->system);
        seq_printf(m, "%s\n", call->name);

        return 0;
}

static void t_stop(struct seq_file *m, void *p)
{
        mutex_unlock(&event_mutex);
}

static ssize_t
event_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
                  loff_t *ppos)
{
        struct ftrace_event_file *file;
        unsigned long flags;
        char buf[4];

        mutex_lock(&event_mutex);
        file = event_file_data(filp);
        if (likely(file))
                flags = file->flags;
        mutex_unlock(&event_mutex);

        if (!file)
                return -ENODEV;

        if (flags & FTRACE_EVENT_FL_ENABLED) {
                if (flags & FTRACE_EVENT_FL_SOFT_DISABLED)
                        strcpy(buf, "0*\n");
                else if (flags & FTRACE_EVENT_FL_SOFT_MODE)
                        strcpy(buf, "1*\n");
                else
                        strcpy(buf, "1\n");
        } else
                strcpy(buf, "0\n");

        return simple_read_from_buffer(ubuf, cnt, ppos, buf, strlen(buf));
}

static ssize_t
event_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
                   loff_t *ppos)
{
        struct ftrace_event_file *file;
        unsigned long val;
        int ret;

        ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
        if (ret)
                return ret;

        ret = tracing_update_buffers();
        if (ret < 0)
                return ret;

        switch (val) {
        case 0:
        case 1:
                ret = -ENODEV;
                mutex_lock(&event_mutex);
                file = event_file_data(filp);
                if (likely(file))
                        ret = ftrace_event_enable_disable(file, val);
                mutex_unlock(&event_mutex);
                break;

        default:
                return -EINVAL;
        }

        *ppos += cnt;

        return ret ? ret : cnt;
}

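/*
 * Illustration: the per-event "enable" file ties the two handlers
 * above together. Writes accept 0 or 1; reads report the state, with
 * a '*' appended while the event is in soft mode:
 *
 *	echo 1 > events/sched/sched_switch/enable
 *	cat events/sched/sched_switch/enable	# "0", "1", "0*" or "1*"
 */
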
static ssize_t
system_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
                   loff_t *ppos)
{
        const char set_to_char[4] = { '?', '0', '1', 'X' };
        struct ftrace_subsystem_dir *dir = filp->private_data;
        struct event_subsystem *system = dir->subsystem;
        struct ftrace_event_call *call;
        struct ftrace_event_file *file;
        struct trace_array *tr = dir->tr;
        char buf[2];
        int set = 0;
        int ret;

        mutex_lock(&event_mutex);
        list_for_each_entry(file, &tr->events, list) {
                call = file->event_call;
                if (!call->name || !call->class || !call->class->reg)
                        continue;

                if (system && strcmp(call->class->system, system->name) != 0)
                        continue;

                /*
                 * We need to find out if all the events are set
                 * or if all events are cleared, or if we have
                 * a mixture.
                 */
                set |= (1 << !!(file->flags & FTRACE_EVENT_FL_ENABLED));

                /*
                 * If we have a mixture, no need to look further.
                 */
                if (set == 3)
                        break;
        }
        mutex_unlock(&event_mutex);

        buf[0] = set_to_char[set];
        buf[1] = '\n';

        ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);

        return ret;
}

static ssize_t
system_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
                    loff_t *ppos)
{
        struct ftrace_subsystem_dir *dir = filp->private_data;
        struct event_subsystem *system = dir->subsystem;
        const char *name = NULL;
        unsigned long val;
        ssize_t ret;

        ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
        if (ret)
                return ret;

        ret = tracing_update_buffers();
        if (ret < 0)
                return ret;

        if (val != 0 && val != 1)
                return -EINVAL;

        /*
         * Opening of "enable" adds a ref count to system,
         * so the name is safe to use.
         */
        if (system)
                name = system->name;

        ret = __ftrace_set_clr_event(dir->tr, NULL, name, NULL, val);
        if (ret)
                goto out;

        ret = cnt;

 out:
        *ppos += cnt;

        return ret;
}

enum {
        FORMAT_HEADER           = 1,
        FORMAT_FIELD_SEPERATOR  = 2,
        FORMAT_PRINTFMT         = 3,
};

static void *f_next(struct seq_file *m, void *v, loff_t *pos)
{
        struct ftrace_event_call *call = event_file_data(m->private);
        struct ftrace_event_field *field;
        struct list_head *common_head = &ftrace_common_fields;
        struct list_head *head = trace_get_fields(call);

        (*pos)++;

        switch ((unsigned long)v) {
        case FORMAT_HEADER:
                if (unlikely(list_empty(common_head)))
                        return NULL;

                field = list_entry(common_head->prev,
                                   struct ftrace_event_field, link);
                return field;

        case FORMAT_FIELD_SEPERATOR:
                if (unlikely(list_empty(head)))
                        return NULL;

                field = list_entry(head->prev, struct ftrace_event_field, link);
                return field;

        case FORMAT_PRINTFMT:
                /* all done */
                return NULL;
        }

        field = v;
        if (field->link.prev == common_head)
                return (void *)FORMAT_FIELD_SEPERATOR;
        else if (field->link.prev == head)
                return (void *)FORMAT_PRINTFMT;

        field = list_entry(field->link.prev, struct ftrace_event_field, link);

        return field;
}

static void *f_start(struct seq_file *m, loff_t *pos)
{
        void *p;
        loff_t l = 0;

        /* ->stop() is called even if ->start() fails */
        mutex_lock(&event_mutex);
        if (!event_file_data(m->private))
                return ERR_PTR(-ENODEV);

        /* Start by showing the header */
        if (!*pos)
                return (void *)FORMAT_HEADER;

        p = (void *)FORMAT_HEADER;
        do {
                p = f_next(m, p, &l);
        } while (p && l < *pos);

        return p;
}

static int f_show(struct seq_file *m, void *v)
{
        struct ftrace_event_call *call = event_file_data(m->private);
        struct ftrace_event_field *field;
        const char *array_descriptor;

        switch ((unsigned long)v) {
        case FORMAT_HEADER:
                seq_printf(m, "name: %s\n", call->name);
                seq_printf(m, "ID: %d\n", call->event.type);
                seq_printf(m, "format:\n");
                return 0;

        case FORMAT_FIELD_SEPERATOR:
                seq_putc(m, '\n');
                return 0;

        case FORMAT_PRINTFMT:
                seq_printf(m, "\nprint fmt: %s\n",
                           call->print_fmt);
                return 0;
        }

        field = v;

        /*
         * Smartly shows the array type (except dynamic arrays).
         * Normal:
         *      field:TYPE VAR
         * If TYPE := TYPE[LEN], it is shown:
         *      field:TYPE VAR[LEN]
         */
        array_descriptor = strchr(field->type, '[');

        if (!strncmp(field->type, "__data_loc", 10))
                array_descriptor = NULL;

        if (!array_descriptor)
                seq_printf(m, "\tfield:%s %s;\toffset:%u;\tsize:%u;\tsigned:%d;\n",
                           field->type, field->name, field->offset,
                           field->size, !!field->is_signed);
        else
                seq_printf(m, "\tfield:%.*s %s%s;\toffset:%u;\tsize:%u;\tsigned:%d;\n",
                           (int)(array_descriptor - field->type),
                           field->type, field->name,
                           array_descriptor, field->offset,
                           field->size, !!field->is_signed);

        return 0;
}

static void f_stop(struct seq_file *m, void *p)
{
        mutex_unlock(&event_mutex);
}

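/*
 * Illustration of the array handling above (offset/size values are
 * examples only): a static array field such as "char comm[16]" has the
 * length printed after the name,
 *
 *	field:char comm[16];	offset:8;	size:16;	signed:1;
 *
 * while a __data_loc (dynamic array) field keeps its full type string,
 * since array_descriptor is cleared for it.
 */
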
static const struct seq_operations trace_format_seq_ops = {
        .start          = f_start,
        .next           = f_next,
        .stop           = f_stop,
        .show           = f_show,
};

static int trace_format_open(struct inode *inode, struct file *file)
{
        struct seq_file *m;
        int ret;

        ret = seq_open(file, &trace_format_seq_ops);
        if (ret < 0)
                return ret;

        m = file->private_data;
        m->private = file;

        return 0;
}

static ssize_t
event_id_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
{
        int id = (long)event_file_data(filp);
        struct trace_seq *s;
        int r;

        if (*ppos)
                return 0;

        if (unlikely(!id))
                return -ENODEV;

        s = kmalloc(sizeof(*s), GFP_KERNEL);
        if (!s)
                return -ENOMEM;

        trace_seq_init(s);
        trace_seq_printf(s, "%d\n", id);

        r = simple_read_from_buffer(ubuf, cnt, ppos,
                                    s->buffer, s->len);
        kfree(s);
        return r;
}

static ssize_t
event_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
                  loff_t *ppos)
{
        struct ftrace_event_call *call;
        struct trace_seq *s;
        int r = -ENODEV;

        if (*ppos)
                return 0;

        s = kmalloc(sizeof(*s), GFP_KERNEL);
        if (!s)
                return -ENOMEM;

        trace_seq_init(s);

        mutex_lock(&event_mutex);
        call = event_file_data(filp);
        if (call)
                print_event_filter(call, s);
        mutex_unlock(&event_mutex);

        if (call)
                r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);

        kfree(s);

        return r;
}

static ssize_t
event_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
                   loff_t *ppos)
{
        struct ftrace_event_call *call;
        char *buf;
        int err = -ENODEV;

        if (cnt >= PAGE_SIZE)
                return -EINVAL;

        buf = (char *)__get_free_page(GFP_TEMPORARY);
        if (!buf)
                return -ENOMEM;

        if (copy_from_user(buf, ubuf, cnt)) {
                free_page((unsigned long) buf);
                return -EFAULT;
        }
        buf[cnt] = '\0';

        mutex_lock(&event_mutex);
        call = event_file_data(filp);
        if (call)
                err = apply_event_filter(call, buf);
        mutex_unlock(&event_mutex);

        free_page((unsigned long) buf);
        if (err < 0)
                return err;

        *ppos += cnt;

        return cnt;
}

static LIST_HEAD(event_subsystems);

static int subsystem_open(struct inode *inode, struct file *filp)
{
        struct event_subsystem *system = NULL;
        struct ftrace_subsystem_dir *dir = NULL;       /* Initialize for gcc */
        struct trace_array *tr;
        int ret;

        /* Make sure the system still exists */
        mutex_lock(&trace_types_lock);
        mutex_lock(&event_mutex);
        list_for_each_entry(tr, &ftrace_trace_arrays, list) {
                list_for_each_entry(dir, &tr->systems, list) {
                        if (dir == inode->i_private) {
                                /* Don't open systems with no events */
                                if (dir->nr_events) {
                                        __get_system_dir(dir);
                                        system = dir->subsystem;
                                }
                                goto exit_loop;
                        }
                }
        }
 exit_loop:
        mutex_unlock(&event_mutex);
        mutex_unlock(&trace_types_lock);

        if (!system)
                return -ENODEV;

        /* Some versions of gcc think dir can be uninitialized here */
        WARN_ON(!dir);

        /* Still need to increment the ref count of the system */
        if (trace_array_get(tr) < 0) {
                put_system(dir);
                return -ENODEV;
        }

        ret = tracing_open_generic(inode, filp);
        if (ret < 0) {
                trace_array_put(tr);
                put_system(dir);
        }

        return ret;
}

static int system_tr_open(struct inode *inode, struct file *filp)
{
        struct ftrace_subsystem_dir *dir;
        struct trace_array *tr = inode->i_private;
        int ret;

        if (trace_array_get(tr) < 0)
                return -ENODEV;

        /* Make a temporary dir that has no system but points to tr */
        dir = kzalloc(sizeof(*dir), GFP_KERNEL);
        if (!dir) {
                trace_array_put(tr);
                return -ENOMEM;
        }

        dir->tr = tr;

        ret = tracing_open_generic(inode, filp);
        if (ret < 0) {
                trace_array_put(tr);
                kfree(dir);
                return ret;
        }

        filp->private_data = dir;

        return ret;
}

static int subsystem_release(struct inode *inode, struct file *file)
{
        struct ftrace_subsystem_dir *dir = file->private_data;

        trace_array_put(dir->tr);

        /*
         * If dir->subsystem is NULL, then this is a temporary
         * descriptor that was made for a trace_array to enable
         * all subsystems.
         */
        if (dir->subsystem)
                put_system(dir);
        else
                kfree(dir);

        return 0;
}

static ssize_t
subsystem_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
                      loff_t *ppos)
{
        struct ftrace_subsystem_dir *dir = filp->private_data;
        struct event_subsystem *system = dir->subsystem;
        struct trace_seq *s;
        int r;

        if (*ppos)
                return 0;

        s = kmalloc(sizeof(*s), GFP_KERNEL);
        if (!s)
                return -ENOMEM;

        trace_seq_init(s);

        print_subsystem_event_filter(system, s);
        r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);

        kfree(s);

        return r;
}

static ssize_t
subsystem_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
                       loff_t *ppos)
{
        struct ftrace_subsystem_dir *dir = filp->private_data;
        char *buf;
        int err;

        if (cnt >= PAGE_SIZE)
                return -EINVAL;

        buf = (char *)__get_free_page(GFP_TEMPORARY);
        if (!buf)
                return -ENOMEM;

        if (copy_from_user(buf, ubuf, cnt)) {
                free_page((unsigned long) buf);
                return -EFAULT;
        }
        buf[cnt] = '\0';

        err = apply_subsystem_event_filter(dir, buf);
        free_page((unsigned long) buf);
        if (err < 0)
                return err;

        *ppos += cnt;

        return cnt;
}

static ssize_t
show_header(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
{
        int (*func)(struct trace_seq *s) = filp->private_data;
        struct trace_seq *s;
        int r;

        if (*ppos)
                return 0;

        s = kmalloc(sizeof(*s), GFP_KERNEL);
        if (!s)
                return -ENOMEM;

        trace_seq_init(s);

        func(s);
        r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);

        kfree(s);

        return r;
}

static int ftrace_event_avail_open(struct inode *inode, struct file *file);
static int ftrace_event_set_open(struct inode *inode, struct file *file);
static int ftrace_event_release(struct inode *inode, struct file *file);

static const struct seq_operations show_event_seq_ops = {
        .start = t_start,
        .next = t_next,
        .show = t_show,
        .stop = t_stop,
};

static const struct seq_operations show_set_event_seq_ops = {
        .start = s_start,
        .next = s_next,
        .show = t_show,
        .stop = t_stop,
};

static const struct file_operations ftrace_avail_fops = {
        .open = ftrace_event_avail_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = seq_release,
};

static const struct file_operations ftrace_set_event_fops = {
        .open = ftrace_event_set_open,
        .read = seq_read,
        .write = ftrace_event_write,
        .llseek = seq_lseek,
        .release = ftrace_event_release,
};

static const struct file_operations ftrace_enable_fops = {
        .open = tracing_open_generic,
        .read = event_enable_read,
        .write = event_enable_write,
        .llseek = default_llseek,
};

static const struct file_operations ftrace_event_format_fops = {
        .open = trace_format_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = seq_release,
};

static const struct file_operations ftrace_event_id_fops = {
        .read = event_id_read,
        .llseek = default_llseek,
};

static const struct file_operations ftrace_event_filter_fops = {
        .open = tracing_open_generic,
        .read = event_filter_read,
        .write = event_filter_write,
        .llseek = default_llseek,
};

static const struct file_operations ftrace_subsystem_filter_fops = {
        .open = subsystem_open,
        .read = subsystem_filter_read,
        .write = subsystem_filter_write,
        .llseek = default_llseek,
        .release = subsystem_release,
};

static const struct file_operations ftrace_system_enable_fops = {
        .open = subsystem_open,
        .read = system_enable_read,
        .write = system_enable_write,
        .llseek = default_llseek,
        .release = subsystem_release,
};

static const struct file_operations ftrace_tr_enable_fops = {
        .open = system_tr_open,
        .read = system_enable_read,
        .write = system_enable_write,
        .llseek = default_llseek,
        .release = subsystem_release,
};

static const struct file_operations ftrace_show_header_fops = {
        .open = tracing_open_generic,
        .read = show_header,
        .llseek = default_llseek,
};

static int
ftrace_event_open(struct inode *inode, struct file *file,
                  const struct seq_operations *seq_ops)
{
        struct seq_file *m;
        int ret;

        ret = seq_open(file, seq_ops);
        if (ret < 0)
                return ret;
        m = file->private_data;
        /* copy tr over to seq ops */
        m->private = inode->i_private;

        return ret;
}

static int ftrace_event_release(struct inode *inode, struct file *file)
{
        struct trace_array *tr = inode->i_private;

        trace_array_put(tr);

        return seq_release(inode, file);
}

static int
ftrace_event_avail_open(struct inode *inode, struct file *file)
{
        const struct seq_operations *seq_ops = &show_event_seq_ops;

        return ftrace_event_open(inode, file, seq_ops);
}

static int
ftrace_event_set_open(struct inode *inode, struct file *file)
{
        const struct seq_operations *seq_ops = &show_set_event_seq_ops;
        struct trace_array *tr = inode->i_private;
        int ret;

        if (trace_array_get(tr) < 0)
                return -ENODEV;

        if ((file->f_mode & FMODE_WRITE) &&
            (file->f_flags & O_TRUNC))
                ftrace_clear_events(tr);

        ret = ftrace_event_open(inode, file, seq_ops);
        if (ret < 0)
                trace_array_put(tr);
        return ret;
}

static struct event_subsystem *
create_new_subsystem(const char *name)
{
        struct event_subsystem *system;

        /* need to create new entry */
        system = kmalloc(sizeof(*system), GFP_KERNEL);
        if (!system)
                return NULL;

        system->ref_count = 1;

        /* Only allocate if dynamic (kprobes and modules) */
        if (!core_kernel_data((unsigned long)name)) {
                system->ref_count |= SYSTEM_FL_FREE_NAME;
                system->name = kstrdup(name, GFP_KERNEL);
                if (!system->name)
                        goto out_free;
        } else
                system->name = name;

        system->filter = NULL;

        system->filter = kzalloc(sizeof(struct event_filter), GFP_KERNEL);
        if (!system->filter)
                goto out_free;

        list_add(&system->list, &event_subsystems);

        return system;

 out_free:
        if (system->ref_count & SYSTEM_FL_FREE_NAME)
                kfree(system->name);
        kfree(system);
        return NULL;
}

static struct dentry *
event_subsystem_dir(struct trace_array *tr, const char *name,
                    struct ftrace_event_file *file, struct dentry *parent)
{
        struct ftrace_subsystem_dir *dir;
        struct event_subsystem *system;
        struct dentry *entry;

        /* First see if we did not already create this dir */
        list_for_each_entry(dir, &tr->systems, list) {
                system = dir->subsystem;
                if (strcmp(system->name, name) == 0) {
                        dir->nr_events++;
                        file->system = dir;
                        return dir->entry;
                }
        }

        /* Now see if the system itself exists. */
        list_for_each_entry(system, &event_subsystems, list) {
                if (strcmp(system->name, name) == 0)
                        break;
        }
        /* Reset system variable when not found */
        if (&system->list == &event_subsystems)
                system = NULL;

        dir = kmalloc(sizeof(*dir), GFP_KERNEL);
        if (!dir)
                goto out_fail;

        if (!system) {
                system = create_new_subsystem(name);
                if (!system)
                        goto out_free;
        } else
                __get_system(system);

        dir->entry = debugfs_create_dir(name, parent);
        if (!dir->entry) {
                pr_warning("Failed to create system directory %s\n", name);
                __put_system(system);
                goto out_free;
        }

        dir->tr = tr;
        dir->ref_count = 1;
        dir->nr_events = 1;
        dir->subsystem = system;
        file->system = dir;

        entry = debugfs_create_file("filter", 0644, dir->entry, dir,
                                    &ftrace_subsystem_filter_fops);
        if (!entry) {
                kfree(system->filter);
                system->filter = NULL;
                pr_warning("Could not create debugfs '%s/filter' entry\n", name);
        }

        trace_create_file("enable", 0644, dir->entry, dir,
                          &ftrace_system_enable_fops);

        list_add(&dir->list, &tr->systems);

        return dir->entry;

 out_free:
        kfree(dir);
 out_fail:
        /* Only print this message if the failure was a memory allocation */
        if (!dir || !system)
                pr_warning("No memory to create event subsystem %s\n",
                           name);
        return NULL;
}

static int
event_create_dir(struct dentry *parent,
                 struct ftrace_event_file *file,
                 const struct file_operations *id,
                 const struct file_operations *enable,
                 const struct file_operations *filter,
                 const struct file_operations *format)
{
        struct ftrace_event_call *call = file->event_call;
        struct trace_array *tr = file->tr;
        struct list_head *head;
        struct dentry *d_events;
        int ret;

        /*
         * If the trace point header did not define TRACE_SYSTEM
         * then the system would be called "TRACE_SYSTEM".
         */
        if (strcmp(call->class->system, TRACE_SYSTEM) != 0) {
                d_events = event_subsystem_dir(tr, call->class->system, file, parent);
                if (!d_events)
                        return -ENOMEM;
        } else
                d_events = parent;

        file->dir = debugfs_create_dir(call->name, d_events);
        if (!file->dir) {
                pr_warning("Could not create debugfs '%s' directory\n",
                           call->name);
                return -1;
        }

        if (call->class->reg && !(call->flags & TRACE_EVENT_FL_IGNORE_ENABLE))
                trace_create_file("enable", 0644, file->dir, file,
                                  enable);

#ifdef CONFIG_PERF_EVENTS
        if (call->event.type && call->class->reg)
                trace_create_file("id", 0444, file->dir,
                                  (void *)(long)call->event.type, id);
#endif

        /*
         * Other events may have the same class. Only update
         * the fields if they are not already defined.
         */
        head = trace_get_fields(call);
        if (list_empty(head)) {
                ret = call->class->define_fields(call);
                if (ret < 0) {
                        pr_warning("Could not initialize trace point"
                                   " events/%s\n", call->name);
                        return -1;
                }
        }
        trace_create_file("filter", 0644, file->dir, call,
                          filter);

        trace_create_file("format", 0444, file->dir, call,
                          format);

        return 0;
}

static void remove_event_from_tracers(struct ftrace_event_call *call)
{
        struct ftrace_event_file *file;
        struct trace_array *tr;

        do_for_each_event_file_safe(tr, file) {
                if (file->event_call != call)
                        continue;

                remove_event_file_dir(file);
                /*
                 * The do_for_each_event_file_safe() is
                 * a double loop. After finding the call for this
                 * trace_array, we use break to jump to the next
                 * trace_array.
                 */
                break;
        } while_for_each_event_file();
}

static void event_remove(struct ftrace_event_call *call)
{
        struct trace_array *tr;
        struct ftrace_event_file *file;

        do_for_each_event_file(tr, file) {
                if (file->event_call != call)
                        continue;
                ftrace_event_enable_disable(file, 0);
                /*
                 * The do_for_each_event_file() is
                 * a double loop. After finding the call for this
                 * trace_array, we use break to jump to the next
                 * trace_array.
                 */
                break;
        } while_for_each_event_file();

        if (call->event.funcs)
                __unregister_ftrace_event(&call->event);
        remove_event_from_tracers(call);
        list_del(&call->list);
}

static int event_init(struct ftrace_event_call *call)
{
        int ret = 0;

        if (WARN_ON(!call->name))
                return -EINVAL;

        if (call->class->raw_init) {
                ret = call->class->raw_init(call);
                if (ret < 0 && ret != -ENOSYS)
                        pr_warn("Could not initialize trace events/%s\n",
                                call->name);
        }

        return ret;
}

static int
__register_event(struct ftrace_event_call *call, struct module *mod)
{
        int ret;

        ret = event_init(call);
        if (ret < 0)
                return ret;

        list_add(&call->list, &ftrace_events);
        call->mod = mod;

        return 0;
}

static struct ftrace_event_file *
trace_create_new_event(struct ftrace_event_call *call,
                       struct trace_array *tr)
{
        struct ftrace_event_file *file;

        file = kmem_cache_alloc(file_cachep, GFP_TRACE);
        if (!file)
                return NULL;

        file->event_call = call;
        file->tr = tr;
        atomic_set(&file->sm_ref, 0);
        list_add(&file->list, &tr->events);

        return file;
}

/* Add an event to a trace directory */
static int
__trace_add_new_event(struct ftrace_event_call *call,
                      struct trace_array *tr,
                      const struct file_operations *id,
                      const struct file_operations *enable,
                      const struct file_operations *filter,
                      const struct file_operations *format)
{
        struct ftrace_event_file *file;

        file = trace_create_new_event(call, tr);
        if (!file)
                return -ENOMEM;

        return event_create_dir(tr->event_dir, file, id, enable, filter, format);
}

/*
 * Just create a descriptor for early init. A descriptor is required
 * for enabling events at boot. We want to enable events before
 * the filesystem is initialized.
 */
static __init int
__trace_early_add_new_event(struct ftrace_event_call *call,
                            struct trace_array *tr)
{
        struct ftrace_event_file *file;

        file = trace_create_new_event(call, tr);
        if (!file)
                return -ENOMEM;

        return 0;
}

struct ftrace_module_file_ops;
static void __add_event_to_tracers(struct ftrace_event_call *call,
                                   struct ftrace_module_file_ops *file_ops);

/* Add an additional event_call dynamically */
int trace_add_event_call(struct ftrace_event_call *call)
{
        int ret;
        mutex_lock(&trace_types_lock);
        mutex_lock(&event_mutex);

        ret = __register_event(call, NULL);
        if (ret >= 0)
                __add_event_to_tracers(call, NULL);

        mutex_unlock(&event_mutex);
        mutex_unlock(&trace_types_lock);
        return ret;
}

/*
 * Must be called under locking of trace_types_lock, event_mutex and
 * trace_event_sem.
 */
static void __trace_remove_event_call(struct ftrace_event_call *call)
{
        event_remove(call);
        trace_destroy_fields(call);
        destroy_preds(call);
}

static int probe_remove_event_call(struct ftrace_event_call *call)
{
        struct trace_array *tr;
        struct ftrace_event_file *file;

#ifdef CONFIG_PERF_EVENTS
        if (call->perf_refcount)
                return -EBUSY;
#endif
        do_for_each_event_file(tr, file) {
                if (file->event_call != call)
                        continue;
                /*
                 * We can't rely on the ftrace_event_enable_disable(enable => 0)
                 * call we are about to make, since FTRACE_EVENT_FL_SOFT_MODE
                 * can suppress TRACE_REG_UNREGISTER.
                 */
                if (file->flags & FTRACE_EVENT_FL_ENABLED)
                        return -EBUSY;
                break;
        } while_for_each_event_file();

        __trace_remove_event_call(call);

        return 0;
}

/* Remove an event_call */
int trace_remove_event_call(struct ftrace_event_call *call)
{
        int ret;

        mutex_lock(&trace_types_lock);
        mutex_lock(&event_mutex);
        down_write(&trace_event_sem);
        ret = probe_remove_event_call(call);
        up_write(&trace_event_sem);
        mutex_unlock(&event_mutex);
        mutex_unlock(&trace_types_lock);

        return ret;
}

#define for_each_event(event, start, end)			\
	for (event = start;					\
	     (unsigned long)event < (unsigned long)end;		\
	     event++)

#ifdef CONFIG_MODULES

static LIST_HEAD(ftrace_module_file_list);

/*
 * Modules must own their file_operations to keep up with
 * reference counting.
 */
struct ftrace_module_file_ops {
        struct list_head                list;
        struct module                   *mod;
        struct file_operations          id;
        struct file_operations          enable;
        struct file_operations          format;
        struct file_operations          filter;
};

static struct ftrace_module_file_ops *
find_ftrace_file_ops(struct ftrace_module_file_ops *file_ops, struct module *mod)
{
        /*
         * As event_calls are added in groups by module,
         * when we find one file_ops, we don't need to search for
         * each call in that module, as the rest should be the
         * same. Only search for a new one if the last one did
         * not match.
         */
        if (file_ops && mod == file_ops->mod)
                return file_ops;

        list_for_each_entry(file_ops, &ftrace_module_file_list, list) {
                if (file_ops->mod == mod)
                        return file_ops;
        }
        return NULL;
}

static struct ftrace_module_file_ops *
trace_create_file_ops(struct module *mod)
{
        struct ftrace_module_file_ops *file_ops;

        /*
         * This is a bit of a PITA. To allow for correct reference
         * counting, modules must "own" their file_operations.
         * To do this, we allocate the file operations that will be
         * used in the event directory.
         */

        file_ops = kmalloc(sizeof(*file_ops), GFP_KERNEL);
        if (!file_ops)
                return NULL;

        file_ops->mod = mod;

        file_ops->id = ftrace_event_id_fops;
        file_ops->id.owner = mod;

        file_ops->enable = ftrace_enable_fops;
        file_ops->enable.owner = mod;

        file_ops->filter = ftrace_event_filter_fops;
        file_ops->filter.owner = mod;

        file_ops->format = ftrace_event_format_fops;
        file_ops->format.owner = mod;

        list_add(&file_ops->list, &ftrace_module_file_list);

        return file_ops;
}

static void trace_module_add_events(struct module *mod)
{
        struct ftrace_module_file_ops *file_ops = NULL;
        struct ftrace_event_call **call, **start, **end;

        if (!mod->num_trace_events)
                return;

        /* Don't add infrastructure for mods without tracepoints */
        if (trace_module_has_bad_taint(mod)) {
                pr_err("%s: module has bad taint, not creating trace events\n",
                       mod->name);
                return;
        }

        start = mod->trace_events;
        end = mod->trace_events + mod->num_trace_events;

        file_ops = trace_create_file_ops(mod);
        if (!file_ops)
                return;

        for_each_event(call, start, end) {
                __register_event(*call, mod);
                __add_event_to_tracers(*call, file_ops);
        }
}

static void trace_module_remove_events(struct module *mod)
{
        struct ftrace_module_file_ops *file_ops;
        struct ftrace_event_call *call, *p;
        bool clear_trace = false;

        down_write(&trace_event_sem);
        list_for_each_entry_safe(call, p, &ftrace_events, list) {
                if (call->mod == mod) {
                        if (call->flags & TRACE_EVENT_FL_WAS_ENABLED)
                                clear_trace = true;
                        __trace_remove_event_call(call);
                }
        }

        /* Now free the file_operations */
        list_for_each_entry(file_ops, &ftrace_module_file_list, list) {
                if (file_ops->mod == mod)
                        break;
        }
        if (&file_ops->list != &ftrace_module_file_list) {
                list_del(&file_ops->list);
                kfree(file_ops);
        }
        up_write(&trace_event_sem);

        /*
         * It is safest to reset the ring buffer if the module being unloaded
         * registered any events that were used. The only worry is if
         * a new module gets loaded, and takes on the same id as the events
         * of this module. When printing out the buffer, traced events left
         * over from this module may be passed to the new module events and
         * unexpected results may occur.
         */
        if (clear_trace)
                tracing_reset_all_online_cpus();
}

static int trace_module_notify(struct notifier_block *self,
                               unsigned long val, void *data)
{
        struct module *mod = data;

        mutex_lock(&trace_types_lock);
        mutex_lock(&event_mutex);
        switch (val) {
        case MODULE_STATE_COMING:
                trace_module_add_events(mod);
                break;
        case MODULE_STATE_GOING:
                trace_module_remove_events(mod);
                break;
        }
        mutex_unlock(&event_mutex);
        mutex_unlock(&trace_types_lock);

        return 0;
}

static int
__trace_add_new_mod_event(struct ftrace_event_call *call,
                          struct trace_array *tr,
                          struct ftrace_module_file_ops *file_ops)
{
        return __trace_add_new_event(call, tr,
                                     &file_ops->id, &file_ops->enable,
                                     &file_ops->filter, &file_ops->format);
}

#else
static inline struct ftrace_module_file_ops *
find_ftrace_file_ops(struct ftrace_module_file_ops *file_ops, struct module *mod)
{
        return NULL;
}
static inline int trace_module_notify(struct notifier_block *self,
                                      unsigned long val, void *data)
{
        return 0;
}
static inline int
__trace_add_new_mod_event(struct ftrace_event_call *call,
                          struct trace_array *tr,
                          struct ftrace_module_file_ops *file_ops)
{
        return -ENODEV;
}
#endif /* CONFIG_MODULES */

/* Create a new event directory structure for a trace directory. */
static void
__trace_add_event_dirs(struct trace_array *tr)
{
        struct ftrace_module_file_ops *file_ops = NULL;
        struct ftrace_event_call *call;
        int ret;

        list_for_each_entry(call, &ftrace_events, list) {
                if (call->mod) {
                        /*
                         * Directories for events by modules need to
                         * keep module ref counts when opened (as we don't
                         * want the module to disappear when reading one
                         * of these files). The file_ops keep account of
                         * the module ref count.
                         */
                        file_ops = find_ftrace_file_ops(file_ops, call->mod);
                        if (!file_ops)
                                continue; /* Warn? */
                        ret = __trace_add_new_mod_event(call, tr, file_ops);
                        if (ret < 0)
                                pr_warning("Could not create directory for event %s\n",
                                           call->name);
                        continue;
                }
                ret = __trace_add_new_event(call, tr,
                                            &ftrace_event_id_fops,
                                            &ftrace_enable_fops,
                                            &ftrace_event_filter_fops,
                                            &ftrace_event_format_fops);
                if (ret < 0)
                        pr_warning("Could not create directory for event %s\n",
                                   call->name);
        }
}

#ifdef CONFIG_DYNAMIC_FTRACE

#define ENABLE_EVENT_STR        "enable_event"
#define DISABLE_EVENT_STR       "disable_event"

struct event_probe_data {
        struct ftrace_event_file        *file;
        unsigned long                   count;
        int                             ref;
        bool                            enable;
};

static struct ftrace_event_file *
find_event_file(struct trace_array *tr, const char *system, const char *event)
{
        struct ftrace_event_file *file;
        struct ftrace_event_call *call;

        list_for_each_entry(file, &tr->events, list) {

                call = file->event_call;

                if (!call->name || !call->class || !call->class->reg)
                        continue;

                if (call->flags & TRACE_EVENT_FL_IGNORE_ENABLE)
                        continue;

                if (strcmp(event, call->name) == 0 &&
                    strcmp(system, call->class->system) == 0)
                        return file;
        }
        return NULL;
}

static void
event_enable_probe(unsigned long ip, unsigned long parent_ip, void **_data)
{
        struct event_probe_data **pdata = (struct event_probe_data **)_data;
        struct event_probe_data *data = *pdata;

        if (!data)
                return;

        if (data->enable)
                clear_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &data->file->flags);
        else
                set_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &data->file->flags);
}

static void
event_enable_count_probe(unsigned long ip, unsigned long parent_ip, void **_data)
{
        struct event_probe_data **pdata = (struct event_probe_data **)_data;
        struct event_probe_data *data = *pdata;

        if (!data)
                return;

        if (!data->count)
                return;

        /* Skip if the event is in a state we want to switch to */
        if (data->enable == !(data->file->flags & FTRACE_EVENT_FL_SOFT_DISABLED))
                return;

        if (data->count != -1)
                (data->count)--;

        event_enable_probe(ip, parent_ip, _data);
}

static int
event_enable_print(struct seq_file *m, unsigned long ip,
                   struct ftrace_probe_ops *ops, void *_data)
{
        struct event_probe_data *data = _data;

        seq_printf(m, "%ps:", (void *)ip);

        seq_printf(m, "%s:%s:%s",
                   data->enable ? ENABLE_EVENT_STR : DISABLE_EVENT_STR,
                   data->file->event_call->class->system,
                   data->file->event_call->name);

        if (data->count == -1)
                seq_printf(m, ":unlimited\n");
        else
                seq_printf(m, ":count=%ld\n", data->count);

        return 0;
}

static int
event_enable_init(struct ftrace_probe_ops *ops, unsigned long ip,
                  void **_data)
{
        struct event_probe_data **pdata = (struct event_probe_data **)_data;
        struct event_probe_data *data = *pdata;

        data->ref++;
        return 0;
}

static void
event_enable_free(struct ftrace_probe_ops *ops, unsigned long ip,
                  void **_data)
{
        struct event_probe_data **pdata = (struct event_probe_data **)_data;
        struct event_probe_data *data = *pdata;

        if (WARN_ON_ONCE(data->ref <= 0))
                return;

        data->ref--;
        if (!data->ref) {
                /* Remove the SOFT_MODE flag */
                __ftrace_event_enable_disable(data->file, 0, 1);
                module_put(data->file->event_call->mod);
                kfree(data);
        }
        *pdata = NULL;
}

static struct ftrace_probe_ops event_enable_probe_ops = {
        .func                   = event_enable_probe,
        .print                  = event_enable_print,
        .init                   = event_enable_init,
        .free                   = event_enable_free,
};

static struct ftrace_probe_ops event_enable_count_probe_ops = {
        .func                   = event_enable_count_probe,
        .print                  = event_enable_print,
        .init                   = event_enable_init,
        .free                   = event_enable_free,
};

static struct ftrace_probe_ops event_disable_probe_ops = {
        .func                   = event_enable_probe,
        .print                  = event_enable_print,
        .init                   = event_enable_init,
        .free                   = event_enable_free,
};

static struct ftrace_probe_ops event_disable_count_probe_ops = {
        .func                   = event_enable_count_probe,
        .print                  = event_enable_print,
        .init                   = event_enable_init,
        .free                   = event_enable_free,
};

static int
event_enable_func(struct ftrace_hash *hash,
                  char *glob, char *cmd, char *param, int enabled)
{
        struct trace_array *tr = top_trace_array();
        struct ftrace_event_file *file;
        struct ftrace_probe_ops *ops;
        struct event_probe_data *data;
        const char *system;
        const char *event;
        char *number;
        bool enable;
        int ret;

        /* hash funcs only work with set_ftrace_filter */
        if (!enabled || !param)
                return -EINVAL;

        system = strsep(&param, ":");
        if (!param)
                return -EINVAL;

        event = strsep(&param, ":");

        mutex_lock(&event_mutex);

        ret = -EINVAL;
        file = find_event_file(tr, system, event);
        if (!file)
                goto out;

        enable = strcmp(cmd, ENABLE_EVENT_STR) == 0;

        if (enable)
                ops = param ? &event_enable_count_probe_ops : &event_enable_probe_ops;
        else
                ops = param ? &event_disable_count_probe_ops : &event_disable_probe_ops;

        if (glob[0] == '!') {
                unregister_ftrace_function_probe_func(glob+1, ops);
                ret = 0;
                goto out;
        }

        ret = -ENOMEM;
        data = kzalloc(sizeof(*data), GFP_KERNEL);
        if (!data)
                goto out;

        data->enable = enable;
        data->count = -1;
        data->file = file;

        if (!param)
                goto out_reg;

        number = strsep(&param, ":");

        ret = -EINVAL;
        if (!strlen(number))
                goto out_free;

        /*
         * We use the callback data field (which is a pointer)
         * as our counter.
         */
        ret = kstrtoul(number, 0, &data->count);
        if (ret)
                goto out_free;

 out_reg:
        /* Don't let event modules unload while probe registered */
        ret = try_module_get(file->event_call->mod);
        if (!ret) {
                ret = -EBUSY;
                goto out_free;
        }

        ret = __ftrace_event_enable_disable(file, 1, 1);
        if (ret < 0)
                goto out_put;
        ret = register_ftrace_function_probe(glob, ops, data);
        /*
         * On success the above returns the number of functions
         * enabled, but if it didn't find any functions it returns
         * zero. Consider finding no functions a failure too.
         */
        if (!ret) {
                ret = -ENOENT;
                goto out_disable;
        } else if (ret < 0)
                goto out_disable;
        /* Just return zero, not the number of enabled functions */
        ret = 0;
 out:
        mutex_unlock(&event_mutex);
        return ret;

 out_disable:
        __ftrace_event_enable_disable(file, 0, 1);
 out_put:
        module_put(file->event_call->mod);
 out_free:
        kfree(data);
        goto out;
}

static struct ftrace_func_command event_enable_cmd = {
        .name                   = ENABLE_EVENT_STR,
        .func                   = event_enable_func,
};

static struct ftrace_func_command event_disable_cmd = {
        .name                   = DISABLE_EVENT_STR,
        .func                   = event_enable_func,
};

static __init int register_event_cmds(void)
{
        int ret;

        ret = register_ftrace_command(&event_enable_cmd);
        if (WARN_ON(ret < 0))
                return ret;
        ret = register_ftrace_command(&event_disable_cmd);
        if (WARN_ON(ret < 0))
                unregister_ftrace_command(&event_enable_cmd);
        return ret;
}
#else
static inline int register_event_cmds(void) { return 0; }
#endif /* CONFIG_DYNAMIC_FTRACE */

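/*
 * Illustration: the two commands registered above extend the
 * set_ftrace_filter syntax. For example, to soft-enable sched_switch
 * for the first two hits of schedule():
 *
 *	echo 'schedule:enable_event:sched:sched_switch:2' > \
 *		/sys/kernel/debug/tracing/set_ftrace_filter
 *
 * and a leading '!' removes the probe again:
 *
 *	echo '!schedule:enable_event:sched:sched_switch:2' > \
 *		/sys/kernel/debug/tracing/set_ftrace_filter
 */
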
/*
 * The top level array has already had its ftrace_event_file
 * descriptors created in order to allow for early events to
 * be recorded. This function is called after the debugfs has been
 * initialized, and we now have to create the files associated
 * with the events.
 */
static __init void
__trace_early_add_event_dirs(struct trace_array *tr)
{
        struct ftrace_event_file *file;
        int ret;

        list_for_each_entry(file, &tr->events, list) {
                ret = event_create_dir(tr->event_dir, file,
                                       &ftrace_event_id_fops,
                                       &ftrace_enable_fops,
                                       &ftrace_event_filter_fops,
                                       &ftrace_event_format_fops);
                if (ret < 0)
                        pr_warning("Could not create directory for event %s\n",
                                   file->event_call->name);
        }
}

/*
 * For early boot up, the top trace array needs to have
 * a list of events that can be enabled. This must be done before
 * the filesystem is set up in order to allow events to be traced
 * early.
 */
static __init void
__trace_early_add_events(struct trace_array *tr)
{
        struct ftrace_event_call *call;
        int ret;

        list_for_each_entry(call, &ftrace_events, list) {
                /* Early boot up should not have any modules loaded */
                if (WARN_ON_ONCE(call->mod))
                        continue;

                ret = __trace_early_add_new_event(call, tr);
                if (ret < 0)
                        pr_warning("Could not create early event %s\n",
                                   call->name);
        }
}

/* Remove the event directory structure for a trace directory. */
static void
__trace_remove_event_dirs(struct trace_array *tr)
{
        struct ftrace_event_file *file, *next;

        list_for_each_entry_safe(file, next, &tr->events, list)
                remove_event_file_dir(file);
}

static void
__add_event_to_tracers(struct ftrace_event_call *call,
                       struct ftrace_module_file_ops *file_ops)
{
        struct trace_array *tr;

        list_for_each_entry(tr, &ftrace_trace_arrays, list) {
                if (file_ops)
                        __trace_add_new_mod_event(call, tr, file_ops);
                else
                        __trace_add_new_event(call, tr,
                                              &ftrace_event_id_fops,
                                              &ftrace_enable_fops,
                                              &ftrace_event_filter_fops,
                                              &ftrace_event_format_fops);
        }
}

static struct notifier_block trace_module_nb = {
        .notifier_call = trace_module_notify,
        .priority = 0,
};

extern struct ftrace_event_call *__start_ftrace_events[];
extern struct ftrace_event_call *__stop_ftrace_events[];

static char bootup_event_buf[COMMAND_LINE_SIZE] __initdata;

static __init int setup_trace_event(char *str)
{
        strlcpy(bootup_event_buf, str, COMMAND_LINE_SIZE);
        ring_buffer_expanded = true;
        tracing_selftest_disabled = true;

        return 1;
}
__setup("trace_event=", setup_trace_event);

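/*
 * Illustration: this boot parameter takes the same syntax as the
 * set_event file, comma separated, and is consumed by
 * event_trace_enable() below before the filesystem exists, e.g.:
 *
 *	trace_event=sched:sched_switch,irq:irq_handler_entry
 */
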
/* Expects to have event_mutex held when called */
static int
create_event_toplevel_files(struct dentry *parent, struct trace_array *tr)
{
        struct dentry *d_events;
        struct dentry *entry;

        entry = debugfs_create_file("set_event", 0644, parent,
                                    tr, &ftrace_set_event_fops);
        if (!entry) {
                pr_warning("Could not create debugfs 'set_event' entry\n");
                return -ENOMEM;
        }

        d_events = debugfs_create_dir("events", parent);
        if (!d_events) {
                pr_warning("Could not create debugfs 'events' directory\n");
                return -ENOMEM;
        }

        /* ring buffer internal formats */
        trace_create_file("header_page", 0444, d_events,
                          ring_buffer_print_page_header,
                          &ftrace_show_header_fops);

        trace_create_file("header_event", 0444, d_events,
                          ring_buffer_print_entry_header,
                          &ftrace_show_header_fops);

        trace_create_file("enable", 0644, d_events,
                          tr, &ftrace_tr_enable_fops);

        tr->event_dir = d_events;

        return 0;
}

/**
 * event_trace_add_tracer - add an instance of a trace_array to events
 * @parent: The parent dentry to place the files/directories for events in
 * @tr: The trace array associated with these events
 *
 * When a new instance is created, it needs to set up its events
 * directory, as well as other files associated with events. It also
 * creates the event hierarchy in the @parent/events directory.
 *
 * Returns 0 on success.
 */
int event_trace_add_tracer(struct dentry *parent, struct trace_array *tr)
{
        int ret;

        mutex_lock(&event_mutex);

        ret = create_event_toplevel_files(parent, tr);
        if (ret)
                goto out;

        down_write(&trace_event_sem);
        __trace_add_event_dirs(tr);
        up_write(&trace_event_sem);

 out:
        mutex_unlock(&event_mutex);

        return ret;
}

/*
 * The top trace array already had its file descriptors created.
 * Now the files themselves need to be created.
 */
static __init int
early_event_add_tracer(struct dentry *parent, struct trace_array *tr)
{
        int ret;

        mutex_lock(&event_mutex);

        ret = create_event_toplevel_files(parent, tr);
        if (ret)
                goto out;

        down_write(&trace_event_sem);
        __trace_early_add_event_dirs(tr);
        up_write(&trace_event_sem);

 out:
        mutex_unlock(&event_mutex);

        return ret;
}

int event_trace_del_tracer(struct trace_array *tr)
{
        mutex_lock(&event_mutex);

        /* Disable any running events */
        __ftrace_set_clr_event_nolock(tr, NULL, NULL, NULL, 0);

        down_write(&trace_event_sem);
        __trace_remove_event_dirs(tr);
        debugfs_remove_recursive(tr->event_dir);
        up_write(&trace_event_sem);

        tr->event_dir = NULL;

        mutex_unlock(&event_mutex);

        return 0;
}

static __init int event_trace_memsetup(void)
{
        field_cachep = KMEM_CACHE(ftrace_event_field, SLAB_PANIC);
        file_cachep = KMEM_CACHE(ftrace_event_file, SLAB_PANIC);
        return 0;
}

static __init int event_trace_enable(void)
{
        struct trace_array *tr = top_trace_array();
        struct ftrace_event_call **iter, *call;
        char *buf = bootup_event_buf;
        char *token;
        int ret;

        for_each_event(iter, __start_ftrace_events, __stop_ftrace_events) {

                call = *iter;
                ret = event_init(call);
                if (!ret)
                        list_add(&call->list, &ftrace_events);
        }

        /*
         * We need the top trace array to have a working set of trace
         * points at early init, before the debug files and directories
         * are created. Create the file entries now, and attach them
         * to the actual file dentries later.
         */
        __trace_early_add_events(tr);

        while (true) {
                token = strsep(&buf, ",");

                if (!token)
                        break;
                if (!*token)
                        continue;

                ret = ftrace_set_clr_event(tr, token, 1);
                if (ret)
                        pr_warn("Failed to enable trace event: %s\n", token);
        }

        trace_printk_start_comm();

        register_event_cmds();

        return 0;
}

static __init int event_trace_init(void)
{
        struct trace_array *tr;
        struct dentry *d_tracer;
        struct dentry *entry;
        int ret;

        tr = top_trace_array();

        d_tracer = tracing_init_dentry();
        if (!d_tracer)
                return 0;

        entry = debugfs_create_file("available_events", 0444, d_tracer,
                                    tr, &ftrace_avail_fops);
        if (!entry)
                pr_warning("Could not create debugfs "
                           "'available_events' entry\n");

        if (trace_define_common_fields())
                pr_warning("tracing: Failed to allocate common fields");

        ret = early_event_add_tracer(d_tracer, tr);
        if (ret)
                return ret;

        ret = register_module_notifier(&trace_module_nb);
        if (ret)
                pr_warning("Failed to register trace events module notifier\n");

        return 0;
}
early_initcall(event_trace_memsetup);
core_initcall(event_trace_enable);
fs_initcall(event_trace_init);

#ifdef CONFIG_FTRACE_STARTUP_TEST

static DEFINE_SPINLOCK(test_spinlock);
static DEFINE_SPINLOCK(test_spinlock_irq);
static DEFINE_MUTEX(test_mutex);

static __init void test_work(struct work_struct *dummy)
{
        spin_lock(&test_spinlock);
        spin_lock_irq(&test_spinlock_irq);
        udelay(1);
        spin_unlock_irq(&test_spinlock_irq);
        spin_unlock(&test_spinlock);

        mutex_lock(&test_mutex);
        msleep(1);
        mutex_unlock(&test_mutex);
}

static __init int event_test_thread(void *unused)
{
        void *test_malloc;

        test_malloc = kmalloc(1234, GFP_KERNEL);
        if (!test_malloc)
                pr_info("failed to kmalloc\n");

        schedule_on_each_cpu(test_work);

        kfree(test_malloc);

        set_current_state(TASK_INTERRUPTIBLE);
        while (!kthread_should_stop())
                schedule();

        return 0;
}

/*
 * Do various things that may trigger events.
 */
static __init void event_test_stuff(void)
{
        struct task_struct *test_thread;

        test_thread = kthread_run(event_test_thread, NULL, "test-events");
        msleep(1);
        kthread_stop(test_thread);
}

/*
 * For every trace event defined, we will test each trace point separately,
 * and then by groups, and finally all trace points.
 */
static __init void event_trace_self_tests(void)
{
        struct ftrace_subsystem_dir *dir;
        struct ftrace_event_file *file;
        struct ftrace_event_call *call;
        struct event_subsystem *system;
        struct trace_array *tr;
        int ret;

        tr = top_trace_array();

        pr_info("Running tests on trace events:\n");

        list_for_each_entry(file, &tr->events, list) {

                call = file->event_call;

                /* Only test those that have a probe */
                if (!call->class || !call->class->probe)
                        continue;

/*
 * Testing syscall events here is pretty useless, but
 * we still do it if configured. But this is time consuming.
 * What we really need is a user thread to perform the
 * syscalls as we test.
 */
#ifndef CONFIG_EVENT_TRACE_TEST_SYSCALLS
                if (call->class->system &&
                    strcmp(call->class->system, "syscalls") == 0)
                        continue;
#endif

                pr_info("Testing event %s: ", call->name);

                /*
                 * If an event is already enabled, someone is using
                 * it and the self test should not be on.
                 */
                if (file->flags & FTRACE_EVENT_FL_ENABLED) {
                        pr_warning("Enabled event during self test!\n");
                        WARN_ON_ONCE(1);
                        continue;
                }

                ftrace_event_enable_disable(file, 1);
                event_test_stuff();
                ftrace_event_enable_disable(file, 0);

                pr_cont("OK\n");
        }

        /* Now test at the sub system level */

        pr_info("Running tests on trace event systems:\n");

        list_for_each_entry(dir, &tr->systems, list) {

                system = dir->subsystem;

                /* the ftrace system is special, skip it */
                if (strcmp(system->name, "ftrace") == 0)
                        continue;

                pr_info("Testing event system %s: ", system->name);

                ret = __ftrace_set_clr_event(tr, NULL, system->name, NULL, 1);
                if (WARN_ON_ONCE(ret)) {
                        pr_warning("error enabling system %s\n",
                                   system->name);
                        continue;
                }

                event_test_stuff();

                ret = __ftrace_set_clr_event(tr, NULL, system->name, NULL, 0);
                if (WARN_ON_ONCE(ret)) {
                        pr_warning("error disabling system %s\n",
                                   system->name);
                        continue;
                }

                pr_cont("OK\n");
        }

        /* Test with all events enabled */

        pr_info("Running tests on all trace events:\n");
        pr_info("Testing all events: ");

        ret = __ftrace_set_clr_event(tr, NULL, NULL, NULL, 1);
        if (WARN_ON_ONCE(ret)) {
                pr_warning("error enabling all events\n");
                return;
        }

        event_test_stuff();

        ret = __ftrace_set_clr_event(tr, NULL, NULL, NULL, 0);
        if (WARN_ON_ONCE(ret)) {
                pr_warning("error disabling all events\n");
                return;
        }

        pr_cont("OK\n");
}

#ifdef CONFIG_FUNCTION_TRACER

static DEFINE_PER_CPU(atomic_t, ftrace_test_event_disable);

static void __init
function_test_events_call(unsigned long ip, unsigned long parent_ip,
                          struct ftrace_ops *op, struct pt_regs *pt_regs)
{
        struct ring_buffer_event *event;
        struct ring_buffer *buffer;
        struct ftrace_entry *entry;
        unsigned long flags;
        long disabled;
        int cpu;
        int pc;

        pc = preempt_count();
        preempt_disable_notrace();
        cpu = raw_smp_processor_id();
        disabled = atomic_inc_return(&per_cpu(ftrace_test_event_disable, cpu));

        if (disabled != 1)
                goto out;

        local_save_flags(flags);

        event = trace_current_buffer_lock_reserve(&buffer,
                                                  TRACE_FN, sizeof(*entry),
                                                  flags, pc);
        if (!event)
                goto out;
        entry = ring_buffer_event_data(event);
        entry->ip = ip;
        entry->parent_ip = parent_ip;

        trace_buffer_unlock_commit(buffer, event, flags, pc);

 out:
        atomic_dec(&per_cpu(ftrace_test_event_disable, cpu));
        preempt_enable_notrace();
}

static struct ftrace_ops trace_ops __initdata =
{
        .func = function_test_events_call,
        .flags = FTRACE_OPS_FL_RECURSION_SAFE,
};

static __init void event_trace_self_test_with_function(void)
{
        int ret;

        ret = register_ftrace_function(&trace_ops);
        if (WARN_ON(ret < 0)) {
                pr_info("Failed to enable function tracer for event tests\n");
                return;
        }
        pr_info("Running tests again, along with the function tracer\n");
        event_trace_self_tests();
        unregister_ftrace_function(&trace_ops);
}
#else
static __init void event_trace_self_test_with_function(void)
{
}
#endif

static __init int event_trace_self_tests_init(void)
{
        if (!tracing_selftest_disabled) {
                event_trace_self_tests();
                event_trace_self_test_with_function();
        }

        return 0;
}

late_initcall(event_trace_self_tests_init);

#endif /* CONFIG_FTRACE_STARTUP_TEST */