tracing: Don't allocate common fields for every trace event
[firefly-linux-kernel-4.4.55.git] / kernel / trace / trace_events.c
index acc0f55742c321c15c332b6ceed79403c411c8dc..d3b4bdf00b39733a830e106c0aeda6cf0f0d0503 100644 (file)
@@ -28,6 +28,7 @@
 DEFINE_MUTEX(event_mutex);
 
 LIST_HEAD(ftrace_events);
+LIST_HEAD(ftrace_common_fields);
 
 struct list_head *
 trace_get_fields(struct ftrace_event_call *event_call)
@@ -37,15 +38,11 @@ trace_get_fields(struct ftrace_event_call *event_call)
        return event_call->class->get_fields(event_call);
 }
 
-int trace_define_field(struct ftrace_event_call *call, const char *type,
-                      const char *name, int offset, int size, int is_signed,
-                      int filter_type)
+static int __trace_define_field(struct list_head *head, const char *type,
+                               const char *name, int offset, int size,
+                               int is_signed, int filter_type)
 {
        struct ftrace_event_field *field;
-       struct list_head *head;
-
-       if (WARN_ON(!call->class))
-               return 0;
 
        field = kzalloc(sizeof(*field), GFP_KERNEL);
        if (!field)
@@ -68,7 +65,6 @@ int trace_define_field(struct ftrace_event_call *call, const char *type,
        field->size = size;
        field->is_signed = is_signed;
 
-       head = trace_get_fields(call);
        list_add(&field->link, head);
 
        return 0;
@@ -80,17 +76,32 @@ err:
 
        return -ENOMEM;
 }
+
+int trace_define_field(struct ftrace_event_call *call, const char *type,
+                      const char *name, int offset, int size, int is_signed,
+                      int filter_type)
+{
+       struct list_head *head;
+
+       if (WARN_ON(!call->class))
+               return 0;
+
+       head = trace_get_fields(call);
+       return __trace_define_field(head, type, name, offset, size,
+                                   is_signed, filter_type);
+}
 EXPORT_SYMBOL_GPL(trace_define_field);
 
 #define __common_field(type, item)                                     \
-       ret = trace_define_field(call, #type, "common_" #item,          \
-                                offsetof(typeof(ent), item),           \
-                                sizeof(ent.item),                      \
-                                is_signed_type(type), FILTER_OTHER);   \
+       ret = __trace_define_field(&ftrace_common_fields, #type,        \
+                                  "common_" #item,                     \
+                                  offsetof(typeof(ent), item),         \
+                                  sizeof(ent.item),                    \
+                                  is_signed_type(type), FILTER_OTHER); \
        if (ret)                                                        \
                return ret;
 
-static int trace_define_common_fields(struct ftrace_event_call *call)
+static int trace_define_common_fields(void)
 {
        int ret;
        struct trace_entry ent;
@@ -122,10 +133,9 @@ int trace_event_raw_init(struct ftrace_event_call *call)
 {
        int id;
 
-       id = register_ftrace_event(call->event);
+       id = register_ftrace_event(&call->event);
        if (!id)
                return -ENODEV;
-       call->id = id;
 
        return 0;
 }
@@ -138,8 +148,8 @@ static int ftrace_event_enable_disable(struct ftrace_event_call *call,
 
        switch (enable) {
        case 0:
-               if (call->enabled) {
-                       call->enabled = 0;
+               if (call->flags & TRACE_EVENT_FL_ENABLED) {
+                       call->flags &= ~TRACE_EVENT_FL_ENABLED;
                        tracing_stop_cmdline_record();
                        if (call->class->reg)
                                call->class->reg(call, TRACE_REG_UNREGISTER);
@@ -150,7 +160,7 @@ static int ftrace_event_enable_disable(struct ftrace_event_call *call,
                }
                break;
        case 1:
-               if (!call->enabled) {
+               if (!(call->flags & TRACE_EVENT_FL_ENABLED)) {
                        tracing_start_cmdline_record();
                        if (call->class->reg)
                                ret = call->class->reg(call, TRACE_REG_REGISTER);
@@ -164,7 +174,7 @@ static int ftrace_event_enable_disable(struct ftrace_event_call *call,
                                        "%s\n", call->name);
                                break;
                        }
-                       call->enabled = 1;
+                       call->flags |= TRACE_EVENT_FL_ENABLED;
                }
                break;
        }
@@ -353,7 +363,7 @@ s_next(struct seq_file *m, void *v, loff_t *pos)
        (*pos)++;
 
        list_for_each_entry_continue(call, &ftrace_events, list) {
-               if (call->enabled)
+               if (call->flags & TRACE_EVENT_FL_ENABLED)
                        return call;
        }
 
@@ -412,7 +422,7 @@ event_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
        struct ftrace_event_call *call = filp->private_data;
        char *buf;
 
-       if (call->enabled)
+       if (call->flags & TRACE_EVENT_FL_ENABLED)
                buf = "1\n";
        else
                buf = "0\n";
@@ -487,7 +497,7 @@ system_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
                 * or if all events or cleared, or if we have
                 * a mixture.
                 */
-               set |= (1 << !!call->enabled);
+               set |= (1 << !!(call->flags & TRACE_EVENT_FL_ENABLED));
 
                /*
                 * If we have a mixture, no need to look further.
@@ -545,32 +555,10 @@ out:
        return ret;
 }
 
-static ssize_t
-event_format_read(struct file *filp, char __user *ubuf, size_t cnt,
-                 loff_t *ppos)
+static void print_event_fields(struct trace_seq *s, struct list_head *head)
 {
-       struct ftrace_event_call *call = filp->private_data;
        struct ftrace_event_field *field;
-       struct list_head *head;
-       struct trace_seq *s;
-       int common_field_count = 5;
-       char *buf;
-       int r = 0;
 
-       if (*ppos)
-               return 0;
-
-       s = kmalloc(sizeof(*s), GFP_KERNEL);
-       if (!s)
-               return -ENOMEM;
-
-       trace_seq_init(s);
-
-       trace_seq_printf(s, "name: %s\n", call->name);
-       trace_seq_printf(s, "ID: %d\n", call->id);
-       trace_seq_printf(s, "format:\n");
-
-       head = trace_get_fields(call);
        list_for_each_entry_reverse(field, head, link) {
                /*
                 * Smartly shows the array type(except dynamic array).
@@ -585,29 +573,54 @@ event_format_read(struct file *filp, char __user *ubuf, size_t cnt,
                        array_descriptor = NULL;
 
                if (!array_descriptor) {
-                       r = trace_seq_printf(s, "\tfield:%s %s;\toffset:%u;"
+                       trace_seq_printf(s, "\tfield:%s %s;\toffset:%u;"
                                        "\tsize:%u;\tsigned:%d;\n",
                                        field->type, field->name, field->offset,
                                        field->size, !!field->is_signed);
                } else {
-                       r = trace_seq_printf(s, "\tfield:%.*s %s%s;\toffset:%u;"
+                       trace_seq_printf(s, "\tfield:%.*s %s%s;\toffset:%u;"
                                        "\tsize:%u;\tsigned:%d;\n",
                                        (int)(array_descriptor - field->type),
                                        field->type, field->name,
                                        array_descriptor, field->offset,
                                        field->size, !!field->is_signed);
                }
+       }
+}
 
-               if (--common_field_count == 0)
-                       r = trace_seq_printf(s, "\n");
+static ssize_t
+event_format_read(struct file *filp, char __user *ubuf, size_t cnt,
+                 loff_t *ppos)
+{
+       struct ftrace_event_call *call = filp->private_data;
+       struct list_head *head;
+       struct trace_seq *s;
+       char *buf;
+       int r;
 
-               if (!r)
-                       break;
-       }
+       if (*ppos)
+               return 0;
 
-       if (r)
-               r = trace_seq_printf(s, "\nprint fmt: %s\n",
-                               call->print_fmt);
+       s = kmalloc(sizeof(*s), GFP_KERNEL);
+       if (!s)
+               return -ENOMEM;
+
+       trace_seq_init(s);
+
+       trace_seq_printf(s, "name: %s\n", call->name);
+       trace_seq_printf(s, "ID: %d\n", call->event.type);
+       trace_seq_printf(s, "format:\n");
+
+       /* print common fields */
+       print_event_fields(s, &ftrace_common_fields);
+
+       trace_seq_putc(s, '\n');
+
+       /* print event specific fields */
+       head = trace_get_fields(call);
+       print_event_fields(s, head);
+
+       r = trace_seq_printf(s, "\nprint fmt: %s\n", call->print_fmt);
 
        if (!r) {
                /*
@@ -641,7 +654,7 @@ event_id_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
                return -ENOMEM;
 
        trace_seq_init(s);
-       trace_seq_printf(s, "%d\n", call->id);
+       trace_seq_printf(s, "%d\n", call->event.type);
 
        r = simple_read_from_buffer(ubuf, cnt, ppos,
                                    s->buffer, s->len);
@@ -969,7 +982,7 @@ event_create_dir(struct ftrace_event_call *call, struct dentry *d_events,
                                  enable);
 
 #ifdef CONFIG_PERF_EVENTS
-       if (call->id && (call->class->perf_probe || call->class->reg))
+       if (call->event.type && (call->class->perf_probe || call->class->reg))
                trace_create_file("id", 0444, call->dir, call,
                                  id);
 #endif
@@ -981,9 +994,7 @@ event_create_dir(struct ftrace_event_call *call, struct dentry *d_events,
                 */
                head = trace_get_fields(call);
                if (list_empty(head)) {
-                       ret = trace_define_common_fields(call);
-                       if (!ret)
-                               ret = call->class->define_fields(call);
+                       ret = call->class->define_fields(call);
                        if (ret < 0) {
                                pr_warning("Could not initialize trace point"
                                           " events/%s\n", call->name);
@@ -1008,8 +1019,8 @@ static int __trace_add_event_call(struct ftrace_event_call *call)
        if (!call->name)
                return -EINVAL;
 
-       if (call->raw_init) {
-               ret = call->raw_init(call);
+       if (call->class->raw_init) {
+               ret = call->class->raw_init(call);
                if (ret < 0) {
                        if (ret != -ENOSYS)
                                pr_warning("Could not initialize trace "
@@ -1073,8 +1084,8 @@ static void remove_subsystem_dir(const char *name)
 static void __trace_remove_event_call(struct ftrace_event_call *call)
 {
        ftrace_event_enable_disable(call, 0);
-       if (call->event)
-               __unregister_ftrace_event(call->event);
+       if (call->event.funcs)
+               __unregister_ftrace_event(&call->event);
        debugfs_remove_recursive(call->dir);
        list_del(&call->list);
        trace_destroy_fields(call);
@@ -1170,8 +1181,8 @@ static void trace_module_add_events(struct module *mod)
                /* The linker may leave blanks */
                if (!call->name)
                        continue;
-               if (call->raw_init) {
-                       ret = call->raw_init(call);
+               if (call->class->raw_init) {
+                       ret = call->class->raw_init(call);
                        if (ret < 0) {
                                if (ret != -ENOSYS)
                                        pr_warning("Could not initialize trace "
@@ -1320,12 +1331,15 @@ static __init int event_trace_init(void)
        trace_create_file("enable", 0644, d_events,
                          NULL, &ftrace_system_enable_fops);
 
+       if (trace_define_common_fields())
+               pr_warning("tracing: Failed to allocate common fields\n");
+
        for_each_event(call, __start_ftrace_events, __stop_ftrace_events) {
                /* The linker may leave blanks */
                if (!call->name)
                        continue;
-               if (call->raw_init) {
-                       ret = call->raw_init(call);
+               if (call->class->raw_init) {
+                       ret = call->class->raw_init(call);
                        if (ret < 0) {
                                if (ret != -ENOSYS)
                                        pr_warning("Could not initialize trace "
@@ -1448,7 +1462,7 @@ static __init void event_trace_self_tests(void)
                 * If an event is already enabled, someone is using
                 * it and the self test should not be on.
                 */
-               if (call->enabled) {
+               if (call->flags & TRACE_EVENT_FL_ENABLED) {
                        pr_warning("Enabled event during self test!\n");
                        WARN_ON_ONCE(1);
                        continue;
@@ -1525,12 +1539,11 @@ function_test_events_call(unsigned long ip, unsigned long parent_ip)
        struct ftrace_entry *entry;
        unsigned long flags;
        long disabled;
-       int resched;
        int cpu;
        int pc;
 
        pc = preempt_count();
-       resched = ftrace_preempt_disable();
+       preempt_disable_notrace();
        cpu = raw_smp_processor_id();
        disabled = atomic_inc_return(&per_cpu(ftrace_test_event_disable, cpu));
 
@@ -1552,7 +1565,7 @@ function_test_events_call(unsigned long ip, unsigned long parent_ip)
 
  out:
        atomic_dec(&per_cpu(ftrace_test_event_disable, cpu));
-       ftrace_preempt_enable(resched);
+       preempt_enable_notrace();
 }
 
 static struct ftrace_ops trace_ops __initdata  =