Merge branch 'x86-cpu-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git...
[firefly-linux-kernel-4.4.55.git] / kernel/perf_event.c
index f928878a1c17c173334b80e0cca4f38ddaefc08a..f309e8014c7853105d1a38bc662f10164dc4d3d1 100644
@@ -34,7 +34,7 @@
 
 #include <asm/irq_regs.h>
 
-static atomic_t nr_events __read_mostly;
+atomic_t perf_task_events __read_mostly;
 static atomic_t nr_mmap_events __read_mostly;
 static atomic_t nr_comm_events __read_mostly;
 static atomic_t nr_task_events __read_mostly;
@@ -1311,8 +1311,8 @@ void perf_event_context_sched_out(struct task_struct *task, int ctxn,
 * accessing the event control register. If an NMI hits, then it will
  * not restart the event.
  */
-void perf_event_task_sched_out(struct task_struct *task,
-                              struct task_struct *next)
+void __perf_event_task_sched_out(struct task_struct *task,
+                                struct task_struct *next)
 {
        int ctxn;
 
@@ -1337,14 +1337,6 @@ static void task_ctx_sched_out(struct perf_event_context *ctx,
        cpuctx->task_ctx = NULL;
 }
 
-/*
- * Called with IRQs disabled
- */
-static void __perf_event_task_sched_out(struct perf_event_context *ctx)
-{
-       task_ctx_sched_out(ctx, EVENT_ALL);
-}
-
 /*
  * Called with IRQs disabled
  */
@@ -1494,7 +1486,7 @@ void perf_event_context_sched_in(struct perf_event_context *ctx)
 * accessing the event control register. If an NMI hits, then it will
  * keep the event running.
  */
-void perf_event_task_sched_in(struct task_struct *task)
+void __perf_event_task_sched_in(struct task_struct *task)
 {
        struct perf_event_context *ctx;
        int ctxn;
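
Note: the two renames above free the unprefixed perf_event_task_sched_in/out
names for static inline wrappers in the header, which consult the new
perf_task_events key before entering the slow path. A minimal sketch of such
a wrapper, assuming a COND_STMT-style jump-label guard (the header change
itself is not part of this diff):

	/* Sketch only: guard the context-switch slow path behind the
	 * perf_task_events jump-label key; an idle key costs a nop. */
	extern atomic_t perf_task_events;

	static inline void perf_event_task_sched_in(struct task_struct *task)
	{
		COND_STMT(&perf_task_events, __perf_event_task_sched_in(task));
	}

With jump labels enabled, jump_label_inc()/jump_label_dec() (used further
down in this patch) patch the branch site when the count crosses zero, so
tasks with no events pay essentially nothing on context switch.
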
@@ -2216,7 +2208,8 @@ static void free_event(struct perf_event *event)
        irq_work_sync(&event->pending);
 
        if (!event->parent) {
-               atomic_dec(&nr_events);
+               if (event->attach_state & PERF_ATTACH_TASK)
+                       jump_label_dec(&perf_task_events);
                if (event->attr.mmap || event->attr.mmap_data)
                        atomic_dec(&nr_mmap_events);
                if (event->attr.comm)
@@ -2516,15 +2509,13 @@ static void perf_event_for_each(struct perf_event *event,
 static int perf_event_period(struct perf_event *event, u64 __user *arg)
 {
        struct perf_event_context *ctx = event->ctx;
-       unsigned long size;
        int ret = 0;
        u64 value;
 
        if (!event->attr.sample_period)
                return -EINVAL;
 
-       size = copy_from_user(&value, arg, sizeof(value));
-       if (size != sizeof(value))
+       if (copy_from_user(&value, arg, sizeof(value)))
                return -EFAULT;
 
        if (!value)
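
Note: this hunk is a bug fix, not just a cleanup. copy_from_user() returns
the number of bytes it failed to copy, i.e. 0 on success, so the removed
test "size != sizeof(value)" rejected the success case while a completely
faulting copy (return value sizeof(value)) passed it. The standard idiom
treats any nonzero return as a fault:

	u64 value;

	/* copy_from_user() returns bytes NOT copied: 0 means success. */
	if (copy_from_user(&value, arg, sizeof(value)))
		return -EFAULT;
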
@@ -4676,7 +4667,7 @@ static void sw_perf_event_destroy(struct perf_event *event)
 
        WARN_ON(event->parent);
 
-       atomic_dec(&perf_swevent_enabled[event_id]);
+       jump_label_dec(&perf_swevent_enabled[event_id]);
        swevent_hlist_put(event);
 }
 
@@ -4706,7 +4697,7 @@ static int perf_swevent_init(struct perf_event *event)
                if (err)
                        return err;
 
-               atomic_inc(&perf_swevent_enabled[event_id]);
+               jump_label_inc(&perf_swevent_enabled[event_id]);
                event->destroy = sw_perf_event_destroy;
        }
 
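
Note: perf_swevent_enabled[] gets the same treatment as perf_task_events:
the inc/dec pair keeps a per-event-id reference count and patches the test
site when it crosses zero. The fast-path consumer lives in the header,
outside this diff; it presumably has roughly this shape (a hedged sketch,
not the verbatim header code):

	/* Sketch: a dormant software event id costs a patched-out
	 * branch on the fast path, not an atomic_read(). */
	static __always_inline void
	perf_sw_event(u32 event_id, u64 nr, int nmi,
		      struct pt_regs *regs, u64 addr)
	{
		JUMP_LABEL(&perf_swevent_enabled[event_id], have_event);
		return;

	have_event:
		__perf_sw_event(event_id, nr, nmi, regs, addr);
	}
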
@@ -5255,9 +5246,10 @@ unlock:
  */
 static struct perf_event *
 perf_event_alloc(struct perf_event_attr *attr, int cpu,
-                  struct perf_event *group_leader,
-                  struct perf_event *parent_event,
-                  perf_overflow_handler_t overflow_handler)
+                struct task_struct *task,
+                struct perf_event *group_leader,
+                struct perf_event *parent_event,
+                perf_overflow_handler_t overflow_handler)
 {
        struct pmu *pmu;
        struct perf_event *event;
@@ -5299,6 +5291,17 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
 
        event->state            = PERF_EVENT_STATE_INACTIVE;
 
+       if (task) {
+               event->attach_state = PERF_ATTACH_TASK;
+#ifdef CONFIG_HAVE_HW_BREAKPOINT
+               /*
+                * hw_breakpoint needs to know its target task right at
+                * creation time; stash it for the breakpoint PMU.
+                */
+               if (attr->type == PERF_TYPE_BREAKPOINT)
+                       event->hw.bp_target = task;
+#endif
+       }
+
        if (!overflow_handler && parent_event)
                overflow_handler = parent_event->overflow_handler;
        
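
Note: recording the task at allocation time serves two consumers.
PERF_ATTACH_TASK lets the accounting in free_event() above (and in
perf_event_alloc() below) classify task events without touching a context,
and bp_target hands the breakpoint PMU its target directly. A hedged sketch
of the breakpoint side, which lives in hw_breakpoint.c and is assumed here
rather than shown (bp_task_head and hw.bp_list are assumed names):

	/* Assumed consumer: per-task breakpoint slot accounting reads
	 * the target straight off the hardware event. */
	static int task_bp_pinned(struct perf_event *bp)
	{
		struct task_struct *tsk = bp->hw.bp_target;
		struct perf_event *iter;
		int count = 0;

		list_for_each_entry(iter, &bp_task_head, hw.bp_list)
			if (iter->hw.bp_target == tsk)
				count++;

		return count;
	}
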
@@ -5342,7 +5345,8 @@ done:
        event->pmu = pmu;
 
        if (!event->parent) {
-               atomic_inc(&nr_events);
+               if (event->attach_state & PERF_ATTACH_TASK)
+                       jump_label_inc(&perf_task_events);
                if (event->attr.mmap || event->attr.mmap_data)
                        atomic_inc(&nr_mmap_events);
                if (event->attr.comm)
@@ -5551,10 +5555,18 @@ SYSCALL_DEFINE5(perf_event_open,
                        group_leader = NULL;
        }
 
-       event = perf_event_alloc(&attr, cpu, group_leader, NULL, NULL);
+       if (pid != -1) {
+               task = find_lively_task_by_vpid(pid);
+               if (IS_ERR(task)) {
+                       err = PTR_ERR(task);
+                       goto err_group_fd;
+               }
+       }
+
+       event = perf_event_alloc(&attr, cpu, task, group_leader, NULL, NULL);
        if (IS_ERR(event)) {
                err = PTR_ERR(event);
-               goto err_fd;
+               goto err_task;
        }
 
        /*
@@ -5586,21 +5598,13 @@ SYSCALL_DEFINE5(perf_event_open,
                }
        }
 
-       if (pid != -1) {
-               task = find_lively_task_by_vpid(pid);
-               if (IS_ERR(task)) {
-                       err = PTR_ERR(task);
-                       goto err_group_fd;
-               }
-       }
-
        /*
         * Get the target context (task or percpu):
         */
        ctx = find_get_context(pmu, task, cpu);
        if (IS_ERR(ctx)) {
                err = PTR_ERR(ctx);
-               goto err_task;
+               goto err_alloc;
        }
 
        /*
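
Note: the block removed here is the same task lookup that moved up before
perf_event_alloc() in the previous hunk; the allocator now needs the task,
so the lookup must precede it. The simplified success path after the
reorder:

	/* Simplified flow of perf_event_open() after this change
	 * (error handling elided; see the unwind labels below): */
	task = (pid != -1) ? find_lively_task_by_vpid(pid) : NULL;
	event = perf_event_alloc(&attr, cpu, task, group_leader, NULL, NULL);
	ctx = find_get_context(pmu, task, cpu);
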
@@ -5696,12 +5700,13 @@ SYSCALL_DEFINE5(perf_event_open,
 
 err_context:
        put_ctx(ctx);
+err_alloc:
+       free_event(event);
 err_task:
        if (task)
                put_task_struct(task);
 err_group_fd:
        fput_light(group_file, fput_needed);
-       free_event(event);
 err_fd:
        put_unused_fd(event_fd);
        return err;
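
Note: the labels now unwind in exact reverse of the new acquisition order.
Moving free_event() from the common tail up to its own err_alloc label
matters because err_task and err_group_fd can now be reached before the
event exists. Consolidated view of the resulting unwind:

	/* Each label undoes one acquisition, newest first. */
	err_context:
		put_ctx(ctx);
	err_alloc:
		free_event(event);		/* event exists from here down */
	err_task:
		if (task)
			put_task_struct(task);	/* NULL for pure cpu events */
	err_group_fd:
		fput_light(group_file, fput_needed);
	err_fd:
		put_unused_fd(event_fd);
		return err;
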
@@ -5727,7 +5732,7 @@ perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu,
         * Get the target context (task or percpu):
         */
 
-       event = perf_event_alloc(attr, cpu, NULL, NULL, overflow_handler);
+       event = perf_event_alloc(attr, cpu, task, NULL, NULL, overflow_handler);
        if (IS_ERR(event)) {
                err = PTR_ERR(event);
                goto err;
@@ -5836,7 +5841,7 @@ static void perf_event_exit_task_context(struct task_struct *child, int ctxn)
         * our context.
         */
        child_ctx = child->perf_event_ctxp[ctxn];
-       __perf_event_task_sched_out(child_ctx);
+       task_ctx_sched_out(child_ctx, EVENT_ALL);
 
        /*
         * Take the context lock here so that if find_get_context is
@@ -5995,6 +6000,7 @@ inherit_event(struct perf_event *parent_event,
 
        child_event = perf_event_alloc(&parent_event->attr,
                                           parent_event->cpu,
+                                          child,
                                           group_leader, parent_event,
                                           NULL);
        if (IS_ERR(child_event))