perf: fix stack data leak
diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c
index 02994a719e2711f4a09de6b96f02d87ec92899af..7bc888dfd06ab890c2a55c9b3211f0785146e678 100644
--- a/kernel/perf_counter.c
+++ b/kernel/perf_counter.c
@@ -236,6 +236,8 @@ list_add_counter(struct perf_counter *counter, struct perf_counter_context *ctx)
 
        list_add_rcu(&counter->event_entry, &ctx->event_list);
        ctx->nr_counters++;
+       if (counter->attr.inherit_stat)
+               ctx->nr_stat++;
 }
 
 /*
@@ -250,6 +252,8 @@ list_del_counter(struct perf_counter *counter, struct perf_counter_context *ctx)
        if (list_empty(&counter->list_entry))
                return;
        ctx->nr_counters--;
+       if (counter->attr.inherit_stat)
+               ctx->nr_stat--;
 
        list_del_init(&counter->list_entry);
        list_del_rcu(&counter->event_entry);
@@ -1006,6 +1010,81 @@ static int context_equiv(struct perf_counter_context *ctx1,
                && !ctx1->pin_count && !ctx2->pin_count;
 }
 
+static void __perf_counter_read(void *counter);
+
+static void __perf_counter_sync_stat(struct perf_counter *counter,
+                                    struct perf_counter *next_counter)
+{
+       u64 value;
+
+       if (!counter->attr.inherit_stat)
+               return;
+
+       /*
+        * Update the counter value, we cannot use perf_counter_read()
+        * because we're in the middle of a context switch and have IRQs
+        * disabled, which upsets smp_call_function_single(), however
+        * we know the counter must be on the current CPU, therefore we
+        * don't need to use it.
+        */
+       switch (counter->state) {
+       case PERF_COUNTER_STATE_ACTIVE:
+               __perf_counter_read(counter);
+               break;
+
+       case PERF_COUNTER_STATE_INACTIVE:
+               update_counter_times(counter);
+               break;
+
+       default:
+               break;
+       }
+
+       /*
+        * In order to keep per-task stats reliable we need to flip the counter
+        * values when we flip the contexts.
+        */
+       value = atomic64_read(&next_counter->count);
+       value = atomic64_xchg(&counter->count, value);
+       atomic64_set(&next_counter->count, value);
+
+       swap(counter->total_time_enabled, next_counter->total_time_enabled);
+       swap(counter->total_time_running, next_counter->total_time_running);
+
+       /*
+        * Since we swizzled the values, update the user visible data too.
+        */
+       perf_counter_update_userpage(counter);
+       perf_counter_update_userpage(next_counter);
+}
+
+#define list_next_entry(pos, member) \
+       list_entry(pos->member.next, typeof(*pos), member)
+
+static void perf_counter_sync_stat(struct perf_counter_context *ctx,
+                                  struct perf_counter_context *next_ctx)
+{
+       struct perf_counter *counter, *next_counter;
+
+       if (!ctx->nr_stat)
+               return;
+
+       counter = list_first_entry(&ctx->event_list,
+                                  struct perf_counter, event_entry);
+
+       next_counter = list_first_entry(&next_ctx->event_list,
+                                       struct perf_counter, event_entry);
+
+       while (&counter->event_entry != &ctx->event_list &&
+              &next_counter->event_entry != &next_ctx->event_list) {
+
+               __perf_counter_sync_stat(counter, next_counter);
+
+               counter = list_next_entry(counter, event_entry);
+               next_counter = list_next_entry(next_counter, event_entry);
+       }
+}
+
 /*
  * Called from scheduler to remove the counters of the current task,
  * with interrupts disabled.
@@ -1061,6 +1140,8 @@ void perf_counter_task_sched_out(struct task_struct *task,
                        ctx->task = next;
                        next_ctx->task = task;
                        do_switch = 0;
+
+                       perf_counter_sync_stat(ctx, next_ctx);
                }
                spin_unlock(&next_ctx->lock);
                spin_unlock(&ctx->lock);
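
Annotation (not part of the patch): a minimal userspace sketch of how a tool would request the per-task statistics this hunk wires up. It is written against the later perf_event_attr / perf_event_open naming; in this tree the same fields live in perf_counter_attr and the syscall is sys_perf_counter_open. open_inherited_counter() is a hypothetical helper.

#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <unistd.h>

/* Count cycles across a task and its children, keeping per-task values. */
static int open_inherited_counter(pid_t pid)
{
	struct perf_event_attr attr = {
		.type		= PERF_TYPE_HARDWARE,
		.size		= sizeof(attr),
		.config		= PERF_COUNT_HW_CPU_CYCLES,
		.inherit	= 1,	/* follow children across fork() */
		.inherit_stat	= 1,	/* per-task values; bumps ctx->nr_stat above */
	};

	return syscall(__NR_perf_event_open, &attr, pid, -1, -1, 0);
}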
@@ -1347,10 +1428,57 @@ void perf_counter_task_tick(struct task_struct *curr, int cpu)
                perf_counter_task_sched_in(curr, cpu);
 }
 
+/*
+ * Enable all of a task's counters that have been marked enable-on-exec.
+ * This expects task == current.
+ */
+static void perf_counter_enable_on_exec(struct task_struct *task)
+{
+       struct perf_counter_context *ctx;
+       struct perf_counter *counter;
+       unsigned long flags;
+       int enabled = 0;
+
+       local_irq_save(flags);
+       ctx = task->perf_counter_ctxp;
+       if (!ctx || !ctx->nr_counters)
+               goto out;
+
+       __perf_counter_task_sched_out(ctx);
+
+       spin_lock(&ctx->lock);
+
+       list_for_each_entry(counter, &ctx->counter_list, list_entry) {
+               if (!counter->attr.enable_on_exec)
+                       continue;
+               counter->attr.enable_on_exec = 0;
+               if (counter->state >= PERF_COUNTER_STATE_INACTIVE)
+                       continue;
+               counter->state = PERF_COUNTER_STATE_INACTIVE;
+               counter->tstamp_enabled =
+                       ctx->time - counter->total_time_enabled;
+               enabled = 1;
+       }
+
+       /*
+        * Unclone this context if we enabled any counter.
+        */
+       if (enabled && ctx->parent_ctx) {
+               put_ctx(ctx->parent_ctx);
+               ctx->parent_ctx = NULL;
+       }
+
+       spin_unlock(&ctx->lock);
+
+       perf_counter_task_sched_in(task, smp_processor_id());
+ out:
+       local_irq_restore(flags);
+}
+
 /*
  * Cross CPU call to read the hardware counter
  */
-static void __read(void *info)
+static void __perf_counter_read(void *info)
 {
        struct perf_counter *counter = info;
        struct perf_counter_context *ctx = counter->ctx;
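
Annotation (not part of the patch): enable_on_exec is aimed at "perf stat"-style launching, where the counter should not measure the tool's own fork/exec setup. A hedged sketch, again in the later perf_event_* naming; open_exec_counter() and child_pid are illustrative, not from the patch.

#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <unistd.h>

static int open_exec_counter(pid_t child_pid)
{
	struct perf_event_attr attr = {
		.type		= PERF_TYPE_HARDWARE,
		.size		= sizeof(attr),
		.config		= PERF_COUNT_HW_INSTRUCTIONS,
		.disabled	= 1,	/* stay off during the fork()/exec() setup */
		.enable_on_exec	= 1,	/* perf_counter_enable_on_exec() above turns it on at exec */
	};

	/* attach to the forked child; counting starts once it exec()s the workload */
	return syscall(__NR_perf_event_open, &attr, child_pid, -1, -1, 0);
}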
@@ -1372,7 +1500,7 @@ static u64 perf_counter_read(struct perf_counter *counter)
         */
        if (counter->state == PERF_COUNTER_STATE_ACTIVE) {
                smp_call_function_single(counter->oncpu,
-                                        __read, counter, 1);
+                                        __perf_counter_read, counter, 1);
        } else if (counter->state == PERF_COUNTER_STATE_INACTIVE) {
                update_counter_times(counter);
        }
@@ -1892,7 +2020,7 @@ fail:
 
 static void perf_mmap_free_page(unsigned long addr)
 {
-       struct page *page = virt_to_page(addr);
+       struct page *page = virt_to_page((void *)addr);
 
        page->mapping = NULL;
        __free_page(page);
@@ -2499,15 +2627,14 @@ static void perf_counter_output(struct perf_counter *counter, int nmi,
                u32 cpu, reserved;
        } cpu_entry;
 
-       header.type = 0;
+       header.type = PERF_EVENT_SAMPLE;
        header.size = sizeof(header);
 
-       header.misc = PERF_EVENT_MISC_OVERFLOW;
+       header.misc = 0;
        header.misc |= perf_misc_flags(data->regs);
 
        if (sample_type & PERF_SAMPLE_IP) {
                ip = perf_instruction_pointer(data->regs);
-               header.type |= PERF_SAMPLE_IP;
                header.size += sizeof(ip);
        }
 
@@ -2516,7 +2643,6 @@ static void perf_counter_output(struct perf_counter *counter, int nmi,
                tid_entry.pid = perf_counter_pid(counter, current);
                tid_entry.tid = perf_counter_tid(counter, current);
 
-               header.type |= PERF_SAMPLE_TID;
                header.size += sizeof(tid_entry);
        }
 
@@ -2526,34 +2652,26 @@ static void perf_counter_output(struct perf_counter *counter, int nmi,
                 */
                time = sched_clock();
 
-               header.type |= PERF_SAMPLE_TIME;
                header.size += sizeof(u64);
        }
 
-       if (sample_type & PERF_SAMPLE_ADDR) {
-               header.type |= PERF_SAMPLE_ADDR;
+       if (sample_type & PERF_SAMPLE_ADDR)
                header.size += sizeof(u64);
-       }
 
-       if (sample_type & PERF_SAMPLE_ID) {
-               header.type |= PERF_SAMPLE_ID;
+       if (sample_type & PERF_SAMPLE_ID)
                header.size += sizeof(u64);
-       }
 
        if (sample_type & PERF_SAMPLE_CPU) {
-               header.type |= PERF_SAMPLE_CPU;
                header.size += sizeof(cpu_entry);
 
                cpu_entry.cpu = raw_smp_processor_id();
+               cpu_entry.reserved = 0;
        }
 
-       if (sample_type & PERF_SAMPLE_PERIOD) {
-               header.type |= PERF_SAMPLE_PERIOD;
+       if (sample_type & PERF_SAMPLE_PERIOD)
                header.size += sizeof(u64);
-       }
 
        if (sample_type & PERF_SAMPLE_GROUP) {
-               header.type |= PERF_SAMPLE_GROUP;
                header.size += sizeof(u64) +
                        counter->nr_siblings * sizeof(group_entry);
        }
@@ -2563,10 +2681,9 @@ static void perf_counter_output(struct perf_counter *counter, int nmi,
 
                if (callchain) {
                        callchain_size = (1 + callchain->nr) * sizeof(u64);
-
-                       header.type |= PERF_SAMPLE_CALLCHAIN;
                        header.size += callchain_size;
-               }
+               } else
+                       header.size += sizeof(u64);
        }
 
        ret = perf_output_begin(&handle, counter, header.size, nmi, 1);
@@ -2617,9 +2734,75 @@ static void perf_counter_output(struct perf_counter *counter, int nmi,
                }
        }
 
-       if (callchain)
-               perf_output_copy(&handle, callchain, callchain_size);
+       if (sample_type & PERF_SAMPLE_CALLCHAIN) {
+               if (callchain)
+                       perf_output_copy(&handle, callchain, callchain_size);
+               else {
+                       u64 nr = 0;
+                       perf_output_put(&handle, nr);
+               }
+       }
+
+       perf_output_end(&handle);
+}
+
+/*
+ * read event
+ */
+
+struct perf_read_event {
+       struct perf_event_header        header;
+
+       u32                             pid;
+       u32                             tid;
+       u64                             value;
+       u64                             format[3];
+};
+
+static void
+perf_counter_read_event(struct perf_counter *counter,
+                       struct task_struct *task)
+{
+       struct perf_output_handle handle;
+       struct perf_read_event event = {
+               .header = {
+                       .type = PERF_EVENT_READ,
+                       .misc = 0,
+                       .size = sizeof(event) - sizeof(event.format),
+               },
+               .pid = perf_counter_pid(counter, task),
+               .tid = perf_counter_tid(counter, task),
+               .value = atomic64_read(&counter->count),
+       };
+       int ret, i = 0;
+
+       if (counter->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
+               event.header.size += sizeof(u64);
+               event.format[i++] = counter->total_time_enabled;
+       }
+
+       if (counter->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
+               event.header.size += sizeof(u64);
+               event.format[i++] = counter->total_time_running;
+       }
+
+       if (counter->attr.read_format & PERF_FORMAT_ID) {
+               u64 id;
+
+               event.header.size += sizeof(u64);
+               if (counter->parent)
+                       id = counter->parent->id;
+               else
+                       id = counter->id;
+
+               event.format[i++] = id;
+       }
+
+       ret = perf_output_begin(&handle, counter, event.header.size, 0, 0);
+       if (ret)
+               return;
 
+       perf_output_copy(&handle, &event, event.header.size);
        perf_output_end(&handle);
 }
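
Annotation (not part of the patch): a sketch of how a consumer might decode the variable-size read event emitted above from the mmap ring buffer. The trailing u64s follow the read_format order used in perf_counter_read_event(): TOTAL_TIME_ENABLED, then TOTAL_TIME_RUNNING, then ID. Written against the later perf_event_* header; read_event_fixed and decode_read_event() are hypothetical names.

#include <stdio.h>
#include <linux/perf_event.h>

struct read_event_fixed {
	struct perf_event_header	header;
	__u32				pid, tid;
	__u64				value;
	/* followed by 0..3 __u64s, selected by attr.read_format */
};

static void decode_read_event(const struct read_event_fixed *ev, __u64 read_format)
{
	const __u64 *extra = (const __u64 *)(ev + 1);

	printf("READ pid %u tid %u value %llu\n",
	       ev->pid, ev->tid, (unsigned long long)ev->value);

	if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
		printf("  time_enabled %llu\n", (unsigned long long)*extra++);
	if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
		printf("  time_running %llu\n", (unsigned long long)*extra++);
	if (read_format & PERF_FORMAT_ID)
		printf("  id %llu\n", (unsigned long long)*extra);
}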
 
@@ -2814,6 +2997,9 @@ void perf_counter_comm(struct task_struct *task)
 {
        struct perf_comm_event comm_event;
 
+       if (task->perf_counter_ctxp)
+               perf_counter_enable_on_exec(task);
+
        if (!atomic_read(&nr_comm_counters))
                return;
 
@@ -3985,10 +4171,14 @@ static int inherit_group(struct perf_counter *parent_counter,
 }
 
 static void sync_child_counter(struct perf_counter *child_counter,
-                              struct perf_counter *parent_counter)
+                              struct task_struct *child)
 {
+       struct perf_counter *parent_counter = child_counter->parent;
        u64 child_val;
 
+       if (child_counter->attr.inherit_stat)
+               perf_counter_read_event(child_counter, child);
+
        child_val = atomic64_read(&child_counter->count);
 
        /*
@@ -4017,7 +4207,8 @@ static void sync_child_counter(struct perf_counter *child_counter,
 
 static void
 __perf_counter_exit_task(struct perf_counter *child_counter,
-                        struct perf_counter_context *child_ctx)
+                        struct perf_counter_context *child_ctx,
+                        struct task_struct *child)
 {
        struct perf_counter *parent_counter;
 
@@ -4031,7 +4222,7 @@ __perf_counter_exit_task(struct perf_counter *child_counter,
         * counters need to be zapped - but otherwise linger.
         */
        if (parent_counter) {
-               sync_child_counter(child_counter, parent_counter);
+               sync_child_counter(child_counter, child);
                free_counter(child_counter);
        }
 }
@@ -4093,7 +4284,7 @@ void perf_counter_exit_task(struct task_struct *child)
 again:
        list_for_each_entry_safe(child_counter, tmp, &child_ctx->counter_list,
                                 list_entry)
-               __perf_counter_exit_task(child_counter, child_ctx);
+               __perf_counter_exit_task(child_counter, child_ctx, child);
 
        /*
         * If the last counter was a group counter, it will have appended all