2 * Performance counter core code
4 * Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
5 * Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
6 * Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
7 * Copyright © 2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
9 * For licensing details see kernel-base/COPYING
14 #include <linux/cpu.h>
15 #include <linux/smp.h>
16 #include <linux/file.h>
17 #include <linux/poll.h>
18 #include <linux/sysfs.h>
19 #include <linux/dcache.h>
20 #include <linux/percpu.h>
21 #include <linux/ptrace.h>
22 #include <linux/vmstat.h>
23 #include <linux/hardirq.h>
24 #include <linux/rculist.h>
25 #include <linux/uaccess.h>
26 #include <linux/syscalls.h>
27 #include <linux/anon_inodes.h>
28 #include <linux/kernel_stat.h>
29 #include <linux/perf_counter.h>
31 #include <asm/irq_regs.h>
34 * Each CPU has a list of per CPU counters:
36 DEFINE_PER_CPU(struct perf_cpu_context, perf_cpu_context);
38 int perf_max_counters __read_mostly = 1;
39 static int perf_reserved_percpu __read_mostly;
40 static int perf_overcommit __read_mostly = 1;
42 static atomic_t nr_counters __read_mostly;
43 static atomic_t nr_mmap_counters __read_mostly;
44 static atomic_t nr_comm_counters __read_mostly;
47 * perf counter paranoia level:
48 * 0 - not paranoid
49 * 1 - disallow CPU counters for unprivileged users
50 * 2 - disallow kernel profiling for unprivileged users
52 int sysctl_perf_counter_paranoid __read_mostly;
54 static inline bool perf_paranoid_cpu(void)
56 return sysctl_perf_counter_paranoid > 0;
59 static inline bool perf_paranoid_kernel(void)
61 return sysctl_perf_counter_paranoid > 1;
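/*
 * Editor's note: an illustrative sketch (not part of the original file) of
 * how these helpers are meant to be combined with a capability check at the
 * call site, mirroring the use in find_get_context() further down:
 *
 *	if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN))
 *		return ERR_PTR(-EACCES);
 *
 * i.e. level 1 and above restricts CPU-wide counters, and level 2 and above
 * additionally restricts kernel profiling, to CAP_SYS_ADMIN.
 */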
64 int sysctl_perf_counter_mlock __read_mostly = 512; /* 'free' kb per user */
67 * max perf counter sample rate
69 int sysctl_perf_counter_sample_rate __read_mostly = 100000;
71 static atomic64_t perf_counter_id;
74 * Lock for (sysadmin-configurable) counter reservations:
76 static DEFINE_SPINLOCK(perf_resource_lock);
79 * Architecture provided APIs - weak aliases:
81 extern __weak const struct pmu *hw_perf_counter_init(struct perf_counter *counter)
86 void __weak hw_perf_disable(void) { barrier(); }
87 void __weak hw_perf_enable(void) { barrier(); }
89 void __weak hw_perf_counter_setup(int cpu) { barrier(); }
92 hw_perf_group_sched_in(struct perf_counter *group_leader,
93 struct perf_cpu_context *cpuctx,
94 struct perf_counter_context *ctx, int cpu)
99 void __weak perf_counter_print_debug(void) { }
101 static DEFINE_PER_CPU(int, disable_count);
103 void __perf_disable(void)
105 __get_cpu_var(disable_count)++;
108 bool __perf_enable(void)
110 return !--__get_cpu_var(disable_count);
113 void perf_disable(void)
119 void perf_enable(void)
125 static void get_ctx(struct perf_counter_context *ctx)
127 WARN_ON(!atomic_inc_not_zero(&ctx->refcount));
130 static void free_ctx(struct rcu_head *head)
132 struct perf_counter_context *ctx;
134 ctx = container_of(head, struct perf_counter_context, rcu_head);
138 static void put_ctx(struct perf_counter_context *ctx)
140 if (atomic_dec_and_test(&ctx->refcount)) {
142 put_ctx(ctx->parent_ctx);
144 put_task_struct(ctx->task);
145 call_rcu(&ctx->rcu_head, free_ctx);
150 * Get the perf_counter_context for a task and lock it.
151 * This has to cope with the fact that until it is locked,
152 * the context could get moved to another task.
154 static struct perf_counter_context *
155 perf_lock_task_context(struct task_struct *task, unsigned long *flags)
157 struct perf_counter_context *ctx;
161 ctx = rcu_dereference(task->perf_counter_ctxp);
164 * If this context is a clone of another, it might
165 * get swapped for another underneath us by
166 * perf_counter_task_sched_out, though the
167 * rcu_read_lock() protects us from any context
168 * getting freed. Lock the context and check if it
169 * got swapped before we could get the lock, and retry
170 * if so. If we locked the right context, then it
171 * can't get swapped on us any more.
173 spin_lock_irqsave(&ctx->lock, *flags);
174 if (ctx != rcu_dereference(task->perf_counter_ctxp)) {
175 spin_unlock_irqrestore(&ctx->lock, *flags);
179 if (!atomic_inc_not_zero(&ctx->refcount)) {
180 spin_unlock_irqrestore(&ctx->lock, *flags);
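/*
 * Editor's note: the retry logic above, condensed into a sketch (illustrative
 * only, not part of the original file):
 *
 *	rcu_read_lock();
 * retry:
 *	ctx = rcu_dereference(task->perf_counter_ctxp);
 *	if (ctx) {
 *		spin_lock_irqsave(&ctx->lock, *flags);
 *		if (ctx != rcu_dereference(task->perf_counter_ctxp)) {
 *			spin_unlock_irqrestore(&ctx->lock, *flags);
 *			goto retry;
 *		}
 *		if (!atomic_inc_not_zero(&ctx->refcount)) {
 *			spin_unlock_irqrestore(&ctx->lock, *flags);
 *			ctx = NULL;
 *		}
 *	}
 *	rcu_read_unlock();
 *
 * Once the pointer has been re-checked under ctx->lock, the context can no
 * longer be swapped away by perf_counter_task_sched_out(), and the elevated
 * refcount keeps it from being freed after rcu_read_unlock().
 */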
189 * Get the context for a task and increment its pin_count so it
190 * can't get swapped to another task. This also increments its
191 * reference count so that the context can't get freed.
193 static struct perf_counter_context *perf_pin_task_context(struct task_struct *task)
195 struct perf_counter_context *ctx;
198 ctx = perf_lock_task_context(task, &flags);
201 spin_unlock_irqrestore(&ctx->lock, flags);
206 static void perf_unpin_context(struct perf_counter_context *ctx)
210 spin_lock_irqsave(&ctx->lock, flags);
212 spin_unlock_irqrestore(&ctx->lock, flags);
217 * Add a counter to the lists for its context.
218 * Must be called with ctx->mutex and ctx->lock held.
221 list_add_counter(struct perf_counter *counter, struct perf_counter_context *ctx)
223 struct perf_counter *group_leader = counter->group_leader;
226 * Depending on whether it is a standalone or sibling counter,
227 * add it straight to the context's counter list, or to the group
228 * leader's sibling list:
230 if (group_leader == counter)
231 list_add_tail(&counter->list_entry, &ctx->counter_list);
233 list_add_tail(&counter->list_entry, &group_leader->sibling_list);
234 group_leader->nr_siblings++;
237 list_add_rcu(&counter->event_entry, &ctx->event_list);
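/*
 * Editor's illustration (hypothetical group with leader L and two siblings
 * A and B) of the structure the list operations above build:
 *
 *	ctx->counter_list:   ... -> L -> ...
 *	L->sibling_list:     A -> B
 *	A->group_leader == B->group_leader == L
 *
 * Only group leaders sit on ctx->counter_list; every counter, leader or
 * sibling, is also linked on ctx->event_list via its event_entry.
 */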
242 * Remove a counter from the lists for its context.
243 * Must be called with ctx->mutex and ctx->lock held.
246 list_del_counter(struct perf_counter *counter, struct perf_counter_context *ctx)
248 struct perf_counter *sibling, *tmp;
250 if (list_empty(&counter->list_entry))
254 list_del_init(&counter->list_entry);
255 list_del_rcu(&counter->event_entry);
257 if (counter->group_leader != counter)
258 counter->group_leader->nr_siblings--;
261 * If this was a group counter with sibling counters then
262 * upgrade the siblings to singleton counters by adding them
263 * to the context list directly:
265 list_for_each_entry_safe(sibling, tmp,
266 &counter->sibling_list, list_entry) {
268 list_move_tail(&sibling->list_entry, &ctx->counter_list);
269 sibling->group_leader = sibling;
274 counter_sched_out(struct perf_counter *counter,
275 struct perf_cpu_context *cpuctx,
276 struct perf_counter_context *ctx)
278 if (counter->state != PERF_COUNTER_STATE_ACTIVE)
281 counter->state = PERF_COUNTER_STATE_INACTIVE;
282 counter->tstamp_stopped = ctx->time;
283 counter->pmu->disable(counter);
286 if (!is_software_counter(counter))
287 cpuctx->active_oncpu--;
289 if (counter->attr.exclusive || !cpuctx->active_oncpu)
290 cpuctx->exclusive = 0;
294 group_sched_out(struct perf_counter *group_counter,
295 struct perf_cpu_context *cpuctx,
296 struct perf_counter_context *ctx)
298 struct perf_counter *counter;
300 if (group_counter->state != PERF_COUNTER_STATE_ACTIVE)
303 counter_sched_out(group_counter, cpuctx, ctx);
306 * Schedule out siblings (if any):
308 list_for_each_entry(counter, &group_counter->sibling_list, list_entry)
309 counter_sched_out(counter, cpuctx, ctx);
311 if (group_counter->attr.exclusive)
312 cpuctx->exclusive = 0;
316 * Cross CPU call to remove a performance counter
318 * We disable the counter on the hardware level first. After that we
319 * remove it from the context list.
321 static void __perf_counter_remove_from_context(void *info)
323 struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
324 struct perf_counter *counter = info;
325 struct perf_counter_context *ctx = counter->ctx;
328 * If this is a task context, we need to check whether it is
329 * the current task context of this cpu. If not it has been
330 * scheduled out before the smp call arrived.
332 if (ctx->task && cpuctx->task_ctx != ctx)
335 spin_lock(&ctx->lock);
337 * Protect the list operation against NMI by disabling the
338 * counters on a global level.
342 counter_sched_out(counter, cpuctx, ctx);
344 list_del_counter(counter, ctx);
348 * Allow more per-task counters with respect to the reservation:
351 cpuctx->max_pertask =
352 min(perf_max_counters - ctx->nr_counters,
353 perf_max_counters - perf_reserved_percpu);
357 spin_unlock(&ctx->lock);
362 * Remove the counter from a task's (or a CPU's) list of counters.
364 * Must be called with ctx->mutex held.
366 * CPU counters are removed with a smp call. For task counters we only
367 * call when the task is on a CPU.
369 * If counter->ctx is a cloned context, callers must make sure that
370 * every task struct that counter->ctx->task could possibly point to
371 * remains valid. This is OK when called from perf_release since
372 * that only calls us on the top-level context, which can't be a clone.
373 * When called from perf_counter_exit_task, it's OK because the
374 * context has been detached from its task.
376 static void perf_counter_remove_from_context(struct perf_counter *counter)
378 struct perf_counter_context *ctx = counter->ctx;
379 struct task_struct *task = ctx->task;
383 * Per cpu counters are removed via an smp call and
384 * the removal is always successful.
386 smp_call_function_single(counter->cpu,
387 __perf_counter_remove_from_context,
393 task_oncpu_function_call(task, __perf_counter_remove_from_context,
396 spin_lock_irq(&ctx->lock);
398 * If the context is active we need to retry the smp call.
400 if (ctx->nr_active && !list_empty(&counter->list_entry)) {
401 spin_unlock_irq(&ctx->lock);
406 * The lock prevents this context from being scheduled in, so we
407 * can remove the counter safely if the call above did not succeed.
410 if (!list_empty(&counter->list_entry)) {
411 list_del_counter(counter, ctx);
413 spin_unlock_irq(&ctx->lock);
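/*
 * Editor's note: the calling pattern above (send a cross-CPU call, then
 * re-check under ctx->lock and retry while the context is active) is shared
 * with perf_install_in_context() and perf_counter_enable() below. A condensed
 * sketch, illustrative only:
 *
 *	do {
 *		task_oncpu_function_call(task,
 *				__perf_counter_remove_from_context, counter);
 *		spin_lock_irq(&ctx->lock);
 *		if (!ctx->nr_active || list_empty(&counter->list_entry))
 *			break;		// the cross-call (or a sched-out) won
 *		spin_unlock_irq(&ctx->lock);
 *	} while (1);
 *	// still holding ctx->lock: safe to delete from the list directly
 */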
416 static inline u64 perf_clock(void)
418 return cpu_clock(smp_processor_id());
422 * Update the record of the current time in a context.
424 static void update_context_time(struct perf_counter_context *ctx)
426 u64 now = perf_clock();
428 ctx->time += now - ctx->timestamp;
429 ctx->timestamp = now;
433 * Update the total_time_enabled and total_time_running fields for a counter.
435 static void update_counter_times(struct perf_counter *counter)
437 struct perf_counter_context *ctx = counter->ctx;
440 if (counter->state < PERF_COUNTER_STATE_INACTIVE)
443 counter->total_time_enabled = ctx->time - counter->tstamp_enabled;
445 if (counter->state == PERF_COUNTER_STATE_INACTIVE)
446 run_end = counter->tstamp_stopped;
450 counter->total_time_running = run_end - counter->tstamp_running;
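/*
 * Editor's example with hypothetical numbers: a counter added at
 * ctx->time == 100us (tstamp_enabled = 100), scheduled in at 150
 * (tstamp_running = 150) and scheduled out at 170 (tstamp_stopped = 170)
 * reads, at ctx->time == 200:
 *
 *	total_time_enabled = 200 - 100 = 100us
 *	total_time_running = 170 - 150 =  20us
 *
 * These are the values exported via PERF_FORMAT_TOTAL_TIME_ENABLED and
 * PERF_FORMAT_TOTAL_TIME_RUNNING, which userspace can use to scale the
 * count when counters were time-multiplexed.
 */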
454 * Update total_time_enabled and total_time_running for all counters in a group.
456 static void update_group_times(struct perf_counter *leader)
458 struct perf_counter *counter;
460 update_counter_times(leader);
461 list_for_each_entry(counter, &leader->sibling_list, list_entry)
462 update_counter_times(counter);
466 * Cross CPU call to disable a performance counter
468 static void __perf_counter_disable(void *info)
470 struct perf_counter *counter = info;
471 struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
472 struct perf_counter_context *ctx = counter->ctx;
475 * If this is a per-task counter, we need to check whether this
476 * counter's task is the current task on this cpu.
478 if (ctx->task && cpuctx->task_ctx != ctx)
481 spin_lock(&ctx->lock);
484 * If the counter is on, turn it off.
485 * If it is in error state, leave it in error state.
487 if (counter->state >= PERF_COUNTER_STATE_INACTIVE) {
488 update_context_time(ctx);
489 update_counter_times(counter);
490 if (counter == counter->group_leader)
491 group_sched_out(counter, cpuctx, ctx);
493 counter_sched_out(counter, cpuctx, ctx);
494 counter->state = PERF_COUNTER_STATE_OFF;
497 spin_unlock(&ctx->lock);
503 * If counter->ctx is a cloned context, callers must make sure that
504 * every task struct that counter->ctx->task could possibly point to
505 * remains valid. This condition is satisfied when called through
506 * perf_counter_for_each_child or perf_counter_for_each because they
507 * hold the top-level counter's child_mutex, so any descendant that
508 * goes to exit will block in sync_child_counter.
509 * When called from perf_pending_counter it's OK because counter->ctx
510 * is the current context on this CPU and preemption is disabled,
511 * hence we can't get into perf_counter_task_sched_out for this context.
513 static void perf_counter_disable(struct perf_counter *counter)
515 struct perf_counter_context *ctx = counter->ctx;
516 struct task_struct *task = ctx->task;
520 * Disable the counter on the cpu that it's on
522 smp_call_function_single(counter->cpu, __perf_counter_disable,
528 task_oncpu_function_call(task, __perf_counter_disable, counter);
530 spin_lock_irq(&ctx->lock);
532 * If the counter is still active, we need to retry the cross-call.
534 if (counter->state == PERF_COUNTER_STATE_ACTIVE) {
535 spin_unlock_irq(&ctx->lock);
540 * Since we have the lock this context can't be scheduled
541 * in, so we can change the state safely.
543 if (counter->state == PERF_COUNTER_STATE_INACTIVE) {
544 update_counter_times(counter);
545 counter->state = PERF_COUNTER_STATE_OFF;
548 spin_unlock_irq(&ctx->lock);
552 counter_sched_in(struct perf_counter *counter,
553 struct perf_cpu_context *cpuctx,
554 struct perf_counter_context *ctx,
557 if (counter->state <= PERF_COUNTER_STATE_OFF)
560 counter->state = PERF_COUNTER_STATE_ACTIVE;
561 counter->oncpu = cpu; /* TODO: put 'cpu' into cpuctx->cpu */
563 * The new state must be visible before we turn it on in the hardware:
567 if (counter->pmu->enable(counter)) {
568 counter->state = PERF_COUNTER_STATE_INACTIVE;
573 counter->tstamp_running += ctx->time - counter->tstamp_stopped;
575 if (!is_software_counter(counter))
576 cpuctx->active_oncpu++;
579 if (counter->attr.exclusive)
580 cpuctx->exclusive = 1;
586 group_sched_in(struct perf_counter *group_counter,
587 struct perf_cpu_context *cpuctx,
588 struct perf_counter_context *ctx,
591 struct perf_counter *counter, *partial_group;
594 if (group_counter->state == PERF_COUNTER_STATE_OFF)
597 ret = hw_perf_group_sched_in(group_counter, cpuctx, ctx, cpu);
599 return ret < 0 ? ret : 0;
601 if (counter_sched_in(group_counter, cpuctx, ctx, cpu))
605 * Schedule in siblings as one group (if any):
607 list_for_each_entry(counter, &group_counter->sibling_list, list_entry) {
608 if (counter_sched_in(counter, cpuctx, ctx, cpu)) {
609 partial_group = counter;
618 * Groups can be scheduled in as one unit only, so undo any
619 * partial group before returning:
621 list_for_each_entry(counter, &group_counter->sibling_list, list_entry) {
622 if (counter == partial_group)
624 counter_sched_out(counter, cpuctx, ctx);
626 counter_sched_out(group_counter, cpuctx, ctx);
632 * Return 1 for a group consisting entirely of software counters,
633 * 0 if the group contains any hardware counters.
635 static int is_software_only_group(struct perf_counter *leader)
637 struct perf_counter *counter;
639 if (!is_software_counter(leader))
642 list_for_each_entry(counter, &leader->sibling_list, list_entry)
643 if (!is_software_counter(counter))
650 * Work out whether we can put this counter group on the CPU now.
652 static int group_can_go_on(struct perf_counter *counter,
653 struct perf_cpu_context *cpuctx,
657 * Groups consisting entirely of software counters can always go on.
659 if (is_software_only_group(counter))
662 * If an exclusive group is already on, no other hardware
663 * counters can go on.
665 if (cpuctx->exclusive)
668 * If this group is exclusive and there are already
669 * counters on the CPU, it can't go on.
671 if (counter->attr.exclusive && cpuctx->active_oncpu)
674 * Otherwise, try to add it if all previous groups were able to.
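/*
 * Editor's summary of the decision above (informational only):
 *
 *	software-only group                   -> can always go on
 *	an exclusive group is already on      -> nothing else may go on
 *	this group exclusive, counters active -> may not go on
 *	otherwise                             -> schedulable as long as the
 *	                                         hardware still has room
 */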
680 static void add_counter_to_ctx(struct perf_counter *counter,
681 struct perf_counter_context *ctx)
683 list_add_counter(counter, ctx);
684 counter->tstamp_enabled = ctx->time;
685 counter->tstamp_running = ctx->time;
686 counter->tstamp_stopped = ctx->time;
690 * Cross CPU call to install and enable a performance counter
692 * Must be called with ctx->mutex held
694 static void __perf_install_in_context(void *info)
696 struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
697 struct perf_counter *counter = info;
698 struct perf_counter_context *ctx = counter->ctx;
699 struct perf_counter *leader = counter->group_leader;
700 int cpu = smp_processor_id();
704 * If this is a task context, we need to check whether it is
705 * the current task context of this cpu. If not it has been
706 * scheduled out before the smp call arrived.
707 * Or possibly this is the right context but it isn't
708 * on this cpu because it had no counters.
710 if (ctx->task && cpuctx->task_ctx != ctx) {
711 if (cpuctx->task_ctx || ctx->task != current)
713 cpuctx->task_ctx = ctx;
716 spin_lock(&ctx->lock);
718 update_context_time(ctx);
721 * Protect the list operation against NMI by disabling the
722 * counters on a global level. NOP for non-NMI-based counters.
726 add_counter_to_ctx(counter, ctx);
729 * Don't put the counter on if it is disabled or if
730 * it is in a group and the group isn't on.
732 if (counter->state != PERF_COUNTER_STATE_INACTIVE ||
733 (leader != counter && leader->state != PERF_COUNTER_STATE_ACTIVE))
737 * An exclusive counter can't go on if there are already active
738 * hardware counters, and no hardware counter can go on if there
739 * is already an exclusive counter on.
741 if (!group_can_go_on(counter, cpuctx, 1))
744 err = counter_sched_in(counter, cpuctx, ctx, cpu);
748 * This counter couldn't go on. If it is in a group
749 * then we have to pull the whole group off.
750 * If the counter group is pinned then put it in error state.
752 if (leader != counter)
753 group_sched_out(leader, cpuctx, ctx);
754 if (leader->attr.pinned) {
755 update_group_times(leader);
756 leader->state = PERF_COUNTER_STATE_ERROR;
760 if (!err && !ctx->task && cpuctx->max_pertask)
761 cpuctx->max_pertask--;
766 spin_unlock(&ctx->lock);
770 * Attach a performance counter to a context
772 * First we add the counter to the list with the hardware enable bit
773 * in counter->hw_config cleared.
775 * If the counter is attached to a task which is on a CPU we use a smp
776 * call to enable it in the task context. The task might have been
777 * scheduled away, but we check this in the smp call again.
779 * Must be called with ctx->mutex held.
782 perf_install_in_context(struct perf_counter_context *ctx,
783 struct perf_counter *counter,
786 struct task_struct *task = ctx->task;
790 * Per cpu counters are installed via an smp call and
791 * the install is always successful.
793 smp_call_function_single(cpu, __perf_install_in_context,
799 task_oncpu_function_call(task, __perf_install_in_context,
802 spin_lock_irq(&ctx->lock);
804 * we need to retry the smp call.
806 if (ctx->is_active && list_empty(&counter->list_entry)) {
807 spin_unlock_irq(&ctx->lock);
812 * The lock prevents this context from being scheduled in, so we
813 * can add the counter safely if the call above did not succeed.
816 if (list_empty(&counter->list_entry))
817 add_counter_to_ctx(counter, ctx);
818 spin_unlock_irq(&ctx->lock);
822 * Cross CPU call to enable a performance counter
824 static void __perf_counter_enable(void *info)
826 struct perf_counter *counter = info;
827 struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
828 struct perf_counter_context *ctx = counter->ctx;
829 struct perf_counter *leader = counter->group_leader;
833 * If this is a per-task counter, we need to check whether this
834 * counter's task is the current task on this cpu.
836 if (ctx->task && cpuctx->task_ctx != ctx) {
837 if (cpuctx->task_ctx || ctx->task != current)
839 cpuctx->task_ctx = ctx;
842 spin_lock(&ctx->lock);
844 update_context_time(ctx);
846 if (counter->state >= PERF_COUNTER_STATE_INACTIVE)
848 counter->state = PERF_COUNTER_STATE_INACTIVE;
849 counter->tstamp_enabled = ctx->time - counter->total_time_enabled;
852 * If the counter is in a group and isn't the group leader,
853 * then don't put it on unless the group is on.
855 if (leader != counter && leader->state != PERF_COUNTER_STATE_ACTIVE)
858 if (!group_can_go_on(counter, cpuctx, 1)) {
862 if (counter == leader)
863 err = group_sched_in(counter, cpuctx, ctx,
866 err = counter_sched_in(counter, cpuctx, ctx,
873 * If this counter can't go on and it's part of a
874 * group, then the whole group has to come off.
876 if (leader != counter)
877 group_sched_out(leader, cpuctx, ctx);
878 if (leader->attr.pinned) {
879 update_group_times(leader);
880 leader->state = PERF_COUNTER_STATE_ERROR;
885 spin_unlock(&ctx->lock);
891 * If counter->ctx is a cloned context, callers must make sure that
892 * every task struct that counter->ctx->task could possibly point to
893 * remains valid. This condition is satisfied when called through
894 * perf_counter_for_each_child or perf_counter_for_each as described
895 * for perf_counter_disable.
897 static void perf_counter_enable(struct perf_counter *counter)
899 struct perf_counter_context *ctx = counter->ctx;
900 struct task_struct *task = ctx->task;
904 * Enable the counter on the cpu that it's on
906 smp_call_function_single(counter->cpu, __perf_counter_enable,
911 spin_lock_irq(&ctx->lock);
912 if (counter->state >= PERF_COUNTER_STATE_INACTIVE)
916 * If the counter is in error state, clear that first.
917 * That way, if we see the counter in error state below, we
918 * know that it has gone back into error state, as distinct
919 * from the task having been scheduled away before the
920 * cross-call arrived.
922 if (counter->state == PERF_COUNTER_STATE_ERROR)
923 counter->state = PERF_COUNTER_STATE_OFF;
926 spin_unlock_irq(&ctx->lock);
927 task_oncpu_function_call(task, __perf_counter_enable, counter);
929 spin_lock_irq(&ctx->lock);
932 * If the context is active and the counter is still off,
933 * we need to retry the cross-call.
935 if (ctx->is_active && counter->state == PERF_COUNTER_STATE_OFF)
939 * Since we have the lock this context can't be scheduled
940 * in, so we can change the state safely.
942 if (counter->state == PERF_COUNTER_STATE_OFF) {
943 counter->state = PERF_COUNTER_STATE_INACTIVE;
944 counter->tstamp_enabled =
945 ctx->time - counter->total_time_enabled;
948 spin_unlock_irq(&ctx->lock);
951 static int perf_counter_refresh(struct perf_counter *counter, int refresh)
954 * not supported on inherited counters
956 if (counter->attr.inherit)
959 atomic_add(refresh, &counter->event_limit);
960 perf_counter_enable(counter);
965 void __perf_counter_sched_out(struct perf_counter_context *ctx,
966 struct perf_cpu_context *cpuctx)
968 struct perf_counter *counter;
970 spin_lock(&ctx->lock);
972 if (likely(!ctx->nr_counters))
974 update_context_time(ctx);
977 if (ctx->nr_active) {
978 list_for_each_entry(counter, &ctx->counter_list, list_entry) {
979 if (counter != counter->group_leader)
980 counter_sched_out(counter, cpuctx, ctx);
982 group_sched_out(counter, cpuctx, ctx);
987 spin_unlock(&ctx->lock);
991 * Test whether two contexts are equivalent, i.e. whether they
992 * have both been cloned from the same version of the same context
993 * and they both have the same number of enabled counters.
994 * If the number of enabled counters is the same, then the set
995 * of enabled counters should be the same, because these are both
996 * inherited contexts, therefore we can't access individual counters
997 * in them directly with an fd; we can only enable/disable all
998 * counters via prctl, or enable/disable all counters in a family
999 * via ioctl, which will have the same effect on both contexts.
1001 static int context_equiv(struct perf_counter_context *ctx1,
1002 struct perf_counter_context *ctx2)
1004 return ctx1->parent_ctx && ctx1->parent_ctx == ctx2->parent_ctx
1005 && ctx1->parent_gen == ctx2->parent_gen
1006 && !ctx1->pin_count && !ctx2->pin_count;
1010 * Called from scheduler to remove the counters of the current task,
1011 * with interrupts disabled.
1013 * We stop each counter and update the counter value in counter->count.
1015 * This does not protect us against NMI, but disable()
1016 * sets the disabled bit in the control field of counter _before_
1017 * accessing the counter control register. If a NMI hits, then it will
1018 * not restart the counter.
1020 void perf_counter_task_sched_out(struct task_struct *task,
1021 struct task_struct *next, int cpu)
1023 struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
1024 struct perf_counter_context *ctx = task->perf_counter_ctxp;
1025 struct perf_counter_context *next_ctx;
1026 struct perf_counter_context *parent;
1027 struct pt_regs *regs;
1030 regs = task_pt_regs(task);
1031 perf_swcounter_event(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, 1, regs, 0);
1033 if (likely(!ctx || !cpuctx->task_ctx))
1036 update_context_time(ctx);
1039 parent = rcu_dereference(ctx->parent_ctx);
1040 next_ctx = next->perf_counter_ctxp;
1041 if (parent && next_ctx &&
1042 rcu_dereference(next_ctx->parent_ctx) == parent) {
1044 * Looks like the two contexts are clones, so we might be
1045 * able to optimize the context switch. We lock both
1046 * contexts and check that they are clones under the
1047 * lock (including re-checking that neither has been
1048 * uncloned in the meantime). It doesn't matter which
1049 * order we take the locks because no other cpu could
1050 * be trying to lock both of these tasks.
1052 spin_lock(&ctx->lock);
1053 spin_lock_nested(&next_ctx->lock, SINGLE_DEPTH_NESTING);
1054 if (context_equiv(ctx, next_ctx)) {
1056 * XXX do we need a memory barrier of sorts
1057 * wrt to rcu_dereference() of perf_counter_ctxp
1059 task->perf_counter_ctxp = next_ctx;
1060 next->perf_counter_ctxp = ctx;
1062 next_ctx->task = task;
1065 spin_unlock(&next_ctx->lock);
1066 spin_unlock(&ctx->lock);
1071 __perf_counter_sched_out(ctx, cpuctx);
1072 cpuctx->task_ctx = NULL;
1077 * Called with IRQs disabled
1079 static void __perf_counter_task_sched_out(struct perf_counter_context *ctx)
1081 struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
1083 if (!cpuctx->task_ctx)
1086 if (WARN_ON_ONCE(ctx != cpuctx->task_ctx))
1089 __perf_counter_sched_out(ctx, cpuctx);
1090 cpuctx->task_ctx = NULL;
1094 * Called with IRQs disabled
1096 static void perf_counter_cpu_sched_out(struct perf_cpu_context *cpuctx)
1098 __perf_counter_sched_out(&cpuctx->ctx, cpuctx);
1102 __perf_counter_sched_in(struct perf_counter_context *ctx,
1103 struct perf_cpu_context *cpuctx, int cpu)
1105 struct perf_counter *counter;
1108 spin_lock(&ctx->lock);
1110 if (likely(!ctx->nr_counters))
1113 ctx->timestamp = perf_clock();
1118 * First go through the list and put on any pinned groups
1119 * in order to give them the best chance of going on.
1121 list_for_each_entry(counter, &ctx->counter_list, list_entry) {
1122 if (counter->state <= PERF_COUNTER_STATE_OFF ||
1123 !counter->attr.pinned)
1125 if (counter->cpu != -1 && counter->cpu != cpu)
1128 if (counter != counter->group_leader)
1129 counter_sched_in(counter, cpuctx, ctx, cpu);
1131 if (group_can_go_on(counter, cpuctx, 1))
1132 group_sched_in(counter, cpuctx, ctx, cpu);
1136 * If this pinned group hasn't been scheduled,
1137 * put it in error state.
1139 if (counter->state == PERF_COUNTER_STATE_INACTIVE) {
1140 update_group_times(counter);
1141 counter->state = PERF_COUNTER_STATE_ERROR;
1145 list_for_each_entry(counter, &ctx->counter_list, list_entry) {
1147 * Ignore counters in OFF or ERROR state, and
1148 * ignore pinned counters since we did them already.
1150 if (counter->state <= PERF_COUNTER_STATE_OFF ||
1151 counter->attr.pinned)
1155 * Listen to the 'cpu' scheduling filter constraint
1158 if (counter->cpu != -1 && counter->cpu != cpu)
1161 if (counter != counter->group_leader) {
1162 if (counter_sched_in(counter, cpuctx, ctx, cpu))
1165 if (group_can_go_on(counter, cpuctx, can_add_hw)) {
1166 if (group_sched_in(counter, cpuctx, ctx, cpu))
1173 spin_unlock(&ctx->lock);
1177 * Called from scheduler to add the counters of the current task
1178 * with interrupts disabled.
1180 * We restore the counter value and then enable it.
1182 * This does not protect us against NMI, but enable()
1183 * sets the enabled bit in the control field of counter _before_
1184 * accessing the counter control register. If a NMI hits, then it will
1185 * keep the counter running.
1187 void perf_counter_task_sched_in(struct task_struct *task, int cpu)
1189 struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
1190 struct perf_counter_context *ctx = task->perf_counter_ctxp;
1194 if (cpuctx->task_ctx == ctx)
1196 __perf_counter_sched_in(ctx, cpuctx, cpu);
1197 cpuctx->task_ctx = ctx;
1200 static void perf_counter_cpu_sched_in(struct perf_cpu_context *cpuctx, int cpu)
1202 struct perf_counter_context *ctx = &cpuctx->ctx;
1204 __perf_counter_sched_in(ctx, cpuctx, cpu);
1207 #define MAX_INTERRUPTS (~0ULL)
1209 static void perf_log_throttle(struct perf_counter *counter, int enable);
1210 static void perf_log_period(struct perf_counter *counter, u64 period);
1212 static void perf_adjust_period(struct perf_counter *counter, u64 events)
1214 struct hw_perf_counter *hwc = &counter->hw;
1215 u64 period, sample_period;
1218 events *= hwc->sample_period;
1219 period = div64_u64(events, counter->attr.sample_freq);
1221 delta = (s64)(period - hwc->sample_period);
1222 delta = (delta + 7) / 8; /* low pass filter */
1224 sample_period = hwc->sample_period + delta;
1229 perf_log_period(counter, sample_period);
1231 hwc->sample_period = sample_period;
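/*
 * Editor's example with hypothetical numbers: suppose attr.sample_freq is
 * 1000 Hz, the current hwc->sample_period is 1000, and roughly 2000 overflow
 * interrupts were taken over the last second (events == 2000). The estimated
 * event rate is 2000 * 1000 = 2,000,000/sec, so the ideal period is
 * 2,000,000 / 1000 = 2000. delta = +1000, and the /8 low-pass filter turns
 * that into +125, giving a new sample_period of 1125; each adjustment moves
 * roughly 1/8th of the way toward the ideal period rather than jumping
 * straight to it.
 */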
1234 static void perf_ctx_adjust_freq(struct perf_counter_context *ctx)
1236 struct perf_counter *counter;
1237 struct hw_perf_counter *hwc;
1238 u64 interrupts, freq;
1240 spin_lock(&ctx->lock);
1241 list_for_each_entry(counter, &ctx->counter_list, list_entry) {
1242 if (counter->state != PERF_COUNTER_STATE_ACTIVE)
1247 interrupts = hwc->interrupts;
1248 hwc->interrupts = 0;
1251 * unthrottle counters on the tick
1253 if (interrupts == MAX_INTERRUPTS) {
1254 perf_log_throttle(counter, 1);
1255 counter->pmu->unthrottle(counter);
1256 interrupts = 2*sysctl_perf_counter_sample_rate/HZ;
1259 if (!counter->attr.freq || !counter->attr.sample_freq)
1263 * if the specified freq < HZ then we need to skip ticks
1265 if (counter->attr.sample_freq < HZ) {
1266 freq = counter->attr.sample_freq;
1268 hwc->freq_count += freq;
1269 hwc->freq_interrupts += interrupts;
1271 if (hwc->freq_count < HZ)
1274 interrupts = hwc->freq_interrupts;
1275 hwc->freq_interrupts = 0;
1276 hwc->freq_count -= HZ;
1280 perf_adjust_period(counter, freq * interrupts);
1283 * In order to avoid being stalled by an (accidental) huge
1284 * sample period, force reset the sample period if we didn't
1285 * get any events in this freq period.
1289 counter->pmu->disable(counter);
1290 atomic64_set(&hwc->period_left, 0);
1291 counter->pmu->enable(counter);
1295 spin_unlock(&ctx->lock);
1299 * Round-robin a context's counters:
1301 static void rotate_ctx(struct perf_counter_context *ctx)
1303 struct perf_counter *counter;
1305 if (!ctx->nr_counters)
1308 spin_lock(&ctx->lock);
1310 * Rotate the first entry last (works just fine for group counters too):
1313 list_for_each_entry(counter, &ctx->counter_list, list_entry) {
1314 list_move_tail(&counter->list_entry, &ctx->counter_list);
1319 spin_unlock(&ctx->lock);
1322 void perf_counter_task_tick(struct task_struct *curr, int cpu)
1324 struct perf_cpu_context *cpuctx;
1325 struct perf_counter_context *ctx;
1327 if (!atomic_read(&nr_counters))
1330 cpuctx = &per_cpu(perf_cpu_context, cpu);
1331 ctx = curr->perf_counter_ctxp;
1333 perf_ctx_adjust_freq(&cpuctx->ctx);
1335 perf_ctx_adjust_freq(ctx);
1337 perf_counter_cpu_sched_out(cpuctx);
1339 __perf_counter_task_sched_out(ctx);
1341 rotate_ctx(&cpuctx->ctx);
1345 perf_counter_cpu_sched_in(cpuctx, cpu);
1347 perf_counter_task_sched_in(curr, cpu);
1351 * Cross CPU call to read the hardware counter
1353 static void __read(void *info)
1355 struct perf_counter *counter = info;
1356 struct perf_counter_context *ctx = counter->ctx;
1357 unsigned long flags;
1359 local_irq_save(flags);
1361 update_context_time(ctx);
1362 counter->pmu->read(counter);
1363 update_counter_times(counter);
1364 local_irq_restore(flags);
1367 static u64 perf_counter_read(struct perf_counter *counter)
1370 * If counter is enabled and currently active on a CPU, update the
1371 * value in the counter structure:
1373 if (counter->state == PERF_COUNTER_STATE_ACTIVE) {
1374 smp_call_function_single(counter->oncpu,
1375 __read, counter, 1);
1376 } else if (counter->state == PERF_COUNTER_STATE_INACTIVE) {
1377 update_counter_times(counter);
1380 return atomic64_read(&counter->count);
1384 * Initialize the perf_counter context in a task_struct:
1387 __perf_counter_init_context(struct perf_counter_context *ctx,
1388 struct task_struct *task)
1390 memset(ctx, 0, sizeof(*ctx));
1391 spin_lock_init(&ctx->lock);
1392 mutex_init(&ctx->mutex);
1393 INIT_LIST_HEAD(&ctx->counter_list);
1394 INIT_LIST_HEAD(&ctx->event_list);
1395 atomic_set(&ctx->refcount, 1);
1399 static struct perf_counter_context *find_get_context(pid_t pid, int cpu)
1401 struct perf_counter_context *parent_ctx;
1402 struct perf_counter_context *ctx;
1403 struct perf_cpu_context *cpuctx;
1404 struct task_struct *task;
1405 unsigned long flags;
1409 * If cpu is not a wildcard then this is a percpu counter:
1412 /* Must be root to operate on a CPU counter: */
1413 if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN))
1414 return ERR_PTR(-EACCES);
1416 if (cpu < 0 || cpu > num_possible_cpus())
1417 return ERR_PTR(-EINVAL);
1420 * We could be clever and allow attaching a counter to an
1421 * offline CPU and activate it when the CPU comes up, but that's for later.
1424 if (!cpu_isset(cpu, cpu_online_map))
1425 return ERR_PTR(-ENODEV);
1427 cpuctx = &per_cpu(perf_cpu_context, cpu);
1438 task = find_task_by_vpid(pid);
1440 get_task_struct(task);
1444 return ERR_PTR(-ESRCH);
1447 * Can't attach counters to a dying task.
1450 if (task->flags & PF_EXITING)
1453 /* Reuse ptrace permission checks for now. */
1455 if (!ptrace_may_access(task, PTRACE_MODE_READ))
1459 ctx = perf_lock_task_context(task, &flags);
1461 parent_ctx = ctx->parent_ctx;
1463 put_ctx(parent_ctx);
1464 ctx->parent_ctx = NULL; /* no longer a clone */
1466 spin_unlock_irqrestore(&ctx->lock, flags);
1470 ctx = kmalloc(sizeof(struct perf_counter_context), GFP_KERNEL);
1474 __perf_counter_init_context(ctx, task);
1476 if (cmpxchg(&task->perf_counter_ctxp, NULL, ctx)) {
1478 * We raced with some other task; use
1479 * the context they set.
1484 get_task_struct(task);
1487 put_task_struct(task);
1491 put_task_struct(task);
1492 return ERR_PTR(err);
1495 static void free_counter_rcu(struct rcu_head *head)
1497 struct perf_counter *counter;
1499 counter = container_of(head, struct perf_counter, rcu_head);
1501 put_pid_ns(counter->ns);
1505 static void perf_pending_sync(struct perf_counter *counter);
1507 static void free_counter(struct perf_counter *counter)
1509 perf_pending_sync(counter);
1511 if (!counter->parent) {
1512 atomic_dec(&nr_counters);
1513 if (counter->attr.mmap)
1514 atomic_dec(&nr_mmap_counters);
1515 if (counter->attr.comm)
1516 atomic_dec(&nr_comm_counters);
1519 if (counter->destroy)
1520 counter->destroy(counter);
1522 put_ctx(counter->ctx);
1523 call_rcu(&counter->rcu_head, free_counter_rcu);
1527 * Called when the last reference to the file is gone.
1529 static int perf_release(struct inode *inode, struct file *file)
1531 struct perf_counter *counter = file->private_data;
1532 struct perf_counter_context *ctx = counter->ctx;
1534 file->private_data = NULL;
1536 WARN_ON_ONCE(ctx->parent_ctx);
1537 mutex_lock(&ctx->mutex);
1538 perf_counter_remove_from_context(counter);
1539 mutex_unlock(&ctx->mutex);
1541 mutex_lock(&counter->owner->perf_counter_mutex);
1542 list_del_init(&counter->owner_entry);
1543 mutex_unlock(&counter->owner->perf_counter_mutex);
1544 put_task_struct(counter->owner);
1546 free_counter(counter);
1552 * Read the performance counter - simple non-blocking version for now
1555 perf_read_hw(struct perf_counter *counter, char __user *buf, size_t count)
1561 * Return end-of-file for a read on a counter that is in
1562 * error state (i.e. because it was pinned but it couldn't be
1563 * scheduled on to the CPU at some point).
1565 if (counter->state == PERF_COUNTER_STATE_ERROR)
1568 WARN_ON_ONCE(counter->ctx->parent_ctx);
1569 mutex_lock(&counter->child_mutex);
1570 values[0] = perf_counter_read(counter);
1572 if (counter->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
1573 values[n++] = counter->total_time_enabled +
1574 atomic64_read(&counter->child_total_time_enabled);
1575 if (counter->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
1576 values[n++] = counter->total_time_running +
1577 atomic64_read(&counter->child_total_time_running);
1578 if (counter->attr.read_format & PERF_FORMAT_ID)
1579 values[n++] = counter->id;
1580 mutex_unlock(&counter->child_mutex);
1582 if (count < n * sizeof(u64))
1584 count = n * sizeof(u64);
1586 if (copy_to_user(buf, values, count))
1593 perf_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
1595 struct perf_counter *counter = file->private_data;
1597 return perf_read_hw(counter, buf, count);
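/*
 * Editor's sketch of the user-visible layout produced by perf_read_hw()
 * above (hypothetical attr.read_format with all three bits set):
 *
 *	struct {
 *		u64 value;		// the counter value
 *		u64 time_enabled;	// PERF_FORMAT_TOTAL_TIME_ENABLED
 *		u64 time_running;	// PERF_FORMAT_TOTAL_TIME_RUNNING
 *		u64 id;			// PERF_FORMAT_ID
 *	};
 *
 * read() then returns n * sizeof(u64) bytes, where n depends on which
 * read_format bits were requested when the counter was created.
 */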
1600 static unsigned int perf_poll(struct file *file, poll_table *wait)
1602 struct perf_counter *counter = file->private_data;
1603 struct perf_mmap_data *data;
1604 unsigned int events = POLL_HUP;
1607 data = rcu_dereference(counter->data);
1609 events = atomic_xchg(&data->poll, 0);
1612 poll_wait(file, &counter->waitq, wait);
1617 static void perf_counter_reset(struct perf_counter *counter)
1619 (void)perf_counter_read(counter);
1620 atomic64_set(&counter->count, 0);
1621 perf_counter_update_userpage(counter);
1625 * Holding the top-level counter's child_mutex means that any
1626 * descendant process that has inherited this counter will block
1627 * in sync_child_counter if it goes to exit, thus satisfying the
1628 * task existence requirements of perf_counter_enable/disable.
1630 static void perf_counter_for_each_child(struct perf_counter *counter,
1631 void (*func)(struct perf_counter *))
1633 struct perf_counter *child;
1635 WARN_ON_ONCE(counter->ctx->parent_ctx);
1636 mutex_lock(&counter->child_mutex);
1638 list_for_each_entry(child, &counter->child_list, child_list)
1640 mutex_unlock(&counter->child_mutex);
1643 static void perf_counter_for_each(struct perf_counter *counter,
1644 void (*func)(struct perf_counter *))
1646 struct perf_counter_context *ctx = counter->ctx;
1647 struct perf_counter *sibling;
1649 WARN_ON_ONCE(ctx->parent_ctx);
1650 mutex_lock(&ctx->mutex);
1651 counter = counter->group_leader;
1653 perf_counter_for_each_child(counter, func);
1655 list_for_each_entry(sibling, &counter->sibling_list, list_entry)
1656 perf_counter_for_each_child(counter, func);
1657 mutex_unlock(&ctx->mutex);
1660 static int perf_counter_period(struct perf_counter *counter, u64 __user *arg)
1662 struct perf_counter_context *ctx = counter->ctx;
1667 if (!counter->attr.sample_period)
1670 size = copy_from_user(&value, arg, sizeof(value));
1671 if (size != sizeof(value))
1677 spin_lock_irq(&ctx->lock);
1678 if (counter->attr.freq) {
1679 if (value > sysctl_perf_counter_sample_rate) {
1684 counter->attr.sample_freq = value;
1686 perf_log_period(counter, value);
1688 counter->attr.sample_period = value;
1689 counter->hw.sample_period = value;
1692 spin_unlock_irq(&ctx->lock);
1697 static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
1699 struct perf_counter *counter = file->private_data;
1700 void (*func)(struct perf_counter *);
1704 case PERF_COUNTER_IOC_ENABLE:
1705 func = perf_counter_enable;
1707 case PERF_COUNTER_IOC_DISABLE:
1708 func = perf_counter_disable;
1710 case PERF_COUNTER_IOC_RESET:
1711 func = perf_counter_reset;
1714 case PERF_COUNTER_IOC_REFRESH:
1715 return perf_counter_refresh(counter, arg);
1717 case PERF_COUNTER_IOC_PERIOD:
1718 return perf_counter_period(counter, (u64 __user *)arg);
1724 if (flags & PERF_IOC_FLAG_GROUP)
1725 perf_counter_for_each(counter, func);
1727 perf_counter_for_each_child(counter, func);
1732 int perf_counter_task_enable(void)
1734 struct perf_counter *counter;
1736 mutex_lock(&current->perf_counter_mutex);
1737 list_for_each_entry(counter, &current->perf_counter_list, owner_entry)
1738 perf_counter_for_each_child(counter, perf_counter_enable);
1739 mutex_unlock(&current->perf_counter_mutex);
1744 int perf_counter_task_disable(void)
1746 struct perf_counter *counter;
1748 mutex_lock(&current->perf_counter_mutex);
1749 list_for_each_entry(counter, &current->perf_counter_list, owner_entry)
1750 perf_counter_for_each_child(counter, perf_counter_disable);
1751 mutex_unlock(&current->perf_counter_mutex);
1756 static int perf_counter_index(struct perf_counter *counter)
1758 if (counter->state != PERF_COUNTER_STATE_ACTIVE)
1761 return counter->hw.idx + 1 - PERF_COUNTER_INDEX_OFFSET;
1765 * Callers need to ensure there can be no nesting of this function, otherwise
1766 * the seqlock logic goes bad. We cannot serialize this because the arch
1767 * code calls this from NMI context.
1769 void perf_counter_update_userpage(struct perf_counter *counter)
1771 struct perf_counter_mmap_page *userpg;
1772 struct perf_mmap_data *data;
1775 data = rcu_dereference(counter->data);
1779 userpg = data->user_page;
1782 * Disable preemption so as to not let the corresponding user-space
1783 * spin too long if we get preempted.
1788 userpg->index = perf_counter_index(counter);
1789 userpg->offset = atomic64_read(&counter->count);
1790 if (counter->state == PERF_COUNTER_STATE_ACTIVE)
1791 userpg->offset -= atomic64_read(&counter->hw.prev_count);
1793 userpg->time_enabled = counter->total_time_enabled +
1794 atomic64_read(&counter->child_total_time_enabled);
1796 userpg->time_running = counter->total_time_running +
1797 atomic64_read(&counter->child_total_time_running);
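/*
 * Editor's sketch of the matching user-space read side (illustrative; it
 * assumes the seqcount-style `lock` field of struct perf_counter_mmap_page
 * described in perf_counter.h, which the elided lines of this function bump
 * around the update):
 *
 *	struct perf_counter_mmap_page *pc = mmapped_base;
 *	u32 seq;
 *	u64 offset;
 *
 *	do {
 *		seq = pc->lock;
 *		rmb();
 *		offset = pc->offset;	// plus rdpmc(pc->index - 1) when active
 *		rmb();
 *	} while (pc->lock != seq);
 *
 * This is also why the function must never nest: a nested update could leave
 * the sequence count looking "even" while the contents are still
 * inconsistent.
 */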
1806 static int perf_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1808 struct perf_counter *counter = vma->vm_file->private_data;
1809 struct perf_mmap_data *data;
1810 int ret = VM_FAULT_SIGBUS;
1812 if (vmf->flags & FAULT_FLAG_MKWRITE) {
1813 if (vmf->pgoff == 0)
1819 data = rcu_dereference(counter->data);
1823 if (vmf->pgoff == 0) {
1824 vmf->page = virt_to_page(data->user_page);
1826 int nr = vmf->pgoff - 1;
1828 if ((unsigned)nr > data->nr_pages)
1831 if (vmf->flags & FAULT_FLAG_WRITE)
1834 vmf->page = virt_to_page(data->data_pages[nr]);
1837 get_page(vmf->page);
1838 vmf->page->mapping = vma->vm_file->f_mapping;
1839 vmf->page->index = vmf->pgoff;
1848 static int perf_mmap_data_alloc(struct perf_counter *counter, int nr_pages)
1850 struct perf_mmap_data *data;
1854 WARN_ON(atomic_read(&counter->mmap_count));
1856 size = sizeof(struct perf_mmap_data);
1857 size += nr_pages * sizeof(void *);
1859 data = kzalloc(size, GFP_KERNEL);
1863 data->user_page = (void *)get_zeroed_page(GFP_KERNEL);
1864 if (!data->user_page)
1865 goto fail_user_page;
1867 for (i = 0; i < nr_pages; i++) {
1868 data->data_pages[i] = (void *)get_zeroed_page(GFP_KERNEL);
1869 if (!data->data_pages[i])
1870 goto fail_data_pages;
1873 data->nr_pages = nr_pages;
1874 atomic_set(&data->lock, -1);
1876 rcu_assign_pointer(counter->data, data);
1881 for (i--; i >= 0; i--)
1882 free_page((unsigned long)data->data_pages[i]);
1884 free_page((unsigned long)data->user_page);
1893 static void perf_mmap_free_page(unsigned long addr)
1895 struct page *page = virt_to_page(addr);
1897 page->mapping = NULL;
1901 static void __perf_mmap_data_free(struct rcu_head *rcu_head)
1903 struct perf_mmap_data *data;
1906 data = container_of(rcu_head, struct perf_mmap_data, rcu_head);
1908 perf_mmap_free_page((unsigned long)data->user_page);
1909 for (i = 0; i < data->nr_pages; i++)
1910 perf_mmap_free_page((unsigned long)data->data_pages[i]);
1915 static void perf_mmap_data_free(struct perf_counter *counter)
1917 struct perf_mmap_data *data = counter->data;
1919 WARN_ON(atomic_read(&counter->mmap_count));
1921 rcu_assign_pointer(counter->data, NULL);
1922 call_rcu(&data->rcu_head, __perf_mmap_data_free);
1925 static void perf_mmap_open(struct vm_area_struct *vma)
1927 struct perf_counter *counter = vma->vm_file->private_data;
1929 atomic_inc(&counter->mmap_count);
1932 static void perf_mmap_close(struct vm_area_struct *vma)
1934 struct perf_counter *counter = vma->vm_file->private_data;
1936 WARN_ON_ONCE(counter->ctx->parent_ctx);
1937 if (atomic_dec_and_mutex_lock(&counter->mmap_count, &counter->mmap_mutex)) {
1938 struct user_struct *user = current_user();
1940 atomic_long_sub(counter->data->nr_pages + 1, &user->locked_vm);
1941 vma->vm_mm->locked_vm -= counter->data->nr_locked;
1942 perf_mmap_data_free(counter);
1943 mutex_unlock(&counter->mmap_mutex);
1947 static struct vm_operations_struct perf_mmap_vmops = {
1948 .open = perf_mmap_open,
1949 .close = perf_mmap_close,
1950 .fault = perf_mmap_fault,
1951 .page_mkwrite = perf_mmap_fault,
1954 static int perf_mmap(struct file *file, struct vm_area_struct *vma)
1956 struct perf_counter *counter = file->private_data;
1957 unsigned long user_locked, user_lock_limit;
1958 struct user_struct *user = current_user();
1959 unsigned long locked, lock_limit;
1960 unsigned long vma_size;
1961 unsigned long nr_pages;
1962 long user_extra, extra;
1965 if (!(vma->vm_flags & VM_SHARED))
1968 vma_size = vma->vm_end - vma->vm_start;
1969 nr_pages = (vma_size / PAGE_SIZE) - 1;
1972 * If we have data pages ensure they're a power-of-two number, so we
1973 * can do bitmasks instead of modulo.
1975 if (nr_pages != 0 && !is_power_of_2(nr_pages))
1978 if (vma_size != PAGE_SIZE * (1 + nr_pages))
1981 if (vma->vm_pgoff != 0)
1984 WARN_ON_ONCE(counter->ctx->parent_ctx);
1985 mutex_lock(&counter->mmap_mutex);
1986 if (atomic_inc_not_zero(&counter->mmap_count)) {
1987 if (nr_pages != counter->data->nr_pages)
1992 user_extra = nr_pages + 1;
1993 user_lock_limit = sysctl_perf_counter_mlock >> (PAGE_SHIFT - 10);
1996 * Increase the limit linearly with more CPUs:
1998 user_lock_limit *= num_online_cpus();
2000 user_locked = atomic_long_read(&user->locked_vm) + user_extra;
2003 if (user_locked > user_lock_limit)
2004 extra = user_locked - user_lock_limit;
2006 lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
2007 lock_limit >>= PAGE_SHIFT;
2008 locked = vma->vm_mm->locked_vm + extra;
2010 if ((locked > lock_limit) && !capable(CAP_IPC_LOCK)) {
2015 WARN_ON(counter->data);
2016 ret = perf_mmap_data_alloc(counter, nr_pages);
2020 atomic_set(&counter->mmap_count, 1);
2021 atomic_long_add(user_extra, &user->locked_vm);
2022 vma->vm_mm->locked_vm += extra;
2023 counter->data->nr_locked = extra;
2024 if (vma->vm_flags & VM_WRITE)
2025 counter->data->writable = 1;
2028 mutex_unlock(&counter->mmap_mutex);
2030 vma->vm_flags |= VM_RESERVED;
2031 vma->vm_ops = &perf_mmap_vmops;
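/*
 * Editor's example (illustrative, hypothetical userspace names): a typical
 * user maps one metadata page plus a power-of-two number of data pages,
 * e.g. 8 pages of buffer:
 *
 *	len  = (1 + 8) * page_size;
 *	base = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 *
 * nr_pages must be a power of two so the ring-buffer code can mask offsets
 * instead of using a modulo, and the pinned memory (nr_pages + 1 pages) is
 * charged against sysctl_perf_counter_mlock and RLIMIT_MEMLOCK as computed
 * above.
 */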
2036 static int perf_fasync(int fd, struct file *filp, int on)
2038 struct inode *inode = filp->f_path.dentry->d_inode;
2039 struct perf_counter *counter = filp->private_data;
2042 mutex_lock(&inode->i_mutex);
2043 retval = fasync_helper(fd, filp, on, &counter->fasync);
2044 mutex_unlock(&inode->i_mutex);
2052 static const struct file_operations perf_fops = {
2053 .release = perf_release,
2056 .unlocked_ioctl = perf_ioctl,
2057 .compat_ioctl = perf_ioctl,
2059 .fasync = perf_fasync,
2063 * Perf counter wakeup
2065 * If there's data, ensure we set the poll() state and publish everything
2066 * to user-space before waking everybody up.
2069 void perf_counter_wakeup(struct perf_counter *counter)
2071 wake_up_all(&counter->waitq);
2073 if (counter->pending_kill) {
2074 kill_fasync(&counter->fasync, SIGIO, counter->pending_kill);
2075 counter->pending_kill = 0;
2082 * Handle the case where we need to wake up from NMI (or rq->lock) context.
2084 * The NMI bit means we cannot possibly take locks. Therefore, maintain a
2085 * singly linked list and use cmpxchg() to add entries locklessly.
2088 static void perf_pending_counter(struct perf_pending_entry *entry)
2090 struct perf_counter *counter = container_of(entry,
2091 struct perf_counter, pending);
2093 if (counter->pending_disable) {
2094 counter->pending_disable = 0;
2095 perf_counter_disable(counter);
2098 if (counter->pending_wakeup) {
2099 counter->pending_wakeup = 0;
2100 perf_counter_wakeup(counter);
2104 #define PENDING_TAIL ((struct perf_pending_entry *)-1UL)
2106 static DEFINE_PER_CPU(struct perf_pending_entry *, perf_pending_head) = {
2110 static void perf_pending_queue(struct perf_pending_entry *entry,
2111 void (*func)(struct perf_pending_entry *))
2113 struct perf_pending_entry **head;
2115 if (cmpxchg(&entry->next, NULL, PENDING_TAIL) != NULL)
2120 head = &get_cpu_var(perf_pending_head);
2123 entry->next = *head;
2124 } while (cmpxchg(head, entry->next, entry) != entry->next);
2126 set_perf_counter_pending();
2128 put_cpu_var(perf_pending_head);
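/*
 * Editor's note (illustrative): entry->next == NULL means "not queued",
 * which is why the list is terminated by the PENDING_TAIL sentinel rather
 * than NULL. In short:
 *
 *	push:  entry->next = *head;  cmpxchg(head, entry->next, entry)
 *	pop:   list = xchg(head, PENDING_TAIL);  walk until PENDING_TAIL
 *
 * so entries can be queued from NMI context without taking any locks.
 */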
2131 static int __perf_pending_run(void)
2133 struct perf_pending_entry *list;
2136 list = xchg(&__get_cpu_var(perf_pending_head), PENDING_TAIL);
2137 while (list != PENDING_TAIL) {
2138 void (*func)(struct perf_pending_entry *);
2139 struct perf_pending_entry *entry = list;
2146 * Ensure we observe the unqueue before we issue the wakeup,
2147 * so that we won't be waiting forever.
2148 * -- see perf_not_pending().
2159 static inline int perf_not_pending(struct perf_counter *counter)
2162 * If we flush on whatever cpu we run, there is a chance we don't need to wait.
2166 __perf_pending_run();
2170 * Ensure we see the proper queue state before going to sleep
2171 * so that we do not miss the wakeup. -- see perf_pending_handle()
2174 return counter->pending.next == NULL;
2177 static void perf_pending_sync(struct perf_counter *counter)
2179 wait_event(counter->waitq, perf_not_pending(counter));
2182 void perf_counter_do_pending(void)
2184 __perf_pending_run();
2188 * Callchain support -- arch specific
2191 __weak struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
2200 struct perf_output_handle {
2201 struct perf_counter *counter;
2202 struct perf_mmap_data *data;
2204 unsigned long offset;
2208 unsigned long flags;
2211 static bool perf_output_space(struct perf_mmap_data *data,
2212 unsigned int offset, unsigned int head)
2217 if (!data->writable)
2220 mask = (data->nr_pages << PAGE_SHIFT) - 1;
2222 * Userspace could choose to issue a mb() before updating the tail
2223 * pointer, so that all reads will be completed before the write is issued.
2226 tail = ACCESS_ONCE(data->user_page->data_tail);
2229 offset = (offset - tail) & mask;
2230 head = (head - tail) & mask;
2232 if ((int)(head - offset) < 0)
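/*
 * Editor's example for perf_output_space() above, with hypothetical numbers:
 * 4 data pages of 4K give mask = 0x3fff. With data_tail = 0x5000,
 * offset = 0x6000 and a proposed head = 0x7800, the tail-relative positions
 * are 0x1000 and 0x2800, both inside the 16K window, so the reservation
 * fits. Had head advanced past tail + 16K, the masked head would wrap below
 * offset, (int)(head - offset) would go negative, and the record would be
 * dropped and accounted in data->lost instead.
 */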
2238 static void perf_output_wakeup(struct perf_output_handle *handle)
2240 atomic_set(&handle->data->poll, POLL_IN);
2243 handle->counter->pending_wakeup = 1;
2244 perf_pending_queue(&handle->counter->pending,
2245 perf_pending_counter);
2247 perf_counter_wakeup(handle->counter);
2251 * Curious locking construct.
2253 * We need to ensure a later event doesn't publish a head when a former
2254 * event isn't done writing. However since we need to deal with NMIs we
2255 * cannot fully serialize things.
2257 * What we do is serialize between CPUs so we only have to deal with NMI
2258 * nesting on a single CPU.
2260 * We only publish the head (and generate a wakeup) when the outer-most event completes.
2263 static void perf_output_lock(struct perf_output_handle *handle)
2265 struct perf_mmap_data *data = handle->data;
2270 local_irq_save(handle->flags);
2271 cpu = smp_processor_id();
2273 if (in_nmi() && atomic_read(&data->lock) == cpu)
2276 while (atomic_cmpxchg(&data->lock, -1, cpu) != -1)
2282 static void perf_output_unlock(struct perf_output_handle *handle)
2284 struct perf_mmap_data *data = handle->data;
2288 data->done_head = data->head;
2290 if (!handle->locked)
2295 * The xchg implies a full barrier that ensures all writes are done
2296 * before we publish the new head, matched by a rmb() in userspace when
2297 * reading this position.
2299 while ((head = atomic_long_xchg(&data->done_head, 0)))
2300 data->user_page->data_head = head;
2303 * NMI can happen here, which means we can miss a done_head update.
2306 cpu = atomic_xchg(&data->lock, -1);
2307 WARN_ON_ONCE(cpu != smp_processor_id());
2310 * Therefore we have to check whether we did indeed miss one.
2312 if (unlikely(atomic_long_read(&data->done_head))) {
2314 * Since we had it locked, we can lock it again.
2316 while (atomic_cmpxchg(&data->lock, -1, cpu) != -1)
2322 if (atomic_xchg(&data->wakeup, 0))
2323 perf_output_wakeup(handle);
2325 local_irq_restore(handle->flags);
2328 static void perf_output_copy(struct perf_output_handle *handle,
2329 const void *buf, unsigned int len)
2331 unsigned int pages_mask;
2332 unsigned int offset;
2336 offset = handle->offset;
2337 pages_mask = handle->data->nr_pages - 1;
2338 pages = handle->data->data_pages;
2341 unsigned int page_offset;
2344 nr = (offset >> PAGE_SHIFT) & pages_mask;
2345 page_offset = offset & (PAGE_SIZE - 1);
2346 size = min_t(unsigned int, PAGE_SIZE - page_offset, len);
2348 memcpy(pages[nr] + page_offset, buf, size);
2355 handle->offset = offset;
2358 * Check we didn't copy past our reservation window, taking the
2359 * possible unsigned int wrap into account.
2361 WARN_ON_ONCE(((long)(handle->head - handle->offset)) < 0);
2364 #define perf_output_put(handle, x) \
2365 perf_output_copy((handle), &(x), sizeof(x))
2367 static int perf_output_begin(struct perf_output_handle *handle,
2368 struct perf_counter *counter, unsigned int size,
2369 int nmi, int sample)
2371 struct perf_mmap_data *data;
2372 unsigned int offset, head;
2375 struct perf_event_header header;
2381 * For inherited counters we send all the output towards the parent.
2383 if (counter->parent)
2384 counter = counter->parent;
2387 data = rcu_dereference(counter->data);
2391 handle->data = data;
2392 handle->counter = counter;
2394 handle->sample = sample;
2396 if (!data->nr_pages)
2399 have_lost = atomic_read(&data->lost);
2401 size += sizeof(lost_event);
2403 perf_output_lock(handle);
2406 offset = head = atomic_long_read(&data->head);
2408 if (unlikely(!perf_output_space(data, offset, head)))
2410 } while (atomic_long_cmpxchg(&data->head, offset, head) != offset);
2412 handle->offset = offset;
2413 handle->head = head;
2415 if ((offset >> PAGE_SHIFT) != (head >> PAGE_SHIFT))
2416 atomic_set(&data->wakeup, 1);
2419 lost_event.header.type = PERF_EVENT_LOST;
2420 lost_event.header.misc = 0;
2421 lost_event.header.size = sizeof(lost_event);
2422 lost_event.id = counter->id;
2423 lost_event.lost = atomic_xchg(&data->lost, 0);
2425 perf_output_put(handle, lost_event);
2431 atomic_inc(&data->lost);
2432 perf_output_unlock(handle);
2439 static void perf_output_end(struct perf_output_handle *handle)
2441 struct perf_counter *counter = handle->counter;
2442 struct perf_mmap_data *data = handle->data;
2444 int wakeup_events = counter->attr.wakeup_events;
2446 if (handle->sample && wakeup_events) {
2447 int events = atomic_inc_return(&data->events);
2448 if (events >= wakeup_events) {
2449 atomic_sub(wakeup_events, &data->events);
2450 atomic_set(&data->wakeup, 1);
2454 perf_output_unlock(handle);
2458 static u32 perf_counter_pid(struct perf_counter *counter, struct task_struct *p)
2461 * only top level counters have the pid namespace they were created in
2463 if (counter->parent)
2464 counter = counter->parent;
2466 return task_tgid_nr_ns(p, counter->ns);
2469 static u32 perf_counter_tid(struct perf_counter *counter, struct task_struct *p)
2472 * only top level counters have the pid namespace they were created in
2474 if (counter->parent)
2475 counter = counter->parent;
2477 return task_pid_nr_ns(p, counter->ns);
2480 static void perf_counter_output(struct perf_counter *counter, int nmi,
2481 struct perf_sample_data *data)
2484 u64 sample_type = counter->attr.sample_type;
2485 struct perf_output_handle handle;
2486 struct perf_event_header header;
2495 struct perf_callchain_entry *callchain = NULL;
2496 int callchain_size = 0;
2503 header.size = sizeof(header);
2505 header.misc = PERF_EVENT_MISC_OVERFLOW;
2506 header.misc |= perf_misc_flags(data->regs);
2508 if (sample_type & PERF_SAMPLE_IP) {
2509 ip = perf_instruction_pointer(data->regs);
2510 header.type |= PERF_SAMPLE_IP;
2511 header.size += sizeof(ip);
2514 if (sample_type & PERF_SAMPLE_TID) {
2515 /* namespace issues */
2516 tid_entry.pid = perf_counter_pid(counter, current);
2517 tid_entry.tid = perf_counter_tid(counter, current);
2519 header.type |= PERF_SAMPLE_TID;
2520 header.size += sizeof(tid_entry);
2523 if (sample_type & PERF_SAMPLE_TIME) {
2525 * Maybe do better on x86 and provide cpu_clock_nmi()
2527 time = sched_clock();
2529 header.type |= PERF_SAMPLE_TIME;
2530 header.size += sizeof(u64);
2533 if (sample_type & PERF_SAMPLE_ADDR) {
2534 header.type |= PERF_SAMPLE_ADDR;
2535 header.size += sizeof(u64);
2538 if (sample_type & PERF_SAMPLE_ID) {
2539 header.type |= PERF_SAMPLE_ID;
2540 header.size += sizeof(u64);
2543 if (sample_type & PERF_SAMPLE_CPU) {
2544 header.type |= PERF_SAMPLE_CPU;
2545 header.size += sizeof(cpu_entry);
2547 cpu_entry.cpu = raw_smp_processor_id();
2550 if (sample_type & PERF_SAMPLE_PERIOD) {
2551 header.type |= PERF_SAMPLE_PERIOD;
2552 header.size += sizeof(u64);
2555 if (sample_type & PERF_SAMPLE_GROUP) {
2556 header.type |= PERF_SAMPLE_GROUP;
2557 header.size += sizeof(u64) +
2558 counter->nr_siblings * sizeof(group_entry);
2561 if (sample_type & PERF_SAMPLE_CALLCHAIN) {
2562 callchain = perf_callchain(data->regs);
2565 callchain_size = (1 + callchain->nr) * sizeof(u64);
2567 header.type |= PERF_SAMPLE_CALLCHAIN;
2568 header.size += callchain_size;
2572 ret = perf_output_begin(&handle, counter, header.size, nmi, 1);
2576 perf_output_put(&handle, header);
2578 if (sample_type & PERF_SAMPLE_IP)
2579 perf_output_put(&handle, ip);
2581 if (sample_type & PERF_SAMPLE_TID)
2582 perf_output_put(&handle, tid_entry);
2584 if (sample_type & PERF_SAMPLE_TIME)
2585 perf_output_put(&handle, time);
2587 if (sample_type & PERF_SAMPLE_ADDR)
2588 perf_output_put(&handle, data->addr);
2590 if (sample_type & PERF_SAMPLE_ID)
2591 perf_output_put(&handle, counter->id);
2593 if (sample_type & PERF_SAMPLE_CPU)
2594 perf_output_put(&handle, cpu_entry);
2596 if (sample_type & PERF_SAMPLE_PERIOD)
2597 perf_output_put(&handle, data->period);
2600 * XXX PERF_SAMPLE_GROUP vs inherited counters seems difficult.
2602 if (sample_type & PERF_SAMPLE_GROUP) {
2603 struct perf_counter *leader, *sub;
2604 u64 nr = counter->nr_siblings;
2606 perf_output_put(&handle, nr);
2608 leader = counter->group_leader;
2609 list_for_each_entry(sub, &leader->sibling_list, list_entry) {
2611 sub->pmu->read(sub);
2613 group_entry.id = sub->id;
2614 group_entry.counter = atomic64_read(&sub->count);
2616 perf_output_put(&handle, group_entry);
2621 perf_output_copy(&handle, callchain, callchain_size);
2623 perf_output_end(&handle);
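/*
 * Example (illustrative sketch, not compiled): a reader of the mmap()'ed
 * ring buffer can decode an overflow record by walking the same
 * sample_type bits, in the same order, that perf_counter_output() wrote
 * them above.  The helper name is made up and only IP, TID and TIME are
 * shown; a real consumer lives in user space.
 */
#if 0
static void example_parse_sample(const struct perf_event_header *hdr,
				 u64 sample_type)
{
	const u64 *p = (const u64 *)(hdr + 1);

	if (sample_type & PERF_SAMPLE_IP)
		printk(KERN_INFO "ip:   %016llx\n",
		       (unsigned long long)*p++);

	if (sample_type & PERF_SAMPLE_TID) {
		const u32 *t = (const u32 *)p;

		printk(KERN_INFO "pid:  %u tid: %u\n", t[0], t[1]);
		p++;
	}

	if (sample_type & PERF_SAMPLE_TIME)
		printk(KERN_INFO "time: %llu\n", (unsigned long long)*p++);
}
#endif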
2630 struct perf_read_event {
2631 struct perf_event_header header;
2640 perf_counter_read_event(struct perf_counter *counter,
2641 struct task_struct *task)
2643 struct perf_output_handle handle;
2644 struct perf_read_event event = {
2646 .type = PERF_EVENT_READ,
2648 .size = sizeof(event) - sizeof(event.format),
2650 .pid = perf_counter_pid(counter, task),
2651 .tid = perf_counter_tid(counter, task),
2652 .value = atomic64_read(&counter->count),
2656 if (counter->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
2657 event.header.size += sizeof(u64);
2658 event.format[i++] = counter->total_time_enabled;
2661 if (counter->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
2662 event.header.size += sizeof(u64);
2663 event.format[i++] = counter->total_time_running;
2666 if (counter->attr.read_format & PERF_FORMAT_ID) {
2669 event.header.size += sizeof(u64);
2670 if (counter->parent)
2671 id = counter->parent->id;
2675 event.format[i++] = id;
2678 ret = perf_output_begin(&handle, counter, event.header.size, 0, 0);
2682 perf_output_copy(&handle, &event, event.header.size);
2683 perf_output_end(&handle);
2690 struct perf_fork_event {
2691 struct task_struct *task;
2694 struct perf_event_header header;
2701 static void perf_counter_fork_output(struct perf_counter *counter,
2702 struct perf_fork_event *fork_event)
2704 struct perf_output_handle handle;
2705 int size = fork_event->event.header.size;
2706 struct task_struct *task = fork_event->task;
2707 int ret = perf_output_begin(&handle, counter, size, 0, 0);
2712 fork_event->event.pid = perf_counter_pid(counter, task);
2713 fork_event->event.ppid = perf_counter_pid(counter, task->real_parent);
2715 perf_output_put(&handle, fork_event->event);
2716 perf_output_end(&handle);
2719 static int perf_counter_fork_match(struct perf_counter *counter)
2721 if (counter->attr.comm || counter->attr.mmap)
2727 static void perf_counter_fork_ctx(struct perf_counter_context *ctx,
2728 struct perf_fork_event *fork_event)
2730 struct perf_counter *counter;
2732 if (system_state != SYSTEM_RUNNING || list_empty(&ctx->event_list))
2736 list_for_each_entry_rcu(counter, &ctx->event_list, event_entry) {
2737 if (perf_counter_fork_match(counter))
2738 perf_counter_fork_output(counter, fork_event);
2743 static void perf_counter_fork_event(struct perf_fork_event *fork_event)
2745 struct perf_cpu_context *cpuctx;
2746 struct perf_counter_context *ctx;
2748 cpuctx = &get_cpu_var(perf_cpu_context);
2749 perf_counter_fork_ctx(&cpuctx->ctx, fork_event);
2750 put_cpu_var(perf_cpu_context);
2754 * doesn't really matter which of the child contexts the
2755 * event ends up in.
2757 ctx = rcu_dereference(current->perf_counter_ctxp);
2759 perf_counter_fork_ctx(ctx, fork_event);
2763 void perf_counter_fork(struct task_struct *task)
2765 struct perf_fork_event fork_event;
2767 if (!atomic_read(&nr_comm_counters) &&
2768 !atomic_read(&nr_mmap_counters))
2771 fork_event = (struct perf_fork_event){
2775 .type = PERF_EVENT_FORK,
2776 .size = sizeof(fork_event.event),
2781 perf_counter_fork_event(&fork_event);
2788 struct perf_comm_event {
2789 struct task_struct *task;
2794 struct perf_event_header header;
2801 static void perf_counter_comm_output(struct perf_counter *counter,
2802 struct perf_comm_event *comm_event)
2804 struct perf_output_handle handle;
2805 int size = comm_event->event.header.size;
2806 int ret = perf_output_begin(&handle, counter, size, 0, 0);
2811 comm_event->event.pid = perf_counter_pid(counter, comm_event->task);
2812 comm_event->event.tid = perf_counter_tid(counter, comm_event->task);
2814 perf_output_put(&handle, comm_event->event);
2815 perf_output_copy(&handle, comm_event->comm,
2816 comm_event->comm_size);
2817 perf_output_end(&handle);
2820 static int perf_counter_comm_match(struct perf_counter *counter)
2822 if (counter->attr.comm)
2828 static void perf_counter_comm_ctx(struct perf_counter_context *ctx,
2829 struct perf_comm_event *comm_event)
2831 struct perf_counter *counter;
2833 if (system_state != SYSTEM_RUNNING || list_empty(&ctx->event_list))
2837 list_for_each_entry_rcu(counter, &ctx->event_list, event_entry) {
2838 if (perf_counter_comm_match(counter))
2839 perf_counter_comm_output(counter, comm_event);
2844 static void perf_counter_comm_event(struct perf_comm_event *comm_event)
2846 struct perf_cpu_context *cpuctx;
2847 struct perf_counter_context *ctx;
2849 char *comm = comm_event->task->comm;
2851 size = ALIGN(strlen(comm)+1, sizeof(u64));
2853 comm_event->comm = comm;
2854 comm_event->comm_size = size;
2856 comm_event->event.header.size = sizeof(comm_event->event) + size;
2858 cpuctx = &get_cpu_var(perf_cpu_context);
2859 perf_counter_comm_ctx(&cpuctx->ctx, comm_event);
2860 put_cpu_var(perf_cpu_context);
2864 * doesn't really matter which of the child contexts the
2865 * event ends up in.
2867 ctx = rcu_dereference(current->perf_counter_ctxp);
2869 perf_counter_comm_ctx(ctx, comm_event);
2873 void perf_counter_comm(struct task_struct *task)
2875 struct perf_comm_event comm_event;
2877 if (!atomic_read(&nr_comm_counters))
2880 comm_event = (struct perf_comm_event){
2883 .header = { .type = PERF_EVENT_COMM, },
2887 perf_counter_comm_event(&comm_event);
2894 struct perf_mmap_event {
2895 struct vm_area_struct *vma;
2897 const char *file_name;
2901 struct perf_event_header header;
2911 static void perf_counter_mmap_output(struct perf_counter *counter,
2912 struct perf_mmap_event *mmap_event)
2914 struct perf_output_handle handle;
2915 int size = mmap_event->event.header.size;
2916 int ret = perf_output_begin(&handle, counter, size, 0, 0);
2921 mmap_event->event.pid = perf_counter_pid(counter, current);
2922 mmap_event->event.tid = perf_counter_tid(counter, current);
2924 perf_output_put(&handle, mmap_event->event);
2925 perf_output_copy(&handle, mmap_event->file_name,
2926 mmap_event->file_size);
2927 perf_output_end(&handle);
2930 static int perf_counter_mmap_match(struct perf_counter *counter,
2931 struct perf_mmap_event *mmap_event)
2933 if (counter->attr.mmap)
2939 static void perf_counter_mmap_ctx(struct perf_counter_context *ctx,
2940 struct perf_mmap_event *mmap_event)
2942 struct perf_counter *counter;
2944 if (system_state != SYSTEM_RUNNING || list_empty(&ctx->event_list))
2948 list_for_each_entry_rcu(counter, &ctx->event_list, event_entry) {
2949 if (perf_counter_mmap_match(counter, mmap_event))
2950 perf_counter_mmap_output(counter, mmap_event);
2955 static void perf_counter_mmap_event(struct perf_mmap_event *mmap_event)
2957 struct perf_cpu_context *cpuctx;
2958 struct perf_counter_context *ctx;
2959 struct vm_area_struct *vma = mmap_event->vma;
2960 struct file *file = vma->vm_file;
2967 buf = kzalloc(PATH_MAX, GFP_KERNEL);
2969 name = strncpy(tmp, "//enomem", sizeof(tmp));
2972 name = d_path(&file->f_path, buf, PATH_MAX);
2974 name = strncpy(tmp, "//toolong", sizeof(tmp));
2978 name = arch_vma_name(mmap_event->vma);
2983 name = strncpy(tmp, "[vdso]", sizeof(tmp));
2987 name = strncpy(tmp, "//anon", sizeof(tmp));
2992 size = ALIGN(strlen(name)+1, sizeof(u64));
2994 mmap_event->file_name = name;
2995 mmap_event->file_size = size;
2997 mmap_event->event.header.size = sizeof(mmap_event->event) + size;
2999 cpuctx = &get_cpu_var(perf_cpu_context);
3000 perf_counter_mmap_ctx(&cpuctx->ctx, mmap_event);
3001 put_cpu_var(perf_cpu_context);
3005 * doesn't really matter which of the child contexts the
3006 * event ends up in.
3008 ctx = rcu_dereference(current->perf_counter_ctxp);
3010 perf_counter_mmap_ctx(ctx, mmap_event);
3016 void __perf_counter_mmap(struct vm_area_struct *vma)
3018 struct perf_mmap_event mmap_event;
3020 if (!atomic_read(&nr_mmap_counters))
3023 mmap_event = (struct perf_mmap_event){
3026 .header = { .type = PERF_EVENT_MMAP, },
3027 .start = vma->vm_start,
3028 .len = vma->vm_end - vma->vm_start,
3029 .pgoff = vma->vm_pgoff,
3033 perf_counter_mmap_event(&mmap_event);
3037 * Log sample_period changes so that analyzing tools can re-normalize the event stream.
3042 struct perf_event_header header;
3048 static void perf_log_period(struct perf_counter *counter, u64 period)
3050 struct perf_output_handle handle;
3051 struct freq_event event;
3054 if (counter->hw.sample_period == period)
3057 if (counter->attr.sample_type & PERF_SAMPLE_PERIOD)
3060 event = (struct freq_event) {
3062 .type = PERF_EVENT_PERIOD,
3064 .size = sizeof(event),
3066 .time = sched_clock(),
3071 ret = perf_output_begin(&handle, counter, sizeof(event), 1, 0);
3075 perf_output_put(&handle, event);
3076 perf_output_end(&handle);
3080 * IRQ throttle logging
3083 static void perf_log_throttle(struct perf_counter *counter, int enable)
3085 struct perf_output_handle handle;
3089 struct perf_event_header header;
3092 } throttle_event = {
3094 .type = enable ? PERF_EVENT_UNTHROTTLE : PERF_EVENT_THROTTLE,
3096 .size = sizeof(throttle_event),
3098 .time = sched_clock(),
3102 ret = perf_output_begin(&handle, counter, sizeof(throttle_event), 1, 0);
3106 perf_output_put(&handle, throttle_event);
3107 perf_output_end(&handle);
3111 * Generic counter overflow handling, sampling.
3114 int perf_counter_overflow(struct perf_counter *counter, int nmi,
3115 struct perf_sample_data *data)
3117 int events = atomic_read(&counter->event_limit);
3118 int throttle = counter->pmu->unthrottle != NULL;
3119 struct hw_perf_counter *hwc = &counter->hw;
3125 if (hwc->interrupts != MAX_INTERRUPTS) {
3127 if (HZ * hwc->interrupts >
3128 (u64)sysctl_perf_counter_sample_rate) {
3129 hwc->interrupts = MAX_INTERRUPTS;
3130 perf_log_throttle(counter, 0);
3135 * Keep re-disabling the counter even though on the previous
3136 * pass we disabled it - just in case we raced with a
3137 * sched-in and the counter got enabled again:
3143 if (counter->attr.freq) {
3144 u64 now = sched_clock();
3145 s64 delta = now - hwc->freq_stamp;
3147 hwc->freq_stamp = now;
3149 if (delta > 0 && delta < TICK_NSEC)
3150 perf_adjust_period(counter, NSEC_PER_SEC / (int)delta);
3154 * XXX event_limit might not quite work as expected on inherited counters.
3158 counter->pending_kill = POLL_IN;
3159 if (events && atomic_dec_and_test(&counter->event_limit)) {
3161 counter->pending_kill = POLL_HUP;
3163 counter->pending_disable = 1;
3164 perf_pending_queue(&counter->pending,
3165 perf_pending_counter);
3167 perf_counter_disable(counter);
3170 perf_counter_output(counter, nmi, data);
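/*
 * Worked example (illustrative): the throttle test above limits a counter
 * to roughly sysctl_perf_counter_sample_rate interrupts per second,
 * accounted per tick.  With HZ == 1000 and the default rate of 100000,
 * "HZ * hwc->interrupts > sysctl_perf_counter_sample_rate" first becomes
 * true at hwc->interrupts == 101, i.e. after about 100 interrupts inside a
 * single tick the counter is marked MAX_INTERRUPTS and perf_log_throttle()
 * records the throttle event.
 */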
3175 * Generic software counter infrastructure
3178 static void perf_swcounter_update(struct perf_counter *counter)
3180 struct hw_perf_counter *hwc = &counter->hw;
3185 prev = atomic64_read(&hwc->prev_count);
3186 now = atomic64_read(&hwc->count);
3187 if (atomic64_cmpxchg(&hwc->prev_count, prev, now) != prev)
3192 atomic64_add(delta, &counter->count);
3193 atomic64_sub(delta, &hwc->period_left);
3196 static void perf_swcounter_set_period(struct perf_counter *counter)
3198 struct hw_perf_counter *hwc = &counter->hw;
3199 s64 left = atomic64_read(&hwc->period_left);
3200 s64 period = hwc->sample_period;
3202 if (unlikely(left <= -period)) {
3204 atomic64_set(&hwc->period_left, left);
3205 hwc->last_period = period;
3208 if (unlikely(left <= 0)) {
3210 atomic64_add(period, &hwc->period_left);
3211 hwc->last_period = period;
3214 atomic64_set(&hwc->prev_count, -left);
3215 atomic64_set(&hwc->count, -left);
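/*
 * Worked example (illustrative): setting both prev_count and count to
 * -left arms the software counter so that it goes non-negative after
 * exactly "left" more events.  With sample_period == 1000 and no residue
 * in period_left, the count starts at -1000; once 1000 events have been
 * added, atomic64_add_negative() in perf_swcounter_add() returns false and
 * perf_swcounter_overflow() fires.
 */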
3218 static enum hrtimer_restart perf_swcounter_hrtimer(struct hrtimer *hrtimer)
3220 enum hrtimer_restart ret = HRTIMER_RESTART;
3221 struct perf_sample_data data;
3222 struct perf_counter *counter;
3225 counter = container_of(hrtimer, struct perf_counter, hw.hrtimer);
3226 counter->pmu->read(counter);
3229 data.regs = get_irq_regs();
3231 * In case we exclude kernel IPs or are somehow not in interrupt
3232 * context, provide the next best thing, the user IP.
3234 if ((counter->attr.exclude_kernel || !data.regs) &&
3235 !counter->attr.exclude_user)
3236 data.regs = task_pt_regs(current);
3239 if (perf_counter_overflow(counter, 0, &data))
3240 ret = HRTIMER_NORESTART;
3243 period = max_t(u64, 10000, counter->hw.sample_period);
3244 hrtimer_forward_now(hrtimer, ns_to_ktime(period));
3249 static void perf_swcounter_overflow(struct perf_counter *counter,
3250 int nmi, struct perf_sample_data *data)
3252 data->period = counter->hw.last_period;
3254 perf_swcounter_update(counter);
3255 perf_swcounter_set_period(counter);
3256 if (perf_counter_overflow(counter, nmi, data))
3257 /* soft-disable the counter */
3261 static int perf_swcounter_is_counting(struct perf_counter *counter)
3263 struct perf_counter_context *ctx;
3264 unsigned long flags;
3267 if (counter->state == PERF_COUNTER_STATE_ACTIVE)
3270 if (counter->state != PERF_COUNTER_STATE_INACTIVE)
3274 * If the counter is inactive, it could be just because
3275 * its task is scheduled out, or because it's in a group
3276 * which could not go on the PMU. We want to count in
3277 * the first case but not the second. If the context is
3278 * currently active then an inactive software counter must
3279 * be the second case. If it's not currently active then
3280 * we need to know whether the counter was active when the
3281 * context was last active, which we can determine by
3282 * comparing counter->tstamp_stopped with ctx->time.
3284 * We are within an RCU read-side critical section,
3285 * which protects the existence of *ctx.
3288 spin_lock_irqsave(&ctx->lock, flags);
3290 /* Re-check state now we have the lock */
3291 if (counter->state < PERF_COUNTER_STATE_INACTIVE ||
3292 counter->ctx->is_active ||
3293 counter->tstamp_stopped < ctx->time)
3295 spin_unlock_irqrestore(&ctx->lock, flags);
3299 static int perf_swcounter_match(struct perf_counter *counter,
3300 enum perf_type_id type,
3301 u32 event, struct pt_regs *regs)
3303 if (!perf_swcounter_is_counting(counter))
3306 if (counter->attr.type != type)
3308 if (counter->attr.config != event)
3312 if (counter->attr.exclude_user && user_mode(regs))
3315 if (counter->attr.exclude_kernel && !user_mode(regs))
3322 static void perf_swcounter_add(struct perf_counter *counter, u64 nr,
3323 int nmi, struct perf_sample_data *data)
3325 int neg = atomic64_add_negative(nr, &counter->hw.count);
3327 if (counter->hw.sample_period && !neg && data->regs)
3328 perf_swcounter_overflow(counter, nmi, data);
3331 static void perf_swcounter_ctx_event(struct perf_counter_context *ctx,
3332 enum perf_type_id type,
3333 u32 event, u64 nr, int nmi,
3334 struct perf_sample_data *data)
3336 struct perf_counter *counter;
3338 if (system_state != SYSTEM_RUNNING || list_empty(&ctx->event_list))
3342 list_for_each_entry_rcu(counter, &ctx->event_list, event_entry) {
3343 if (perf_swcounter_match(counter, type, event, data->regs))
3344 perf_swcounter_add(counter, nr, nmi, data);
3349 static int *perf_swcounter_recursion_context(struct perf_cpu_context *cpuctx)
3352 return &cpuctx->recursion[3];
3355 return &cpuctx->recursion[2];
3358 return &cpuctx->recursion[1];
3360 return &cpuctx->recursion[0];
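/*
 * Note (the selection logic above is abbreviated): each of the four
 * recursion slots tracks one execution context; in the full source the
 * choice is based on in_nmi()/in_irq()/in_softirq(), falling back to task
 * context.  do_perf_swcounter_event() below (its recursion check is also
 * abbreviated) uses the slot to drop a software counter event raised while
 * another one is already being processed in the same context, instead of
 * recursing.
 */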
3363 static void do_perf_swcounter_event(enum perf_type_id type, u32 event,
3365 struct perf_sample_data *data)
3367 struct perf_cpu_context *cpuctx = &get_cpu_var(perf_cpu_context);
3368 int *recursion = perf_swcounter_recursion_context(cpuctx);
3369 struct perf_counter_context *ctx;
3377 perf_swcounter_ctx_event(&cpuctx->ctx, type, event,
3381 * doesn't really matter which of the child contexts the
3382 * event ends up in.
3384 ctx = rcu_dereference(current->perf_counter_ctxp);
3386 perf_swcounter_ctx_event(ctx, type, event, nr, nmi, data);
3393 put_cpu_var(perf_cpu_context);
3396 void __perf_swcounter_event(u32 event, u64 nr, int nmi,
3397 struct pt_regs *regs, u64 addr)
3399 struct perf_sample_data data = {
3404 do_perf_swcounter_event(PERF_TYPE_SOFTWARE, event, nr, nmi, &data);
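/*
 * Example (illustrative, made-up call site): kernel code reports software
 * events through __perf_swcounter_event().  The real hooks live in the
 * fault-handling and scheduler paths; the function below only shows the
 * calling convention.
 */
#if 0
static void example_report_fault(struct pt_regs *regs, unsigned long address)
{
	/* one page-fault event, not in NMI context, faulting at 'address' */
	__perf_swcounter_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address);
}
#endif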
3407 static void perf_swcounter_read(struct perf_counter *counter)
3409 perf_swcounter_update(counter);
3412 static int perf_swcounter_enable(struct perf_counter *counter)
3414 perf_swcounter_set_period(counter);
3418 static void perf_swcounter_disable(struct perf_counter *counter)
3420 perf_swcounter_update(counter);
3423 static const struct pmu perf_ops_generic = {
3424 .enable = perf_swcounter_enable,
3425 .disable = perf_swcounter_disable,
3426 .read = perf_swcounter_read,
3430 * Software counter: cpu wall time clock
3433 static void cpu_clock_perf_counter_update(struct perf_counter *counter)
3435 int cpu = raw_smp_processor_id();
3439 now = cpu_clock(cpu);
3440 prev = atomic64_read(&counter->hw.prev_count);
3441 atomic64_set(&counter->hw.prev_count, now);
3442 atomic64_add(now - prev, &counter->count);
3445 static int cpu_clock_perf_counter_enable(struct perf_counter *counter)
3447 struct hw_perf_counter *hwc = &counter->hw;
3448 int cpu = raw_smp_processor_id();
3450 atomic64_set(&hwc->prev_count, cpu_clock(cpu));
3451 hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
3452 hwc->hrtimer.function = perf_swcounter_hrtimer;
3453 if (hwc->sample_period) {
3454 u64 period = max_t(u64, 10000, hwc->sample_period);
3455 __hrtimer_start_range_ns(&hwc->hrtimer,
3456 ns_to_ktime(period), 0,
3457 HRTIMER_MODE_REL, 0);
3463 static void cpu_clock_perf_counter_disable(struct perf_counter *counter)
3465 if (counter->hw.sample_period)
3466 hrtimer_cancel(&counter->hw.hrtimer);
3467 cpu_clock_perf_counter_update(counter);
3470 static void cpu_clock_perf_counter_read(struct perf_counter *counter)
3472 cpu_clock_perf_counter_update(counter);
3475 static const struct pmu perf_ops_cpu_clock = {
3476 .enable = cpu_clock_perf_counter_enable,
3477 .disable = cpu_clock_perf_counter_disable,
3478 .read = cpu_clock_perf_counter_read,
3482 * Software counter: task time clock
3485 static void task_clock_perf_counter_update(struct perf_counter *counter, u64 now)
3490 prev = atomic64_xchg(&counter->hw.prev_count, now);
3492 atomic64_add(delta, &counter->count);
3495 static int task_clock_perf_counter_enable(struct perf_counter *counter)
3497 struct hw_perf_counter *hwc = &counter->hw;
3500 now = counter->ctx->time;
3502 atomic64_set(&hwc->prev_count, now);
3503 hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
3504 hwc->hrtimer.function = perf_swcounter_hrtimer;
3505 if (hwc->sample_period) {
3506 u64 period = max_t(u64, 10000, hwc->sample_period);
3507 __hrtimer_start_range_ns(&hwc->hrtimer,
3508 ns_to_ktime(period), 0,
3509 HRTIMER_MODE_REL, 0);
3515 static void task_clock_perf_counter_disable(struct perf_counter *counter)
3517 if (counter->hw.sample_period)
3518 hrtimer_cancel(&counter->hw.hrtimer);
3519 task_clock_perf_counter_update(counter, counter->ctx->time);
3523 static void task_clock_perf_counter_read(struct perf_counter *counter)
3528 update_context_time(counter->ctx);
3529 time = counter->ctx->time;
3531 u64 now = perf_clock();
3532 u64 delta = now - counter->ctx->timestamp;
3533 time = counter->ctx->time + delta;
3536 task_clock_perf_counter_update(counter, time);
3539 static const struct pmu perf_ops_task_clock = {
3540 .enable = task_clock_perf_counter_enable,
3541 .disable = task_clock_perf_counter_disable,
3542 .read = task_clock_perf_counter_read,
3545 #ifdef CONFIG_EVENT_PROFILE
3546 void perf_tpcounter_event(int event_id)
3548 struct perf_sample_data data = {
3549 .regs = get_irq_regs(),
3554 data.regs = task_pt_regs(current);
3556 do_perf_swcounter_event(PERF_TYPE_TRACEPOINT, event_id, 1, 1, &data);
3558 EXPORT_SYMBOL_GPL(perf_tpcounter_event);
3560 extern int ftrace_profile_enable(int);
3561 extern void ftrace_profile_disable(int);
3563 static void tp_perf_counter_destroy(struct perf_counter *counter)
3565 ftrace_profile_disable(perf_event_id(&counter->attr));
3568 static const struct pmu *tp_perf_counter_init(struct perf_counter *counter)
3570 int event_id = perf_event_id(&counter->attr);
3573 ret = ftrace_profile_enable(event_id);
3577 counter->destroy = tp_perf_counter_destroy;
3579 return &perf_ops_generic;
3582 static const struct pmu *tp_perf_counter_init(struct perf_counter *counter)
3588 atomic_t perf_swcounter_enabled[PERF_COUNT_SW_MAX];
3590 static void sw_perf_counter_destroy(struct perf_counter *counter)
3592 u64 event = counter->attr.config;
3594 WARN_ON(counter->parent);
3596 atomic_dec(&perf_swcounter_enabled[event]);
3599 static const struct pmu *sw_perf_counter_init(struct perf_counter *counter)
3601 const struct pmu *pmu = NULL;
3602 u64 event = counter->attr.config;
3605 * Software counters (currently) can't in general distinguish
3606 * between user, kernel and hypervisor events.
3607 * However, context switches and cpu migrations are considered
3608 * to be kernel events, and page faults are never hypervisor events.
3612 case PERF_COUNT_SW_CPU_CLOCK:
3613 pmu = &perf_ops_cpu_clock;
3616 case PERF_COUNT_SW_TASK_CLOCK:
3618 * If the user instantiates this as a per-cpu counter,
3619 * use the cpu_clock counter instead.
3621 if (counter->ctx->task)
3622 pmu = &perf_ops_task_clock;
3624 pmu = &perf_ops_cpu_clock;
3627 case PERF_COUNT_SW_PAGE_FAULTS:
3628 case PERF_COUNT_SW_PAGE_FAULTS_MIN:
3629 case PERF_COUNT_SW_PAGE_FAULTS_MAJ:
3630 case PERF_COUNT_SW_CONTEXT_SWITCHES:
3631 case PERF_COUNT_SW_CPU_MIGRATIONS:
3632 if (!counter->parent) {
3633 atomic_inc(&perf_swcounter_enabled[event]);
3634 counter->destroy = sw_perf_counter_destroy;
3636 pmu = &perf_ops_generic;
3644 * Allocate and initialize a counter structure
3646 static struct perf_counter *
3647 perf_counter_alloc(struct perf_counter_attr *attr,
3649 struct perf_counter_context *ctx,
3650 struct perf_counter *group_leader,
3651 struct perf_counter *parent_counter,
3654 const struct pmu *pmu;
3655 struct perf_counter *counter;
3656 struct hw_perf_counter *hwc;
3659 counter = kzalloc(sizeof(*counter), gfpflags);
3661 return ERR_PTR(-ENOMEM);
3664 * Single counters are their own group leaders, with an
3665 * empty sibling list:
3668 group_leader = counter;
3670 mutex_init(&counter->child_mutex);
3671 INIT_LIST_HEAD(&counter->child_list);
3673 INIT_LIST_HEAD(&counter->list_entry);
3674 INIT_LIST_HEAD(&counter->event_entry);
3675 INIT_LIST_HEAD(&counter->sibling_list);
3676 init_waitqueue_head(&counter->waitq);
3678 mutex_init(&counter->mmap_mutex);
3681 counter->attr = *attr;
3682 counter->group_leader = group_leader;
3683 counter->pmu = NULL;
3685 counter->oncpu = -1;
3687 counter->parent = parent_counter;
3689 counter->ns = get_pid_ns(current->nsproxy->pid_ns);
3690 counter->id = atomic64_inc_return(&perf_counter_id);
3692 counter->state = PERF_COUNTER_STATE_INACTIVE;
3695 counter->state = PERF_COUNTER_STATE_OFF;
3700 hwc->sample_period = attr->sample_period;
3701 if (attr->freq && attr->sample_freq)
3702 hwc->sample_period = 1;
3704 atomic64_set(&hwc->period_left, hwc->sample_period);
3707 * we currently do not support PERF_SAMPLE_GROUP on inherited counters
3709 if (attr->inherit && (attr->sample_type & PERF_SAMPLE_GROUP))
3712 switch (attr->type) {
3714 case PERF_TYPE_HARDWARE:
3715 case PERF_TYPE_HW_CACHE:
3716 pmu = hw_perf_counter_init(counter);
3719 case PERF_TYPE_SOFTWARE:
3720 pmu = sw_perf_counter_init(counter);
3723 case PERF_TYPE_TRACEPOINT:
3724 pmu = tp_perf_counter_init(counter);
3734 else if (IS_ERR(pmu))
3739 put_pid_ns(counter->ns);
3741 return ERR_PTR(err);
3746 if (!counter->parent) {
3747 atomic_inc(&nr_counters);
3748 if (counter->attr.mmap)
3749 atomic_inc(&nr_mmap_counters);
3750 if (counter->attr.comm)
3751 atomic_inc(&nr_comm_counters);
3757 static int perf_copy_attr(struct perf_counter_attr __user *uattr,
3758 struct perf_counter_attr *attr)
3763 if (!access_ok(VERIFY_WRITE, uattr, PERF_ATTR_SIZE_VER0))
3767 * zero the full structure, so that a short copy will be nice.
3769 memset(attr, 0, sizeof(*attr));
3771 ret = get_user(size, &uattr->size);
3775 if (size > PAGE_SIZE) /* silly large */
3778 if (!size) /* abi compat */
3779 size = PERF_ATTR_SIZE_VER0;
3781 if (size < PERF_ATTR_SIZE_VER0)
3785 * If we're handed a bigger struct than we know of,
3786 * ensure all the unknown bits are 0.
3788 if (size > sizeof(*attr)) {
3790 unsigned long __user *addr;
3791 unsigned long __user *end;
3793 addr = PTR_ALIGN((void __user *)uattr + sizeof(*attr),
3794 sizeof(unsigned long));
3795 end = PTR_ALIGN((void __user *)uattr + size,
3796 sizeof(unsigned long));
3798 for (; addr < end; addr++) {
3799 ret = get_user(val, addr);
3807 ret = copy_from_user(attr, uattr, size);
3812 * If the type exists, the corresponding creation will verify the attr->config.
3815 if (attr->type >= PERF_TYPE_MAX)
3818 if (attr->__reserved_1 || attr->__reserved_2 || attr->__reserved_3)
3821 if (attr->sample_type & ~(PERF_SAMPLE_MAX-1))
3824 if (attr->read_format & ~(PERF_FORMAT_MAX-1))
3831 put_user(sizeof(*attr), &uattr->size);
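/*
 * Summary of the ABI rules enforced above: user space passes its
 * sizeof(struct perf_counter_attr) in uattr->size.  Zero is treated as
 * PERF_ATTR_SIZE_VER0, anything smaller than VER0 or silly large is
 * rejected, and a larger struct is accepted only if every byte beyond what
 * this kernel knows about is zero.  When the size is rejected, the
 * put_user() above writes the kernel's own sizeof(*attr) back, so old and
 * new user space can negotiate a common size.
 */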
3837 * sys_perf_counter_open - open a performance counter, associate it with a task/cpu
3839 * @attr_uptr: event type attributes for monitoring/sampling
3842 * @group_fd: group leader counter fd
3844 SYSCALL_DEFINE5(perf_counter_open,
3845 struct perf_counter_attr __user *, attr_uptr,
3846 pid_t, pid, int, cpu, int, group_fd, unsigned long, flags)
3848 struct perf_counter *counter, *group_leader;
3849 struct perf_counter_attr attr;
3850 struct perf_counter_context *ctx;
3851 struct file *counter_file = NULL;
3852 struct file *group_file = NULL;
3853 int fput_needed = 0;
3854 int fput_needed2 = 0;
3857 /* for future expandability... */
3861 ret = perf_copy_attr(attr_uptr, &attr);
3865 if (!attr.exclude_kernel) {
3866 if (perf_paranoid_kernel() && !capable(CAP_SYS_ADMIN))
3871 if (attr.sample_freq > sysctl_perf_counter_sample_rate)
3876 * Get the target context (task or percpu):
3878 ctx = find_get_context(pid, cpu);
3880 return PTR_ERR(ctx);
3883 * Look up the group leader (we will attach this counter to it):
3885 group_leader = NULL;
3886 if (group_fd != -1) {
3888 group_file = fget_light(group_fd, &fput_needed);
3890 goto err_put_context;
3891 if (group_file->f_op != &perf_fops)
3892 goto err_put_context;
3894 group_leader = group_file->private_data;
3896 * Do not allow a recursive hierarchy (this new sibling
3897 * becoming part of another group-sibling):
3899 if (group_leader->group_leader != group_leader)
3900 goto err_put_context;
3902 * Do not allow attaching to a group in a different
3903 * task or CPU context:
3905 if (group_leader->ctx != ctx)
3906 goto err_put_context;
3908 * Only a group leader can be exclusive or pinned
3910 if (attr.exclusive || attr.pinned)
3911 goto err_put_context;
3914 counter = perf_counter_alloc(&attr, cpu, ctx, group_leader,
3916 ret = PTR_ERR(counter);
3917 if (IS_ERR(counter))
3918 goto err_put_context;
3920 ret = anon_inode_getfd("[perf_counter]", &perf_fops, counter, 0);
3922 goto err_free_put_context;
3924 counter_file = fget_light(ret, &fput_needed2);
3926 goto err_free_put_context;
3928 counter->filp = counter_file;
3929 WARN_ON_ONCE(ctx->parent_ctx);
3930 mutex_lock(&ctx->mutex);
3931 perf_install_in_context(ctx, counter, cpu);
3933 mutex_unlock(&ctx->mutex);
3935 counter->owner = current;
3936 get_task_struct(current);
3937 mutex_lock(&current->perf_counter_mutex);
3938 list_add_tail(&counter->owner_entry, &current->perf_counter_list);
3939 mutex_unlock(&current->perf_counter_mutex);
3941 fput_light(counter_file, fput_needed2);
3944 fput_light(group_file, fput_needed);
3948 err_free_put_context:
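/*
 * Example (illustrative user-space sketch): open a hardware cycles counter
 * on the calling task (pid 0, any cpu, no group leader, no flags) and read
 * its 64-bit value through the returned fd.  The syscall number is
 * architecture specific; __NR_perf_counter_open and PERF_COUNT_HW_CPU_CYCLES
 * are assumed to come from the installed kernel headers.
 *
 *	struct perf_counter_attr attr = {
 *		.type	= PERF_TYPE_HARDWARE,
 *		.size	= sizeof(attr),
 *		.config	= PERF_COUNT_HW_CPU_CYCLES,
 *	};
 *	u64 count;
 *	int fd = syscall(__NR_perf_counter_open, &attr, 0, -1, -1, 0);
 *
 *	if (fd >= 0 && read(fd, &count, sizeof(count)) == sizeof(count))
 *		printf("cycles: %llu\n", (unsigned long long)count);
 */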
3958 * inherit a counter from parent task to child task:
3960 static struct perf_counter *
3961 inherit_counter(struct perf_counter *parent_counter,
3962 struct task_struct *parent,
3963 struct perf_counter_context *parent_ctx,
3964 struct task_struct *child,
3965 struct perf_counter *group_leader,
3966 struct perf_counter_context *child_ctx)
3968 struct perf_counter *child_counter;
3971 * Instead of creating recursive hierarchies of counters,
3972 * we link inherited counters back to the original parent,
3973 * which has a filp for sure, which we use as the reference count:
3976 if (parent_counter->parent)
3977 parent_counter = parent_counter->parent;
3979 child_counter = perf_counter_alloc(&parent_counter->attr,
3980 parent_counter->cpu, child_ctx,
3981 group_leader, parent_counter,
3983 if (IS_ERR(child_counter))
3984 return child_counter;
3988 * Make the child state follow the state of the parent counter,
3989 * not its attr.disabled bit. We hold the parent's mutex,
3990 * so we won't race with perf_counter_{en, dis}able_family.
3992 if (parent_counter->state >= PERF_COUNTER_STATE_INACTIVE)
3993 child_counter->state = PERF_COUNTER_STATE_INACTIVE;
3995 child_counter->state = PERF_COUNTER_STATE_OFF;
3997 if (parent_counter->attr.freq)
3998 child_counter->hw.sample_period = parent_counter->hw.sample_period;
4001 * Link it up in the child's context:
4003 add_counter_to_ctx(child_counter, child_ctx);
4006 * Get a reference to the parent filp - we will fput it
4007 * when the child counter exits. This is safe to do because
4008 * we are in the parent and we know that the filp still
4009 * exists and has a nonzero count:
4011 atomic_long_inc(&parent_counter->filp->f_count);
4014 * Link this into the parent counter's child list
4016 WARN_ON_ONCE(parent_counter->ctx->parent_ctx);
4017 mutex_lock(&parent_counter->child_mutex);
4018 list_add_tail(&child_counter->child_list, &parent_counter->child_list);
4019 mutex_unlock(&parent_counter->child_mutex);
4021 return child_counter;
4024 static int inherit_group(struct perf_counter *parent_counter,
4025 struct task_struct *parent,
4026 struct perf_counter_context *parent_ctx,
4027 struct task_struct *child,
4028 struct perf_counter_context *child_ctx)
4030 struct perf_counter *leader;
4031 struct perf_counter *sub;
4032 struct perf_counter *child_ctr;
4034 leader = inherit_counter(parent_counter, parent, parent_ctx,
4035 child, NULL, child_ctx);
4037 return PTR_ERR(leader);
4038 list_for_each_entry(sub, &parent_counter->sibling_list, list_entry) {
4039 child_ctr = inherit_counter(sub, parent, parent_ctx,
4040 child, leader, child_ctx);
4041 if (IS_ERR(child_ctr))
4042 return PTR_ERR(child_ctr);
4047 static void sync_child_counter(struct perf_counter *child_counter,
4048 struct task_struct *child)
4050 struct perf_counter *parent_counter = child_counter->parent;
4053 perf_counter_read_event(child_counter, child);
4055 child_val = atomic64_read(&child_counter->count);
4058 * Add back the child's count to the parent's count:
4060 atomic64_add(child_val, &parent_counter->count);
4061 atomic64_add(child_counter->total_time_enabled,
4062 &parent_counter->child_total_time_enabled);
4063 atomic64_add(child_counter->total_time_running,
4064 &parent_counter->child_total_time_running);
4067 * Remove this counter from the parent's list
4069 WARN_ON_ONCE(parent_counter->ctx->parent_ctx);
4070 mutex_lock(&parent_counter->child_mutex);
4071 list_del_init(&child_counter->child_list);
4072 mutex_unlock(&parent_counter->child_mutex);
4075 * Release the parent counter, if this was the last reference to it.
4078 fput(parent_counter->filp);
4082 __perf_counter_exit_task(struct perf_counter *child_counter,
4083 struct perf_counter_context *child_ctx,
4084 struct task_struct *child)
4086 struct perf_counter *parent_counter;
4088 update_counter_times(child_counter);
4089 perf_counter_remove_from_context(child_counter);
4091 parent_counter = child_counter->parent;
4093 * It can happen that parent exits first, and has counters
4094 * that are still around due to the child reference. These
4095 * counters need to be zapped - but otherwise linger.
4097 if (parent_counter) {
4098 sync_child_counter(child_counter, child);
4099 free_counter(child_counter);
4104 * When a child task exits, feed back counter values to parent counters.
4106 void perf_counter_exit_task(struct task_struct *child)
4108 struct perf_counter *child_counter, *tmp;
4109 struct perf_counter_context *child_ctx;
4110 unsigned long flags;
4112 if (likely(!child->perf_counter_ctxp))
4115 local_irq_save(flags);
4117 * We can't reschedule here because interrupts are disabled,
4118 * and either child is current or it is a task that can't be
4119 * scheduled, so we are now safe from rescheduling changing our context.
4122 child_ctx = child->perf_counter_ctxp;
4123 __perf_counter_task_sched_out(child_ctx);
4126 * Take the context lock here so that if find_get_context is
4127 * reading child->perf_counter_ctxp, we wait until it has
4128 * incremented the context's refcount before we do put_ctx below.
4130 spin_lock(&child_ctx->lock);
4131 child->perf_counter_ctxp = NULL;
4132 if (child_ctx->parent_ctx) {
4134 * This context is a clone; unclone it so it can't get
4135 * swapped to another process while we're removing all
4136 * the counters from it.
4138 put_ctx(child_ctx->parent_ctx);
4139 child_ctx->parent_ctx = NULL;
4141 spin_unlock(&child_ctx->lock);
4142 local_irq_restore(flags);
4145 * We can recurse on the same lock type through:
4147 * __perf_counter_exit_task()
4148 * sync_child_counter()
4149 * fput(parent_counter->filp)
4151 * mutex_lock(&ctx->mutex)
4153 * But since it's the parent context it won't be the same instance.
4155 mutex_lock_nested(&child_ctx->mutex, SINGLE_DEPTH_NESTING);
4158 list_for_each_entry_safe(child_counter, tmp, &child_ctx->counter_list,
4160 __perf_counter_exit_task(child_counter, child_ctx, child);
4163 * If the last counter was a group counter, it will have appended all
4164 * its siblings to the list, but we obtained 'tmp' before that which
4165 * will still point to the list head terminating the iteration.
4167 if (!list_empty(&child_ctx->counter_list))
4170 mutex_unlock(&child_ctx->mutex);
4176 * free an unexposed, unused context, as created by inheritance in
4177 * perf_counter_init_task() below; used by fork() in case of failure.
4179 void perf_counter_free_task(struct task_struct *task)
4181 struct perf_counter_context *ctx = task->perf_counter_ctxp;
4182 struct perf_counter *counter, *tmp;
4187 mutex_lock(&ctx->mutex);
4189 list_for_each_entry_safe(counter, tmp, &ctx->counter_list, list_entry) {
4190 struct perf_counter *parent = counter->parent;
4192 if (WARN_ON_ONCE(!parent))
4195 mutex_lock(&parent->child_mutex);
4196 list_del_init(&counter->child_list);
4197 mutex_unlock(&parent->child_mutex);
4201 list_del_counter(counter, ctx);
4202 free_counter(counter);
4205 if (!list_empty(&ctx->counter_list))
4208 mutex_unlock(&ctx->mutex);
4214 * Initialize the perf_counter context in task_struct
4216 int perf_counter_init_task(struct task_struct *child)
4218 struct perf_counter_context *child_ctx, *parent_ctx;
4219 struct perf_counter_context *cloned_ctx;
4220 struct perf_counter *counter;
4221 struct task_struct *parent = current;
4222 int inherited_all = 1;
4225 child->perf_counter_ctxp = NULL;
4227 mutex_init(&child->perf_counter_mutex);
4228 INIT_LIST_HEAD(&child->perf_counter_list);
4230 if (likely(!parent->perf_counter_ctxp))
4234 * This is executed from the parent task context, so inherit
4235 * counters that have been marked for cloning.
4236 * First allocate and initialize a context for the child.
4239 child_ctx = kmalloc(sizeof(struct perf_counter_context), GFP_KERNEL);
4243 __perf_counter_init_context(child_ctx, child);
4244 child->perf_counter_ctxp = child_ctx;
4245 get_task_struct(child);
4248 * If the parent's context is a clone, pin it so it won't get swapped under us.
4251 parent_ctx = perf_pin_task_context(parent);
4254 * No need to check if parent_ctx != NULL here; since we saw
4255 * it non-NULL earlier, the only reason for it to become NULL
4256 * is if we exit, and since we're currently in the middle of
4257 * a fork we can't be exiting at the same time.
4261 * Lock the parent list. No need to lock the child - not PID
4262 * hashed yet and not running, so nobody can access it.
4264 mutex_lock(&parent_ctx->mutex);
4267 * We don't have to disable NMIs - we are only looking at
4268 * the list, not manipulating it:
4270 list_for_each_entry_rcu(counter, &parent_ctx->event_list, event_entry) {
4271 if (counter != counter->group_leader)
4274 if (!counter->attr.inherit) {
4279 ret = inherit_group(counter, parent, parent_ctx,
4287 if (inherited_all) {
4289 * Mark the child context as a clone of the parent
4290 * context, or of whatever the parent is a clone of.
4291 * Note that if the parent is a clone, it could get
4292 * uncloned at any point, but that doesn't matter
4293 * because the list of counters and the generation
4294 * count can't have changed since we took the mutex.
4296 cloned_ctx = rcu_dereference(parent_ctx->parent_ctx);
4298 child_ctx->parent_ctx = cloned_ctx;
4299 child_ctx->parent_gen = parent_ctx->parent_gen;
4301 child_ctx->parent_ctx = parent_ctx;
4302 child_ctx->parent_gen = parent_ctx->generation;
4304 get_ctx(child_ctx->parent_ctx);
4307 mutex_unlock(&parent_ctx->mutex);
4309 perf_unpin_context(parent_ctx);
4314 static void __cpuinit perf_counter_init_cpu(int cpu)
4316 struct perf_cpu_context *cpuctx;
4318 cpuctx = &per_cpu(perf_cpu_context, cpu);
4319 __perf_counter_init_context(&cpuctx->ctx, NULL);
4321 spin_lock(&perf_resource_lock);
4322 cpuctx->max_pertask = perf_max_counters - perf_reserved_percpu;
4323 spin_unlock(&perf_resource_lock);
4325 hw_perf_counter_setup(cpu);
4328 #ifdef CONFIG_HOTPLUG_CPU
4329 static void __perf_counter_exit_cpu(void *info)
4331 struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
4332 struct perf_counter_context *ctx = &cpuctx->ctx;
4333 struct perf_counter *counter, *tmp;
4335 list_for_each_entry_safe(counter, tmp, &ctx->counter_list, list_entry)
4336 __perf_counter_remove_from_context(counter);
4338 static void perf_counter_exit_cpu(int cpu)
4340 struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
4341 struct perf_counter_context *ctx = &cpuctx->ctx;
4343 mutex_lock(&ctx->mutex);
4344 smp_call_function_single(cpu, __perf_counter_exit_cpu, NULL, 1);
4345 mutex_unlock(&ctx->mutex);
4348 static inline void perf_counter_exit_cpu(int cpu) { }
4351 static int __cpuinit
4352 perf_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu)
4354 unsigned int cpu = (long)hcpu;
4358 case CPU_UP_PREPARE:
4359 case CPU_UP_PREPARE_FROZEN:
4360 perf_counter_init_cpu(cpu);
4363 case CPU_DOWN_PREPARE:
4364 case CPU_DOWN_PREPARE_FROZEN:
4365 perf_counter_exit_cpu(cpu);
4376 * This has to have a higher priority than migration_notifier in sched.c.
4378 static struct notifier_block __cpuinitdata perf_cpu_nb = {
4379 .notifier_call = perf_cpu_notify,
4383 void __init perf_counter_init(void)
4385 perf_cpu_notify(&perf_cpu_nb, (unsigned long)CPU_UP_PREPARE,
4386 (void *)(long)smp_processor_id());
4387 register_cpu_notifier(&perf_cpu_nb);
4390 static ssize_t perf_show_reserve_percpu(struct sysdev_class *class, char *buf)
4392 return sprintf(buf, "%d\n", perf_reserved_percpu);
4396 perf_set_reserve_percpu(struct sysdev_class *class,
4400 struct perf_cpu_context *cpuctx;
4404 err = strict_strtoul(buf, 10, &val);
4407 if (val > perf_max_counters)
4410 spin_lock(&perf_resource_lock);
4411 perf_reserved_percpu = val;
4412 for_each_online_cpu(cpu) {
4413 cpuctx = &per_cpu(perf_cpu_context, cpu);
4414 spin_lock_irq(&cpuctx->ctx.lock);
4415 mpt = min(perf_max_counters - cpuctx->ctx.nr_counters,
4416 perf_max_counters - perf_reserved_percpu);
4417 cpuctx->max_pertask = mpt;
4418 spin_unlock_irq(&cpuctx->ctx.lock);
4420 spin_unlock(&perf_resource_lock);
4425 static ssize_t perf_show_overcommit(struct sysdev_class *class, char *buf)
4427 return sprintf(buf, "%d\n", perf_overcommit);
4431 perf_set_overcommit(struct sysdev_class *class, const char *buf, size_t count)
4436 err = strict_strtoul(buf, 10, &val);
4442 spin_lock(&perf_resource_lock);
4443 perf_overcommit = val;
4444 spin_unlock(&perf_resource_lock);
4449 static SYSDEV_CLASS_ATTR(
4452 perf_show_reserve_percpu,
4453 perf_set_reserve_percpu
4456 static SYSDEV_CLASS_ATTR(
4459 perf_show_overcommit,
4463 static struct attribute *perfclass_attrs[] = {
4464 &attr_reserve_percpu.attr,
4465 &attr_overcommit.attr,
4469 static struct attribute_group perfclass_attr_group = {
4470 .attrs = perfclass_attrs,
4471 .name = "perf_counters",
4474 static int __init perf_counter_sysfs_init(void)
4476 return sysfs_create_group(&cpu_sysdev_class.kset.kobj,
4477 &perfclass_attr_group);
4479 device_initcall(perf_counter_sysfs_init);
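/*
 * Note: the "perf_counters" attribute group registered above exposes
 * reserve_percpu and overcommit as writable files; with the standard sysdev
 * layout they are expected to appear under
 * /sys/devices/system/cpu/perf_counters/ (path assumed, not taken from this
 * file) and accept a decimal value, updated under perf_resource_lock as in
 * the store functions above.
 */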