perf: Introduce perf_pmu_migrate_context()
author    Yan, Zheng <zheng.z.yan@intel.com>
          Fri, 15 Jun 2012 06:31:33 +0000 (14:31 +0800)
committer Ingo Molnar <mingo@kernel.org>
          Mon, 18 Jun 2012 10:13:21 +0000 (12:13 +0200)
Originally from Peter Zijlstra. The helper migrates a PMU's perf events
from one CPU to another.
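
For reference, a minimal usage sketch, assuming a hypothetical per-package
(uncore-style) PMU driver that tracks which CPU currently hosts its events
in its own cpumask; the function name and the "active_mask" bookkeeping
below are illustrative assumptions, not part of this patch. When the
hosting CPU goes offline, the driver hands its events to another online
CPU via the new helper.

#include <linux/cpumask.h>
#include <linux/perf_event.h>

/*
 * Hypothetical sketch only: the offline hook and the active_mask
 * bookkeeping are made up for illustration.
 */
static void example_pmu_cpu_offline(struct pmu *pmu, int cpu,
				    struct cpumask *active_mask)
{
	int target;

	/* Only act if the departing CPU was the one hosting the events. */
	if (!cpumask_test_and_clear_cpu(cpu, active_mask))
		return;

	/* Pick any other online CPU to take over. */
	target = cpumask_any_but(cpu_online_mask, cpu);
	if (target >= nr_cpu_ids)
		return;

	cpumask_set_cpu(target, active_mask);

	/* Move every event from the old CPU's context to the new one. */
	perf_pmu_migrate_context(pmu, cpu, target);
}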

Signed-off-by: Zheng Yan <zheng.z.yan@intel.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Link: http://lkml.kernel.org/r/1339741902-8449-5-git-send-email-zheng.z.yan@intel.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
include/linux/perf_event.h
kernel/events/core.c

index 1ce887abcc5cdff8c8f85080a35c6b2ffbf4831c..76c5c8b724a77253e3ca635c90e53ed6230b3c5f 100644
@@ -1107,6 +1107,8 @@ perf_event_create_kernel_counter(struct perf_event_attr *attr,
                                struct task_struct *task,
                                perf_overflow_handler_t callback,
                                void *context);
+extern void perf_pmu_migrate_context(struct pmu *pmu,
+                               int src_cpu, int dst_cpu);
 extern u64 perf_event_read_value(struct perf_event *event,
                                 u64 *enabled, u64 *running);
 
index fa36a39e8bb755b8d2dc6fd3a85bd9fd26ea0652..f1cf0edeb39afa1a3f6543a2f383ac8c4828127b 100644
@@ -1645,6 +1645,8 @@ perf_install_in_context(struct perf_event_context *ctx,
        lockdep_assert_held(&ctx->mutex);
 
        event->ctx = ctx;
+       if (event->cpu != -1)
+               event->cpu = cpu;
 
        if (!task) {
                /*
@@ -6379,6 +6381,7 @@ SYSCALL_DEFINE5(perf_event_open,
        mutex_lock(&ctx->mutex);
 
        if (move_group) {
+               synchronize_rcu();
                perf_install_in_context(ctx, group_leader, event->cpu);
                get_ctx(ctx);
                list_for_each_entry(sibling, &group_leader->sibling_list,
@@ -6484,6 +6487,39 @@ err:
 }
 EXPORT_SYMBOL_GPL(perf_event_create_kernel_counter);
 
+void perf_pmu_migrate_context(struct pmu *pmu, int src_cpu, int dst_cpu)
+{
+       struct perf_event_context *src_ctx;
+       struct perf_event_context *dst_ctx;
+       struct perf_event *event, *tmp;
+       LIST_HEAD(events);
+
+       src_ctx = &per_cpu_ptr(pmu->pmu_cpu_context, src_cpu)->ctx;
+       dst_ctx = &per_cpu_ptr(pmu->pmu_cpu_context, dst_cpu)->ctx;
+
+       mutex_lock(&src_ctx->mutex);
+       list_for_each_entry_safe(event, tmp, &src_ctx->event_list,
+                                event_entry) {
+               perf_remove_from_context(event);
+               put_ctx(src_ctx);
+               list_add(&event->event_entry, &events);
+       }
+       mutex_unlock(&src_ctx->mutex);
+
+       synchronize_rcu();
+
+       mutex_lock(&dst_ctx->mutex);
+       list_for_each_entry_safe(event, tmp, &events, event_entry) {
+               list_del(&event->event_entry);
+               if (event->state >= PERF_EVENT_STATE_OFF)
+                       event->state = PERF_EVENT_STATE_INACTIVE;
+               perf_install_in_context(dst_ctx, event, dst_cpu);
+               get_ctx(dst_ctx);
+       }
+       mutex_unlock(&dst_ctx->mutex);
+}
+EXPORT_SYMBOL_GPL(perf_pmu_migrate_context);
+
 static void sync_child_event(struct perf_event *child_event,
                               struct task_struct *child)
 {