perf_counter: Rename enums
author		Peter Zijlstra <a.p.zijlstra@chello.nl>
Thu, 11 Jun 2009 11:19:29 +0000 (13:19 +0200)
committer	Ingo Molnar <mingo@elte.hu>
Thu, 11 Jun 2009 15:53:41 +0000 (17:53 +0200)
Rename the perf enums to be in the 'perf_' namespace and strictly
enumerate the ABI bits.
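
As an illustration of the intent (a hypothetical sketch, not part of this
patch, assuming only the identifiers from <linux/perf_counter.h> as changed
below): with the values strictly enumerated and the *_MAX sentinels marked
"non ABI", the kernel can bounds-check and table-index on the sentinels
while they remain free to grow:

	static const char *perf_type_name(enum perf_type_id type)
	{
		static const char * const names[PERF_TYPE_MAX] = {
			[PERF_TYPE_HARDWARE]	= "hardware",
			[PERF_TYPE_SOFTWARE]	= "software",
			[PERF_TYPE_TRACEPOINT]	= "tracepoint",
			[PERF_TYPE_HW_CACHE]	= "hw-cache",
			[PERF_TYPE_RAW]		= "raw",
		};

		if (type >= PERF_TYPE_MAX)	/* non ABI: may grow */
			return "unknown";
		return names[type];
	}

Userspace, by contrast, should rely only on the explicitly numbered values
(0..4 for the types above), never on the sentinels.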

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
include/linux/perf_counter.h
kernel/perf_counter.c

diff --git a/include/linux/perf_counter.h b/include/linux/perf_counter.h
index 95c797c480e8dc582ea417233b0ac11a6e73bf5a..d5911b02bc8c36430db4927798afbe73388d9fe1 100644
 /*
  * attr.type
  */
-enum perf_event_types {
+enum perf_type_id {
        PERF_TYPE_HARDWARE              = 0,
        PERF_TYPE_SOFTWARE              = 1,
        PERF_TYPE_TRACEPOINT            = 2,
        PERF_TYPE_HW_CACHE              = 3,
+       PERF_TYPE_RAW                   = 4,
 
-       /*
-        * available TYPE space, raw is the max value.
-        */
-
-       PERF_TYPE_RAW                   = 128,
+       PERF_TYPE_MAX,                  /* non ABI */
 };
 
 /*
  * Generalized performance counter event types, used by the attr.event_id
  * parameter of the sys_perf_counter_open() syscall:
  */
-enum attr_ids {
+enum perf_hw_id {
        /*
         * Common hardware events, generalized by the kernel:
         */
@@ -53,7 +50,7 @@ enum attr_ids {
        PERF_COUNT_BRANCH_MISSES        = 5,
        PERF_COUNT_BUS_CYCLES           = 6,
 
-       PERF_HW_EVENTS_MAX              = 7,
+       PERF_HW_EVENTS_MAX,             /* non ABI */
 };
 
 /*
@@ -63,30 +60,30 @@ enum attr_ids {
  *       { read, write, prefetch } x
  *       { accesses, misses }
  */
-enum hw_cache_id {
-       PERF_COUNT_HW_CACHE_L1D,
-       PERF_COUNT_HW_CACHE_L1I,
-       PERF_COUNT_HW_CACHE_L2,
-       PERF_COUNT_HW_CACHE_DTLB,
-       PERF_COUNT_HW_CACHE_ITLB,
-       PERF_COUNT_HW_CACHE_BPU,
-
-       PERF_COUNT_HW_CACHE_MAX,
+enum perf_hw_cache_id {
+       PERF_COUNT_HW_CACHE_L1D         = 0,
+       PERF_COUNT_HW_CACHE_L1I         = 1,
+       PERF_COUNT_HW_CACHE_L2          = 2,
+       PERF_COUNT_HW_CACHE_DTLB        = 3,
+       PERF_COUNT_HW_CACHE_ITLB        = 4,
+       PERF_COUNT_HW_CACHE_BPU         = 5,
+
+       PERF_COUNT_HW_CACHE_MAX,        /* non ABI */
 };
 
-enum hw_cache_op_id {
-       PERF_COUNT_HW_CACHE_OP_READ,
-       PERF_COUNT_HW_CACHE_OP_WRITE,
-       PERF_COUNT_HW_CACHE_OP_PREFETCH,
+enum perf_hw_cache_op_id {
+       PERF_COUNT_HW_CACHE_OP_READ     = 0,
+       PERF_COUNT_HW_CACHE_OP_WRITE    = 1,
+       PERF_COUNT_HW_CACHE_OP_PREFETCH = 2,
 
-       PERF_COUNT_HW_CACHE_OP_MAX,
+       PERF_COUNT_HW_CACHE_OP_MAX,     /* non ABI */
 };
 
-enum hw_cache_op_result_id {
-       PERF_COUNT_HW_CACHE_RESULT_ACCESS,
-       PERF_COUNT_HW_CACHE_RESULT_MISS,
+enum perf_hw_cache_op_result_id {
+       PERF_COUNT_HW_CACHE_RESULT_ACCESS       = 0,
+       PERF_COUNT_HW_CACHE_RESULT_MISS         = 1,
 
-       PERF_COUNT_HW_CACHE_RESULT_MAX,
+       PERF_COUNT_HW_CACHE_RESULT_MAX,         /* non ABI */
 };
 
 /*
@@ -95,7 +92,7 @@ enum hw_cache_op_result_id {
  * physical and sw events of the kernel (and allow the profiling of them as
  * well):
  */
-enum sw_event_ids {
+enum perf_sw_ids {
        PERF_COUNT_CPU_CLOCK            = 0,
        PERF_COUNT_TASK_CLOCK           = 1,
        PERF_COUNT_PAGE_FAULTS          = 2,
@@ -104,7 +101,7 @@ enum sw_event_ids {
        PERF_COUNT_PAGE_FAULTS_MIN      = 5,
        PERF_COUNT_PAGE_FAULTS_MAJ      = 6,
 
-       PERF_SW_EVENTS_MAX              = 7,
+       PERF_SW_EVENTS_MAX,             /* non ABI */
 };
 
 /*
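
(Hypothetical usage sketch, not part of this patch: the three cache enums
above are meant to be composed into a single config value. The byte
positions used below, id in bits 0-7, op in bits 8-15 and result in bits
16-23, are an assumption about the encoding; this patch does not define it.)

	static inline u64 hw_cache_config(enum perf_hw_cache_id id,
					  enum perf_hw_cache_op_id op,
					  enum perf_hw_cache_op_result_id result)
	{
		/* assumed encoding: one enum per byte of the config value */
		return (u64)id | ((u64)op << 8) | ((u64)result << 16);
	}

	/* e.g. L1-D read misses: */
	u64 l1d_read_miss = hw_cache_config(PERF_COUNT_HW_CACHE_L1D,
					    PERF_COUNT_HW_CACHE_OP_READ,
					    PERF_COUNT_HW_CACHE_RESULT_MISS);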
diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c
index 3b2829de5590e8efefac9486340a43d9a64d21b9..c02535bed26ff70d7b24d4010b4c9ec4c094dde8 100644
@@ -3162,7 +3162,7 @@ static int perf_swcounter_is_counting(struct perf_counter *counter)
 }
 
 static int perf_swcounter_match(struct perf_counter *counter,
-                               enum perf_event_types type,
+                               enum perf_type_id type,
                                u32 event, struct pt_regs *regs)
 {
        if (!perf_swcounter_is_counting(counter))
@@ -3194,7 +3194,7 @@ static void perf_swcounter_add(struct perf_counter *counter, u64 nr,
 }
 
 static void perf_swcounter_ctx_event(struct perf_counter_context *ctx,
-                                    enum perf_event_types type, u32 event,
+                                    enum perf_type_id type, u32 event,
                                     u64 nr, int nmi, struct pt_regs *regs,
                                     u64 addr)
 {
@@ -3225,7 +3225,7 @@ static int *perf_swcounter_recursion_context(struct perf_cpu_context *cpuctx)
        return &cpuctx->recursion[0];
 }
 
-static void __perf_swcounter_event(enum perf_event_types type, u32 event,
+static void __perf_swcounter_event(enum perf_type_id type, u32 event,
                                   u64 nr, int nmi, struct pt_regs *regs,
                                   u64 addr)
 {
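
(Hypothetical sketch, not part of this patch: the renamed enum also threads
through to the public software-counter hook, which is assumed here to wrap
__perf_swcounter_event() with the software type fixed; the wrapper's name
and signature are assumptions, not introduced by this change.)

	void perf_swcounter_event(u32 event, u64 nr, int nmi,
				  struct pt_regs *regs, u64 addr)
	{
		/* all callers of the hook count software events */
		__perf_swcounter_event(PERF_TYPE_SOFTWARE, event,
				       nr, nmi, regs, addr);
	}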