From: Albin Tonnerre
Date: Wed, 29 Jan 2014 14:28:57 +0000 (+0000)
Subject: ARM: perf: add support for the Cortex-A12 PMU
X-Git-Tag: firefly_0821_release~5434
X-Git-Url: http://demsky.eecs.uci.edu/git/?a=commitdiff_plain;h=8d9206dbd46531d083d1c6c63457d6bbe7be2ec3;p=firefly-linux-kernel-4.4.55.git

ARM: perf: add support for the Cortex-A12 PMU

Cortex-A12 implements Performance Monitors compliant with the PMUv2
architecture. This patch adds support for the Cortex-A12 PMU to the
ARM perf backend.

Signed-off-by: Albin Tonnerre
Signed-off-by: Will Deacon

Conflicts:
	arch/arm/kernel/perf_event_v7.c
---
diff --git a/Documentation/devicetree/bindings/arm/pmu.txt b/Documentation/devicetree/bindings/arm/pmu.txt
index 4ce82d045a6b..27e506e1cd12 100644
--- a/Documentation/devicetree/bindings/arm/pmu.txt
+++ b/Documentation/devicetree/bindings/arm/pmu.txt
@@ -8,6 +8,7 @@ Required properties:
 
 - compatible : should be one of
 	"arm,cortex-a15-pmu"
+	"arm,cortex-a12-pmu"
 	"arm,cortex-a9-pmu"
 	"arm,cortex-a8-pmu"
 	"arm,cortex-a7-pmu"
diff --git a/arch/arm/kernel/perf_event_cpu.c b/arch/arm/kernel/perf_event_cpu.c
index 0b48a38e3cf4..00f6337f5c79 100644
--- a/arch/arm/kernel/perf_event_cpu.c
+++ b/arch/arm/kernel/perf_event_cpu.c
@@ -213,6 +213,7 @@ static struct notifier_block __cpuinitdata cpu_pmu_pm_notifier = {
  */
 static struct of_device_id cpu_pmu_of_device_ids[] = {
 	{.compatible = "arm,cortex-a15-pmu",	.data = armv7_a15_pmu_init},
+	{.compatible = "arm,cortex-a12-pmu",	.data = armv7_a12_pmu_init},
 	{.compatible = "arm,cortex-a9-pmu",	.data = armv7_a9_pmu_init},
 	{.compatible = "arm,cortex-a8-pmu",	.data = armv7_a8_pmu_init},
 	{.compatible = "arm,cortex-a7-pmu",	.data = armv7_a7_pmu_init},
diff --git a/arch/arm/kernel/perf_event_v7.c b/arch/arm/kernel/perf_event_v7.c
index 654db5030c31..721de731fcbc 100644
--- a/arch/arm/kernel/perf_event_v7.c
+++ b/arch/arm/kernel/perf_event_v7.c
@@ -109,6 +109,19 @@ enum armv7_a15_perf_types {
 	ARMV7_A15_PERFCTR_PC_WRITE_SPEC			= 0x76,
 };
 
+/* ARMv7 Cortex-A12 specific event types */
+enum armv7_a12_perf_types {
+	ARMV7_A12_PERFCTR_L1_DCACHE_ACCESS_READ		= 0x40,
+	ARMV7_A12_PERFCTR_L1_DCACHE_ACCESS_WRITE	= 0x41,
+
+	ARMV7_A12_PERFCTR_L2_CACHE_ACCESS_READ		= 0x50,
+	ARMV7_A12_PERFCTR_L2_CACHE_ACCESS_WRITE		= 0x51,
+
+	ARMV7_A12_PERFCTR_PC_WRITE_SPEC			= 0x76,
+
+	ARMV7_A12_PERFCTR_PF_TLB_REFILL			= 0xe7,
+};
+
 /*
  * Cortex-A8 HW events mapping
  *
@@ -731,6 +744,130 @@
 	},
 };
 
+/*
+ * Cortex-A12 HW events mapping
+ */
+static const unsigned armv7_a12_perf_map[PERF_COUNT_HW_MAX] = {
+	[PERF_COUNT_HW_CPU_CYCLES]		= ARMV7_PERFCTR_CPU_CYCLES,
+	[PERF_COUNT_HW_INSTRUCTIONS]		= ARMV7_PERFCTR_INSTR_EXECUTED,
+	[PERF_COUNT_HW_CACHE_REFERENCES]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
+	[PERF_COUNT_HW_CACHE_MISSES]		= ARMV7_PERFCTR_L1_DCACHE_REFILL,
+	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= ARMV7_A12_PERFCTR_PC_WRITE_SPEC,
+	[PERF_COUNT_HW_BRANCH_MISSES]		= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
+	[PERF_COUNT_HW_BUS_CYCLES]		= ARMV7_PERFCTR_BUS_CYCLES,
+	[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND]	= HW_OP_UNSUPPORTED,
+	[PERF_COUNT_HW_STALLED_CYCLES_BACKEND]	= HW_OP_UNSUPPORTED,
+};
+
+static const unsigned armv7_a12_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
+					[PERF_COUNT_HW_CACHE_OP_MAX]
+					[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
+	[C(L1D)] = {
+		[C(OP_READ)] = {
+			[C(RESULT_ACCESS)]	= ARMV7_A12_PERFCTR_L1_DCACHE_ACCESS_READ,
+			[C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_DCACHE_REFILL,
+		},
+		[C(OP_WRITE)] = {
+			[C(RESULT_ACCESS)]	= ARMV7_A12_PERFCTR_L1_DCACHE_ACCESS_WRITE,
+			[C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_DCACHE_REFILL,
+		},
+		[C(OP_PREFETCH)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
+		},
+	},
+	[C(L1I)] = {
+		/*
+		 * Not all performance counters differentiate between read
+		 * and write accesses/misses so we're not always strictly
+		 * correct, but it's the best we can do. Writes and reads get
+		 * combined in these cases.
+		 */
+		[C(OP_READ)] = {
+			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_ICACHE_ACCESS,
+			[C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_ICACHE_REFILL,
+		},
+		[C(OP_WRITE)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
+		},
+		[C(OP_PREFETCH)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
+		},
+	},
+	[C(LL)] = {
+		[C(OP_READ)] = {
+			[C(RESULT_ACCESS)]	= ARMV7_A12_PERFCTR_L2_CACHE_ACCESS_READ,
+			[C(RESULT_MISS)]	= ARMV7_PERFCTR_L2_CACHE_REFILL,
+		},
+		[C(OP_WRITE)] = {
+			[C(RESULT_ACCESS)]	= ARMV7_A12_PERFCTR_L2_CACHE_ACCESS_WRITE,
+			[C(RESULT_MISS)]	= ARMV7_PERFCTR_L2_CACHE_REFILL,
+		},
+		[C(OP_PREFETCH)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
+		},
+	},
+	[C(DTLB)] = {
+		[C(OP_READ)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= ARMV7_PERFCTR_DTLB_REFILL,
+		},
+		[C(OP_WRITE)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= ARMV7_PERFCTR_DTLB_REFILL,
+		},
+		[C(OP_PREFETCH)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= ARMV7_A12_PERFCTR_PF_TLB_REFILL,
+		},
+	},
+	[C(ITLB)] = {
+		[C(OP_READ)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_REFILL,
+		},
+		[C(OP_WRITE)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_REFILL,
+		},
+		[C(OP_PREFETCH)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
+		},
+	},
+	[C(BPU)] = {
+		[C(OP_READ)] = {
+			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
+			[C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
+		},
+		[C(OP_WRITE)] = {
+			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
+			[C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
+		},
+		[C(OP_PREFETCH)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
+		},
+	},
+	[C(NODE)] = {
+		[C(OP_READ)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
+		},
+		[C(OP_WRITE)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
+		},
+		[C(OP_PREFETCH)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
+		},
+	},
+};
+
 /*
  * Perf Events' indices
  */
@@ -1257,6 +1394,12 @@ static int armv7_a7_map_event(struct perf_event *event)
 				&armv7_a7_perf_cache_map, 0xFF);
 }
 
+static int armv7_a12_map_event(struct perf_event *event)
+{
+	return armpmu_map_event(event, &armv7_a12_perf_map,
+				&armv7_a12_perf_cache_map, 0xFF);
+}
+
 static void armv7pmu_init(struct arm_pmu *cpu_pmu)
 {
 	cpu_pmu->handle_irq	= armv7pmu_handle_irq;
@@ -1330,6 +1473,16 @@ static int armv7_a7_pmu_init(struct arm_pmu *cpu_pmu)
 	cpu_pmu->set_event_filter = armv7pmu_set_event_filter;
 	return 0;
 }
+
+static int armv7_a12_pmu_init(struct arm_pmu *cpu_pmu)
+{
+	armv7pmu_init(cpu_pmu);
+	cpu_pmu->name		= "ARMv7 Cortex-A12";
+	cpu_pmu->map_event	= armv7_a12_map_event;
+	cpu_pmu->num_events	= armv7_read_num_pmnc_events();
+	cpu_pmu->set_event_filter = armv7pmu_set_event_filter;
+	return 0;
+}
 #else
 static inline int armv7_a8_pmu_init(struct arm_pmu *cpu_pmu)
 {
@@ -1355,4 +1508,9 @@ static inline int armv7_a7_pmu_init(struct arm_pmu *cpu_pmu)
 {
 	return -ENODEV;
 }
+
+static inline int armv7_a12_pmu_init(struct arm_pmu *cpu_pmu)
+{
+	return -ENODEV;
+}
 #endif	/* CONFIG_CPU_V7 */
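
For context, the code below is a minimal user-space sketch (not part of the patch) of how one of the raw Cortex-A12 events exposed by this backend could be counted through the perf_event_open() syscall. Raw event 0x76 corresponds to ARMV7_A12_PERFCTR_PC_WRITE_SPEC from the enum added above; the busy-loop workload and the rest of the boilerplate are assumptions made purely for illustration.

/*
 * Sketch: count speculative PC writes (raw event 0x76,
 * ARMV7_A12_PERFCTR_PC_WRITE_SPEC) for the current task using
 * perf_event_open(). The busy loop stands in for a real workload.
 */
#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>

static long perf_event_open(struct perf_event_attr *attr, pid_t pid,
			    int cpu, int group_fd, unsigned long flags)
{
	return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
}

int main(void)
{
	struct perf_event_attr attr;
	uint64_t count;
	volatile unsigned int i;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.type = PERF_TYPE_RAW;
	attr.size = sizeof(attr);
	attr.config = 0x76;		/* ARMV7_A12_PERFCTR_PC_WRITE_SPEC */
	attr.disabled = 1;
	attr.exclude_kernel = 1;

	fd = perf_event_open(&attr, 0, -1, -1, 0);	/* current task, any CPU */
	if (fd < 0) {
		perror("perf_event_open");
		return 1;
	}

	ioctl(fd, PERF_EVENT_IOC_RESET, 0);
	ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);

	for (i = 0; i < 1000000; i++)
		;				/* placeholder workload */

	ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);
	if (read(fd, &count, sizeof(count)) == sizeof(count))
		printf("speculative PC writes: %llu\n",
		       (unsigned long long)count);

	close(fd);
	return 0;
}

The same raw event can also be requested from the perf tool with a raw event specifier such as "perf stat -e r76", assuming a kernel that carries this patch running on a Cortex-A12.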