/*
 * CCI cache coherent interconnect driver
 *
 * Copyright (C) 2013 ARM Ltd.
 * Author: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
 * kind, whether express or implied; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/arm-cci.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/of_address.h>
#include <linux/slab.h>

#include <asm/cacheflush.h>
#include <asm/irq_regs.h>
#include <asm/pmu.h>
#include <asm/smp_plat.h>

#define DRIVER_NAME		"CCI"

#define CCI_PORT_CTRL		0x0
#define CCI_CTRL_STATUS		0xc

#define CCI_ENABLE_SNOOP_REQ	0x1
#define CCI_ENABLE_DVM_REQ	0x2
#define CCI_ENABLE_REQ		(CCI_ENABLE_SNOOP_REQ | CCI_ENABLE_DVM_REQ)
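
/*
 * A write to CCI_PORT_CTRL does not take effect immediately: the
 * interconnect signals completion by clearing the change-pending bit
 * (bit 0) of CCI_CTRL_STATUS, which cci_port_control() polls below.
 */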

struct cci_nb_ports {
	unsigned int nb_ace;
	unsigned int nb_ace_lite;
};

enum cci_ace_port_type {
	ACE_INVALID_PORT = 0x0,
	ACE_PORT,
	ACE_LITE_PORT,
};

struct cci_ace_port {
	void __iomem *base;
	unsigned long phys;
	enum cci_ace_port_type type;
	struct device_node *dn;
};

static struct cci_ace_port *ports;
static unsigned int nb_cci_ports;

static void __iomem *cci_ctrl_base;
static unsigned long cci_ctrl_phys;

#ifdef CONFIG_HW_PERF_EVENTS

static void __iomem *cci_pmu_base;

#define CCI400_PMCR			0x0100

#define CCI400_PMU_CYCLE_CNTR_BASE	0x0000
#define CCI400_PMU_CNTR_BASE(idx)	(CCI400_PMU_CYCLE_CNTR_BASE + (idx) * 0x1000)

#define CCI400_PMCR_CEN			0x00000001
#define CCI400_PMCR_RST			0x00000002
#define CCI400_PMCR_CCR			0x00000004
#define CCI400_PMCR_CCD			0x00000008
#define CCI400_PMCR_EX			0x00000010
#define CCI400_PMCR_DP			0x00000020
#define CCI400_PMCR_NCNT_MASK		0x0000F800
#define CCI400_PMCR_NCNT_SHIFT		11

#define CCI400_PMU_EVT_SEL		0x000
#define CCI400_PMU_CNTR			0x004
#define CCI400_PMU_CNTR_CTRL		0x008
#define CCI400_PMU_OVERFLOW		0x00C

#define CCI400_PMU_OVERFLOW_FLAG	1

enum cci400_perf_events {
	CCI400_PMU_CYCLES = 0xFF
};

#define CCI400_PMU_EVENT_MASK		0xff
#define CCI400_PMU_EVENT_SOURCE(event)	(((event) >> 5) & 0x7)
#define CCI400_PMU_EVENT_CODE(event)	((event) & 0x1f)

#define CCI400_PMU_EVENT_SOURCE_S0	0
#define CCI400_PMU_EVENT_SOURCE_S4	4
#define CCI400_PMU_EVENT_SOURCE_M0	5
#define CCI400_PMU_EVENT_SOURCE_M2	7

#define CCI400_PMU_EVENT_SLAVE_MIN	0x0
#define CCI400_PMU_EVENT_SLAVE_MAX	0x13

#define CCI400_PMU_EVENT_MASTER_MIN	0x14
#define CCI400_PMU_EVENT_MASTER_MAX	0x1A
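
/*
 * Summary of the event encoding used by the macros above: an event is
 * the low byte of perf_event_attr.config, with bits [7:5] selecting
 * the source interface (slave ports S0-S4 map to sources 0-4, master
 * ports M0-M2 to sources 5-7) and bits [4:0] selecting the event code.
 */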

#define CCI400_PMU_MAX_HW_EVENTS	5 /* CCI PMU has 4 counters + 1 cycle counter */

#define CCI400_PMU_CYCLE_COUNTER_IDX	0
#define CCI400_PMU_COUNTER0_IDX		1
#define CCI400_PMU_COUNTER_LAST(cci_pmu)	(CCI400_PMU_CYCLE_COUNTER_IDX + (cci_pmu)->num_events - 1)
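
/*
 * Counter index space: index 0 is the cycle counter, indexes 1-4 the
 * four programmable counters. Each counter owns a 0x1000-byte register
 * window (see CCI400_PMU_CNTR_BASE) holding its EVT_SEL, CNTR,
 * CNTR_CTRL and OVERFLOW registers.
 */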

static struct perf_event *events[CCI400_PMU_MAX_HW_EVENTS];
static unsigned long used_mask[BITS_TO_LONGS(CCI400_PMU_MAX_HW_EVENTS)];
static struct pmu_hw_events cci_hw_events = {
	.events		= events,
	.used_mask	= used_mask,
};

static int cci_pmu_validate_hw_event(u8 hw_event)
{
	u8 ev_source = CCI400_PMU_EVENT_SOURCE(hw_event);
	u8 ev_code = CCI400_PMU_EVENT_CODE(hw_event);

	/* Slave interfaces (S0-S4) support any slave event code */
	if (ev_source <= CCI400_PMU_EVENT_SOURCE_S4 &&
	    ev_code <= CCI400_PMU_EVENT_SLAVE_MAX)
		return hw_event;
	/* Master interfaces (M0-M2) support master event codes only */
	else if (CCI400_PMU_EVENT_SOURCE_M0 <= ev_source &&
		 ev_source <= CCI400_PMU_EVENT_SOURCE_M2 &&
		 CCI400_PMU_EVENT_MASTER_MIN <= ev_code &&
		 ev_code <= CCI400_PMU_EVENT_MASTER_MAX)
		return hw_event;

	return -EINVAL;
}

static inline int cci_pmu_counter_is_valid(struct arm_pmu *cci_pmu, int idx)
{
	return CCI400_PMU_CYCLE_COUNTER_IDX <= idx &&
		idx <= CCI400_PMU_COUNTER_LAST(cci_pmu);
}

static inline u32 cci_pmu_read_register(int idx, unsigned int offset)
{
	return readl_relaxed(cci_pmu_base + CCI400_PMU_CNTR_BASE(idx) + offset);
}

static inline void cci_pmu_write_register(u32 value, int idx, unsigned int offset)
{
	writel_relaxed(value, cci_pmu_base + CCI400_PMU_CNTR_BASE(idx) + offset);
}

static inline void cci_pmu_disable_counter(int idx)
{
	cci_pmu_write_register(0, idx, CCI400_PMU_CNTR_CTRL);
}

static inline void cci_pmu_enable_counter(int idx)
{
	cci_pmu_write_register(1, idx, CCI400_PMU_CNTR_CTRL);
}

static inline void cci_pmu_select_event(int idx, unsigned long event)
{
	event &= CCI400_PMU_EVENT_MASK;
	cci_pmu_write_register(event, idx, CCI400_PMU_EVT_SEL);
}

static u32 cci_pmu_get_max_counters(void)
{
	u32 n_cnts = (readl_relaxed(cci_ctrl_base + CCI400_PMCR) &
		      CCI400_PMCR_NCNT_MASK) >> CCI400_PMCR_NCNT_SHIFT;

	/* add 1 for cycle counter */
	return n_cnts + 1;
}

static struct pmu_hw_events *cci_pmu_get_hw_events(void)
{
	return &cci_hw_events;
}

static int cci_pmu_get_event_idx(struct pmu_hw_events *hw, struct perf_event *event)
{
	struct arm_pmu *cci_pmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hw_event = &event->hw;
	unsigned long cci_event = hw_event->config_base & CCI400_PMU_EVENT_MASK;
	int idx;

	if (cci_event == CCI400_PMU_CYCLES) {
		if (test_and_set_bit(CCI400_PMU_CYCLE_COUNTER_IDX, hw->used_mask))
			return -EAGAIN;

		return CCI400_PMU_CYCLE_COUNTER_IDX;
	}

	for (idx = CCI400_PMU_COUNTER0_IDX; idx <= CCI400_PMU_COUNTER_LAST(cci_pmu); ++idx) {
		if (!test_and_set_bit(idx, hw->used_mask))
			return idx;
	}

	/* No counters available */
	return -EAGAIN;
}

static int cci_pmu_map_event(struct perf_event *event)
{
	int mapping;
	u8 config = event->attr.config & CCI400_PMU_EVENT_MASK;

	if (event->attr.type < PERF_TYPE_MAX)
		return -ENOENT;

	/* 0xff is used to represent CCI Cycles */
	if (config == CCI400_PMU_CYCLES)
		mapping = config;
	else
		mapping = cci_pmu_validate_hw_event(config);

	return mapping;
}

static int cci_pmu_request_irq(struct arm_pmu *cci_pmu, irq_handler_t handler)
{
	int irq, err, i = 0;
	struct platform_device *pmu_device = cci_pmu->plat_device;

	if (unlikely(!pmu_device))
		return -ENODEV;

	/*
	 * The CCI exports six interrupts: one nERRORIRQ plus five
	 * nEVNTCNTOVERFLOW (PMU) lines. nERRORIRQ is handled by secure
	 * firmware on TC2, so we assume that all CCI interrupts listed
	 * in the device tree are PMU interrupts.
	 *
	 * The following code should then be able to handle different
	 * routings of the CCI PMU interrupts.
	 */
	while ((irq = platform_get_irq(pmu_device, i)) > 0) {
		err = request_irq(irq, handler, 0, "arm-cci-pmu", cci_pmu);
		if (err) {
			dev_err(&pmu_device->dev,
				"unable to request IRQ%d for ARM CCI PMU counters\n",
				irq);
			return err;
		}
		i++;
	}

	return 0;
}

static irqreturn_t cci_pmu_handle_irq(int irq_num, void *dev)
{
	struct arm_pmu *cci_pmu = (struct arm_pmu *)dev;
	struct pmu_hw_events *events = cci_pmu->get_hw_events();
	struct perf_sample_data data;
	struct pt_regs *regs;
	int idx;

	regs = get_irq_regs();

	/*
	 * Iterate over counters and update the corresponding perf events.
	 * This should work regardless of whether we have per-counter overflow
	 * interrupt or a combined overflow interrupt.
	 */
	for (idx = CCI400_PMU_CYCLE_COUNTER_IDX; idx <= CCI400_PMU_COUNTER_LAST(cci_pmu); idx++) {
		struct perf_event *event = events->events[idx];
		struct hw_perf_event *hw_counter;

		if (!event)
			continue;

		hw_counter = &event->hw;

		/* Did this counter overflow? */
		if (!(cci_pmu_read_register(idx, CCI400_PMU_OVERFLOW) & CCI400_PMU_OVERFLOW_FLAG))
			continue;

		cci_pmu_write_register(CCI400_PMU_OVERFLOW_FLAG, idx, CCI400_PMU_OVERFLOW);

		armpmu_event_update(event);
		perf_sample_data_init(&data, 0, hw_counter->last_period);
		if (!armpmu_event_set_period(event))
			continue;

		if (perf_event_overflow(event, &data, regs))
			cci_pmu->disable(event);
	}

	return IRQ_HANDLED;
}

static void cci_pmu_free_irq(struct arm_pmu *cci_pmu)
{
	int irq, i = 0;
	struct platform_device *pmu_device = cci_pmu->plat_device;

	while ((irq = platform_get_irq(pmu_device, i)) > 0) {
		free_irq(irq, cci_pmu);
		i++;
	}
}

static void cci_pmu_enable_event(struct perf_event *event)
{
	unsigned long flags;
	struct arm_pmu *cci_pmu = to_arm_pmu(event->pmu);
	struct pmu_hw_events *events = cci_pmu->get_hw_events();
	struct hw_perf_event *hw_counter = &event->hw;
	int idx = hw_counter->idx;

	if (unlikely(!cci_pmu_counter_is_valid(cci_pmu, idx))) {
		dev_err(&cci_pmu->plat_device->dev, "Invalid CCI PMU counter %d\n", idx);
		return;
	}

	raw_spin_lock_irqsave(&events->pmu_lock, flags);

	/* Configure the event to count, unless you are counting cycles */
	if (idx != CCI400_PMU_CYCLE_COUNTER_IDX)
		cci_pmu_select_event(idx, hw_counter->config_base);

	cci_pmu_enable_counter(idx);

	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}

static void cci_pmu_disable_event(struct perf_event *event)
{
	unsigned long flags;
	struct arm_pmu *cci_pmu = to_arm_pmu(event->pmu);
	struct pmu_hw_events *events = cci_pmu->get_hw_events();
	struct hw_perf_event *hw_counter = &event->hw;
	int idx = hw_counter->idx;

	if (unlikely(!cci_pmu_counter_is_valid(cci_pmu, idx))) {
		dev_err(&cci_pmu->plat_device->dev, "Invalid CCI PMU counter %d\n", idx);
		return;
	}

	raw_spin_lock_irqsave(&events->pmu_lock, flags);

	cci_pmu_disable_counter(idx);

	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}

static void cci_pmu_start(struct arm_pmu *cci_pmu)
{
	u32 val;
	unsigned long flags;
	struct pmu_hw_events *events = cci_pmu->get_hw_events();

	raw_spin_lock_irqsave(&events->pmu_lock, flags);

	/* Enable all the PMU counters. */
	val = readl(cci_ctrl_base + CCI400_PMCR) | CCI400_PMCR_CEN;
	writel(val, cci_ctrl_base + CCI400_PMCR);

	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}

static void cci_pmu_stop(struct arm_pmu *cci_pmu)
{
	u32 val;
	unsigned long flags;
	struct pmu_hw_events *events = cci_pmu->get_hw_events();

	raw_spin_lock_irqsave(&events->pmu_lock, flags);

	/* Disable all the PMU counters. */
	val = readl(cci_ctrl_base + CCI400_PMCR) & ~CCI400_PMCR_CEN;
	writel(val, cci_ctrl_base + CCI400_PMCR);

	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}

static u32 cci_pmu_read_counter(struct perf_event *event)
{
	struct arm_pmu *cci_pmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hw_counter = &event->hw;
	int idx = hw_counter->idx;
	u32 value;

	if (unlikely(!cci_pmu_counter_is_valid(cci_pmu, idx))) {
		dev_err(&cci_pmu->plat_device->dev, "Invalid CCI PMU counter %d\n", idx);
		return 0;
	}
	value = cci_pmu_read_register(idx, CCI400_PMU_CNTR);

	return value;
}

static void cci_pmu_write_counter(struct perf_event *event, u32 value)
{
	struct arm_pmu *cci_pmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hw_counter = &event->hw;
	int idx = hw_counter->idx;

	if (unlikely(!cci_pmu_counter_is_valid(cci_pmu, idx)))
		dev_err(&cci_pmu->plat_device->dev, "Invalid CCI PMU counter %d\n", idx);
	else
		cci_pmu_write_register(value, idx, CCI400_PMU_CNTR);
}

static struct arm_pmu cci_pmu = {
	.name		= DRIVER_NAME,
	.max_period	= (1LLU << 32) - 1,
	.get_hw_events	= cci_pmu_get_hw_events,
	.get_event_idx	= cci_pmu_get_event_idx,
	.map_event	= cci_pmu_map_event,
	.request_irq	= cci_pmu_request_irq,
	.handle_irq	= cci_pmu_handle_irq,
	.free_irq	= cci_pmu_free_irq,
	.enable		= cci_pmu_enable_event,
	.disable	= cci_pmu_disable_event,
	.start		= cci_pmu_start,
	.stop		= cci_pmu_stop,
	.read_counter	= cci_pmu_read_counter,
	.write_counter	= cci_pmu_write_counter,
};
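
/*
 * The callbacks above are invoked by the common arm_pmu/perf core, so
 * this driver only supplies the CCI-400 specific register accessors
 * and counter bookkeeping.
 */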

static int cci_pmu_probe(struct platform_device *pdev)
{
	struct resource *res;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	cci_pmu_base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(cci_pmu_base))
		return PTR_ERR(cci_pmu_base);

	cci_pmu.plat_device = pdev;
	cci_pmu.num_events = cci_pmu_get_max_counters();
	raw_spin_lock_init(&cci_hw_events.pmu_lock);
	cpumask_setall(&cci_pmu.valid_cpus);

	return armpmu_register(&cci_pmu, -1);
}

static const struct of_device_id arm_cci_pmu_matches[] = {
	{.compatible = "arm,cci-400-pmu"},
	{},
};

static struct platform_driver cci_pmu_platform_driver = {
	.driver = {
		.name = DRIVER_NAME,
		.of_match_table = arm_cci_pmu_matches,
	},
	.probe = cci_pmu_probe,
};

static int __init cci_pmu_init(void)
{
	if (platform_driver_register(&cci_pmu_platform_driver))
		WARN(1, "unable to register CCI platform driver\n");
	return 0;
}

#else /* !CONFIG_HW_PERF_EVENTS */

static int __init cci_pmu_init(void)
{
	return 0;
}

#endif /* CONFIG_HW_PERF_EVENTS */

struct cpu_port {
	u64 mpidr;
	u32 port;
};

/*
 * Use the port MSB as valid flag, shift can be made dynamic
 * by computing number of bits required for port indexes.
 * Code disabling CCI cpu ports runs with D-cache invalidated
 * and SCTLR bit clear so data accesses must be kept to a minimum
 * to improve performance; for now shift is left static to
 * avoid one more data access while disabling the CCI port.
 */
#define PORT_VALID_SHIFT	31
#define PORT_VALID		(0x1 << PORT_VALID_SHIFT)

static inline void init_cpu_port(struct cpu_port *port, u32 index, u64 mpidr)
{
	port->port = PORT_VALID | index;
	port->mpidr = mpidr & MPIDR_HWID_BITMASK;
}

static inline bool cpu_port_is_valid(struct cpu_port *port)
{
	return !!(port->port & PORT_VALID);
}

static inline bool cpu_port_match(struct cpu_port *port, u64 mpidr)
{
	return port->mpidr == (mpidr & MPIDR_HWID_BITMASK);
}

static struct cpu_port cpu_port[NR_CPUS];

/**
 * __cci_ace_get_port - Function to retrieve the port index connected to
 *			a cpu or device.
 *
 * @dn: device node of the device to look-up
 * @type: port type, ACE or ACE-lite
 *
 * Return value:
 *	- CCI port index if success
 *	- -ENODEV if failure
 */
static int __cci_ace_get_port(struct device_node *dn, int type)
{
	int i;
	bool ace_match;
	struct device_node *cci_portn;

	cci_portn = of_parse_phandle(dn, "cci-control-port", 0);
	for (i = 0; i < nb_cci_ports; i++) {
		ace_match = ports[i].type == type;
		if (ace_match && cci_portn == ports[i].dn)
			return i;
	}
	return -ENODEV;
}

int cci_ace_get_port(struct device_node *dn)
{
	return __cci_ace_get_port(dn, ACE_LITE_PORT);
}
EXPORT_SYMBOL_GPL(cci_ace_get_port);

static void __init cci_ace_init_ports(void)
{
	int port, ac, cpu;
	u64 hwid;
	const u32 *cell;
	struct device_node *cpun, *cpus;

	cpus = of_find_node_by_path("/cpus");
	if (WARN(!cpus, "Missing cpus node, bailing out\n"))
		return;

	if (WARN_ON(of_property_read_u32(cpus, "#address-cells", &ac)))
		ac = of_n_addr_cells(cpus);

	/*
	 * Port index look-up speeds up the function disabling ports by CPU,
	 * since the logical to port index mapping is done once and does
	 * not change after system boot.
	 * The stashed index array is initialized for all possible CPUs
	 * at probe time.
	 */
	for_each_child_of_node(cpus, cpun) {
		if (of_node_cmp(cpun->type, "cpu"))
			continue;
		cell = of_get_property(cpun, "reg", NULL);
		if (WARN(!cell, "%s: missing reg property\n", cpun->full_name))
			continue;

		hwid = of_read_number(cell, ac);
		cpu = get_logical_index(hwid & MPIDR_HWID_BITMASK);

		if (cpu < 0 || !cpu_possible(cpu))
			continue;
		port = __cci_ace_get_port(cpun, ACE_PORT);
		if (port < 0)
			continue;

		init_cpu_port(&cpu_port[cpu], port, cpu_logical_map(cpu));
	}

	for_each_possible_cpu(cpu) {
		WARN(!cpu_port_is_valid(&cpu_port[cpu]),
			"CPU %u does not have an associated CCI port\n",
			cpu);
	}
}

/*
 * Functions to enable/disable a CCI interconnect slave port
 *
 * They are called by low-level power management code to disable slave
 * interfaces snoops and DVM broadcast.
 * Since they may execute with cache data allocation disabled and
 * after the caches have been cleaned and invalidated, the functions
 * provide no explicit locking: they may run with the D-cache disabled,
 * so normal cacheable kernel locks based on ldrex/strex cannot work.
 * Locking has to be provided by BSP implementations to ensure proper
 * operations.
 */

/**
 * cci_port_control() - function to control a CCI port
 *
 * @port: index of the port to setup
 * @enable: if true enables the port, if false disables it
 */
static void notrace cci_port_control(unsigned int port, bool enable)
{
	void __iomem *base = ports[port].base;

	writel_relaxed(enable ? CCI_ENABLE_REQ : 0, base + CCI_PORT_CTRL);
	/*
	 * This function is called from power down procedures
	 * and must not execute any instruction that might
	 * cause the processor to be put in a quiescent state
	 * (eg wfi). Hence, cpu_relax() can not be added to this
	 * read loop to optimize power, since it might hide possibly
	 * disruptive operations.
	 */
	while (readl_relaxed(cci_ctrl_base + CCI_CTRL_STATUS) & 0x1)
		;
}

/**
 * cci_disable_port_by_cpu() - function to disable a CCI port by CPU
 *			       reference
 *
 * @mpidr: mpidr of the CPU whose CCI port should be disabled
 *
 * Disabling a CCI port for a CPU implies disabling the CCI port
 * controlling that CPU cluster. Code disabling CPU CCI ports
 * must make sure that the CPU running the code is the last active CPU
 * in the cluster, ie all other CPUs are quiescent in a low power state.
 *
 * Return:
 *	0 on success
 *	-ENODEV on port look-up failure
 */
int notrace cci_disable_port_by_cpu(u64 mpidr)
{
	int cpu;
	bool is_valid;

	for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
		is_valid = cpu_port_is_valid(&cpu_port[cpu]);
		if (is_valid && cpu_port_match(&cpu_port[cpu], mpidr)) {
			cci_port_control(cpu_port[cpu].port, false);
			return 0;
		}
	}
	return -ENODEV;
}
EXPORT_SYMBOL_GPL(cci_disable_port_by_cpu);
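
/*
 * Usage sketch (hypothetical caller, not part of this file): a cluster
 * power-down sequence would clean and disable its caches and then, on
 * the last CPU standing, do something like
 *
 *	cci_disable_port_by_cpu(read_cpuid_mpidr());
 *
 * before entering the low power state.
 */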

/**
 * cci_enable_port_for_self() - enable a CCI port for the calling CPU
 *
 * Enabling a CCI port for the calling CPU implies enabling the CCI
 * port controlling that CPU's cluster. Caller must make sure that the
 * CPU running the code is the first active CPU in the cluster and all
 * other CPUs are quiescent in a low power state or waiting for this CPU
 * to complete the CCI initialization.
 *
 * Because this is called when the MMU is still off and with no stack,
 * the code must be position independent and ideally rely on callee
 * clobbered registers only. To achieve this we must code this function
 * entirely in assembler.
 *
 * On success this returns with the proper CCI port enabled. In case of
 * any failure this never returns as the inability to enable the CCI is
 * fatal and there is no possible recovery at this stage.
 */
asmlinkage void __naked cci_enable_port_for_self(void)
{
	asm volatile ("\n"
"	.arch armv7-a\n"
"	mrc	p15, 0, r0, c0, c0, 5	@ get MPIDR value \n"
"	and	r0, r0, #"__stringify(MPIDR_HWID_BITMASK)" \n"
"	adr	r1, 5f \n"
"	ldr	r2, [r1] \n"
"	add	r1, r1, r2		@ &cpu_port \n"
"	add	ip, r1, %[sizeof_cpu_port] \n"

	/* Loop over the cpu_port array looking for a matching MPIDR */
"1:	ldr	r2, [r1, %[offsetof_cpu_port_mpidr_lsb]] \n"
"	cmp	r2, r0			@ compare MPIDR \n"
"	bne	2f \n"

	/* Found a match, now test port validity */
"	ldr	r3, [r1, %[offsetof_cpu_port_port]] \n"
"	tst	r3, #"__stringify(PORT_VALID)" \n"
"	bne	3f \n"

	/* no match, loop with the next cpu_port entry */
"2:	add	r1, r1, %[sizeof_struct_cpu_port] \n"
"	cmp	r1, ip			@ done? \n"
"	blo	1b \n"

	/* CCI port not found -- cheaply try to stall this CPU */
"cci_port_not_found: \n"
"	wfi \n"
"	wfe \n"
"	b	cci_port_not_found \n"

	/* Use matched port index to look up the corresponding ports entry */
"3:	bic	r3, r3, #"__stringify(PORT_VALID)" \n"
"	adr	r0, 6f \n"
"	ldmia	r0, {r1, r2} \n"
"	sub	r1, r1, r0		@ virt - phys \n"
"	ldr	r0, [r0, r2]		@ *(&ports) \n"
"	mov	r2, %[sizeof_struct_ace_port] \n"
"	mla	r0, r2, r3, r0		@ &ports[index] \n"
"	sub	r0, r0, r1		@ virt_to_phys() \n"

	/* Enable the CCI port */
"	ldr	r0, [r0, %[offsetof_port_phys]] \n"
"	mov	r3, %[cci_enable_req]\n"
"	str	r3, [r0, #"__stringify(CCI_PORT_CTRL)"] \n"

	/* poll the status reg for completion */
"	adr	r1, 7f \n"
"	ldr	r0, [r1] \n"
"	ldr	r0, [r0, r1]		@ cci_ctrl_base \n"
"4:	ldr	r1, [r0, #"__stringify(CCI_CTRL_STATUS)"] \n"
"	tst	r1, %[cci_control_status_bits] \n"
"	bne	4b \n"

"	mov	r0, #0 \n"
"	bx	lr \n"

"	.align	2 \n"
"5:	.word	cpu_port - . \n"
"6:	.word	. \n"
"	.word	ports - 6b \n"
"7:	.word	cci_ctrl_phys - . \n"
	: :
	[sizeof_cpu_port] "i" (sizeof(cpu_port)),
	[cci_enable_req] "i" cpu_to_le32(CCI_ENABLE_REQ),
	[cci_control_status_bits] "i" cpu_to_le32(1),
#ifndef __ARMEB__
	[offsetof_cpu_port_mpidr_lsb] "i" (offsetof(struct cpu_port, mpidr)),
#else
	[offsetof_cpu_port_mpidr_lsb] "i" (offsetof(struct cpu_port, mpidr)+4),
#endif
	[offsetof_cpu_port_port] "i" (offsetof(struct cpu_port, port)),
	[sizeof_struct_cpu_port] "i" (sizeof(struct cpu_port)),
	[sizeof_struct_ace_port] "i" (sizeof(struct cci_ace_port)),
	[offsetof_port_phys] "i" (offsetof(struct cci_ace_port, phys)) );

	unreachable();
}
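
/*
 * Note on the literal pool above: the 5:/6:/7: words store link-time
 * offsets such as "cpu_port - ." rather than absolute addresses, so
 * adding a loaded word to its own address yields the variable's
 * virtual address and keeps the code position independent. The pair at
 * 6: additionally provides the virt-to-phys delta used to dereference
 * ports[] with the MMU off.
 */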

/**
 * __cci_control_port_by_device() - function to control a CCI port by device
 *				    reference
 *
 * @dn: device node pointer of the device whose CCI port should be
 *	controlled
 * @enable: if true enables the port, if false disables it
 *
 * Return:
 *	0 on success
 *	-ENODEV on port look-up failure
 */
int notrace __cci_control_port_by_device(struct device_node *dn, bool enable)
{
	int port;

	if (!dn)
		return -ENODEV;

	port = __cci_ace_get_port(dn, ACE_LITE_PORT);
	if (WARN_ONCE(port < 0, "node %s ACE lite port look-up failure\n",
				dn->full_name))
		return -ENODEV;
	cci_port_control(port, enable);
	return 0;
}
EXPORT_SYMBOL_GPL(__cci_control_port_by_device);

/**
 * __cci_control_port_by_index() - function to control a CCI port by port index
 *
 * @port: port index previously retrieved with cci_ace_get_port()
 * @enable: if true enables the port, if false disables it
 *
 * Return:
 *	0 on success
 *	-ENODEV on port index out of range
 *	-EPERM if operation carried out on an ACE PORT
 */
int notrace __cci_control_port_by_index(u32 port, bool enable)
{
	if (port >= nb_cci_ports || ports[port].type == ACE_INVALID_PORT)
		return -ENODEV;
	/*
	 * CCI control for ports connected to CPUs is extremely fragile
	 * and must be made to go through a specific and controlled
	 * interface (ie cci_disable_port_by_cpu); control by general
	 * purpose indexing is therefore disabled for ACE ports.
	 */
	if (ports[port].type == ACE_PORT)
		return -EPERM;

	cci_port_control(port, enable);
	return 0;
}
EXPORT_SYMBOL_GPL(__cci_control_port_by_index);
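
/*
 * Usage sketch (hypothetical caller, not part of this file): a driver
 * for a DMA master behind an ACE-lite port could enable snooping for
 * its transactions with
 *
 *	int port = cci_ace_get_port(dev->of_node);
 *	if (port >= 0)
 *		__cci_control_port_by_index(port, true);
 */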

static const struct cci_nb_ports cci400_ports = {
	.nb_ace = 2,
	.nb_ace_lite = 3
};

static const struct of_device_id arm_cci_matches[] = {
	{.compatible = "arm,cci-400", .data = &cci400_ports },
	{},
};

static const struct of_device_id arm_cci_ctrl_if_matches[] = {
	{.compatible = "arm,cci-400-ctrl-if", },
	{},
};

static int __init cci_probe(void)
{
	struct cci_nb_ports const *cci_config;
	int ret, i, nb_ace = 0, nb_ace_lite = 0;
	struct device_node *np, *cp;
	struct resource res;
	const char *match_str;
	bool is_ace;

	np = of_find_matching_node(NULL, arm_cci_matches);
	if (!np)
		return -ENODEV;

	cci_config = of_match_node(arm_cci_matches, np)->data;
	if (!cci_config)
		return -ENODEV;

	nb_cci_ports = cci_config->nb_ace + cci_config->nb_ace_lite;

	ports = kcalloc(nb_cci_ports, sizeof(*ports), GFP_KERNEL);
	if (!ports)
		return -ENOMEM;

	ret = of_address_to_resource(np, 0, &res);
	if (!ret) {
		cci_ctrl_base = ioremap(res.start, resource_size(&res));
		cci_ctrl_phys = res.start;
	}
	if (ret || !cci_ctrl_base) {
		WARN(1, "unable to ioremap CCI ctrl\n");
		ret = -ENXIO;
		goto memalloc_err;
	}

	for_each_child_of_node(np, cp) {
		if (!of_match_node(arm_cci_ctrl_if_matches, cp))
			continue;

		i = nb_ace + nb_ace_lite;

		if (i >= nb_cci_ports)
			break;

		if (of_property_read_string(cp, "interface-type",
					    &match_str)) {
			WARN(1, "node %s missing interface-type property\n",
				cp->full_name);
			continue;
		}
		is_ace = strcmp(match_str, "ace") == 0;
		if (!is_ace && strcmp(match_str, "ace-lite")) {
			WARN(1, "node %s containing invalid interface-type property, skipping it\n",
				cp->full_name);
			continue;
		}

		ret = of_address_to_resource(cp, 0, &res);
		if (!ret) {
			ports[i].base = ioremap(res.start, resource_size(&res));
			ports[i].phys = res.start;
		}
		if (ret || !ports[i].base) {
			WARN(1, "unable to ioremap CCI port %d\n", i);
			continue;
		}

		if (is_ace) {
			if (WARN_ON(nb_ace >= cci_config->nb_ace))
				continue;
			ports[i].type = ACE_PORT;
			++nb_ace;
		} else {
			if (WARN_ON(nb_ace_lite >= cci_config->nb_ace_lite))
				continue;
			ports[i].type = ACE_LITE_PORT;
			++nb_ace_lite;
		}
		ports[i].dn = cp;
	}

	/* initialize a stashed array of ACE ports to speed-up look-up */
	cci_ace_init_ports();

	/*
	 * Multi-cluster systems may need this data when non-coherent, during
	 * cluster power-up/power-down. Make sure it reaches main memory.
	 */
	sync_cache_w(&cci_ctrl_base);
	sync_cache_w(&cci_ctrl_phys);
	sync_cache_w(&ports);
	sync_cache_w(&cpu_port);
	__sync_cache_range_w(ports, sizeof(*ports) * nb_cci_ports);
	pr_info("ARM CCI driver probed\n");
	return 0;

memalloc_err:
	kfree(ports);
	return ret;
}

static int cci_init_status = -EAGAIN;
static DEFINE_MUTEX(cci_probing);

static int __init cci_init(void)
{
	if (cci_init_status != -EAGAIN)
		return cci_init_status;

	mutex_lock(&cci_probing);
	if (cci_init_status == -EAGAIN)
		cci_init_status = cci_probe();
	mutex_unlock(&cci_probing);
	return cci_init_status;
}

/*
 * To sort out early init calls ordering, a helper function is provided to
 * check if the CCI driver has been initialized. If the driver is not yet
 * initialized, it calls the init function that probes it and updates the
 * stashed return value.
 */
bool __init cci_probed(void)
{
	return cci_init() == 0;
}
EXPORT_SYMBOL_GPL(cci_probed);

early_initcall(cci_init);
core_initcall(cci_pmu_init);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("ARM CCI support");