arm: arch_timer: split cntfrq accessor
firefly-linux-kernel-4.4.55.git: arch/arm/kernel/arch_timer.c
/*
 *  linux/arch/arm/kernel/arch_timer.c
 *
 *  Copyright (C) 2011 ARM Ltd.
 *  All Rights Reserved
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/smp.h>
#include <linux/cpu.h>
#include <linux/jiffies.h>
#include <linux/clockchips.h>
#include <linux/interrupt.h>
#include <linux/of_irq.h>
#include <linux/io.h>

#include <asm/delay.h>
#include <asm/localtimer.h>
#include <asm/arch_timer.h>
#include <asm/sched_clock.h>

static u32 arch_timer_rate;

enum ppi_nr {
        PHYS_SECURE_PPI,
        PHYS_NONSECURE_PPI,
        VIRT_PPI,
        HYP_PPI,
        MAX_TIMER_PPI
};

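/*
 * One PPI per timer, in the order the "arm,armv7-timer" DT binding lists
 * them: secure physical, non-secure physical, virtual, hypervisor. The
 * enum above indexes directly into the node's interrupts property.
 */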
static int arch_timer_ppi[MAX_TIMER_PPI];

static struct clock_event_device __percpu **arch_timer_evt;
static struct delay_timer arch_delay_timer;

static bool arch_timer_use_virtual = true;

/*
 * Architected system timer support.
 */

#define ARCH_TIMER_CTRL_ENABLE          (1 << 0)
#define ARCH_TIMER_CTRL_IT_MASK         (1 << 1)
#define ARCH_TIMER_CTRL_IT_STAT         (1 << 2)

#define ARCH_TIMER_REG_CTRL             0
#define ARCH_TIMER_REG_TVAL             1

#define ARCH_TIMER_PHYS_ACCESS          0
#define ARCH_TIMER_VIRT_ACCESS          1

/*
 * These register accessors are marked inline so the compiler can
 * nicely work out which register we want, and chuck away the rest of
 * the code. At least it does so with a recent GCC (4.6.3).
 */
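/*
 * CP15 encodings used below (ARMv7 Generic Timer):
 *   CNTP_CTL  = p15, 0, c14, c2, 1     CNTP_TVAL = p15, 0, c14, c2, 0
 *   CNTV_CTL  = p15, 0, c14, c3, 1     CNTV_TVAL = p15, 0, c14, c3, 0
 */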
static inline void arch_timer_reg_write(const int access, const int reg, u32 val)
{
        if (access == ARCH_TIMER_PHYS_ACCESS) {
                switch (reg) {
                case ARCH_TIMER_REG_CTRL:
                        asm volatile("mcr p15, 0, %0, c14, c2, 1" : : "r" (val));
                        break;
                case ARCH_TIMER_REG_TVAL:
                        asm volatile("mcr p15, 0, %0, c14, c2, 0" : : "r" (val));
                        break;
                }
        }

        if (access == ARCH_TIMER_VIRT_ACCESS) {
                switch (reg) {
                case ARCH_TIMER_REG_CTRL:
                        asm volatile("mcr p15, 0, %0, c14, c3, 1" : : "r" (val));
                        break;
                case ARCH_TIMER_REG_TVAL:
                        asm volatile("mcr p15, 0, %0, c14, c3, 0" : : "r" (val));
                        break;
                }
        }

        isb();
}

static inline u32 arch_timer_reg_read(const int access, const int reg)
{
        u32 val = 0;

        if (access == ARCH_TIMER_PHYS_ACCESS) {
                switch (reg) {
                case ARCH_TIMER_REG_CTRL:
                        asm volatile("mrc p15, 0, %0, c14, c2, 1" : "=r" (val));
                        break;
                case ARCH_TIMER_REG_TVAL:
                        asm volatile("mrc p15, 0, %0, c14, c2, 0" : "=r" (val));
                        break;
                }
        }

        if (access == ARCH_TIMER_VIRT_ACCESS) {
                switch (reg) {
                case ARCH_TIMER_REG_CTRL:
                        asm volatile("mrc p15, 0, %0, c14, c3, 1" : "=r" (val));
                        break;
                case ARCH_TIMER_REG_TVAL:
                        asm volatile("mrc p15, 0, %0, c14, c3, 0" : "=r" (val));
                        break;
                }
        }

        return val;
}

static inline u32 arch_timer_get_cntfrq(void)
{
        u32 val;
        asm volatile("mrc p15, 0, %0, c14, c0, 0" : "=r" (val));
        return val;
}

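/*
 * 64-bit counter reads: CNTPCT is MRRC p15, 0 and CNTVCT is MRRC p15, 1.
 * The %Q0/%R0 operand modifiers select the low and high halves of the
 * 64-bit output operand.
 */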
static inline u64 arch_counter_get_cntpct(void)
{
        u64 cval;
        asm volatile("mrrc p15, 0, %Q0, %R0, c14" : "=r" (cval));
        return cval;
}

static inline u64 arch_counter_get_cntvct(void)
{
        u64 cval;
        asm volatile("mrrc p15, 1, %Q0, %R0, c14" : "=r" (cval));
        return cval;
}

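/*
 * There is no explicit acknowledge register: the interrupt is cleared by
 * masking it (IMASK) here, and unmasked again when set_next_event()
 * reprograms the timer.
 */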
static inline irqreturn_t timer_handler(const int access,
                                        struct clock_event_device *evt)
{
        unsigned long ctrl;
        ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL);
        if (ctrl & ARCH_TIMER_CTRL_IT_STAT) {
                ctrl |= ARCH_TIMER_CTRL_IT_MASK;
                arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl);
                evt->event_handler(evt);
                return IRQ_HANDLED;
        }

        return IRQ_NONE;
}

static irqreturn_t arch_timer_handler_virt(int irq, void *dev_id)
{
        struct clock_event_device *evt = *(struct clock_event_device **)dev_id;

        return timer_handler(ARCH_TIMER_VIRT_ACCESS, evt);
}

static irqreturn_t arch_timer_handler_phys(int irq, void *dev_id)
{
        struct clock_event_device *evt = *(struct clock_event_device **)dev_id;

        return timer_handler(ARCH_TIMER_PHYS_ACCESS, evt);
}

static inline void timer_set_mode(const int access, int mode)
{
        unsigned long ctrl;
        switch (mode) {
        case CLOCK_EVT_MODE_UNUSED:
        case CLOCK_EVT_MODE_SHUTDOWN:
                ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL);
                ctrl &= ~ARCH_TIMER_CTRL_ENABLE;
                arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl);
                break;
        default:
                break;
        }
}

static void arch_timer_set_mode_virt(enum clock_event_mode mode,
                                     struct clock_event_device *clk)
{
        timer_set_mode(ARCH_TIMER_VIRT_ACCESS, mode);
}

static void arch_timer_set_mode_phys(enum clock_event_mode mode,
                                     struct clock_event_device *clk)
{
        timer_set_mode(ARCH_TIMER_PHYS_ACCESS, mode);
}

static inline void set_next_event(const int access, unsigned long evt)
{
        unsigned long ctrl;
        ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL);
        ctrl |= ARCH_TIMER_CTRL_ENABLE;
        ctrl &= ~ARCH_TIMER_CTRL_IT_MASK;
        arch_timer_reg_write(access, ARCH_TIMER_REG_TVAL, evt);
        arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl);
}
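
/*
 * TVAL is a signed 32-bit down-counter: writing it sets the compare
 * value to the current count plus 'evt', and ISTATUS asserts once that
 * many ticks have elapsed.
 */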

static int arch_timer_set_next_event_virt(unsigned long evt,
                                          struct clock_event_device *unused)
{
        set_next_event(ARCH_TIMER_VIRT_ACCESS, evt);
        return 0;
}

static int arch_timer_set_next_event_phys(unsigned long evt,
                                          struct clock_event_device *unused)
{
        set_next_event(ARCH_TIMER_PHYS_ACCESS, evt);
        return 0;
}

static int __cpuinit arch_timer_setup(struct clock_event_device *clk)
{
        clk->features = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_C3STOP;
        clk->name = "arch_sys_timer";
        clk->rating = 450;
        if (arch_timer_use_virtual) {
                clk->irq = arch_timer_ppi[VIRT_PPI];
                clk->set_mode = arch_timer_set_mode_virt;
                clk->set_next_event = arch_timer_set_next_event_virt;
        } else {
                clk->irq = arch_timer_ppi[PHYS_SECURE_PPI];
                clk->set_mode = arch_timer_set_mode_phys;
                clk->set_next_event = arch_timer_set_next_event_phys;
        }

        clk->set_mode(CLOCK_EVT_MODE_SHUTDOWN, NULL);

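        /*
         * A small min delta (0xf ticks) keeps expiries safely in the
         * future; the max of 0x7fffffff matches TVAL's signed 32-bit
         * range.
         */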
        clockevents_config_and_register(clk, arch_timer_rate,
                                        0xf, 0x7fffffff);

        *__this_cpu_ptr(arch_timer_evt) = clk;

        if (arch_timer_use_virtual)
                enable_percpu_irq(arch_timer_ppi[VIRT_PPI], 0);
        else {
                enable_percpu_irq(arch_timer_ppi[PHYS_SECURE_PPI], 0);
                if (arch_timer_ppi[PHYS_NONSECURE_PPI])
                        enable_percpu_irq(arch_timer_ppi[PHYS_NONSECURE_PPI], 0);
        }

        return 0;
}

static int arch_timer_available(void)
{
        u32 freq;

        if (arch_timer_rate == 0) {
                freq = arch_timer_get_cntfrq();

                /* Check the timer frequency. */
                if (freq == 0) {
                        pr_warn("Architected timer frequency not available\n");
                        return -EINVAL;
                }

                arch_timer_rate = freq;
        }

        pr_info_once("Architected local timer running at %lu.%02luMHz (%s).\n",
                     (unsigned long)arch_timer_rate / 1000000,
                     (unsigned long)(arch_timer_rate / 10000) % 100,
                     arch_timer_use_virtual ? "virt" : "phys");
        return 0;
}

/*
 * Some external users of arch_timer_read_counter (e.g. sched_clock) may try to
 * call it before it has been initialised. Rather than incur a performance
 * penalty checking for initialisation, provide a default implementation that
 * won't lead to time appearing to jump backwards.
 */
static u64 arch_timer_read_zero(void)
{
        return 0;
}

u64 (*arch_timer_read_counter)(void) = arch_timer_read_zero;

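/*
 * sched_clock only consumes a 32-bit value, so truncate the 64-bit
 * counter to its low word here; the sched_clock core handles the wrap.
 */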
static u32 arch_timer_read_counter32(void)
{
        return arch_timer_read_counter();
}

static cycle_t arch_counter_read(struct clocksource *cs)
{
        return arch_timer_read_counter();
}

static unsigned long arch_timer_read_current_timer(void)
{
        return arch_timer_read_counter();
}

static cycle_t arch_counter_read_cc(const struct cyclecounter *cc)
{
        return arch_timer_read_counter();
}

static struct clocksource clocksource_counter = {
        .name   = "arch_sys_counter",
        .rating = 400,
        .read   = arch_counter_read,
        .mask   = CLOCKSOURCE_MASK(56),
        .flags  = CLOCK_SOURCE_IS_CONTINUOUS,
};
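
/*
 * The 56-bit mask above is plenty: at a hypothetical 50 MHz count
 * frequency, 2^56 ticks is roughly 45 years before the masked value
 * wraps.
 */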

static struct cyclecounter cyclecounter = {
        .read   = arch_counter_read_cc,
        .mask   = CLOCKSOURCE_MASK(56),
};

static struct timecounter timecounter;

struct timecounter *arch_timer_get_timecounter(void)
{
        return &timecounter;
}

static void __cpuinit arch_timer_stop(struct clock_event_device *clk)
{
        pr_debug("arch_timer_teardown disable IRQ%d cpu #%d\n",
                 clk->irq, smp_processor_id());

        if (arch_timer_use_virtual)
                disable_percpu_irq(arch_timer_ppi[VIRT_PPI]);
        else {
                disable_percpu_irq(arch_timer_ppi[PHYS_SECURE_PPI]);
                if (arch_timer_ppi[PHYS_NONSECURE_PPI])
                        disable_percpu_irq(arch_timer_ppi[PHYS_NONSECURE_PPI]);
        }

        clk->set_mode(CLOCK_EVT_MODE_UNUSED, clk);
}

static struct local_timer_ops arch_timer_ops __cpuinitdata = {
        .setup  = arch_timer_setup,
        .stop   = arch_timer_stop,
};

static struct clock_event_device arch_timer_global_evt;

static int __init arch_timer_register(void)
{
        int err;
        int ppi;

        err = arch_timer_available();
        if (err)
                goto out;

        arch_timer_evt = alloc_percpu(struct clock_event_device *);
        if (!arch_timer_evt) {
                err = -ENOMEM;
                goto out;
        }

        clocksource_register_hz(&clocksource_counter, arch_timer_rate);
        cyclecounter.mult = clocksource_counter.mult;
        cyclecounter.shift = clocksource_counter.shift;
        timecounter_init(&timecounter, &cyclecounter,
                         arch_counter_get_cntpct());

        if (arch_timer_use_virtual) {
                ppi = arch_timer_ppi[VIRT_PPI];
                err = request_percpu_irq(ppi, arch_timer_handler_virt,
                                         "arch_timer", arch_timer_evt);
        } else {
                ppi = arch_timer_ppi[PHYS_SECURE_PPI];
                err = request_percpu_irq(ppi, arch_timer_handler_phys,
                                         "arch_timer", arch_timer_evt);
                if (!err && arch_timer_ppi[PHYS_NONSECURE_PPI]) {
                        ppi = arch_timer_ppi[PHYS_NONSECURE_PPI];
                        err = request_percpu_irq(ppi, arch_timer_handler_phys,
                                                 "arch_timer", arch_timer_evt);
                        if (err)
                                free_percpu_irq(arch_timer_ppi[PHYS_SECURE_PPI],
                                                arch_timer_evt);
                }
        }

        if (err) {
                pr_err("arch_timer: can't register interrupt %d (%d)\n",
                       ppi, err);
                goto out_free;
        }

        err = local_timer_register(&arch_timer_ops);
        if (err) {
                /*
                 * We couldn't register as a local timer (could be
                 * because we're on a UP platform, or because some
                 * other local timer is already present...). Try as a
                 * global timer instead.
                 */
                arch_timer_global_evt.cpumask = cpumask_of(0);
                err = arch_timer_setup(&arch_timer_global_evt);
        }
        if (err)
                goto out_free_irq;

        /* Use the architected timer for the delay loop. */
        arch_delay_timer.read_current_timer = &arch_timer_read_current_timer;
        arch_delay_timer.freq = arch_timer_rate;
        register_current_timer_delay(&arch_delay_timer);
        return 0;

out_free_irq:
        if (arch_timer_use_virtual)
                free_percpu_irq(arch_timer_ppi[VIRT_PPI], arch_timer_evt);
        else {
                free_percpu_irq(arch_timer_ppi[PHYS_SECURE_PPI],
                                arch_timer_evt);
                if (arch_timer_ppi[PHYS_NONSECURE_PPI])
                        free_percpu_irq(arch_timer_ppi[PHYS_NONSECURE_PPI],
                                        arch_timer_evt);
        }

out_free:
        free_percpu(arch_timer_evt);
out:
        return err;
}

static const struct of_device_id arch_timer_of_match[] __initconst = {
        { .compatible   = "arm,armv7-timer",    },
        {},
};

int __init arch_timer_of_register(void)
{
        struct device_node *np;
        u32 freq;
        int i;

        np = of_find_matching_node(NULL, arch_timer_of_match);
        if (!np) {
                pr_err("arch_timer: can't find DT node\n");
                return -ENODEV;
        }

        /* Try to determine the frequency from the device tree or CNTFRQ */
        if (!of_property_read_u32(np, "clock-frequency", &freq))
                arch_timer_rate = freq;

        for (i = PHYS_SECURE_PPI; i < MAX_TIMER_PPI; i++)
                arch_timer_ppi[i] = irq_of_parse_and_map(np, i);
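        /*
         * irq_of_parse_and_map() returns 0 for a missing entry, which
         * is why the checks below simply test for a zero PPI.
         */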

        of_node_put(np);

        /*
         * If no interrupt provided for virtual timer, we'll have to
         * stick to the physical timer. It'd better be accessible...
         */
        if (!arch_timer_ppi[VIRT_PPI]) {
                arch_timer_use_virtual = false;

                if (!arch_timer_ppi[PHYS_SECURE_PPI] ||
                    !arch_timer_ppi[PHYS_NONSECURE_PPI]) {
                        pr_warn("arch_timer: No interrupt available, giving up\n");
                        return -EINVAL;
                }
        }

        if (arch_timer_use_virtual)
                arch_timer_read_counter = arch_counter_get_cntvct;
        else
                arch_timer_read_counter = arch_counter_get_cntpct;

        return arch_timer_register();
}

int __init arch_timer_sched_clock_init(void)
{
        int err;

        err = arch_timer_available();
        if (err)
                return err;

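        /*
         * Only the low 32 bits of the counter feed sched_clock. At a
         * hypothetical 24 MHz CNTFRQ that window wraps about every
         * 179 seconds (2^32 / 24e6); the sched_clock core extends it.
         */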
        setup_sched_clock(arch_timer_read_counter32,
                          32, arch_timer_rate);
        return 0;
}