asm volatile("msr cntkctl_el1, %0" : : "r" (cntkctl));
}
+static inline u64 arch_counter_get_cntpct(void)
+{
+ u64 cval;
+
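+ /* Prevent the counter read from being speculated ahead of earlier instructions */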
+ isb();
+ asm volatile("mrs %0, cntpct_el0" : "=r" (cval));
+
+ return cval;
+}
+
static inline u64 arch_counter_get_cntvct(void)
{
u64 cval;
return arch_timer_rate;
}
-u64 arch_timer_read_counter(void)
+/*
+ * Some external users of arch_timer_read_counter (e.g. sched_clock) may try to
+ * call it before it has been initialised. Rather than incur a performance
+ * penalty checking for initialisation, provide a default implementation that
+ * won't lead to time appearing to jump backwards.
+ */
+static u64 arch_timer_read_zero(void)
{
- return arch_counter_get_cntvct();
+ return 0;
}
+u64 (*arch_timer_read_counter)(void) = arch_timer_read_zero;
+
static cycle_t arch_counter_read(struct clocksource *cs)
{
- return arch_counter_get_cntvct();
+ return arch_timer_read_counter();
}
static cycle_t arch_counter_read_cc(const struct cyclecounter *cc)
{
- return arch_counter_get_cntvct();
+ return arch_timer_read_counter();
}
static struct clocksource clocksource_counter = {
cyclecounter.mult = clocksource_counter.mult;
cyclecounter.shift = clocksource_counter.shift;
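+ /* Seed the timecounter with the current physical counter value */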
timecounter_init(&timecounter, &cyclecounter,
- arch_counter_get_cntvct());
+ arch_counter_get_cntpct());
if (arch_timer_use_virtual) {
ppi = arch_timer_ppi[VIRT_PPI];
}
}
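+ /* Route all counter reads through the accessor matching the timer in use */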
+ if (arch_timer_use_virtual)
+ arch_timer_read_counter = arch_counter_get_cntvct;
+ else
+ arch_timer_read_counter = arch_counter_get_cntpct;
+
arch_timer_register();
arch_timer_arch_init();
}