/*
 * linux/kernel/time/tick-common.c
 *
 * This file contains the base functions to manage periodic tick
 * related events.
 *
 * Copyright(C) 2005-2006, Thomas Gleixner <tglx@linutronix.de>
 * Copyright(C) 2005-2007, Red Hat, Inc., Ingo Molnar
 * Copyright(C) 2006-2007, Timesys Corp., Thomas Gleixner
 *
 * This code is licensed under the GPL version 2. For details see
 * kernel-base/COPYING.
 */
#include <linux/cpu.h>
#include <linux/err.h>
#include <linux/hrtimer.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/profile.h>
#include <linux/sched.h>
#include <linux/module.h>
#include <trace/events/power.h>

#include <asm/irq_regs.h>

#include "tick-internal.h"
/*
 * Tick devices
 */
DEFINE_PER_CPU(struct tick_device, tick_cpu_device);

/*
 * Tick next event: keeps track of the tick time
 */
ktime_t tick_next_period;
ktime_t tick_period;
/*
 * tick_do_timer_cpu is a timer core internal variable which holds the CPU NR
 * which is responsible for calling do_timer(), i.e. the timekeeping stuff. This
 * variable has two functions:
 *
 * 1) Prevent a thundering herd issue of a gazillion of CPUs trying to grab the
 *    timekeeping lock all at once. Only the CPU which is assigned to do the
 *    update is handling it.
 *
 * 2) Hand off the duty in the NOHZ idle case by setting the value to
 *    TICK_DO_TIMER_NONE, i.e. a non-existing CPU. So the next cpu which looks
 *    at it will take over and keep the timekeeping alive. The handover
 *    procedure also covers cpu hotplug.
 */
int tick_do_timer_cpu __read_mostly = TICK_DO_TIMER_BOOT;
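
/*
 * Illustrative sketch of the NOHZ handover described above (simplified,
 * not part of this file; see tick-sched.c for the real logic): the idle
 * CPU drops the duty, and the next CPU that runs a tick picks it up:
 *
 *	if (tick_do_timer_cpu == smp_processor_id())
 *		tick_do_timer_cpu = TICK_DO_TIMER_NONE;
 *	...
 *	if (tick_do_timer_cpu == TICK_DO_TIMER_NONE)
 *		tick_do_timer_cpu = smp_processor_id();
 */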
/*
 * Debugging: see timer_list.c
 */
struct tick_device *tick_get_device(int cpu)
{
	return &per_cpu(tick_cpu_device, cpu);
}
/**
 * tick_is_oneshot_available - check for a oneshot capable event device
 */
int tick_is_oneshot_available(void)
{
	struct clock_event_device *dev = __this_cpu_read(tick_cpu_device.evtdev);

	if (!dev || !(dev->features & CLOCK_EVT_FEAT_ONESHOT))
		return 0;
	if (!(dev->features & CLOCK_EVT_FEAT_C3STOP))
		return 1;
	return tick_broadcast_oneshot_available();
}
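
/*
 * Example (illustrative, not taken from a specific driver): a per-CPU
 * timer that stops in deep C-states would advertise something like
 *
 *	.features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT |
 *		    CLOCK_EVT_FEAT_C3STOP,
 *
 * in which case oneshot mode is only reported as available when the
 * broadcast machinery can bridge the deep-idle gap.
 */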
/*
 * Periodic tick
 */
static void tick_periodic(int cpu)
{
	if (tick_do_timer_cpu == cpu) {
		write_seqlock(&jiffies_lock);

		/* Keep track of the next tick event */
		tick_next_period = ktime_add(tick_next_period, tick_period);

		do_timer(1);
		write_sequnlock(&jiffies_lock);
		update_wall_time();
	}

	update_process_times(user_mode(get_irq_regs()));
	profile_tick(CPU_PROFILING);
}
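
/*
 * The jiffies_lock write side above pairs with seqlock readers; a reader
 * retries until it observes a consistent snapshot, as in this sketch
 * (the same pattern is used in tick_setup_periodic() below):
 *
 *	unsigned long seq;
 *	ktime_t next;
 *
 *	do {
 *		seq = read_seqbegin(&jiffies_lock);
 *		next = tick_next_period;
 *	} while (read_seqretry(&jiffies_lock, seq));
 */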
/*
 * Event handler for periodic ticks
 */
void tick_handle_periodic(struct clock_event_device *dev)
{
	int cpu = smp_processor_id();
	ktime_t next = dev->next_event;

	tick_periodic(cpu);

	if (dev->state != CLOCK_EVT_STATE_ONESHOT)
		return;
	for (;;) {
		/*
		 * Setup the next period for devices, which do not have
		 * periodic mode:
		 */
		next = ktime_add(next, tick_period);

		if (!clockevents_program_event(dev, next, false))
			return;
		/*
		 * Have to be careful here. If we're in oneshot mode,
		 * before we call tick_periodic() in a loop, we need
		 * to be sure we're using a real hardware clocksource.
		 * Otherwise we could get trapped in an infinite
		 * loop, as the tick_periodic() increments jiffies,
		 * which then will increment time, possibly causing
		 * the loop to trigger again and again.
		 */
		if (timekeeping_valid_for_hres())
			tick_periodic(cpu);
	}
}
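
/*
 * Why the loop above can iterate more than once: if this CPU was blocked
 * long enough that "next" already lies in the past,
 * clockevents_program_event() fails with -ETIME. Each iteration then
 * accounts one missed period via tick_periodic() and advances "next" by
 * tick_period until programming finally succeeds.
 */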
/*
 * Setup the device for a periodic tick
 */
void tick_setup_periodic(struct clock_event_device *dev, int broadcast)
{
	tick_set_periodic_handler(dev, broadcast);

	/* Broadcast setup ? */
	if (!tick_device_is_functional(dev))
		return;

	if ((dev->features & CLOCK_EVT_FEAT_PERIODIC) &&
	    !tick_broadcast_oneshot_active()) {
		clockevents_set_state(dev, CLOCK_EVT_STATE_PERIODIC);
	} else {
		unsigned long seq;
		ktime_t next;

		do {
			seq = read_seqbegin(&jiffies_lock);
			next = tick_next_period;
		} while (read_seqretry(&jiffies_lock, seq));

		clockevents_set_state(dev, CLOCK_EVT_STATE_ONESHOT);

		for (;;) {
			if (!clockevents_program_event(dev, next, false))
				return;
			next = ktime_add(next, tick_period);
		}
	}
}
/*
 * Setup the tick device
 */
static void tick_setup_device(struct tick_device *td,
			      struct clock_event_device *newdev, int cpu,
			      const struct cpumask *cpumask)
{
	ktime_t next_event;
	void (*handler)(struct clock_event_device *) = NULL;

	/*
	 * First device setup ?
	 */
	if (!td->evtdev) {
		/*
		 * If no cpu took the do_timer update, assign it to
		 * this cpu:
		 */
		if (tick_do_timer_cpu == TICK_DO_TIMER_BOOT) {
			if (!tick_nohz_full_cpu(cpu))
				tick_do_timer_cpu = cpu;
			else
				tick_do_timer_cpu = TICK_DO_TIMER_NONE;
			tick_next_period = ktime_get();
			tick_period = ktime_set(0, NSEC_PER_SEC / HZ);
		}

		/*
		 * Startup in periodic mode first.
		 */
		td->mode = TICKDEV_MODE_PERIODIC;
	} else {
		handler = td->evtdev->event_handler;
		next_event = td->evtdev->next_event;
		td->evtdev->event_handler = clockevents_handle_noop;
	}

	td->evtdev = newdev;

	/*
	 * When the device is not per cpu, pin the interrupt to the
	 * current cpu:
	 */
	if (!cpumask_equal(newdev->cpumask, cpumask))
		irq_set_affinity(newdev->irq, cpumask);

	/*
	 * When global broadcasting is active, check if the current
	 * device is registered as a placeholder for broadcast mode.
	 * This allows us to handle this x86 misfeature in a generic
	 * way. This function also returns !=0 when we keep the
	 * current active broadcast state for this CPU.
	 */
	if (tick_device_uses_broadcast(newdev, cpu))
		return;

	if (td->mode == TICKDEV_MODE_PERIODIC)
		tick_setup_periodic(newdev, 0);
	else
		tick_setup_oneshot(newdev, handler, next_event);
}
void tick_install_replacement(struct clock_event_device *newdev)
{
	struct tick_device *td = this_cpu_ptr(&tick_cpu_device);
	int cpu = smp_processor_id();

	clockevents_exchange_device(td->evtdev, newdev);
	tick_setup_device(td, newdev, cpu, cpumask_of(cpu));
	if (newdev->features & CLOCK_EVT_FEAT_ONESHOT)
		tick_oneshot_notify();
}
static bool tick_check_percpu(struct clock_event_device *curdev,
			      struct clock_event_device *newdev, int cpu)
{
	if (!cpumask_test_cpu(cpu, newdev->cpumask))
		return false;
	if (cpumask_equal(newdev->cpumask, cpumask_of(cpu)))
		return true;
	/* Check if irq affinity can be set */
	if (newdev->irq >= 0 && !irq_can_set_affinity(newdev->irq))
		return false;
	/* Prefer an existing cpu local device */
	if (curdev && cpumask_equal(curdev->cpumask, cpumask_of(cpu)))
		return false;
	return true;
}
static bool tick_check_preferred(struct clock_event_device *curdev,
				 struct clock_event_device *newdev)
{
	/* Prefer oneshot capable device */
	if (!(newdev->features & CLOCK_EVT_FEAT_ONESHOT)) {
		if (curdev && (curdev->features & CLOCK_EVT_FEAT_ONESHOT))
			return false;
		if (tick_oneshot_mode_active())
			return false;
	}

	/*
	 * Use the higher rated one, but prefer a CPU local device with a lower
	 * rating than a non-CPU local device
	 */
	return !curdev ||
		newdev->rating > curdev->rating ||
		!cpumask_equal(curdev->cpumask, newdev->cpumask);
}
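
/*
 * Worked example for the return expression above (the ratings are made
 * up): a per-CPU device rated 100 replaces a global device rated 150,
 * because the cpumask inequality check deliberately outweighs the raw
 * rating; between two devices with identical cpumasks, only a higher
 * rating wins.
 */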
/*
 * Check whether the new device is a better fit than curdev. curdev
 * can be NULL !
 */
bool tick_check_replacement(struct clock_event_device *curdev,
			    struct clock_event_device *newdev)
{
	if (!tick_check_percpu(curdev, newdev, smp_processor_id()))
		return false;

	return tick_check_preferred(curdev, newdev);
}
/*
 * Check, if the new registered device should be used. Called with
 * clockevents_lock held and interrupts disabled.
 */
void tick_check_new_device(struct clock_event_device *newdev)
{
	struct clock_event_device *curdev;
	struct tick_device *td;
	int cpu;

	cpu = smp_processor_id();
	if (!cpumask_test_cpu(cpu, newdev->cpumask))
		goto out_bc;

	td = &per_cpu(tick_cpu_device, cpu);
	curdev = td->evtdev;

	/* cpu local device ? */
	if (!tick_check_percpu(curdev, newdev, cpu))
		goto out_bc;

	/* Preference decision */
	if (!tick_check_preferred(curdev, newdev))
		goto out_bc;

	if (!try_module_get(newdev->owner))
		return;

	/*
	 * Replace the possibly existing device by the new
	 * device. If the current device is the broadcast device, do
	 * not give it back to the clockevents layer !
	 */
	if (tick_is_broadcast_device(curdev)) {
		clockevents_shutdown(curdev);
		curdev = NULL;
	}
	clockevents_exchange_device(curdev, newdev);
	tick_setup_device(td, newdev, cpu, cpumask_of(cpu));
	if (newdev->features & CLOCK_EVT_FEAT_ONESHOT)
		tick_oneshot_notify();
	return;

out_bc:
	/*
	 * Can the new device be used as a broadcast device ?
	 */
	tick_install_broadcast_device(newdev);
}
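
/*
 * How a device ends up here (illustrative sketch; the field values,
 * timer_freq, and the my_* names are made up): a timer driver fills in a
 * clock_event_device and registers it, and clockevents_register_device()
 * then invokes tick_check_new_device() with clockevents_lock held:
 *
 *	static struct clock_event_device my_evt = {
 *		.name		= "my-timer",
 *		.features	= CLOCK_EVT_FEAT_ONESHOT,
 *		.rating		= 300,
 *		.set_next_event	= my_set_next_event,
 *	};
 *
 *	my_evt.cpumask = cpumask_of(smp_processor_id());
 *	clockevents_config_and_register(&my_evt, timer_freq, 0xf, 0x7fffffff);
 *
 * The device then either becomes the per-cpu tick device or is offered to
 * the broadcast machinery via tick_install_broadcast_device().
 */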
#ifdef CONFIG_HOTPLUG_CPU
/*
 * Transfer the do_timer job away from a dying cpu.
 *
 * Called with interrupts disabled. No locking required. If
 * tick_do_timer_cpu is owned by this cpu, nothing can change it.
 */
void tick_handover_do_timer(void)
{
	if (tick_do_timer_cpu == smp_processor_id()) {
		int cpu = cpumask_first(cpu_online_mask);

		tick_do_timer_cpu = (cpu < nr_cpu_ids) ? cpu :
			TICK_DO_TIMER_NONE;
	}
}
/*
 * Shutdown an event device on a given cpu:
 *
 * This is called on a live CPU, when a CPU is dead. So we cannot
 * access the hardware device itself.
 * We just set the mode and remove it from the lists.
 */
void tick_shutdown(unsigned int cpu)
{
	struct tick_device *td = &per_cpu(tick_cpu_device, cpu);
	struct clock_event_device *dev = td->evtdev;

	td->mode = TICKDEV_MODE_PERIODIC;
	if (dev) {
		/*
		 * Prevent the clock events layer from trying to call
		 * the set mode function!
		 */
		dev->state = CLOCK_EVT_STATE_DETACHED;
		dev->mode = CLOCK_EVT_MODE_UNUSED;
		clockevents_exchange_device(dev, NULL);
		dev->event_handler = clockevents_handle_noop;
		td->evtdev = NULL;
	}
}
#endif
/**
 * tick_suspend_local - Suspend the local tick device
 *
 * Called from the local cpu for freeze with interrupts disabled.
 *
 * No locks required. Nothing can change the per cpu device.
 */
void tick_suspend_local(void)
{
	struct tick_device *td = this_cpu_ptr(&tick_cpu_device);

	clockevents_shutdown(td->evtdev);
}
/**
 * tick_resume_local - Resume the local tick device
 *
 * Called from the local CPU for unfreeze or XEN resume magic.
 *
 * No locks required. Nothing can change the per cpu device.
 */
void tick_resume_local(void)
{
	struct tick_device *td = this_cpu_ptr(&tick_cpu_device);
	bool broadcast = tick_resume_check_broadcast();

	clockevents_tick_resume(td->evtdev);
	if (!broadcast) {
		if (td->mode == TICKDEV_MODE_PERIODIC)
			tick_setup_periodic(td->evtdev, 0);
		else
			tick_resume_oneshot();
	}
}
/**
 * tick_suspend - Suspend the tick and the broadcast device
 *
 * Called from syscore_suspend() via timekeeping_suspend with only one
 * CPU online and interrupts disabled, or from tick_freeze() under
 * tick_freeze_lock.
 *
 * No locks required. Nothing can change the per cpu device.
 */
void tick_suspend(void)
{
	tick_suspend_local();
	tick_suspend_broadcast();
}
/**
 * tick_resume - Resume the tick and the broadcast device
 *
 * Called from syscore_resume() via timekeeping_resume with only one
 * CPU online and interrupts disabled.
 *
 * No locks required. Nothing can change the per cpu device.
 */
void tick_resume(void)
{
	tick_resume_broadcast();
	tick_resume_local();
}
static DEFINE_RAW_SPINLOCK(tick_freeze_lock);
static unsigned int tick_freeze_depth;
/**
 * tick_freeze - Suspend the local tick and (possibly) timekeeping.
 *
 * Check if this is the last online CPU executing the function and if so,
 * suspend timekeeping. Otherwise suspend the local tick.
 *
 * Call with interrupts disabled. Must be balanced with %tick_unfreeze().
 * Interrupts must not be enabled before the subsequent %tick_unfreeze().
 */
void tick_freeze(void)
{
	raw_spin_lock(&tick_freeze_lock);

	tick_freeze_depth++;
	if (tick_freeze_depth == num_online_cpus()) {
		trace_suspend_resume(TPS("timekeeping_freeze"),
				     smp_processor_id(), true);
		timekeeping_suspend();
	} else {
		tick_suspend_local();
	}

	raw_spin_unlock(&tick_freeze_lock);
}
/**
 * tick_unfreeze - Resume the local tick and (possibly) timekeeping.
 *
 * Check if this is the first CPU executing the function and if so, resume
 * timekeeping. Otherwise resume the local tick.
 *
 * Call with interrupts disabled. Must be balanced with %tick_freeze().
 * Interrupts must not be enabled after the preceding %tick_freeze().
 */
void tick_unfreeze(void)
{
	raw_spin_lock(&tick_freeze_lock);

	if (tick_freeze_depth == num_online_cpus()) {
		timekeeping_resume();
		trace_suspend_resume(TPS("timekeeping_freeze"),
				     smp_processor_id(), false);
	} else {
		tick_resume_local();
	}

	tick_freeze_depth--;

	raw_spin_unlock(&tick_freeze_lock);
}
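
/*
 * Pairing sketch (simplified from the suspend-to-idle path in
 * drivers/cpuidle/cpuidle.c): each CPU entering s2idle brackets its idle
 * entry with the pair, so the last CPU in suspends timekeeping and the
 * first CPU out resumes it:
 *
 *	tick_freeze();
 *	// ... enter the idle state with interrupts still disabled ...
 *	tick_unfreeze();
 */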
/**
 * tick_init - initialize the tick control
 */
void __init tick_init(void)
{
	tick_broadcast_init();
	tick_nohz_init();
}