/*
 * linux/kernel/time/tick-common.c
 *
 * This file contains the base functions to manage periodic tick
 * related events.
 *
 * Copyright(C) 2005-2006, Thomas Gleixner <tglx@linutronix.de>
 * Copyright(C) 2005-2007, Red Hat, Inc., Ingo Molnar
 * Copyright(C) 2006-2007, Timesys Corp., Thomas Gleixner
 *
 * This code is licensed under the GPL version 2. For details see
 * kernel-base/COPYING.
 */
#include <linux/cpu.h>
#include <linux/err.h>
#include <linux/hrtimer.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/profile.h>
#include <linux/sched.h>
#include <linux/module.h>

#include <asm/irq_regs.h>

#include "tick-internal.h"

/*
 * Tick devices
 */
DEFINE_PER_CPU(struct tick_device, tick_cpu_device);
/*
 * Tick next event: keeps track of the tick time
 */
ktime_t tick_next_period;
ktime_t tick_period;

/*
 * tick_do_timer_cpu is a timer core internal variable which holds the CPU NR
 * which is responsible for calling do_timer(), i.e. the timekeeping stuff. This
 * variable has two functions:
 *
 * 1) Prevent a thundering herd issue of a gazillion of CPUs trying to grab the
 *    timekeeping lock all at once. Only the CPU which is assigned to do the
 *    update is handling it.
 *
 * 2) Hand off the duty in the NOHZ idle case by setting the value to
 *    TICK_DO_TIMER_NONE, i.e. a non-existing CPU. So the next CPU which looks
 *    at it will take over and keep the timekeeping alive. The handover
 *    procedure also covers CPU hotplug.
 */
int tick_do_timer_cpu __read_mostly = TICK_DO_TIMER_BOOT;

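/*
 * Note: TICK_DO_TIMER_BOOT and TICK_DO_TIMER_NONE are negative
 * sentinels (defined in tick-internal.h), so they can never collide
 * with a valid CPU number: BOOT means "not claimed yet", NONE means
 * "unassigned, the next CPU which looks at it takes over".
 */
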
/*
 * Debugging: see timer_list.c
 */
struct tick_device *tick_get_device(int cpu)
{
        return &per_cpu(tick_cpu_device, cpu);
}

/**
 * tick_is_oneshot_available - check for a oneshot capable event device
 */
int tick_is_oneshot_available(void)
{
        struct clock_event_device *dev = __this_cpu_read(tick_cpu_device.evtdev);

        if (!dev || !(dev->features & CLOCK_EVT_FEAT_ONESHOT))
                return 0;
        if (!(dev->features & CLOCK_EVT_FEAT_C3STOP))
                return 1;
        return tick_broadcast_oneshot_available();
}

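/*
 * Periodic tick: runs the per-tick work. The CPU which owns the
 * do_timer() duty advances jiffies via do_timer() under the jiffies
 * seqlock; every CPU then does its local per-tick work (process
 * accounting and profiling).
 */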
static void tick_periodic(int cpu)
{
        if (tick_do_timer_cpu == cpu) {
                write_seqlock(&jiffies_lock);

                /* Keep track of the next tick event */
                tick_next_period = ktime_add(tick_next_period, tick_period);

                do_timer(1);
                write_sequnlock(&jiffies_lock);
        }

        update_process_times(user_mode(get_irq_regs()));
        profile_tick(CPU_PROFILING);
}

/*
 * Event handler for periodic ticks
 */
void tick_handle_periodic(struct clock_event_device *dev)
{
        int cpu = smp_processor_id();
        ktime_t next;

        tick_periodic(cpu);

        if (dev->mode != CLOCK_EVT_MODE_ONESHOT)
                return;
        /*
         * Setup the next period for devices, which do not have
         * periodic mode:
         */
        next = ktime_add(dev->next_event, tick_period);
        for (;;) {
                if (!clockevents_program_event(dev, next, false))
                        return;
                /*
                 * Have to be careful here. If we're in oneshot mode,
                 * before we call tick_periodic() in a loop, we need
                 * to be sure we're using a real hardware clocksource.
                 * Otherwise we could get trapped in an infinite loop,
                 * as tick_periodic() increments jiffies, which then
                 * will increment time, possibly causing the loop to
                 * trigger again and again.
                 */
                if (timekeeping_valid_for_hres())
                        tick_periodic(cpu);
                next = ktime_add(next, tick_period);
        }
}

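/*
 * Illustrative scenario for the reprogramming loop above: if the tick
 * interrupt was delayed for several periods, the first attempt targets
 * an expiry already in the past and clockevents_program_event() fails.
 * Each iteration then accounts one missed tick via tick_periodic() and
 * moves the target forward by tick_period until the event lands in the
 * future and programming succeeds.
 */
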
/*
 * Setup the device for a periodic tick
 */
void tick_setup_periodic(struct clock_event_device *dev, int broadcast)
{
        tick_set_periodic_handler(dev, broadcast);

        /* Broadcast setup ? */
        if (!tick_device_is_functional(dev))
                return;

        if ((dev->features & CLOCK_EVT_FEAT_PERIODIC) &&
            !tick_broadcast_oneshot_active()) {
                clockevents_set_mode(dev, CLOCK_EVT_MODE_PERIODIC);
        } else {
                unsigned long seq;
                ktime_t next;

                do {
                        seq = read_seqbegin(&jiffies_lock);
                        next = tick_next_period;
                } while (read_seqretry(&jiffies_lock, seq));

                clockevents_set_mode(dev, CLOCK_EVT_MODE_ONESHOT);

                for (;;) {
                        if (!clockevents_program_event(dev, next, false))
                                return;
                        next = ktime_add(next, tick_period);
                }
        }
}

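/*
 * Note on the two paths above: hardware with a real periodic mode is
 * simply switched to CLOCK_EVT_MODE_PERIODIC. Oneshot-only hardware
 * emulates the periodic tick instead: it is programmed for the next
 * tick boundary (read race-free from tick_next_period under the
 * jiffies seqlock), retrying with the following boundary whenever the
 * requested expiry has already passed.
 */
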
/*
 * Setup the tick device
 */
static void tick_setup_device(struct tick_device *td,
                              struct clock_event_device *newdev, int cpu,
                              const struct cpumask *cpumask)
{
        ktime_t next_event;
        void (*handler)(struct clock_event_device *) = NULL;

        /*
         * First device setup ?
         */
        if (!td->evtdev) {
                /*
                 * If no cpu took the do_timer update, assign it to
                 * this cpu:
                 */
                if (tick_do_timer_cpu == TICK_DO_TIMER_BOOT) {
                        if (!tick_nohz_full_cpu(cpu))
                                tick_do_timer_cpu = cpu;
                        else
                                tick_do_timer_cpu = TICK_DO_TIMER_NONE;
                        tick_next_period = ktime_get();
                        tick_period = ktime_set(0, NSEC_PER_SEC / HZ);
                }

                /*
                 * Startup in periodic mode first.
                 */
                td->mode = TICKDEV_MODE_PERIODIC;
        } else {
                handler = td->evtdev->event_handler;
                next_event = td->evtdev->next_event;
                td->evtdev->event_handler = clockevents_handle_noop;
        }

        td->evtdev = newdev;

        /*
         * When the device is not per cpu, pin the interrupt to the
         * current cpu:
         */
        if (!cpumask_equal(newdev->cpumask, cpumask))
                irq_set_affinity(newdev->irq, cpumask);

        /*
         * When global broadcasting is active, check if the current
         * device is registered as a placeholder for broadcast mode.
         * This allows us to handle this x86 misfeature in a generic
         * way. This function also returns !=0 when we keep the
         * current active broadcast state for this CPU.
         */
        if (tick_device_uses_broadcast(newdev, cpu))
                return;

        if (td->mode == TICKDEV_MODE_PERIODIC)
                tick_setup_periodic(newdev, 0);
        else
                tick_setup_oneshot(newdev, handler, next_event);
}

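/*
 * Swap this CPU's tick device for @newdev. The clockevents core is
 * expected to call this on the affected CPU with interrupts disabled,
 * as it operates on per-CPU state.
 */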
void tick_install_replacement(struct clock_event_device *newdev)
{
        struct tick_device *td = &__get_cpu_var(tick_cpu_device);
        int cpu = smp_processor_id();

        clockevents_exchange_device(td->evtdev, newdev);
        tick_setup_device(td, newdev, cpu, cpumask_of(cpu));
        if (newdev->features & CLOCK_EVT_FEAT_ONESHOT)
                tick_oneshot_notify();
}

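/*
 * Decide whether @newdev can serve as a per-CPU tick device for @cpu:
 * it must cover @cpu at all; if it is not strictly CPU-local, its irq
 * affinity must be settable and it must not displace an existing
 * CPU-local device.
 */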
static bool tick_check_percpu(struct clock_event_device *curdev,
                              struct clock_event_device *newdev, int cpu)
{
        if (!cpumask_test_cpu(cpu, newdev->cpumask))
                return false;
        if (cpumask_equal(newdev->cpumask, cpumask_of(cpu)))
                return true;
        /* Check if irq affinity can be set */
        if (newdev->irq >= 0 && !irq_can_set_affinity(newdev->irq))
                return false;
        /* Prefer an existing cpu local device */
        if (curdev && cpumask_equal(curdev->cpumask, cpumask_of(cpu)))
                return false;
        return true;
}

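/*
 * Decide whether @newdev is preferable to @curdev. Illustrative
 * example (ratings are hypothetical): a CPU-local timer rated 100
 * replaces a global device rated 150, because the cpumasks differ and
 * locality trumps rating; between two devices with identical cpumasks
 * the strictly higher rating wins.
 */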
static bool tick_check_preferred(struct clock_event_device *curdev,
                                 struct clock_event_device *newdev)
{
        /* Prefer oneshot capable device */
        if (!(newdev->features & CLOCK_EVT_FEAT_ONESHOT)) {
                if (curdev && (curdev->features & CLOCK_EVT_FEAT_ONESHOT))
                        return false;
                if (tick_oneshot_mode_active())
                        return false;
        }

        /*
         * Use the higher rated one, but prefer a CPU local device with a lower
         * rating than a non-CPU local device
         */
        return !curdev ||
                newdev->rating > curdev->rating ||
                !cpumask_equal(curdev->cpumask, newdev->cpumask);
}

/*
 * Check whether the new device is a better fit than curdev. curdev
 * can be NULL !
 */
bool tick_check_replacement(struct clock_event_device *curdev,
                            struct clock_event_device *newdev)
{
        if (!tick_check_percpu(curdev, newdev, smp_processor_id()))
                return false;

        return tick_check_preferred(curdev, newdev);
}

/*
 * Check whether the newly registered device should be used. Called
 * with clockevents_lock held and interrupts disabled.
 */
void tick_check_new_device(struct clock_event_device *newdev)
{
        struct clock_event_device *curdev;
        struct tick_device *td;
        int cpu;

        cpu = smp_processor_id();
        if (!cpumask_test_cpu(cpu, newdev->cpumask))
                goto out_bc;

        td = &per_cpu(tick_cpu_device, cpu);
        curdev = td->evtdev;

        /* cpu local device ? */
        if (!tick_check_percpu(curdev, newdev, cpu))
                goto out_bc;

        /* Preference decision */
        if (!tick_check_preferred(curdev, newdev))
                goto out_bc;

        if (!try_module_get(newdev->owner))
                return;

        /*
         * Replace the existing device, if any, by the new device. If
         * the current device is the broadcast device, do not give it
         * back to the clockevents layer !
         */
        if (tick_is_broadcast_device(curdev)) {
                clockevents_shutdown(curdev);
                curdev = NULL;
        }
        clockevents_exchange_device(curdev, newdev);
        tick_setup_device(td, newdev, cpu, cpumask_of(cpu));
        if (newdev->features & CLOCK_EVT_FEAT_ONESHOT)
                tick_oneshot_notify();
        return;

out_bc:
        /*
         * Can the new device be used as a broadcast device ?
         */
        tick_install_broadcast_device(newdev);
}

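/*
 * For orientation, a sketch (not code from this file) of how a
 * clockevent driver typically ends up here; the "my_*" names are
 * placeholders:
 *
 *      static struct clock_event_device my_evt = {
 *              .name           = "my-timer",
 *              .features       = CLOCK_EVT_FEAT_ONESHOT,
 *              .rating         = 200,
 *              .cpumask        = cpumask_of(cpu),
 *              .set_mode       = my_set_mode,
 *              .set_next_event = my_set_next_event,
 *      };
 *      clockevents_config_and_register(&my_evt, freq, min_delta, max_delta);
 *
 * The clockevents core then calls tick_check_new_device() with
 * clockevents_lock held and interrupts disabled.
 */
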
/*
 * Transfer the do_timer job away from a dying cpu.
 *
 * Called with interrupts disabled.
 */
void tick_handover_do_timer(int *cpup)
{
        if (*cpup == tick_do_timer_cpu) {
                int cpu = cpumask_first(cpu_online_mask);

                tick_do_timer_cpu = (cpu < nr_cpu_ids) ? cpu :
                        TICK_DO_TIMER_NONE;
        }
}

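/*
 * Example: if the dying CPU owns the do_timer() duty, the first CPU
 * remaining in cpu_online_mask inherits it; only if no CPU is left
 * online does the duty fall back to TICK_DO_TIMER_NONE.
 */
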
/*
 * Shutdown an event device on a given cpu:
 *
 * This is called on a live CPU, when a CPU is dead. So we cannot
 * access the hardware device itself.
 * We just set the mode and remove it from the lists.
 */
void tick_shutdown(unsigned int *cpup)
{
        struct tick_device *td = &per_cpu(tick_cpu_device, *cpup);
        struct clock_event_device *dev = td->evtdev;

        td->mode = TICKDEV_MODE_PERIODIC;
        if (dev) {
                /*
                 * Prevent the clock events layer from trying to call
                 * the set mode function!
                 */
                dev->mode = CLOCK_EVT_MODE_UNUSED;
                clockevents_exchange_device(dev, NULL);
                dev->event_handler = clockevents_handle_noop;
                td->evtdev = NULL;
        }
}

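/*
 * Shut down this CPU's tick device across system suspend, so that no
 * tick events are delivered while the machine is down. tick_resume()
 * reverses this.
 */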
void tick_suspend(void)
{
        struct tick_device *td = &__get_cpu_var(tick_cpu_device);

        clockevents_shutdown(td->evtdev);
}

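/*
 * Resume this CPU's tick device. The device is always told to resume;
 * reprogramming it for periodic or oneshot operation is skipped when
 * the broadcast code already resumed event delivery for this CPU.
 */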
void tick_resume(void)
{
        struct tick_device *td = &__get_cpu_var(tick_cpu_device);
        int broadcast = tick_resume_broadcast();

        clockevents_set_mode(td->evtdev, CLOCK_EVT_MODE_RESUME);

        if (!broadcast) {
                if (td->mode == TICKDEV_MODE_PERIODIC)
                        tick_setup_periodic(td->evtdev, 0);
                else
                        tick_resume_oneshot();
        }
}

/**
 * tick_init - initialize the tick control
 */
void __init tick_init(void)
{
        tick_broadcast_init();
}