/*
 * linux/kernel/time/tick-broadcast.c
 *
 * This file contains functions which emulate a local clock-event
 * device via a broadcast event source.
 *
 * Copyright(C) 2005-2006, Thomas Gleixner <tglx@linutronix.de>
 * Copyright(C) 2005-2007, Red Hat, Inc., Ingo Molnar
 * Copyright(C) 2006-2007, Timesys Corp., Thomas Gleixner
 *
 * This code is licensed under the GPL version 2. For details see
 * kernel-base/COPYING.
 */
#include <linux/cpu.h>
#include <linux/err.h>
#include <linux/hrtimer.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/profile.h>
#include <linux/sched.h>
#include <linux/smp.h>

#include "tick-internal.h"

/*
 * Broadcast support for broken x86 hardware, where the local apic
 * timer stops in C3 state.
 */

static struct tick_device tick_broadcast_device;
static cpumask_var_t tick_broadcast_mask;
static cpumask_var_t tmpmask;
static DEFINE_RAW_SPINLOCK(tick_broadcast_lock);
static int tick_broadcast_force;
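
/*
 * Note: tick_broadcast_mask tracks the cpus whose local clock event
 * device is either non functional or stops in deep power states, so
 * their ticks must be delivered by the broadcast device instead.
 */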

#ifdef CONFIG_TICK_ONESHOT
static void tick_broadcast_clear_oneshot(int cpu);
#else
static inline void tick_broadcast_clear_oneshot(int cpu) { }
#endif

/*
 * Debugging: see timer_list.c
 */
struct tick_device *tick_get_broadcast_device(void)
{
	return &tick_broadcast_device;
}

struct cpumask *tick_get_broadcast_mask(void)
{
	return tick_broadcast_mask;
}

/*
 * Start the device in periodic mode
 */
static void tick_broadcast_start_periodic(struct clock_event_device *bc)
{
	if (bc)
		tick_setup_periodic(bc, 1);
}

/*
 * Check if the device can be utilized as a broadcast device:
 */
void tick_install_broadcast_device(struct clock_event_device *dev)
{
	struct clock_event_device *cur = tick_broadcast_device.evtdev;

	if ((dev->features & CLOCK_EVT_FEAT_DUMMY) ||
	    (tick_broadcast_device.evtdev &&
	     tick_broadcast_device.evtdev->rating >= dev->rating) ||
	    (dev->features & CLOCK_EVT_FEAT_C3STOP))
		return;

	clockevents_exchange_device(tick_broadcast_device.evtdev, dev);
	if (cur)
		cur->event_handler = clockevents_handle_noop;
	tick_broadcast_device.evtdev = dev;
	if (!cpumask_empty(tick_broadcast_mask))
		tick_broadcast_start_periodic(dev);
	/*
	 * Inform all cpus about this. We might be in a situation
	 * where we did not switch to oneshot mode because the per cpu
	 * devices are affected by CLOCK_EVT_FEAT_C3STOP and the lack
	 * of a oneshot capable broadcast device. Without that
	 * notification the system stays stuck in periodic mode
	 * forever.
	 */
	if (dev->features & CLOCK_EVT_FEAT_ONESHOT)
		tick_clock_notify();
}
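
/*
 * Example: on x86 this typically selects the HPET or PIT as the
 * broadcast device. Both keep running in deep C-states, while the
 * higher rated local APIC timer carries CLOCK_EVT_FEAT_C3STOP and is
 * rejected by the check above.
 */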

/*
 * Check if the device is the broadcast device
 */
int tick_is_broadcast_device(struct clock_event_device *dev)
{
	return (dev && tick_broadcast_device.evtdev == dev);
}

static void err_broadcast(const struct cpumask *mask)
{
	pr_crit_once("Failed to broadcast timer tick. Some CPUs may be unresponsive.\n");
}

static void tick_device_setup_broadcast_func(struct clock_event_device *dev)
{
	if (!dev->broadcast)
		dev->broadcast = tick_broadcast;
	if (!dev->broadcast) {
		pr_warn_once("%s depends on broadcast, but no broadcast function available\n",
			     dev->name);
		dev->broadcast = err_broadcast;
	}
}

/*
 * Check if the device is dysfunctional and a placeholder, which
 * needs to be handled by the broadcast device.
 */
int tick_device_uses_broadcast(struct clock_event_device *dev, int cpu)
{
	unsigned long flags;
	int ret = 0;

	raw_spin_lock_irqsave(&tick_broadcast_lock, flags);

	/*
	 * Devices might be registered with both periodic and oneshot
	 * mode disabled. This signals that the device needs to be
	 * operated from the broadcast device and is a placeholder for
	 * the cpu local device.
	 */
	if (!tick_device_is_functional(dev)) {
		dev->event_handler = tick_handle_periodic;
		tick_device_setup_broadcast_func(dev);
		cpumask_set_cpu(cpu, tick_broadcast_mask);
		tick_broadcast_start_periodic(tick_broadcast_device.evtdev);
		ret = 1;
	} else {
		/*
		 * When the new device is not affected by the stop
		 * feature and the cpu is marked in the broadcast mask
		 * then clear the broadcast bit.
		 */
		if (!(dev->features & CLOCK_EVT_FEAT_C3STOP)) {
			int cpu = smp_processor_id();
			cpumask_clear_cpu(cpu, tick_broadcast_mask);
			tick_broadcast_clear_oneshot(cpu);
		} else {
			tick_device_setup_broadcast_func(dev);
		}
	}
	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
	return ret;
}
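
/*
 * Example: a dummy tick device registered on a cpu whose real timer
 * is unusable takes the first branch above: the cpu is added to
 * tick_broadcast_mask, the broadcast device starts ticking for it,
 * and 1 is returned to tell the caller that the device is driven by
 * the broadcast mechanism.
 */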

#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
int tick_receive_broadcast(void)
{
	struct tick_device *td = this_cpu_ptr(&tick_cpu_device);
	struct clock_event_device *evt = td->evtdev;

	if (!evt)
		return -ENODEV;

	if (!evt->event_handler)
		return -EINVAL;

	evt->event_handler(evt);
	return 0;
}
#endif

/*
 * Broadcast the event to the cpus which are set in the mask. The
 * mask is mangled in the process.
 */
static void tick_do_broadcast(struct cpumask *mask)
{
	int cpu = smp_processor_id();
	struct tick_device *td;

	/*
	 * Check if the current cpu is in the mask
	 */
	if (cpumask_test_cpu(cpu, mask)) {
		cpumask_clear_cpu(cpu, mask);
		td = &per_cpu(tick_cpu_device, cpu);
		td->evtdev->event_handler(td->evtdev);
	}

	if (!cpumask_empty(mask)) {
		/*
		 * It might be necessary to actually check whether the devices
		 * have different broadcast functions. For now, just use the
		 * one of the first device. This works as long as we have this
		 * misfeature only on x86 (lapic)
		 */
		td = &per_cpu(tick_cpu_device, cpumask_first(mask));
		td->evtdev->broadcast(mask);
	}
}
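
/*
 * Example: if CPU0 runs this with mask = { 0, 2, 3 }, CPU0's local
 * handler is invoked directly and the mask shrinks to { 2, 3 }, which
 * is then handed to the ->broadcast() callback (an IPI on x86).
 */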

/*
 * Periodic broadcast:
 * - invoke the broadcast handlers
 */
static void tick_do_periodic_broadcast(void)
{
	raw_spin_lock(&tick_broadcast_lock);

	cpumask_and(tmpmask, cpu_online_mask, tick_broadcast_mask);
	tick_do_broadcast(tmpmask);

	raw_spin_unlock(&tick_broadcast_lock);
}

/*
 * Event handler for periodic broadcast ticks
 */
static void tick_handle_periodic_broadcast(struct clock_event_device *dev)
{
	ktime_t next;

	tick_do_periodic_broadcast();

	/*
	 * The device is in periodic mode. No reprogramming necessary:
	 */
	if (dev->mode == CLOCK_EVT_MODE_PERIODIC)
		return;

	/*
	 * Setup the next period for devices, which do not have
	 * periodic mode. We read dev->next_event first and add to it
	 * when the event already expired. clockevents_program_event()
	 * sets dev->next_event only when the event is really
	 * programmed to the device.
	 */
	for (next = dev->next_event; ;) {
		next = ktime_add(next, tick_period);

		if (!clockevents_program_event(dev, next, false))
			return;
		tick_do_periodic_broadcast();
	}
}
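
/*
 * Example: with a 10ms tick_period and dev->next_event 25ms in the
 * past, the loop above tries next_event + 10ms and + 20ms, which
 * still lie in the past; each failed attempt broadcasts one missed
 * period, until next_event + 30ms programs successfully.
 */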

/*
 * Powerstate information: The system enters/leaves a state, where
 * affected devices might stop.
 */
static void tick_do_broadcast_on_off(unsigned long *reason)
{
	struct clock_event_device *bc, *dev;
	struct tick_device *td;
	unsigned long flags;
	int cpu, bc_stopped;

	raw_spin_lock_irqsave(&tick_broadcast_lock, flags);

	cpu = smp_processor_id();
	td = &per_cpu(tick_cpu_device, cpu);
	dev = td->evtdev;
	bc = tick_broadcast_device.evtdev;

	/*
	 * Is the device not affected by the powerstate?
	 */
	if (!dev || !(dev->features & CLOCK_EVT_FEAT_C3STOP))
		goto out;

	if (!tick_device_is_functional(dev))
		goto out;

	bc_stopped = cpumask_empty(tick_broadcast_mask);

	switch (*reason) {
	case CLOCK_EVT_NOTIFY_BROADCAST_ON:
	case CLOCK_EVT_NOTIFY_BROADCAST_FORCE:
		if (!cpumask_test_and_set_cpu(cpu, tick_broadcast_mask)) {
			if (tick_broadcast_device.mode ==
			    TICKDEV_MODE_PERIODIC)
				clockevents_shutdown(dev);
		}
		if (*reason == CLOCK_EVT_NOTIFY_BROADCAST_FORCE)
			tick_broadcast_force = 1;
		break;
	case CLOCK_EVT_NOTIFY_BROADCAST_OFF:
		if (!tick_broadcast_force &&
		    cpumask_test_and_clear_cpu(cpu, tick_broadcast_mask)) {
			if (tick_broadcast_device.mode ==
			    TICKDEV_MODE_PERIODIC)
				tick_setup_periodic(dev, 0);
		}
		break;
	}

	if (cpumask_empty(tick_broadcast_mask)) {
		if (!bc_stopped)
			clockevents_shutdown(bc);
	} else if (bc_stopped) {
		if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC)
			tick_broadcast_start_periodic(bc);
		else
			tick_broadcast_setup_oneshot(bc);
	}
out:
	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}

/*
 * Powerstate information: The system enters/leaves a state, where
 * affected devices might stop.
 */
void tick_broadcast_on_off(unsigned long reason, int *oncpu)
{
	if (!cpumask_test_cpu(*oncpu, cpu_online_mask))
		printk(KERN_ERR "tick-broadcast: ignoring broadcast for offline CPU #%d\n",
		       *oncpu);
	else
		tick_do_broadcast_on_off(&reason);
}
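
/*
 * Example: idle drivers such as the ACPI processor driver typically
 * invoke this via clockevents_notify() with
 * CLOCK_EVT_NOTIFY_BROADCAST_ON before entering a C3-type state and
 * with _OFF after leaving it. _FORCE acts like _ON, but additionally
 * sets tick_broadcast_force, so later _OFF requests are ignored.
 */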

/*
 * Set the periodic handler depending on broadcast on/off
 */
void tick_set_periodic_handler(struct clock_event_device *dev, int broadcast)
{
	if (!broadcast)
		dev->event_handler = tick_handle_periodic;
	else
		dev->event_handler = tick_handle_periodic_broadcast;
}

/*
 * Remove a CPU from broadcasting
 */
void tick_shutdown_broadcast(unsigned int *cpup)
{
	struct clock_event_device *bc;
	unsigned long flags;
	unsigned int cpu = *cpup;

	raw_spin_lock_irqsave(&tick_broadcast_lock, flags);

	bc = tick_broadcast_device.evtdev;
	cpumask_clear_cpu(cpu, tick_broadcast_mask);

	if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC) {
		if (bc && cpumask_empty(tick_broadcast_mask))
			clockevents_shutdown(bc);
	}

	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}

void tick_suspend_broadcast(void)
{
	struct clock_event_device *bc;
	unsigned long flags;

	raw_spin_lock_irqsave(&tick_broadcast_lock, flags);

	bc = tick_broadcast_device.evtdev;
	if (bc)
		clockevents_shutdown(bc);

	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}

int tick_resume_broadcast(void)
{
	struct clock_event_device *bc;
	unsigned long flags;
	int broadcast = 0;

	raw_spin_lock_irqsave(&tick_broadcast_lock, flags);

	bc = tick_broadcast_device.evtdev;

	if (bc) {
		clockevents_set_mode(bc, CLOCK_EVT_MODE_RESUME);

		switch (tick_broadcast_device.mode) {
		case TICKDEV_MODE_PERIODIC:
			if (!cpumask_empty(tick_broadcast_mask))
				tick_broadcast_start_periodic(bc);
			broadcast = cpumask_test_cpu(smp_processor_id(),
						     tick_broadcast_mask);
			break;
		case TICKDEV_MODE_ONESHOT:
			if (!cpumask_empty(tick_broadcast_mask))
				broadcast = tick_resume_broadcast_oneshot(bc);
			break;
		}
	}
	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);

	return broadcast;
}

#ifdef CONFIG_TICK_ONESHOT

static cpumask_var_t tick_broadcast_oneshot_mask;
static cpumask_var_t tick_broadcast_pending_mask;
static cpumask_var_t tick_broadcast_force_mask;

/*
 * Exposed for debugging: see timer_list.c
 */
struct cpumask *tick_get_broadcast_oneshot_mask(void)
{
	return tick_broadcast_oneshot_mask;
}

/*
 * Called before going idle with interrupts disabled. Checks whether a
 * broadcast event from another core is about to happen, which was
 * detected in tick_broadcast_oneshot_control(). The call site can use
 * this to avoid a deep idle transition, as we are about to get the
 * broadcast IPI right away.
 */
int tick_check_broadcast_expired(void)
{
	return cpumask_test_cpu(smp_processor_id(), tick_broadcast_force_mask);
}
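
/*
 * Example: the generic idle loop can poll briefly instead of entering
 * an idle state when this returns true, since the broadcast IPI that
 * delivers the expired event is already on its way.
 */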

/*
 * Set broadcast interrupt affinity
 */
static void tick_broadcast_set_affinity(struct clock_event_device *bc,
					const struct cpumask *cpumask)
{
	if (!(bc->features & CLOCK_EVT_FEAT_DYNIRQ))
		return;

	if (cpumask_equal(bc->cpumask, cpumask))
		return;

	bc->cpumask = cpumask;
	irq_set_affinity(bc->irq, bc->cpumask);
}

static int tick_broadcast_set_event(struct clock_event_device *bc, int cpu,
				    ktime_t expires, int force)
{
	int ret;

	if (bc->mode != CLOCK_EVT_MODE_ONESHOT)
		clockevents_set_mode(bc, CLOCK_EVT_MODE_ONESHOT);

	ret = clockevents_program_event(bc, expires, force);
	if (!ret)
		tick_broadcast_set_affinity(bc, cpumask_of(cpu));
	return ret;
}
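
/*
 * Note: with CLOCK_EVT_FEAT_DYNIRQ the broadcast interrupt is steered
 * to the cpu whose event expires first, so that cpu is woken directly
 * by the timer irq instead of via an extra IPI. The affinity is only
 * updated when programming the event succeeded.
 */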

int tick_resume_broadcast_oneshot(struct clock_event_device *bc)
{
	clockevents_set_mode(bc, CLOCK_EVT_MODE_ONESHOT);
	return 0;
}

/*
 * Called from irq_enter() when idle was interrupted to reenable the
 * per cpu device.
 */
void tick_check_oneshot_broadcast(int cpu)
{
	if (cpumask_test_cpu(cpu, tick_broadcast_oneshot_mask)) {
		struct tick_device *td = &per_cpu(tick_cpu_device, cpu);

		clockevents_set_mode(td->evtdev, CLOCK_EVT_MODE_ONESHOT);
	}
}

/*
 * Handle oneshot mode broadcasting
 */
static void tick_handle_oneshot_broadcast(struct clock_event_device *dev)
{
	struct tick_device *td;
	ktime_t now, next_event;
	int cpu, next_cpu = 0;

	raw_spin_lock(&tick_broadcast_lock);
again:
	dev->next_event.tv64 = KTIME_MAX;
	next_event.tv64 = KTIME_MAX;
	cpumask_clear(tmpmask);
	now = ktime_get();
	/* Find all expired events */
	for_each_cpu(cpu, tick_broadcast_oneshot_mask) {
		td = &per_cpu(tick_cpu_device, cpu);
		if (td->evtdev->next_event.tv64 <= now.tv64) {
			cpumask_set_cpu(cpu, tmpmask);
			/*
			 * Mark the remote cpu in the pending mask, so
			 * it can avoid reprogramming the cpu local
			 * timer in tick_broadcast_oneshot_control().
			 */
			cpumask_set_cpu(cpu, tick_broadcast_pending_mask);
		} else if (td->evtdev->next_event.tv64 < next_event.tv64) {
			next_event.tv64 = td->evtdev->next_event.tv64;
			next_cpu = cpu;
		}
	}

	/*
	 * Remove the current cpu from the pending mask. The event is
	 * delivered immediately in tick_do_broadcast()!
	 */
	cpumask_clear_cpu(smp_processor_id(), tick_broadcast_pending_mask);

	/* Take care of enforced broadcast requests */
	cpumask_or(tmpmask, tmpmask, tick_broadcast_force_mask);
	cpumask_clear(tick_broadcast_force_mask);

	/*
	 * Wake up the cpus which have an expired event.
	 */
	tick_do_broadcast(tmpmask);

	/*
	 * Two reasons for reprogramming:
	 *
	 * - The global event did not expire any CPU local
	 *   events. This happens in dyntick mode, as the maximum PIT
	 *   delta is quite small.
	 *
	 * - There are pending events on sleeping CPUs which were not
	 *   yet delivered.
	 */
	if (next_event.tv64 != KTIME_MAX) {
		/*
		 * Rearm the broadcast device. If event expired,
		 * repeat the above.
		 */
		if (tick_broadcast_set_event(dev, next_cpu, next_event, 0))
			goto again;
	}
	raw_spin_unlock(&tick_broadcast_lock);
}
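
/*
 * Example: CPUs 1 and 2 sleep with local events due at t+1ms and
 * t+5ms. When the broadcast device fires at t+1ms, CPU 1 is woken via
 * tick_do_broadcast() and marked pending, while CPU 2's t+5ms expiry
 * becomes next_event and the device is rearmed for it with
 * next_cpu = 2.
 */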

static int broadcast_needs_cpu(struct clock_event_device *bc, int cpu)
{
	if (!(bc->features & CLOCK_EVT_FEAT_HRTIMER))
		return 0;
	if (bc->next_event.tv64 == KTIME_MAX)
		return 0;
	return bc->bound_on == cpu ? -EBUSY : 0;
}

static void broadcast_shutdown_local(struct clock_event_device *bc,
				     struct clock_event_device *dev)
{
	/*
	 * For hrtimer based broadcasting we cannot shutdown the cpu
	 * local device if our own event is the first one to expire or
	 * if we own the broadcast timer.
	 */
	if (bc->features & CLOCK_EVT_FEAT_HRTIMER) {
		if (broadcast_needs_cpu(bc, smp_processor_id()))
			return;
		if (dev->next_event.tv64 < bc->next_event.tv64)
			return;
	}
	clockevents_set_mode(dev, CLOCK_EVT_MODE_SHUTDOWN);
}

static void broadcast_move_bc(int deadcpu)
{
	struct clock_event_device *bc = tick_broadcast_device.evtdev;

	if (!bc || !broadcast_needs_cpu(bc, deadcpu))
		return;
	/* This moves the broadcast assignment to this cpu */
	clockevents_program_event(bc, bc->next_event, 1);
}
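
/*
 * Note: CLOCK_EVT_FEAT_HRTIMER marks a broadcast device emulated by a
 * hrtimer on an ordinary cpu instead of real always-on hardware. That
 * cpu owns the broadcast timer and must not go deep idle itself,
 * which broadcast_needs_cpu() reports as -EBUSY. When the owning cpu
 * dies, the reprogramming above rebinds the timer to the current cpu.
 */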

/*
 * Powerstate information: The system enters/leaves a state, where
 * affected devices might stop.
 * Returns 0 on success, -EBUSY if the cpu is used to broadcast wakeups.
 */
int tick_broadcast_oneshot_control(unsigned long reason)
{
	struct clock_event_device *bc, *dev;
	struct tick_device *td;
	unsigned long flags;
	ktime_t now;
	int cpu, ret = 0;

	/*
	 * Periodic mode does not care about the enter/exit of power
	 * states
	 */
	if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC)
		return 0;

	/*
	 * We are called with preemption disabled from the depth of the
	 * idle code, so we can't be moved away.
	 */
	cpu = smp_processor_id();
	td = &per_cpu(tick_cpu_device, cpu);
	dev = td->evtdev;

	if (!(dev->features & CLOCK_EVT_FEAT_C3STOP))
		return 0;

	bc = tick_broadcast_device.evtdev;

	raw_spin_lock_irqsave(&tick_broadcast_lock, flags);
	if (reason == CLOCK_EVT_NOTIFY_BROADCAST_ENTER) {
		if (!cpumask_test_and_set_cpu(cpu, tick_broadcast_oneshot_mask)) {
			WARN_ON_ONCE(cpumask_test_cpu(cpu, tick_broadcast_pending_mask));
			broadcast_shutdown_local(bc, dev);
			/*
			 * We only reprogram the broadcast timer if we
			 * did not mark ourselves in the force mask and
			 * if the cpu local event is earlier than the
			 * broadcast event. If the current CPU is in
			 * the force mask, then we are going to be
			 * woken by the IPI right away.
			 */
			if (!cpumask_test_cpu(cpu, tick_broadcast_force_mask) &&
			    dev->next_event.tv64 < bc->next_event.tv64)
				tick_broadcast_set_event(bc, cpu, dev->next_event, 1);
		}
		/*
		 * If the current CPU owns the hrtimer broadcast
		 * mechanism, it cannot go deep idle and we remove the
		 * CPU from the broadcast mask. We don't have to go
		 * through the EXIT path as the local timer is not
		 * shutdown.
		 */
		ret = broadcast_needs_cpu(bc, cpu);
		if (ret)
			cpumask_clear_cpu(cpu, tick_broadcast_oneshot_mask);
	} else {
		if (cpumask_test_and_clear_cpu(cpu, tick_broadcast_oneshot_mask)) {
			clockevents_set_mode(dev, CLOCK_EVT_MODE_ONESHOT);
			/*
			 * The cpu which was handling the broadcast
			 * timer marked this cpu in the broadcast
			 * pending mask and fired the broadcast
			 * IPI. So we are going to handle the expired
			 * event anyway via the broadcast IPI
			 * handler. No need to reprogram the timer
			 * with an already expired event.
			 */
			if (cpumask_test_and_clear_cpu(cpu,
				       tick_broadcast_pending_mask))
				goto out;

			/*
			 * Bail out if there is no next event.
			 */
			if (dev->next_event.tv64 == KTIME_MAX)
				goto out;
			/*
			 * If the pending bit is not set, then we are
			 * either the CPU handling the broadcast
			 * interrupt or we got woken by something else.
			 *
			 * We are no longer in the broadcast mask, so
			 * if the cpu local expiry time is already
			 * reached, we would reprogram the cpu local
			 * timer with an already expired event.
			 *
			 * This can lead to a ping-pong when we return
			 * to idle and therefore rearm the broadcast
			 * timer before the cpu local timer was able
			 * to fire. This happens because the forced
			 * reprogramming makes sure that the event
			 * will happen in the future and depending on
			 * the min_delta setting this might be far
			 * enough out that the ping-pong starts.
			 *
			 * If the cpu local next_event has expired
			 * then we know that the broadcast timer
			 * next_event has expired as well and
			 * broadcast is about to be handled. So we
			 * avoid reprogramming and enforce that the
			 * broadcast handler, which did not run yet,
			 * will invoke the cpu local handler.
			 *
			 * We cannot call the handler directly from
			 * here, because we might be in a NOHZ phase
			 * and we did not go through the irq_enter()
			 * nohz fixups.
			 */
			now = ktime_get();
			if (dev->next_event.tv64 <= now.tv64) {
				cpumask_set_cpu(cpu, tick_broadcast_force_mask);
				goto out;
			}
			/*
			 * We got woken by something else. Reprogram
			 * the cpu local timer device.
			 */
			tick_program_event(dev->next_event, 1);
		}
	}
out:
	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
	return ret;
}
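
/*
 * In short: on BROADCAST_ENTER the cpu is added to the oneshot mask,
 * its local device is shut down and the broadcast device is
 * reprogrammed to the cpu's next event if that is earlier than the
 * current broadcast expiry. On BROADCAST_EXIT the local device is
 * switched back to oneshot mode and reprogrammed, unless the pending
 * bit shows that the broadcast IPI will deliver the expired event
 * anyway.
 */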

/*
 * Reset the one shot broadcast for a cpu
 *
 * Called with tick_broadcast_lock held
 */
static void tick_broadcast_clear_oneshot(int cpu)
{
	cpumask_clear_cpu(cpu, tick_broadcast_oneshot_mask);
}

static void tick_broadcast_init_next_event(struct cpumask *mask,
					   ktime_t expires)
{
	struct tick_device *td;
	int cpu;

	for_each_cpu(cpu, mask) {
		td = &per_cpu(tick_cpu_device, cpu);
		if (td->evtdev)
			td->evtdev->next_event = expires;
	}
}

/**
 * tick_broadcast_setup_oneshot - setup the broadcast device
 */
void tick_broadcast_setup_oneshot(struct clock_event_device *bc)
{
	int cpu = smp_processor_id();

	/* Set it up only once! */
	if (bc->event_handler != tick_handle_oneshot_broadcast) {
		int was_periodic = bc->mode == CLOCK_EVT_MODE_PERIODIC;

		bc->event_handler = tick_handle_oneshot_broadcast;

		/*
		 * We must be careful here. There might be other CPUs
		 * waiting for periodic broadcast. We need to set the
		 * oneshot_mask bits for those and program the
		 * broadcast device to fire.
		 */
		cpumask_copy(tmpmask, tick_broadcast_mask);
		cpumask_clear_cpu(cpu, tmpmask);
		cpumask_or(tick_broadcast_oneshot_mask,
			   tick_broadcast_oneshot_mask, tmpmask);

		if (was_periodic && !cpumask_empty(tmpmask)) {
			clockevents_set_mode(bc, CLOCK_EVT_MODE_ONESHOT);
			tick_broadcast_init_next_event(tmpmask,
						       tick_next_period);
			tick_broadcast_set_event(bc, cpu, tick_next_period, 1);
		} else
			bc->next_event.tv64 = KTIME_MAX;
	} else {
		/*
		 * The first cpu which switches to oneshot mode sets
		 * the bit for all other cpus which are in the general
		 * (periodic) broadcast mask. So the bit is set and
		 * would prevent the first broadcast enter after this
		 * from programming the bc device.
		 */
		tick_broadcast_clear_oneshot(cpu);
	}
}
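
/*
 * Note: the periodic to oneshot transition above programs the
 * broadcast device for tick_next_period, so cpus which were sleeping
 * in periodic broadcast mode still get their next tick on time
 * before switching over to per cpu oneshot events.
 */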

/*
 * Select oneshot operating mode for the broadcast device
 */
void tick_broadcast_switch_to_oneshot(void)
{
	struct clock_event_device *bc;
	unsigned long flags;

	raw_spin_lock_irqsave(&tick_broadcast_lock, flags);

	tick_broadcast_device.mode = TICKDEV_MODE_ONESHOT;
	bc = tick_broadcast_device.evtdev;
	if (bc)
		tick_broadcast_setup_oneshot(bc);

	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}

/*
 * Remove a dead CPU from broadcasting
 */
void tick_shutdown_broadcast_oneshot(unsigned int *cpup)
{
	unsigned long flags;
	unsigned int cpu = *cpup;

	raw_spin_lock_irqsave(&tick_broadcast_lock, flags);

	/*
	 * Clear the broadcast mask flag for the dead cpu, but do not
	 * stop the broadcast device!
	 */
	cpumask_clear_cpu(cpu, tick_broadcast_oneshot_mask);

	broadcast_move_bc(cpu);

	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}

/*
 * Check whether the broadcast device is in oneshot mode
 */
int tick_broadcast_oneshot_active(void)
{
	return tick_broadcast_device.mode == TICKDEV_MODE_ONESHOT;
}

/*
 * Check whether the broadcast device supports oneshot.
 */
bool tick_broadcast_oneshot_available(void)
{
	struct clock_event_device *bc = tick_broadcast_device.evtdev;

	return bc ? bc->features & CLOCK_EVT_FEAT_ONESHOT : false;
}

#endif

void __init tick_broadcast_init(void)
{
	zalloc_cpumask_var(&tick_broadcast_mask, GFP_NOWAIT);
	zalloc_cpumask_var(&tmpmask, GFP_NOWAIT);
#ifdef CONFIG_TICK_ONESHOT
	zalloc_cpumask_var(&tick_broadcast_oneshot_mask, GFP_NOWAIT);
	zalloc_cpumask_var(&tick_broadcast_pending_mask, GFP_NOWAIT);
	zalloc_cpumask_var(&tick_broadcast_force_mask, GFP_NOWAIT);
#endif
}
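
/*
 * Note: GFP_NOWAIT is used because this runs early during boot, where
 * sleeping allocations are not allowed. With CONFIG_CPUMASK_OFFSTACK=n
 * the zalloc_cpumask_var() calls reduce to clearing the static masks.
 */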