#ifndef _LINUX_INTERRUPT_H
#define _LINUX_INTERRUPT_H

#include <linux/kernel.h>
#include <linux/linkage.h>
#include <linux/bitops.h>
#include <linux/preempt.h>
#include <linux/cpumask.h>
#include <linux/irqreturn.h>
#include <linux/irqnr.h>
#include <linux/hardirq.h>
#include <linux/irqflags.h>
#include <linux/hrtimer.h>
#include <linux/kref.h>
#include <linux/workqueue.h>

#include <linux/atomic.h>
#include <asm/ptrace.h>
#include <asm/irq.h>

/*
 * These correspond to the IORESOURCE_IRQ_* defines in
 * linux/ioport.h to select the interrupt line behaviour. When
 * requesting an interrupt without specifying an IRQF_TRIGGER, the
 * setting should be assumed to be "as already configured", which
 * may be as per machine or firmware initialisation.
 */
#define IRQF_TRIGGER_NONE	0x00000000
#define IRQF_TRIGGER_RISING	0x00000001
#define IRQF_TRIGGER_FALLING	0x00000002
#define IRQF_TRIGGER_HIGH	0x00000004
#define IRQF_TRIGGER_LOW	0x00000008
#define IRQF_TRIGGER_MASK	(IRQF_TRIGGER_HIGH | IRQF_TRIGGER_LOW | \
				 IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING)
#define IRQF_TRIGGER_PROBE	0x00000010
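
/*
 * Usage sketch (editor's example, not part of the original header;
 * the "mydev" names are hypothetical): a device that signals on the
 * falling edge passes the trigger type along with its other flags:
 *
 *	ret = request_irq(irq, mydev_isr, IRQF_TRIGGER_FALLING,
 *			  "mydev", mydev);
 *	if (ret)
 *		return ret;
 */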

/*
 * These flags are used only by the kernel as part of the
 * irq handling routines.
 *
 * IRQF_DISABLED - keep irqs disabled when calling the action handler.
 *                 DEPRECATED. This flag is a NOOP and scheduled to be removed
 * IRQF_SHARED - allow sharing the irq among several devices
 * IRQF_PROBE_SHARED - set by callers when they expect sharing mismatches to occur
 * IRQF_TIMER - Flag to mark this interrupt as timer interrupt
 * IRQF_PERCPU - Interrupt is per cpu
 * IRQF_NOBALANCING - Flag to exclude this interrupt from irq balancing
 * IRQF_IRQPOLL - Interrupt is used for polling (only the interrupt that is
 *                registered first in a shared interrupt is considered for
 *                performance reasons)
 * IRQF_ONESHOT - Interrupt is not reenabled after the hardirq handler finished.
 *                Used by threaded interrupts which need to keep the
 *                irq line disabled until the threaded handler has been run.
 * IRQF_NO_SUSPEND - Do not disable this IRQ during suspend
 * IRQF_FORCE_RESUME - Force enable it on resume even if IRQF_NO_SUSPEND is set
 * IRQF_NO_THREAD - Interrupt cannot be threaded
 * IRQF_EARLY_RESUME - Resume IRQ early during syscore instead of at device
 *                resume time.
 */
#define IRQF_DISABLED		0x00000020
#define IRQF_SHARED		0x00000080
#define IRQF_PROBE_SHARED	0x00000100
#define __IRQF_TIMER		0x00000200
#define IRQF_PERCPU		0x00000400
#define IRQF_NOBALANCING	0x00000800
#define IRQF_IRQPOLL		0x00001000
#define IRQF_ONESHOT		0x00002000
#define IRQF_NO_SUSPEND		0x00004000
#define IRQF_FORCE_RESUME	0x00008000
#define IRQF_NO_THREAD		0x00010000
#define IRQF_EARLY_RESUME	0x00020000

#define IRQF_TIMER		(__IRQF_TIMER | IRQF_NO_SUSPEND | IRQF_NO_THREAD)
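
/*
 * Usage sketch (editor's example, hypothetical names): a driver on a
 * shared, level-triggered line must pass IRQF_SHARED and a non-NULL
 * dev_id, so that free_irq() can later identify which action to
 * remove:
 *
 *	ret = request_irq(pdev->irq, mydev_isr, IRQF_SHARED,
 *			  "mydev", mydev);
 *	...
 *	free_irq(pdev->irq, mydev);	// same dev_id cookie
 */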

/*
 * These values can be returned by request_any_context_irq() and
 * describe the context the interrupt will be run in.
 *
 * IRQC_IS_HARDIRQ - interrupt runs in hardirq context
 * IRQC_IS_NESTED - interrupt runs in a nested threaded context
 */
enum {
	IRQC_IS_HARDIRQ	= 0,
	IRQC_IS_NESTED,
};

typedef irqreturn_t (*irq_handler_t)(int, void *);

/**
 * struct irqaction - per interrupt action descriptor
 * @handler:	interrupt handler function
 * @name:	name of the device
 * @dev_id:	cookie to identify the device
 * @percpu_dev_id:	cookie to identify the device
 * @next:	pointer to the next irqaction for shared interrupts
 * @irq:	interrupt number
 * @flags:	flags (see IRQF_* above)
 * @thread_fn:	interrupt handler function for threaded interrupts
 * @thread:	thread pointer for threaded interrupts
 * @thread_flags:	flags related to @thread
 * @thread_mask:	bitmask for keeping track of @thread activity
 * @dir:	pointer to the proc/irq/NN/name entry
 */
struct irqaction {
	irq_handler_t		handler;
	void			*dev_id;
	void __percpu		*percpu_dev_id;
	struct irqaction	*next;
	irq_handler_t		thread_fn;
	struct task_struct	*thread;
	unsigned int		irq;
	unsigned int		flags;
	unsigned long		thread_flags;
	unsigned long		thread_mask;
	const char		*name;
	struct proc_dir_entry	*dir;
} ____cacheline_internodealigned_in_smp;

extern irqreturn_t no_action(int cpl, void *dev_id);

extern int __must_check
request_threaded_irq(unsigned int irq, irq_handler_t handler,
		     irq_handler_t thread_fn,
		     unsigned long flags, const char *name, void *dev);

static inline int __must_check
request_irq(unsigned int irq, irq_handler_t handler, unsigned long flags,
	    const char *name, void *dev)
{
	return request_threaded_irq(irq, handler, NULL, flags, name, dev);
}
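
/*
 * Usage sketch (editor's example, hypothetical names): a threaded
 * handler pair. The hard handler runs in hardirq context, only checks
 * whether its device raised the interrupt, and defers the heavy
 * (possibly sleeping) work to the thread; IRQF_ONESHOT keeps the line
 * masked until the thread function has finished.
 *
 *	static irqreturn_t mydev_hard_isr(int irq, void *dev_id)
 *	{
 *		struct mydev *d = dev_id;
 *
 *		if (!mydev_irq_pending(d))
 *			return IRQ_NONE;
 *		return IRQ_WAKE_THREAD;
 *	}
 *
 *	static irqreturn_t mydev_thread_fn(int irq, void *dev_id)
 *	{
 *		mydev_handle_events(dev_id);	// may sleep here
 *		return IRQ_HANDLED;
 *	}
 *
 *	ret = request_threaded_irq(irq, mydev_hard_isr, mydev_thread_fn,
 *				   IRQF_ONESHOT, "mydev", d);
 */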

extern int __must_check
request_any_context_irq(unsigned int irq, irq_handler_t handler,
			unsigned long flags, const char *name, void *dev_id);

extern int __must_check
request_percpu_irq(unsigned int irq, irq_handler_t handler,
		   const char *devname, void __percpu *percpu_dev_id);

extern void free_irq(unsigned int, void *);
extern void free_percpu_irq(unsigned int, void __percpu *);

struct device;

extern int __must_check
devm_request_threaded_irq(struct device *dev, unsigned int irq,
			  irq_handler_t handler, irq_handler_t thread_fn,
			  unsigned long irqflags, const char *devname,
			  void *dev_id);

static inline int __must_check
devm_request_irq(struct device *dev, unsigned int irq, irq_handler_t handler,
		 unsigned long irqflags, const char *devname, void *dev_id)
{
	return devm_request_threaded_irq(dev, irq, handler, NULL, irqflags,
					 devname, dev_id);
}
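
/*
 * Usage sketch (editor's example, hypothetical names): with the devm_
 * variants the IRQ is released automatically when the device is
 * unbound, so probe() needs no matching free_irq():
 *
 *	static int mydev_probe(struct platform_device *pdev)
 *	{
 *		struct mydev *d = mydev_setup(pdev);
 *		int irq = platform_get_irq(pdev, 0);
 *
 *		if (irq < 0)
 *			return irq;
 *		return devm_request_irq(&pdev->dev, irq, mydev_isr, 0,
 *					dev_name(&pdev->dev), d);
 *	}
 */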

extern int __must_check
devm_request_any_context_irq(struct device *dev, unsigned int irq,
			     irq_handler_t handler, unsigned long irqflags,
			     const char *devname, void *dev_id);

extern void devm_free_irq(struct device *dev, unsigned int irq, void *dev_id);

/*
 * On lockdep we don't want to enable hardirqs in hardirq
 * context. Use local_irq_enable_in_hardirq() to annotate
 * kernel code that has to do this nevertheless (pretty much
 * the only valid case is for old/broken hardware that is
 * insanely slow).
 *
 * NOTE: in theory this might break fragile code that relies
 * on hardirq delivery - in practice we don't seem to have such
 * places left. So the only effect should be slightly increased
 * irqs-off latencies.
 */
#ifdef CONFIG_LOCKDEP
# define local_irq_enable_in_hardirq()	do { } while (0)
#else
# define local_irq_enable_in_hardirq()	local_irq_enable()
#endif

extern void disable_irq_nosync(unsigned int irq);
extern void disable_irq(unsigned int irq);
extern void disable_percpu_irq(unsigned int irq);
extern void enable_irq(unsigned int irq);
extern void enable_percpu_irq(unsigned int irq, unsigned int type);
extern void irq_wake_thread(unsigned int irq, void *dev_id);

/* The following three functions are for the core kernel use only. */
extern void suspend_device_irqs(void);
extern void resume_device_irqs(void);
#ifdef CONFIG_PM_SLEEP
extern int check_wakeup_irqs(void);
#else
static inline int check_wakeup_irqs(void) { return 0; }
#endif

#if defined(CONFIG_SMP)

extern cpumask_var_t irq_default_affinity;

/* Internal implementation. Use the helpers below */
extern int __irq_set_affinity(unsigned int irq, const struct cpumask *cpumask,
			      bool force);

/**
 * irq_set_affinity - Set the irq affinity of a given irq
 * @irq:	Interrupt to set affinity
 * @cpumask:	cpumask
 *
 * Fails if cpumask does not contain an online CPU
 */
static inline int
irq_set_affinity(unsigned int irq, const struct cpumask *cpumask)
{
	return __irq_set_affinity(irq, cpumask, false);
}
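
/*
 * Usage sketch (editor's example): pin an interrupt to CPU 2, after
 * checking that the irq chip supports affinity changes at all:
 *
 *	if (irq_can_set_affinity(irq))
 *		irq_set_affinity(irq, cpumask_of(2));
 */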

/**
 * irq_force_affinity - Force the irq affinity of a given irq
 * @irq:	Interrupt to set affinity
 * @cpumask:	cpumask
 *
 * Same as irq_set_affinity, but without checking the mask against
 * online cpus.
 *
 * Solely for low level cpu hotplug code, where we need to make per
 * cpu interrupts affine before the cpu becomes online.
 */
static inline int
irq_force_affinity(unsigned int irq, const struct cpumask *cpumask)
{
	return __irq_set_affinity(irq, cpumask, true);
}

extern int irq_can_set_affinity(unsigned int irq);
extern int irq_select_affinity(unsigned int irq);

extern int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m);

/**
 * struct irq_affinity_notify - context for notification of IRQ affinity changes
 * @irq:		Interrupt to which notification applies
 * @kref:		Reference count, for internal use
 * @work:		Work item, for internal use
 * @notify:		Function to be called on change. This will be
 *			called in process context.
 * @release:		Function to be called on release. This will be
 *			called in process context. Once registered, the
 *			structure must only be freed when this function is
 *			called.
 */
struct irq_affinity_notify {
	unsigned int irq;
	struct kref kref;
	struct work_struct work;
	void (*notify)(struct irq_affinity_notify *, const cpumask_t *mask);
	void (*release)(struct kref *ref);
};

extern int
irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify);
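
/*
 * Usage sketch (editor's example, hypothetical names): a driver that
 * keeps per-CPU state can register a notifier and rebalance when the
 * affinity changes; release() drops whatever reference the driver
 * holds on the containing object:
 *
 *	static void mydev_affinity_notify(struct irq_affinity_notify *n,
 *					  const cpumask_t *mask)
 *	{
 *		struct mydev *d = container_of(n, struct mydev, notify);
 *
 *		mydev_rebalance(d, mask);
 *	}
 *
 *	static void mydev_affinity_release(struct kref *ref)
 *	{
 *		// nothing to free for a notifier embedded in mydev
 *	}
 *
 *	d->notify.notify = mydev_affinity_notify;
 *	d->notify.release = mydev_affinity_release;
 *	irq_set_affinity_notifier(irq, &d->notify);
 */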

#else /* CONFIG_SMP */

static inline int irq_set_affinity(unsigned int irq, const struct cpumask *m)
{
	return -EINVAL;
}

static inline int irq_can_set_affinity(unsigned int irq)
{
	return 0;
}

static inline int irq_select_affinity(unsigned int irq)  { return 0; }

static inline int irq_set_affinity_hint(unsigned int irq,
					const struct cpumask *m)
{
	return -EINVAL;
}
#endif /* CONFIG_SMP */

/*
 * Special lockdep variants of irq disabling/enabling.
 * These should be used for locking constructs that know
 * that a particular irq context is disabled and is the
 * only irq-context user of a lock, so that it's safe to
 * take the lock in the irq-disabled section without
 * disabling hardirqs.
 *
 * On !CONFIG_LOCKDEP they are equivalent to the normal
 * irq disable/enable methods.
 */
static inline void disable_irq_nosync_lockdep(unsigned int irq)
{
	disable_irq_nosync(irq);
#ifdef CONFIG_LOCKDEP
	local_irq_disable();
#endif
}

static inline void disable_irq_nosync_lockdep_irqsave(unsigned int irq, unsigned long *flags)
{
	disable_irq_nosync(irq);
#ifdef CONFIG_LOCKDEP
	local_irq_save(*flags);
#endif
}

static inline void disable_irq_lockdep(unsigned int irq)
{
	disable_irq(irq);
#ifdef CONFIG_LOCKDEP
	local_irq_disable();
#endif
}

static inline void enable_irq_lockdep(unsigned int irq)
{
#ifdef CONFIG_LOCKDEP
	local_irq_enable();
#endif
	enable_irq(irq);
}

static inline void enable_irq_lockdep_irqrestore(unsigned int irq, unsigned long *flags)
{
#ifdef CONFIG_LOCKDEP
	local_irq_restore(*flags);
#endif
	enable_irq(irq);
}

/* IRQ wakeup (PM) control: */
extern int irq_set_irq_wake(unsigned int irq, unsigned int on);

static inline int enable_irq_wake(unsigned int irq)
{
	return irq_set_irq_wake(irq, 1);
}

static inline int disable_irq_wake(unsigned int irq)
{
	return irq_set_irq_wake(irq, 0);
}
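
/*
 * Usage sketch (editor's example, hypothetical names): arm the IRQ as
 * a wakeup source only across suspend, keeping the enable/disable
 * calls balanced:
 *
 *	static int mydev_suspend(struct device *dev)
 *	{
 *		struct mydev *d = dev_get_drvdata(dev);
 *
 *		return enable_irq_wake(d->irq);
 *	}
 *
 *	static int mydev_resume(struct device *dev)
 *	{
 *		struct mydev *d = dev_get_drvdata(dev);
 *
 *		return disable_irq_wake(d->irq);
 *	}
 */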

#ifdef CONFIG_IRQ_FORCED_THREADING
extern bool force_irqthreads;
#else
#define force_irqthreads	(0)
#endif

#ifndef __ARCH_SET_SOFTIRQ_PENDING
#define set_softirq_pending(x) (local_softirq_pending() = (x))
#define or_softirq_pending(x)  (local_softirq_pending() |= (x))
#endif

/* Some architectures might implement lazy enabling/disabling of
 * interrupts. In some cases, such as stop_machine, we might want
 * to ensure that after a local_irq_disable(), interrupts have
 * really been disabled in hardware. Such architectures need to
 * implement the following hook.
 */
#ifndef hard_irq_disable
#define hard_irq_disable()	do { } while(0)
#endif

/* PLEASE, avoid allocating new softirqs unless you _really_ need
   high frequency threaded job scheduling. For almost all purposes
   tasklets are more than enough. E.g. all serial device BHs et
   al. should be converted to tasklets, not to softirqs.
 */

enum
{
	HI_SOFTIRQ=0,
	TIMER_SOFTIRQ,
	NET_TX_SOFTIRQ,
	NET_RX_SOFTIRQ,
	BLOCK_SOFTIRQ,
	BLOCK_IOPOLL_SOFTIRQ,
	TASKLET_SOFTIRQ,
	SCHED_SOFTIRQ,
	HRTIMER_SOFTIRQ,
	RCU_SOFTIRQ,	/* Preferably RCU should always be the last softirq */

	NR_SOFTIRQS
};

#define SOFTIRQ_STOP_IDLE_MASK (~(1 << RCU_SOFTIRQ))

/* map softirq index to softirq name. update 'softirq_to_name' in
 * kernel/softirq.c when adding a new softirq.
 */
extern const char * const softirq_to_name[NR_SOFTIRQS];

/* softirq mask and active fields moved to irq_cpustat_t in
 * asm/hardirq.h to get better cache usage. KAO
 */

struct softirq_action
{
	void	(*action)(struct softirq_action *);
};

asmlinkage void do_softirq(void);
asmlinkage void __do_softirq(void);

#ifdef __ARCH_HAS_DO_SOFTIRQ
void do_softirq_own_stack(void);
#else
static inline void do_softirq_own_stack(void)
{
	__do_softirq();
}
#endif

extern void open_softirq(int nr, void (*action)(struct softirq_action *));
extern void softirq_init(void);
extern void __raise_softirq_irqoff(unsigned int nr);

extern void raise_softirq_irqoff(unsigned int nr);
extern void raise_softirq(unsigned int nr);
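
/*
 * Usage sketch (editor's example): softirqs are reserved for core
 * kernel code, which registers a handler once at init and raises the
 * softirq from interrupt context later. MY_SOFTIRQ is a stand-in for
 * one of the enum entries above, not a real index:
 *
 *	static void my_softirq_action(struct softirq_action *a)
 *	{
 *		// runs in softirq context with hardirqs enabled
 *	}
 *
 *	open_softirq(MY_SOFTIRQ, my_softirq_action);	// at boot
 *	raise_softirq(MY_SOFTIRQ);			// from irq path
 */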

DECLARE_PER_CPU(struct task_struct *, ksoftirqd);

static inline struct task_struct *this_cpu_ksoftirqd(void)
{
	return this_cpu_read(ksoftirqd);
}

/* Tasklets --- multithreaded analogue of BHs.

   The main feature distinguishing them from generic softirqs: a
   tasklet runs on only one CPU at a time.

   The main feature distinguishing them from BHs: different tasklets
   may run simultaneously on different CPUs.

   Properties:
   * If tasklet_schedule() is called, the tasklet is guaranteed to be
     executed on some cpu at least once after this.
   * If the tasklet is already scheduled, but its execution has not yet
     started, it will be executed only once.
   * If this tasklet is already running on another CPU (or schedule is
     called from the tasklet itself), it is rescheduled for later.
   * A tasklet is strictly serialized with respect to itself, but not
     with respect to other tasklets. If a client needs cross-tasklet
     synchronization, it must provide it with spinlocks.
 */

struct tasklet_struct
{
	struct tasklet_struct *next;
	unsigned long state;
	atomic_t count;
	void (*func)(unsigned long);
	unsigned long data;
};

#define DECLARE_TASKLET(name, func, data) \
struct tasklet_struct name = { NULL, 0, ATOMIC_INIT(0), func, data }

#define DECLARE_TASKLET_DISABLED(name, func, data) \
struct tasklet_struct name = { NULL, 0, ATOMIC_INIT(1), func, data }
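
/*
 * Usage sketch (editor's example, hypothetical names): declare a
 * tasklet statically and schedule it from the hard interrupt handler;
 * the deferred function then runs later in softirq context:
 *
 *	static void mydev_do_work(unsigned long data);
 *	static DECLARE_TASKLET(mydev_tasklet, mydev_do_work, 0);
 *
 *	static irqreturn_t mydev_isr(int irq, void *dev_id)
 *	{
 *		mydev_ack_irq(dev_id);
 *		tasklet_schedule(&mydev_tasklet);
 *		return IRQ_HANDLED;
 *	}
 */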

enum
{
	TASKLET_STATE_SCHED,	/* Tasklet is scheduled for execution */
	TASKLET_STATE_RUN	/* Tasklet is running (SMP only) */
};

#ifdef CONFIG_SMP
static inline int tasklet_trylock(struct tasklet_struct *t)
{
	return !test_and_set_bit(TASKLET_STATE_RUN, &(t)->state);
}

static inline void tasklet_unlock(struct tasklet_struct *t)
{
	smp_mb__before_clear_bit();
	clear_bit(TASKLET_STATE_RUN, &(t)->state);
}

static inline void tasklet_unlock_wait(struct tasklet_struct *t)
{
	while (test_bit(TASKLET_STATE_RUN, &(t)->state)) { barrier(); }
}
#else
#define tasklet_trylock(t) 1
#define tasklet_unlock_wait(t) do { } while (0)
#define tasklet_unlock(t) do { } while (0)
#endif

extern void __tasklet_schedule(struct tasklet_struct *t);

static inline void tasklet_schedule(struct tasklet_struct *t)
{
	if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
		__tasklet_schedule(t);
}

extern void __tasklet_hi_schedule(struct tasklet_struct *t);

static inline void tasklet_hi_schedule(struct tasklet_struct *t)
{
	if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
		__tasklet_hi_schedule(t);
}

extern void __tasklet_hi_schedule_first(struct tasklet_struct *t);

/*
 * This version avoids touching any other tasklets. Needed for kmemcheck
 * in order not to take any page faults while enqueueing this tasklet;
 * consider VERY carefully whether you really need this or
 * tasklet_hi_schedule()...
 */
static inline void tasklet_hi_schedule_first(struct tasklet_struct *t)
{
	if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
		__tasklet_hi_schedule_first(t);
}

static inline void tasklet_disable_nosync(struct tasklet_struct *t)
{
	atomic_inc(&t->count);
	smp_mb__after_atomic_inc();
}

static inline void tasklet_disable(struct tasklet_struct *t)
{
	tasklet_disable_nosync(t);
	tasklet_unlock_wait(t);
	smp_mb();
}
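
/*
 * Usage sketch (editor's example): fence a tasklet while reprogramming
 * hardware; tasklet_disable() additionally waits for a currently
 * running instance to finish before returning:
 *
 *	tasklet_disable(&mydev_tasklet);
 *	mydev_reprogram(d);		// tasklet cannot run here
 *	tasklet_enable(&mydev_tasklet);
 */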

static inline void tasklet_enable(struct tasklet_struct *t)
{
	smp_mb__before_atomic_dec();
	atomic_dec(&t->count);
}

static inline void tasklet_hi_enable(struct tasklet_struct *t)
{
	smp_mb__before_atomic_dec();
	atomic_dec(&t->count);
}

extern void tasklet_kill(struct tasklet_struct *t);
extern void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu);
extern void tasklet_init(struct tasklet_struct *t,
			 void (*func)(unsigned long), unsigned long data);

struct tasklet_hrtimer {
	struct hrtimer		timer;
	struct tasklet_struct	tasklet;
	enum hrtimer_restart	(*function)(struct hrtimer *);
};

extern void
tasklet_hrtimer_init(struct tasklet_hrtimer *ttimer,
		     enum hrtimer_restart (*function)(struct hrtimer *),
		     clockid_t which_clock, enum hrtimer_mode mode);

static inline
int tasklet_hrtimer_start(struct tasklet_hrtimer *ttimer, ktime_t time,
			  const enum hrtimer_mode mode)
{
	return hrtimer_start(&ttimer->timer, time, mode);
}

static inline
void tasklet_hrtimer_cancel(struct tasklet_hrtimer *ttimer)
{
	hrtimer_cancel(&ttimer->timer);
	tasklet_kill(&ttimer->tasklet);
}
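
/*
 * Usage sketch (editor's example, hypothetical names): fire a callback
 * 10ms from now. The helpers arrange for the hrtimer to schedule the
 * tasklet, so mydev_timeout() itself runs in tasklet (softirq)
 * context:
 *
 *	static enum hrtimer_restart mydev_timeout(struct hrtimer *t)
 *	{
 *		struct mydev *d = container_of(t, struct mydev,
 *					       ttimer.timer);
 *
 *		mydev_handle_timeout(d);
 *		return HRTIMER_NORESTART;
 *	}
 *
 *	tasklet_hrtimer_init(&d->ttimer, mydev_timeout,
 *			     CLOCK_MONOTONIC, HRTIMER_MODE_REL);
 *	tasklet_hrtimer_start(&d->ttimer, ms_to_ktime(10),
 *			      HRTIMER_MODE_REL);
 */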

/*
 * Autoprobing for irqs:
 *
 * probe_irq_on() and probe_irq_off() provide robust primitives
 * for accurate IRQ probing during kernel initialization. They are
 * reasonably simple to use, are not "fooled" by spurious interrupts,
 * and, unlike other attempts at IRQ probing, they do not get hung on
 * stuck interrupts (such as unused PS2 mouse interfaces on ASUS boards).
 *
 * For reasonably foolproof probing, use them as follows:
 *
 * 1. clear and/or mask the device's internal interrupt.
 * 2. sti();
 * 3. irqs = probe_irq_on();      // "take over" all unassigned idle IRQs
 * 4. enable the device and cause it to trigger an interrupt.
 * 5. wait for the device to interrupt, using non-intrusive polling or a delay.
 * 6. irq = probe_irq_off(irqs);  // get IRQ number, 0=none, negative=multiple
 * 7. service the device to clear its pending interrupt.
 * 8. loop again if paranoia is required.
 *
 * probe_irq_on() returns a mask of allocated irq's.
 *
 * probe_irq_off() takes the mask as a parameter,
 * and returns the irq number which occurred,
 * or zero if none occurred, or a negative irq number
 * if more than one irq occurred.
 */
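
/*
 * A sketch of the recipe above (editor's example, with hypothetical
 * mydev_* helpers):
 *
 *	unsigned long mask;
 *	int irq;
 *
 *	mydev_mask_irq(d);		// step 1
 *	local_irq_enable();		// step 2
 *	mask = probe_irq_on();		// step 3
 *	mydev_trigger_irq(d);		// step 4
 *	mdelay(10);			// step 5
 *	irq = probe_irq_off(mask);	// step 6
 *	mydev_ack_irq(d);		// step 7
 *	if (irq <= 0)
 *		return -ENODEV;		// none, or more than one, fired
 */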

#if !defined(CONFIG_GENERIC_IRQ_PROBE)
static inline unsigned long probe_irq_on(void)
{
	return 0;
}
static inline int probe_irq_off(unsigned long val)
{
	return 0;
}
static inline unsigned int probe_irq_mask(unsigned long val)
{
	return 0;
}
#else
extern unsigned long probe_irq_on(void);	/* returns 0 on failure */
extern int probe_irq_off(unsigned long);	/* returns 0 or negative on failure */
extern unsigned int probe_irq_mask(unsigned long);	/* returns mask of ISA interrupts */
#endif

#ifdef CONFIG_PROC_FS
/* Initialize /proc/irq/ */
extern void init_irq_proc(void);
#else
static inline void init_irq_proc(void)
{
}
#endif

struct seq_file;
int show_interrupts(struct seq_file *p, void *v);
int arch_show_interrupts(struct seq_file *p, int prec);

extern int early_irq_init(void);
extern int arch_probe_nr_irqs(void);
extern int arch_early_irq_init(void);

#endif