1 #ifndef LINUX_HARDIRQ_H
2 #define LINUX_HARDIRQ_H
4 #include <linux/preempt.h>
5 #include <linux/lockdep.h>
6 #include <linux/ftrace_irq.h>
7 #include <linux/vtime.h>
8 #include <asm/hardirq.h>
11 extern void synchronize_irq(unsigned int irq);
12 extern bool synchronize_hardirq(unsigned int irq);
/*
 * RCU hooks for NMI entry/exit.  Tiny RCU needs no NMI-time
 * bookkeeping, so the hooks collapse to empty static inlines;
 * otherwise the real implementations are provided out of line.
 */
#if defined(CONFIG_TINY_RCU)

static inline void rcu_nmi_enter(void)
{
}

static inline void rcu_nmi_exit(void)
{
}

#else
extern void rcu_nmi_enter(void);
extern void rcu_nmi_exit(void);
#endif
/*
 * It is safe to do non-atomic ops on ->hardirq_context,
 * because NMI handlers may not preempt and the ops are
 * always balanced, so the interrupted value of ->hardirq_context
 * will always be restored.
 */
/*
 * Low-level hardirq entry: account irq time to the current task,
 * raise the HARDIRQ preempt count, then run the hardirq trace hook.
 * Wrapped in do { } while (0) so the multi-statement macro behaves
 * as a single statement at every call site.
 */
#define __irq_enter()					\
	do {						\
		account_irq_enter_time(current);	\
		preempt_count_add(HARDIRQ_OFFSET);	\
		trace_hardirq_enter();			\
	} while (0)
/*
 * Enter irq context (on NO_HZ, update jiffies):
 */
45 extern void irq_enter(void);
/*
 * Exit irq context without processing softirqs:
 */
/*
 * Low-level hardirq exit, mirror of __irq_enter(): trace hook first,
 * then account irq time and drop the HARDIRQ preempt count.
 * do { } while (0) keeps the multi-statement macro single-statement
 * safe (e.g. inside an unbraced if).
 */
#define __irq_exit()					\
	do {						\
		trace_hardirq_exit();			\
		account_irq_exit_time(current);		\
		preempt_count_sub(HARDIRQ_OFFSET);	\
	} while (0)
/*
 * Exit irq context and process softirqs if needed:
 */
60 extern void irq_exit(void);
/*
 * Enter NMI context: turn lockdep and ftrace off (neither is NMI-safe),
 * assert we are not already in an NMI, mark NMI+hardirq nesting in the
 * preempt count, notify RCU, then run the hardirq trace hook.
 * NOTE(review): the "#define nmi_enter()" head, the do { } while (0)
 * wrapper and the lockdep/ftrace/BUG_ON/RCU calls were missing from
 * this copy and have been restored to the canonical upstream form —
 * confirm against the tree.
 */
#define nmi_enter()						\
	do {							\
		lockdep_off();					\
		ftrace_nmi_enter();				\
		BUG_ON(in_nmi());				\
		preempt_count_add(NMI_OFFSET + HARDIRQ_OFFSET);	\
		rcu_nmi_enter();				\
		trace_hardirq_enter();				\
	} while (0)
/*
 * Leave NMI context: exact reverse order of nmi_enter() — trace hook,
 * RCU notification, nesting check, drop the NMI+hardirq preempt count,
 * then re-enable ftrace and lockdep.
 * NOTE(review): the "#define nmi_exit()" head, the do { } while (0)
 * wrapper and the RCU/BUG_ON/ftrace/lockdep calls were missing from
 * this copy and have been restored to the canonical upstream form —
 * confirm against the tree.
 */
#define nmi_exit()						\
	do {							\
		trace_hardirq_exit();				\
		rcu_nmi_exit();					\
		BUG_ON(!in_nmi());				\
		preempt_count_sub(NMI_OFFSET + HARDIRQ_OFFSET);	\
		ftrace_nmi_exit();				\
		lockdep_on();					\
	} while (0)
82 #endif /* LINUX_HARDIRQ_H */