/*
 * Derived from arch/i386/kernel/irq.c
 *	Copyright (C) 1992 Linus Torvalds
 * Adapted from arch/i386 by Gary Thomas
 *	Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 * Updated and modified by Cort Dougan <cort@fsmlabs.com>
 *	Copyright (C) 1996-2001 Cort Dougan
 * Adapted for Power Macintosh by Paul Mackerras
 *	Copyright (C) 1996 Paul Mackerras (paulus@cs.anu.edu.au)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * This file contains the code used by various IRQ handling routines:
 * asking for different IRQs should be done through these routines
 * instead of just grabbing them. Thus setups with different IRQ numbers
 * shouldn't result in any weird surprises, and installing new handlers
 * should be easier.
 *
 * The MPC8xx has an interrupt mask in the SIU. If a bit is set, the
 * interrupt is _enabled_. As expected, IRQ0 is bit 0 in the 32-bit
 * mask register (of which only 16 are defined), hence the weird shifting
 * and complement of the cached_irq_mask. I want to be able to stuff
 * this right into the SIU SMASK register.
 * Many of the prep/chrp functions are conditionally compiled on CONFIG_8xx
 * to reduce code space and undefined function references.
 */
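/*
 * Illustrative example of the 8xx scheme described above (hypothetical
 * names, not code from this file): with IRQ0 in the most significant
 * bit of the register, a cache of the *enabled* sources can be written
 * straight into SMASK, and masking IRQ n becomes a shift plus a
 * complement:
 *
 *	cached_irq_mask &= ~(1u << (31 - n));        // disable source n, MSB-first
 *	out_be32(&siu->sc_simask, cached_irq_mask);  // stuff directly into SMASK
 */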
#include <linux/export.h>
#include <linux/threads.h>
#include <linux/kernel_stat.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/ptrace.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/timex.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/irq.h>
#include <linux/seq_file.h>
#include <linux/cpumask.h>
#include <linux/profile.h>
#include <linux/bitops.h>
#include <linux/list.h>
#include <linux/radix-tree.h>
#include <linux/mutex.h>
#include <linux/bootmem.h>
#include <linux/pci.h>
#include <linux/debugfs.h>
#include <linux/of_irq.h>

#include <asm/uaccess.h>
#include <asm/system.h>
#include <asm/pgtable.h>
#include <asm/cache.h>
#include <asm/ptrace.h>
#include <asm/machdep.h>
#include <asm/firmware.h>
#include <asm/lv1call.h>

#define CREATE_TRACE_POINTS
#include <asm/trace.h>
DEFINE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
EXPORT_PER_CPU_SYMBOL(irq_stat);

int __irq_offset_value;

#ifdef CONFIG_PPC32
EXPORT_SYMBOL(__irq_offset_value);
atomic_t ppc_n_lost_interrupts;

#ifdef CONFIG_TAU_INT
extern int tau_initialized;
extern int tau_interrupts(int);
#endif
#endif /* CONFIG_PPC32 */
#ifdef CONFIG_PPC64

#ifndef CONFIG_SPARSE_IRQ
EXPORT_SYMBOL(irq_desc);
#endif

int distribute_irqs = 1;
static inline notrace unsigned long get_hard_enabled(void)
{
	unsigned long enabled;

	__asm__ __volatile__("lbz %0,%1(13)"
	: "=r" (enabled) : "i" (offsetof(struct paca_struct, hard_enabled)));

	return enabled;
}

static inline notrace void set_soft_enabled(unsigned long enable)
{
	__asm__ __volatile__("stb %0,%1(13)"
	: : "r" (enable), "i" (offsetof(struct paca_struct, soft_enabled)));
}

static inline notrace void decrementer_check_overflow(void)
{
	u64 now = get_tb_or_rtc();
	u64 *next_tb;

	preempt_disable();
	next_tb = &__get_cpu_var(decrementers_next_tb);
	if (now >= *next_tb)
		set_dec(1);
	preempt_enable();
}
notrace void arch_local_irq_restore(unsigned long en)
{
	/*
	 * get_paca()->soft_enabled = en;
	 * Is it ever valid to use local_irq_restore(0) when soft_enabled is 1?
	 * That was allowed before, and in such a case we do need to take care
	 * that gcc will set soft_enabled directly via r13, not choose to use
	 * an intermediate register, lest we're preempted to a different cpu.
	 */
	set_soft_enabled(en);
	if (!en)
		return;

#ifdef CONFIG_PPC_STD_MMU_64
	if (firmware_has_feature(FW_FEATURE_ISERIES)) {
		/*
		 * Do we need to disable preemption here? Not really: in the
		 * unlikely event that we're preempted to a different cpu in
		 * between getting r13, loading its lppaca_ptr, and loading
		 * its any_int, we might call iseries_handle_interrupts without
		 * an interrupt pending on the new cpu, but that's no disaster,
		 * is it? And the business of preempting us off the old cpu
		 * would itself involve a local_irq_restore which handles the
		 * interrupt to that cpu.
		 *
		 * But use "local_paca->lppaca_ptr" instead of "get_lppaca()"
		 * to avoid any preemption checking added into get_paca().
		 */
		if (local_paca->lppaca_ptr->int_dword.any_int)
			iseries_handle_interrupts();
	}
#endif /* CONFIG_PPC_STD_MMU_64 */

	/*
	 * if (get_paca()->hard_enabled) return;
	 * But again we need to take care that gcc gets hard_enabled directly
	 * via r13, not choose to use an intermediate register, lest we're
	 * preempted to a different cpu in between the two instructions.
	 */
	if (get_hard_enabled())
		return;

	/*
	 * Need to hard-enable interrupts here. Since currently disabled,
	 * no need to take further asm precautions against preemption; but
	 * use local_paca instead of get_paca() to avoid preemption checking.
	 */
	local_paca->hard_enabled = en;

	/*
	 * Trigger the decrementer if we have a pending event. Some processors
	 * only trigger on edge transitions of the sign bit. We might also
	 * have disabled interrupts long enough that the decrementer wrapped
	 * to positive.
	 */
	decrementer_check_overflow();

	/*
	 * Force the delivery of pending soft-disabled interrupts on PS3.
	 * Any HV call will have this side effect.
	 */
	if (firmware_has_feature(FW_FEATURE_PS3_LV1)) {
		u64 tmp, tmp2;
		lv1_get_version_info(&tmp, &tmp2);
	}

	__hard_irq_enable();
}
EXPORT_SYMBOL(arch_local_irq_restore);
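/*
 * A C-level sketch (illustrative only, not code from this file) of the
 * saving side of this lazy-disable scheme. The real arch_local_irq_save()
 * lives in asm/hw_irq.h and is written as a single asm sequence so that
 * soft_enabled is accessed r13-relative, for the same preemption reasons
 * discussed in arch_local_irq_restore() above.
 */
static inline unsigned long example_local_irq_save(void)
{
	unsigned long flags = local_paca->soft_enabled;

	/* Soft-disable only: MSR[EE] is left alone until an interrupt
	 * actually arrives and the low-level handler hard-disables. */
	set_soft_enabled(0);

	return flags;	/* handed back to arch_local_irq_restore() later */
}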
#endif /* CONFIG_PPC64 */
int arch_show_interrupts(struct seq_file *p, int prec)
{
	int j;

#if defined(CONFIG_PPC32) && defined(CONFIG_TAU_INT)
	if (tau_initialized) {
		seq_printf(p, "%*s: ", prec, "TAU");
		for_each_online_cpu(j)
			seq_printf(p, "%10u ", tau_interrupts(j));
		seq_puts(p, "  PowerPC Thermal Assist (cpu temp)\n");
	}
#endif /* CONFIG_PPC32 && CONFIG_TAU_INT */

	seq_printf(p, "%*s: ", prec, "LOC");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", per_cpu(irq_stat, j).timer_irqs);
	seq_printf(p, "  Local timer interrupts\n");

	seq_printf(p, "%*s: ", prec, "SPU");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", per_cpu(irq_stat, j).spurious_irqs);
	seq_printf(p, "  Spurious interrupts\n");

	seq_printf(p, "%*s: ", prec, "CNT");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", per_cpu(irq_stat, j).pmu_irqs);
	seq_printf(p, "  Performance monitoring interrupts\n");

	seq_printf(p, "%*s: ", prec, "MCE");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", per_cpu(irq_stat, j).mce_exceptions);
	seq_printf(p, "  Machine check exceptions\n");

	return 0;
}
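/*
 * For reference, the rows printed above appear at the bottom of
 * /proc/interrupts, formatted roughly like this (values illustrative,
 * one column per online cpu):
 *
 *	LOC:     12345     23456   Local timer interrupts
 *	SPU:         0         2   Spurious interrupts
 *	CNT:        17        42   Performance monitoring interrupts
 *	MCE:         0         0   Machine check exceptions
 */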
u64 arch_irq_stat_cpu(unsigned int cpu)
{
	u64 sum = per_cpu(irq_stat, cpu).timer_irqs;

	sum += per_cpu(irq_stat, cpu).pmu_irqs;
	sum += per_cpu(irq_stat, cpu).mce_exceptions;
	sum += per_cpu(irq_stat, cpu).spurious_irqs;

	return sum;
}
#ifdef CONFIG_HOTPLUG_CPU
void migrate_irqs(void)
{
	struct irq_desc *desc;
	unsigned int irq;
	static int warned;
	cpumask_var_t mask;
	const struct cpumask *map = cpu_online_mask;

	alloc_cpumask_var(&mask, GFP_KERNEL);

	for_each_irq(irq) {
		struct irq_data *data;
		struct irq_chip *chip;

		desc = irq_to_desc(irq);
		if (!desc)
			continue;

		data = irq_desc_get_irq_data(desc);
		if (irqd_is_per_cpu(data))
			continue;

		chip = irq_data_get_irq_chip(data);

		cpumask_and(mask, data->affinity, map);
		if (cpumask_any(mask) >= nr_cpu_ids) {
			printk("Breaking affinity for irq %i\n", irq);
			cpumask_copy(mask, map);
		}
		if (chip->irq_set_affinity)
			chip->irq_set_affinity(data, mask, true);
		else if (desc->action && !(warned++))
			printk("Cannot set affinity for irq %i\n", irq);
	}

	free_cpumask_var(mask);

	local_irq_enable();
	mdelay(1);
	local_irq_disable();
}
#endif
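/*
 * Context note: migrate_irqs() runs on a cpu that is about to go
 * offline, called from the hotplug path with interrupts disabled; the
 * brief enable/mdelay(1)/disable window above gives interrupts that
 * were re-targeted a chance to be delivered before the cpu disappears.
 */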
static inline void handle_one_irq(unsigned int irq)
{
	struct thread_info *curtp, *irqtp;
	unsigned long saved_sp_limit;
	struct irq_desc *desc;

	desc = irq_to_desc(irq);
	if (!desc)
		return;

	/* Switch to the irq stack to handle this */
	curtp = current_thread_info();
	irqtp = hardirq_ctx[smp_processor_id()];

	if (curtp == irqtp) {
		/* We're already on the irq stack, just handle it */
		desc->handle_irq(irq, desc);
		return;
	}

	saved_sp_limit = current->thread.ksp_limit;

	irqtp->task = curtp->task;
	irqtp->flags = 0;

	/* Copy the softirq bits in preempt_count so that the
	 * softirq checks work in the hardirq context. */
	irqtp->preempt_count = (irqtp->preempt_count & ~SOFTIRQ_MASK) |
			       (curtp->preempt_count & SOFTIRQ_MASK);

	current->thread.ksp_limit = (unsigned long)irqtp +
		_ALIGN_UP(sizeof(struct thread_info), 16);

	call_handle_irq(irq, desc, irqtp, desc->handle_irq);
	current->thread.ksp_limit = saved_sp_limit;
	irqtp->task = NULL;

	/* Set any flag that may have been set on the
	 * alternate stack
	 */
	if (irqtp->flags)
		set_bits(irqtp->flags, &curtp->flags);
}
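/*
 * Note on the stack switch above: call_handle_irq() is an asm helper
 * that runs the handler on the irq stack, roughly
 *
 *	save the current r1 at the top of irqtp's stack;
 *	point r1 into irqtp + THREAD_SIZE;
 *	call desc->handle_irq(irq, desc);
 *	restore the old r1;
 *
 * which is why ksp_limit has to be moved onto the irq stack around the
 * call and put back afterwards.
 */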
static inline void check_stack_overflow(void)
{
#ifdef CONFIG_DEBUG_STACKOVERFLOW
	long sp;

	sp = __get_SP() & (THREAD_SIZE-1);

	/* check for stack overflow: is there less than 2KB free? */
	if (unlikely(sp < (sizeof(struct thread_info) + 2048))) {
		printk("do_IRQ: stack overflow: %ld\n",
			sp - sizeof(struct thread_info));
		dump_stack();
	}
#endif
}
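/*
 * Worked example for the check above (assuming 16KB THREAD_SIZE for
 * illustration): the kernel stack is THREAD_SIZE-aligned with the
 * thread_info at its base, so "sp & (THREAD_SIZE - 1)" is the offset of
 * the stack pointer within the stack. The stack grows down towards
 * thread_info, so if sp masks to 0x800 then fewer than 2048 bytes
 * remain above thread_info and the warning fires.
 */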
void do_IRQ(struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);
	unsigned int irq;

	trace_irq_entry(regs);

	irq_enter();

	check_stack_overflow();

	irq = ppc_md.get_irq();

	if (irq != NO_IRQ && irq != NO_IRQ_IGNORE)
		handle_one_irq(irq);
	else if (irq != NO_IRQ_IGNORE)
		__get_cpu_var(irq_stat).spurious_irqs++;

	irq_exit();
	set_irq_regs(old_regs);

#ifdef CONFIG_PPC_ISERIES
	if (firmware_has_feature(FW_FEATURE_ISERIES) &&
			get_lppaca()->int_dword.fields.decr_int) {
		get_lppaca()->int_dword.fields.decr_int = 0;
		/* Signal a fake decrementer interrupt */
		timer_interrupt(regs);
	}
#endif

	trace_irq_exit(regs);
}
void __init init_IRQ(void)
{
	if (ppc_md.init_IRQ)
		ppc_md.init_IRQ();

	exc_lvl_ctx_init();

	irq_ctx_init();
}
#if defined(CONFIG_BOOKE) || defined(CONFIG_40x)
struct thread_info   *critirq_ctx[NR_CPUS] __read_mostly;
struct thread_info    *dbgirq_ctx[NR_CPUS] __read_mostly;
struct thread_info *mcheckirq_ctx[NR_CPUS] __read_mostly;

void exc_lvl_ctx_init(void)
{
	struct thread_info *tp;
	int i, cpu_nr;

	for_each_possible_cpu(i) {
#ifdef CONFIG_PPC64
		cpu_nr = i;
#else
		cpu_nr = get_hard_smp_processor_id(i);
#endif
		memset((void *)critirq_ctx[cpu_nr], 0, THREAD_SIZE);
		tp = critirq_ctx[cpu_nr];
		tp->cpu = cpu_nr;
		tp->preempt_count = 0;

#ifdef CONFIG_BOOKE
		memset((void *)dbgirq_ctx[cpu_nr], 0, THREAD_SIZE);
		tp = dbgirq_ctx[cpu_nr];
		tp->cpu = cpu_nr;
		tp->preempt_count = 0;

		memset((void *)mcheckirq_ctx[cpu_nr], 0, THREAD_SIZE);
		tp = mcheckirq_ctx[cpu_nr];
		tp->cpu = cpu_nr;
		tp->preempt_count = HARDIRQ_OFFSET;
#endif
	}
}
#endif
struct thread_info *softirq_ctx[NR_CPUS] __read_mostly;
struct thread_info *hardirq_ctx[NR_CPUS] __read_mostly;

void irq_ctx_init(void)
{
	struct thread_info *tp;
	int i;

	for_each_possible_cpu(i) {
		memset((void *)softirq_ctx[i], 0, THREAD_SIZE);
		tp = softirq_ctx[i];
		tp->cpu = i;
		tp->preempt_count = 0;

		memset((void *)hardirq_ctx[i], 0, THREAD_SIZE);
		tp = hardirq_ctx[i];
		tp->cpu = i;
		tp->preempt_count = HARDIRQ_OFFSET;
	}
}
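/*
 * Note: the THREAD_SIZE areas behind softirq_ctx[]/hardirq_ctx[] (and
 * the exception-level stacks above) are carved out early in boot by the
 * setup code; irq_ctx_init() only (re)initializes the thread_info that
 * sits at the base of each of those stacks.
 */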
static inline void do_softirq_onstack(void)
{
	struct thread_info *curtp, *irqtp;
	unsigned long saved_sp_limit = current->thread.ksp_limit;

	curtp = current_thread_info();
	irqtp = softirq_ctx[smp_processor_id()];
	irqtp->task = curtp->task;
	irqtp->flags = 0;
	current->thread.ksp_limit = (unsigned long)irqtp +
		_ALIGN_UP(sizeof(struct thread_info), 16);
	call_do_softirq(irqtp);
	current->thread.ksp_limit = saved_sp_limit;
	irqtp->task = NULL;

	/* Set any flag that may have been set on the
	 * alternate stack
	 */
	if (irqtp->flags)
		set_bits(irqtp->flags, &curtp->flags);
}
void do_softirq(void)
{
	unsigned long flags;

	if (in_interrupt())
		return;

	local_irq_save(flags);

	if (local_softirq_pending())
		do_softirq_onstack();

	local_irq_restore(flags);
}
irq_hw_number_t irqd_to_hwirq(struct irq_data *d)
{
	return d->hwirq;
}
EXPORT_SYMBOL_GPL(irqd_to_hwirq);

irq_hw_number_t virq_to_hw(unsigned int virq)
{
	struct irq_data *irq_data = irq_get_irq_data(virq);
	return WARN_ON(!irq_data) ? 0 : irq_data->hwirq;
}
EXPORT_SYMBOL_GPL(virq_to_hw);
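/*
 * Illustrative use (hypothetical helper, not part of this file): Linux
 * virq numbers and controller-local hwirq numbers are separate spaces,
 * so code holding a virq recovers the hardware line number with
 * virq_to_hw() rather than assuming the two are equal.
 */
static inline void example_report_hwirq(unsigned int virq)
{
	printk("virq %u maps to hwirq %lu\n",
	       virq, (unsigned long)virq_to_hw(virq));
}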
#ifdef CONFIG_SMP
int irq_choose_cpu(const struct cpumask *mask)
{
	int cpuid;

	if (cpumask_equal(mask, cpu_all_mask)) {
		static int irq_rover;
		static DEFINE_RAW_SPINLOCK(irq_rover_lock);
		unsigned long flags;

		/* Round-robin distribution... */
do_round_robin:
		raw_spin_lock_irqsave(&irq_rover_lock, flags);

		irq_rover = cpumask_next(irq_rover, cpu_online_mask);
		if (irq_rover >= nr_cpu_ids)
			irq_rover = cpumask_first(cpu_online_mask);

		cpuid = irq_rover;

		raw_spin_unlock_irqrestore(&irq_rover_lock, flags);
	} else {
		cpuid = cpumask_first_and(mask, cpu_online_mask);
		if (cpuid >= nr_cpu_ids)
			goto do_round_robin;
	}

	return get_hard_smp_processor_id(cpuid);
}
#else
int irq_choose_cpu(const struct cpumask *mask)
{
	return hard_smp_processor_id();
}
#endif
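/*
 * Illustrative caller (hypothetical, not part of this file): a
 * controller's affinity path can feed an irq's affinity mask through
 * irq_choose_cpu() and program the result into hardware directly, since
 * the return value is already a hard SMP processor id rather than a
 * logical cpu number.
 */
static inline int example_irq_destination(struct irq_data *d)
{
	return irq_choose_cpu(d->affinity);
}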
int arch_early_irq_init(void)
{
	return 0;
}
#ifdef CONFIG_PPC64
static int __init setup_noirqdistrib(char *str)
{
	distribute_irqs = 0;
	return 1;
}

__setup("noirqdistrib", setup_noirqdistrib);
#endif /* CONFIG_PPC64 */