/*
 * Derived from arch/i386/kernel/irq.c
 *   Copyright (C) 1992 Linus Torvalds
 * Adapted from arch/i386 by Gary Thomas
 *   Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 * Updated and modified by Cort Dougan <cort@fsmlabs.com>
 *   Copyright (C) 1996-2001 Cort Dougan
 * Adapted for Power Macintosh by Paul Mackerras
 *   Copyright (C) 1996 Paul Mackerras (paulus@cs.anu.edu.au)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * This file contains the code used by various IRQ handling routines:
 * asking for different IRQs should be done through these routines
 * instead of just grabbing them. Thus setups with different IRQ numbers
 * shouldn't result in any weird surprises, and installing new handlers
 * should be easier.
 *
 * The MPC8xx has an interrupt mask in the SIU. If a bit is set, the
 * interrupt is _enabled_. As expected, IRQ0 is bit 0 in the 32-bit
 * mask register (of which only 16 are defined), hence the weird shifting
 * and complement of the cached_irq_mask. I want to be able to stuff
 * this right into the SIU SMASK register.
 * Many of the prep/chrp functions are conditionally compiled on CONFIG_8xx
 * to reduce code space and undefined function references.
 */
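
/*
 * Illustrative sketch (not from this file): what the shifting described
 * above looks like, with IRQ0 as bit 0 counted from the most-significant
 * end of the 32-bit mask.  cached_irq_mask and siu_smask_write() are
 * hypothetical stand-ins for the 8xx PIC's state and register accessor.
 */
#if 0
static unsigned int cached_irq_mask;

static void example_8xx_mask_irq(unsigned int irq_nr)
{
	/* a set bit means "enabled", so masking clears bit (31 - irq_nr) */
	cached_irq_mask &= ~(1U << (31 - irq_nr));
	siu_smask_write(cached_irq_mask);	/* goes straight into SMASK */
}

static void example_8xx_unmask_irq(unsigned int irq_nr)
{
	cached_irq_mask |= 1U << (31 - irq_nr);
	siu_smask_write(cached_irq_mask);
}
#endif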

#include <linux/module.h>
#include <linux/threads.h>
#include <linux/kernel_stat.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/ptrace.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/timex.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/irq.h>
#include <linux/seq_file.h>
#include <linux/cpumask.h>
#include <linux/profile.h>
#include <linux/bitops.h>
#include <linux/list.h>
#include <linux/radix-tree.h>
#include <linux/mutex.h>
#include <linux/bootmem.h>
#include <linux/pci.h>
#include <linux/debugfs.h>

#include <asm/uaccess.h>
#include <asm/system.h>
#include <asm/io.h>
#include <asm/pgtable.h>
#include <asm/irq.h>
#include <asm/cache.h>
#include <asm/prom.h>
#include <asm/ptrace.h>
#include <asm/machdep.h>
#include <asm/udbg.h>
#ifdef CONFIG_PPC64
#include <asm/paca.h>
#include <asm/firmware.h>
#include <asm/lv1call.h>
#endif

int __irq_offset_value;
static int ppc_spurious_interrupts;

#ifdef CONFIG_PPC32
EXPORT_SYMBOL(__irq_offset_value);
atomic_t ppc_n_lost_interrupts;

#ifdef CONFIG_TAU_INT
extern int tau_initialized;
extern int tau_interrupts(int);
#endif
#endif /* CONFIG_PPC32 */

#ifdef CONFIG_PPC64
EXPORT_SYMBOL(irq_desc);

int distribute_irqs = 1;

static inline notrace unsigned long get_hard_enabled(void)
{
	unsigned long enabled;

	__asm__ __volatile__("lbz %0,%1(13)"
	: "=r" (enabled) : "i" (offsetof(struct paca_struct, hard_enabled)));

	return enabled;
}

static inline notrace void set_soft_enabled(unsigned long enable)
{
	__asm__ __volatile__("stb %0,%1(13)"
	: : "r" (enable), "i" (offsetof(struct paca_struct, soft_enabled)));
}

notrace void raw_local_irq_restore(unsigned long en)
{
	/*
	 * get_paca()->soft_enabled = en;
	 * Is it ever valid to use local_irq_restore(0) when soft_enabled is 1?
	 * That was allowed before, and in such a case we do need to take care
	 * that gcc will set soft_enabled directly via r13, not choose to use
	 * an intermediate register, lest we're preempted to a different cpu.
	 */
	set_soft_enabled(en);
	if (!en)
		return;

	if (firmware_has_feature(FW_FEATURE_ISERIES)) {
		/*
		 * Do we need to disable preemption here?  Not really: in the
		 * unlikely event that we're preempted to a different cpu in
		 * between getting r13, loading its lppaca_ptr, and loading
		 * its any_int, we might call iseries_handle_interrupts without
		 * an interrupt pending on the new cpu, but that's no disaster,
		 * is it?  And the business of preempting us off the old cpu
		 * would itself involve a local_irq_restore which handles the
		 * interrupt to that cpu.
		 *
		 * But use "local_paca->lppaca_ptr" instead of "get_lppaca()"
		 * to avoid any preemption checking added into get_paca().
		 */
		if (local_paca->lppaca_ptr->int_dword.any_int)
			iseries_handle_interrupts();
	}

	/*
	 * if (get_paca()->hard_enabled) return;
	 * But again we need to take care that gcc gets hard_enabled directly
	 * via r13, not choose to use an intermediate register, lest we're
	 * preempted to a different cpu in between the two instructions.
	 */
	if (get_hard_enabled())
		return;

	/*
	 * Need to hard-enable interrupts here.  Since currently disabled,
	 * no need to take further asm precautions against preemption; but
	 * use local_paca instead of get_paca() to avoid preemption checking.
	 */
	local_paca->hard_enabled = en;
	if ((int)mfspr(SPRN_DEC) < 0)
		mtspr(SPRN_DEC, 1);

	/*
	 * Force the delivery of pending soft-disabled interrupts on PS3.
	 * Any HV call will have this side effect.
	 */
	if (firmware_has_feature(FW_FEATURE_PS3_LV1)) {
		u64 tmp;
		lv1_get_version_info(&tmp);
	}

	__hard_irq_enable();
}
EXPORT_SYMBOL(raw_local_irq_restore);
#endif /* CONFIG_PPC64 */
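
/*
 * Illustrative sketch (not from this file): how the lazy-disable scheme
 * above is typically exercised.  On this configuration local_irq_disable()
 * only clears the paca soft_enabled flag; the processor stays hard-enabled
 * until an interrupt actually arrives, and raw_local_irq_restore() above
 * replays anything that was deferred in the meantime.
 */
#if 0
static void lazy_disable_example(void)
{
	unsigned long flags;

	local_irq_save(flags);	/* soft-disable: just clears soft_enabled */
	/* ...critical section: a decrementer firing here is only noted... */
	local_irq_restore(flags);	/* ends up in raw_local_irq_restore() */
}
#endif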

int show_interrupts(struct seq_file *p, void *v)
{
	int i = *(loff_t *)v, j;
	struct irqaction *action;
	struct irq_desc *desc;
	unsigned long flags;

	if (i == 0) {
		seq_puts(p, "           ");
		for_each_online_cpu(j)
			seq_printf(p, "CPU%d       ", j);
		seq_putc(p, '\n');
	}

	if (i < NR_IRQS) {
		desc = get_irq_desc(i);
		spin_lock_irqsave(&desc->lock, flags);
		action = desc->action;
		if (!action || !action->handler)
			goto skip;
		seq_printf(p, "%3d: ", i);
#ifdef CONFIG_SMP
		for_each_online_cpu(j)
			seq_printf(p, "%10u ", kstat_irqs_cpu(i, j));
#else
		seq_printf(p, "%10u ", kstat_irqs(i));
#endif /* CONFIG_SMP */
		if (desc->chip)
			seq_printf(p, " %s ", desc->chip->typename);
		else
			seq_puts(p, "  None      ");
		seq_printf(p, "%s", (desc->status & IRQ_LEVEL) ? "Level " : "Edge  ");
		seq_printf(p, "    %s", action->name);
		for (action = action->next; action; action = action->next)
			seq_printf(p, ", %s", action->name);
		seq_putc(p, '\n');
skip:
		spin_unlock_irqrestore(&desc->lock, flags);
	} else if (i == NR_IRQS) {
#if defined(CONFIG_PPC32) && defined(CONFIG_TAU_INT)
		if (tau_initialized) {
			seq_puts(p, "TAU: ");
			for_each_online_cpu(j)
				seq_printf(p, "%10u ", tau_interrupts(j));
			seq_puts(p, " PowerPC Thermal Assist (cpu temp)\n");
		}
#endif /* CONFIG_PPC32 && CONFIG_TAU_INT */
		seq_printf(p, "BAD: %10u\n", ppc_spurious_interrupts);
	}
	return 0;
}

#ifdef CONFIG_HOTPLUG_CPU
void fixup_irqs(cpumask_t map)
{
	unsigned int irq;
	static int warned;

	for_each_irq(irq) {
		cpumask_t mask;

		if (irq_desc[irq].status & IRQ_PER_CPU)
			continue;

		cpumask_and(&mask, irq_desc[irq].affinity, &map);
		if (any_online_cpu(mask) == NR_CPUS) {
			printk("Breaking affinity for irq %i\n", irq);
			mask = map;
		}
		if (irq_desc[irq].chip->set_affinity)
			irq_desc[irq].chip->set_affinity(irq, &mask);
		else if (irq_desc[irq].action && !(warned++))
			printk("Cannot set affinity for irq %i\n", irq);
	}

	local_irq_enable();
	mdelay(1);
	local_irq_disable();
}
#endif

#ifdef CONFIG_IRQSTACKS
static inline void handle_one_irq(unsigned int irq)
{
	struct thread_info *curtp, *irqtp;
	unsigned long saved_sp_limit;
	struct irq_desc *desc;
	void *handler;

	/* Switch to the irq stack to handle this */
	curtp = current_thread_info();
	irqtp = hardirq_ctx[smp_processor_id()];

	if (curtp == irqtp) {
		/* We're already on the irq stack, just handle it */
		generic_handle_irq(irq);
		return;
	}

	desc = irq_desc + irq;
	saved_sp_limit = current->thread.ksp_limit;

	handler = desc->handle_irq;
	if (handler == NULL)
		handler = &__do_IRQ;

	irqtp->task = curtp->task;
	irqtp->flags = 0;

	/* Copy the softirq bits in preempt_count so that the
	 * softirq checks work in the hardirq context. */
	irqtp->preempt_count = (irqtp->preempt_count & ~SOFTIRQ_MASK) |
			       (curtp->preempt_count & SOFTIRQ_MASK);

	current->thread.ksp_limit = (unsigned long)irqtp +
		_ALIGN_UP(sizeof(struct thread_info), 16);

	call_handle_irq(irq, desc, irqtp, handler);
	current->thread.ksp_limit = saved_sp_limit;
	irqtp->task = NULL;

	/* Set any flag that may have been set on the
	 * alternate stack
	 */
	if (irqtp->flags)
		set_bits(irqtp->flags, &curtp->flags);
}
#else
static inline void handle_one_irq(unsigned int irq)
{
	generic_handle_irq(irq);
}
#endif

static inline void check_stack_overflow(void)
{
#ifdef CONFIG_DEBUG_STACKOVERFLOW
	long sp;

	sp = __get_SP() & (THREAD_SIZE-1);

	/* check for stack overflow: is there less than 2KB free? */
	if (unlikely(sp < (sizeof(struct thread_info) + 2048))) {
		printk("do_IRQ: stack overflow: %ld\n",
			sp - sizeof(struct thread_info));
		dump_stack();
	}
#endif
}

void do_IRQ(struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);
	unsigned int irq;

	irq_enter();

	check_stack_overflow();

	irq = ppc_md.get_irq();

	if (irq != NO_IRQ && irq != NO_IRQ_IGNORE)
		handle_one_irq(irq);
	else if (irq != NO_IRQ_IGNORE)
		/* That's not SMP safe ... but who cares ? */
		ppc_spurious_interrupts++;

	irq_exit();
	set_irq_regs(old_regs);

#ifdef CONFIG_PPC_ISERIES
	if (firmware_has_feature(FW_FEATURE_ISERIES) &&
			get_lppaca()->int_dword.fields.decr_int) {
		get_lppaca()->int_dword.fields.decr_int = 0;
		/* Signal a fake decrementer interrupt */
		timer_interrupt(regs);
	}
#endif
}

void __init init_IRQ(void)
{
	if (ppc_md.init_IRQ)
		ppc_md.init_IRQ();

	exc_lvl_ctx_init();

	irq_ctx_init();
}

#if defined(CONFIG_BOOKE) || defined(CONFIG_40x)
struct thread_info *critirq_ctx[NR_CPUS] __read_mostly;
struct thread_info *dbgirq_ctx[NR_CPUS] __read_mostly;
struct thread_info *mcheckirq_ctx[NR_CPUS] __read_mostly;

void exc_lvl_ctx_init(void)
{
	struct thread_info *tp;
	int i;

	for_each_possible_cpu(i) {
		memset((void *)critirq_ctx[i], 0, THREAD_SIZE);
		tp = critirq_ctx[i];
		tp->cpu = i;
		tp->preempt_count = 0;

#ifdef CONFIG_BOOKE
		memset((void *)dbgirq_ctx[i], 0, THREAD_SIZE);
		tp = dbgirq_ctx[i];
		tp->cpu = i;
		tp->preempt_count = 0;

		memset((void *)mcheckirq_ctx[i], 0, THREAD_SIZE);
		tp = mcheckirq_ctx[i];
		tp->cpu = i;
		tp->preempt_count = HARDIRQ_OFFSET;
#endif
	}
}
#endif

#ifdef CONFIG_IRQSTACKS
struct thread_info *softirq_ctx[NR_CPUS] __read_mostly;
struct thread_info *hardirq_ctx[NR_CPUS] __read_mostly;

void irq_ctx_init(void)
{
	struct thread_info *tp;
	int i;

	for_each_possible_cpu(i) {
		memset((void *)softirq_ctx[i], 0, THREAD_SIZE);
		tp = softirq_ctx[i];
		tp->cpu = i;
		tp->preempt_count = 0;

		memset((void *)hardirq_ctx[i], 0, THREAD_SIZE);
		tp = hardirq_ctx[i];
		tp->cpu = i;
		tp->preempt_count = HARDIRQ_OFFSET;
	}
}

static inline void do_softirq_onstack(void)
{
	struct thread_info *curtp, *irqtp;
	unsigned long saved_sp_limit = current->thread.ksp_limit;

	curtp = current_thread_info();
	irqtp = softirq_ctx[smp_processor_id()];
	irqtp->task = curtp->task;
	current->thread.ksp_limit = (unsigned long)irqtp +
				    _ALIGN_UP(sizeof(struct thread_info), 16);
	call_do_softirq(irqtp);
	current->thread.ksp_limit = saved_sp_limit;
	irqtp->task = NULL;
}

#else
#define do_softirq_onstack()	__do_softirq()
#endif /* CONFIG_IRQSTACKS */

void do_softirq(void)
{
	unsigned long flags;

	if (in_interrupt())
		return;

	local_irq_save(flags);

	if (local_softirq_pending())
		do_softirq_onstack();

	local_irq_restore(flags);
}

/*
 * IRQ controller and virtual interrupts
 */

static LIST_HEAD(irq_hosts);
static DEFINE_SPINLOCK(irq_big_lock);
static unsigned int revmap_trees_allocated;
static DEFINE_MUTEX(revmap_trees_mutex);
struct irq_map_entry irq_map[NR_IRQS];
static unsigned int irq_virq_count = NR_IRQS;
static struct irq_host *irq_default_host;

irq_hw_number_t virq_to_hw(unsigned int virq)
{
	return irq_map[virq].hwirq;
}
EXPORT_SYMBOL_GPL(virq_to_hw);
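
/*
 * Illustrative sketch (not from this file): a PIC driver translating a Linux
 * virq back to its hardware number, e.g. when acking or EOIing a source.
 * pic_eoi_reg_write() is a hypothetical register accessor.
 */
#if 0
static void example_pic_eoi(unsigned int virq)
{
	irq_hw_number_t hw = virq_to_hw(virq);

	pic_eoi_reg_write(hw);
}
#endif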

static int default_irq_host_match(struct irq_host *h, struct device_node *np)
{
	return h->of_node != NULL && h->of_node == np;
}

struct irq_host *irq_alloc_host(struct device_node *of_node,
				unsigned int revmap_type,
				unsigned int revmap_arg,
				struct irq_host_ops *ops,
				irq_hw_number_t inval_irq)
{
	struct irq_host *host;
	unsigned int size = sizeof(struct irq_host);
	unsigned int i;
	unsigned int *rmap;
	unsigned long flags;

	/* Allocate structure and revmap table if using linear mapping */
	if (revmap_type == IRQ_HOST_MAP_LINEAR)
		size += revmap_arg * sizeof(unsigned int);
	host = zalloc_maybe_bootmem(size, GFP_KERNEL);
	if (host == NULL)
		return NULL;

	/* Fill structure */
	host->revmap_type = revmap_type;
	host->inval_irq = inval_irq;
	host->ops = ops;
	host->of_node = of_node_get(of_node);

	if (host->ops->match == NULL)
		host->ops->match = default_irq_host_match;

	spin_lock_irqsave(&irq_big_lock, flags);

	/* If it's a legacy controller, check for duplicates and
	 * mark it as allocated (we use the irq 0 host pointer for that)
	 */
	if (revmap_type == IRQ_HOST_MAP_LEGACY) {
		if (irq_map[0].host != NULL) {
			spin_unlock_irqrestore(&irq_big_lock, flags);
			/* If we are early boot, we can't free the structure,
			 * too bad...
			 * this will be fixed once slab is made available early
			 * instead of the current cruft
			 */
			if (mem_init_done)
				kfree(host);
			return NULL;
		}
		irq_map[0].host = host;
	}

	list_add(&host->link, &irq_hosts);
	spin_unlock_irqrestore(&irq_big_lock, flags);

	/* Additional setups per revmap type */
	switch (revmap_type) {
	case IRQ_HOST_MAP_LEGACY:
		/* 0 is always the invalid number for legacy */
		host->inval_irq = 0;
		/* setup us as the host for all legacy interrupts */
		for (i = 1; i < NUM_ISA_INTERRUPTS; i++) {
			irq_map[i].hwirq = i;
			smp_wmb();
			irq_map[i].host = host;
			smp_wmb();

			/* Clear norequest flags */
			get_irq_desc(i)->status &= ~IRQ_NOREQUEST;

			/* Legacy flags are left to default at this point,
			 * one can then use irq_create_mapping() to
			 * explicitly change them
			 */
			ops->map(host, i, i);
		}
		break;
	case IRQ_HOST_MAP_LINEAR:
		rmap = (unsigned int *)(host + 1);
		for (i = 0; i < revmap_arg; i++)
			rmap[i] = NO_IRQ;
		host->revmap_data.linear.size = revmap_arg;
		smp_wmb();
		host->revmap_data.linear.revmap = rmap;
		break;
	default:
		break;
	}

	pr_debug("irq: Allocated host of type %d @0x%p\n", revmap_type, host);

	return host;
}
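
/*
 * Illustrative sketch (not from this file): a typical interrupt controller
 * driver creating a linear host for 64 hardware sources.  The chip, the
 * ops structure and the init function are hypothetical stand-ins.
 */
#if 0
static struct irq_chip example_pic_chip;

static int example_pic_host_map(struct irq_host *h, unsigned int virq,
				irq_hw_number_t hw)
{
	/* wire each new virq to the chip and a level-type flow handler */
	set_irq_chip_and_handler(virq, &example_pic_chip, handle_level_irq);
	return 0;
}

static struct irq_host_ops example_pic_host_ops = {
	.map = example_pic_host_map,
};

static void __init example_pic_init(struct device_node *np)
{
	/* hwirq 0 is used as the "invalid" marker for this host */
	struct irq_host *host = irq_alloc_host(np, IRQ_HOST_MAP_LINEAR, 64,
					       &example_pic_host_ops, 0);
	BUG_ON(host == NULL);
}
#endif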

struct irq_host *irq_find_host(struct device_node *node)
{
	struct irq_host *h, *found = NULL;
	unsigned long flags;

	/* We might want to match the legacy controller last since
	 * it might potentially be set to match all interrupts in
	 * the absence of a device node. This isn't a problem so far
	 * yet though...
	 */
	spin_lock_irqsave(&irq_big_lock, flags);
	list_for_each_entry(h, &irq_hosts, link)
		if (h->ops->match(h, node)) {
			found = h;
			break;
		}
	spin_unlock_irqrestore(&irq_big_lock, flags);
	return found;
}
EXPORT_SYMBOL_GPL(irq_find_host);

void irq_set_default_host(struct irq_host *host)
{
	pr_debug("irq: Default host set to @0x%p\n", host);

	irq_default_host = host;
}

void irq_set_virq_count(unsigned int count)
{
	pr_debug("irq: Trying to set virq count to %d\n", count);

	BUG_ON(count < NUM_ISA_INTERRUPTS);
	if (count < NR_IRQS)
		irq_virq_count = count;
}

static int irq_setup_virq(struct irq_host *host, unsigned int virq,
			  irq_hw_number_t hwirq)
{
	/* Clear IRQ_NOREQUEST flag */
	get_irq_desc(virq)->status &= ~IRQ_NOREQUEST;

	/* map it */
	smp_wmb();
	irq_map[virq].hwirq = hwirq;
	smp_mb();

	if (host->ops->map(host, virq, hwirq)) {
		pr_debug("irq: -> mapping failed, freeing\n");
		irq_free_virt(virq, 1);
		return -1;
	}

	return 0;
}

unsigned int irq_create_direct_mapping(struct irq_host *host)
{
	unsigned int virq;

	if (host == NULL)
		host = irq_default_host;

	BUG_ON(host == NULL);
	WARN_ON(host->revmap_type != IRQ_HOST_MAP_NOMAP);

	virq = irq_alloc_virt(host, 1, 0);
	if (virq == NO_IRQ) {
		pr_debug("irq: create_direct virq allocation failed\n");
		return NO_IRQ;
	}

	pr_debug("irq: create_direct obtained virq %d\n", virq);

	if (irq_setup_virq(host, virq, virq))
		return NO_IRQ;

	return virq;
}

unsigned int irq_create_mapping(struct irq_host *host,
				irq_hw_number_t hwirq)
{
	unsigned int virq, hint;

	pr_debug("irq: irq_create_mapping(0x%p, 0x%lx)\n", host, hwirq);

	/* Look for default host if necessary */
	if (host == NULL)
		host = irq_default_host;
	if (host == NULL) {
		printk(KERN_WARNING "irq_create_mapping called for"
		       " NULL host, hwirq=%lx\n", hwirq);
		WARN_ON(1);
		return NO_IRQ;
	}
	pr_debug("irq: -> using host @%p\n", host);

	/* Check if mapping already exists; if it does, call
	 * host->ops->remap() to update the flags
	 */
	virq = irq_find_mapping(host, hwirq);
	if (virq != NO_IRQ) {
		if (host->ops->remap)
			host->ops->remap(host, virq, hwirq);
		pr_debug("irq: -> existing mapping on virq %d\n", virq);
		return virq;
	}

	/* Get a virtual interrupt number */
	if (host->revmap_type == IRQ_HOST_MAP_LEGACY) {
		/* Handle legacy */
		virq = (unsigned int)hwirq;
		if (virq == 0 || virq >= NUM_ISA_INTERRUPTS)
			return NO_IRQ;
		return virq;
	} else {
		/* Allocate a virtual interrupt number */
		hint = hwirq % irq_virq_count;
		virq = irq_alloc_virt(host, 1, hint);
		if (virq == NO_IRQ) {
			pr_debug("irq: -> virq allocation failed\n");
			return NO_IRQ;
		}
	}

	if (irq_setup_virq(host, virq, hwirq))
		return NO_IRQ;

	printk(KERN_DEBUG "irq: irq %lu on host %s mapped to virtual irq %u\n",
	       hwirq, host->of_node ? host->of_node->full_name : "null", virq);

	return virq;
}
EXPORT_SYMBOL_GPL(irq_create_mapping);
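
/*
 * Illustrative sketch (not from this file): platform code binding a handler
 * to a known hardware source.  example_handler and the "example" name are
 * hypothetical.
 */
#if 0
static irqreturn_t example_handler(int irq, void *data);

static int example_attach(struct irq_host *host, irq_hw_number_t hwirq)
{
	unsigned int virq = irq_create_mapping(host, hwirq);

	if (virq == NO_IRQ)
		return -ENXIO;
	return request_irq(virq, example_handler, 0, "example", NULL);
}
#endif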

unsigned int irq_create_of_mapping(struct device_node *controller,
				   u32 *intspec, unsigned int intsize)
{
	struct irq_host *host;
	irq_hw_number_t hwirq;
	unsigned int type = IRQ_TYPE_NONE;
	unsigned int virq;

	if (controller == NULL)
		host = irq_default_host;
	else
		host = irq_find_host(controller);
	if (host == NULL) {
		printk(KERN_WARNING "irq: no irq host found for %s !\n",
		       controller->full_name);
		return NO_IRQ;
	}

	/* If host has no translation, then we assume interrupt line */
	if (host->ops->xlate == NULL)
		hwirq = intspec[0];
	else {
		if (host->ops->xlate(host, controller, intspec, intsize,
				     &hwirq, &type))
			return NO_IRQ;
	}

	/* Create mapping */
	virq = irq_create_mapping(host, hwirq);
	if (virq == NO_IRQ)
		return virq;

	/* Set type if specified and different than the current one */
	if (type != IRQ_TYPE_NONE &&
	    type != (get_irq_desc(virq)->status & IRQF_TRIGGER_MASK))
		set_irq_type(virq, type);
	return virq;
}
EXPORT_SYMBOL_GPL(irq_create_of_mapping);

unsigned int irq_of_parse_and_map(struct device_node *dev, int index)
{
	struct of_irq oirq;

	if (of_irq_map_one(dev, index, &oirq))
		return NO_IRQ;

	return irq_create_of_mapping(oirq.controller, oirq.specifier,
				     oirq.size);
}
EXPORT_SYMBOL_GPL(irq_of_parse_and_map);
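
/*
 * Illustrative sketch (not from this file): the usual device-driver pattern,
 * mapping interrupt 0 of a device node and disposing of the mapping if
 * request_irq() fails.  example_isr is a hypothetical handler.
 */
#if 0
static irqreturn_t example_isr(int irq, void *data);

static int example_probe(struct device_node *np)
{
	unsigned int virq = irq_of_parse_and_map(np, 0);
	int rc;

	if (virq == NO_IRQ)
		return -ENODEV;
	rc = request_irq(virq, example_isr, 0, "example", NULL);
	if (rc)
		irq_dispose_mapping(virq);
	return rc;
}
#endif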

void irq_dispose_mapping(unsigned int virq)
{
	struct irq_host *host;
	irq_hw_number_t hwirq;

	if (virq == NO_IRQ)
		return;

	host = irq_map[virq].host;
	WARN_ON(host == NULL);
	if (host == NULL)
		return;

	/* Never unmap legacy interrupts */
	if (host->revmap_type == IRQ_HOST_MAP_LEGACY)
		return;

	/* remove chip and handler */
	set_irq_chip_and_handler(virq, NULL, NULL);

	/* Make sure it's completed */
	synchronize_irq(virq);

	/* Tell the PIC about it */
	if (host->ops->unmap)
		host->ops->unmap(host, virq);
	smp_mb();

	/* Clear reverse map */
	hwirq = irq_map[virq].hwirq;
	switch (host->revmap_type) {
	case IRQ_HOST_MAP_LINEAR:
		if (hwirq < host->revmap_data.linear.size)
			host->revmap_data.linear.revmap[hwirq] = NO_IRQ;
		break;
	case IRQ_HOST_MAP_TREE:
		/*
		 * Check if radix tree allocated yet, if not then nothing to
		 * remove.
		 */
		smp_rmb();
		if (revmap_trees_allocated < 1)
			break;
		mutex_lock(&revmap_trees_mutex);
		radix_tree_delete(&host->revmap_data.tree, hwirq);
		mutex_unlock(&revmap_trees_mutex);
		break;
	}

	/* Destroy map */
	smp_mb();
	irq_map[virq].hwirq = host->inval_irq;

	/* Set some flags */
	get_irq_desc(virq)->status |= IRQ_NOREQUEST;

	/* Free it */
	irq_free_virt(virq, 1);
}
EXPORT_SYMBOL_GPL(irq_dispose_mapping);

unsigned int irq_find_mapping(struct irq_host *host,
			      irq_hw_number_t hwirq)
{
	unsigned int i;
	unsigned int hint = hwirq % irq_virq_count;

	/* Look for default host if necessary */
	if (host == NULL)
		host = irq_default_host;
	if (host == NULL)
		return NO_IRQ;

	/* legacy -> bail early */
	if (host->revmap_type == IRQ_HOST_MAP_LEGACY)
		return hwirq;

	/* Slow path does a linear search of the map */
	if (hint < NUM_ISA_INTERRUPTS)
		hint = NUM_ISA_INTERRUPTS;
	i = hint;
	do {
		if (irq_map[i].host == host &&
		    irq_map[i].hwirq == hwirq)
			return i;
		i++;
		if (i >= irq_virq_count)
			i = NUM_ISA_INTERRUPTS;
	} while (i != hint);
	return NO_IRQ;
}
EXPORT_SYMBOL_GPL(irq_find_mapping);

unsigned int irq_radix_revmap_lookup(struct irq_host *host,
				     irq_hw_number_t hwirq)
{
	struct irq_map_entry *ptr;
	unsigned int virq;

	WARN_ON(host->revmap_type != IRQ_HOST_MAP_TREE);

	/*
	 * Check if the radix tree exists and has been initialized.
	 * If not, we fall back to slow mode.
	 */
	if (revmap_trees_allocated < 2)
		return irq_find_mapping(host, hwirq);

	/* Now try to resolve */
	/*
	 * No rcu_read_lock(ing) needed, the ptr returned can't go under us
	 * as it's referencing an entry in the static irq_map table.
	 */
	ptr = radix_tree_lookup(&host->revmap_data.tree, hwirq);

	/*
	 * If found in radix tree, then fine.
	 * Else fall back to linear lookup - this should not happen in practice
	 * as it means that we failed to insert the node in the radix tree.
	 */
	if (ptr)
		virq = ptr - irq_map;
	else
		virq = irq_find_mapping(host, hwirq);

	return virq;
}

void irq_radix_revmap_insert(struct irq_host *host, unsigned int virq,
			     irq_hw_number_t hwirq)
{
	WARN_ON(host->revmap_type != IRQ_HOST_MAP_TREE);

	/*
	 * Check if the radix tree exists yet.
	 * If not, then the irq will be inserted into the tree when it gets
	 * initialized.
	 */
	smp_rmb();
	if (revmap_trees_allocated < 1)
		return;

	if (virq != NO_IRQ) {
		mutex_lock(&revmap_trees_mutex);
		radix_tree_insert(&host->revmap_data.tree, hwirq,
				  &irq_map[virq]);
		mutex_unlock(&revmap_trees_mutex);
	}
}

unsigned int irq_linear_revmap(struct irq_host *host,
			       irq_hw_number_t hwirq)
{
	unsigned int *revmap;

	WARN_ON(host->revmap_type != IRQ_HOST_MAP_LINEAR);

	/* Check revmap bounds */
	if (unlikely(hwirq >= host->revmap_data.linear.size))
		return irq_find_mapping(host, hwirq);

	/* Check if revmap was allocated */
	revmap = host->revmap_data.linear.revmap;
	if (unlikely(revmap == NULL))
		return irq_find_mapping(host, hwirq);

	/* Fill up revmap with slow path if no mapping found */
	if (unlikely(revmap[hwirq] == NO_IRQ))
		revmap[hwirq] = irq_find_mapping(host, hwirq);

	return revmap[hwirq];
}
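
/*
 * Illustrative sketch (not from this file): a cascade flow handler using the
 * O(1) linear revmap on its hot path.  example_host and
 * example_pic_pending() are hypothetical.
 */
#if 0
static struct irq_host *example_host;

static void example_cascade(unsigned int irq, struct irq_desc *desc)
{
	irq_hw_number_t hw = example_pic_pending();	/* read pending reg */
	unsigned int cascade_virq = irq_linear_revmap(example_host, hw);

	if (cascade_virq != NO_IRQ)
		generic_handle_irq(cascade_virq);
	desc->chip->eoi(irq);
}
#endif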

unsigned int irq_alloc_virt(struct irq_host *host,
			    unsigned int count,
			    unsigned int hint)
{
	unsigned long flags;
	unsigned int i, j, found = NO_IRQ;

	if (count == 0 || count > (irq_virq_count - NUM_ISA_INTERRUPTS))
		return NO_IRQ;

	spin_lock_irqsave(&irq_big_lock, flags);

	/* Use hint for 1 interrupt if any */
	if (count == 1 && hint >= NUM_ISA_INTERRUPTS &&
	    hint < irq_virq_count && irq_map[hint].host == NULL) {
		found = hint;
		goto hint_found;
	}

	/* Look for count consecutive numbers in the allocatable
	 * (non-legacy) space
	 */
	for (i = NUM_ISA_INTERRUPTS, j = 0; i < irq_virq_count; i++) {
		if (irq_map[i].host != NULL)
			j = 0;
		else
			j++;
		if (j == count) {
			found = i - count + 1;
			break;
		}
	}
	if (found == NO_IRQ) {
		spin_unlock_irqrestore(&irq_big_lock, flags);
		return NO_IRQ;
	}
 hint_found:
	for (i = found; i < (found + count); i++) {
		irq_map[i].hwirq = host->inval_irq;
		smp_wmb();
		irq_map[i].host = host;
	}
	spin_unlock_irqrestore(&irq_big_lock, flags);
	return found;
}

void irq_free_virt(unsigned int virq, unsigned int count)
{
	unsigned long flags;
	unsigned int i;

	WARN_ON(virq < NUM_ISA_INTERRUPTS);
	WARN_ON(count == 0 || (virq + count) > irq_virq_count);

	spin_lock_irqsave(&irq_big_lock, flags);
	for (i = virq; i < (virq + count); i++) {
		struct irq_host *host;

		if (i < NUM_ISA_INTERRUPTS ||
		    (virq + count) > irq_virq_count)
			continue;

		host = irq_map[i].host;
		irq_map[i].hwirq = host->inval_irq;
		smp_wmb();
		irq_map[i].host = NULL;
	}
	spin_unlock_irqrestore(&irq_big_lock, flags);
}

void irq_early_init(void)
{
	unsigned int i;

	for (i = 0; i < NR_IRQS; i++)
		get_irq_desc(i)->status |= IRQ_NOREQUEST;
}

/* We need to create the radix trees late */
static int irq_late_init(void)
{
	struct irq_host *h;
	unsigned int i;

	/*
	 * No mutual exclusion with respect to accessors of the tree is needed
	 * here as the synchronization is done via the state variable
	 * revmap_trees_allocated.
	 */
	list_for_each_entry(h, &irq_hosts, link) {
		if (h->revmap_type == IRQ_HOST_MAP_TREE)
			INIT_RADIX_TREE(&h->revmap_data.tree, GFP_KERNEL);
	}

	/*
	 * Make sure the radix tree inits are visible before setting
	 * the flag.
	 */
	smp_wmb();
	revmap_trees_allocated = 1;

	/*
	 * Insert the reverse mapping for those interrupts already present
	 * in irq_map[].
	 */
	mutex_lock(&revmap_trees_mutex);
	for (i = 0; i < irq_virq_count; i++) {
		if (irq_map[i].host &&
		    (irq_map[i].host->revmap_type == IRQ_HOST_MAP_TREE))
			radix_tree_insert(&irq_map[i].host->revmap_data.tree,
					  irq_map[i].hwirq, &irq_map[i]);
	}
	mutex_unlock(&revmap_trees_mutex);

	/*
	 * Make sure the radix tree insertions are visible before setting
	 * the flag.
	 */
	smp_wmb();
	revmap_trees_allocated = 2;

	return 0;
}
arch_initcall(irq_late_init);

#ifdef CONFIG_VIRQ_DEBUG
static int virq_debug_show(struct seq_file *m, void *private)
{
	unsigned long flags;
	struct irq_desc *desc;
	const char *p;
	char none[] = "none";
	int i;

	seq_printf(m, "%-5s %-7s %-15s %s\n", "virq", "hwirq",
		   "chip name", "host name");

	for (i = 1; i < NR_IRQS; i++) {
		desc = get_irq_desc(i);
		spin_lock_irqsave(&desc->lock, flags);

		if (desc->action && desc->action->handler) {
			seq_printf(m, "%5d ", i);
			seq_printf(m, "0x%05lx ", virq_to_hw(i));

			if (desc->chip && desc->chip->typename)
				p = desc->chip->typename;
			else
				p = none;
			seq_printf(m, "%-15s ", p);

			if (irq_map[i].host && irq_map[i].host->of_node)
				p = irq_map[i].host->of_node->full_name;
			else
				p = none;
			seq_printf(m, "%s\n", p);
		}

		spin_unlock_irqrestore(&desc->lock, flags);
	}

	return 0;
}

static int virq_debug_open(struct inode *inode, struct file *file)
{
	return single_open(file, virq_debug_show, inode->i_private);
}

static const struct file_operations virq_debug_fops = {
	.open = virq_debug_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int __init irq_debugfs_init(void)
{
	if (debugfs_create_file("virq_mapping", S_IRUGO, powerpc_debugfs_root,
				NULL, &virq_debug_fops) == NULL)
		return -ENOMEM;

	return 0;
}
__initcall(irq_debugfs_init);
#endif /* CONFIG_VIRQ_DEBUG */

#ifdef CONFIG_PPC64
static int __init setup_noirqdistrib(char *str)
{
	distribute_irqs = 0;

	return 1;
}

__setup("noirqdistrib", setup_noirqdistrib);
#endif /* CONFIG_PPC64 */