/*
 * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
 * Copyright (C) 2005-2006, Thomas Gleixner, Russell King
 *
 * This file contains the interrupt descriptor management code
 *
 * Detailed information is available in Documentation/DocBook/genericirq
 */
#include <linux/irq.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/radix-tree.h>
#include <linux/bitmap.h>

#include "internals.h"
/*
 * lockdep: we want to handle all irq_desc locks as a single lock-class:
 */
static struct lock_class_key irq_desc_lock_class;
#if defined(CONFIG_SMP)
static int __init irq_affinity_setup(char *str)
{
	zalloc_cpumask_var(&irq_default_affinity, GFP_NOWAIT);
	cpulist_parse(str, irq_default_affinity);
	/*
	 * Set at least the boot cpu. We don't want to end up with
	 * bug reports caused by random command line masks.
	 */
	cpumask_set_cpu(smp_processor_id(), irq_default_affinity);
	return 1;
}
__setup("irqaffinity=", irq_affinity_setup);
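/*
 * Illustrative example (not part of the original file): booting with
 *
 *	irqaffinity=0-3
 *
 * restricts the default irq affinity mask to cpus 0-3, using the
 * cpulist format understood by cpulist_parse(). The setup handler
 * above always adds the boot cpu back, so a command line mask that
 * excludes it cannot leave irq_default_affinity empty.
 */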
extern struct cpumask hmp_slow_cpu_mask;

static void __init init_irq_default_affinity(void)
{
#ifdef CONFIG_CPUMASK_OFFSTACK
	if (!irq_default_affinity)
		zalloc_cpumask_var(&irq_default_affinity, GFP_NOWAIT);
#endif
#ifdef CONFIG_SCHED_HMP
	/* Route interrupts to the slow (little) cpus by default */
	if (!cpumask_empty(&hmp_slow_cpu_mask)) {
		cpumask_copy(irq_default_affinity, &hmp_slow_cpu_mask);
		return;
	}
#endif
	if (cpumask_empty(irq_default_affinity))
		cpumask_setall(irq_default_affinity);
}
#else
static void __init init_irq_default_affinity(void)
{
}
#endif
#ifdef CONFIG_SMP
static int alloc_masks(struct irq_desc *desc, gfp_t gfp, int node)
{
	if (!zalloc_cpumask_var_node(&desc->irq_data.affinity, gfp, node))
		return -ENOMEM;

#ifdef CONFIG_GENERIC_PENDING_IRQ
	if (!zalloc_cpumask_var_node(&desc->pending_mask, gfp, node)) {
		free_cpumask_var(desc->irq_data.affinity);
		return -ENOMEM;
	}
#endif
	return 0;
}

static void desc_smp_init(struct irq_desc *desc, int node)
{
	desc->irq_data.node = node;
	cpumask_copy(desc->irq_data.affinity, irq_default_affinity);
#ifdef CONFIG_GENERIC_PENDING_IRQ
	cpumask_clear(desc->pending_mask);
#endif
}

static inline int desc_node(struct irq_desc *desc)
{
	return desc->irq_data.node;
}

#else
static inline int
alloc_masks(struct irq_desc *desc, gfp_t gfp, int node) { return 0; }
static inline void desc_smp_init(struct irq_desc *desc, int node) { }
static inline int desc_node(struct irq_desc *desc) { return 0; }
#endif
static void desc_set_defaults(unsigned int irq, struct irq_desc *desc, int node,
		struct module *owner)
{
	int cpu;

	desc->irq_data.irq = irq;
	desc->irq_data.chip = &no_irq_chip;
	desc->irq_data.chip_data = NULL;
	desc->irq_data.handler_data = NULL;
	desc->irq_data.msi_desc = NULL;
	irq_settings_clr_and_set(desc, ~0, _IRQ_DEFAULT_INIT_FLAGS);
	irqd_set(&desc->irq_data, IRQD_IRQ_DISABLED);
	desc->handle_irq = handle_bad_irq;
	desc->depth = 1;
	desc->irq_count = 0;
	desc->irqs_unhandled = 0;
	desc->name = NULL;
	desc->owner = owner;
	for_each_possible_cpu(cpu)
		*per_cpu_ptr(desc->kstat_irqs, cpu) = 0;
	desc_smp_init(desc, node);
}
int nr_irqs = NR_IRQS;
EXPORT_SYMBOL_GPL(nr_irqs);

static DEFINE_MUTEX(sparse_irq_lock);
static DECLARE_BITMAP(allocated_irqs, IRQ_BITMAP_BITS);
#ifdef CONFIG_SPARSE_IRQ

static RADIX_TREE(irq_desc_tree, GFP_KERNEL);

static void irq_insert_desc(unsigned int irq, struct irq_desc *desc)
{
	radix_tree_insert(&irq_desc_tree, irq, desc);
}

struct irq_desc *irq_to_desc(unsigned int irq)
{
	return radix_tree_lookup(&irq_desc_tree, irq);
}
EXPORT_SYMBOL(irq_to_desc);

static void delete_irq_desc(unsigned int irq)
{
	radix_tree_delete(&irq_desc_tree, irq);
}
#ifdef CONFIG_SMP
static void free_masks(struct irq_desc *desc)
{
#ifdef CONFIG_GENERIC_PENDING_IRQ
	free_cpumask_var(desc->pending_mask);
#endif
	free_cpumask_var(desc->irq_data.affinity);
}
#else
static inline void free_masks(struct irq_desc *desc) { }
#endif
static struct irq_desc *alloc_desc(int irq, int node, struct module *owner)
{
	struct irq_desc *desc;
	gfp_t gfp = GFP_KERNEL;

	desc = kzalloc_node(sizeof(*desc), gfp, node);
	if (!desc)
		return NULL;
	/* allocate based on nr_cpu_ids */
	desc->kstat_irqs = alloc_percpu(unsigned int);
	if (!desc->kstat_irqs)
		goto err_desc;

	if (alloc_masks(desc, gfp, node))
		goto err_kstat;

	raw_spin_lock_init(&desc->lock);
	lockdep_set_class(&desc->lock, &irq_desc_lock_class);

	desc_set_defaults(irq, desc, node, owner);

	return desc;

err_kstat:
	free_percpu(desc->kstat_irqs);
err_desc:
	kfree(desc);
	return NULL;
}
static void free_desc(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	unregister_irq_proc(irq, desc);

	mutex_lock(&sparse_irq_lock);
	delete_irq_desc(irq);
	mutex_unlock(&sparse_irq_lock);

	free_masks(desc);
	free_percpu(desc->kstat_irqs);
	kfree(desc);
}
static int alloc_descs(unsigned int start, unsigned int cnt, int node,
		       struct module *owner)
{
	struct irq_desc *desc;
	int i;

	for (i = 0; i < cnt; i++) {
		desc = alloc_desc(start + i, node, owner);
		if (!desc)
			goto err;
		mutex_lock(&sparse_irq_lock);
		irq_insert_desc(start + i, desc);
		mutex_unlock(&sparse_irq_lock);
	}
	return start;

err:
	for (i--; i >= 0; i--)
		free_desc(start + i);

	mutex_lock(&sparse_irq_lock);
	bitmap_clear(allocated_irqs, start, cnt);
	mutex_unlock(&sparse_irq_lock);
	return -ENOMEM;
}
static int irq_expand_nr_irqs(unsigned int nr)
{
	if (nr > IRQ_BITMAP_BITS)
		return -ENOMEM;
	nr_irqs = nr;
	return 0;
}
int __init early_irq_init(void)
{
	int i, initcnt, node = first_online_node;
	struct irq_desc *desc;

	init_irq_default_affinity();

	/* Let arch update nr_irqs and return the number of preallocated irqs */
	initcnt = arch_probe_nr_irqs();
	printk(KERN_INFO "NR_IRQS:%d nr_irqs:%d %d\n", NR_IRQS, nr_irqs, initcnt);

	if (WARN_ON(nr_irqs > IRQ_BITMAP_BITS))
		nr_irqs = IRQ_BITMAP_BITS;
	if (WARN_ON(initcnt > IRQ_BITMAP_BITS))
		initcnt = IRQ_BITMAP_BITS;

	if (initcnt > nr_irqs)
		nr_irqs = initcnt;

	for (i = 0; i < initcnt; i++) {
		desc = alloc_desc(i, node, NULL);
		set_bit(i, allocated_irqs);
		irq_insert_desc(i, desc);
	}
	return arch_early_irq_init();
}
#else /* !CONFIG_SPARSE_IRQ */

struct irq_desc irq_desc[NR_IRQS] __cacheline_aligned_in_smp = {
	[0 ... NR_IRQS-1] = {
		.handle_irq	= handle_bad_irq,
		.depth		= 1,
		.lock		= __RAW_SPIN_LOCK_UNLOCKED(irq_desc->lock),
	}
};
int __init early_irq_init(void)
{
	int count, i, node = first_online_node;
	struct irq_desc *desc;

	init_irq_default_affinity();

	printk(KERN_INFO "NR_IRQS:%d\n", NR_IRQS);

	desc = irq_desc;
	count = ARRAY_SIZE(irq_desc);

	for (i = 0; i < count; i++) {
		desc[i].kstat_irqs = alloc_percpu(unsigned int);
		alloc_masks(&desc[i], GFP_KERNEL, node);
		raw_spin_lock_init(&desc[i].lock);
		lockdep_set_class(&desc[i].lock, &irq_desc_lock_class);
		desc_set_defaults(i, &desc[i], node, NULL);
	}
	return arch_early_irq_init();
}
struct irq_desc *irq_to_desc(unsigned int irq)
{
	return (irq < NR_IRQS) ? irq_desc + irq : NULL;
}
EXPORT_SYMBOL(irq_to_desc);
static void free_desc(unsigned int irq)
{
	dynamic_irq_cleanup(irq);
}
static inline int alloc_descs(unsigned int start, unsigned int cnt, int node,
			      struct module *owner)
{
	u32 i;

	for (i = 0; i < cnt; i++) {
		struct irq_desc *desc = irq_to_desc(start + i);

		desc->owner = owner;
	}
	return start;
}
static int irq_expand_nr_irqs(unsigned int nr)
{
	return -ENOMEM;
}

#endif /* !CONFIG_SPARSE_IRQ */
/**
 * generic_handle_irq - Invoke the handler for a particular irq
 * @irq:	The irq number to handle
 */
int generic_handle_irq(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (!desc)
		return -EINVAL;
	generic_handle_irq_desc(irq, desc);
	return 0;
}
EXPORT_SYMBOL_GPL(generic_handle_irq);
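/*
 * Illustrative sketch (not part of the original file): a typical
 * caller of generic_handle_irq() is a chained flow handler that
 * demultiplexes a cascaded interrupt controller. All demux_* names
 * below are hypothetical:
 *
 *	static void demux_handler(unsigned int irq, struct irq_desc *desc)
 *	{
 *		unsigned long pending = readl(demux_base + DEMUX_STATUS);
 *
 *		while (pending) {
 *			unsigned int bit = __ffs(pending);
 *
 *			generic_handle_irq(demux_irq_base + bit);
 *			pending &= ~BIT(bit);
 *		}
 *	}
 */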
/* Dynamic interrupt handling */
/**
 * irq_free_descs - free irq descriptors
 * @from:	Start of descriptor range
 * @cnt:	Number of consecutive irqs to free
 */
void irq_free_descs(unsigned int from, unsigned int cnt)
{
	int i;

	if (from >= nr_irqs || (from + cnt) > nr_irqs)
		return;

	for (i = 0; i < cnt; i++)
		free_desc(from + i);

	mutex_lock(&sparse_irq_lock);
	bitmap_clear(allocated_irqs, from, cnt);
	mutex_unlock(&sparse_irq_lock);
}
EXPORT_SYMBOL_GPL(irq_free_descs);
/**
 * irq_alloc_descs - allocate and initialize a range of irq descriptors
 * @irq:	Allocate for specific irq number if irq >= 0
 * @from:	Start the search from this irq number
 * @cnt:	Number of consecutive irqs to allocate.
 * @node:	Preferred node on which the irq descriptor should be allocated
 * @owner:	Owning module (can be NULL)
 *
 * Returns the first irq number or error code
 */
int __ref
__irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node,
		  struct module *owner)
{
	int start, ret;

	if (!cnt)
		return -EINVAL;

	if (irq >= 0) {
		if (from > irq)
			return -EINVAL;
		from = irq;
	}

	mutex_lock(&sparse_irq_lock);

	start = bitmap_find_next_zero_area(allocated_irqs, IRQ_BITMAP_BITS,
					   from, cnt, 0);
	ret = -EEXIST;
	if (irq >= 0 && start != irq)
		goto err;

	if (start + cnt > nr_irqs) {
		ret = irq_expand_nr_irqs(start + cnt);
		if (ret)
			goto err;
	}

	bitmap_set(allocated_irqs, start, cnt);
	mutex_unlock(&sparse_irq_lock);
	return alloc_descs(start, cnt, node, owner);

err:
	mutex_unlock(&sparse_irq_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(__irq_alloc_descs);
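/*
 * Illustrative usage (hypothetical values, not part of the original
 * file): most callers go through the irq_alloc_descs() wrapper from
 * <linux/irq.h>, which passes THIS_MODULE as @owner, and release the
 * range again with irq_free_descs():
 *
 *	int irq_base = irq_alloc_descs(-1, 0, 4, numa_node_id());
 *
 *	if (irq_base < 0)
 *		return irq_base;
 *	...
 *	irq_free_descs(irq_base, 4);
 *
 * Passing irq < 0 lets the core pick the first free range at or after
 * @from; passing a specific irq >= 0 demands exactly that number.
 */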
/**
 * irq_reserve_irqs - mark irqs allocated
 * @from:	mark from irq number
 * @cnt:	number of irqs to mark
 *
 * Returns 0 on success or an appropriate error code
 */
int irq_reserve_irqs(unsigned int from, unsigned int cnt)
{
	unsigned int start;
	int ret = 0;

	if (!cnt || (from + cnt) > nr_irqs)
		return -EINVAL;

	mutex_lock(&sparse_irq_lock);
	start = bitmap_find_next_zero_area(allocated_irqs, nr_irqs, from, cnt, 0);
	if (start == from)
		bitmap_set(allocated_irqs, start, cnt);
	else
		ret = -EEXIST;
	mutex_unlock(&sparse_irq_lock);
	return ret;
}
/**
 * irq_get_next_irq - get next allocated irq number
 * @offset:	where to start the search
 *
 * Returns next irq number after offset or nr_irqs if none is found.
 */
unsigned int irq_get_next_irq(unsigned int offset)
{
	return find_next_bit(allocated_irqs, nr_irqs, offset);
}
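/*
 * Illustrative usage (not part of the original file): together with
 * nr_irqs this allows a sparse walk over every allocated irq number:
 *
 *	unsigned int irq;
 *
 *	for (irq = irq_get_next_irq(0); irq < nr_irqs;
 *	     irq = irq_get_next_irq(irq + 1))
 *		...operate on irq...
 */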
struct irq_desc *
__irq_get_desc_lock(unsigned int irq, unsigned long *flags, bool bus,
		    unsigned int check)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (desc) {
		if (check & _IRQ_DESC_CHECK) {
			if ((check & _IRQ_DESC_PERCPU) &&
			    !irq_settings_is_per_cpu_devid(desc))
				return NULL;

			if (!(check & _IRQ_DESC_PERCPU) &&
			    irq_settings_is_per_cpu_devid(desc))
				return NULL;
		}

		if (bus)
			chip_bus_lock(desc);
		raw_spin_lock_irqsave(&desc->lock, *flags);
	}
	return desc;
}
void __irq_put_desc_unlock(struct irq_desc *desc, unsigned long flags, bool bus)
{
	raw_spin_unlock_irqrestore(&desc->lock, flags);
	if (bus)
		chip_bus_sync_unlock(desc);
}
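/*
 * Illustrative usage (simplified, not part of the original file): the
 * irq core pairs these two helpers through the wrappers declared in
 * internals.h, e.g. irq_get_desc_buslock()/irq_put_desc_busunlock():
 *
 *	unsigned long flags;
 *	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, 0);
 *
 *	if (!desc)
 *		return -EINVAL;
 *	...modify the descriptor under desc->lock...
 *	irq_put_desc_busunlock(desc, flags);
 */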
int irq_set_percpu_devid(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (!desc)
		return -EINVAL;

	if (desc->percpu_enabled)
		return -EINVAL;

	desc->percpu_enabled = kzalloc(sizeof(*desc->percpu_enabled), GFP_KERNEL);

	if (!desc->percpu_enabled)
		return -ENOMEM;

	irq_set_percpu_devid_flags(irq);
	return 0;
}
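/*
 * Illustrative usage (hypothetical names, not part of the original
 * file): per-cpu interrupts such as local timer PPIs are marked once
 * with irq_set_percpu_devid() and then requested on each cpu via
 * request_percpu_irq():
 *
 *	irq_set_percpu_devid(irq);
 *	err = request_percpu_irq(irq, my_timer_handler, "my_timer",
 *				 &my_percpu_dev_id);
 */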
/**
 * dynamic_irq_cleanup - cleanup a dynamically allocated irq
 * @irq:	irq number to clean up
 */
void dynamic_irq_cleanup(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	unsigned long flags;

	raw_spin_lock_irqsave(&desc->lock, flags);
	desc_set_defaults(irq, desc, desc_node(desc), NULL);
	raw_spin_unlock_irqrestore(&desc->lock, flags);
}
unsigned int kstat_irqs_cpu(unsigned int irq, int cpu)
{
	struct irq_desc *desc = irq_to_desc(irq);

	return desc && desc->kstat_irqs ?
			*per_cpu_ptr(desc->kstat_irqs, cpu) : 0;
}
unsigned int kstat_irqs(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	int cpu;
	int sum = 0;

	if (!desc || !desc->kstat_irqs)
		return 0;
	for_each_possible_cpu(cpu)
		sum += *per_cpu_ptr(desc->kstat_irqs, cpu);
	return sum;
}
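/*
 * Illustrative usage (not part of the original file): interrupt
 * accounting, e.g. for /proc output, reads either a single cpu's
 * count or the machine-wide total for an irq:
 *
 *	unsigned int on_cpu0 = kstat_irqs_cpu(irq, 0);
 *	unsigned int total   = kstat_irqs(irq);
 */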