1 /*
2  * linux/kernel/irq/manage.c
3  *
4  * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
5  * Copyright (C) 2005-2006 Thomas Gleixner
6  *
7  * This file contains driver APIs to the irq subsystem.
8  */
9
10 #define pr_fmt(fmt) "genirq: " fmt
11
12 #include <linux/irq.h>
13 #include <linux/kthread.h>
14 #include <linux/module.h>
15 #include <linux/random.h>
16 #include <linux/interrupt.h>
17 #include <linux/slab.h>
18 #include <linux/sched.h>
19 #include <linux/sched/rt.h>
20 #include <linux/task_work.h>
21
22 #include "internals.h"
23
24 #ifdef CONFIG_IRQ_FORCED_THREADING
25 __read_mostly bool force_irqthreads;
26
27 static int __init setup_forced_irqthreads(char *arg)
28 {
29         force_irqthreads = true;
30         return 0;
31 }
32 early_param("threadirqs", setup_forced_irqthreads);
33 #endif
34
35 static void __synchronize_hardirq(struct irq_desc *desc)
36 {
37         bool inprogress;
38
39         do {
40                 unsigned long flags;
41
42                 /*
43                  * Wait until we're out of the critical section.  This might
44                  * give the wrong answer due to the lack of memory barriers.
45                  */
46                 while (irqd_irq_inprogress(&desc->irq_data))
47                         cpu_relax();
48
49                 /* Ok, that indicated we're done: double-check carefully. */
50                 raw_spin_lock_irqsave(&desc->lock, flags);
51                 inprogress = irqd_irq_inprogress(&desc->irq_data);
52                 raw_spin_unlock_irqrestore(&desc->lock, flags);
53
54                 /* Oops, that failed? */
55         } while (inprogress);
56 }
57
58 /**
59  *      synchronize_hardirq - wait for pending hard IRQ handlers (on other CPUs)
60  *      @irq: interrupt number to wait for
61  *
62  *      This function waits for any pending hard IRQ handlers for this
63  *      interrupt to complete before returning. If you use this
64  *      function while holding a resource the IRQ handler may need you
65  *      will deadlock. It does not take associated threaded handlers
66  *      into account.
67  *
68  *      Do not use this for shutdown scenarios where you must be sure
69  *      that all parts (hardirq and threaded handler) have completed.
70  *
71  *      This function may be called - with care - from IRQ context.
72  */
73 void synchronize_hardirq(unsigned int irq)
74 {
75         struct irq_desc *desc = irq_to_desc(irq);
76
77         if (desc)
78                 __synchronize_hardirq(desc);
79 }
80 EXPORT_SYMBOL(synchronize_hardirq);
81
82 /**
83  *      synchronize_irq - wait for pending IRQ handlers (on other CPUs)
84  *      @irq: interrupt number to wait for
85  *
86  *      This function waits for any pending IRQ handlers for this interrupt
87  *      to complete before returning. If you use this function while
88  *      holding a resource the IRQ handler may need you will deadlock.
89  *
90  *      This function may be called - with care - from IRQ context.
91  */
92 void synchronize_irq(unsigned int irq)
93 {
94         struct irq_desc *desc = irq_to_desc(irq);
95
96         if (desc) {
97                 __synchronize_hardirq(desc);
98                 /*
99                  * We made sure that no hardirq handler is
100                  * running. Now verify that no threaded handlers are
101                  * active.
102                  */
103                 wait_event(desc->wait_for_threads,
104                            !atomic_read(&desc->threads_active));
105         }
106 }
107 EXPORT_SYMBOL(synchronize_irq);
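/*
 * Usage sketch (illustrative; foo_dev, foo_hw_irq_off(), foo_free_buffers()
 * and foo->irq are hypothetical driver-side names): a typical stop path
 * first tells the hardware to raise no further interrupts, then calls
 * synchronize_irq() so that a handler still running on another CPU has
 * finished before the state it touches is torn down.
 *
 *	static void foo_stop(struct foo_dev *foo)
 *	{
 *		foo_hw_irq_off(foo);
 *		synchronize_irq(foo->irq);
 *		foo_free_buffers(foo);
 *	}
 */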
108
109 #ifdef CONFIG_SMP
110 cpumask_var_t irq_default_affinity;
111
112 /**
113  *      irq_can_set_affinity - Check if the affinity of a given irq can be set
114  *      @irq:           Interrupt to check
115  *
116  */
117 int irq_can_set_affinity(unsigned int irq)
118 {
119         struct irq_desc *desc = irq_to_desc(irq);
120
121         if (!desc || !irqd_can_balance(&desc->irq_data) ||
122             !desc->irq_data.chip || !desc->irq_data.chip->irq_set_affinity)
123                 return 0;
124
125         return 1;
126 }
127
128 /**
129  *      irq_set_thread_affinity - Notify irq threads to adjust affinity
130  *      @desc:          irq descriptor which has affinity changed
131  *
132  *      We just set IRQTF_AFFINITY and delegate the affinity setting
133  *      to the interrupt thread itself. We can not call
134  *      set_cpus_allowed_ptr() here as we hold desc->lock and this
135  *      code can be called from hard interrupt context.
136  */
137 void irq_set_thread_affinity(struct irq_desc *desc)
138 {
139         struct irqaction *action = desc->action;
140
141         while (action) {
142                 if (action->thread)
143                         set_bit(IRQTF_AFFINITY, &action->thread_flags);
144                 action = action->next;
145         }
146 }
147
148 #ifdef CONFIG_GENERIC_PENDING_IRQ
149 static inline bool irq_can_move_pcntxt(struct irq_data *data)
150 {
151         return irqd_can_move_in_process_context(data);
152 }
153 static inline bool irq_move_pending(struct irq_data *data)
154 {
155         return irqd_is_setaffinity_pending(data);
156 }
157 static inline void
158 irq_copy_pending(struct irq_desc *desc, const struct cpumask *mask)
159 {
160         cpumask_copy(desc->pending_mask, mask);
161 }
162 static inline void
163 irq_get_pending(struct cpumask *mask, struct irq_desc *desc)
164 {
165         cpumask_copy(mask, desc->pending_mask);
166 }
167 #else
168 static inline bool irq_can_move_pcntxt(struct irq_data *data) { return true; }
169 static inline bool irq_move_pending(struct irq_data *data) { return false; }
170 static inline void
171 irq_copy_pending(struct irq_desc *desc, const struct cpumask *mask) { }
172 static inline void
173 irq_get_pending(struct cpumask *mask, struct irq_desc *desc) { }
174 #endif
175
176 int irq_do_set_affinity(struct irq_data *data, const struct cpumask *mask,
177                         bool force)
178 {
179         struct irq_desc *desc = irq_data_to_desc(data);
180         struct irq_chip *chip = irq_data_get_irq_chip(data);
181         int ret;
182
183         ret = chip->irq_set_affinity(data, mask, force);
184         switch (ret) {
185         case IRQ_SET_MASK_OK:
186         case IRQ_SET_MASK_OK_DONE:
187                 cpumask_copy(data->affinity, mask);
188         case IRQ_SET_MASK_OK_NOCOPY:
189                 irq_set_thread_affinity(desc);
190                 ret = 0;
191         }
192
193         return ret;
194 }
195
196 int irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask,
197                             bool force)
198 {
199         struct irq_chip *chip = irq_data_get_irq_chip(data);
200         struct irq_desc *desc = irq_data_to_desc(data);
201         int ret = 0;
202
203         if (!chip || !chip->irq_set_affinity)
204                 return -EINVAL;
205
206         if (irq_can_move_pcntxt(data)) {
207                 ret = irq_do_set_affinity(data, mask, force);
208         } else {
209                 irqd_set_move_pending(data);
210                 irq_copy_pending(desc, mask);
211         }
212
213         if (desc->affinity_notify) {
214                 kref_get(&desc->affinity_notify->kref);
215                 schedule_work(&desc->affinity_notify->work);
216         }
217         irqd_set(data, IRQD_AFFINITY_SET);
218
219         return ret;
220 }
221
222 int __irq_set_affinity(unsigned int irq, const struct cpumask *mask, bool force)
223 {
224         struct irq_desc *desc = irq_to_desc(irq);
225         unsigned long flags;
226         int ret;
227
228         if (!desc)
229                 return -EINVAL;
230
231         raw_spin_lock_irqsave(&desc->lock, flags);
232         ret = irq_set_affinity_locked(irq_desc_get_irq_data(desc), mask, force);
233         raw_spin_unlock_irqrestore(&desc->lock, flags);
234         return ret;
235 }
236
237 int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m)
238 {
239         unsigned long flags;
240         struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
241
242         if (!desc)
243                 return -EINVAL;
244         desc->affinity_hint = m;
245         irq_put_desc_unlock(desc, flags);
246         /* set the initial affinity to prevent every interrupt being on CPU0 */
247         __irq_set_affinity(irq, m, false);
248         return 0;
249 }
250 EXPORT_SYMBOL_GPL(irq_set_affinity_hint);
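/*
 * Usage sketch (illustrative; nic->num_queues and nic->queue_irq[] are
 * hypothetical): a multi-queue driver can hint one CPU per queue vector
 * in its setup path, so that irqbalance and the initial affinity set
 * above spread the load. The hint should be cleared with
 * irq_set_affinity_hint(irq, NULL) before the interrupt is freed.
 *
 *	for (i = 0; i < nic->num_queues; i++)
 *		irq_set_affinity_hint(nic->queue_irq[i],
 *				      cpumask_of(i % num_online_cpus()));
 */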
251
252 static void irq_affinity_notify(struct work_struct *work)
253 {
254         struct irq_affinity_notify *notify =
255                 container_of(work, struct irq_affinity_notify, work);
256         struct irq_desc *desc = irq_to_desc(notify->irq);
257         cpumask_var_t cpumask;
258         unsigned long flags;
259
260         if (!desc || !alloc_cpumask_var(&cpumask, GFP_KERNEL))
261                 goto out;
262
263         raw_spin_lock_irqsave(&desc->lock, flags);
264         if (irq_move_pending(&desc->irq_data))
265                 irq_get_pending(cpumask, desc);
266         else
267                 cpumask_copy(cpumask, desc->irq_data.affinity);
268         raw_spin_unlock_irqrestore(&desc->lock, flags);
269
270         notify->notify(notify, cpumask);
271
272         free_cpumask_var(cpumask);
273 out:
274         kref_put(&notify->kref, notify->release);
275 }
276
277 /**
278  *      irq_set_affinity_notifier - control notification of IRQ affinity changes
279  *      @irq:           Interrupt for which to enable/disable notification
280  *      @notify:        Context for notification, or %NULL to disable
281  *                      notification.  Function pointers must be initialised;
282  *                      the other fields will be initialised by this function.
283  *
284  *      Must be called in process context.  Notification may only be enabled
285  *      after the IRQ is allocated and must be disabled before the IRQ is
286  *      freed using free_irq().
287  */
288 int
289 irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify)
290 {
291         struct irq_desc *desc = irq_to_desc(irq);
292         struct irq_affinity_notify *old_notify;
293         unsigned long flags;
294
295         /* The release function is promised process context */
296         might_sleep();
297
298         if (!desc)
299                 return -EINVAL;
300
301         /* Complete initialisation of *notify */
302         if (notify) {
303                 notify->irq = irq;
304                 kref_init(&notify->kref);
305                 INIT_WORK(&notify->work, irq_affinity_notify);
306         }
307
308         raw_spin_lock_irqsave(&desc->lock, flags);
309         old_notify = desc->affinity_notify;
310         desc->affinity_notify = notify;
311         raw_spin_unlock_irqrestore(&desc->lock, flags);
312
313         if (old_notify)
314                 kref_put(&old_notify->kref, old_notify->release);
315
316         return 0;
317 }
318 EXPORT_SYMBOL_GPL(irq_set_affinity_notifier);
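/*
 * Usage sketch (illustrative; the foo_* names are hypothetical): the
 * caller supplies only the notify() and release() callbacks, the other
 * fields are filled in by irq_set_affinity_notifier() above. Pass a NULL
 * notifier to disable notification before calling free_irq().
 *
 *	static void foo_affinity_notify(struct irq_affinity_notify *notify,
 *					const cpumask_t *mask)
 *	{
 *		struct foo_dev *foo = container_of(notify, struct foo_dev,
 *						   affinity_notify);
 *
 *		foo_retarget_queues(foo, mask);
 *	}
 *
 *	static void foo_affinity_release(struct kref *ref)
 *	{
 *		struct irq_affinity_notify *notify =
 *			container_of(ref, struct irq_affinity_notify, kref);
 *
 *		foo_put(container_of(notify, struct foo_dev, affinity_notify));
 *	}
 *
 *	foo->affinity_notify.notify = foo_affinity_notify;
 *	foo->affinity_notify.release = foo_affinity_release;
 *	irq_set_affinity_notifier(foo->irq, &foo->affinity_notify);
 */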
319
320 #ifndef CONFIG_AUTO_IRQ_AFFINITY
321 /*
322  * Generic version of the affinity autoselector.
323  */
324 static int
325 setup_affinity(unsigned int irq, struct irq_desc *desc, struct cpumask *mask)
326 {
327         struct cpumask *set = irq_default_affinity;
328         int node = desc->irq_data.node;
329
330         /* Excludes PER_CPU and NO_BALANCE interrupts */
331         if (!irq_can_set_affinity(irq))
332                 return 0;
333
334         /*
335          * Preserve a userspace affinity setup, but make sure that
336          * one of the targets is online.
337          */
338         if (irqd_has_set(&desc->irq_data, IRQD_AFFINITY_SET)) {
339                 if (cpumask_intersects(desc->irq_data.affinity,
340                                        cpu_online_mask))
341                         set = desc->irq_data.affinity;
342                 else
343                         irqd_clear(&desc->irq_data, IRQD_AFFINITY_SET);
344         }
345
346         cpumask_and(mask, cpu_online_mask, set);
347         if (node != NUMA_NO_NODE) {
348                 const struct cpumask *nodemask = cpumask_of_node(node);
349
350                 /* make sure at least one of the cpus in nodemask is online */
351                 if (cpumask_intersects(mask, nodemask))
352                         cpumask_and(mask, mask, nodemask);
353         }
354         irq_do_set_affinity(&desc->irq_data, mask, false);
355         return 0;
356 }
357 #else
358 static inline int
359 setup_affinity(unsigned int irq, struct irq_desc *d, struct cpumask *mask)
360 {
361         return irq_select_affinity(irq);
362 }
363 #endif
364
365 /*
366  * Called when affinity is set via /proc/irq
367  */
368 int irq_select_affinity_usr(unsigned int irq, struct cpumask *mask)
369 {
370         struct irq_desc *desc = irq_to_desc(irq);
371         unsigned long flags;
372         int ret;
373
374         raw_spin_lock_irqsave(&desc->lock, flags);
375         ret = setup_affinity(irq, desc, mask);
376         raw_spin_unlock_irqrestore(&desc->lock, flags);
377         return ret;
378 }
379
380 #else
381 static inline int
382 setup_affinity(unsigned int irq, struct irq_desc *desc, struct cpumask *mask)
383 {
384         return 0;
385 }
386 #endif
387
388 void __disable_irq(struct irq_desc *desc, unsigned int irq)
389 {
390         if (!desc->depth++)
391                 irq_disable(desc);
392 }
393
394 static int __disable_irq_nosync(unsigned int irq)
395 {
396         unsigned long flags;
397         struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
398
399         if (!desc)
400                 return -EINVAL;
401         __disable_irq(desc, irq);
402         irq_put_desc_busunlock(desc, flags);
403         return 0;
404 }
405
406 /**
407  *      disable_irq_nosync - disable an irq without waiting
408  *      @irq: Interrupt to disable
409  *
410  *      Disable the selected interrupt line.  Disables and Enables are
411  *      nested.
412  *      Unlike disable_irq(), this function does not ensure existing
413  *      instances of the IRQ handler have completed before returning.
414  *
415  *      This function may be called from IRQ context.
416  */
417 void disable_irq_nosync(unsigned int irq)
418 {
419         __disable_irq_nosync(irq);
420 }
421 EXPORT_SYMBOL(disable_irq_nosync);
422
423 /**
424  *      disable_irq - disable an irq and wait for completion
425  *      @irq: Interrupt to disable
426  *
427  *      Disable the selected interrupt line.  Enables and Disables are
428  *      nested.
429  *      This function waits for any pending IRQ handlers for this interrupt
430  *      to complete before returning. If you use this function while
431  *      holding a resource the IRQ handler may need you will deadlock.
432  *
433  *      This function may be called - with care - from IRQ context.
434  */
435 void disable_irq(unsigned int irq)
436 {
437         if (!__disable_irq_nosync(irq))
438                 synchronize_irq(irq);
439 }
440 EXPORT_SYMBOL(disable_irq);
441
442 void __enable_irq(struct irq_desc *desc, unsigned int irq)
443 {
444         switch (desc->depth) {
445         case 0:
446  err_out:
447                 WARN(1, KERN_WARNING "Unbalanced enable for IRQ %d\n", irq);
448                 break;
449         case 1: {
450                 if (desc->istate & IRQS_SUSPENDED)
451                         goto err_out;
452                 /* Prevent probing on this irq: */
453                 irq_settings_set_noprobe(desc);
454                 irq_enable(desc);
455                 check_irq_resend(desc, irq);
456                 /* fall-through */
457         }
458         default:
459                 desc->depth--;
460         }
461 }
462
463 /**
464  *      enable_irq - enable handling of an irq
465  *      @irq: Interrupt to enable
466  *
467  *      Undoes the effect of one call to disable_irq().  If this
468  *      matches the last disable, processing of interrupts on this
469  *      IRQ line is re-enabled.
470  *
471  *      This function may be called from IRQ context only when
472  *      desc->irq_data.chip->bus_lock and desc->chip->bus_sync_unlock are NULL !
473  */
474 void enable_irq(unsigned int irq)
475 {
476         unsigned long flags;
477         struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
478
479         if (!desc)
480                 return;
481         if (WARN(!desc->irq_data.chip,
482                  KERN_ERR "enable_irq before setup/request_irq: irq %u\n", irq))
483                 goto out;
484
485         __enable_irq(desc, irq);
486 out:
487         irq_put_desc_busunlock(desc, flags);
488 }
489 EXPORT_SYMBOL(enable_irq);
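/*
 * Usage sketch (illustrative; foo->irq and foo_reprogram() are
 * hypothetical): because disables and enables nest, a driver can bracket
 * a section that must not race with its handler without caring whether
 * another path already has the line disabled. Do not do this while
 * holding a resource the handler needs, as the disable_irq() comment
 * above explains.
 *
 *	disable_irq(foo->irq);
 *	foo_reprogram(foo);
 *	enable_irq(foo->irq);
 */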
490
491 static int set_irq_wake_real(unsigned int irq, unsigned int on)
492 {
493         struct irq_desc *desc = irq_to_desc(irq);
494         int ret = -ENXIO;
495
496         if (irq_desc_get_chip(desc)->flags &  IRQCHIP_SKIP_SET_WAKE)
497                 return 0;
498
499         if (desc->irq_data.chip->irq_set_wake)
500                 ret = desc->irq_data.chip->irq_set_wake(&desc->irq_data, on);
501
502         return ret;
503 }
504
505 /**
506  *      irq_set_irq_wake - control irq power management wakeup
507  *      @irq:   interrupt to control
508  *      @on:    enable/disable power management wakeup
509  *
510  *      Enable/disable power management wakeup mode, which is
511  *      disabled by default.  Enables and disables must match,
512  *      just as they match for non-wakeup mode support.
513  *
514  *      Wakeup mode lets this IRQ wake the system from sleep
515  *      states like "suspend to RAM".
516  */
517 int irq_set_irq_wake(unsigned int irq, unsigned int on)
518 {
519         unsigned long flags;
520         struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
521         int ret = 0;
522
523         if (!desc)
524                 return -EINVAL;
525
526         /* wakeup-capable irqs can be shared between drivers that
527          * don't need to have the same sleep mode behaviors.
528          */
529         if (on) {
530                 if (desc->wake_depth++ == 0) {
531                         ret = set_irq_wake_real(irq, on);
532                         if (ret)
533                                 desc->wake_depth = 0;
534                         else
535                                 irqd_set(&desc->irq_data, IRQD_WAKEUP_STATE);
536                 }
537         } else {
538                 if (desc->wake_depth == 0) {
539                         WARN(1, "Unbalanced IRQ %d wake disable\n", irq);
540                 } else if (--desc->wake_depth == 0) {
541                         ret = set_irq_wake_real(irq, on);
542                         if (ret)
543                                 desc->wake_depth = 1;
544                         else
545                                 irqd_clear(&desc->irq_data, IRQD_WAKEUP_STATE);
546                 }
547         }
548         irq_put_desc_busunlock(desc, flags);
549         return ret;
550 }
551 EXPORT_SYMBOL(irq_set_irq_wake);
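/*
 * Usage sketch (illustrative; foo_dev and foo->irq are hypothetical): a
 * driver whose interrupt should wake the system typically flips wakeup
 * mode in its suspend/resume callbacks, keeping the enables and disables
 * balanced.
 *
 *	static int foo_suspend(struct device *dev)
 *	{
 *		struct foo_dev *foo = dev_get_drvdata(dev);
 *
 *		if (device_may_wakeup(dev))
 *			irq_set_irq_wake(foo->irq, 1);
 *		return 0;
 *	}
 *
 *	static int foo_resume(struct device *dev)
 *	{
 *		struct foo_dev *foo = dev_get_drvdata(dev);
 *
 *		if (device_may_wakeup(dev))
 *			irq_set_irq_wake(foo->irq, 0);
 *		return 0;
 *	}
 */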
552
553 /*
554  * Internal function that tells the architecture code whether a
555  * particular irq has been exclusively allocated or is available
556  * for driver use.
557  */
558 int can_request_irq(unsigned int irq, unsigned long irqflags)
559 {
560         unsigned long flags;
561         struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
562         int canrequest = 0;
563
564         if (!desc)
565                 return 0;
566
567         if (irq_settings_can_request(desc)) {
568                 if (!desc->action ||
569                     irqflags & desc->action->flags & IRQF_SHARED)
570                         canrequest = 1;
571         }
572         irq_put_desc_unlock(desc, flags);
573         return canrequest;
574 }
575
576 int __irq_set_trigger(struct irq_desc *desc, unsigned int irq,
577                       unsigned long flags)
578 {
579         struct irq_chip *chip = desc->irq_data.chip;
580         int ret, unmask = 0;
581
582         if (!chip || !chip->irq_set_type) {
583                 /*
584                  * IRQF_TRIGGER_* but the PIC does not support multiple
585                  * flow-types?
586                  */
587                 pr_debug("No set_type function for IRQ %d (%s)\n", irq,
588                          chip ? (chip->name ? : "unknown") : "unknown");
589                 return 0;
590         }
591
592         flags &= IRQ_TYPE_SENSE_MASK;
593
594         if (chip->flags & IRQCHIP_SET_TYPE_MASKED) {
595                 if (!irqd_irq_masked(&desc->irq_data))
596                         mask_irq(desc);
597                 if (!irqd_irq_disabled(&desc->irq_data))
598                         unmask = 1;
599         }
600
601         /* caller masked out all except trigger mode flags */
602         ret = chip->irq_set_type(&desc->irq_data, flags);
603
604         switch (ret) {
605         case IRQ_SET_MASK_OK:
606         case IRQ_SET_MASK_OK_DONE:
607                 irqd_clear(&desc->irq_data, IRQD_TRIGGER_MASK);
608                 irqd_set(&desc->irq_data, flags);
609
610         case IRQ_SET_MASK_OK_NOCOPY:
611                 flags = irqd_get_trigger_type(&desc->irq_data);
612                 irq_settings_set_trigger_mask(desc, flags);
613                 irqd_clear(&desc->irq_data, IRQD_LEVEL);
614                 irq_settings_clr_level(desc);
615                 if (flags & IRQ_TYPE_LEVEL_MASK) {
616                         irq_settings_set_level(desc);
617                         irqd_set(&desc->irq_data, IRQD_LEVEL);
618                 }
619
620                 ret = 0;
621                 break;
622         default:
623                 pr_err("Setting trigger mode %lu for irq %u failed (%pF)\n",
624                        flags, irq, chip->irq_set_type);
625         }
626         if (unmask)
627                 unmask_irq(desc);
628         return ret;
629 }
630
631 #ifdef CONFIG_HARDIRQS_SW_RESEND
632 int irq_set_parent(int irq, int parent_irq)
633 {
634         unsigned long flags;
635         struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
636
637         if (!desc)
638                 return -EINVAL;
639
640         desc->parent_irq = parent_irq;
641
642         irq_put_desc_unlock(desc, flags);
643         return 0;
644 }
645 #endif
646
647 /*
648  * Default primary interrupt handler for threaded interrupts. Is
649  * assigned as primary handler when request_threaded_irq is called
650  * with handler == NULL. Useful for oneshot interrupts.
651  */
652 static irqreturn_t irq_default_primary_handler(int irq, void *dev_id)
653 {
654         return IRQ_WAKE_THREAD;
655 }
656
657 /*
658  * Primary handler for nested threaded interrupts. Should never be
659  * called.
660  */
661 static irqreturn_t irq_nested_primary_handler(int irq, void *dev_id)
662 {
663         WARN(1, "Primary handler called for nested irq %d\n", irq);
664         return IRQ_NONE;
665 }
666
667 static int irq_wait_for_interrupt(struct irqaction *action)
668 {
669         set_current_state(TASK_INTERRUPTIBLE);
670
671         while (!kthread_should_stop()) {
672
673                 if (test_and_clear_bit(IRQTF_RUNTHREAD,
674                                        &action->thread_flags)) {
675                         __set_current_state(TASK_RUNNING);
676                         return 0;
677                 }
678                 schedule();
679                 set_current_state(TASK_INTERRUPTIBLE);
680         }
681         __set_current_state(TASK_RUNNING);
682         return -1;
683 }
684
685 /*
686  * Oneshot interrupts keep the irq line masked until the threaded
687  * handler has finished. Unmask if the interrupt has not been disabled and
688  * is marked MASKED.
689  */
690 static void irq_finalize_oneshot(struct irq_desc *desc,
691                                  struct irqaction *action)
692 {
693         if (!(desc->istate & IRQS_ONESHOT))
694                 return;
695 again:
696         chip_bus_lock(desc);
697         raw_spin_lock_irq(&desc->lock);
698
699         /*
700          * Implausible though it may be, we need to protect ourselves
701          * against the following scenario:
702          *
703          * The thread finishes faster than the hard interrupt handler
704          * on the other CPU. If we unmask the irq line then the
705          * interrupt can come in again, mask the line, and bail out due
706          * to IRQS_INPROGRESS, leaving the irq line masked forever.
707          *
708          * This also serializes the state of shared oneshot handlers
709          * versus "desc->threads_oneshot |= action->thread_mask;" in
710          * irq_wake_thread(). See the comment there which explains the
711          * serialization.
712          */
713         if (unlikely(irqd_irq_inprogress(&desc->irq_data))) {
714                 raw_spin_unlock_irq(&desc->lock);
715                 chip_bus_sync_unlock(desc);
716                 cpu_relax();
717                 goto again;
718         }
719
720         /*
721          * Now check again, whether the thread should run. Otherwise
722          * we would clear the threads_oneshot bit of this thread which
723          * was just set.
724          */
725         if (test_bit(IRQTF_RUNTHREAD, &action->thread_flags))
726                 goto out_unlock;
727
728         desc->threads_oneshot &= ~action->thread_mask;
729
730         if (!desc->threads_oneshot && !irqd_irq_disabled(&desc->irq_data) &&
731             irqd_irq_masked(&desc->irq_data))
732                 unmask_threaded_irq(desc);
733
734 out_unlock:
735         raw_spin_unlock_irq(&desc->lock);
736         chip_bus_sync_unlock(desc);
737 }
738
739 #ifdef CONFIG_SMP
740 /*
741  * Check whether we need to change the affinity of the interrupt thread.
742  */
743 static void
744 irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action)
745 {
746         cpumask_var_t mask;
747         bool valid = true;
748
749         if (!test_and_clear_bit(IRQTF_AFFINITY, &action->thread_flags))
750                 return;
751
752         /*
753          * In case we are out of memory we set IRQTF_AFFINITY again and
754          * try again next time
755          */
756         if (!alloc_cpumask_var(&mask, GFP_KERNEL)) {
757                 set_bit(IRQTF_AFFINITY, &action->thread_flags);
758                 return;
759         }
760
761         raw_spin_lock_irq(&desc->lock);
762         /*
763          * This code is triggered unconditionally. Check the affinity
764          * mask pointer. For CPU_MASK_OFFSTACK=n this is optimized out.
765          */
766         if (desc->irq_data.affinity)
767                 cpumask_copy(mask, desc->irq_data.affinity);
768         else
769                 valid = false;
770         raw_spin_unlock_irq(&desc->lock);
771
772         if (valid)
773                 set_cpus_allowed_ptr(current, mask);
774         free_cpumask_var(mask);
775 }
776 #else
777 static inline void
778 irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action) { }
779 #endif
780
781 /*
782  * Interrupts which are not explicitly requested as threaded
783  * interrupts rely on the implicit bh/preempt disable of the hard irq
784  * context. So we need to disable bh here to avoid deadlocks and other
785  * side effects.
786  */
787 static irqreturn_t
788 irq_forced_thread_fn(struct irq_desc *desc, struct irqaction *action)
789 {
790         irqreturn_t ret;
791
792         local_bh_disable();
793         ret = action->thread_fn(action->irq, action->dev_id);
794         irq_finalize_oneshot(desc, action);
795         local_bh_enable();
796         return ret;
797 }
798
799 /*
800  * Interrupts explicitly requested as threaded interrupts want to be
801  * preemptible - many of them need to sleep and wait for slow buses to
802  * complete.
803  */
804 static irqreturn_t irq_thread_fn(struct irq_desc *desc,
805                 struct irqaction *action)
806 {
807         irqreturn_t ret;
808
809         ret = action->thread_fn(action->irq, action->dev_id);
810         irq_finalize_oneshot(desc, action);
811         return ret;
812 }
813
814 static void wake_threads_waitq(struct irq_desc *desc)
815 {
816         if (atomic_dec_and_test(&desc->threads_active))
817                 wake_up(&desc->wait_for_threads);
818 }
819
820 static void irq_thread_dtor(struct callback_head *unused)
821 {
822         struct task_struct *tsk = current;
823         struct irq_desc *desc;
824         struct irqaction *action;
825
826         if (WARN_ON_ONCE(!(current->flags & PF_EXITING)))
827                 return;
828
829         action = kthread_data(tsk);
830
831         pr_err("exiting task \"%s\" (%d) is an active IRQ thread (irq %d)\n",
832                tsk->comm, tsk->pid, action->irq);
833
834
835         desc = irq_to_desc(action->irq);
836         /*
837          * If IRQTF_RUNTHREAD is set, we need to decrement
838          * desc->threads_active and wake possible waiters.
839          */
840         if (test_and_clear_bit(IRQTF_RUNTHREAD, &action->thread_flags))
841                 wake_threads_waitq(desc);
842
843         /* Prevent a stale desc->threads_oneshot */
844         irq_finalize_oneshot(desc, action);
845 }
846
847 /*
848  * Interrupt handler thread
849  */
850 static int irq_thread(void *data)
851 {
852         struct callback_head on_exit_work;
853         struct irqaction *action = data;
854         struct irq_desc *desc = irq_to_desc(action->irq);
855         irqreturn_t (*handler_fn)(struct irq_desc *desc,
856                         struct irqaction *action);
857
858         if (force_irqthreads && test_bit(IRQTF_FORCED_THREAD,
859                                         &action->thread_flags))
860                 handler_fn = irq_forced_thread_fn;
861         else
862                 handler_fn = irq_thread_fn;
863
864         init_task_work(&on_exit_work, irq_thread_dtor);
865         task_work_add(current, &on_exit_work, false);
866
867         irq_thread_check_affinity(desc, action);
868
869         while (!irq_wait_for_interrupt(action)) {
870                 irqreturn_t action_ret;
871
872                 irq_thread_check_affinity(desc, action);
873
874                 action_ret = handler_fn(desc, action);
875                 if (action_ret == IRQ_HANDLED)
876                         atomic_inc(&desc->threads_handled);
877
878                 wake_threads_waitq(desc);
879         }
880
881         /*
882          * This is the regular exit path. __free_irq() is stopping the
883          * thread via kthread_stop() after calling
884          * synchronize_irq(). So neither IRQTF_RUNTHREAD nor the
885          * oneshot mask bit can be set. We cannot verify that as we
886          * cannot touch the oneshot mask at this point anymore as
887          * __setup_irq() might have given out current's thread_mask
888          * again.
889          */
890         task_work_cancel(current, irq_thread_dtor);
891         return 0;
892 }
893
894 /**
895  *      irq_wake_thread - wake the irq thread for the action identified by dev_id
896  *      @irq:           Interrupt line
897  *      @dev_id:        Device identity for which the thread should be woken
898  *
899  */
900 void irq_wake_thread(unsigned int irq, void *dev_id)
901 {
902         struct irq_desc *desc = irq_to_desc(irq);
903         struct irqaction *action;
904         unsigned long flags;
905
906         if (!desc || WARN_ON(irq_settings_is_per_cpu_devid(desc)))
907                 return;
908
909         raw_spin_lock_irqsave(&desc->lock, flags);
910         for (action = desc->action; action; action = action->next) {
911                 if (action->dev_id == dev_id) {
912                         if (action->thread)
913                                 __irq_wake_thread(desc, action);
914                         break;
915                 }
916         }
917         raw_spin_unlock_irqrestore(&desc->lock, flags);
918 }
919 EXPORT_SYMBOL_GPL(irq_wake_thread);
920
921 static void irq_setup_forced_threading(struct irqaction *new)
922 {
923         if (!force_irqthreads)
924                 return;
925         if (new->flags & (IRQF_NO_THREAD | IRQF_PERCPU | IRQF_ONESHOT))
926                 return;
927
928         new->flags |= IRQF_ONESHOT;
929
930         if (!new->thread_fn) {
931                 set_bit(IRQTF_FORCED_THREAD, &new->thread_flags);
932                 new->thread_fn = new->handler;
933                 new->handler = irq_default_primary_handler;
934         }
935 }
936
937 static int irq_request_resources(struct irq_desc *desc)
938 {
939         struct irq_data *d = &desc->irq_data;
940         struct irq_chip *c = d->chip;
941
942         return c->irq_request_resources ? c->irq_request_resources(d) : 0;
943 }
944
945 static void irq_release_resources(struct irq_desc *desc)
946 {
947         struct irq_data *d = &desc->irq_data;
948         struct irq_chip *c = d->chip;
949
950         if (c->irq_release_resources)
951                 c->irq_release_resources(d);
952 }
953
954 /*
955  * Internal function to register an irqaction - typically used to
956  * allocate special interrupts that are part of the architecture.
957  */
958 static int
959 __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
960 {
961         struct irqaction *old, **old_ptr;
962         unsigned long flags, thread_mask = 0;
963         int ret, nested, shared = 0;
964         cpumask_var_t mask;
965
966         if (!desc)
967                 return -EINVAL;
968
969         if (desc->irq_data.chip == &no_irq_chip)
970                 return -ENOSYS;
971         if (!try_module_get(desc->owner))
972                 return -ENODEV;
973
974         /*
975          * Check whether the interrupt nests into another interrupt
976          * thread.
977          */
978         nested = irq_settings_is_nested_thread(desc);
979         if (nested) {
980                 if (!new->thread_fn) {
981                         ret = -EINVAL;
982                         goto out_mput;
983                 }
984                 /*
985                  * Replace the primary handler which was provided from
986                  * the driver for non nested interrupt handling by the
987                  * dummy function which warns when called.
988                  */
989                 new->handler = irq_nested_primary_handler;
990         } else {
991                 if (irq_settings_can_thread(desc))
992                         irq_setup_forced_threading(new);
993         }
994
995         /*
996          * Create a handler thread when a thread function is supplied
997          * and the interrupt does not nest into another interrupt
998          * thread.
999          */
1000         if (new->thread_fn && !nested) {
1001                 struct task_struct *t;
1002                 static const struct sched_param param = {
1003                         .sched_priority = MAX_USER_RT_PRIO/2,
1004                 };
1005
1006                 t = kthread_create(irq_thread, new, "irq/%d-%s", irq,
1007                                    new->name);
1008                 if (IS_ERR(t)) {
1009                         ret = PTR_ERR(t);
1010                         goto out_mput;
1011                 }
1012
1013                 sched_setscheduler_nocheck(t, SCHED_FIFO, &param);
1014
1015                 /*
1016                  * We keep the reference to the task struct even if
1017                  * the thread dies to avoid that the interrupt code
1018                  * references an already freed task_struct.
1019                  */
1020                 get_task_struct(t);
1021                 new->thread = t;
1022                 /*
1023                  * Tell the thread to set its affinity. This is
1024                  * important for shared interrupt handlers as we do
1025                  * not invoke setup_affinity() for the secondary
1026                  * handlers as everything is already set up. Even for
1027                  * interrupts marked with IRQF_NOBALANCING this is
1028                  * correct as we want the thread to move to the cpu(s)
1029                  * on which the requesting code placed the interrupt.
1030                  */
1031                 set_bit(IRQTF_AFFINITY, &new->thread_flags);
1032         }
1033
1034         if (!alloc_cpumask_var(&mask, GFP_KERNEL)) {
1035                 ret = -ENOMEM;
1036                 goto out_thread;
1037         }
1038
1039         /*
1040          * Drivers are often written to work w/o knowledge about the
1041          * underlying irq chip implementation, so a request for a
1042          * threaded irq without a primary hard irq context handler
1043          * requires the ONESHOT flag to be set. Some irq chips like
1044          * MSI based interrupts are per se one shot safe. Check the
1045          * chip flags, so we can avoid the unmask dance at the end of
1046          * the threaded handler for those.
1047          */
1048         if (desc->irq_data.chip->flags & IRQCHIP_ONESHOT_SAFE)
1049                 new->flags &= ~IRQF_ONESHOT;
1050
1051         /*
1052          * The following block of code has to be executed atomically
1053          */
1054         raw_spin_lock_irqsave(&desc->lock, flags);
1055         old_ptr = &desc->action;
1056         old = *old_ptr;
1057         if (old) {
1058                 /*
1059                  * Can't share interrupts unless both agree to and are
1060                  * the same type (level, edge, polarity). So both flag
1061                  * fields must have IRQF_SHARED set and the bits which
1062                  * set the trigger type must match. Also all must
1063                  * agree on ONESHOT.
1064                  */
1065                 if (!((old->flags & new->flags) & IRQF_SHARED) ||
1066                     ((old->flags ^ new->flags) & IRQF_TRIGGER_MASK) ||
1067                     ((old->flags ^ new->flags) & IRQF_ONESHOT))
1068                         goto mismatch;
1069
1070                 /* All handlers must agree on per-cpuness */
1071                 if ((old->flags & IRQF_PERCPU) !=
1072                     (new->flags & IRQF_PERCPU))
1073                         goto mismatch;
1074
1075                 /* add new interrupt at end of irq queue */
1076                 do {
1077                         /*
1078                          * Or all existing action->thread_mask bits,
1079                          * so we can find the next zero bit for this
1080                          * new action.
1081                          */
1082                         thread_mask |= old->thread_mask;
1083                         old_ptr = &old->next;
1084                         old = *old_ptr;
1085                 } while (old);
1086                 shared = 1;
1087         }
1088
1089         /*
1090          * Setup the thread mask for this irqaction for ONESHOT. For
1091          * !ONESHOT irqs the thread mask is 0 so we can avoid a
1092          * conditional in irq_wake_thread().
1093          */
1094         if (new->flags & IRQF_ONESHOT) {
1095                 /*
1096                  * Unlikely to have 32 (or 64, on 64-bit) irqs sharing one line,
1097                  * but who knows.
1098                  */
1099                 if (thread_mask == ~0UL) {
1100                         ret = -EBUSY;
1101                         goto out_mask;
1102                 }
1103                 /*
1104                  * The thread_mask for the action is or'ed to
1105                  * desc->threads_active to indicate that the
1106                  * IRQF_ONESHOT thread handler has been woken, but not
1107                  * yet finished. The bit is cleared when a thread
1108                  * completes. When all threads of a shared interrupt
1109                  * line have completed desc->threads_active becomes
1110                  * zero and the interrupt line is unmasked. See
1111                  * handle.c:irq_wake_thread() for further information.
1112                  *
1113                  * If no thread is woken by primary (hard irq context)
1114                  * interrupt handlers, then desc->threads_active is
1115                  * also checked for zero to unmask the irq line in the
1116                  * affected hard irq flow handlers
1117                  * (handle_[fasteoi|level]_irq).
1118                  *
1119                  * The new action gets the first zero bit of
1120                  * thread_mask assigned. See the loop above which or's
1121                  * all existing action->thread_mask bits.
1122                  */
1123                 new->thread_mask = 1 << ffz(thread_mask);
1124
1125         } else if (new->handler == irq_default_primary_handler &&
1126                    !(desc->irq_data.chip->flags & IRQCHIP_ONESHOT_SAFE)) {
1127                 /*
1128                  * The interrupt was requested with handler = NULL, so
1129                  * we use the default primary handler for it. But it
1130                  * does not have the oneshot flag set. In combination
1131                  * with level interrupts this is deadly, because the
1132                  * default primary handler just wakes the thread, then
1133                  * the irq line is reenabled, but the device still
1134                  * has the level irq asserted. Rinse and repeat....
1135                  *
1136                  * While this works for edge type interrupts, we play
1137                  * it safe and reject unconditionally because we can't
1138                  * say for sure which type this interrupt really
1139                  * has. The type flags are unreliable as the
1140                  * underlying chip implementation can override them.
1141                  */
1142                 pr_err("Threaded irq requested with handler=NULL and !ONESHOT for irq %d\n",
1143                        irq);
1144                 ret = -EINVAL;
1145                 goto out_mask;
1146         }
1147
1148         if (!shared) {
1149                 ret = irq_request_resources(desc);
1150                 if (ret) {
1151                         pr_err("Failed to request resources for %s (irq %d) on irqchip %s\n",
1152                                new->name, irq, desc->irq_data.chip->name);
1153                         goto out_mask;
1154                 }
1155
1156                 init_waitqueue_head(&desc->wait_for_threads);
1157
1158                 /* Setup the type (level, edge polarity) if configured: */
1159                 if (new->flags & IRQF_TRIGGER_MASK) {
1160                         ret = __irq_set_trigger(desc, irq,
1161                                         new->flags & IRQF_TRIGGER_MASK);
1162
1163                         if (ret)
1164                                 goto out_mask;
1165                 }
1166
1167                 desc->istate &= ~(IRQS_AUTODETECT | IRQS_SPURIOUS_DISABLED | \
1168                                   IRQS_ONESHOT | IRQS_WAITING);
1169                 irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);
1170
1171                 if (new->flags & IRQF_PERCPU) {
1172                         irqd_set(&desc->irq_data, IRQD_PER_CPU);
1173                         irq_settings_set_per_cpu(desc);
1174                 }
1175
1176                 if (new->flags & IRQF_ONESHOT)
1177                         desc->istate |= IRQS_ONESHOT;
1178
1179                 if (irq_settings_can_autoenable(desc))
1180                         irq_startup(desc, true);
1181                 else
1182                         /* Undo nested disables: */
1183                         desc->depth = 1;
1184
1185                 /* Exclude IRQ from balancing if requested */
1186                 if (new->flags & IRQF_NOBALANCING) {
1187                         irq_settings_set_no_balancing(desc);
1188                         irqd_set(&desc->irq_data, IRQD_NO_BALANCING);
1189                 }
1190
1191                 /* Set default affinity mask once everything is setup */
1192                 setup_affinity(irq, desc, mask);
1193
1194         } else if (new->flags & IRQF_TRIGGER_MASK) {
1195                 unsigned int nmsk = new->flags & IRQF_TRIGGER_MASK;
1196                 unsigned int omsk = irq_settings_get_trigger_mask(desc);
1197
1198                 if (nmsk != omsk)
1199                         /* hope the handler works with the current trigger mode */
1200                         pr_warning("irq %d uses trigger mode %u; requested %u\n",
1201                                    irq, nmsk, omsk);
1202         }
1203
1204         new->irq = irq;
1205         *old_ptr = new;
1206
1207         irq_pm_install_action(desc, new);
1208
1209         /* Reset broken irq detection when installing new handler */
1210         desc->irq_count = 0;
1211         desc->irqs_unhandled = 0;
1212
1213         /*
1214          * Check whether we disabled the irq via the spurious handler
1215          * before. Reenable it and give it another chance.
1216          */
1217         if (shared && (desc->istate & IRQS_SPURIOUS_DISABLED)) {
1218                 desc->istate &= ~IRQS_SPURIOUS_DISABLED;
1219                 __enable_irq(desc, irq);
1220         }
1221
1222         raw_spin_unlock_irqrestore(&desc->lock, flags);
1223
1224         /*
1225          * Strictly no need to wake it up, but hung_task complains
1226          * when no hard interrupt wakes the thread up.
1227          */
1228         if (new->thread)
1229                 wake_up_process(new->thread);
1230
1231         register_irq_proc(irq, desc);
1232         new->dir = NULL;
1233         register_handler_proc(irq, new);
1234         free_cpumask_var(mask);
1235
1236         return 0;
1237
1238 mismatch:
1239         if (!(new->flags & IRQF_PROBE_SHARED)) {
1240                 pr_err("Flags mismatch irq %d. %08x (%s) vs. %08x (%s)\n",
1241                        irq, new->flags, new->name, old->flags, old->name);
1242 #ifdef CONFIG_DEBUG_SHIRQ
1243                 dump_stack();
1244 #endif
1245         }
1246         ret = -EBUSY;
1247
1248 out_mask:
1249         raw_spin_unlock_irqrestore(&desc->lock, flags);
1250         free_cpumask_var(mask);
1251
1252 out_thread:
1253         if (new->thread) {
1254                 struct task_struct *t = new->thread;
1255
1256                 new->thread = NULL;
1257                 kthread_stop(t);
1258                 put_task_struct(t);
1259         }
1260 out_mput:
1261         module_put(desc->owner);
1262         return ret;
1263 }
1264
1265 /**
1266  *      setup_irq - setup an interrupt
1267  *      @irq: Interrupt line to setup
1268  *      @act: irqaction for the interrupt
1269  *
1270  * Used to statically set up interrupts in the early boot process.
1271  */
1272 int setup_irq(unsigned int irq, struct irqaction *act)
1273 {
1274         int retval;
1275         struct irq_desc *desc = irq_to_desc(irq);
1276
1277         if (WARN_ON(irq_settings_is_per_cpu_devid(desc)))
1278                 return -EINVAL;
1279         chip_bus_lock(desc);
1280         retval = __setup_irq(irq, desc, act);
1281         chip_bus_sync_unlock(desc);
1282
1283         return retval;
1284 }
1285 EXPORT_SYMBOL_GPL(setup_irq);
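/*
 * Usage sketch (illustrative; the foo_* names are hypothetical): early
 * boot code, e.g. a board timer, passes a statically allocated irqaction
 * because request_irq() and its dynamic allocation are not convenient
 * that early.
 *
 *	static struct irqaction foo_timer_irq = {
 *		.handler = foo_timer_interrupt,
 *		.flags   = IRQF_TIMER,
 *		.name    = "foo-timer",
 *	};
 *
 *	setup_irq(FOO_TIMER_IRQ, &foo_timer_irq);
 */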
1286
1287 /*
1288  * Internal function to unregister an irqaction - used to free
1289  * regular and special interrupts that are part of the architecture.
1290  */
1291 static struct irqaction *__free_irq(unsigned int irq, void *dev_id)
1292 {
1293         struct irq_desc *desc = irq_to_desc(irq);
1294         struct irqaction *action, **action_ptr;
1295         unsigned long flags;
1296
1297         WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq);
1298
1299         if (!desc)
1300                 return NULL;
1301
1302         raw_spin_lock_irqsave(&desc->lock, flags);
1303
1304         /*
1305          * There can be multiple actions per IRQ descriptor, find the right
1306          * one based on the dev_id:
1307          */
1308         action_ptr = &desc->action;
1309         for (;;) {
1310                 action = *action_ptr;
1311
1312                 if (!action) {
1313                         WARN(1, "Trying to free already-free IRQ %d\n", irq);
1314                         raw_spin_unlock_irqrestore(&desc->lock, flags);
1315
1316                         return NULL;
1317                 }
1318
1319                 if (action->dev_id == dev_id)
1320                         break;
1321                 action_ptr = &action->next;
1322         }
1323
1324         /* Found it - now remove it from the list of entries: */
1325         *action_ptr = action->next;
1326
1327         irq_pm_remove_action(desc, action);
1328
1329         /* If this was the last handler, shut down the IRQ line: */
1330         if (!desc->action) {
1331                 irq_shutdown(desc);
1332                 irq_release_resources(desc);
1333         }
1334
1335 #ifdef CONFIG_SMP
1336         /* make sure affinity_hint is cleaned up */
1337         if (WARN_ON_ONCE(desc->affinity_hint))
1338                 desc->affinity_hint = NULL;
1339 #endif
1340
1341         raw_spin_unlock_irqrestore(&desc->lock, flags);
1342
1343         unregister_handler_proc(irq, action);
1344
1345         /* Make sure it's not being used on another CPU: */
1346         synchronize_irq(irq);
1347
1348 #ifdef CONFIG_DEBUG_SHIRQ
1349         /*
1350          * It's a shared IRQ -- the driver ought to be prepared for an IRQ
1351          * event to happen even now that it's being freed, so let's make sure that
1352          * is so by doing an extra call to the handler ....
1353          *
1354          * ( We do this after actually deregistering it, to make sure that a
1355          *   'real' IRQ doesn't run in parallel with our fake. )
1356          */
1357         if (action->flags & IRQF_SHARED) {
1358                 local_irq_save(flags);
1359                 action->handler(irq, dev_id);
1360                 local_irq_restore(flags);
1361         }
1362 #endif
1363
1364         if (action->thread) {
1365                 kthread_stop(action->thread);
1366                 put_task_struct(action->thread);
1367         }
1368
1369         module_put(desc->owner);
1370         return action;
1371 }
1372
1373 /**
1374  *      remove_irq - free an interrupt
1375  *      @irq: Interrupt line to free
1376  *      @act: irqaction for the interrupt
1377  *
1378  * Used to remove interrupts statically set up by the early boot process.
1379  */
1380 void remove_irq(unsigned int irq, struct irqaction *act)
1381 {
1382         struct irq_desc *desc = irq_to_desc(irq);
1383
1384         if (desc && !WARN_ON(irq_settings_is_per_cpu_devid(desc)))
1385             __free_irq(irq, act->dev_id);
1386 }
1387 EXPORT_SYMBOL_GPL(remove_irq);
1388
1389 /**
1390  *      free_irq - free an interrupt allocated with request_irq
1391  *      @irq: Interrupt line to free
1392  *      @dev_id: Device identity to free
1393  *
1394  *      Remove an interrupt handler. The handler is removed and if the
1395  *      interrupt line is no longer in use by any driver it is disabled.
1396  *      On a shared IRQ the caller must ensure the interrupt is disabled
1397  *      on the card it drives before calling this function. The function
1398  *      does not return until any executing interrupts for this IRQ
1399  *      have completed.
1400  *
1401  *      This function must not be called from interrupt context.
1402  */
1403 void free_irq(unsigned int irq, void *dev_id)
1404 {
1405         struct irq_desc *desc = irq_to_desc(irq);
1406
1407         if (!desc || WARN_ON(irq_settings_is_per_cpu_devid(desc)))
1408                 return;
1409
1410 #ifdef CONFIG_SMP
1411         if (WARN_ON(desc->affinity_notify))
1412                 desc->affinity_notify = NULL;
1413 #endif
1414
1415         chip_bus_lock(desc);
1416         kfree(__free_irq(irq, dev_id));
1417         chip_bus_sync_unlock(desc);
1418 }
1419 EXPORT_SYMBOL(free_irq);
1420
1421 /**
1422  *      request_threaded_irq - allocate an interrupt line
1423  *      @irq: Interrupt line to allocate
1424  *      @handler: Function to be called when the IRQ occurs.
1425  *                Primary handler for threaded interrupts
1426  *                If NULL and thread_fn != NULL the default
1427  *                primary handler is installed
1428  *      @thread_fn: Function called from the irq handler thread
1429  *                  If NULL, no irq thread is created
1430  *      @irqflags: Interrupt type flags
1431  *      @devname: An ascii name for the claiming device
1432  *      @dev_id: A cookie passed back to the handler function
1433  *
1434  *      This call allocates interrupt resources and enables the
1435  *      interrupt line and IRQ handling. From the point this
1436  *      call is made your handler function may be invoked. Since
1437  *      your handler function must clear any interrupt the board
1438  *      raises, you must take care both to initialise your hardware
1439  *      and to set up the interrupt handler in the right order.
1440  *
1441  *      If you want to set up a threaded irq handler for your device
1442  *      then you need to supply @handler and @thread_fn. @handler is
1443  *      still called in hard interrupt context and has to check
1444  *      whether the interrupt originates from the device. If yes it
1445  *      needs to disable the interrupt on the device and return
1446  *      IRQ_WAKE_THREAD which will wake up the handler thread and run
1447  *      @thread_fn. This split handler design is necessary to support
1448  *      shared interrupts.
1449  *
1450  *      Dev_id must be globally unique. Normally the address of the
1451  *      device data structure is used as the cookie. Since the handler
1452  *      receives this value it makes sense to use it.
1453  *
1454  *      If your interrupt is shared you must pass a non NULL dev_id
1455  *      as this is required when freeing the interrupt.
1456  *
1457  *      Flags:
1458  *
1459  *      IRQF_SHARED             Interrupt is shared
1460  *      IRQF_TRIGGER_*          Specify active edge(s) or level
1461  *
1462  */
1463 int request_threaded_irq(unsigned int irq, irq_handler_t handler,
1464                          irq_handler_t thread_fn, unsigned long irqflags,
1465                          const char *devname, void *dev_id)
1466 {
1467         struct irqaction *action;
1468         struct irq_desc *desc;
1469         int retval;
1470
1471         /*
1472          * Sanity-check: shared interrupts must pass in a real dev-ID,
1473          * otherwise we'll have trouble later trying to figure out
1474          * which interrupt is which (messes up the interrupt freeing
1475          * logic etc).
1476          */
1477         if ((irqflags & IRQF_SHARED) && !dev_id)
1478                 return -EINVAL;
1479
1480         desc = irq_to_desc(irq);
1481         if (!desc)
1482                 return -EINVAL;
1483
1484         if (!irq_settings_can_request(desc) ||
1485             WARN_ON(irq_settings_is_per_cpu_devid(desc)))
1486                 return -EINVAL;
1487
1488         if (!handler) {
1489                 if (!thread_fn)
1490                         return -EINVAL;
1491                 handler = irq_default_primary_handler;
1492         }
1493
1494         action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
1495         if (!action)
1496                 return -ENOMEM;
1497
1498         action->handler = handler;
1499         action->thread_fn = thread_fn;
1500         action->flags = irqflags;
1501         action->name = devname;
1502         action->dev_id = dev_id;
1503
1504         chip_bus_lock(desc);
1505         retval = __setup_irq(irq, desc, action);
1506         chip_bus_sync_unlock(desc);
1507
1508         if (retval)
1509                 kfree(action);
1510
1511 #ifdef CONFIG_DEBUG_SHIRQ_FIXME
1512         if (!retval && (irqflags & IRQF_SHARED)) {
1513                 /*
1514                  * It's a shared IRQ -- the driver ought to be prepared for it
1515                  * to happen immediately, so let's make sure....
1516                  * We disable the irq to make sure that a 'real' IRQ doesn't
1517                  * run in parallel with our fake.
1518                  */
1519                 unsigned long flags;
1520
1521                 disable_irq(irq);
1522                 local_irq_save(flags);
1523
1524                 handler(irq, dev_id);
1525
1526                 local_irq_restore(flags);
1527                 enable_irq(irq);
1528         }
1529 #endif
1530         return retval;
1531 }
1532 EXPORT_SYMBOL(request_threaded_irq);
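
/*
 * Usage sketch (not part of this file): a driver splitting its handler as
 * described above, assuming <linux/interrupt.h> is included.  "struct
 * my_device", "my_device_raised_irq", "my_device_mask_irq" and
 * "my_device_do_work" are hypothetical driver-side helpers.
 *
 *	static irqreturn_t my_primary_handler(int irq, void *dev_id)
 *	{
 *		struct my_device *dev = dev_id;
 *
 *		if (!my_device_raised_irq(dev))
 *			return IRQ_NONE;	// not ours on a shared line
 *
 *		my_device_mask_irq(dev);	// quiesce the device ...
 *		return IRQ_WAKE_THREAD;		// ... and run my_thread_fn()
 *	}
 *
 *	static irqreturn_t my_thread_fn(int irq, void *dev_id)
 *	{
 *		my_device_do_work(dev_id);	// may sleep here
 *		return IRQ_HANDLED;
 *	}
 *
 *	// in the probe path, with "dev" used as the shared-IRQ cookie:
 *	int ret = request_threaded_irq(irq, my_primary_handler, my_thread_fn,
 *				       IRQF_SHARED, "my-device", dev);
 */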
1533
1534 /**
1535  *      request_any_context_irq - allocate an interrupt line
1536  *      @irq: Interrupt line to allocate
1537  *      @handler: Function to be called when the IRQ occurs.
1538  *                Threaded handler for threaded interrupts.
1539  *      @flags: Interrupt type flags
1540  *      @name: An ASCII name for the claiming device
1541  *      @dev_id: A cookie passed back to the handler function
1542  *
1543  *      This call allocates interrupt resources and enables the
1544  *      interrupt line and IRQ handling. It selects either a
1545  *      hardirq or threaded handling method depending on the
1546  *      context.
1547  *
1548  *      On failure, it returns a negative value. On success, it returns
1549  *      either IRQC_IS_HARDIRQ or IRQC_IS_NESTED; a usage sketch follows.
1550  */
1551 int request_any_context_irq(unsigned int irq, irq_handler_t handler,
1552                             unsigned long flags, const char *name, void *dev_id)
1553 {
1554         struct irq_desc *desc = irq_to_desc(irq);
1555         int ret;
1556
1557         if (!desc)
1558                 return -EINVAL;
1559
1560         if (irq_settings_is_nested_thread(desc)) {
1561                 ret = request_threaded_irq(irq, NULL, handler,
1562                                            flags, name, dev_id);
1563                 return !ret ? IRQC_IS_NESTED : ret;
1564         }
1565
1566         ret = request_irq(irq, handler, flags, name, dev_id);
1567         return !ret ? IRQC_IS_HARDIRQ : ret;
1568 }
1569 EXPORT_SYMBOL_GPL(request_any_context_irq);
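
/*
 * Usage sketch (not part of this file): because a successful call returns
 * IRQC_IS_HARDIRQ or IRQC_IS_NESTED, callers should treat only negative
 * values as errors.  "my_handler" and "my_dev" are hypothetical.
 *
 *	int ret = request_any_context_irq(irq, my_handler, 0, "my-device",
 *					  my_dev);
 *	if (ret < 0)
 *		return ret;
 *	// here ret is IRQC_IS_HARDIRQ or IRQC_IS_NESTED, both >= 0
 */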
1570
1571 void enable_percpu_irq(unsigned int irq, unsigned int type)
1572 {
1573         unsigned int cpu = smp_processor_id();
1574         unsigned long flags;
1575         struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU);
1576
1577         if (!desc)
1578                 return;
1579
1580         type &= IRQ_TYPE_SENSE_MASK;
1581         if (type != IRQ_TYPE_NONE) {
1582                 int ret;
1583
1584                 ret = __irq_set_trigger(desc, irq, type);
1585
1586                 if (ret) {
1587                         WARN(1, "failed to set type for IRQ%d\n", irq);
1588                         goto out;
1589                 }
1590         }
1591
1592         irq_percpu_enable(desc, cpu);
1593 out:
1594         irq_put_desc_unlock(desc, flags);
1595 }
1596 EXPORT_SYMBOL_GPL(enable_percpu_irq);
1597
1598 void disable_percpu_irq(unsigned int irq)
1599 {
1600         unsigned int cpu = smp_processor_id();
1601         unsigned long flags;
1602         struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU);
1603
1604         if (!desc)
1605                 return;
1606
1607         irq_percpu_disable(desc, cpu);
1608         irq_put_desc_unlock(desc, flags);
1609 }
1610 EXPORT_SYMBOL_GPL(disable_percpu_irq);
1611
1612 /*
1613  * Internal function to unregister a percpu irqaction.
1614  */
1615 static struct irqaction *__free_percpu_irq(unsigned int irq, void __percpu *dev_id)
1616 {
1617         struct irq_desc *desc = irq_to_desc(irq);
1618         struct irqaction *action;
1619         unsigned long flags;
1620
1621         WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq);
1622
1623         if (!desc)
1624                 return NULL;
1625
1626         raw_spin_lock_irqsave(&desc->lock, flags);
1627
1628         action = desc->action;
1629         if (!action || action->percpu_dev_id != dev_id) {
1630                 WARN(1, "Trying to free already-free IRQ %d\n", irq);
1631                 goto bad;
1632         }
1633
1634         if (!cpumask_empty(desc->percpu_enabled)) {
1635                 WARN(1, "percpu IRQ %d still enabled on CPU%d!\n",
1636                      irq, cpumask_first(desc->percpu_enabled));
1637                 goto bad;
1638         }
1639
1640         /* Found it - now remove it from the list of entries: */
1641         desc->action = NULL;
1642
1643         raw_spin_unlock_irqrestore(&desc->lock, flags);
1644
1645         unregister_handler_proc(irq, action);
1646
1647         module_put(desc->owner);
1648         return action;
1649
1650 bad:
1651         raw_spin_unlock_irqrestore(&desc->lock, flags);
1652         return NULL;
1653 }
1654
1655 /**
1656  *      remove_percpu_irq - free a per-cpu interrupt
1657  *      @irq: Interrupt line to free
1658  *      @act: irqaction for the interrupt
1659  *
1660  * Used to remove interrupts statically set up by the early boot process.
1661  */
1662 void remove_percpu_irq(unsigned int irq, struct irqaction *act)
1663 {
1664         struct irq_desc *desc = irq_to_desc(irq);
1665
1666         if (desc && irq_settings_is_per_cpu_devid(desc))
1667             __free_percpu_irq(irq, act->percpu_dev_id);
1668 }
1669
1670 /**
1671  *      free_percpu_irq - free an interrupt allocated with request_percpu_irq
1672  *      @irq: Interrupt line to free
1673  *      @dev_id: Device identity to free
1674  *
1675  *      Remove a percpu interrupt handler. The handler is removed, but
1676  *      the interrupt line is not disabled; that must be done on each
1677  *      CPU before calling this function (a usage sketch follows). The
1678  *      function waits for any executing interrupts for this IRQ to complete.
1679  *
1680  *      This function must not be called from interrupt context.
1681  */
1682 void free_percpu_irq(unsigned int irq, void __percpu *dev_id)
1683 {
1684         struct irq_desc *desc = irq_to_desc(irq);
1685
1686         if (!desc || !irq_settings_is_per_cpu_devid(desc))
1687                 return;
1688
1689         chip_bus_lock(desc);
1690         kfree(__free_percpu_irq(irq, dev_id));
1691         chip_bus_sync_unlock(desc);
1692 }
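
/*
 * Usage sketch (not part of this file): tearing down a per-CPU interrupt.
 * The line has to be disabled on every CPU before the handler is freed,
 * and the cookie must match the one passed to request_percpu_irq().
 * "my_data" is the hypothetical __percpu cookie used at request time.
 *
 *	static void my_disable_this_cpu(void *info)
 *	{
 *		disable_percpu_irq(*(unsigned int *)info);
 *	}
 *
 *	on_each_cpu(my_disable_this_cpu, &irq, 1);	// disable everywhere
 *	free_percpu_irq(irq, my_data);			// then remove the handler
 */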
1693
1694 /**
1695  *      setup_percpu_irq - setup a per-cpu interrupt
1696  *      @irq: Interrupt line to setup
1697  *      @act: irqaction for the interrupt
1698  *
1699  * Used to statically set up per-cpu interrupts in early boot (sketch below).
1700  */
1701 int setup_percpu_irq(unsigned int irq, struct irqaction *act)
1702 {
1703         struct irq_desc *desc = irq_to_desc(irq);
1704         int retval;
1705
1706         if (!desc || !irq_settings_is_per_cpu_devid(desc))
1707                 return -EINVAL;
1708         chip_bus_lock(desc);
1709         retval = __setup_irq(irq, desc, act);
1710         chip_bus_sync_unlock(desc);
1711
1712         return retval;
1713 }
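
/*
 * Usage sketch (not part of this file): a hypothetical early-boot timer
 * driver registering its per-CPU interrupt with a statically allocated
 * irqaction instead of request_percpu_irq().  "struct my_timer_state",
 * "my_timer_handler" and the IRQ number 29 are illustrative only.
 *
 *	static DEFINE_PER_CPU(struct my_timer_state, my_timer_evt);
 *
 *	static struct irqaction my_timer_irqaction = {
 *		.name		= "my_local_timer",
 *		.flags		= IRQF_PERCPU | IRQF_TIMER,
 *		.handler	= my_timer_handler,
 *		.percpu_dev_id	= &my_timer_evt,
 *	};
 *
 *	setup_percpu_irq(29, &my_timer_irqaction);
 */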
1714
1715 /**
1716  *      request_percpu_irq - allocate a percpu interrupt line
1717  *      @irq: Interrupt line to allocate
1718  *      @handler: Function to be called when the IRQ occurs.
1719  *      @devname: An ASCII name for the claiming device
1720  *      @dev_id: A percpu cookie passed back to the handler function
1721  *
1722  *      This call allocates interrupt resources, but doesn't
1723  *      automatically enable the interrupt. Enabling has to be done on
1724  *      each CPU using enable_percpu_irq(); a usage sketch follows.
1725  *
1726  *      Dev_id must be globally unique. It is a per-cpu variable, and
1727  *      the handler gets called with the interrupted CPU's instance of
1728  *      that variable.
1729  */
1730 int request_percpu_irq(unsigned int irq, irq_handler_t handler,
1731                        const char *devname, void __percpu *dev_id)
1732 {
1733         struct irqaction *action;
1734         struct irq_desc *desc;
1735         int retval;
1736
1737         if (!dev_id)
1738                 return -EINVAL;
1739
1740         desc = irq_to_desc(irq);
1741         if (!desc || !irq_settings_can_request(desc) ||
1742             !irq_settings_is_per_cpu_devid(desc))
1743                 return -EINVAL;
1744
1745         action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
1746         if (!action)
1747                 return -ENOMEM;
1748
1749         action->handler = handler;
1750         action->flags = IRQF_PERCPU | IRQF_NO_SUSPEND;
1751         action->name = devname;
1752         action->percpu_dev_id = dev_id;
1753
1754         chip_bus_lock(desc);
1755         retval = __setup_irq(irq, desc, action);
1756         chip_bus_sync_unlock(desc);
1757
1758         if (retval)
1759                 kfree(action);
1760
1761         return retval;
1762 }
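
/*
 * Usage sketch (not part of this file): requesting a per-CPU interrupt and
 * then enabling it on each CPU, e.g. via on_each_cpu() or from CPU bring-up
 * code.  "struct my_state", "my_percpu_handler" and "my_enable_this_cpu"
 * are hypothetical; error handling of alloc_percpu() is omitted.
 *
 *	static void my_enable_this_cpu(void *info)
 *	{
 *		enable_percpu_irq(*(unsigned int *)info, IRQ_TYPE_NONE);
 *	}
 *
 *	struct my_state __percpu *my_data = alloc_percpu(struct my_state);
 *	int ret;
 *
 *	ret = request_percpu_irq(irq, my_percpu_handler, "my-ppi", my_data);
 *	if (ret)
 *		return ret;
 *
 *	// the request installs the handler but enables nothing; each CPU
 *	// enables its own copy of the line:
 *	on_each_cpu(my_enable_this_cpu, &irq, 1);
 */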