/*
 * Common interrupt code for 32 and 64 bit
 */
#include <linux/cpu.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/of.h>
#include <linux/seq_file.h>
#include <linux/smp.h>
#include <linux/ftrace.h>
#include <linux/delay.h>
#include <linux/export.h>

#include <asm/apic.h>
#include <asm/io_apic.h>
#include <asm/irq.h>
#include <asm/idle.h>
#include <asm/mce.h>
#include <asm/hw_irq.h>
#include <asm/desc.h>

#define CREATE_TRACE_POINTS
#include <asm/trace/irq_vectors.h>
DEFINE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
EXPORT_PER_CPU_SYMBOL(irq_stat);

DEFINE_PER_CPU(struct pt_regs *, irq_regs);
EXPORT_PER_CPU_SYMBOL(irq_regs);

atomic_t irq_err_count;

/* Function pointer for generic interrupt vector handling */
void (*x86_platform_ipi_callback)(void) = NULL;
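/*
 * Illustrative note: nothing in this file assigns the callback; platform
 * code installs it at boot, and the X86_PLATFORM_IPI handlers below only
 * invoke it after checking for non-NULL.
 */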

/*
 * 'what should we do if we get a hw irq event on an illegal vector'.
 * each architecture has to answer this themselves.
 */
void ack_bad_irq(unsigned int irq)
{
	if (printk_ratelimit())
		pr_err("unexpected IRQ trap at vector %02x\n", irq);

	/*
	 * Currently unexpected vectors happen only on SMP and APIC.
	 * We _must_ ack these because every local APIC has only N
	 * irq slots per priority level, and a 'hanging, unacked' IRQ
	 * holds up an irq slot - in excessive cases (when multiple
	 * unexpected vectors occur) that might lock up the APIC
	 * completely.
	 * But only ack when the APIC is enabled -AK
	 */
	ack_APIC_irq();
}

#define irq_stats(x)		(&per_cpu(irq_stat, x))
/*
 * /proc/interrupts printing for arch specific interrupts
 */
int arch_show_interrupts(struct seq_file *p, int prec)
{
	int j;

	seq_printf(p, "%*s: ", prec, "NMI");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->__nmi_count);
	seq_puts(p, "  Non-maskable interrupts\n");
#ifdef CONFIG_X86_LOCAL_APIC
	seq_printf(p, "%*s: ", prec, "LOC");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->apic_timer_irqs);
	seq_puts(p, "  Local timer interrupts\n");

	seq_printf(p, "%*s: ", prec, "SPU");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->irq_spurious_count);
	seq_puts(p, "  Spurious interrupts\n");
	seq_printf(p, "%*s: ", prec, "PMI");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->apic_perf_irqs);
	seq_puts(p, "  Performance monitoring interrupts\n");
	seq_printf(p, "%*s: ", prec, "IWI");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->apic_irq_work_irqs);
	seq_puts(p, "  IRQ work interrupts\n");
	seq_printf(p, "%*s: ", prec, "RTR");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->icr_read_retry_count);
	seq_puts(p, "  APIC ICR read retries\n");
#endif
	if (x86_platform_ipi_callback) {
		seq_printf(p, "%*s: ", prec, "PLT");
		for_each_online_cpu(j)
			seq_printf(p, "%10u ", irq_stats(j)->x86_platform_ipis);
		seq_puts(p, "  Platform interrupts\n");
	}
#ifdef CONFIG_SMP
	seq_printf(p, "%*s: ", prec, "RES");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->irq_resched_count);
	seq_puts(p, "  Rescheduling interrupts\n");
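	/*
	 * Illustrative note: TLB shootdowns are delivered through the
	 * function-call IPI and are therefore included in irq_call_count;
	 * subtracting irq_tlb_count below keeps the CAL line to genuine
	 * cross-CPU function calls, while the TLB line reports the
	 * shootdowns separately.
	 */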
	seq_printf(p, "%*s: ", prec, "CAL");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->irq_call_count -
					irq_stats(j)->irq_tlb_count);
	seq_puts(p, "  Function call interrupts\n");
	seq_printf(p, "%*s: ", prec, "TLB");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->irq_tlb_count);
	seq_puts(p, "  TLB shootdowns\n");
#endif
#ifdef CONFIG_X86_THERMAL_VECTOR
	seq_printf(p, "%*s: ", prec, "TRM");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->irq_thermal_count);
	seq_puts(p, "  Thermal event interrupts\n");
#endif
#ifdef CONFIG_X86_MCE_THRESHOLD
	seq_printf(p, "%*s: ", prec, "THR");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->irq_threshold_count);
	seq_puts(p, "  Threshold APIC interrupts\n");
#endif
#ifdef CONFIG_X86_MCE_AMD
	seq_printf(p, "%*s: ", prec, "DFR");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->irq_deferred_error_count);
	seq_puts(p, "  Deferred Error APIC interrupts\n");
#endif
#ifdef CONFIG_X86_MCE
	seq_printf(p, "%*s: ", prec, "MCE");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", per_cpu(mce_exception_count, j));
	seq_puts(p, "  Machine check exceptions\n");
	seq_printf(p, "%*s: ", prec, "MCP");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", per_cpu(mce_poll_count, j));
	seq_puts(p, "  Machine check polls\n");
#endif
#if IS_ENABLED(CONFIG_HYPERV) || defined(CONFIG_XEN)
	seq_printf(p, "%*s: ", prec, "HYP");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->irq_hv_callback_count);
	seq_puts(p, "  Hypervisor callback interrupts\n");
#endif
	seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
#if defined(CONFIG_X86_IO_APIC)
	seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read(&irq_mis_count));
#endif
#ifdef CONFIG_HAVE_KVM
	seq_printf(p, "%*s: ", prec, "PIN");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->kvm_posted_intr_ipis);
	seq_puts(p, "  Posted-interrupt notification event\n");

	seq_printf(p, "%*s: ", prec, "PIW");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ",
			   irq_stats(j)->kvm_posted_intr_wakeup_ipis);
	seq_puts(p, "  Posted-interrupt wakeup event\n");
#endif
	return 0;
}
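
/*
 * Illustrative sample of the lines the function above contributes to
 * /proc/interrupts on a two-CPU box (counts invented for the example,
 * column width comes from the prec argument):
 *
 *	NMI:          0          0   Non-maskable interrupts
 *	LOC:     514357     489234   Local timer interrupts
 *	RES:       1024       2048   Rescheduling interrupts
 *	ERR:          0
 */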

/*
 * /proc/stat helpers.
 */
u64 arch_irq_stat_cpu(unsigned int cpu)
{
	u64 sum = irq_stats(cpu)->__nmi_count;

#ifdef CONFIG_X86_LOCAL_APIC
	sum += irq_stats(cpu)->apic_timer_irqs;
	sum += irq_stats(cpu)->irq_spurious_count;
	sum += irq_stats(cpu)->apic_perf_irqs;
	sum += irq_stats(cpu)->apic_irq_work_irqs;
	sum += irq_stats(cpu)->icr_read_retry_count;
#endif
	if (x86_platform_ipi_callback)
		sum += irq_stats(cpu)->x86_platform_ipis;
#ifdef CONFIG_SMP
	sum += irq_stats(cpu)->irq_resched_count;
	sum += irq_stats(cpu)->irq_call_count;
#endif
#ifdef CONFIG_X86_THERMAL_VECTOR
	sum += irq_stats(cpu)->irq_thermal_count;
#endif
#ifdef CONFIG_X86_MCE_THRESHOLD
	sum += irq_stats(cpu)->irq_threshold_count;
#endif
#ifdef CONFIG_X86_MCE
	sum += per_cpu(mce_exception_count, cpu);
	sum += per_cpu(mce_poll_count, cpu);
#endif
	return sum;
}

u64 arch_irq_stat(void)
{
	u64 sum = atomic_read(&irq_err_count);

	return sum;
}


/*
 * do_IRQ handles all normal device IRQ's (the special
 * SMP cross-CPU interrupts have their own specific
 * handlers).
 */
__visible unsigned int __irq_entry do_IRQ(struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);

	/* high bit used in ret_from_ code  */
	unsigned vector = ~regs->orig_ax;
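	/*
	 * Illustrative note: the entry stub stores the vector number
	 * bitwise-negated in orig_ax, so the saved value is negative and
	 * cannot be confused with a syscall number; the ~ above undoes
	 * that encoding.
	 */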
	unsigned irq;

	irq_enter();
	exit_idle();

	irq = __this_cpu_read(vector_irq[vector]);

	if (!handle_irq(irq, regs)) {
		ack_APIC_irq();

		if (irq != VECTOR_RETRIGGERED) {
			pr_emerg_ratelimited("%s: %d.%d No irq handler for vector (irq %d)\n",
					     __func__, smp_processor_id(),
					     vector, irq);
		} else {
			__this_cpu_write(vector_irq[vector], VECTOR_UNDEFINED);
		}
	}

	irq_exit();

	set_irq_regs(old_regs);
	return 1;
}

/*
 * Handler for X86_PLATFORM_IPI_VECTOR.
 */
void __smp_x86_platform_ipi(void)
{
	inc_irq_stat(x86_platform_ipis);

	if (x86_platform_ipi_callback)
		x86_platform_ipi_callback();
}

__visible void smp_x86_platform_ipi(struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);

	entering_ack_irq();
	__smp_x86_platform_ipi();
	exiting_irq();
	set_irq_regs(old_regs);
}

#ifdef CONFIG_HAVE_KVM
static void dummy_handler(void) {}
static void (*kvm_posted_intr_wakeup_handler)(void) = dummy_handler;

void kvm_set_posted_intr_wakeup_handler(void (*handler)(void))
{
	if (handler)
		kvm_posted_intr_wakeup_handler = handler;
	else
		kvm_posted_intr_wakeup_handler = dummy_handler;
}
EXPORT_SYMBOL_GPL(kvm_set_posted_intr_wakeup_handler);
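
/*
 * Illustrative usage sketch (lives in KVM, not in this file): the VMX
 * code registers its handler once during hardware setup,
 *
 *	kvm_set_posted_intr_wakeup_handler(wakeup_handler);
 *
 * and passes NULL on teardown, which falls back to dummy_handler above.
 */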

/*
 * Handler for POSTED_INTERRUPT_VECTOR.
 */
__visible void smp_kvm_posted_intr_ipi(struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);

	entering_ack_irq();
	inc_irq_stat(kvm_posted_intr_ipis);
	exiting_irq();
	set_irq_regs(old_regs);
}

/*
 * Handler for POSTED_INTERRUPT_WAKEUP_VECTOR.
 */
__visible void smp_kvm_posted_intr_wakeup_ipi(struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);

	entering_ack_irq();
	inc_irq_stat(kvm_posted_intr_wakeup_ipis);
	kvm_posted_intr_wakeup_handler();
	exiting_irq();
	set_irq_regs(old_regs);
}
#endif

__visible void smp_trace_x86_platform_ipi(struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);

	entering_ack_irq();
	trace_x86_platform_ipi_entry(X86_PLATFORM_IPI_VECTOR);
	__smp_x86_platform_ipi();
	trace_x86_platform_ipi_exit(X86_PLATFORM_IPI_VECTOR);
	exiting_irq();
	set_irq_regs(old_regs);
}

EXPORT_SYMBOL_GPL(vector_used_by_percpu_irq);

#ifdef CONFIG_HOTPLUG_CPU

/* These two declarations are only used in check_irq_vectors_for_cpu_disable()
 * below, which is protected by stop_machine().  Putting them on the stack
 * results in a stack frame overflow.  Dynamically allocating could result in a
 * failure so declare these two cpumasks as global.
 */
static struct cpumask affinity_new, online_new;

/*
 * This cpu is going to be removed and its vectors migrated to the remaining
 * online cpus.  Check to see if there are enough vectors in the remaining cpus.
 * This function is protected by stop_machine().
 */
int check_irq_vectors_for_cpu_disable(void)
{
	int irq, cpu;
	unsigned int this_cpu, vector, this_count, count;
	struct irq_desc *desc;
	struct irq_data *data;

	this_cpu = smp_processor_id();
	cpumask_copy(&online_new, cpu_online_mask);
	cpumask_clear_cpu(this_cpu, &online_new);

	this_count = 0;
	for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) {
		irq = __this_cpu_read(vector_irq[vector]);
		if (irq >= 0) {
			desc = irq_to_desc(irq);
			if (!desc)
				continue;

			data = irq_desc_get_irq_data(desc);
			cpumask_copy(&affinity_new, data->affinity);
			cpumask_clear_cpu(this_cpu, &affinity_new);

			/* Do not count inactive or per-cpu irqs. */
			if (!irq_has_action(irq) || irqd_is_per_cpu(data))
				continue;

			/*
			 * A single irq may be mapped to multiple
			 * cpus' vector_irq[] (for example IOAPIC cluster
			 * mode).  In this case we have two
			 * possibilities:
			 *
			 * 1) the resulting affinity mask is empty; that is,
			 * the down'd cpu is the last cpu in the irq's
			 * affinity mask, or
			 *
			 * 2) the resulting affinity mask is no longer
			 * a subset of the online cpus but the affinity
			 * mask is not zero; that is, the down'd cpu is the
			 * last online cpu in a user-set affinity mask.
			 */
			if (cpumask_empty(&affinity_new) ||
			    !cpumask_subset(&affinity_new, &online_new))
				this_count++;
		}
	}

	count = 0;
	for_each_online_cpu(cpu) {
		if (cpu == this_cpu)
			continue;
		/*
		 * We scan from FIRST_EXTERNAL_VECTOR to the first system
		 * vector. If the vector is marked in the used vectors
		 * bitmap or an irq is assigned to it, we don't count
		 * it as available.
		 */
		for (vector = FIRST_EXTERNAL_VECTOR;
		     vector < first_system_vector; vector++) {
			if (!test_bit(vector, used_vectors) &&
			    per_cpu(vector_irq, cpu)[vector] < 0)
				count++;
		}
	}

	if (count < this_count) {
		pr_warn("CPU %d disable failed: CPU has %u vectors assigned and there are only %u available.\n",
			this_cpu, this_count, count);
		return -ERANGE;
	}
	return 0;
}
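
/*
 * Illustrative worked example: if the outgoing CPU owns three vectors
 * whose affinity would become empty or fall entirely off the online
 * mask, but the surviving CPUs only have two free, non-system vectors
 * between them, the check above fails with -ERANGE and the CPU is not
 * taken down.
 */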

/* A cpu has been removed from cpu_online_mask.  Reset irq affinities. */
void fixup_irqs(void)
{
	unsigned int irq, vector;
	static int warned;
	struct irq_desc *desc;
	struct irq_data *data;
	struct irq_chip *chip;
	int ret;

	for_each_irq_desc(irq, desc) {
		int break_affinity = 0;
		int set_affinity = 1;
		const struct cpumask *affinity;

		if (!desc)
			continue;
		if (irq == 2)
			continue;

		/* interrupts are disabled at this point */
		raw_spin_lock(&desc->lock);

		data = irq_desc_get_irq_data(desc);
		affinity = data->affinity;
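		/*
		 * Illustrative note: there is nothing to fix up when the
		 * irq has no handler, is strictly per-cpu, or is already
		 * affine only to CPUs that remain online.
		 */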
		if (!irq_has_action(irq) || irqd_is_per_cpu(data) ||
		    cpumask_subset(affinity, cpu_online_mask)) {
			raw_spin_unlock(&desc->lock);
			continue;
		}

		/*
		 * Complete the irq move. This cpu is going down and, in the
		 * non intr-remapping case, we can't wait till this interrupt
		 * arrives at this cpu before completing the irq move.
		 */
		irq_force_complete_move(irq);

		if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) {
			break_affinity = 1;
			affinity = cpu_online_mask;
		}

		chip = irq_data_get_irq_chip(data);
		if (!irqd_can_move_in_process_context(data) && chip->irq_mask)
			chip->irq_mask(data);

		if (chip->irq_set_affinity) {
			ret = chip->irq_set_affinity(data, affinity, true);
			if (ret == -ENOSPC)
				pr_crit("IRQ %d set affinity failed because there are no available vectors.  The device assigned to this IRQ is unstable.\n", irq);
		} else {
			if (!(warned++))
				set_affinity = 0;
		}

		/*
		 * We unmask if the irq was not marked masked by the
		 * core code. That respects the lazy irq disable
		 * behaviour.
		 */
		if (!irqd_can_move_in_process_context(data) &&
		    !irqd_irq_masked(data) && chip->irq_unmask)
			chip->irq_unmask(data);

		raw_spin_unlock(&desc->lock);

		if (break_affinity && set_affinity)
			pr_notice("Broke affinity for irq %i\n", irq);
		else if (!set_affinity)
			pr_notice("Cannot set affinity for irq %i\n", irq);
	}

	/*
	 * We can remove mdelay() and then send spurious interrupts to
	 * new cpu targets for all the irqs that were handled previously by
	 * this cpu. While it works, I have seen spurious interrupt messages
	 * (nothing wrong but still...).
	 *
	 * So for now, retain mdelay(1) and check the IRR and then send those
	 * interrupts to new targets as this cpu is already offlined...
	 */
	mdelay(1);

	for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) {
		unsigned int irr;

		if (__this_cpu_read(vector_irq[vector]) <= VECTOR_UNDEFINED)
			continue;

		irr = apic_read(APIC_IRR + (vector / 32 * 0x10));
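		/*
		 * Illustrative note: the 256-bit IRR is exposed as eight
		 * 32-bit APIC registers spaced 0x10 apart, so vector / 32
		 * selects the register and vector % 32 the bit within it.
		 */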
		if (irr & (1 << (vector % 32))) {
			irq = __this_cpu_read(vector_irq[vector]);

			desc = irq_to_desc(irq);
			data = irq_desc_get_irq_data(desc);
			chip = irq_data_get_irq_chip(data);
			raw_spin_lock(&desc->lock);
			if (chip->irq_retrigger) {
				chip->irq_retrigger(data);
				__this_cpu_write(vector_irq[vector], VECTOR_RETRIGGERED);
			}
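			/*
			 * Illustrative note: marking the slot
			 * VECTOR_RETRIGGERED keeps the cleanup below from
			 * wiping it, and do_IRQ clears it quietly if the
			 * retriggered interrupt still lands on this CPU.
			 */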
			raw_spin_unlock(&desc->lock);
		}
		if (__this_cpu_read(vector_irq[vector]) != VECTOR_RETRIGGERED)
			__this_cpu_write(vector_irq[vector], VECTOR_UNDEFINED);
	}
}
#endif