/*
 * Local APIC related interfaces to support IOAPIC, MSI, HT_IRQ etc.
 *
 * Copyright (C) 1997, 1998, 1999, 2000, 2009 Ingo Molnar, Hajnalka Szabo
 *	Moved from arch/x86/kernel/apic/io_apic.c.
 * Jiang Liu <jiang.liu@linux.intel.com>
 *	Enable support of hierarchical irqdomains
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <asm/irqdomain.h>
#include <asm/hw_irq.h>
#include <asm/apic.h>
#include <asm/i8259.h>
#include <asm/desc.h>
#include <asm/irq_remapping.h>
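/*
 * Per-irq state of the vector domain: "cfg" carries the assigned vector and
 * the cached destination APIC ID, "domain" is the set of CPUs the vector is
 * currently programmed on, and "old_domain" remembers the CPUs that still
 * need cleanup while an affinity change is in flight (move_in_progress).
 */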
struct apic_chip_data {
	struct irq_cfg		cfg;
	cpumask_var_t		domain;
	cpumask_var_t		old_domain;
	u8			move_in_progress : 1;
};
struct irq_domain *x86_vector_domain;
static DEFINE_RAW_SPINLOCK(vector_lock);
static cpumask_var_t vector_cpumask;
static struct irq_chip lapic_controller;
#ifdef CONFIG_X86_IO_APIC
static struct apic_chip_data *legacy_irq_data[NR_IRQS_LEGACY];
#endif
void lock_vector_lock(void)
{
	/* Used so that the online set of cpus does not change
	 * during assign_irq_vector.
	 */
	raw_spin_lock(&vector_lock);
}
void unlock_vector_lock(void)
{
	raw_spin_unlock(&vector_lock);
}
static struct apic_chip_data *apic_chip_data(struct irq_data *irq_data)
{
	if (!irq_data)
		return NULL;

	/* Walk down to the bottom of the irqdomain hierarchy */
	while (irq_data->parent_data)
		irq_data = irq_data->parent_data;

	return irq_data->chip_data;
}
struct irq_cfg *irqd_cfg(struct irq_data *irq_data)
{
	struct apic_chip_data *data = apic_chip_data(irq_data);

	return data ? &data->cfg : NULL;
}
struct irq_cfg *irq_cfg(unsigned int irq)
{
	return irqd_cfg(irq_get_irq_data(irq));
}
static struct apic_chip_data *alloc_apic_chip_data(int node)
{
	struct apic_chip_data *data;

	data = kzalloc_node(sizeof(*data), GFP_KERNEL, node);
	if (!data)
		return NULL;
	if (!zalloc_cpumask_var_node(&data->domain, GFP_KERNEL, node))
		goto out_data;
	if (!zalloc_cpumask_var_node(&data->old_domain, GFP_KERNEL, node))
		goto out_domain;
	return data;
out_domain:
	free_cpumask_var(data->domain);
out_data:
	kfree(data);
	return NULL;
}
static void free_apic_chip_data(struct apic_chip_data *data)
{
	if (data) {
		free_cpumask_var(data->domain);
		free_cpumask_var(data->old_domain);
		kfree(data);
	}
}
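/*
 * Vector search used by __assign_irq_vector() below: the priority level of
 * a vector is (vector >> 4), so candidates are stepped in increments of 16
 * to spread irqs across levels. E.g. if 0x31 (level 3) was handed out last,
 * the next candidate tried is 0x41 (level 4).
 */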
static int __assign_irq_vector(int irq, struct apic_chip_data *d,
			       const struct cpumask *mask)
{
	/*
	 * NOTE! The local APIC isn't very good at handling
	 * multiple interrupts at the same interrupt level.
	 * As the interrupt level is determined by taking the
	 * vector number and shifting that right by 4, we
	 * want to spread these out a bit so that they don't
	 * all fall in the same interrupt level.
	 *
	 * Also, we've got to be careful not to trash gate
	 * 0x80, because int 0x80 is hm, kind of importantish. ;)
	 */
	static int current_vector = FIRST_EXTERNAL_VECTOR + VECTOR_OFFSET_START;
	static int current_offset = VECTOR_OFFSET_START % 16;
	int cpu, err;

	if (d->move_in_progress)
		return -EBUSY;

	/* Only try to allocate irqs on cpus that are present */
	err = -ENOSPC;
	cpumask_clear(d->old_domain);
	cpu = cpumask_first_and(mask, cpu_online_mask);
	while (cpu < nr_cpu_ids) {
		int new_cpu, vector, offset;

		apic->vector_allocation_domain(cpu, vector_cpumask, mask);

		if (cpumask_subset(vector_cpumask, d->domain)) {
			err = 0;
			if (cpumask_equal(vector_cpumask, d->domain))
				break;
			/*
			 * The new cpumask using the vector is a proper
			 * subset of the current in-use mask. So clean up
			 * the vector allocation for the members that are
			 * not used anymore.
			 */
			cpumask_andnot(d->old_domain, d->domain,
				       vector_cpumask);
			d->move_in_progress =
			   cpumask_intersects(d->old_domain, cpu_online_mask);
			cpumask_and(d->domain, d->domain, vector_cpumask);
			break;
		}

		vector = current_vector;
		offset = current_offset;
next:
		vector += 16;
		if (vector >= first_system_vector) {
			offset = (offset + 1) % 16;
			vector = FIRST_EXTERNAL_VECTOR + offset;
		}

		/* Wrapped around: no free vector for this domain, try next cpu */
		if (unlikely(current_vector == vector)) {
			cpumask_or(d->old_domain, d->old_domain,
				   vector_cpumask);
			cpumask_andnot(vector_cpumask, mask, d->old_domain);
			cpu = cpumask_first_and(vector_cpumask,
						cpu_online_mask);
			continue;
		}

		if (test_bit(vector, used_vectors))
			goto next;

		for_each_cpu_and(new_cpu, vector_cpumask, cpu_online_mask) {
			if (!IS_ERR_OR_NULL(per_cpu(vector_irq, new_cpu)[vector]))
				goto next;
		}
		/* Found one! */
		current_vector = vector;
		current_offset = offset;
		if (d->cfg.vector) {
			cpumask_copy(d->old_domain, d->domain);
			d->move_in_progress =
			   cpumask_intersects(d->old_domain, cpu_online_mask);
		}
		for_each_cpu_and(new_cpu, vector_cpumask, cpu_online_mask)
			per_cpu(vector_irq, new_cpu)[vector] = irq_to_desc(irq);
		d->cfg.vector = vector;
		cpumask_copy(d->domain, vector_cpumask);
		err = 0;
		break;
	}

	if (!err) {
		/* cache destination APIC IDs into cfg->dest_apicid */
		err = apic->cpu_mask_to_apicid_and(mask, d->domain,
						   &d->cfg.dest_apicid);
	}

	return err;
}
static int assign_irq_vector(int irq, struct apic_chip_data *data,
			     const struct cpumask *mask)
{
	int err;
	unsigned long flags;

	raw_spin_lock_irqsave(&vector_lock, flags);
	err = __assign_irq_vector(irq, data, mask);
	raw_spin_unlock_irqrestore(&vector_lock, flags);
	return err;
}
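/*
 * Pick the allocation target in decreasing order of preference: a mask
 * explicitly requested by the caller, then the CPUs of the device's NUMA
 * node, and finally the default target set reported by the APIC driver.
 */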
static int assign_irq_vector_policy(int irq, int node,
				    struct apic_chip_data *data,
				    struct irq_alloc_info *info)
{
	if (info && info->mask)
		return assign_irq_vector(irq, data, info->mask);
	if (node != NUMA_NO_NODE &&
	    assign_irq_vector(irq, data, cpumask_of_node(node)) == 0)
		return 0;
	return assign_irq_vector(irq, data, apic->target_cpus());
}
static void clear_irq_vector(int irq, struct apic_chip_data *data)
{
	struct irq_desc *desc;
	unsigned long flags;
	int cpu, vector;

	raw_spin_lock_irqsave(&vector_lock, flags);
	BUG_ON(!data->cfg.vector);

	vector = data->cfg.vector;
	for_each_cpu_and(cpu, data->domain, cpu_online_mask)
		per_cpu(vector_irq, cpu)[vector] = VECTOR_UNUSED;

	data->cfg.vector = 0;
	cpumask_clear(data->domain);

	if (likely(!data->move_in_progress)) {
		raw_spin_unlock_irqrestore(&vector_lock, flags);
		return;
	}

	/* A move was in flight: release the vector on the old domain too */
	desc = irq_to_desc(irq);
	for_each_cpu_and(cpu, data->old_domain, cpu_online_mask) {
		for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS;
		     vector++) {
			if (per_cpu(vector_irq, cpu)[vector] != desc)
				continue;
			per_cpu(vector_irq, cpu)[vector] = VECTOR_UNUSED;
			break;
		}
	}
	data->move_in_progress = 0;
	raw_spin_unlock_irqrestore(&vector_lock, flags);
}
void init_irq_alloc_info(struct irq_alloc_info *info,
			 const struct cpumask *mask)
{
	memset(info, 0, sizeof(*info));
	info->mask = mask;
}
void copy_irq_alloc_info(struct irq_alloc_info *dst, struct irq_alloc_info *src)
{
	if (src)
		*dst = *src;
	else
		memset(dst, 0, sizeof(*dst));
}
static void x86_vector_free_irqs(struct irq_domain *domain,
				 unsigned int virq, unsigned int nr_irqs)
{
	struct irq_data *irq_data;
	int i;

	for (i = 0; i < nr_irqs; i++) {
		irq_data = irq_domain_get_irq_data(x86_vector_domain, virq + i);
		if (irq_data && irq_data->chip_data) {
			clear_irq_vector(virq + i, irq_data->chip_data);
			free_apic_chip_data(irq_data->chip_data);
#ifdef	CONFIG_X86_IO_APIC
			if (virq + i < nr_legacy_irqs())
				legacy_irq_data[virq + i] = NULL;
#endif
			irq_domain_reset_irq_data(irq_data);
		}
	}
}
static int x86_vector_alloc_irqs(struct irq_domain *domain, unsigned int virq,
				 unsigned int nr_irqs, void *arg)
{
	struct irq_alloc_info *info = arg;
	struct apic_chip_data *data;
	struct irq_data *irq_data;
	int i, err, node;

	if (disable_apic)
		return -ENXIO;

	/* Currently vector allocator can't guarantee contiguous allocations */
	if ((info->flags & X86_IRQ_ALLOC_CONTIGUOUS_VECTORS) && nr_irqs > 1)
		return -ENOSYS;

	for (i = 0; i < nr_irqs; i++) {
		irq_data = irq_domain_get_irq_data(domain, virq + i);
		BUG_ON(!irq_data);
		node = irq_data_get_node(irq_data);
#ifdef	CONFIG_X86_IO_APIC
		if (virq + i < nr_legacy_irqs() && legacy_irq_data[virq + i])
			data = legacy_irq_data[virq + i];
		else
#endif
			data = alloc_apic_chip_data(node);
		if (!data) {
			err = -ENOMEM;
			goto error;
		}

		irq_data->chip = &lapic_controller;
		irq_data->chip_data = data;
		irq_data->hwirq = virq + i;
		err = assign_irq_vector_policy(virq + i, node, data, info);
		if (err)
			goto error;
	}

	return 0;

error:
	x86_vector_free_irqs(domain, virq, i + 1);
	return err;
}
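/*
 * Ops for the root x86 vector domain. The IOAPIC, MSI and HT_IRQ domains
 * are stacked on top of it and delegate vector allocation here via the
 * hierarchical irqdomain interfaces.
 */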
static const struct irq_domain_ops x86_vector_domain_ops = {
	.alloc	= x86_vector_alloc_irqs,
	.free	= x86_vector_free_irqs,
};
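/*
 * Estimate how many irq descriptors are needed: one per GSI plus the legacy
 * irqs, plus headroom for dynamically allocated MSI/HT irqs, capped at
 * NR_VECTORS * nr_cpu_ids.
 */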
int __init arch_probe_nr_irqs(void)
{
	int nr;

	if (nr_irqs > (NR_VECTORS * nr_cpu_ids))
		nr_irqs = NR_VECTORS * nr_cpu_ids;

	nr = (gsi_top + nr_legacy_irqs()) + 8 * nr_cpu_ids;
#if defined(CONFIG_PCI_MSI) || defined(CONFIG_HT_IRQ)
	/*
	 * for MSI and HT dyn irq
	 */
	if (gsi_top <= NR_IRQS_LEGACY)
		nr += 8 * nr_cpu_ids;
	else
		nr += gsi_top * 16;
#endif
	if (nr < nr_irqs)
		nr_irqs = nr;

	return nr_legacy_irqs();
}
#ifdef	CONFIG_X86_IO_APIC
static void init_legacy_irqs(void)
{
	int i, node = cpu_to_node(0);
	struct apic_chip_data *data;

	/*
	 * For legacy IRQs, start with assigning irq0 to irq15 to
	 * ISA_IRQ_VECTOR(i) for all cpus.
	 */
	for (i = 0; i < nr_legacy_irqs(); i++) {
		data = legacy_irq_data[i] = alloc_apic_chip_data(node);
		BUG_ON(!data);

		data->cfg.vector = ISA_IRQ_VECTOR(i);
		cpumask_setall(data->domain);
		irq_set_chip_data(i, data);
	}
}
#else
static void init_legacy_irqs(void) { }
#endif
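/*
 * Runs from the generic early_irq_init() path: create the root vector
 * domain and attach the MSI and HTIRQ domains to it before any interrupt
 * controller is probed.
 */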
int __init arch_early_irq_init(void)
{
	init_legacy_irqs();

	x86_vector_domain = irq_domain_add_tree(NULL, &x86_vector_domain_ops,
						NULL);
	BUG_ON(x86_vector_domain == NULL);
	irq_set_default_host(x86_vector_domain);

	arch_init_msi_domain(x86_vector_domain);
	arch_init_htirq_domain(x86_vector_domain);

	BUG_ON(!alloc_cpumask_var(&vector_cpumask, GFP_KERNEL));

	return arch_early_ioapic_init();
}
/* Initialize vector_irq on a new cpu */
static void __setup_vector_irq(int cpu)
{
	struct apic_chip_data *data;
	struct irq_desc *desc;
	int irq, vector;

	/* Mark the inuse vectors */
	for_each_irq_desc(irq, desc) {
		struct irq_data *idata = irq_desc_get_irq_data(desc);

		data = apic_chip_data(idata);
		if (!data || !cpumask_test_cpu(cpu, data->domain))
			continue;
		vector = data->cfg.vector;
		per_cpu(vector_irq, cpu)[vector] = desc;
	}
	/* Mark the free vectors */
	for (vector = 0; vector < NR_VECTORS; ++vector) {
		desc = per_cpu(vector_irq, cpu)[vector];
		if (IS_ERR_OR_NULL(desc))
			continue;

		data = apic_chip_data(irq_desc_get_irq_data(desc));
		if (!cpumask_test_cpu(cpu, data->domain))
			per_cpu(vector_irq, cpu)[vector] = VECTOR_UNUSED;
	}
}
/*
 * Setup the vector to irq mappings. Must be called with vector_lock held.
 */
void setup_vector_irq(int cpu)
{
	int irq;

	lockdep_assert_held(&vector_lock);
	/*
	 * On most platforms, the legacy PIC delivers interrupts to the
	 * boot cpu, but there are certain platforms where PIC interrupts
	 * are delivered to multiple cpus. If the legacy IRQ is handled by
	 * the legacy PIC, set up the static legacy vector to irq mapping
	 * for the new cpu that is coming online:
	 */
	for (irq = 0; irq < nr_legacy_irqs(); irq++)
		per_cpu(vector_irq, cpu)[ISA_IRQ_VECTOR(irq)] = irq_to_desc(irq);

	__setup_vector_irq(cpu);
}
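/* Resend an irq by IPI-ing one online CPU in its domain with its vector. */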
static int apic_retrigger_irq(struct irq_data *irq_data)
{
	struct apic_chip_data *data = apic_chip_data(irq_data);
	unsigned long flags;
	int cpu;

	raw_spin_lock_irqsave(&vector_lock, flags);
	cpu = cpumask_first_and(data->domain, cpu_online_mask);
	apic->send_IPI_mask(cpumask_of(cpu), data->cfg.vector);
	raw_spin_unlock_irqrestore(&vector_lock, flags);

	return 1;
}
void apic_ack_edge(struct irq_data *data)
{
	/* Finish a pending move, perform any deferred move, then EOI */
	irq_complete_move(irqd_cfg(data));
	irq_move_irq(data);
	ack_APIC_irq();
}
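/*
 * irq_set_affinity callback: allocate a vector reachable from the new
 * destination mask. On failure, try to re-assign the previous affinity so
 * that the irq keeps working.
 */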
static int apic_set_affinity(struct irq_data *irq_data,
			     const struct cpumask *dest, bool force)
{
	struct apic_chip_data *data = irq_data->chip_data;
	int err, irq = irq_data->irq;

	if (!config_enabled(CONFIG_SMP))
		return -EPERM;

	if (!cpumask_intersects(dest, cpu_online_mask))
		return -EINVAL;

	err = assign_irq_vector(irq, data, dest);
	if (err) {
		if (assign_irq_vector(irq, data,
				      irq_data_get_affinity_mask(irq_data)))
			pr_err("Failed to recover vector for irq %d\n", irq);
		return err;
	}

	return IRQ_SET_MASK_OK;
}
static struct irq_chip lapic_controller = {
	.irq_ack		= apic_ack_edge,
	.irq_set_affinity	= apic_set_affinity,
	.irq_retrigger		= apic_retrigger_irq,
};
#ifdef CONFIG_SMP
static void __send_cleanup_vector(struct apic_chip_data *data)
{
	cpumask_var_t cleanup_mask;

	if (unlikely(!alloc_cpumask_var(&cleanup_mask, GFP_ATOMIC))) {
		unsigned int i;

		for_each_cpu_and(i, data->old_domain, cpu_online_mask)
			apic->send_IPI_mask(cpumask_of(i),
					    IRQ_MOVE_CLEANUP_VECTOR);
	} else {
		cpumask_and(cleanup_mask, data->old_domain, cpu_online_mask);
		apic->send_IPI_mask(cleanup_mask, IRQ_MOVE_CLEANUP_VECTOR);
		free_cpumask_var(cleanup_mask);
	}
	data->move_in_progress = 0;
}
void send_cleanup_vector(struct irq_cfg *cfg)
{
	struct apic_chip_data *data;

	data = container_of(cfg, struct apic_chip_data, cfg);
	if (data->move_in_progress)
		__send_cleanup_vector(data);
}
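/*
 * Handler for IRQ_MOVE_CLEANUP_VECTOR. After an irq has moved to a new
 * vector/CPU pair, each CPU in the old domain receives this IPI and
 * releases its stale vector_irq[] entry, unless the old vector is still
 * set in its IRR, in which case the cleanup is retried later.
 */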
asmlinkage __visible void smp_irq_move_cleanup_interrupt(void)
{
	unsigned vector, me;

	entering_ack_irq();
	/* Prevent vectors vanishing under us */
	raw_spin_lock(&vector_lock);
	me = smp_processor_id();
	for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) {
		struct apic_chip_data *data;
		struct irq_desc *desc;
		unsigned int irr;

	retry:
		desc = __this_cpu_read(vector_irq[vector]);
		if (IS_ERR_OR_NULL(desc))
			continue;

		if (!raw_spin_trylock(&desc->lock)) {
			raw_spin_unlock(&vector_lock);
			cpu_relax();
			raw_spin_lock(&vector_lock);
			goto retry;
		}

		data = apic_chip_data(irq_desc_get_irq_data(desc));
		if (!data)
			goto unlock;
		/*
		 * Check if the irq migration is in progress. If so, we
		 * haven't received the cleanup request yet for this irq.
		 */
		if (data->move_in_progress)
			goto unlock;

		if (vector == data->cfg.vector &&
		    cpumask_test_cpu(me, data->domain))
			goto unlock;

		irr = apic_read(APIC_IRR + (vector / 32 * 0x10));
		/*
		 * Check if the vector that needs to be cleaned up is
		 * registered at the cpu's IRR. If so, then this is not
		 * the best time to clean it up. Let's clean it up in the
		 * next attempt by sending another IRQ_MOVE_CLEANUP_VECTOR
		 * to myself.
		 */
		if (irr & (1 << (vector % 32))) {
			apic->send_IPI_self(IRQ_MOVE_CLEANUP_VECTOR);
			goto unlock;
		}
		__this_cpu_write(vector_irq[vector], VECTOR_UNUSED);
unlock:
		raw_spin_unlock(&desc->lock);
	}

	raw_spin_unlock(&vector_lock);
	exiting_irq();
}
static void __irq_complete_move(struct irq_cfg *cfg, unsigned vector)
{
	unsigned me;
	struct apic_chip_data *data;

	data = container_of(cfg, struct apic_chip_data, cfg);
	if (likely(!data->move_in_progress))
		return;

	me = smp_processor_id();
	if (vector == data->cfg.vector && cpumask_test_cpu(me, data->domain))
		__send_cleanup_vector(data);
}
void irq_complete_move(struct irq_cfg *cfg)
{
	__irq_complete_move(cfg, ~get_irq_regs()->orig_ax);
}
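/*
 * Forcibly complete a pending move when we cannot wait for another
 * interrupt to arrive (e.g. from fixup_irqs() while taking a CPU offline):
 * pretend the currently assigned vector has just fired.
 */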
void irq_force_complete_move(int irq)
{
	struct irq_cfg *cfg = irq_cfg(irq);

	if (cfg)
		__irq_complete_move(cfg, cfg->vector);
}
#endif
static void __init print_APIC_field(int base)
{
	int i;

	printk(KERN_DEBUG);

	for (i = 0; i < 8; i++)
		pr_cont("%08x", apic_read(base + i*0x10));

	pr_cont("\n");
}
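/*
 * ISR, TMR and IRR are each 256 bits wide, exposed as eight 32-bit
 * registers spaced 0x10 apart, which is why print_APIC_field() above reads
 * base + i*0x10 eight times.
 */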
static void __init print_local_APIC(void *dummy)
{
	unsigned int i, v, ver, maxlvt;
	u64 icr;

	pr_debug("printing local APIC contents on CPU#%d/%d:\n",
		 smp_processor_id(), hard_smp_processor_id());
	v = apic_read(APIC_ID);
	pr_info("... APIC ID:      %08x (%01x)\n", v, read_apic_id());
	v = apic_read(APIC_LVR);
	pr_info("... APIC VERSION: %08x\n", v);
	ver = GET_APIC_VERSION(v);
	maxlvt = lapic_get_maxlvt();

	v = apic_read(APIC_TASKPRI);
	pr_debug("... APIC TASKPRI: %08x (%02x)\n", v, v & APIC_TPRI_MASK);

	/* !82489DX */
	if (APIC_INTEGRATED(ver)) {
		if (!APIC_XAPIC(ver)) {
			v = apic_read(APIC_ARBPRI);
			pr_debug("... APIC ARBPRI: %08x (%02x)\n",
				 v, v & APIC_ARBPRI_MASK);
		}
		v = apic_read(APIC_PROCPRI);
		pr_debug("... APIC PROCPRI: %08x\n", v);
	}

	/*
	 * Remote read supported only in the 82489DX and local APIC for
	 * Pentium processors.
	 */
	if (!APIC_INTEGRATED(ver) || maxlvt == 3) {
		v = apic_read(APIC_RRR);
		pr_debug("... APIC RRR: %08x\n", v);
	}

	v = apic_read(APIC_LDR);
	pr_debug("... APIC LDR: %08x\n", v);
	if (!x2apic_enabled()) {
		v = apic_read(APIC_DFR);
		pr_debug("... APIC DFR: %08x\n", v);
	}
	v = apic_read(APIC_SPIV);
	pr_debug("... APIC SPIV: %08x\n", v);

	pr_debug("... APIC ISR field:\n");
	print_APIC_field(APIC_ISR);
	pr_debug("... APIC TMR field:\n");
	print_APIC_field(APIC_TMR);
	pr_debug("... APIC IRR field:\n");
	print_APIC_field(APIC_IRR);

	/* !82489DX */
	if (APIC_INTEGRATED(ver)) {
		/* Due to the Pentium erratum 3AP. */
		if (maxlvt > 3)
			apic_write(APIC_ESR, 0);

		v = apic_read(APIC_ESR);
		pr_debug("... APIC ESR: %08x\n", v);
	}

	icr = apic_icr_read();
	pr_debug("... APIC ICR: %08x\n", (u32)icr);
	pr_debug("... APIC ICR2: %08x\n", (u32)(icr >> 32));

	v = apic_read(APIC_LVTT);
	pr_debug("... APIC LVTT: %08x\n", v);

	if (maxlvt > 3) {
		/* PC is LVT#4. */
		v = apic_read(APIC_LVTPC);
		pr_debug("... APIC LVTPC: %08x\n", v);
	}
	v = apic_read(APIC_LVT0);
	pr_debug("... APIC LVT0: %08x\n", v);
	v = apic_read(APIC_LVT1);
	pr_debug("... APIC LVT1: %08x\n", v);

	if (maxlvt > 2) {
		/* ERR is LVT#3. */
		v = apic_read(APIC_LVTERR);
		pr_debug("... APIC LVTERR: %08x\n", v);
	}

	v = apic_read(APIC_TMICT);
	pr_debug("... APIC TMICT: %08x\n", v);
	v = apic_read(APIC_TMCCT);
	pr_debug("... APIC TMCCT: %08x\n", v);
	v = apic_read(APIC_TDCR);
	pr_debug("... APIC TDCR: %08x\n", v);

	if (boot_cpu_has(X86_FEATURE_EXTAPIC)) {
		v = apic_read(APIC_EFEAT);
		maxlvt = (v >> 16) & 0xff;
		pr_debug("... APIC EFEAT: %08x\n", v);
		v = apic_read(APIC_ECTRL);
		pr_debug("... APIC ECTRL: %08x\n", v);
		for (i = 0; i < maxlvt; i++) {
			v = apic_read(APIC_EILVTn(i));
			pr_debug("... APIC EILVT%d: %08x\n", i, v);
		}
	}
	pr_cont("\n");
}
static void __init print_local_APICs(int maxcpu)
{
	int cpu;

	if (!maxcpu)
		return;

	preempt_disable();
	for_each_online_cpu(cpu) {
		if (cpu >= maxcpu)
			break;
		smp_call_function_single(cpu, print_local_APIC, NULL, 1);
	}
	preempt_enable();
}
static void __init print_PIC(void)
{
	unsigned int v;
	unsigned long flags;

	if (!nr_legacy_irqs())
		return;

	pr_debug("\nprinting PIC contents\n");

	raw_spin_lock_irqsave(&i8259A_lock, flags);

	v = inb(0xa1) << 8 | inb(0x21);
	pr_debug("... PIC IMR: %04x\n", v);

	v = inb(0xa0) << 8 | inb(0x20);
	pr_debug("... PIC IRR: %04x\n", v);

	/* Select ISR via OCW3 (0x0b), read it, then switch back to IRR (0x0a) */
	outb(0x0b, 0xa0);
	outb(0x0b, 0x20);
	v = inb(0xa0) << 8 | inb(0x20);
	outb(0x0a, 0xa0);
	outb(0x0a, 0x20);

	raw_spin_unlock_irqrestore(&i8259A_lock, flags);

	pr_debug("... PIC ISR: %04x\n", v);

	v = inb(0x4d1) << 8 | inb(0x4d0);
	pr_debug("... PIC ELCR: %04x\n", v);
}
/* "show_lapic=" boot parameter: print N local APICs, or "all" of them */
static int show_lapic __initdata = 1;
static __init int setup_show_lapic(char *arg)
{
	int num = -1;

	if (strcmp(arg, "all") == 0) {
		show_lapic = CONFIG_NR_CPUS;
	} else {
		get_option(&arg, &num);
		if (num >= 0)
			show_lapic = num;
	}

	return 1;
}
__setup("show_lapic=", setup_show_lapic);
static int __init print_ICs(void)
{
	if (apic_verbosity == APIC_QUIET)
		return 0;

	print_PIC();

	/* don't print out if apic is not there */
	if (!cpu_has_apic && !apic_from_smp_config())
		return 0;

	print_local_APICs(show_lapic);
	print_IO_APICs();

	return 0;
}
late_initcall(print_ICs);