#define pr_fmt(fmt)     "DMAR-IR: " fmt

#include <linux/interrupt.h>
#include <linux/dmar.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/jiffies.h>
#include <linux/hpet.h>
#include <linux/pci.h>
#include <linux/irq.h>
#include <linux/intel-iommu.h>
#include <linux/acpi.h>
#include <linux/crash_dump.h>
#include <asm/io_apic.h>
#include <asm/irq_remapping.h>
#include <asm/pci-direct.h>
#include <asm/msidef.h>

#include "irq_remapping.h"

struct ioapic_scope {
        struct intel_iommu *iommu;
        unsigned int id;
        unsigned int bus;       /* PCI bus number */
        unsigned int devfn;     /* PCI devfn number */
};

struct hpet_scope {
        struct intel_iommu *iommu;
        u8 id;
        unsigned int bus;
        unsigned int devfn;
};

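/*
 * In xapic mode, the 8-bit APIC ID is carried in bits 15:8 of the IRTE
 * destination field, hence the shift in IRTE_DEST() below. With EIM
 * (x2apic) enabled, the full 32-bit destination ID is programmed as-is,
 * and IR_X2APIC_MODE() sets the extended-interrupt-mode bit (bit 11) of
 * the interrupt-remapping table address (IRTA) register.
 */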
#define IR_X2APIC_MODE(mode)    (mode ? (1 << 11) : 0)
#define IRTE_DEST(dest)         ((eim_mode) ? dest : dest << 8)

static int __read_mostly eim_mode;
static struct ioapic_scope ir_ioapic[MAX_IO_APICS];
static struct hpet_scope ir_hpet[MAX_HPET_TBS];

/*
 * Lock ordering:
 * ->dmar_global_lock
 *      ->irq_2_ir_lock
 *              ->qi->q_lock
 *      ->iommu->register_lock
 *
 * intel_irq_remap_ops.{supported,prepare,enable,disable,reenable} are called
 * in a single-threaded environment with interrupts disabled, so there is no
 * need to take the dmar_global_lock.
 */
static DEFINE_RAW_SPINLOCK(irq_2_ir_lock);

static void iommu_disable_irq_remapping(struct intel_iommu *iommu);
static int __init parse_ioapics_under_ir(void);

static bool ir_pre_enabled(struct intel_iommu *iommu)
{
        return (iommu->flags & VTD_FLAG_IRQ_REMAP_PRE_ENABLED);
}

static void clear_ir_pre_enabled(struct intel_iommu *iommu)
{
        iommu->flags &= ~VTD_FLAG_IRQ_REMAP_PRE_ENABLED;
}

static void init_ir_status(struct intel_iommu *iommu)
{
        u32 gsts;

        gsts = readl(iommu->reg + DMAR_GSTS_REG);
        if (gsts & DMA_GSTS_IRES)
                iommu->flags |= VTD_FLAG_IRQ_REMAP_PRE_ENABLED;
}

static struct irq_2_iommu *irq_2_iommu(unsigned int irq)
{
        struct irq_cfg *cfg = irq_cfg(irq);

        return cfg ? &cfg->irq_2_iommu : NULL;
}

static int get_irte(int irq, struct irte *entry)
{
        struct irq_2_iommu *irq_iommu = irq_2_iommu(irq);
        unsigned long flags;
        int index;

        if (!entry || !irq_iommu)
                return -1;

        raw_spin_lock_irqsave(&irq_2_ir_lock, flags);

        if (unlikely(!irq_iommu->iommu)) {
                raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);
                return -1;
        }

        index = irq_iommu->irte_index + irq_iommu->sub_handle;
        *entry = *(irq_iommu->iommu->ir_table->base + index);

        raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);
        return 0;
}

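/*
 * Allocate 'count' consecutive IRTEs for an irq. Multi-vector MSI needs a
 * naturally aligned, power-of-two block of entries so that one table handle
 * plus a per-vector sub-handle can address every entry; that is exactly the
 * allocation granularity bitmap_find_free_region() provides, with 'mask'
 * being the log2 of the block size.
 */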
static int alloc_irte(struct intel_iommu *iommu, int irq, u16 count)
{
        struct ir_table *table = iommu->ir_table;
        struct irq_2_iommu *irq_iommu = irq_2_iommu(irq);
        struct irq_cfg *cfg = irq_cfg(irq);
        unsigned int mask = 0;
        unsigned long flags;
        int index;

        if (!count || !irq_iommu)
                return -1;

        if (count > 1) {
                count = __roundup_pow_of_two(count);
                mask = ilog2(count);
        }

        if (mask > ecap_max_handle_mask(iommu->ecap)) {
                pr_err("Requested mask %x exceeds the max invalidation handle mask value %Lx\n",
                       mask, ecap_max_handle_mask(iommu->ecap));
                return -1;
        }

        raw_spin_lock_irqsave(&irq_2_ir_lock, flags);
        index = bitmap_find_free_region(table->bitmap,
                                        INTR_REMAP_TABLE_ENTRIES, mask);
        if (index < 0) {
                pr_warn("IR%d: can't allocate an IRTE\n", iommu->seq_id);
        } else {
                cfg->remapped = 1;
                irq_iommu->iommu = iommu;
                irq_iommu->irte_index = index;
                irq_iommu->sub_handle = 0;
                irq_iommu->irte_mask = mask;
        }
        raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);

        return index;
}

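/*
 * Build an Interrupt Entry Cache (IEC) invalidation descriptor and submit
 * it through the queued-invalidation (QI) interface, forcing the hardware
 * to drop any cached copies of the IRTEs just written. 'mask' selects how
 * many entries (a power-of-two block starting at 'index') are invalidated.
 */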
static int qi_flush_iec(struct intel_iommu *iommu, int index, int mask)
{
        struct qi_desc desc;

        desc.low = QI_IEC_IIDEX(index) | QI_IEC_TYPE | QI_IEC_IM(mask)
                   | QI_IEC_SELECTIVE;
        desc.high = 0;

        return qi_submit_sync(&desc, iommu);
}

static int map_irq_to_irte_handle(int irq, u16 *sub_handle)
{
        struct irq_2_iommu *irq_iommu = irq_2_iommu(irq);
        unsigned long flags;
        int index;

        if (!irq_iommu)
                return -1;

        raw_spin_lock_irqsave(&irq_2_ir_lock, flags);
        *sub_handle = irq_iommu->sub_handle;
        index = irq_iommu->irte_index;
        raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);
        return index;
}

static int set_irte_irq(int irq, struct intel_iommu *iommu, u16 index, u16 subhandle)
{
        struct irq_2_iommu *irq_iommu = irq_2_iommu(irq);
        struct irq_cfg *cfg = irq_cfg(irq);
        unsigned long flags;

        if (!irq_iommu)
                return -1;

        raw_spin_lock_irqsave(&irq_2_ir_lock, flags);

        cfg->remapped = 1;
        irq_iommu->iommu = iommu;
        irq_iommu->irte_index = index;
        irq_iommu->sub_handle = subhandle;
        irq_iommu->irte_mask = 0;

        raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);

        return 0;
}

static int modify_irte(int irq, struct irte *irte_modified)
{
        struct irq_2_iommu *irq_iommu = irq_2_iommu(irq);
        struct intel_iommu *iommu;
        unsigned long flags;
        struct irte *irte;
        int rc, index;

        if (!irq_iommu)
                return -1;

        raw_spin_lock_irqsave(&irq_2_ir_lock, flags);

        iommu = irq_iommu->iommu;

        index = irq_iommu->irte_index + irq_iommu->sub_handle;
        irte = &iommu->ir_table->base[index];

        set_64bit(&irte->low, irte_modified->low);
        set_64bit(&irte->high, irte_modified->high);
        __iommu_flush_cache(iommu, irte, sizeof(*irte));

        rc = qi_flush_iec(iommu, index, 0);
        raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);

        return rc;
}

static struct intel_iommu *map_hpet_to_ir(u8 hpet_id)
{
        int i;

        for (i = 0; i < MAX_HPET_TBS; i++)
                if (ir_hpet[i].id == hpet_id && ir_hpet[i].iommu)
                        return ir_hpet[i].iommu;
        return NULL;
}

static struct intel_iommu *map_ioapic_to_ir(int apic)
{
        int i;

        for (i = 0; i < MAX_IO_APICS; i++)
                if (ir_ioapic[i].id == apic && ir_ioapic[i].iommu)
                        return ir_ioapic[i].iommu;
        return NULL;
}

static struct intel_iommu *map_dev_to_ir(struct pci_dev *dev)
{
        struct dmar_drhd_unit *drhd;

        drhd = dmar_find_matched_drhd_unit(dev);
        if (!drhd)
                return NULL;

        return drhd->iommu;
}

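/*
 * Clear the IRTEs backing an irq and release them in the allocation bitmap.
 * Only the owner of the block (sub_handle == 0) frees it; irqs that merely
 * point into someone else's block via a sub-handle must not.
 */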
static int clear_entries(struct irq_2_iommu *irq_iommu)
{
        struct irte *start, *entry, *end;
        struct intel_iommu *iommu;
        int index;

        if (irq_iommu->sub_handle)
                return 0;

        iommu = irq_iommu->iommu;
        index = irq_iommu->irte_index + irq_iommu->sub_handle;

        start = iommu->ir_table->base + index;
        end = start + (1 << irq_iommu->irte_mask);

        for (entry = start; entry < end; entry++) {
                set_64bit(&entry->low, 0);
                set_64bit(&entry->high, 0);
        }
        bitmap_release_region(iommu->ir_table->bitmap, index,
                              irq_iommu->irte_mask);

        return qi_flush_iec(iommu, index, irq_iommu->irte_mask);
}

static int free_irte(int irq)
{
        struct irq_2_iommu *irq_iommu = irq_2_iommu(irq);
        unsigned long flags;
        int rc;

        if (!irq_iommu)
                return -1;

        raw_spin_lock_irqsave(&irq_2_ir_lock, flags);

        rc = clear_entries(irq_iommu);

        irq_iommu->iommu = NULL;
        irq_iommu->irte_index = 0;
        irq_iommu->sub_handle = 0;
        irq_iommu->irte_mask = 0;

        raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);

        return rc;
}

/*
 * source validation type
 */
#define SVT_NO_VERIFY           0x0  /* no verification is required */
#define SVT_VERIFY_SID_SQ       0x1  /* verify using SID and SQ fields */
#define SVT_VERIFY_BUS          0x2  /* verify bus of request-id */

/*
 * source-id qualifier
 */
#define SQ_ALL_16       0x0  /* verify all 16 bits of request-id */
#define SQ_13_IGNORE_1  0x1  /* verify most significant 13 bits, ignore
                              * the third least significant bit
                              */
#define SQ_13_IGNORE_2  0x2  /* verify most significant 13 bits, ignore
                              * the second and third least significant bits
                              */
#define SQ_13_IGNORE_3  0x3  /* verify most significant 13 bits, ignore
                              * the three least significant bits
                              */

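/*
 * The low three bits of a PCI requester-id encode the function number, so
 * SQ_13_IGNORE_3 in effect accepts an interrupt from any function of the
 * matched device, while SQ_ALL_16 requires an exact bus/device/function
 * match.
 */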
/*
 * Set the SVT, SQ and SID fields of the irte so that the hardware can
 * verify the source-id of interrupt requests.
 */
static void set_irte_sid(struct irte *irte, unsigned int svt,
                         unsigned int sq, unsigned int sid)
{
        if (disable_sourceid_checking)
                svt = SVT_NO_VERIFY;
        irte->svt = svt;
        irte->sq = sq;
        irte->sid = sid;
}

static int set_ioapic_sid(struct irte *irte, int apic)
{
        int i;
        u16 sid = 0;

        if (!irte)
                return -1;

        down_read(&dmar_global_lock);
        for (i = 0; i < MAX_IO_APICS; i++) {
                if (ir_ioapic[i].iommu && ir_ioapic[i].id == apic) {
                        sid = (ir_ioapic[i].bus << 8) | ir_ioapic[i].devfn;
                        break;
                }
        }
        up_read(&dmar_global_lock);

        if (sid == 0) {
                pr_warn("Failed to set source-id of IOAPIC (%d)\n", apic);
                return -1;
        }

        set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_ALL_16, sid);

        return 0;
}

static int set_hpet_sid(struct irte *irte, u8 id)
{
        int i;
        u16 sid = 0;

        if (!irte)
                return -1;

        down_read(&dmar_global_lock);
        for (i = 0; i < MAX_HPET_TBS; i++) {
                if (ir_hpet[i].iommu && ir_hpet[i].id == id) {
                        sid = (ir_hpet[i].bus << 8) | ir_hpet[i].devfn;
                        break;
                }
        }
        up_read(&dmar_global_lock);

        if (sid == 0) {
                pr_warn("Failed to set source-id of HPET block (%d)\n", id);
                return -1;
        }

        /*
         * Should really use SQ_ALL_16. Some platforms are broken.
         * While we figure out the right quirks for these broken platforms,
         * use SQ_13_IGNORE_3 for now.
         */
        set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_13_IGNORE_3, sid);

        return 0;
}

struct set_msi_sid_data {
        struct pci_dev *pdev;
        u16 alias;
};

static int set_msi_sid_cb(struct pci_dev *pdev, u16 alias, void *opaque)
{
        struct set_msi_sid_data *data = opaque;

        data->pdev = pdev;
        data->alias = alias;

        return 0;
}

static int set_msi_sid(struct irte *irte, struct pci_dev *dev)
{
        struct set_msi_sid_data data;

        if (!irte || !dev)
                return -1;

        pci_for_each_dma_alias(dev, set_msi_sid_cb, &data);

        /*
         * DMA alias provides us with a PCI device and alias. The only case
         * where it will return an alias on a different bus than the
         * device is the case of a PCIe-to-PCI bridge, where the alias is for
         * the subordinate bus. In this case we can only verify the bus.
         *
         * If the alias device is on a different bus than our source device
         * then we have a topology based alias, use it.
         *
         * Otherwise, the alias is for a device DMA quirk and we cannot
         * assume that MSI uses the same requester ID. Therefore use the
         * original device.
         */
        if (PCI_BUS_NUM(data.alias) != data.pdev->bus->number)
                set_irte_sid(irte, SVT_VERIFY_BUS, SQ_ALL_16,
                             PCI_DEVID(PCI_BUS_NUM(data.alias),
                                       dev->bus->number));
        else if (data.pdev->bus->number != dev->bus->number)
                set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_ALL_16, data.alias);
        else
                set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_ALL_16,
                             PCI_DEVID(dev->bus->number, dev->devfn));

        return 0;
}

static int iommu_load_old_irte(struct intel_iommu *iommu)
{
        struct irte *old_ir_table;
        phys_addr_t irt_phys;
        unsigned int i;
        size_t size;
        u64 irta;

        if (!is_kdump_kernel()) {
                pr_warn("IRQ remapping was enabled on %s but we are not in kdump mode\n",
                        iommu->name);
                clear_ir_pre_enabled(iommu);
                iommu_disable_irq_remapping(iommu);
                return -EINVAL;
        }

        /* Check whether the old ir-table has the same size as ours */
        irta = dmar_readq(iommu->reg + DMAR_IRTA_REG);
        if ((irta & INTR_REMAP_TABLE_REG_SIZE_MASK)
             != INTR_REMAP_TABLE_REG_SIZE)
                return -EINVAL;

        irt_phys = irta & VTD_PAGE_MASK;
        size     = INTR_REMAP_TABLE_ENTRIES*sizeof(struct irte);

        /* Map the old IR table */
        old_ir_table = ioremap_cache(irt_phys, size);
        if (!old_ir_table)
                return -ENOMEM;

        /* Copy data over */
        memcpy(iommu->ir_table->base, old_ir_table, size);

        __iommu_flush_cache(iommu, iommu->ir_table->base, size);

        /*
         * Now check the table for used entries and mark those as
         * allocated in the bitmap
         */
        for (i = 0; i < INTR_REMAP_TABLE_ENTRIES; i++) {
                if (iommu->ir_table->base[i].present)
                        bitmap_set(iommu->ir_table->bitmap, i, 1);
        }

        return 0;
}

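/*
 * Program the remapping table into the hardware: write the table's physical
 * address and size into the IRTA register, then set the SIRTP (set
 * interrupt remap table pointer) bit in the global command register and
 * wait for the hardware to acknowledge it via the IRTPS status bit.
 */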
static void iommu_set_irq_remapping(struct intel_iommu *iommu, int mode)
{
        unsigned long flags;
        u64 addr;
        u32 sts;

        addr = virt_to_phys((void *)iommu->ir_table->base);

        raw_spin_lock_irqsave(&iommu->register_lock, flags);

        dmar_writeq(iommu->reg + DMAR_IRTA_REG,
                    (addr) | IR_X2APIC_MODE(mode) | INTR_REMAP_TABLE_REG_SIZE);

        /* Set interrupt-remapping table pointer */
        writel(iommu->gcmd | DMA_GCMD_SIRTP, iommu->reg + DMAR_GCMD_REG);

        IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
                      readl, (sts & DMA_GSTS_IRTPS), sts);
        raw_spin_unlock_irqrestore(&iommu->register_lock, flags);

        /*
         * Global invalidation of interrupt entry cache to make sure the
         * hardware uses the new irq remapping table.
         */
        qi_global_iec(iommu);
}

static void iommu_enable_irq_remapping(struct intel_iommu *iommu)
{
        u32 sts;
        unsigned long flags;

        raw_spin_lock_irqsave(&iommu->register_lock, flags);

        /* Enable interrupt-remapping */
        iommu->gcmd |= DMA_GCMD_IRE;
        iommu->gcmd &= ~DMA_GCMD_CFI;  /* Block compatibility-format MSIs */
        writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

        IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
                      readl, (sts & DMA_GSTS_IRES), sts);

        /*
         * With CFI clear in the Global Command register, we should be
         * protected from dangerous (i.e. compatibility) interrupts
         * regardless of x2apic status. Check just to be sure.
         */
        if (sts & DMA_GSTS_CFIS)
                WARN(1, KERN_WARNING
                        "Compatibility-format IRQs enabled despite intr remapping;\n"
                        "you are vulnerable to IRQ injection.\n");

        raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
}

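/*
 * The remapping table itself: INTR_REMAP_PAGE_ORDER pages hold
 * INTR_REMAP_TABLE_ENTRIES 16-byte IRTEs (64K entries in 1MB on x86), and
 * a separate bitmap tracks which entries are allocated.
 */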
static int intel_setup_irq_remapping(struct intel_iommu *iommu)
{
        struct ir_table *ir_table;
        struct page *pages;
        unsigned long *bitmap;

        if (iommu->ir_table)
                return 0;

        ir_table = kzalloc(sizeof(struct ir_table), GFP_KERNEL);
        if (!ir_table)
                return -ENOMEM;

        pages = alloc_pages_node(iommu->node, GFP_KERNEL | __GFP_ZERO,
                                 INTR_REMAP_PAGE_ORDER);
        if (!pages) {
                pr_err("IR%d: failed to allocate pages of order %d\n",
                       iommu->seq_id, INTR_REMAP_PAGE_ORDER);
                goto out_free_table;
        }

        bitmap = kcalloc(BITS_TO_LONGS(INTR_REMAP_TABLE_ENTRIES),
                         sizeof(long), GFP_ATOMIC);
        if (bitmap == NULL) {
                pr_err("IR%d: failed to allocate bitmap\n", iommu->seq_id);
                goto out_free_pages;
        }

        ir_table->base = page_address(pages);
        ir_table->bitmap = bitmap;
        iommu->ir_table = ir_table;

        /*
         * If queued invalidation is already initialized,
         * we shouldn't disable it.
         */
        if (!iommu->qi) {
                /*
                 * Clear previous faults.
                 */
                dmar_fault(-1, iommu);
                dmar_disable_qi(iommu);

                if (dmar_enable_qi(iommu)) {
                        pr_err("Failed to enable queued invalidation\n");
                        goto out_free_bitmap;
                }
        }

        init_ir_status(iommu);

        if (ir_pre_enabled(iommu)) {
                if (iommu_load_old_irte(iommu))
                        pr_err("Failed to copy IR table for %s from previous kernel\n",
                               iommu->name);
                else
                        pr_info("Copied IR table for %s from previous kernel\n",
                                iommu->name);
        }

        iommu_set_irq_remapping(iommu, eim_mode);

        return 0;

out_free_bitmap:
        kfree(bitmap);
out_free_pages:
        __free_pages(pages, INTR_REMAP_PAGE_ORDER);
out_free_table:
        kfree(ir_table);

        iommu->ir_table = NULL;

        return -ENOMEM;
}

static void intel_teardown_irq_remapping(struct intel_iommu *iommu)
{
        if (iommu && iommu->ir_table) {
                free_pages((unsigned long)iommu->ir_table->base,
                           INTR_REMAP_PAGE_ORDER);
                kfree(iommu->ir_table->bitmap);
                kfree(iommu->ir_table);
                iommu->ir_table = NULL;
        }
}

/*
 * Disable Interrupt Remapping.
 */
static void iommu_disable_irq_remapping(struct intel_iommu *iommu)
{
        unsigned long flags;
        u32 sts;

        if (!ecap_ir_support(iommu->ecap))
                return;

        /*
         * Global invalidation of interrupt entry cache before disabling
         * interrupt-remapping.
         */
        qi_global_iec(iommu);

        raw_spin_lock_irqsave(&iommu->register_lock, flags);

        sts = readl(iommu->reg + DMAR_GSTS_REG);
        if (!(sts & DMA_GSTS_IRES))
                goto end;

        iommu->gcmd &= ~DMA_GCMD_IRE;
        writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

        IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
                      readl, !(sts & DMA_GSTS_IRES), sts);

end:
        raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
}

static int __init dmar_x2apic_optout(void)
{
        struct acpi_table_dmar *dmar;

        dmar = (struct acpi_table_dmar *)dmar_tbl;
        if (!dmar || no_x2apic_optout)
                return 0;
        return dmar->flags & DMAR_X2APIC_OPT_OUT;
}

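/*
 * The DMAR ACPI table carries an "x2APIC opt out" flag with which the BIOS
 * requests that the OS not enable x2apic mode; dmar_x2apic_optout() above
 * honors it unless the user overrides it on the kernel command line with
 * intremap=no_x2apic_optout.
 */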
static void __init intel_cleanup_irq_remapping(void)
{
        struct dmar_drhd_unit *drhd;
        struct intel_iommu *iommu;

        for_each_iommu(iommu, drhd) {
                if (ecap_ir_support(iommu->ecap)) {
                        iommu_disable_irq_remapping(iommu);
                        intel_teardown_irq_remapping(iommu);
                }
        }

        if (x2apic_supported())
                pr_warn("Failed to enable irq remapping. You are vulnerable to irq-injection attacks.\n");
}

static int __init intel_prepare_irq_remapping(void)
{
        struct dmar_drhd_unit *drhd;
        struct intel_iommu *iommu;
        int eim = 0;

        if (irq_remap_broken) {
                pr_warn("This system BIOS has enabled interrupt remapping\n"
                        "on a chipset that contains an erratum making that\n"
                        "feature unstable. To maintain system stability\n"
                        "interrupt remapping is being disabled. Please\n"
                        "contact your BIOS vendor for an update\n");
                add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK);
                return -ENODEV;
        }

        if (dmar_table_init() < 0)
                return -ENODEV;

        if (!dmar_ir_support())
                return -ENODEV;

        if (parse_ioapics_under_ir() != 1) {
                pr_info("Not enabling interrupt remapping\n");
                goto error;
        }

        /* First make sure all IOMMUs support IRQ remapping */
        for_each_iommu(iommu, drhd)
                if (!ecap_ir_support(iommu->ecap))
                        goto error;

        /* Detect remapping mode: lapic or x2apic */
        if (x2apic_supported()) {
                eim = !dmar_x2apic_optout();
                if (!eim) {
                        pr_info("x2apic is disabled because BIOS sets x2apic opt out bit.\n");
                        pr_info("Use 'intremap=no_x2apic_optout' to override the BIOS setting.\n");
                }
        }

        for_each_iommu(iommu, drhd) {
                if (eim && !ecap_eim_support(iommu->ecap)) {
                        pr_info("%s does not support EIM\n", iommu->name);
                        eim = 0;
                }

                /* Disable IRQ remapping if it is already enabled */
                iommu_disable_irq_remapping(iommu);
        }

        eim_mode = eim;
        if (eim)
                pr_info("Queued invalidation will be enabled to support x2apic and Intr-remapping.\n");

        /* Do the initializations early */
        for_each_iommu(iommu, drhd) {
                if (intel_setup_irq_remapping(iommu)) {
                        pr_err("Failed to setup irq remapping for %s\n",
                               iommu->name);
                        goto error;
                }
        }

        return 0;

error:
        intel_cleanup_irq_remapping();
        return -ENODEV;
}

static int __init intel_enable_irq_remapping(void)
{
        struct dmar_drhd_unit *drhd;
        struct intel_iommu *iommu;
        bool setup = false;

        /*
         * Setup Interrupt-remapping for all the DRHD's now.
         */
        for_each_iommu(iommu, drhd) {
                iommu_enable_irq_remapping(iommu);
                setup = true;
        }

        if (!setup)
                goto error;

        irq_remapping_enabled = 1;

        /*
         * VT-d has a different layout for IO-APIC entries when
         * interrupt remapping is enabled. So it needs a special routine
         * to print IO-APIC entries for debugging purposes too.
         */
        x86_io_apic_ops.print_entries = intel_ir_io_apic_print_entries;

        pr_info("Enabled IRQ remapping in %s mode\n", eim_mode ? "x2apic" : "xapic");

        return eim_mode ? IRQ_REMAP_X2APIC_MODE : IRQ_REMAP_XAPIC_MODE;

error:
        intel_cleanup_irq_remapping();
        return -1;
}

static int ir_parse_one_hpet_scope(struct acpi_dmar_device_scope *scope,
                                   struct intel_iommu *iommu,
                                   struct acpi_dmar_hardware_unit *drhd)
{
        struct acpi_dmar_pci_path *path;
        u8 bus;
        int count, free = -1;

        bus = scope->bus;
        path = (struct acpi_dmar_pci_path *)(scope + 1);
        count = (scope->length - sizeof(struct acpi_dmar_device_scope))
                / sizeof(struct acpi_dmar_pci_path);

        while (--count > 0) {
                /*
                 * Access PCI config space directly because the PCI
                 * subsystem isn't initialized yet.
                 */
                bus = read_pci_config_byte(bus, path->device, path->function,
                                           PCI_SECONDARY_BUS);
                path++;
        }

        for (count = 0; count < MAX_HPET_TBS; count++) {
                if (ir_hpet[count].iommu == iommu &&
                    ir_hpet[count].id == scope->enumeration_id)
                        return 0;
                else if (ir_hpet[count].iommu == NULL && free == -1)
                        free = count;
        }
        if (free == -1) {
                pr_warn("Exceeded Max HPET blocks\n");
                return -ENOSPC;
        }

        ir_hpet[free].iommu = iommu;
        ir_hpet[free].id    = scope->enumeration_id;
        ir_hpet[free].bus   = bus;
        ir_hpet[free].devfn = PCI_DEVFN(path->device, path->function);
        pr_info("HPET id %d under DRHD base 0x%Lx\n",
                scope->enumeration_id, drhd->address);

        return 0;
}

static int ir_parse_one_ioapic_scope(struct acpi_dmar_device_scope *scope,
                                     struct intel_iommu *iommu,
                                     struct acpi_dmar_hardware_unit *drhd)
{
        struct acpi_dmar_pci_path *path;
        u8 bus;
        int count, free = -1;

        bus = scope->bus;
        path = (struct acpi_dmar_pci_path *)(scope + 1);
        count = (scope->length - sizeof(struct acpi_dmar_device_scope))
                / sizeof(struct acpi_dmar_pci_path);

        while (--count > 0) {
                /*
                 * Access PCI config space directly because the PCI
                 * subsystem isn't initialized yet.
                 */
                bus = read_pci_config_byte(bus, path->device, path->function,
                                           PCI_SECONDARY_BUS);
                path++;
        }

        for (count = 0; count < MAX_IO_APICS; count++) {
                if (ir_ioapic[count].iommu == iommu &&
                    ir_ioapic[count].id == scope->enumeration_id)
                        return 0;
                else if (ir_ioapic[count].iommu == NULL && free == -1)
                        free = count;
        }
        if (free == -1) {
                pr_warn("Exceeded Max IO APICS\n");
                return -ENOSPC;
        }

        ir_ioapic[free].bus   = bus;
        ir_ioapic[free].devfn = PCI_DEVFN(path->device, path->function);
        ir_ioapic[free].iommu = iommu;
        ir_ioapic[free].id    = scope->enumeration_id;
        pr_info("IOAPIC id %d under DRHD base 0x%Lx IOMMU %d\n",
                scope->enumeration_id, drhd->address, iommu->seq_id);

        return 0;
}

static int ir_parse_ioapic_hpet_scope(struct acpi_dmar_header *header,
                                      struct intel_iommu *iommu)
{
        struct acpi_dmar_hardware_unit *drhd;
        struct acpi_dmar_device_scope *scope;
        void *start, *end;
        int ret = 0;

        drhd = (struct acpi_dmar_hardware_unit *)header;
        start = (void *)(drhd + 1);
        end = ((void *)drhd) + header->length;

        while (start < end && ret == 0) {
                scope = start;
                if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_IOAPIC)
                        ret = ir_parse_one_ioapic_scope(scope, iommu, drhd);
                else if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_HPET)
                        ret = ir_parse_one_hpet_scope(scope, iommu, drhd);
                start += scope->length;
        }

        return ret;
}

static void ir_remove_ioapic_hpet_scope(struct intel_iommu *iommu)
{
        int i;

        for (i = 0; i < MAX_HPET_TBS; i++)
                if (ir_hpet[i].iommu == iommu)
                        ir_hpet[i].iommu = NULL;

        for (i = 0; i < MAX_IO_APICS; i++)
                if (ir_ioapic[i].iommu == iommu)
                        ir_ioapic[i].iommu = NULL;
}

/*
 * Find the association between IOAPICs and their interrupt-remapping
 * hardware units.
 */
static int __init parse_ioapics_under_ir(void)
{
        struct dmar_drhd_unit *drhd;
        struct intel_iommu *iommu;
        bool ir_supported = false;
        int ioapic_idx;

        for_each_iommu(iommu, drhd)
                if (ecap_ir_support(iommu->ecap)) {
                        if (ir_parse_ioapic_hpet_scope(drhd->hdr, iommu))
                                return -1;

                        ir_supported = true;
                }

        if (!ir_supported)
                return 0;

        for (ioapic_idx = 0; ioapic_idx < nr_ioapics; ioapic_idx++) {
                int ioapic_id = mpc_ioapic_id(ioapic_idx);
                if (!map_ioapic_to_ir(ioapic_id)) {
                        pr_err(FW_BUG "ioapic %d has no mapping iommu, interrupt remapping will be disabled\n",
                               ioapic_id);
                        return -1;
                }
        }

        return 1;
}

static int __init ir_dev_scope_init(void)
{
        int ret;

        if (!irq_remapping_enabled)
                return 0;

        down_write(&dmar_global_lock);
        ret = dmar_dev_scope_init();
        up_write(&dmar_global_lock);

        return ret;
}
rootfs_initcall(ir_dev_scope_init);

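/*
 * ir_dev_scope_init() runs as a rootfs_initcall so that the device-scope
 * structures are bound to actual PCI devices only after the PCI subsystem
 * has enumerated the buses.
 */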
static void disable_irq_remapping(void)
{
        struct dmar_drhd_unit *drhd;
        struct intel_iommu *iommu = NULL;

        /*
         * Disable Interrupt-remapping for all the DRHD's now.
         */
        for_each_iommu(iommu, drhd) {
                if (!ecap_ir_support(iommu->ecap))
                        continue;

                iommu_disable_irq_remapping(iommu);
        }
}

static int reenable_irq_remapping(int eim)
{
        struct dmar_drhd_unit *drhd;
        bool setup = false;
        struct intel_iommu *iommu = NULL;

        for_each_iommu(iommu, drhd)
                if (iommu->qi)
                        dmar_reenable_qi(iommu);

        /*
         * Setup Interrupt-remapping for all the DRHD's now.
         */
        for_each_iommu(iommu, drhd) {
                if (!ecap_ir_support(iommu->ecap))
                        continue;

                /* Set up interrupt remapping for iommu.*/
                iommu_set_irq_remapping(iommu, eim);
                iommu_enable_irq_remapping(iommu);
                setup = true;
        }

        if (!setup)
                goto error;

        return 0;

error:
        /*
         * handle error condition gracefully here!
         */
        return -1;
}

static void prepare_irte(struct irte *irte, int vector,
                         unsigned int dest)
{
        memset(irte, 0, sizeof(*irte));

        irte->present = 1;
        irte->dst_mode = apic->irq_dest_mode;
        /*
         * Trigger mode in the IRTE will always be edge, and for IO-APIC, the
         * actual level or edge trigger will be set up in the IO-APIC
         * RTE. This will help simplify level triggered irq migration.
         * For more details, see the comments (in io_apic.c) explaining IO-APIC
         * irq migration in the presence of interrupt-remapping.
         */
        irte->trigger_mode = 0;
        irte->dlvry_mode = apic->irq_delivery_mode;
        irte->vector = vector;
        irte->dest_id = IRTE_DEST(dest);
        irte->redir_hint = 1;
}

static int intel_setup_ioapic_entry(int irq,
                                    struct IO_APIC_route_entry *route_entry,
                                    unsigned int destination, int vector,
                                    struct io_apic_irq_attr *attr)
{
        int ioapic_id = mpc_ioapic_id(attr->ioapic);
        struct intel_iommu *iommu;
        struct IR_IO_APIC_route_entry *entry;
        struct irte irte;
        int index;

        down_read(&dmar_global_lock);
        iommu = map_ioapic_to_ir(ioapic_id);
        if (!iommu) {
                pr_warn("No mapping iommu for ioapic %d\n", ioapic_id);
                index = -ENODEV;
        } else {
                index = alloc_irte(iommu, irq, 1);
                if (index < 0) {
                        pr_warn("Failed to allocate IRTE for ioapic %d\n",
                                ioapic_id);
                        index = -ENOMEM;
                }
        }
        up_read(&dmar_global_lock);
        if (index < 0)
                return index;

        prepare_irte(&irte, vector, destination);

        /* Set source-id of interrupt request */
        set_ioapic_sid(&irte, ioapic_id);

        modify_irte(irq, &irte);

        apic_printk(APIC_VERBOSE, KERN_DEBUG "IOAPIC[%d]: "
                "Set IRTE entry (P:%d FPD:%d Dst_Mode:%d "
                "Redir_hint:%d Trig_Mode:%d Dlvry_Mode:%X "
                "Avail:%X Vector:%02X Dest:%08X "
                "SID:%04X SQ:%X SVT:%X)\n",
                attr->ioapic, irte.present, irte.fpd, irte.dst_mode,
                irte.redir_hint, irte.trigger_mode, irte.dlvry_mode,
                irte.avail, irte.vector, irte.dest_id,
                irte.sid, irte.sq, irte.svt);

        entry = (struct IR_IO_APIC_route_entry *)route_entry;
        memset(entry, 0, sizeof(*entry));

        entry->index2   = (index >> 15) & 0x1;
        entry->zero     = 0;
        entry->format   = 1;
        entry->index    = (index & 0x7fff);
        /*
         * IO-APIC RTE will be configured with the virtual vector.
         * The irq handler will do the explicit EOI to the io-apic.
         */
        entry->vector   = attr->ioapic_pin;
        entry->mask     = 0;                    /* enable IRQ */
        entry->trigger  = attr->trigger;
        entry->polarity = attr->polarity;

        /* Mask level triggered irqs.
         * Use IRQ_DELAYED_DISABLE for edge triggered irqs.
         */
        if (attr->trigger)
                entry->mask = 1;

        return 0;
}

/*
 * Migrate the IO-APIC irq in the presence of intr-remapping.
 *
 * For both level and edge triggered irqs, migration is a simple atomic
 * update (of vector and cpu destination) of the IRTE followed by a flush
 * of the hardware cache.
 *
 * For level triggered irqs, we eliminate the io-apic RTE modification (with
 * the updated vector information) by using a virtual vector (the io-apic pin
 * number). The real vector that is used for interrupting the cpu comes from
 * the interrupt-remapping table entry.
 *
 * As the migration is a simple atomic update of the IRTE, the same mechanism
 * is used to migrate MSI irqs in the presence of interrupt-remapping.
 */
static int
intel_ioapic_set_affinity(struct irq_data *data, const struct cpumask *mask,
                          bool force)
{
        struct irq_cfg *cfg = irqd_cfg(data);
        unsigned int dest, irq = data->irq;
        struct irte irte;
        int err;

        if (!config_enabled(CONFIG_SMP))
                return -EINVAL;

        if (!cpumask_intersects(mask, cpu_online_mask))
                return -EINVAL;

        if (get_irte(irq, &irte))
                return -EBUSY;

        err = assign_irq_vector(irq, cfg, mask);
        if (err)
                return err;

        err = apic->cpu_mask_to_apicid_and(cfg->domain, mask, &dest);
        if (err) {
                if (assign_irq_vector(irq, cfg, data->affinity))
                        pr_err("Failed to recover vector for irq %d\n", irq);
                return err;
        }

        irte.vector = cfg->vector;
        irte.dest_id = IRTE_DEST(dest);

        /*
         * Atomically update the IRTE with the new destination and vector,
         * and flush the interrupt entry cache.
         */
        modify_irte(irq, &irte);

        /*
         * After this point, all the interrupts will start arriving
         * at the new destination. So, time to clean up the previous
         * vector allocation.
         */
        if (cfg->move_in_progress)
                send_cleanup_vector(cfg);

        cpumask_copy(data->affinity, mask);

        return 0;
}

static void intel_compose_msi_msg(struct pci_dev *pdev,
                                  unsigned int irq, unsigned int dest,
                                  struct msi_msg *msg, u8 hpet_id)
{
        struct irq_cfg *cfg;
        struct irte irte;
        u16 sub_handle = 0;
        int ir_index;

        cfg = irq_cfg(irq);

        ir_index = map_irq_to_irte_handle(irq, &sub_handle);
        BUG_ON(ir_index == -1);

        prepare_irte(&irte, cfg->vector, dest);

        /* Set source-id of interrupt request */
        if (pdev)
                set_msi_sid(&irte, pdev);
        else
                set_hpet_sid(&irte, hpet_id);

        modify_irte(irq, &irte);

        msg->address_hi = MSI_ADDR_BASE_HI;
        msg->data = sub_handle;
        msg->address_lo = MSI_ADDR_BASE_LO | MSI_ADDR_IR_EXT_INT |
                          MSI_ADDR_IR_SHV |
                          MSI_ADDR_IR_INDEX1(ir_index) |
                          MSI_ADDR_IR_INDEX2(ir_index);
}

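/*
 * Remappable-format MSI: bits 19:5 of the address carry the low 15 bits of
 * the IRTE handle and bit 2 carries bit 15 (MSI_ADDR_IR_INDEX1/INDEX2);
 * MSI_ADDR_IR_SHV marks the data field as a valid sub-handle, which the
 * hardware adds to the handle to pick the final IRTE.
 */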
/*
 * Map the PCI dev to the corresponding remapping hardware unit
 * and allocate 'nvec' consecutive interrupt-remapping table entries
 * in it.
 */
static int intel_msi_alloc_irq(struct pci_dev *dev, int irq, int nvec)
{
        struct intel_iommu *iommu;
        int index;

        down_read(&dmar_global_lock);
        iommu = map_dev_to_ir(dev);
        if (!iommu) {
                pr_err("Unable to map PCI %s to iommu\n", pci_name(dev));
                index = -ENOENT;
        } else {
                index = alloc_irte(iommu, irq, nvec);
                if (index < 0) {
                        pr_err("Unable to allocate %d IRTE for PCI %s\n",
                               nvec, pci_name(dev));
                        index = -ENOSPC;
                }
        }
        up_read(&dmar_global_lock);

        return index;
}

static int intel_msi_setup_irq(struct pci_dev *pdev, unsigned int irq,
                               int index, int sub_handle)
{
        struct intel_iommu *iommu;
        int ret = -ENOENT;

        down_read(&dmar_global_lock);
        iommu = map_dev_to_ir(pdev);
        if (iommu) {
                /*
                 * Set up the mapping between the irq and the IRTE
                 * base index, with the sub_handle pointing to the
                 * appropriate interrupt remap table entry.
                 */
                set_irte_irq(irq, iommu, index, sub_handle);
                ret = 0;
        }
        up_read(&dmar_global_lock);

        return ret;
}

static int intel_alloc_hpet_msi(unsigned int irq, unsigned int id)
{
        int ret = -1;
        struct intel_iommu *iommu;
        int index;

        down_read(&dmar_global_lock);
        iommu = map_hpet_to_ir(id);
        if (iommu) {
                index = alloc_irte(iommu, irq, 1);
                if (index >= 0)
                        ret = 0;
        }
        up_read(&dmar_global_lock);

        return ret;
}

struct irq_remap_ops intel_irq_remap_ops = {
        .prepare                = intel_prepare_irq_remapping,
        .enable                 = intel_enable_irq_remapping,
        .disable                = disable_irq_remapping,
        .reenable               = reenable_irq_remapping,
        .enable_faulting        = enable_drhd_fault_handling,
        .setup_ioapic_entry     = intel_setup_ioapic_entry,
        .set_affinity           = intel_ioapic_set_affinity,
        .free_irq               = free_irte,
        .compose_msi_msg        = intel_compose_msi_msg,
        .msi_alloc_irq          = intel_msi_alloc_irq,
        .msi_setup_irq          = intel_msi_setup_irq,
        .alloc_hpet_msi         = intel_alloc_hpet_msi,
};

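/*
 * The generic x86 irq_remapping layer selects these ops when a DMAR table
 * with interrupt-remapping support is detected, routing all IO-APIC, MSI
 * and HPET interrupt setup on this platform through the functions above.
 */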
/*
 * Support of Interrupt Remapping Unit Hotplug
 */
static int dmar_ir_add(struct dmar_drhd_unit *dmaru, struct intel_iommu *iommu)
{
        int ret;
        int eim = x2apic_enabled();

        if (eim && !ecap_eim_support(iommu->ecap)) {
                pr_info("DRHD %Lx: EIM not supported by DRHD, ecap %Lx\n",
                        iommu->reg_phys, iommu->ecap);
                return -ENODEV;
        }

        if (ir_parse_ioapic_hpet_scope(dmaru->hdr, iommu)) {
                pr_warn("DRHD %Lx: failed to parse managed IOAPIC/HPET\n",
                        iommu->reg_phys);
                return -ENODEV;
        }

        /* TODO: check all IOAPICs are covered by IOMMU */

        /* Setup Interrupt-remapping now. */
        ret = intel_setup_irq_remapping(iommu);
        if (ret) {
                pr_err("Failed to setup irq remapping for %s\n",
                       iommu->name);
                intel_teardown_irq_remapping(iommu);
                ir_remove_ioapic_hpet_scope(iommu);
        } else {
                iommu_enable_irq_remapping(iommu);
        }

        return ret;
}

int dmar_ir_hotplug(struct dmar_drhd_unit *dmaru, bool insert)
{
        int ret = 0;
        struct intel_iommu *iommu = dmaru->iommu;

        if (!irq_remapping_enabled)
                return 0;
        if (iommu == NULL)
                return -EINVAL;
        if (!ecap_ir_support(iommu->ecap))
                return 0;

        if (insert) {
                if (!iommu->ir_table)
                        ret = dmar_ir_add(dmaru, iommu);
        } else {
                if (iommu->ir_table) {
                        if (!bitmap_empty(iommu->ir_table->bitmap,
                                          INTR_REMAP_TABLE_ENTRIES)) {
                                ret = -EBUSY;
                        } else {
                                iommu_disable_irq_remapping(iommu);
                                intel_teardown_irq_remapping(iommu);
                                ir_remove_ioapic_hpet_scope(iommu);
                        }
                }
        }

        return ret;
}