#include <linux/interrupt.h>
#include <linux/dmar.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/jiffies.h>
#include <linux/hpet.h>
#include <linux/pci.h>
#include <linux/irq.h>
#include <linux/intel-iommu.h>
#include <linux/acpi.h>
#include <asm/io_apic.h>
#include <asm/smp.h>
#include <asm/cpu.h>
#include <asm/irq_remapping.h>
#include <asm/pci-direct.h>
#include <asm/msidef.h>

#include "irq_remapping.h"
struct ioapic_scope {
	struct intel_iommu *iommu;
	unsigned int id;
	unsigned int bus;	/* PCI bus number */
	unsigned int devfn;	/* PCI devfn number */
};

struct hpet_scope {
	struct intel_iommu *iommu;
	u8 id;
	unsigned int bus;
	unsigned int devfn;
};
#define IR_X2APIC_MODE(mode)	(mode ? (1 << 11) : 0)
#define IRTE_DEST(dest)		((eim_mode) ? dest : dest << 8)
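/*
 * Clarifying note (added; not in the original source): without EIM the
 * IRTE destination field carries an xapic-style 8-bit APIC ID in bits
 * 15:8, so IRTE_DEST(0x12) yields 0x1200; with EIM (x2apic) enabled,
 * the full 32-bit destination ID is used unshifted.
 */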
static int __read_mostly eim_mode;
static struct ioapic_scope ir_ioapic[MAX_IO_APICS];
static struct hpet_scope ir_hpet[MAX_HPET_TBS];
/*
 * Lock ordering:
 * ->dmar_global_lock
 *	->irq_2_ir_lock
 *		->qi->q_lock
 *	->iommu->register_lock
 * Note:
 * intel_irq_remap_ops.{supported,prepare,enable,disable,reenable} are called
 * in a single-threaded environment with interrupts disabled, so there is
 * no need to take the dmar_global_lock.
 */
static DEFINE_RAW_SPINLOCK(irq_2_ir_lock);
static int __init parse_ioapics_under_ir(void);
static struct irq_2_iommu *irq_2_iommu(unsigned int irq)
{
	struct irq_cfg *cfg = irq_cfg(irq);

	return cfg ? &cfg->irq_2_iommu : NULL;
}
static int get_irte(int irq, struct irte *entry)
{
	struct irq_2_iommu *irq_iommu = irq_2_iommu(irq);
	unsigned long flags;
	int index;

	if (!entry || !irq_iommu)
		return -1;

	raw_spin_lock_irqsave(&irq_2_ir_lock, flags);

	if (unlikely(!irq_iommu->iommu)) {
		raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);
		return -1;
	}

	index = irq_iommu->irte_index + irq_iommu->sub_handle;
	*entry = *(irq_iommu->iommu->ir_table->base + index);

	raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);
	return 0;
}
static int alloc_irte(struct intel_iommu *iommu, int irq, u16 count)
{
	struct ir_table *table = iommu->ir_table;
	struct irq_2_iommu *irq_iommu = irq_2_iommu(irq);
	struct irq_cfg *cfg = irq_cfg(irq);
	unsigned int mask = 0;
	unsigned long flags;
	int index;

	if (!count || !irq_iommu)
		return -1;

	if (count > 1) {
		count = __roundup_pow_of_two(count);
		mask = ilog2(count);
	}

	if (mask > ecap_max_handle_mask(iommu->ecap)) {
		pr_err("Requested mask %x exceeds the max invalidation handle mask value %Lx\n",
		       mask, ecap_max_handle_mask(iommu->ecap));
		return -1;
	}

	raw_spin_lock_irqsave(&irq_2_ir_lock, flags);
	index = bitmap_find_free_region(table->bitmap,
					INTR_REMAP_TABLE_ENTRIES, mask);
	if (index < 0) {
		pr_warn("IR%d: can't allocate an IRTE\n", iommu->seq_id);
	} else {
		cfg->remapped = 1;
		irq_iommu->iommu = iommu;
		irq_iommu->irte_index = index;
		irq_iommu->sub_handle = 0;
		irq_iommu->irte_mask = mask;
	}
	raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);

	return index;
}
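/*
 * Usage sketch (illustrative, not part of the original file): a
 * multi-vector allocation takes one power-of-two block and then
 * addresses each vector as irte_index + sub_handle, e.g.:
 *
 *	index = alloc_irte(iommu, irq, nvec);
 *	for (i = 1; i < nvec; i++)
 *		set_irte_irq(irq + i, iommu, index, i);
 */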
static int qi_flush_iec(struct intel_iommu *iommu, int index, int mask)
{
	struct qi_desc desc;

	desc.low = QI_IEC_IIDEX(index) | QI_IEC_TYPE | QI_IEC_IM(mask)
		   | QI_IEC_SELECTIVE;
	desc.high = 0;

	return qi_submit_sync(&desc, iommu);
}
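/*
 * Note (added for clarity): the index-mask (IM) field of the IEC
 * descriptor requests invalidation of 2^IM contiguous interrupt entry
 * cache entries starting at @index, which pairs with the power-of-two
 * block size recorded in irte_mask by alloc_irte().
 */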
static int map_irq_to_irte_handle(int irq, u16 *sub_handle)
{
	struct irq_2_iommu *irq_iommu = irq_2_iommu(irq);
	unsigned long flags;
	int index;

	if (!irq_iommu)
		return -1;

	raw_spin_lock_irqsave(&irq_2_ir_lock, flags);
	*sub_handle = irq_iommu->sub_handle;
	index = irq_iommu->irte_index;
	raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);
	return index;
}
static int set_irte_irq(int irq, struct intel_iommu *iommu, u16 index, u16 subhandle)
{
	struct irq_2_iommu *irq_iommu = irq_2_iommu(irq);
	struct irq_cfg *cfg = irq_cfg(irq);
	unsigned long flags;

	if (!irq_iommu)
		return -1;

	raw_spin_lock_irqsave(&irq_2_ir_lock, flags);

	cfg->remapped = 1;
	irq_iommu->iommu = iommu;
	irq_iommu->irte_index = index;
	irq_iommu->sub_handle = subhandle;
	irq_iommu->irte_mask = 0;

	raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);

	return 0;
}
static int modify_irte(int irq, struct irte *irte_modified)
{
	struct irq_2_iommu *irq_iommu = irq_2_iommu(irq);
	struct intel_iommu *iommu;
	unsigned long flags;
	struct irte *irte;
	int rc, index;

	if (!irq_iommu)
		return -1;

	raw_spin_lock_irqsave(&irq_2_ir_lock, flags);

	iommu = irq_iommu->iommu;

	index = irq_iommu->irte_index + irq_iommu->sub_handle;
	irte = &iommu->ir_table->base[index];

	set_64bit(&irte->low, irte_modified->low);
	set_64bit(&irte->high, irte_modified->high);
	__iommu_flush_cache(iommu, irte, sizeof(*irte));

	rc = qi_flush_iec(iommu, index, 0);
	raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);

	return rc;
}
static struct intel_iommu *map_hpet_to_ir(u8 hpet_id)
{
	int i;

	for (i = 0; i < MAX_HPET_TBS; i++)
		if (ir_hpet[i].id == hpet_id && ir_hpet[i].iommu)
			return ir_hpet[i].iommu;
	return NULL;
}
static struct intel_iommu *map_ioapic_to_ir(int apic)
{
	int i;

	for (i = 0; i < MAX_IO_APICS; i++)
		if (ir_ioapic[i].id == apic && ir_ioapic[i].iommu)
			return ir_ioapic[i].iommu;
	return NULL;
}
static struct intel_iommu *map_dev_to_ir(struct pci_dev *dev)
{
	struct dmar_drhd_unit *drhd;

	drhd = dmar_find_matched_drhd_unit(dev);
	if (!drhd)
		return NULL;

	return drhd->iommu;
}
static int clear_entries(struct irq_2_iommu *irq_iommu)
{
	struct irte *start, *entry, *end;
	struct intel_iommu *iommu;
	int index;

	if (irq_iommu->sub_handle)
		return 0;

	iommu = irq_iommu->iommu;
	index = irq_iommu->irte_index + irq_iommu->sub_handle;

	start = iommu->ir_table->base + index;
	end = start + (1 << irq_iommu->irte_mask);

	for (entry = start; entry < end; entry++) {
		set_64bit(&entry->low, 0);
		set_64bit(&entry->high, 0);
	}
	bitmap_release_region(iommu->ir_table->bitmap, index,
			      irq_iommu->irte_mask);

	return qi_flush_iec(iommu, index, irq_iommu->irte_mask);
}
static int free_irte(int irq)
{
	struct irq_2_iommu *irq_iommu = irq_2_iommu(irq);
	unsigned long flags;
	int rc;

	if (!irq_iommu)
		return -1;

	raw_spin_lock_irqsave(&irq_2_ir_lock, flags);

	rc = clear_entries(irq_iommu);

	irq_iommu->iommu = NULL;
	irq_iommu->irte_index = 0;
	irq_iommu->sub_handle = 0;
	irq_iommu->irte_mask = 0;

	raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);

	return rc;
}
/*
 * source validation type
 */
#define SVT_NO_VERIFY		0x0  /* no verification is required */
#define SVT_VERIFY_SID_SQ	0x1  /* verify using SID and SQ fields */
#define SVT_VERIFY_BUS		0x2  /* verify bus of request-id */

/*
 * source-id qualifier
 */
#define SQ_ALL_16	0x0  /* verify all 16 bits of request-id */
#define SQ_13_IGNORE_1	0x1  /* verify most significant 13 bits, ignore
			      * the third least significant bit
			      */
#define SQ_13_IGNORE_2	0x2  /* verify most significant 13 bits, ignore
			      * the second and third least significant bits
			      */
#define SQ_13_IGNORE_3	0x3  /* verify most significant 13 bits, ignore
			      * the three least significant bits
			      */
/*
 * set SVT, SQ and SID fields of irte to verify
 * source ids of interrupt requests
 */
static void set_irte_sid(struct irte *irte, unsigned int svt,
			 unsigned int sq, unsigned int sid)
{
	if (disable_sourceid_checking)
		svt = SVT_NO_VERIFY;
	irte->svt = svt;
	irte->sq = sq;
	irte->sid = sid;
}
static int set_ioapic_sid(struct irte *irte, int apic)
{
	int i;
	u16 sid = 0;

	if (!irte)
		return -1;

	down_read(&dmar_global_lock);
	for (i = 0; i < MAX_IO_APICS; i++) {
		if (ir_ioapic[i].iommu && ir_ioapic[i].id == apic) {
			sid = (ir_ioapic[i].bus << 8) | ir_ioapic[i].devfn;
			break;
		}
	}
	up_read(&dmar_global_lock);

	if (sid == 0) {
		pr_warn("Failed to set source-id of IOAPIC (%d)\n", apic);
		return -1;
	}

	set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_ALL_16, sid);

	return 0;
}
static int set_hpet_sid(struct irte *irte, u8 id)
{
	int i;
	u16 sid = 0;

	if (!irte)
		return -1;

	down_read(&dmar_global_lock);
	for (i = 0; i < MAX_HPET_TBS; i++) {
		if (ir_hpet[i].iommu && ir_hpet[i].id == id) {
			sid = (ir_hpet[i].bus << 8) | ir_hpet[i].devfn;
			break;
		}
	}
	up_read(&dmar_global_lock);

	if (sid == 0) {
		pr_warn("Failed to set source-id of HPET block (%d)\n", id);
		return -1;
	}

	/*
	 * Should really use SQ_ALL_16. Some platforms are broken.
	 * While we figure out the right quirks for these broken platforms, use
	 * SQ_13_IGNORE_3 for now.
	 */
	set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_13_IGNORE_3, sid);

	return 0;
}
struct set_msi_sid_data {
	struct pci_dev *pdev;
	u16 alias;
};

static int set_msi_sid_cb(struct pci_dev *pdev, u16 alias, void *opaque)
{
	struct set_msi_sid_data *data = opaque;

	data->pdev = pdev;
	data->alias = alias;

	return 0;
}

static int set_msi_sid(struct irte *irte, struct pci_dev *dev)
{
	struct set_msi_sid_data data;

	if (!irte || !dev)
		return -1;

	pci_for_each_dma_alias(dev, set_msi_sid_cb, &data);

	/*
	 * DMA alias provides us with a PCI device and alias.  The only case
	 * where the alias will be on a different bus than the device is the
	 * case of a PCIe-to-PCI bridge, where the alias is for the
	 * subordinate bus.  In this case we can only verify the bus.
	 *
	 * If the alias device is on a different bus than our source device
	 * then we have a topology based alias, use it.
	 *
	 * Otherwise, the alias is for a device DMA quirk and we cannot
	 * assume that MSI uses the same requester ID.  Therefore use the
	 * original device.
	 */
	if (PCI_BUS_NUM(data.alias) != data.pdev->bus->number)
		set_irte_sid(irte, SVT_VERIFY_BUS, SQ_ALL_16,
			     PCI_DEVID(PCI_BUS_NUM(data.alias),
				       dev->bus->number));
	else if (data.pdev->bus->number != dev->bus->number)
		set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_ALL_16, data.alias);
	else
		set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_ALL_16,
			     PCI_DEVID(dev->bus->number, dev->devfn));

	return 0;
}
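/*
 * Example of the bridge case (added for clarity): a conventional PCI
 * device behind a PCIe-to-PCI bridge sends requests tagged with the
 * bridge's subordinate bus number and devfn 0, so the reported alias
 * sits on a different bus than the device and only the bus can be
 * checked.  With SVT_VERIFY_BUS the SID field is reinterpreted as a bus
 * range (start bus in bits 15:8, end bus in bits 7:0), which is why
 * PCI_DEVID() is passed two bus numbers in that branch.
 */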
static void iommu_set_irq_remapping(struct intel_iommu *iommu, int mode)
{
	u64 addr;
	u32 sts;
	unsigned long flags;

	addr = virt_to_phys((void *)iommu->ir_table->base);

	raw_spin_lock_irqsave(&iommu->register_lock, flags);

	dmar_writeq(iommu->reg + DMAR_IRTA_REG,
		    (addr) | IR_X2APIC_MODE(mode) | INTR_REMAP_TABLE_REG_SIZE);

	/* Set interrupt-remapping table pointer */
	writel(iommu->gcmd | DMA_GCMD_SIRTP, iommu->reg + DMAR_GCMD_REG);

	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (sts & DMA_GSTS_IRTPS), sts);
	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);

	/*
	 * global invalidation of interrupt entry cache before enabling
	 * interrupt-remapping.
	 */
	qi_global_iec(iommu);

	raw_spin_lock_irqsave(&iommu->register_lock, flags);

	/* Enable interrupt-remapping */
	iommu->gcmd |= DMA_GCMD_IRE;
	iommu->gcmd &= ~DMA_GCMD_CFI;  /* Block compatibility-format MSIs */
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (sts & DMA_GSTS_IRES), sts);

	/*
	 * With CFI clear in the Global Command register, we should be
	 * protected from dangerous (i.e. compatibility) interrupts
	 * regardless of x2apic status.  Check just to be sure.
	 */
	if (sts & DMA_GSTS_CFIS)
		WARN(1, KERN_WARNING
			"Compatibility-format IRQs enabled despite intr remapping;\n"
			"you are vulnerable to IRQ injection.\n");

	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
}
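/*
 * Note (added for clarity): DMAR_IRTA_REG takes the table's physical
 * address plus a size field in bits 3:0, where the table holds
 * 2^(size + 1) entries; INTR_REMAP_TABLE_REG_SIZE (0xf) thus selects
 * the full 65536-entry table.
 */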
static int intel_setup_irq_remapping(struct intel_iommu *iommu)
{
	struct ir_table *ir_table;
	struct page *pages;
	unsigned long *bitmap;

	if (iommu->ir_table)
		return 0;

	ir_table = kzalloc(sizeof(struct ir_table), GFP_KERNEL);
	if (!ir_table)
		return -ENOMEM;

	pages = alloc_pages_node(iommu->node, GFP_KERNEL | __GFP_ZERO,
				 INTR_REMAP_PAGE_ORDER);
	if (!pages) {
		pr_err("IR%d: failed to allocate pages of order %d\n",
		       iommu->seq_id, INTR_REMAP_PAGE_ORDER);
		goto out_free_table;
	}

	bitmap = kcalloc(BITS_TO_LONGS(INTR_REMAP_TABLE_ENTRIES),
			 sizeof(long), GFP_ATOMIC);
	if (bitmap == NULL) {
		pr_err("IR%d: failed to allocate bitmap\n", iommu->seq_id);
		goto out_free_pages;
	}

	ir_table->base = page_address(pages);
	ir_table->bitmap = bitmap;
	iommu->ir_table = ir_table;
	return 0;

out_free_pages:
	__free_pages(pages, INTR_REMAP_PAGE_ORDER);
out_free_table:
	kfree(ir_table);
	return -ENOMEM;
}
static void intel_teardown_irq_remapping(struct intel_iommu *iommu)
{
	if (iommu && iommu->ir_table) {
		free_pages((unsigned long)iommu->ir_table->base,
			   INTR_REMAP_PAGE_ORDER);
		kfree(iommu->ir_table->bitmap);
		kfree(iommu->ir_table);
		iommu->ir_table = NULL;
	}
}
/*
 * Disable Interrupt Remapping.
 */
static void iommu_disable_irq_remapping(struct intel_iommu *iommu)
{
	unsigned long flags;
	u32 sts;

	if (!ecap_ir_support(iommu->ecap))
		return;

	/*
	 * global invalidation of interrupt entry cache before disabling
	 * interrupt-remapping.
	 */
	qi_global_iec(iommu);

	raw_spin_lock_irqsave(&iommu->register_lock, flags);

	sts = dmar_readq(iommu->reg + DMAR_GSTS_REG);
	if (!(sts & DMA_GSTS_IRES))
		goto end;

	iommu->gcmd &= ~DMA_GCMD_IRE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, !(sts & DMA_GSTS_IRES), sts);

end:
	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
}
static int __init dmar_x2apic_optout(void)
{
	struct acpi_table_dmar *dmar;
	dmar = (struct acpi_table_dmar *)dmar_tbl;
	if (!dmar || no_x2apic_optout)
		return 0;
	return dmar->flags & DMAR_X2APIC_OPT_OUT;
}
static void __init intel_cleanup_irq_remapping(void)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;

	for_each_iommu(iommu, drhd) {
		if (ecap_ir_support(iommu->ecap)) {
			iommu_disable_irq_remapping(iommu);
			intel_teardown_irq_remapping(iommu);
		}
	}

	if (x2apic_supported())
		pr_warn("Failed to enable irq remapping. You are vulnerable to irq-injection attacks.\n");
}
static int __init intel_prepare_irq_remapping(void)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;

	/* First check whether IRQ remapping should be enabled */
	if (disable_irq_remap)
		return -ENODEV;

	if (irq_remap_broken) {
		printk(KERN_WARNING
			"This system BIOS has enabled interrupt remapping\n"
			"on a chipset that contains an erratum making that\n"
			"feature unstable.  To maintain system stability\n"
			"interrupt remapping is being disabled.  Please\n"
			"contact your BIOS vendor for an update\n");
		add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK);
		disable_irq_remap = 1;
		return -ENODEV;
	}

	if (dmar_table_init() < 0)
		return -ENODEV;

	if (!dmar_ir_support())
		return -ENODEV;

	if (parse_ioapics_under_ir() != 1) {
		printk(KERN_INFO "Not enabling interrupt remapping\n");
		goto error;
	}

	/* First make sure all IOMMUs support IRQ remapping */
	for_each_iommu(iommu, drhd)
		if (!ecap_ir_support(iommu->ecap))
			goto error;

	/* Do the allocations early */
	for_each_iommu(iommu, drhd)
		if (intel_setup_irq_remapping(iommu))
			goto error;

	return 0;

error:
	intel_cleanup_irq_remapping();
	return -ENODEV;
}
static int __init intel_enable_irq_remapping(void)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;
	bool setup = false;
	int eim = 0;

	if (x2apic_supported()) {
		eim = !dmar_x2apic_optout();
		if (!eim)
			printk(KERN_WARNING
				"Your BIOS is broken and requested that x2apic be disabled.\n"
				"This will slightly decrease performance.\n"
				"Use 'intremap=no_x2apic_optout' to override BIOS request.\n");
	}

	for_each_iommu(iommu, drhd) {
		/*
		 * If the queued invalidation is already initialized,
		 * we shouldn't disable it.
		 */
		if (iommu->qi)
			continue;

		/*
		 * Clear previous faults.
		 */
		dmar_fault(-1, iommu);

		/*
		 * Disable intr remapping and queued invalidation, if already
		 * enabled prior to OS handover.
		 */
		iommu_disable_irq_remapping(iommu);

		dmar_disable_qi(iommu);
	}

	/*
	 * check for the Interrupt-remapping support
	 */
	for_each_iommu(iommu, drhd)
		if (eim && !ecap_eim_support(iommu->ecap)) {
			printk(KERN_INFO "DRHD %Lx: EIM not supported by DRHD, ecap %Lx\n",
			       drhd->reg_base_addr, iommu->ecap);
			eim = 0;
		}
	eim_mode = eim;
	if (eim)
		pr_info("Queued invalidation will be enabled to support x2apic and Intr-remapping.\n");

	/*
	 * Enable queued invalidation for all the DRHD's.
	 */
	for_each_iommu(iommu, drhd) {
		int ret = dmar_enable_qi(iommu);

		if (ret) {
			printk(KERN_ERR "DRHD %Lx: failed to enable queued invalidation, ecap %Lx, ret %d\n",
			       drhd->reg_base_addr, iommu->ecap, ret);
			goto error;
		}
	}

	/*
	 * Setup Interrupt-remapping for all the DRHD's now.
	 */
	for_each_iommu(iommu, drhd) {
		iommu_set_irq_remapping(iommu, eim);
		setup = true;
	}

	if (!setup)
		goto error;

	irq_remapping_enabled = 1;

	/*
	 * VT-d has a different layout for IO-APIC entries when
	 * interrupt remapping is enabled. So it needs a special routine
	 * to print IO-APIC entries for debugging purposes too.
	 */
	x86_io_apic_ops.print_entries = intel_ir_io_apic_print_entries;

	pr_info("Enabled IRQ remapping in %s mode\n", eim ? "x2apic" : "xapic");

	return eim ? IRQ_REMAP_X2APIC_MODE : IRQ_REMAP_XAPIC_MODE;

error:
	intel_cleanup_irq_remapping();
	return -1;
}
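/*
 * Summary of the enable sequence above (added for clarity): any IR/QI
 * state left over from the firmware is torn down first, queued
 * invalidation is then enabled (the interrupt entry cache flushes
 * require it), and only after that is the remapping table programmed
 * and DMA_GCMD_IRE set.
 */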
static int ir_parse_one_hpet_scope(struct acpi_dmar_device_scope *scope,
				   struct intel_iommu *iommu,
				   struct acpi_dmar_hardware_unit *drhd)
{
	struct acpi_dmar_pci_path *path;
	u8 bus;
	int count, free = -1;

	bus = scope->bus;
	path = (struct acpi_dmar_pci_path *)(scope + 1);
	count = (scope->length - sizeof(struct acpi_dmar_device_scope))
		/ sizeof(struct acpi_dmar_pci_path);

	while (--count > 0) {
		/*
		 * Access PCI directly because the PCI
		 * subsystem isn't initialized yet.
		 */
		bus = read_pci_config_byte(bus, path->device, path->function,
					   PCI_SECONDARY_BUS);
		path++;
	}

	for (count = 0; count < MAX_HPET_TBS; count++) {
		if (ir_hpet[count].iommu == iommu &&
		    ir_hpet[count].id == scope->enumeration_id)
			return 0;
		else if (ir_hpet[count].iommu == NULL && free == -1)
			free = count;
	}
	if (free == -1) {
		pr_warn("Exceeded Max HPET blocks\n");
		return -ENOSPC;
	}

	ir_hpet[free].iommu = iommu;
	ir_hpet[free].id = scope->enumeration_id;
	ir_hpet[free].bus = bus;
	ir_hpet[free].devfn = PCI_DEVFN(path->device, path->function);
	pr_info("HPET id %d under DRHD base 0x%Lx\n",
		scope->enumeration_id, drhd->address);

	return 0;
}
static int ir_parse_one_ioapic_scope(struct acpi_dmar_device_scope *scope,
				     struct intel_iommu *iommu,
				     struct acpi_dmar_hardware_unit *drhd)
{
	struct acpi_dmar_pci_path *path;
	u8 bus;
	int count, free = -1;

	bus = scope->bus;
	path = (struct acpi_dmar_pci_path *)(scope + 1);
	count = (scope->length - sizeof(struct acpi_dmar_device_scope))
		/ sizeof(struct acpi_dmar_pci_path);

	while (--count > 0) {
		/*
		 * Access PCI directly because the PCI
		 * subsystem isn't initialized yet.
		 */
		bus = read_pci_config_byte(bus, path->device, path->function,
					   PCI_SECONDARY_BUS);
		path++;
	}

	for (count = 0; count < MAX_IO_APICS; count++) {
		if (ir_ioapic[count].iommu == iommu &&
		    ir_ioapic[count].id == scope->enumeration_id)
			return 0;
		else if (ir_ioapic[count].iommu == NULL && free == -1)
			free = count;
	}
	if (free == -1) {
		pr_warn("Exceeded Max IO APICS\n");
		return -ENOSPC;
	}

	ir_ioapic[free].bus = bus;
	ir_ioapic[free].devfn = PCI_DEVFN(path->device, path->function);
	ir_ioapic[free].iommu = iommu;
	ir_ioapic[free].id = scope->enumeration_id;
	pr_info("IOAPIC id %d under DRHD base 0x%Lx IOMMU %d\n",
		scope->enumeration_id, drhd->address, iommu->seq_id);

	return 0;
}
static int ir_parse_ioapic_hpet_scope(struct acpi_dmar_header *header,
				      struct intel_iommu *iommu)
{
	struct acpi_dmar_hardware_unit *drhd;
	struct acpi_dmar_device_scope *scope;
	void *start, *end;
	int ret = 0;

	drhd = (struct acpi_dmar_hardware_unit *)header;
	start = (void *)(drhd + 1);
	end = ((void *)drhd) + header->length;

	while (start < end && ret == 0) {
		scope = start;
		if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_IOAPIC)
			ret = ir_parse_one_ioapic_scope(scope, iommu, drhd);
		else if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_HPET)
			ret = ir_parse_one_hpet_scope(scope, iommu, drhd);
		start += scope->length;
	}

	return ret;
}
static void ir_remove_ioapic_hpet_scope(struct intel_iommu *iommu)
{
	int i;

	for (i = 0; i < MAX_HPET_TBS; i++)
		if (ir_hpet[i].iommu == iommu)
			ir_hpet[i].iommu = NULL;

	for (i = 0; i < MAX_IO_APICS; i++)
		if (ir_ioapic[i].iommu == iommu)
			ir_ioapic[i].iommu = NULL;
}
/*
 * Finds the association between IOAPICs and their Interrupt-remapping
 * hardware units.
 */
static int __init parse_ioapics_under_ir(void)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;
	int ir_supported = 0;
	int ioapic_idx;

	for_each_iommu(iommu, drhd)
		if (ecap_ir_support(iommu->ecap)) {
			if (ir_parse_ioapic_hpet_scope(drhd->hdr, iommu))
				return -1;

			ir_supported = 1;
		}

	if (!ir_supported)
		return 0;

	for (ioapic_idx = 0; ioapic_idx < nr_ioapics; ioapic_idx++) {
		int ioapic_id = mpc_ioapic_id(ioapic_idx);
		if (!map_ioapic_to_ir(ioapic_id)) {
			pr_err(FW_BUG "ioapic %d has no mapping iommu, "
			       "interrupt remapping will be disabled\n",
			       ioapic_id);
			return -1;
		}
	}

	return 1;
}
static int __init ir_dev_scope_init(void)
{
	int ret;

	if (!irq_remapping_enabled)
		return 0;

	down_write(&dmar_global_lock);
	ret = dmar_dev_scope_init();
	up_write(&dmar_global_lock);

	return ret;
}
rootfs_initcall(ir_dev_scope_init);
static void disable_irq_remapping(void)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu = NULL;

	/*
	 * Disable Interrupt-remapping for all the DRHD's now.
	 */
	for_each_iommu(iommu, drhd) {
		if (!ecap_ir_support(iommu->ecap))
			continue;

		iommu_disable_irq_remapping(iommu);
	}
}
static int reenable_irq_remapping(int eim)
{
	struct dmar_drhd_unit *drhd;
	bool setup = false;
	struct intel_iommu *iommu = NULL;

	for_each_iommu(iommu, drhd)
		if (iommu->qi)
			dmar_reenable_qi(iommu);

	/*
	 * Setup Interrupt-remapping for all the DRHD's now.
	 */
	for_each_iommu(iommu, drhd) {
		if (!ecap_ir_support(iommu->ecap))
			continue;

		/* Set up interrupt remapping for iommu.*/
		iommu_set_irq_remapping(iommu, eim);
		setup = true;
	}

	if (!setup)
		goto error;

	return 0;

error:
	/*
	 * handle error condition gracefully here!
	 */
	return -1;
}
static void prepare_irte(struct irte *irte, int vector,
			 unsigned int dest)
{
	memset(irte, 0, sizeof(*irte));

	irte->present = 1;
	irte->dst_mode = apic->irq_dest_mode;
	/*
	 * Trigger mode in the IRTE will always be edge, and for IO-APIC, the
	 * actual level or edge trigger will be set up in the IO-APIC
	 * RTE. This will help simplify level triggered irq migration.
	 * For more details, see the comments (in io_apic.c) explaining IO-APIC
	 * irq migration in the presence of interrupt-remapping.
	 */
	irte->trigger_mode = 0;
	irte->dlvry_mode = apic->irq_delivery_mode;
	irte->vector = vector;
	irte->dest_id = IRTE_DEST(dest);
	irte->redir_hint = 1;
}
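/*
 * Note (added for clarity): prepare_irte() intentionally leaves the
 * SVT/SQ/SID fields zero; callers fill them via set_ioapic_sid(),
 * set_hpet_sid() or set_msi_sid() before the entry is installed with
 * modify_irte().
 */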
static int intel_setup_ioapic_entry(int irq,
				    struct IO_APIC_route_entry *route_entry,
				    unsigned int destination, int vector,
				    struct io_apic_irq_attr *attr)
{
	int ioapic_id = mpc_ioapic_id(attr->ioapic);
	struct intel_iommu *iommu;
	struct IR_IO_APIC_route_entry *entry;
	struct irte irte;
	int index;

	down_read(&dmar_global_lock);
	iommu = map_ioapic_to_ir(ioapic_id);
	if (!iommu) {
		pr_warn("No mapping iommu for ioapic %d\n", ioapic_id);
		index = -ENODEV;
	} else {
		index = alloc_irte(iommu, irq, 1);
		if (index < 0) {
			pr_warn("Failed to allocate IRTE for ioapic %d\n",
				ioapic_id);
			index = -ENOMEM;
		}
	}
	up_read(&dmar_global_lock);
	if (index < 0)
		return index;

	prepare_irte(&irte, vector, destination);

	/* Set source-id of interrupt request */
	set_ioapic_sid(&irte, ioapic_id);

	modify_irte(irq, &irte);

	apic_printk(APIC_VERBOSE, KERN_DEBUG "IOAPIC[%d]: "
		"Set IRTE entry (P:%d FPD:%d Dst_Mode:%d "
		"Redir_hint:%d Trig_Mode:%d Dlvry_Mode:%X "
		"Avail:%X Vector:%02X Dest:%08X "
		"SID:%04X SQ:%X SVT:%X)\n",
		attr->ioapic, irte.present, irte.fpd, irte.dst_mode,
		irte.redir_hint, irte.trigger_mode, irte.dlvry_mode,
		irte.avail, irte.vector, irte.dest_id,
		irte.sid, irte.sq, irte.svt);

	entry = (struct IR_IO_APIC_route_entry *)route_entry;
	memset(entry, 0, sizeof(*entry));

	entry->index2 = (index >> 15) & 0x1;
	entry->zero = 0;
	entry->format = 1;
	entry->index = (index & 0x7fff);
	/*
	 * IO-APIC RTE will be configured with virtual vector.
	 * irq handler will do the explicit EOI to the io-apic.
	 */
	entry->vector = attr->ioapic_pin;
	entry->mask = 0;			/* enable IRQ */
	entry->trigger = attr->trigger;
	entry->polarity = attr->polarity;

	/* Mask level triggered irqs.
	 * Use IRQ_DELAYED_DISABLE for edge triggered irqs.
	 */
	if (attr->trigger)
		entry->mask = 1;

	return 0;
}
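/*
 * Layout note (added for clarity): in remappable format the RTE no
 * longer carries a destination APIC ID; instead the 16-bit IRTE handle
 * is split across the index (bits 14:0) and index2 fields, and the
 * vector field is reused to hold the IO-APIC pin number that serves as
 * the "virtual vector" for EOI handling.
 */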
/*
 * Migrate the IO-APIC irq in the presence of intr-remapping.
 *
 * For both level and edge triggered, irq migration is a simple atomic
 * update (of vector and CPU destination) of the IRTE, followed by a
 * flush of the hardware cache.
 *
 * For level triggered, we eliminate the io-apic RTE modification (with the
 * updated vector information), by using a virtual vector (io-apic pin number).
 * The real vector that is used for interrupting the CPU will come from
 * the interrupt-remapping table entry.
 *
 * As the migration is a simple atomic update of the IRTE, the same mechanism
 * is used to migrate MSI irq's in the presence of interrupt-remapping.
 */
static int
intel_ioapic_set_affinity(struct irq_data *data, const struct cpumask *mask,
			  bool force)
{
	struct irq_cfg *cfg = irqd_cfg(data);
	unsigned int dest, irq = data->irq;
	struct irte irte;
	int err;

	if (!config_enabled(CONFIG_SMP))
		return -EINVAL;

	if (!cpumask_intersects(mask, cpu_online_mask))
		return -EINVAL;

	if (get_irte(irq, &irte))
		return -EBUSY;

	err = assign_irq_vector(irq, cfg, mask);
	if (err)
		return err;

	err = apic->cpu_mask_to_apicid_and(cfg->domain, mask, &dest);
	if (err) {
		if (assign_irq_vector(irq, cfg, data->affinity))
			pr_err("Failed to recover vector for irq %d\n", irq);
		return err;
	}

	irte.vector = cfg->vector;
	irte.dest_id = IRTE_DEST(dest);

	/*
	 * Atomically updates the IRTE with the new destination, vector
	 * and flushes the interrupt entry cache.
	 */
	modify_irte(irq, &irte);

	/*
	 * After this point, all the interrupts will start arriving
	 * at the new destination. So, time to clean up the previous
	 * vector allocation.
	 */
	if (cfg->move_in_progress)
		send_cleanup_vector(cfg);

	cpumask_copy(data->affinity, mask);

	return 0;
}
static void intel_compose_msi_msg(struct pci_dev *pdev,
				  unsigned int irq, unsigned int dest,
				  struct msi_msg *msg, u8 hpet_id)
{
	struct irq_cfg *cfg;
	struct irte irte;
	u16 sub_handle = 0;
	int ir_index;

	cfg = irq_cfg(irq);

	ir_index = map_irq_to_irte_handle(irq, &sub_handle);
	BUG_ON(ir_index == -1);

	prepare_irte(&irte, cfg->vector, dest);

	/* Set source-id of interrupt request */
	if (pdev)
		set_msi_sid(&irte, pdev);
	else
		set_hpet_sid(&irte, hpet_id);

	modify_irte(irq, &irte);

	msg->address_hi = MSI_ADDR_BASE_HI;
	msg->data = sub_handle;
	msg->address_lo = MSI_ADDR_BASE_LO | MSI_ADDR_IR_EXT_INT |
			  MSI_ADDR_IR_SHV |
			  MSI_ADDR_IR_INDEX1(ir_index) |
			  MSI_ADDR_IR_INDEX2(ir_index);
}
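/*
 * Note (added for clarity): for a remappable MSI the address encodes
 * the IRTE handle via MSI_ADDR_IR_INDEX1/INDEX2, and with
 * MSI_ADDR_IR_SHV set the hardware adds the subhandle carried in
 * msg->data to that handle, which is how multi-vector MSI shares one
 * IRTE block.
 */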
/*
 * Map the PCI dev to the corresponding remapping hardware unit
 * and allocate 'nvec' consecutive interrupt-remapping table entries
 * in it.
 */
static int intel_msi_alloc_irq(struct pci_dev *dev, int irq, int nvec)
{
	struct intel_iommu *iommu;
	int index;

	down_read(&dmar_global_lock);
	iommu = map_dev_to_ir(dev);
	if (!iommu) {
		printk(KERN_ERR
		       "Unable to map PCI %s to iommu\n", pci_name(dev));
		index = -ENOENT;
	} else {
		index = alloc_irte(iommu, irq, nvec);
		if (index < 0) {
			printk(KERN_ERR
			       "Unable to allocate %d IRTE for PCI %s\n",
			       nvec, pci_name(dev));
			index = -ENOSPC;
		}
	}
	up_read(&dmar_global_lock);

	return index;
}
static int intel_msi_setup_irq(struct pci_dev *pdev, unsigned int irq,
			       int index, int sub_handle)
{
	struct intel_iommu *iommu;
	int ret = -ENOENT;

	down_read(&dmar_global_lock);
	iommu = map_dev_to_ir(pdev);
	if (iommu) {
		/*
		 * setup the mapping between the irq and the IRTE
		 * base index, the sub_handle pointing to the
		 * appropriate interrupt remap table entry.
		 */
		set_irte_irq(irq, iommu, index, sub_handle);
		ret = 0;
	}
	up_read(&dmar_global_lock);

	return ret;
}
static int intel_alloc_hpet_msi(unsigned int irq, unsigned int id)
{
	int ret = -1;
	struct intel_iommu *iommu;
	int index;

	down_read(&dmar_global_lock);
	iommu = map_hpet_to_ir(id);
	if (iommu) {
		index = alloc_irte(iommu, irq, 1);
		if (index >= 0)
			ret = 0;
	}
	up_read(&dmar_global_lock);

	return ret;
}
struct irq_remap_ops intel_irq_remap_ops = {
	.prepare		= intel_prepare_irq_remapping,
	.enable			= intel_enable_irq_remapping,
	.disable		= disable_irq_remapping,
	.reenable		= reenable_irq_remapping,
	.enable_faulting	= enable_drhd_fault_handling,
	.setup_ioapic_entry	= intel_setup_ioapic_entry,
	.set_affinity		= intel_ioapic_set_affinity,
	.free_irq		= free_irte,
	.compose_msi_msg	= intel_compose_msi_msg,
	.msi_alloc_irq		= intel_msi_alloc_irq,
	.msi_setup_irq		= intel_msi_setup_irq,
	.alloc_hpet_msi		= intel_alloc_hpet_msi,
};
/*
 * Support of Interrupt Remapping Unit Hotplug
 */
static int dmar_ir_add(struct dmar_drhd_unit *dmaru, struct intel_iommu *iommu)
{
	int ret;
	int eim = x2apic_enabled();

	if (eim && !ecap_eim_support(iommu->ecap)) {
		pr_info("DRHD %Lx: EIM not supported by DRHD, ecap %Lx\n",
			iommu->reg_phys, iommu->ecap);
		return -ENODEV;
	}

	if (ir_parse_ioapic_hpet_scope(dmaru->hdr, iommu)) {
		pr_warn("DRHD %Lx: failed to parse managed IOAPIC/HPET\n",
			iommu->reg_phys);
		return -ENODEV;
	}

	/* TODO: check all IOAPICs are covered by IOMMU */

	/* Setup Interrupt-remapping now. */
	ret = intel_setup_irq_remapping(iommu);
	if (ret) {
		pr_err("DRHD %Lx: failed to allocate resource\n",
		       iommu->reg_phys);
		ir_remove_ioapic_hpet_scope(iommu);
		return ret;
	}

	if (!iommu->qi) {
		/* Clear previous faults. */
		dmar_fault(-1, iommu);
		iommu_disable_irq_remapping(iommu);
		dmar_disable_qi(iommu);
	}

	/* Enable queued invalidation */
	ret = dmar_enable_qi(iommu);
	if (!ret) {
		iommu_set_irq_remapping(iommu, eim);
	} else {
		pr_err("DRHD %Lx: failed to enable queued invalidation, ecap %Lx, ret %d\n",
		       iommu->reg_phys, iommu->ecap, ret);
		intel_teardown_irq_remapping(iommu);
		ir_remove_ioapic_hpet_scope(iommu);
	}

	return ret;
}
int dmar_ir_hotplug(struct dmar_drhd_unit *dmaru, bool insert)
{
	int ret = 0;
	struct intel_iommu *iommu = dmaru->iommu;

	if (!irq_remapping_enabled)
		return 0;
	if (iommu == NULL)
		return -EINVAL;
	if (!ecap_ir_support(iommu->ecap))
		return 0;

	if (insert) {
		if (!iommu->ir_table)
			ret = dmar_ir_add(dmaru, iommu);
	} else {
		if (iommu->ir_table) {
			if (!bitmap_empty(iommu->ir_table->bitmap,
					  INTR_REMAP_TABLE_ENTRIES)) {
				ret = -EBUSY;
			} else {
				iommu_disable_irq_remapping(iommu);
				intel_teardown_irq_remapping(iommu);
				ir_remove_ioapic_hpet_scope(iommu);
			}
		}
	}

	return ret;
}