#include <linux/interrupt.h>
#include <linux/dmar.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/jiffies.h>
#include <linux/hpet.h>
#include <linux/intel-iommu.h>
#include <linux/acpi.h>
#include <asm/io_apic.h>
#include <asm/irq_remapping.h>
#include <asm/pci-direct.h>
#include <asm/msidef.h>

#include "irq_remapping.h"
struct ioapic_scope {
	struct intel_iommu *iommu;
	unsigned int id;
	unsigned int bus;	/* PCI bus number */
	unsigned int devfn;	/* PCI devfn number */
};

struct hpet_scope {
	struct intel_iommu *iommu;
	u8 id;
	unsigned int bus;
	unsigned int devfn;
};
#define IR_X2APIC_MODE(mode)	(mode ? (1 << 11) : 0)
#define IRTE_DEST(dest)		((x2apic_mode) ? dest : dest << 8)
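/*
 * In xapic mode the IRTE destination field carries the 8-bit APIC ID in
 * bits 15:8, while x2apic mode uses the full 32-bit destination ID as-is.
 * IRTE_DEST() above hides that difference from the callers below.
 */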
static struct ioapic_scope ir_ioapic[MAX_IO_APICS];
static struct hpet_scope ir_hpet[MAX_HPET_TBS];
static int ir_ioapic_num, ir_hpet_num;
/*
 * Lock ordering:
 * ->dmar_global_lock
 *	->irq_2_ir_lock
 *	->iommu->register_lock
 *
 * intel_irq_remap_ops.{supported,prepare,enable,disable,reenable} are called
 * in a single-threaded environment with interrupts disabled, so there is no
 * need to take the dmar_global_lock.
 */
static DEFINE_RAW_SPINLOCK(irq_2_ir_lock);

static int __init parse_ioapics_under_ir(void);
static struct irq_2_iommu *irq_2_iommu(unsigned int irq)
{
	struct irq_cfg *cfg = irq_get_chip_data(irq);

	return cfg ? &cfg->irq_2_iommu : NULL;
}
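/*
 * Fetch a snapshot of the IRTE currently backing @irq, taken under
 * irq_2_ir_lock so it is consistent with concurrent updates.
 */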
static int get_irte(int irq, struct irte *entry)
{
	struct irq_2_iommu *irq_iommu = irq_2_iommu(irq);
	unsigned long flags;
	int index;

	if (!entry || !irq_iommu)
		return -1;

	raw_spin_lock_irqsave(&irq_2_ir_lock, flags);

	index = irq_iommu->irte_index + irq_iommu->sub_handle;
	*entry = *(irq_iommu->iommu->ir_table->base + index);

	raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);
	return 0;
}
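/*
 * Allocate a naturally aligned block of IRTEs for @irq. The request is
 * rounded up to a power of two and recorded as irte_mask (the block
 * order): e.g. count == 3 becomes a 4-entry block with irte_mask == 2.
 */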
static int alloc_irte(struct intel_iommu *iommu, int irq, u16 count)
{
	struct ir_table *table = iommu->ir_table;
	struct irq_2_iommu *irq_iommu = irq_2_iommu(irq);
	struct irq_cfg *cfg = irq_get_chip_data(irq);
	unsigned int mask = 0;
	unsigned long flags;
	int index;

	if (!count || !irq_iommu)
		return -1;

	if (count > 1) {
		count = __roundup_pow_of_two(count);
		mask = ilog2(count);
	}

	if (mask > ecap_max_handle_mask(iommu->ecap)) {
		printk(KERN_ERR
		       "Requested mask %x exceeds the max invalidation handle"
		       " mask value %Lx\n", mask,
		       ecap_max_handle_mask(iommu->ecap));
		return -1;
	}

	raw_spin_lock_irqsave(&irq_2_ir_lock, flags);
	index = bitmap_find_free_region(table->bitmap,
					INTR_REMAP_TABLE_ENTRIES, mask);
	if (index < 0) {
		pr_warn("IR%d: can't allocate an IRTE\n", iommu->seq_id);
	} else {
		cfg->remapped = 1;
		irq_iommu->iommu = iommu;
		irq_iommu->irte_index = index;
		irq_iommu->sub_handle = 0;
		irq_iommu->irte_mask = mask;
	}
	raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);

	return index;
}
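/*
 * Invalidate the Interrupt Entry Cache for the given IRTE block by
 * submitting an IEC descriptor through the queued-invalidation interface
 * and waiting for its completion.
 */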
static int qi_flush_iec(struct intel_iommu *iommu, int index, int mask)
{
	struct qi_desc desc;

	desc.low = QI_IEC_IIDEX(index) | QI_IEC_TYPE | QI_IEC_IM(mask)
		   | QI_IEC_SELECTIVE;
	desc.high = 0;

	return qi_submit_sync(&desc, iommu);
}
static int map_irq_to_irte_handle(int irq, u16 *sub_handle)
{
	struct irq_2_iommu *irq_iommu = irq_2_iommu(irq);
	unsigned long flags;
	int index;

	if (!irq_iommu)
		return -1;

	raw_spin_lock_irqsave(&irq_2_ir_lock, flags);
	*sub_handle = irq_iommu->sub_handle;
	index = irq_iommu->irte_index;
	raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);
	return index;
}
static int set_irte_irq(int irq, struct intel_iommu *iommu, u16 index, u16 subhandle)
{
	struct irq_2_iommu *irq_iommu = irq_2_iommu(irq);
	struct irq_cfg *cfg = irq_get_chip_data(irq);
	unsigned long flags;

	if (!irq_iommu)
		return -1;

	raw_spin_lock_irqsave(&irq_2_ir_lock, flags);

	cfg->remapped = 1;
	irq_iommu->iommu = iommu;
	irq_iommu->irte_index = index;
	irq_iommu->sub_handle = subhandle;
	irq_iommu->irte_mask = 0;

	raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);

	return 0;
}
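/*
 * Rewrite the live IRTE for @irq and make the update visible to the
 * hardware: flush the cacheline and invalidate the interrupt entry cache.
 */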
static int modify_irte(int irq, struct irte *irte_modified)
{
	struct irq_2_iommu *irq_iommu = irq_2_iommu(irq);
	struct intel_iommu *iommu;
	unsigned long flags;
	struct irte *irte;
	int rc, index;

	if (!irq_iommu)
		return -1;

	raw_spin_lock_irqsave(&irq_2_ir_lock, flags);

	iommu = irq_iommu->iommu;

	index = irq_iommu->irte_index + irq_iommu->sub_handle;
	irte = &iommu->ir_table->base[index];

	set_64bit(&irte->low, irte_modified->low);
	set_64bit(&irte->high, irte_modified->high);
	__iommu_flush_cache(iommu, irte, sizeof(*irte));

	rc = qi_flush_iec(iommu, index, 0);
	raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);

	return rc;
}
static struct intel_iommu *map_hpet_to_ir(u8 hpet_id)
{
	int i;

	for (i = 0; i < MAX_HPET_TBS; i++)
		if (ir_hpet[i].id == hpet_id)
			return ir_hpet[i].iommu;
	return NULL;
}

static struct intel_iommu *map_ioapic_to_ir(int apic)
{
	int i;

	for (i = 0; i < MAX_IO_APICS; i++)
		if (ir_ioapic[i].id == apic)
			return ir_ioapic[i].iommu;
	return NULL;
}

static struct intel_iommu *map_dev_to_ir(struct pci_dev *dev)
{
	struct dmar_drhd_unit *drhd;

	drhd = dmar_find_matched_drhd_unit(dev);
	if (!drhd)
		return NULL;

	return drhd->iommu;
}
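/*
 * Zero every IRTE in the block owned by this irq and return the bitmap
 * region to the allocator. Only the block owner (sub_handle == 0) does
 * the actual clearing; sub-handle users share the owner's entries.
 */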
static int clear_entries(struct irq_2_iommu *irq_iommu)
{
	struct irte *start, *entry, *end;
	struct intel_iommu *iommu;
	int index;

	if (irq_iommu->sub_handle)
		return 0;

	iommu = irq_iommu->iommu;
	index = irq_iommu->irte_index + irq_iommu->sub_handle;

	start = iommu->ir_table->base + index;
	end = start + (1 << irq_iommu->irte_mask);

	for (entry = start; entry < end; entry++) {
		set_64bit(&entry->low, 0);
		set_64bit(&entry->high, 0);
	}
	bitmap_release_region(iommu->ir_table->bitmap, index,
			      irq_iommu->irte_mask);

	return qi_flush_iec(iommu, index, irq_iommu->irte_mask);
}
static int free_irte(int irq)
{
	struct irq_2_iommu *irq_iommu = irq_2_iommu(irq);
	unsigned long flags;
	int rc;

	if (!irq_iommu)
		return -1;

	raw_spin_lock_irqsave(&irq_2_ir_lock, flags);

	rc = clear_entries(irq_iommu);

	irq_iommu->iommu = NULL;
	irq_iommu->irte_index = 0;
	irq_iommu->sub_handle = 0;
	irq_iommu->irte_mask = 0;

	raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);

	return rc;
}
/*
 * source validation type
 */
#define SVT_NO_VERIFY		0x0  /* no verification is required */
#define SVT_VERIFY_SID_SQ	0x1  /* verify using SID and SQ fields */
#define SVT_VERIFY_BUS		0x2  /* verify bus of request-id */

/*
 * source-id qualifier
 */
#define SQ_ALL_16	0x0  /* verify all 16 bits of request-id */
#define SQ_13_IGNORE_1	0x1  /* verify most significant 13 bits, ignore
			      * the third least significant bit
			      */
#define SQ_13_IGNORE_2	0x2  /* verify most significant 13 bits, ignore
			      * the second and third least significant bits
			      */
#define SQ_13_IGNORE_3	0x3  /* verify most significant 13 bits, ignore
			      * the least three significant bits
			      */
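/*
 * Illustration (not from the spec tables): with SVT_VERIFY_SID_SQ and
 * SQ_13_IGNORE_3, a programmed SID of 02:1f.0 (0x02f8) also matches
 * requester IDs 02:1f.1 through 02:1f.7, because the low three bits
 * (the PCI function number) are ignored during verification.
 */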
/*
 * set SVT, SQ and SID fields of irte to verify
 * source ids of interrupt requests
 */
static void set_irte_sid(struct irte *irte, unsigned int svt,
			 unsigned int sq, unsigned int sid)
{
	if (disable_sourceid_checking)
		svt = SVT_NO_VERIFY;
	irte->svt = svt;
	irte->sq = sq;
	irte->sid = sid;
}
static int set_ioapic_sid(struct irte *irte, int apic)
{
	int i;
	u16 sid = 0;

	if (!irte)
		return -1;

	down_read(&dmar_global_lock);
	for (i = 0; i < MAX_IO_APICS; i++) {
		if (ir_ioapic[i].id == apic) {
			sid = (ir_ioapic[i].bus << 8) | ir_ioapic[i].devfn;
			break;
		}
	}
	up_read(&dmar_global_lock);

	if (sid == 0) {
		pr_warning("Failed to set source-id of IOAPIC (%d)\n", apic);
		return -1;
	}

	set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_ALL_16, sid);

	return 0;
}
static int set_hpet_sid(struct irte *irte, u8 id)
{
	int i;
	u16 sid = 0;

	if (!irte)
		return -1;

	down_read(&dmar_global_lock);
	for (i = 0; i < MAX_HPET_TBS; i++) {
		if (ir_hpet[i].id == id) {
			sid = (ir_hpet[i].bus << 8) | ir_hpet[i].devfn;
			break;
		}
	}
	up_read(&dmar_global_lock);

	if (sid == 0) {
		pr_warning("Failed to set source-id of HPET block (%d)\n", id);
		return -1;
	}

	/*
	 * Should really use SQ_ALL_16. Some platforms are broken.
	 * While we figure out the right quirks for these broken platforms, use
	 * SQ_13_IGNORE_3 for now.
	 */
	set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_13_IGNORE_3, sid);

	return 0;
}
struct set_msi_sid_data {
	struct pci_dev *pdev;
	u16 alias;
};

static int set_msi_sid_cb(struct pci_dev *pdev, u16 alias, void *opaque)
{
	struct set_msi_sid_data *data = opaque;

	data->pdev = pdev;
	data->alias = alias;

	return 0;
}
static int set_msi_sid(struct irte *irte, struct pci_dev *dev)
{
	struct set_msi_sid_data data;

	if (!irte || !dev)
		return -1;

	pci_for_each_dma_alias(dev, set_msi_sid_cb, &data);

	/*
	 * The DMA alias walk provides us with a PCI device and alias. The
	 * only case where it will return an alias on a different bus than
	 * the device is the case of a PCIe-to-PCI bridge, where the alias
	 * is for the subordinate bus. In this case we can only verify the
	 * bus.
	 *
	 * If the alias device is on a different bus than our source device
	 * then we have a topology-based alias, use it.
	 *
	 * Otherwise, the alias is for a device DMA quirk and we cannot
	 * assume that MSI uses the same requester ID. Therefore use the
	 * original device.
	 */
	if (PCI_BUS_NUM(data.alias) != data.pdev->bus->number)
		set_irte_sid(irte, SVT_VERIFY_BUS, SQ_ALL_16,
			     PCI_DEVID(PCI_BUS_NUM(data.alias),
				       dev->bus->number));
	else if (data.pdev->bus->number != dev->bus->number)
		set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_ALL_16, data.alias);
	else
		set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_ALL_16,
			     PCI_DEVID(dev->bus->number, dev->devfn));

	return 0;
}
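/*
 * Example of the first case above: for a device behind a PCIe-to-PCI
 * bridge, the reported alias is the bridge's subordinate bus, so only the
 * bus number range is verified (SVT_VERIFY_BUS); MSIs from behind such a
 * bridge may carry any devfn.
 */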
static void iommu_set_irq_remapping(struct intel_iommu *iommu, int mode)
{
	u64 addr;
	u32 sts;
	unsigned long flags;

	addr = virt_to_phys((void *)iommu->ir_table->base);

	raw_spin_lock_irqsave(&iommu->register_lock, flags);

	dmar_writeq(iommu->reg + DMAR_IRTA_REG,
		    (addr) | IR_X2APIC_MODE(mode) | INTR_REMAP_TABLE_REG_SIZE);

	/* Set interrupt-remapping table pointer */
	iommu->gcmd |= DMA_GCMD_SIRTP;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (sts & DMA_GSTS_IRTPS), sts);
	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);

	/*
	 * Global invalidation of the interrupt entry cache before enabling
	 * interrupt-remapping.
	 */
	qi_global_iec(iommu);

	raw_spin_lock_irqsave(&iommu->register_lock, flags);

	/* Enable interrupt-remapping */
	iommu->gcmd |= DMA_GCMD_IRE;
	iommu->gcmd &= ~DMA_GCMD_CFI;	/* Block compatibility-format MSIs */
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (sts & DMA_GSTS_IRES), sts);

	/*
	 * With CFI clear in the Global Command register, we should be
	 * protected from dangerous (i.e. compatibility) interrupts
	 * regardless of x2apic status. Check just to be sure.
	 */
	if (sts & DMA_GSTS_CFIS)
		WARN(1, KERN_WARNING
			"Compatibility-format IRQs enabled despite intr remapping;\n"
			"you are vulnerable to IRQ injection.\n");

	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
}
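/*
 * Allocate the interrupt-remapping table (and its allocation bitmap) for
 * this IOMMU and program the hardware with it via iommu_set_irq_remapping().
 */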
static int intel_setup_irq_remapping(struct intel_iommu *iommu, int mode)
{
	struct ir_table *ir_table;
	struct page *pages;
	unsigned long *bitmap;

	ir_table = iommu->ir_table = kzalloc(sizeof(struct ir_table),
					     GFP_ATOMIC);
	if (!iommu->ir_table)
		return -ENOMEM;

	pages = alloc_pages_node(iommu->node, GFP_ATOMIC | __GFP_ZERO,
				 INTR_REMAP_PAGE_ORDER);
	if (!pages) {
		pr_err("IR%d: failed to allocate pages of order %d\n",
		       iommu->seq_id, INTR_REMAP_PAGE_ORDER);
		kfree(iommu->ir_table);
		return -ENOMEM;
	}

	bitmap = kcalloc(BITS_TO_LONGS(INTR_REMAP_TABLE_ENTRIES),
			 sizeof(long), GFP_ATOMIC);
	if (bitmap == NULL) {
		pr_err("IR%d: failed to allocate bitmap\n", iommu->seq_id);
		__free_pages(pages, INTR_REMAP_PAGE_ORDER);
		kfree(ir_table);
		return -ENOMEM;
	}

	ir_table->base = page_address(pages);
	ir_table->bitmap = bitmap;

	iommu_set_irq_remapping(iommu, mode);
	return 0;
}
/*
 * Disable Interrupt Remapping.
 */
static void iommu_disable_irq_remapping(struct intel_iommu *iommu)
{
	unsigned long flags;
	u32 sts;

	if (!ecap_ir_support(iommu->ecap))
		return;

	/*
	 * Global invalidation of the interrupt entry cache before disabling
	 * interrupt-remapping.
	 */
	qi_global_iec(iommu);

	raw_spin_lock_irqsave(&iommu->register_lock, flags);

	sts = dmar_readq(iommu->reg + DMAR_GSTS_REG);
	if (!(sts & DMA_GSTS_IRES))
		goto end;

	iommu->gcmd &= ~DMA_GCMD_IRE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, !(sts & DMA_GSTS_IRES), sts);

end:
	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
}
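/*
 * Returns true when the firmware's DMAR table sets the x2APIC opt-out
 * flag and the user has not overridden it on the command line with
 * intremap=no_x2apic_optout.
 */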
static int __init dmar_x2apic_optout(void)
{
	struct acpi_table_dmar *dmar;
	dmar = (struct acpi_table_dmar *)dmar_tbl;
	if (!dmar || no_x2apic_optout)
		return 0;
	return dmar->flags & DMAR_X2APIC_OPT_OUT;
}
static int __init intel_irq_remapping_supported(void)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;

	if (disable_irq_remap)
		return 0;
	if (irq_remap_broken) {
		printk(KERN_WARNING
			"This system BIOS has enabled interrupt remapping\n"
			"on a chipset that contains an erratum making that\n"
			"feature unstable. To maintain system stability\n"
			"interrupt remapping is being disabled. Please\n"
			"contact your BIOS vendor for an update.\n");
		add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK);
		disable_irq_remap = 1;
		return 0;
	}

	if (!dmar_ir_support())
		return 0;

	for_each_iommu(iommu, drhd)
		if (!ecap_ir_support(iommu->ecap))
			return 0;

	return 1;
}
static int __init intel_enable_irq_remapping(void)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;
	bool x2apic_present;
	int setup = 0;
	int eim = 0;

	x2apic_present = x2apic_supported();

	if (parse_ioapics_under_ir() != 1) {
		printk(KERN_INFO "Not enabling interrupt remapping\n");
		goto error;
	}

	if (x2apic_present) {
		pr_info("Queued invalidation will be enabled to support x2apic and Intr-remapping.\n");

		eim = !dmar_x2apic_optout();
		if (!eim)
			printk(KERN_WARNING
				"Your BIOS is broken and requested that x2apic be disabled.\n"
				"This will slightly decrease performance.\n"
				"Use 'intremap=no_x2apic_optout' to override the BIOS request.\n");
	}

	for_each_iommu(iommu, drhd) {
		/*
		 * If queued invalidation is already initialized,
		 * we shouldn't disable it.
		 */
		if (iommu->qi)
			continue;

		/* Clear previous faults. */
		dmar_fault(-1, iommu);

		/*
		 * Disable intr remapping and queued invalidation, if already
		 * enabled prior to OS handover.
		 */
		iommu_disable_irq_remapping(iommu);

		dmar_disable_qi(iommu);
	}

	/* Check for Interrupt-remapping support. */
	for_each_iommu(iommu, drhd) {
		if (!ecap_ir_support(iommu->ecap))
			continue;

		if (eim && !ecap_eim_support(iommu->ecap)) {
			printk(KERN_INFO
			       "DRHD %Lx: EIM not supported by DRHD, ecap %Lx\n",
			       drhd->reg_base_addr, iommu->ecap);
			goto error;
		}
	}

	/* Enable queued invalidation for all the DRHD's. */
	for_each_iommu(iommu, drhd) {
		int ret = dmar_enable_qi(iommu);

		if (ret) {
			printk(KERN_ERR
			       "DRHD %Lx: failed to enable queued invalidation, ecap %Lx, ret %d\n",
			       drhd->reg_base_addr, iommu->ecap, ret);
			goto error;
		}
	}

	/* Setup Interrupt-remapping for all the DRHD's now. */
	for_each_iommu(iommu, drhd) {
		if (!ecap_ir_support(iommu->ecap))
			continue;

		if (intel_setup_irq_remapping(iommu, eim))
			goto error;

		setup = 1;
	}

	if (!setup)
		goto error;

	irq_remapping_enabled = 1;

	/*
	 * VT-d has a different layout for IO-APIC entries when
	 * interrupt remapping is enabled. So it needs a special routine
	 * to print IO-APIC entries for debugging purposes too.
	 */
	x86_io_apic_ops.print_entries = intel_ir_io_apic_print_entries;

	pr_info("Enabled IRQ remapping in %s mode\n", eim ? "x2apic" : "xapic");

	return eim ? IRQ_REMAP_X2APIC_MODE : IRQ_REMAP_XAPIC_MODE;

error:
	/* Handle the error condition gracefully here! */
	if (x2apic_present)
		pr_warn("Failed to enable irq remapping. You are vulnerable to irq-injection attacks.\n");

	return -1;
}
static void ir_parse_one_hpet_scope(struct acpi_dmar_device_scope *scope,
				    struct intel_iommu *iommu)
{
	struct acpi_dmar_pci_path *path;
	u8 bus;
	int count;

	bus = scope->bus;
	path = (struct acpi_dmar_pci_path *)(scope + 1);
	count = (scope->length - sizeof(struct acpi_dmar_device_scope))
		/ sizeof(struct acpi_dmar_pci_path);

	while (--count > 0) {
		/*
		 * Access PCI config space directly because the PCI
		 * subsystem isn't initialized yet.
		 */
		bus = read_pci_config_byte(bus, path->device, path->function,
					   PCI_SECONDARY_BUS);
		path++;
	}
	ir_hpet[ir_hpet_num].bus = bus;
	ir_hpet[ir_hpet_num].devfn = PCI_DEVFN(path->device, path->function);
	ir_hpet[ir_hpet_num].iommu = iommu;
	ir_hpet[ir_hpet_num].id = scope->enumeration_id;
	ir_hpet_num++;
}
static void ir_parse_one_ioapic_scope(struct acpi_dmar_device_scope *scope,
				      struct intel_iommu *iommu)
{
	struct acpi_dmar_pci_path *path;
	u8 bus;
	int count;

	bus = scope->bus;
	path = (struct acpi_dmar_pci_path *)(scope + 1);
	count = (scope->length - sizeof(struct acpi_dmar_device_scope))
		/ sizeof(struct acpi_dmar_pci_path);

	while (--count > 0) {
		/*
		 * Access PCI config space directly because the PCI
		 * subsystem isn't initialized yet.
		 */
		bus = read_pci_config_byte(bus, path->device, path->function,
					   PCI_SECONDARY_BUS);
		path++;
	}
	ir_ioapic[ir_ioapic_num].bus = bus;
	ir_ioapic[ir_ioapic_num].devfn = PCI_DEVFN(path->device, path->function);
	ir_ioapic[ir_ioapic_num].iommu = iommu;
	ir_ioapic[ir_ioapic_num].id = scope->enumeration_id;
	ir_ioapic_num++;
}
static int ir_parse_ioapic_hpet_scope(struct acpi_dmar_header *header,
				      struct intel_iommu *iommu)
{
	struct acpi_dmar_hardware_unit *drhd;
	struct acpi_dmar_device_scope *scope;
	void *start, *end;

	drhd = (struct acpi_dmar_hardware_unit *)header;

	start = (void *)(drhd + 1);
	end = ((void *)drhd) + header->length;

	while (start < end) {
		scope = start;
		if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_IOAPIC) {
			if (ir_ioapic_num == MAX_IO_APICS) {
				printk(KERN_WARNING "Exceeded Max IO APICS\n");
				return -1;
			}

			printk(KERN_INFO "IOAPIC id %d under DRHD base "
			       "0x%Lx IOMMU %d\n", scope->enumeration_id,
			       drhd->address, iommu->seq_id);

			ir_parse_one_ioapic_scope(scope, iommu);
		} else if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_HPET) {
			if (ir_hpet_num == MAX_HPET_TBS) {
				printk(KERN_WARNING "Exceeded Max HPET blocks\n");
				return -1;
			}

			printk(KERN_INFO "HPET id %d under DRHD base "
			       "0x%Lx\n", scope->enumeration_id,
			       drhd->address);

			ir_parse_one_hpet_scope(scope, iommu);
		}
		start += scope->length;
	}

	return 0;
}
/*
 * Find the association between IOAPICs and their Interrupt-remapping
 * hardware units.
 */
static int __init parse_ioapics_under_ir(void)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;
	int ir_supported = 0;
	int ioapic_idx;

	for_each_iommu(iommu, drhd)
		if (ecap_ir_support(iommu->ecap)) {
			if (ir_parse_ioapic_hpet_scope(drhd->hdr, iommu))
				return -1;

			ir_supported = 1;
		}

	if (!ir_supported)
		return 0;

	for (ioapic_idx = 0; ioapic_idx < nr_ioapics; ioapic_idx++) {
		int ioapic_id = mpc_ioapic_id(ioapic_idx);
		if (!map_ioapic_to_ir(ioapic_id)) {
			pr_err(FW_BUG "ioapic %d has no mapping iommu, "
			       "interrupt remapping will be disabled\n",
			       ioapic_id);
			return -1;
		}
	}

	return 1;
}
static int __init ir_dev_scope_init(void)
{
	int ret;

	if (!irq_remapping_enabled)
		return 0;

	down_write(&dmar_global_lock);
	ret = dmar_dev_scope_init();
	up_write(&dmar_global_lock);

	return ret;
}
rootfs_initcall(ir_dev_scope_init);
static void disable_irq_remapping(void)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu = NULL;

	/*
	 * Disable Interrupt-remapping for all the DRHD's now.
	 */
	for_each_iommu(iommu, drhd) {
		if (!ecap_ir_support(iommu->ecap))
			continue;

		iommu_disable_irq_remapping(iommu);
	}
}
static int reenable_irq_remapping(int eim)
{
	struct dmar_drhd_unit *drhd;
	int setup = 0;
	struct intel_iommu *iommu = NULL;

	for_each_iommu(iommu, drhd)
		if (iommu->qi)
			dmar_reenable_qi(iommu);

	/*
	 * Setup Interrupt-remapping for all the DRHD's now.
	 */
	for_each_iommu(iommu, drhd) {
		if (!ecap_ir_support(iommu->ecap))
			continue;

		/* Set up interrupt remapping for iommu.*/
		iommu_set_irq_remapping(iommu, eim);
		setup = 1;
	}

	if (!setup)
		goto error;

	return 0;

error:
	/* Handle the error condition gracefully here! */
	return -1;
}
static void prepare_irte(struct irte *irte, int vector,
			 unsigned int dest)
{
	memset(irte, 0, sizeof(*irte));

	irte->present = 1;
	irte->dst_mode = apic->irq_dest_mode;
	/*
	 * Trigger mode in the IRTE will always be edge, and for IO-APIC, the
	 * actual level or edge trigger will be set up in the IO-APIC
	 * RTE. This will help simplify level triggered irq migration.
	 * For more details, see the comments (in io_apic.c) explaining IO-APIC
	 * irq migration in the presence of interrupt-remapping.
	 */
	irte->trigger_mode = 0;
	irte->dlvry_mode = apic->irq_delivery_mode;
	irte->vector = vector;
	irte->dest_id = IRTE_DEST(dest);
	irte->redir_hint = 1;
}
static int intel_setup_ioapic_entry(int irq,
				    struct IO_APIC_route_entry *route_entry,
				    unsigned int destination, int vector,
				    struct io_apic_irq_attr *attr)
{
	int ioapic_id = mpc_ioapic_id(attr->ioapic);
	struct intel_iommu *iommu;
	struct IR_IO_APIC_route_entry *entry;
	struct irte irte;
	int index;

	down_read(&dmar_global_lock);
	iommu = map_ioapic_to_ir(ioapic_id);
	if (!iommu) {
		pr_warn("No mapping iommu for ioapic %d\n", ioapic_id);
		index = -ENODEV;
	} else {
		index = alloc_irte(iommu, irq, 1);
		if (index < 0) {
			pr_warn("Failed to allocate IRTE for ioapic %d\n",
				ioapic_id);
			index = -ENOMEM;
		}
	}
	up_read(&dmar_global_lock);
	if (index < 0)
		return index;

	prepare_irte(&irte, vector, destination);

	/* Set source-id of interrupt request */
	set_ioapic_sid(&irte, ioapic_id);

	modify_irte(irq, &irte);

	apic_printk(APIC_VERBOSE, KERN_DEBUG "IOAPIC[%d]: "
		"Set IRTE entry (P:%d FPD:%d Dst_Mode:%d "
		"Redir_hint:%d Trig_Mode:%d Dlvry_Mode:%X "
		"Avail:%X Vector:%02X Dest:%08X "
		"SID:%04X SQ:%X SVT:%X)\n",
		attr->ioapic, irte.present, irte.fpd, irte.dst_mode,
		irte.redir_hint, irte.trigger_mode, irte.dlvry_mode,
		irte.avail, irte.vector, irte.dest_id,
		irte.sid, irte.sq, irte.svt);

	entry = (struct IR_IO_APIC_route_entry *)route_entry;
	memset(entry, 0, sizeof(*entry));

	entry->index2 = (index >> 15) & 0x1;
	entry->zero = 0;
	entry->format = 1;
	entry->index = (index & 0x7fff);
	/*
	 * IO-APIC RTE will be configured with the virtual vector.
	 * The irq handler will do the explicit EOI to the io-apic.
	 */
	entry->vector = attr->ioapic_pin;
	entry->mask = 0;			/* enable IRQ */
	entry->trigger = attr->trigger;
	entry->polarity = attr->polarity;

	/*
	 * Mask level triggered irqs.
	 * Use IRQ_DELAYED_DISABLE for edge triggered irqs.
	 */
	if (attr->trigger)
		entry->mask = 1;

	return 0;
}
/*
 * Migrate the IO-APIC irq in the presence of intr-remapping.
 *
 * For both level and edge triggered, irq migration is a simple atomic
 * update (of vector and cpu destination) of the IRTE, followed by a flush
 * of the hardware cache.
 *
 * For level triggered, we eliminate the io-apic RTE modification (with the
 * updated vector information), by using a virtual vector (io-apic pin number).
 * The real vector that is used for interrupting the cpu will come from
 * the interrupt-remapping table entry.
 *
 * As the migration is a simple atomic update of the IRTE, the same mechanism
 * is used to migrate MSI irq's in the presence of interrupt-remapping.
 */
static int
intel_ioapic_set_affinity(struct irq_data *data, const struct cpumask *mask,
			  bool force)
{
	struct irq_cfg *cfg = data->chip_data;
	unsigned int dest, irq = data->irq;
	struct irte irte;
	int err;

	if (!config_enabled(CONFIG_SMP))
		return -EINVAL;

	if (!cpumask_intersects(mask, cpu_online_mask))
		return -EINVAL;

	if (get_irte(irq, &irte))
		return -EBUSY;

	err = assign_irq_vector(irq, cfg, mask);
	if (err)
		return err;

	err = apic->cpu_mask_to_apicid_and(cfg->domain, mask, &dest);
	if (err) {
		if (assign_irq_vector(irq, cfg, data->affinity))
			pr_err("Failed to recover vector for irq %d\n", irq);
		return err;
	}

	irte.vector = cfg->vector;
	irte.dest_id = IRTE_DEST(dest);

	/*
	 * Atomically updates the IRTE with the new destination, vector
	 * and flushes the interrupt entry cache.
	 */
	modify_irte(irq, &irte);

	/*
	 * After this point, all the interrupts will start arriving
	 * at the new destination. So, time to cleanup the previous
	 * vector allocation.
	 */
	if (cfg->move_in_progress)
		send_cleanup_vector(cfg);

	cpumask_copy(data->affinity, mask);
	return 0;
}
static void intel_compose_msi_msg(struct pci_dev *pdev,
				  unsigned int irq, unsigned int dest,
				  struct msi_msg *msg, u8 hpet_id)
{
	struct irq_cfg *cfg;
	struct irte irte;
	u16 sub_handle = 0;
	int ir_index;

	cfg = irq_get_chip_data(irq);

	ir_index = map_irq_to_irte_handle(irq, &sub_handle);
	BUG_ON(ir_index == -1);

	prepare_irte(&irte, cfg->vector, dest);

	/* Set source-id of interrupt request */
	if (pdev)
		set_msi_sid(&irte, pdev);
	else
		set_hpet_sid(&irte, hpet_id);

	modify_irte(irq, &irte);

	msg->address_hi = MSI_ADDR_BASE_HI;
	msg->data = sub_handle;
	msg->address_lo = MSI_ADDR_BASE_LO | MSI_ADDR_IR_EXT_INT |
			  MSI_ADDR_IR_SHV |
			  MSI_ADDR_IR_INDEX1(ir_index) |
			  MSI_ADDR_IR_INDEX2(ir_index);
}
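/*
 * Note on the message layout above: in remappable format the MSI
 * address/data pair no longer encodes vector and destination directly;
 * the address carries the IRTE index (split across MSI_ADDR_IR_INDEX1/2)
 * and the data word carries the sub-handle.
 */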
/*
 * Map the PCI dev to the corresponding remapping hardware unit
 * and allocate 'nvec' consecutive interrupt-remapping table entries
 * in it.
 */
static int intel_msi_alloc_irq(struct pci_dev *dev, int irq, int nvec)
{
	struct intel_iommu *iommu;
	int index;

	down_read(&dmar_global_lock);
	iommu = map_dev_to_ir(dev);
	if (!iommu) {
		printk(KERN_ERR
		       "Unable to map PCI %s to iommu\n", pci_name(dev));
		index = -ENOENT;
	} else {
		index = alloc_irte(iommu, irq, nvec);
		if (index < 0) {
			printk(KERN_ERR
			       "Unable to allocate %d IRTE for PCI %s\n",
			       nvec, pci_name(dev));
			index = -ENOSPC;
		}
	}
	up_read(&dmar_global_lock);

	return index;
}
static int intel_msi_setup_irq(struct pci_dev *pdev, unsigned int irq,
			       int index, int sub_handle)
{
	struct intel_iommu *iommu;
	int ret = -ENOENT;

	down_read(&dmar_global_lock);
	iommu = map_dev_to_ir(pdev);
	if (iommu) {
		/*
		 * Set up the mapping between the irq and the IRTE base
		 * index plus sub_handle, which together point to the
		 * appropriate interrupt-remapping table entry.
		 */
		set_irte_irq(irq, iommu, index, sub_handle);
		ret = 0;
	}
	up_read(&dmar_global_lock);

	return ret;
}
static int intel_setup_hpet_msi(unsigned int irq, unsigned int id)
{
	int ret = -1;
	struct intel_iommu *iommu;
	int index;

	down_read(&dmar_global_lock);
	iommu = map_hpet_to_ir(id);
	if (iommu) {
		index = alloc_irte(iommu, irq, 1);
		if (index >= 0)
			ret = 0;
	}
	up_read(&dmar_global_lock);

	return ret;
}
struct irq_remap_ops intel_irq_remap_ops = {
	.supported		= intel_irq_remapping_supported,
	.prepare		= dmar_table_init,
	.enable			= intel_enable_irq_remapping,
	.disable		= disable_irq_remapping,
	.reenable		= reenable_irq_remapping,
	.enable_faulting	= enable_drhd_fault_handling,
	.setup_ioapic_entry	= intel_setup_ioapic_entry,
	.set_affinity		= intel_ioapic_set_affinity,
	.free_irq		= free_irte,
	.compose_msi_msg	= intel_compose_msi_msg,
	.msi_alloc_irq		= intel_msi_alloc_irq,
	.msi_setup_irq		= intel_msi_setup_irq,
	.setup_hpet_msi		= intel_setup_hpet_msi,
};