/*
 * Copyright (c) 2006, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Copyright (C) 2006-2008 Intel Corporation
 * Author: Ashok Raj <ashok.raj@intel.com>
 * Author: Shaohua Li <shaohua.li@intel.com>
 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 *
 * This file implements early detection/parsing of Remapping Devices
 * reported to the OS through the BIOS via the DMA remapping reporting
 * (DMAR) ACPI tables.
 *
 * These routines are used by both DMA-remapping and Interrupt-remapping.
 */
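/*
 * Rough call flow at boot, as wired up elsewhere in the kernel; shown
 * here only as an orientation sketch over this file's entry points:
 *
 *	detect_intel_iommu();		// early: map and sanity-check DMAR
 *	ret = dmar_table_init();	// parse DRHD/RMRR/ATSR structures
 *	if (!ret)
 *		dmar_dev_scope_init();	// resolve device scopes to pci_devs
 */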
#include <linux/pci.h>
#include <linux/dmar.h>
#include <linux/iova.h>
#include <linux/intel-iommu.h>
#include <linux/timer.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/tboot.h>
#include <linux/dmi.h>

#define PREFIX "DMAR: "
/* No locks are needed as the DMA remapping hardware unit
 * list is constructed at boot time and hotplug of
 * these units is not supported by the architecture.
 */
LIST_HEAD(dmar_drhd_units);

static struct acpi_table_header * __initdata dmar_tbl;
static acpi_size dmar_tbl_size;
static void __init dmar_register_drhd_unit(struct dmar_drhd_unit *drhd)
{
	/*
	 * add INCLUDE_ALL at the tail, so scanning the list will find it
	 * at the end.
	 */
	if (drhd->include_all)
		list_add_tail(&drhd->list, &dmar_drhd_units);
	else
		list_add(&drhd->list, &dmar_drhd_units);
}
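/*
 * For example, with one INCLUDE_ALL unit A and two specific units B and C
 * registered in that order, the list reads: B, C, A.  Lookups that walk
 * dmar_drhd_units therefore match a specific unit before falling back to
 * the catch-all INCLUDE_ALL entry.
 */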
static int __init dmar_parse_one_dev_scope(struct acpi_dmar_device_scope *scope,
					   struct pci_dev **dev, u16 segment)
{
	struct pci_bus *bus;
	struct pci_dev *pdev = NULL;
	struct acpi_dmar_pci_path *path;
	int count;

	bus = pci_find_bus(segment, scope->bus);
	path = (struct acpi_dmar_pci_path *)(scope + 1);
	count = (scope->length - sizeof(struct acpi_dmar_device_scope))
		/ sizeof(struct acpi_dmar_pci_path);

	while (count) {
		if (pdev)
			pci_dev_put(pdev);
		/*
		 * Some BIOSes list non-existent devices in the DMAR table;
		 * just ignore them.
		 */
		if (!bus) {
			printk(KERN_WARNING
				PREFIX "Device scope bus [%d] not found\n",
				scope->bus);
			break;
		}
		pdev = pci_get_slot(bus, PCI_DEVFN(path->dev, path->fn));
		if (!pdev) {
			printk(KERN_WARNING PREFIX
				"Device scope device [%04x:%02x:%02x.%02x] not found\n",
				segment, bus->number, path->dev, path->fn);
			break;
		}
		path++;
		count--;
		bus = pdev->subordinate;
	}
	if (!pdev) {
		printk(KERN_WARNING PREFIX
			"Device scope device [%04x:%02x:%02x.%02x] not found\n",
			segment, scope->bus, path->dev, path->fn);
		*dev = NULL;
		return 0;
	}
	if ((scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT &&
	     pdev->subordinate) ||
	    (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE &&
	     !pdev->subordinate)) {
		printk(KERN_WARNING PREFIX
			"Device scope type does not match for %s\n",
			pci_name(pdev));
		pci_dev_put(pdev);
		return -EINVAL;
	}
	*dev = pdev;
	return 0;
}
static int __init dmar_parse_dev_scope(void *start, void *end, int *cnt,
				       struct pci_dev ***devices, u16 segment)
{
	struct acpi_dmar_device_scope *scope;
	void *tmp = start;
	int index;
	int ret;

	*cnt = 0;
	while (start < end) {
		scope = start;
		if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT ||
		    scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE)
			(*cnt)++;
		else
			printk(KERN_WARNING PREFIX
				"Unsupported device scope\n");
		start += scope->length;
	}
	if (*cnt == 0)
		return 0;

	*devices = kcalloc(*cnt, sizeof(struct pci_dev *), GFP_KERNEL);
	if (!*devices)
		return -ENOMEM;

	start = tmp;
	index = 0;
	while (start < end) {
		scope = start;
		if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT ||
		    scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE) {
			ret = dmar_parse_one_dev_scope(scope,
				&(*devices)[index], segment);
			if (ret) {
				kfree(*devices);
				return ret;
			}
			index++;
		}
		start += scope->length;
	}

	return 0;
}
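/*
 * Usage sketch for the two-pass helper above (hypothetical caller, for
 * illustration only): count matching entries first, allocate one pci_dev
 * pointer per match, then fill the array in a second walk.
 *
 *	struct pci_dev **devs;
 *	int cnt;
 *
 *	if (!dmar_parse_dev_scope(start, end, &cnt, &devs, segment))
 *		// devs[0..cnt-1] now hold counted references (pci_get_slot)
 */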
/**
 * dmar_parse_one_drhd - parses exactly one DMA remapping hardware definition
 * structure which uniquely represents one DMA remapping hardware unit
 * present in the platform
 */
static int __init
dmar_parse_one_drhd(struct acpi_dmar_header *header)
{
	struct acpi_dmar_hardware_unit *drhd;
	struct dmar_drhd_unit *dmaru;
	int ret = 0;

	drhd = (struct acpi_dmar_hardware_unit *)header;
	dmaru = kzalloc(sizeof(*dmaru), GFP_KERNEL);
	if (!dmaru)
		return -ENOMEM;

	dmaru->hdr = header;
	dmaru->reg_base_addr = drhd->address;
	dmaru->segment = drhd->segment;
	dmaru->include_all = drhd->flags & 0x1; /* BIT0: INCLUDE_ALL */

	ret = alloc_iommu(dmaru);
	if (ret) {
		kfree(dmaru);
		return ret;
	}
	dmar_register_drhd_unit(dmaru);
	return 0;
}
static int __init dmar_parse_dev(struct dmar_drhd_unit *dmaru)
{
	struct acpi_dmar_hardware_unit *drhd;
	int ret = 0;

	drhd = (struct acpi_dmar_hardware_unit *)dmaru->hdr;

	if (dmaru->include_all)
		return 0;

	ret = dmar_parse_dev_scope((void *)(drhd + 1),
				   ((void *)drhd) + drhd->header.length,
				   &dmaru->devices_cnt, &dmaru->devices,
				   drhd->segment);
	if (ret) {
		list_del(&dmaru->list);
		kfree(dmaru);
	}
	return ret;
}
LIST_HEAD(dmar_rmrr_units);

static void __init dmar_register_rmrr_unit(struct dmar_rmrr_unit *rmrr)
{
	list_add(&rmrr->list, &dmar_rmrr_units);
}
static int __init
dmar_parse_one_rmrr(struct acpi_dmar_header *header)
{
	struct acpi_dmar_reserved_memory *rmrr;
	struct dmar_rmrr_unit *rmrru;

	rmrru = kzalloc(sizeof(*rmrru), GFP_KERNEL);
	if (!rmrru)
		return -ENOMEM;

	rmrru->hdr = header;
	rmrr = (struct acpi_dmar_reserved_memory *)header;
	rmrru->base_address = rmrr->base_address;
	rmrru->end_address = rmrr->end_address;

	dmar_register_rmrr_unit(rmrru);
	return 0;
}
static int __init
rmrr_parse_dev(struct dmar_rmrr_unit *rmrru)
{
	struct acpi_dmar_reserved_memory *rmrr;
	int ret;

	rmrr = (struct acpi_dmar_reserved_memory *)rmrru->hdr;
	ret = dmar_parse_dev_scope((void *)(rmrr + 1),
				   ((void *)rmrr) + rmrr->header.length,
				   &rmrru->devices_cnt, &rmrru->devices,
				   rmrr->segment);
	if (ret || (rmrru->devices_cnt == 0)) {
		list_del(&rmrru->list);
		kfree(rmrru);
	}
	return ret;
}
static LIST_HEAD(dmar_atsr_units);

static int __init dmar_parse_one_atsr(struct acpi_dmar_header *hdr)
{
	struct acpi_dmar_atsr *atsr;
	struct dmar_atsr_unit *atsru;

	atsr = container_of(hdr, struct acpi_dmar_atsr, header);
	atsru = kzalloc(sizeof(*atsru), GFP_KERNEL);
	if (!atsru)
		return -ENOMEM;

	atsru->hdr = hdr;
	atsru->include_all = atsr->flags & 0x1;

	list_add(&atsru->list, &dmar_atsr_units);
	return 0;
}
static int __init atsr_parse_dev(struct dmar_atsr_unit *atsru)
{
	int rc;
	struct acpi_dmar_atsr *atsr;

	if (atsru->include_all)
		return 0;

	atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
	rc = dmar_parse_dev_scope((void *)(atsr + 1),
				  (void *)atsr + atsr->header.length,
				  &atsru->devices_cnt, &atsru->devices,
				  atsr->segment);
	if (rc || !atsru->devices_cnt) {
		list_del(&atsru->list);
		kfree(atsru);
	}
	return rc;
}
int dmar_find_matched_atsr_unit(struct pci_dev *dev)
{
	int i;
	struct pci_bus *bus;
	struct acpi_dmar_atsr *atsr;
	struct dmar_atsr_unit *atsru;

	list_for_each_entry(atsru, &dmar_atsr_units, list) {
		atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
		if (atsr->segment == pci_domain_nr(dev->bus))
			goto found;
	}

	return 0;

found:
	for (bus = dev->bus; bus; bus = bus->parent) {
		struct pci_dev *bridge = bus->self;

		if (!bridge || !bridge->is_pcie ||
		    bridge->pcie_type == PCI_EXP_TYPE_PCI_BRIDGE)
			return 0;

		if (bridge->pcie_type == PCI_EXP_TYPE_ROOT_PORT) {
			for (i = 0; i < atsru->devices_cnt; i++)
				if (atsru->devices[i] == bridge)
					return 1;
			break;
		}
	}

	if (atsru->include_all)
		return 1;

	return 0;
}
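/*
 * Typical use (a sketch only, mirroring how the DMA-remapping code
 * decides whether a device may use ATS/device-IOTLB invalidation):
 *
 *	if (dmar_find_matched_atsr_unit(pdev))
 *		// pdev sits below a root port listed in an ATSR scope,
 *		// or the segment's ATSR has INCLUDE_ALL set: ATS is usable
 */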
static void __init
dmar_table_print_dmar_entry(struct acpi_dmar_header *header)
{
	struct acpi_dmar_hardware_unit *drhd;
	struct acpi_dmar_reserved_memory *rmrr;
	struct acpi_dmar_atsr *atsr;
	struct acpi_dmar_rhsa *rhsa;

	switch (header->type) {
	case ACPI_DMAR_TYPE_HARDWARE_UNIT:
		drhd = container_of(header, struct acpi_dmar_hardware_unit,
				    header);
		printk(KERN_INFO PREFIX
		       "DRHD base: %#016Lx flags: %#x\n",
		       (unsigned long long)drhd->address, drhd->flags);
		break;
	case ACPI_DMAR_TYPE_RESERVED_MEMORY:
		rmrr = container_of(header, struct acpi_dmar_reserved_memory,
				    header);
		printk(KERN_INFO PREFIX
		       "RMRR base: %#016Lx end: %#016Lx\n",
		       (unsigned long long)rmrr->base_address,
		       (unsigned long long)rmrr->end_address);
		break;
	case ACPI_DMAR_TYPE_ATSR:
		atsr = container_of(header, struct acpi_dmar_atsr, header);
		printk(KERN_INFO PREFIX "ATSR flags: %#x\n", atsr->flags);
		break;
	case ACPI_DMAR_HARDWARE_AFFINITY:
		rhsa = container_of(header, struct acpi_dmar_rhsa, header);
		printk(KERN_INFO PREFIX
		       "RHSA base: %#016Lx proximity domain: %#x\n",
		       (unsigned long long)rhsa->base_address,
		       rhsa->proximity_domain);
		break;
	}
}
/**
 * dmar_table_detect - checks to see if the platform supports DMAR devices
 */
static int __init dmar_table_detect(void)
{
	acpi_status status = AE_OK;

	/* if we could find the DMAR table, then there are DMAR devices */
	status = acpi_get_table_with_size(ACPI_SIG_DMAR, 0,
				(struct acpi_table_header **)&dmar_tbl,
				&dmar_tbl_size);

	if (ACPI_SUCCESS(status) && !dmar_tbl) {
		printk(KERN_WARNING PREFIX "Unable to map DMAR\n");
		status = AE_NOT_FOUND;
	}

	return (ACPI_SUCCESS(status) ? 1 : 0);
}
/**
 * parse_dmar_table - parses the DMA reporting table
 */
static int __init
parse_dmar_table(void)
{
	struct acpi_table_dmar *dmar;
	struct acpi_dmar_header *entry_header;
	int ret = 0;

	/*
	 * Do it again, earlier dmar_tbl mapping could be mapped with
	 * fixed map.
	 */
	dmar_table_detect();

	/*
	 * ACPI tables may not be DMA protected by tboot, so use the DMAR
	 * copy SINIT saved in SinitMleData in the TXT heap (which is DMA
	 * protected).
	 */
	dmar_tbl = tboot_get_dmar_table(dmar_tbl);

	dmar = (struct acpi_table_dmar *)dmar_tbl;
	if (!dmar)
		return -ENODEV;

	if (dmar->width < PAGE_SHIFT - 1) {
		printk(KERN_WARNING PREFIX "Invalid DMAR haw\n");
		return -EINVAL;
	}

	printk(KERN_INFO PREFIX "Host address width %d\n",
	       dmar->width + 1);

	entry_header = (struct acpi_dmar_header *)(dmar + 1);
	while (((unsigned long)entry_header) <
			(((unsigned long)dmar) + dmar_tbl->length)) {
		/* Avoid looping forever on bad ACPI tables */
		if (entry_header->length == 0) {
			printk(KERN_WARNING PREFIX
				"Invalid 0-length structure\n");
			ret = -EINVAL;
			break;
		}

		dmar_table_print_dmar_entry(entry_header);

		switch (entry_header->type) {
		case ACPI_DMAR_TYPE_HARDWARE_UNIT:
			ret = dmar_parse_one_drhd(entry_header);
			break;
		case ACPI_DMAR_TYPE_RESERVED_MEMORY:
			ret = dmar_parse_one_rmrr(entry_header);
			break;
		case ACPI_DMAR_TYPE_ATSR:
			ret = dmar_parse_one_atsr(entry_header);
			break;
		case ACPI_DMAR_HARDWARE_AFFINITY:
			/* We don't do anything with RHSA (yet?) */
			break;
		default:
			printk(KERN_WARNING PREFIX
				"Unknown DMAR structure type %d\n",
				entry_header->type);
			ret = 0; /* for forward compatibility */
			break;
		}
		if (ret)
			break;

		entry_header = ((void *)entry_header + entry_header->length);
	}
	return ret;
}
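/*
 * Shape of the table the loop above consumes: a fixed acpi_table_dmar
 * header followed by variable-length remapping structures, each starting
 * with its own { type, length } header.  Advancing by entry_header->length
 * is what makes the zero-length check above essential:
 *
 *	[ acpi_table_dmar | DRHD ... | RMRR ... | ATSR ... | RHSA ... ]
 *	                    ^-- entry_header steps structure by structure
 *	                        until it reaches dmar_tbl->length
 */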
int dmar_pci_device_match(struct pci_dev *devices[], int cnt,
			  struct pci_dev *dev)
{
	int index;

	while (dev) {
		for (index = 0; index < cnt; index++)
			if (dev == devices[index])
				return 1;

		/* Check our parent */
		dev = dev->bus->self;
	}

	return 0;
}
struct dmar_drhd_unit *
dmar_find_matched_drhd_unit(struct pci_dev *dev)
{
	struct dmar_drhd_unit *dmaru = NULL;
	struct acpi_dmar_hardware_unit *drhd;

	list_for_each_entry(dmaru, &dmar_drhd_units, list) {
		drhd = container_of(dmaru->hdr,
				    struct acpi_dmar_hardware_unit,
				    header);

		if (dmaru->include_all &&
		    drhd->segment == pci_domain_nr(dev->bus))
			return dmaru;

		if (dmar_pci_device_match(dmaru->devices,
					  dmaru->devices_cnt, dev))
			return dmaru;
	}

	return NULL;
}
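/*
 * Example lookup (hypothetical caller): find the hardware unit that
 * translates a given PCI device before programming its context entry.
 *
 *	struct dmar_drhd_unit *dmaru = dmar_find_matched_drhd_unit(pdev);
 *	if (dmaru)
 *		// dmaru covers pdev; an explicit device-scope match wins
 *		// because INCLUDE_ALL units sit at the tail of the list
 */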
int __init dmar_dev_scope_init(void)
{
	struct dmar_drhd_unit *drhd, *drhd_n;
	struct dmar_rmrr_unit *rmrr, *rmrr_n;
	struct dmar_atsr_unit *atsr, *atsr_n;
	int ret = -ENODEV;

	list_for_each_entry_safe(drhd, drhd_n, &dmar_drhd_units, list) {
		ret = dmar_parse_dev(drhd);
		if (ret)
			return ret;
	}

	list_for_each_entry_safe(rmrr, rmrr_n, &dmar_rmrr_units, list) {
		ret = rmrr_parse_dev(rmrr);
		if (ret)
			return ret;
	}

	list_for_each_entry_safe(atsr, atsr_n, &dmar_atsr_units, list) {
		ret = atsr_parse_dev(atsr);
		if (ret)
			return ret;
	}

	return ret;
}
int __init dmar_table_init(void)
{
	static int dmar_table_initialized;
	int ret;

	if (dmar_table_initialized)
		return 0;

	dmar_table_initialized = 1;

	ret = parse_dmar_table();
	if (ret) {
		if (ret != -ENODEV)
			printk(KERN_INFO PREFIX "parse DMAR table failure.\n");
		return ret;
	}

	if (list_empty(&dmar_drhd_units)) {
		printk(KERN_INFO PREFIX "No DMAR devices found\n");
		return -ENODEV;
	}

	if (list_empty(&dmar_rmrr_units))
		printk(KERN_INFO PREFIX "No RMRR found\n");

	if (list_empty(&dmar_atsr_units))
		printk(KERN_INFO PREFIX "No ATSR found\n");

	return 0;
}
int __init check_zero_address(void)
{
	struct acpi_table_dmar *dmar;
	struct acpi_dmar_header *entry_header;
	struct acpi_dmar_hardware_unit *drhd;

	dmar = (struct acpi_table_dmar *)dmar_tbl;
	entry_header = (struct acpi_dmar_header *)(dmar + 1);

	while (((unsigned long)entry_header) <
			(((unsigned long)dmar) + dmar_tbl->length)) {
		/* Avoid looping forever on bad ACPI tables */
		if (entry_header->length == 0) {
			printk(KERN_WARNING PREFIX
				"Invalid 0-length structure\n");
			return 0;
		}

		if (entry_header->type == ACPI_DMAR_TYPE_HARDWARE_UNIT) {
			drhd = (void *)entry_header;
			if (!drhd->address) {
				/* Promote an attitude of violence to a BIOS engineer today */
				WARN(1, "Your BIOS is broken; DMAR reported at address zero!\n"
				     "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
				     dmi_get_system_info(DMI_BIOS_VENDOR),
				     dmi_get_system_info(DMI_BIOS_VERSION),
				     dmi_get_system_info(DMI_PRODUCT_VERSION));
				return 0;
			}
		}

		entry_header = ((void *)entry_header + entry_header->length);
	}
	return 1;
}
void __init detect_intel_iommu(void)
{
	int ret;

	ret = dmar_table_detect();
	if (ret)
		ret = check_zero_address();
	{
#ifdef CONFIG_INTR_REMAP
		struct acpi_table_dmar *dmar;
		/*
		 * For now we will disable DMA-remapping when interrupt
		 * remapping is enabled.
		 * When support for queued invalidation for IOTLB invalidation
		 * is added, we will not need this any more.
		 */
		dmar = (struct acpi_table_dmar *)dmar_tbl;
		if (ret && cpu_has_x2apic && dmar->flags & 0x1)
			printk(KERN_INFO
			       "Queued invalidation will be enabled to support "
			       "x2apic and Intr-remapping.\n");
#endif
		if (ret && !no_iommu && !iommu_detected && !swiotlb &&
		    !dmar_disabled)
			iommu_detected = 1;
	}
	early_acpi_os_unmap_memory(dmar_tbl, dmar_tbl_size);
	dmar_tbl = NULL;
}
int alloc_iommu(struct dmar_drhd_unit *drhd)
{
	struct intel_iommu *iommu;
	int map_size;
	u32 ver;
	static int iommu_allocated = 0;
	int agaw = 0;
	int msagaw = 0;

	iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);
	if (!iommu)
		return -ENOMEM;

	iommu->seq_id = iommu_allocated++;
	sprintf(iommu->name, "dmar%d", iommu->seq_id);

	iommu->reg = ioremap(drhd->reg_base_addr, VTD_PAGE_SIZE);
	if (!iommu->reg) {
		printk(KERN_ERR "IOMMU: can't map the region\n");
		goto error;
	}
	iommu->cap = dmar_readq(iommu->reg + DMAR_CAP_REG);
	iommu->ecap = dmar_readq(iommu->reg + DMAR_ECAP_REG);

	if (iommu->cap == (uint64_t)-1 && iommu->ecap == (uint64_t)-1) {
		/* Promote an attitude of violence to a BIOS engineer today */
		WARN(1, "Your BIOS is broken; DMAR reported at address %llx returns all ones!\n"
		     "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
		     (unsigned long long)drhd->reg_base_addr,
		     dmi_get_system_info(DMI_BIOS_VENDOR),
		     dmi_get_system_info(DMI_BIOS_VERSION),
		     dmi_get_system_info(DMI_PRODUCT_VERSION));
		goto err_unmap;
	}

	agaw = iommu_calculate_agaw(iommu);
	if (agaw < 0) {
		printk(KERN_ERR
		       "Cannot get a valid agaw for iommu (seq_id = %d)\n",
		       iommu->seq_id);
		goto err_unmap;
	}
	msagaw = iommu_calculate_max_sagaw(iommu);
	if (msagaw < 0) {
		printk(KERN_ERR
		       "Cannot get a valid max agaw for iommu (seq_id = %d)\n",
		       iommu->seq_id);
		goto err_unmap;
	}
	iommu->agaw = agaw;
	iommu->msagaw = msagaw;

	/* the registers might be more than one page */
	map_size = max_t(int, ecap_max_iotlb_offset(iommu->ecap),
			 cap_max_fault_reg_offset(iommu->cap));
	map_size = VTD_PAGE_ALIGN(map_size);
	if (map_size > VTD_PAGE_SIZE) {
		iounmap(iommu->reg);
		iommu->reg = ioremap(drhd->reg_base_addr, map_size);
		if (!iommu->reg) {
			printk(KERN_ERR "IOMMU: can't map the region\n");
			goto error;
		}
	}

	ver = readl(iommu->reg + DMAR_VER_REG);
	pr_info("IOMMU %llx: ver %d:%d cap %llx ecap %llx\n",
		(unsigned long long)drhd->reg_base_addr,
		DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver),
		(unsigned long long)iommu->cap,
		(unsigned long long)iommu->ecap);

	spin_lock_init(&iommu->register_lock);

	drhd->iommu = iommu;
	return 0;

err_unmap:
	iounmap(iommu->reg);
error:
	kfree(iommu);
	return -1;
}
void free_iommu(struct intel_iommu *iommu)
{
	if (!iommu)
		return;

	free_dmar_iommu(iommu);

	if (iommu->reg)
		iounmap(iommu->reg);
	kfree(iommu);
}
/*
 * Reclaim all the submitted descriptors which have completed their work.
 */
static inline void reclaim_free_desc(struct q_inval *qi)
{
	while (qi->desc_status[qi->free_tail] == QI_DONE ||
	       qi->desc_status[qi->free_tail] == QI_ABORT) {
		qi->desc_status[qi->free_tail] = QI_FREE;
		qi->free_tail = (qi->free_tail + 1) % QI_LENGTH;
		qi->free_cnt++;
	}
}
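/*
 * Ring geometry used by the queued-invalidation code (QI_LENGTH slots,
 * all index arithmetic mod QI_LENGTH):
 *
 *	free_tail .. oldest slot still awaiting reclaim
 *	free_head .. next slot handed out by qi_submit_sync()
 *	free_cnt  .. slots currently available
 *
 * Descriptors are retired here strictly in order: a slot still marked
 * QI_IN_USE blocks reclamation of everything queued after it.
 */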
static int qi_check_fault(struct intel_iommu *iommu, int index)
{
	u32 fault;
	int head, tail;
	struct q_inval *qi = iommu->qi;
	int wait_index = (index + 1) % QI_LENGTH;

	if (qi->desc_status[wait_index] == QI_ABORT)
		return -EAGAIN;

	fault = readl(iommu->reg + DMAR_FSTS_REG);

	/*
	 * If IQE happens, the head points to the descriptor associated
	 * with the error. No new descriptors are fetched until the IQE
	 * is cleared.
	 */
	if (fault & DMA_FSTS_IQE) {
		head = readl(iommu->reg + DMAR_IQH_REG);
		if ((head >> DMAR_IQ_SHIFT) == index) {
			printk(KERN_ERR "VT-d detected invalid descriptor: "
				"low=%llx, high=%llx\n",
				(unsigned long long)qi->desc[index].low,
				(unsigned long long)qi->desc[index].high);
			memcpy(&qi->desc[index], &qi->desc[wait_index],
				sizeof(struct qi_desc));
			__iommu_flush_cache(iommu, &qi->desc[index],
				sizeof(struct qi_desc));
			writel(DMA_FSTS_IQE, iommu->reg + DMAR_FSTS_REG);
			return -EINVAL;
		}
	}

	/*
	 * If ITE happens, all pending wait_desc commands are aborted.
	 * No new descriptors are fetched until the ITE is cleared.
	 */
	if (fault & DMA_FSTS_ITE) {
		head = readl(iommu->reg + DMAR_IQH_REG);
		head = ((head >> DMAR_IQ_SHIFT) - 1 + QI_LENGTH) % QI_LENGTH;
		head |= 1;
		tail = readl(iommu->reg + DMAR_IQT_REG);
		tail = ((tail >> DMAR_IQ_SHIFT) - 1 + QI_LENGTH) % QI_LENGTH;

		writel(DMA_FSTS_ITE, iommu->reg + DMAR_FSTS_REG);

		do {
			if (qi->desc_status[head] == QI_IN_USE)
				qi->desc_status[head] = QI_ABORT;
			head = (head - 2 + QI_LENGTH) % QI_LENGTH;
		} while (head != tail);

		if (qi->desc_status[wait_index] == QI_ABORT)
			return -EAGAIN;
	}

	if (fault & DMA_FSTS_ICE)
		writel(DMA_FSTS_ICE, iommu->reg + DMAR_FSTS_REG);

	return 0;
}
/*
 * Submit the queued invalidation descriptor to the remapping
 * hardware unit and wait for its completion.
 */
int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu)
{
	int rc;
	struct q_inval *qi = iommu->qi;
	struct qi_desc *hw, wait_desc;
	int wait_index, index;
	unsigned long flags;

	if (!qi)
		return 0;

	hw = qi->desc;

restart:
	rc = 0;

	spin_lock_irqsave(&qi->q_lock, flags);
	while (qi->free_cnt < 3) {
		spin_unlock_irqrestore(&qi->q_lock, flags);
		cpu_relax();
		spin_lock_irqsave(&qi->q_lock, flags);
	}

	index = qi->free_head;
	wait_index = (index + 1) % QI_LENGTH;

	qi->desc_status[index] = qi->desc_status[wait_index] = QI_IN_USE;

	hw[index] = *desc;

	wait_desc.low = QI_IWD_STATUS_DATA(QI_DONE) |
			QI_IWD_STATUS_WRITE | QI_IWD_TYPE;
	wait_desc.high = virt_to_phys(&qi->desc_status[wait_index]);

	hw[wait_index] = wait_desc;

	__iommu_flush_cache(iommu, &hw[index], sizeof(struct qi_desc));
	__iommu_flush_cache(iommu, &hw[wait_index], sizeof(struct qi_desc));

	qi->free_head = (qi->free_head + 2) % QI_LENGTH;
	qi->free_cnt -= 2;

	/*
	 * update the HW tail register indicating the presence of
	 * new descriptors.
	 */
	writel(qi->free_head << DMAR_IQ_SHIFT, iommu->reg + DMAR_IQT_REG);

	while (qi->desc_status[wait_index] != QI_DONE) {
		/*
		 * We will leave the interrupts disabled, to prevent the
		 * interrupt context from queueing another cmd while a cmd
		 * is already submitted and waiting for completion on this
		 * cpu. This is to avoid a deadlock where the interrupt
		 * context can wait indefinitely for free slots in the queue.
		 */
		rc = qi_check_fault(iommu, index);
		if (rc)
			break;

		spin_unlock(&qi->q_lock);
		cpu_relax();
		spin_lock(&qi->q_lock);
	}

	qi->desc_status[index] = QI_DONE;

	reclaim_free_desc(qi);
	spin_unlock_irqrestore(&qi->q_lock, flags);

	if (rc == -EAGAIN)
		goto restart;

	return rc;
}
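/*
 * Each submission above occupies two consecutive slots: the caller's
 * descriptor at 'index' and a wait descriptor at 'index + 1' whose
 * status write flips desc_status[wait_index] to QI_DONE.  Sketch for a
 * ring with free_head == 4 before submission:
 *
 *	hw[4] = *desc;		// caller's invalidation request
 *	hw[5] = wait_desc;	// hardware writes QI_DONE when slot 4 is done
 *	free_head = 6;		// tail register advances past both slots
 */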
/*
 * Flush the global interrupt entry cache.
 */
void qi_global_iec(struct intel_iommu *iommu)
{
	struct qi_desc desc;

	desc.low = QI_IEC_TYPE;
	desc.high = 0;

	/* should never fail */
	qi_submit_sync(&desc, iommu);
}
void qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid, u8 fm,
		      u64 type)
{
	struct qi_desc desc;

	desc.low = QI_CC_FM(fm) | QI_CC_SID(sid) | QI_CC_DID(did)
			| QI_CC_GRAN(type) | QI_CC_TYPE;
	desc.high = 0;

	qi_submit_sync(&desc, iommu);
}
void qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
		    unsigned int size_order, u64 type)
{
	u8 dw = 0, dr = 0;
	struct qi_desc desc;
	int ih = 0;

	if (cap_write_drain(iommu->cap))
		dw = 1;

	if (cap_read_drain(iommu->cap))
		dr = 1;

	desc.low = QI_IOTLB_DID(did) | QI_IOTLB_DR(dr) | QI_IOTLB_DW(dw)
		| QI_IOTLB_GRAN(type) | QI_IOTLB_TYPE;
	desc.high = QI_IOTLB_ADDR(addr) | QI_IOTLB_IH(ih)
		| QI_IOTLB_AM(size_order);

	qi_submit_sync(&desc, iommu);
}
void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 qdep,
			u64 addr, unsigned mask)
{
	struct qi_desc desc;

	if (mask) {
		BUG_ON(addr & ((1 << (VTD_PAGE_SHIFT + mask)) - 1));
		addr |= (1 << (VTD_PAGE_SHIFT + mask - 1)) - 1;
		desc.high = QI_DEV_IOTLB_ADDR(addr) | QI_DEV_IOTLB_SIZE;
	} else
		desc.high = QI_DEV_IOTLB_ADDR(addr);

	if (qdep >= QI_DEV_IOTLB_MAX_INVS)
		qdep = 0;

	desc.low = QI_DEV_IOTLB_SID(sid) | QI_DEV_IOTLB_QDEP(qdep) |
		   QI_DEV_IOTLB_TYPE;

	qi_submit_sync(&desc, iommu);
}
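/*
 * Worked example for the size encoding above, assuming the usual
 * VTD_PAGE_SHIFT of 12: mask == 2 requests invalidation of 2^2 pages,
 * so addr must be 16KB-aligned (hence the BUG_ON) and the low bits are
 * set with
 *
 *	addr |= (1 << (12 + 2 - 1)) - 1;	// bits 12..0 become 0x1fff
 *
 * The span of set low bits is how the device-IOTLB invalidation message
 * encodes the size alongside the base address.
 */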
/*
 * Disable Queued Invalidation interface.
 */
void dmar_disable_qi(struct intel_iommu *iommu)
{
	unsigned long flags;
	u32 sts;
	cycles_t start_time = get_cycles();

	if (!ecap_qis(iommu->ecap))
		return;

	spin_lock_irqsave(&iommu->register_lock, flags);

	sts = dmar_readq(iommu->reg + DMAR_GSTS_REG);
	if (!(sts & DMA_GSTS_QIES))
		goto end;

	/*
	 * Give a chance to HW to complete the pending invalidation requests.
	 */
	while ((readl(iommu->reg + DMAR_IQT_REG) !=
		readl(iommu->reg + DMAR_IQH_REG)) &&
		(DMAR_OPERATION_TIMEOUT > (get_cycles() - start_time)))
		cpu_relax();

	iommu->gcmd &= ~DMA_GCMD_QIE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl,
		      !(sts & DMA_GSTS_QIES), sts);
end:
	spin_unlock_irqrestore(&iommu->register_lock, flags);
}
/*
 * Enable queued invalidation.
 */
static void __dmar_enable_qi(struct intel_iommu *iommu)
{
	u32 sts;
	unsigned long flags;
	struct q_inval *qi = iommu->qi;

	qi->free_head = qi->free_tail = 0;
	qi->free_cnt = QI_LENGTH;

	spin_lock_irqsave(&iommu->register_lock, flags);

	/* write zero to the tail reg */
	writel(0, iommu->reg + DMAR_IQT_REG);

	dmar_writeq(iommu->reg + DMAR_IQA_REG, virt_to_phys(qi->desc));

	iommu->gcmd |= DMA_GCMD_QIE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl, (sts & DMA_GSTS_QIES), sts);

	spin_unlock_irqrestore(&iommu->register_lock, flags);
}
/*
 * Enable Queued Invalidation interface. This is a must to support
 * interrupt-remapping. Also used by DMA-remapping, which replaces
 * register based IOTLB invalidation.
 */
int dmar_enable_qi(struct intel_iommu *iommu)
{
	struct q_inval *qi;

	if (!ecap_qis(iommu->ecap))
		return -ENOENT;

	/*
	 * queued invalidation is already setup and enabled.
	 */
	if (iommu->qi)
		return 0;

	iommu->qi = kmalloc(sizeof(*qi), GFP_ATOMIC);
	if (!iommu->qi)
		return -ENOMEM;

	qi = iommu->qi;

	qi->desc = (void *)(get_zeroed_page(GFP_ATOMIC));
	if (!qi->desc) {
		kfree(qi);
		iommu->qi = NULL;
		return -ENOMEM;
	}

	qi->desc_status = kmalloc(QI_LENGTH * sizeof(int), GFP_ATOMIC);
	if (!qi->desc_status) {
		free_page((unsigned long)qi->desc);
		kfree(qi);
		iommu->qi = NULL;
		return -ENOMEM;
	}

	qi->free_head = qi->free_tail = 0;
	qi->free_cnt = QI_LENGTH;

	spin_lock_init(&qi->q_lock);

	__dmar_enable_qi(iommu);

	return 0;
}
/* iommu interrupt handling. Most stuff is MSI-like. */

enum faulttype {
	DMA_REMAP,
	INTR_REMAP,
	UNKNOWN,
};
static const char *dma_remap_fault_reasons[] =
{
	"Software",
	"Present bit in root entry is clear",
	"Present bit in context entry is clear",
	"Invalid context entry",
	"Access beyond MGAW",
	"PTE Write access is not set",
	"PTE Read access is not set",
	"Next page table ptr is invalid",
	"Root table address invalid",
	"Context table ptr is invalid",
	"non-zero reserved fields in RTP",
	"non-zero reserved fields in CTP",
	"non-zero reserved fields in PTE",
};

static const char *intr_remap_fault_reasons[] =
{
	"Detected reserved fields in the decoded interrupt-remapped request",
	"Interrupt index exceeded the interrupt-remapping table size",
	"Present field in the IRTE entry is clear",
	"Error accessing interrupt-remapping table pointed by IRTA_REG",
	"Detected reserved fields in the IRTE entry",
	"Blocked a compatibility format interrupt request",
	"Blocked an interrupt request due to source-id verification failure",
};

#define MAX_FAULT_REASON_IDX	(ARRAY_SIZE(dma_remap_fault_reasons) - 1)
const char *dmar_get_fault_reason(u8 fault_reason, int *fault_type)
{
	if (fault_reason >= 0x20 && (fault_reason < 0x20 +
				     ARRAY_SIZE(intr_remap_fault_reasons))) {
		*fault_type = INTR_REMAP;
		return intr_remap_fault_reasons[fault_reason - 0x20];
	} else if (fault_reason < ARRAY_SIZE(dma_remap_fault_reasons)) {
		*fault_type = DMA_REMAP;
		return dma_remap_fault_reasons[fault_reason];
	} else {
		*fault_type = UNKNOWN;
		return "Unknown";
	}
}
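/*
 * Index mapping example: hardware fault reason 0x20 is the first
 * interrupt-remapping reason ("Detected reserved fields in the decoded
 * interrupt-remapped request"), so 0x26 - 0x20 = 6 selects the source-id
 * verification failure string.  Reasons below
 * ARRAY_SIZE(dma_remap_fault_reasons) index the DMA-remap table directly.
 */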
void dmar_msi_unmask(unsigned int irq)
{
	struct intel_iommu *iommu = get_irq_data(irq);
	unsigned long flag;

	/* unmask it */
	spin_lock_irqsave(&iommu->register_lock, flag);
	writel(0, iommu->reg + DMAR_FECTL_REG);
	/* Read a reg to force flush the post write */
	readl(iommu->reg + DMAR_FECTL_REG);
	spin_unlock_irqrestore(&iommu->register_lock, flag);
}

void dmar_msi_mask(unsigned int irq)
{
	unsigned long flag;
	struct intel_iommu *iommu = get_irq_data(irq);

	/* mask it */
	spin_lock_irqsave(&iommu->register_lock, flag);
	writel(DMA_FECTL_IM, iommu->reg + DMAR_FECTL_REG);
	/* Read a reg to force flush the post write */
	readl(iommu->reg + DMAR_FECTL_REG);
	spin_unlock_irqrestore(&iommu->register_lock, flag);
}

void dmar_msi_write(int irq, struct msi_msg *msg)
{
	struct intel_iommu *iommu = get_irq_data(irq);
	unsigned long flag;

	spin_lock_irqsave(&iommu->register_lock, flag);
	writel(msg->data, iommu->reg + DMAR_FEDATA_REG);
	writel(msg->address_lo, iommu->reg + DMAR_FEADDR_REG);
	writel(msg->address_hi, iommu->reg + DMAR_FEUADDR_REG);
	spin_unlock_irqrestore(&iommu->register_lock, flag);
}

void dmar_msi_read(int irq, struct msi_msg *msg)
{
	struct intel_iommu *iommu = get_irq_data(irq);
	unsigned long flag;

	spin_lock_irqsave(&iommu->register_lock, flag);
	msg->data = readl(iommu->reg + DMAR_FEDATA_REG);
	msg->address_lo = readl(iommu->reg + DMAR_FEADDR_REG);
	msg->address_hi = readl(iommu->reg + DMAR_FEUADDR_REG);
	spin_unlock_irqrestore(&iommu->register_lock, flag);
}
static int dmar_fault_do_one(struct intel_iommu *iommu, int type,
		u8 fault_reason, u16 source_id, unsigned long long addr)
{
	const char *reason;
	int fault_type;

	reason = dmar_get_fault_reason(fault_reason, &fault_type);

	if (fault_type == INTR_REMAP)
		printk(KERN_ERR "INTR-REMAP: Request device [%02x:%02x.%d] "
		       "fault index %llx\n"
		       "INTR-REMAP:[fault reason %02d] %s\n",
		       (source_id >> 8), PCI_SLOT(source_id & 0xFF),
		       PCI_FUNC(source_id & 0xFF), addr >> 48,
		       fault_reason, reason);
	else
		printk(KERN_ERR
		       "DMAR:[%s] Request device [%02x:%02x.%d] "
		       "fault addr %llx\n"
		       "DMAR:[fault reason %02d] %s\n",
		       (type ? "DMA Read" : "DMA Write"),
		       (source_id >> 8), PCI_SLOT(source_id & 0xFF),
		       PCI_FUNC(source_id & 0xFF), addr, fault_reason, reason);
	return 0;
}
#define PRIMARY_FAULT_REG_LEN (16)
irqreturn_t dmar_fault(int irq, void *dev_id)
{
	struct intel_iommu *iommu = dev_id;
	int reg, fault_index;
	u32 fault_status;
	unsigned long flag;

	spin_lock_irqsave(&iommu->register_lock, flag);
	fault_status = readl(iommu->reg + DMAR_FSTS_REG);
	if (fault_status)
		printk(KERN_ERR "DRHD: handling fault status reg %x\n",
		       fault_status);

	/* TBD: ignore advanced fault log currently */
	if (!(fault_status & DMA_FSTS_PPF))
		goto clear_rest;

	fault_index = dma_fsts_fault_record_index(fault_status);
	reg = cap_fault_reg_offset(iommu->cap);
	while (1) {
		u8 fault_reason;
		u16 source_id;
		u64 guest_addr;
		int type;
		u32 data;

		/* highest 32 bits */
		data = readl(iommu->reg + reg +
				fault_index * PRIMARY_FAULT_REG_LEN + 12);
		if (!(data & DMA_FRCD_F))
			break;

		fault_reason = dma_frcd_fault_reason(data);
		type = dma_frcd_type(data);

		data = readl(iommu->reg + reg +
				fault_index * PRIMARY_FAULT_REG_LEN + 8);
		source_id = dma_frcd_source_id(data);

		guest_addr = dmar_readq(iommu->reg + reg +
				fault_index * PRIMARY_FAULT_REG_LEN);
		guest_addr = dma_frcd_page_addr(guest_addr);
		/* clear the fault */
		writel(DMA_FRCD_F, iommu->reg + reg +
			fault_index * PRIMARY_FAULT_REG_LEN + 12);

		spin_unlock_irqrestore(&iommu->register_lock, flag);

		dmar_fault_do_one(iommu, type, fault_reason,
				  source_id, guest_addr);

		fault_index++;
		if (fault_index >= cap_num_fault_regs(iommu->cap))
			fault_index = 0;
		spin_lock_irqsave(&iommu->register_lock, flag);
	}
clear_rest:
	/* clear all the other faults */
	fault_status = readl(iommu->reg + DMAR_FSTS_REG);
	writel(fault_status, iommu->reg + DMAR_FSTS_REG);

	spin_unlock_irqrestore(&iommu->register_lock, flag);
	return IRQ_HANDLED;
}
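/*
 * Layout of one primary fault record as read above (PRIMARY_FAULT_REG_LEN
 * = 16 bytes per record, starting at cap_fault_reg_offset()):
 *
 *	offset  0: faulting page address (64-bit, dmar_readq)
 *	offset  8: 32 bits including the source id (dma_frcd_source_id)
 *	offset 12: top 32 bits: F bit, request type, fault reason
 *
 * Writing DMA_FRCD_F back to offset 12 clears the record so the slot
 * can log a new fault.
 */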
int dmar_set_interrupt(struct intel_iommu *iommu)
{
	int irq, ret;

	/*
	 * Check if the fault interrupt is already initialized.
	 */
	if (iommu->irq)
		return 0;

	irq = create_irq();
	if (!irq) {
		printk(KERN_ERR "IOMMU: no free vectors\n");
		return -EINVAL;
	}

	set_irq_data(irq, iommu);
	iommu->irq = irq;

	ret = arch_setup_dmar_msi(irq);
	if (ret) {
		set_irq_data(irq, NULL);
		iommu->irq = 0;
		destroy_irq(irq);
		return ret;
	}

	ret = request_irq(irq, dmar_fault, 0, iommu->name, iommu);
	if (ret)
		printk(KERN_ERR "IOMMU: can't request irq\n");
	return ret;
}
int __init enable_drhd_fault_handling(void)
{
	struct dmar_drhd_unit *drhd;

	/*
	 * Enable fault control interrupt.
	 */
	for_each_drhd_unit(drhd) {
		int ret;
		struct intel_iommu *iommu = drhd->iommu;
		ret = dmar_set_interrupt(iommu);

		if (ret) {
			printk(KERN_ERR "DRHD %Lx: failed to enable fault "
			       "interrupt, ret %d\n",
			       (unsigned long long)drhd->reg_base_addr, ret);
			return -1;
		}
	}

	return 0;
}
/*
 * Re-enable Queued Invalidation interface.
 */
int dmar_reenable_qi(struct intel_iommu *iommu)
{
	if (!ecap_qis(iommu->ecap))
		return -ENOENT;

	if (!iommu->qi)
		return -ENOENT;

	/*
	 * First disable queued invalidation.
	 */
	dmar_disable_qi(iommu);
	/*
	 * Then enable queued invalidation again. Since there are no pending
	 * invalidation requests now, it's safe to re-enable queued
	 * invalidation.
	 */
	__dmar_enable_qi(iommu);

	return 0;
}
/*
 * Check interrupt remapping support in the DMAR table description.
 */
int dmar_ir_support(void)
{
	struct acpi_table_dmar *dmar;
	dmar = (struct acpi_table_dmar *)dmar_tbl;
	if (!dmar)
		return 0;
	return dmar->flags & 0x1;
}