/*
 * Copyright (c) 2006, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Copyright (C) 2006-2008 Intel Corporation
 * Author: Ashok Raj <ashok.raj@intel.com>
 * Author: Shaohua Li <shaohua.li@intel.com>
 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 *
 * This file implements early detection/parsing of Remapping Devices
 * reported to the OS through the BIOS via the DMA remapping reporting
 * (DMAR) ACPI tables.
 *
 * These routines are used by both DMA-remapping and Interrupt-remapping.
 */

#include <linux/pci.h>
#include <linux/dmar.h>
#include <linux/iova.h>
#include <linux/intel-iommu.h>
#include <linux/timer.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/tboot.h>
#include <linux/dmi.h>

#define PREFIX "DMAR: "

/*
 * No locks are needed as the DMA remapping hardware unit
 * list is constructed at boot time, and hotplug of
 * these units is not supported by the architecture.
 */
LIST_HEAD(dmar_drhd_units);

static struct acpi_table_header * __initdata dmar_tbl;
static acpi_size dmar_tbl_size;

static void __init dmar_register_drhd_unit(struct dmar_drhd_unit *drhd)
{
        /*
         * Add the INCLUDE_ALL unit at the tail, so a scan of the list
         * will find it at the very end.
         */
        if (drhd->include_all)
                list_add_tail(&drhd->list, &dmar_drhd_units);
        else
                list_add(&drhd->list, &dmar_drhd_units);
}

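/*
 * Illustrative sketch, not part of the original file: because
 * INCLUDE_ALL units sit at the tail, a first-match scan naturally
 * prefers units with an explicit device scope and only falls back to
 * the catch-all unit last. The helper name below is hypothetical.
 */
static struct dmar_drhd_unit *example_first_match(u16 segment)
{
        struct dmar_drhd_unit *drhd;

        list_for_each_entry(drhd, &dmar_drhd_units, list)
                if (drhd->segment == segment)
                        return drhd;    /* an INCLUDE_ALL unit matches last */
        return NULL;
}
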
static int __init dmar_parse_one_dev_scope(struct acpi_dmar_device_scope *scope,
                                           struct pci_dev **dev, u16 segment)
{
        struct pci_bus *bus;
        struct pci_dev *pdev = NULL;
        struct acpi_dmar_pci_path *path;
        int count;

        bus = pci_find_bus(segment, scope->bus);
        path = (struct acpi_dmar_pci_path *)(scope + 1);
        count = (scope->length - sizeof(struct acpi_dmar_device_scope))
                / sizeof(struct acpi_dmar_pci_path);

        while (count) {
                if (pdev)
                        pci_dev_put(pdev);
                /*
                 * Some BIOSes list nonexistent devices in the DMAR
                 * table; just ignore them.
                 */
                if (!bus) {
                        printk(KERN_WARNING
                                PREFIX "Device scope bus [%d] not found\n",
                                scope->bus);
                        break;
                }
                pdev = pci_get_slot(bus, PCI_DEVFN(path->dev, path->fn));
                if (!pdev) {
                        printk(KERN_WARNING PREFIX
                                "Device scope device [%04x:%02x:%02x.%02x] not found\n",
                                segment, bus->number, path->dev, path->fn);
                        break;
                }
                path++;
                count--;
                bus = pdev->subordinate;
        }
        if (!pdev) {
                printk(KERN_WARNING PREFIX
                        "Device scope device [%04x:%02x:%02x.%02x] not found\n",
                        segment, scope->bus, path->dev, path->fn);
                *dev = NULL;
                return 0;
        }
        if ((scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT &&
             pdev->subordinate) ||
            (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE &&
             !pdev->subordinate)) {
                printk(KERN_WARNING PREFIX
                        "Device scope type does not match for %s\n",
                        pci_name(pdev));
                pci_dev_put(pdev);
                return -EINVAL;
        }
        *dev = pdev;
        return 0;
}

static int __init dmar_parse_dev_scope(void *start, void *end, int *cnt,
                                       struct pci_dev ***devices, u16 segment)
{
        struct acpi_dmar_device_scope *scope;
        void *tmp = start;
        int index;
        int ret;

        *cnt = 0;
        while (start < end) {
                scope = start;
                if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT ||
                    scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE)
                        (*cnt)++;
                else
                        printk(KERN_WARNING PREFIX
                                "Unsupported device scope\n");
                start += scope->length;
        }
        if (*cnt == 0)
                return 0;

        *devices = kcalloc(*cnt, sizeof(struct pci_dev *), GFP_KERNEL);
        if (!*devices)
                return -ENOMEM;

        start = tmp;
        index = 0;
        while (start < end) {
                scope = start;
                if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT ||
                    scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE) {
                        ret = dmar_parse_one_dev_scope(scope,
                                &(*devices)[index], segment);
                        if (ret) {
                                kfree(*devices);
                                return ret;
                        }
                        index++;
                }
                start += scope->length;
        }

        return 0;
}

/**
 * dmar_parse_one_drhd - parse exactly one DMA remapping hardware definition
 * structure, which uniquely represents one DMA remapping hardware unit
 * present in the platform
 */
static int __init
dmar_parse_one_drhd(struct acpi_dmar_header *header)
{
        struct acpi_dmar_hardware_unit *drhd;
        struct dmar_drhd_unit *dmaru;
        int ret = 0;

        drhd = (struct acpi_dmar_hardware_unit *)header;
        dmaru = kzalloc(sizeof(*dmaru), GFP_KERNEL);
        if (!dmaru)
                return -ENOMEM;

        dmaru->hdr = header;
        dmaru->reg_base_addr = drhd->address;
        dmaru->segment = drhd->segment;
        dmaru->include_all = drhd->flags & 0x1; /* BIT0: INCLUDE_ALL */

        ret = alloc_iommu(dmaru);
        if (ret) {
                kfree(dmaru);
                return ret;
        }
        dmar_register_drhd_unit(dmaru);

        return 0;
}

static int __init dmar_parse_dev(struct dmar_drhd_unit *dmaru)
{
        struct acpi_dmar_hardware_unit *drhd;
        int ret = 0;

        drhd = (struct acpi_dmar_hardware_unit *) dmaru->hdr;

        if (dmaru->include_all)
                return 0;

        ret = dmar_parse_dev_scope((void *)(drhd + 1),
                                ((void *)drhd) + drhd->header.length,
                                &dmaru->devices_cnt, &dmaru->devices,
                                drhd->segment);
        if (ret) {
                list_del(&dmaru->list);
                kfree(dmaru);
        }
        return ret;
}

LIST_HEAD(dmar_rmrr_units);

static void __init dmar_register_rmrr_unit(struct dmar_rmrr_unit *rmrr)
{
        list_add(&rmrr->list, &dmar_rmrr_units);
}

static int __init
dmar_parse_one_rmrr(struct acpi_dmar_header *header)
{
        struct acpi_dmar_reserved_memory *rmrr;
        struct dmar_rmrr_unit *rmrru;

        rmrru = kzalloc(sizeof(*rmrru), GFP_KERNEL);
        if (!rmrru)
                return -ENOMEM;

        rmrru->hdr = header;
        rmrr = (struct acpi_dmar_reserved_memory *)header;
        rmrru->base_address = rmrr->base_address;
        rmrru->end_address = rmrr->end_address;

        dmar_register_rmrr_unit(rmrru);
        return 0;
}

static int __init
rmrr_parse_dev(struct dmar_rmrr_unit *rmrru)
{
        struct acpi_dmar_reserved_memory *rmrr;
        int ret;

        rmrr = (struct acpi_dmar_reserved_memory *) rmrru->hdr;
        ret = dmar_parse_dev_scope((void *)(rmrr + 1),
                ((void *)rmrr) + rmrr->header.length,
                &rmrru->devices_cnt, &rmrru->devices, rmrr->segment);
        if (ret || (rmrru->devices_cnt == 0)) {
                list_del(&rmrru->list);
                kfree(rmrru);
        }
        return ret;
}

static LIST_HEAD(dmar_atsr_units);

static int __init dmar_parse_one_atsr(struct acpi_dmar_header *hdr)
{
        struct acpi_dmar_atsr *atsr;
        struct dmar_atsr_unit *atsru;

        atsr = container_of(hdr, struct acpi_dmar_atsr, header);
        atsru = kzalloc(sizeof(*atsru), GFP_KERNEL);
        if (!atsru)
                return -ENOMEM;

        atsru->hdr = hdr;
        atsru->include_all = atsr->flags & 0x1;

        list_add(&atsru->list, &dmar_atsr_units);

        return 0;
}

static int __init atsr_parse_dev(struct dmar_atsr_unit *atsru)
{
        int rc;
        struct acpi_dmar_atsr *atsr;

        if (atsru->include_all)
                return 0;

        atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
        rc = dmar_parse_dev_scope((void *)(atsr + 1),
                                (void *)atsr + atsr->header.length,
                                &atsru->devices_cnt, &atsru->devices,
                                atsr->segment);
        if (rc || !atsru->devices_cnt) {
                list_del(&atsru->list);
                kfree(atsru);
        }

        return rc;
}

int dmar_find_matched_atsr_unit(struct pci_dev *dev)
{
        int i;
        struct pci_bus *bus;
        struct acpi_dmar_atsr *atsr;
        struct dmar_atsr_unit *atsru;

        list_for_each_entry(atsru, &dmar_atsr_units, list) {
                atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
                if (atsr->segment == pci_domain_nr(dev->bus))
                        goto found;
        }

        return 0;

found:
        for (bus = dev->bus; bus; bus = bus->parent) {
                struct pci_dev *bridge = bus->self;

                if (!bridge || !bridge->is_pcie ||
                    bridge->pcie_type == PCI_EXP_TYPE_PCI_BRIDGE)
                        return 0;

                if (bridge->pcie_type == PCI_EXP_TYPE_ROOT_PORT) {
                        for (i = 0; i < atsru->devices_cnt; i++)
                                if (atsru->devices[i] == bridge)
                                        return 1;
                        break;
                }
        }

        if (atsru->include_all)
                return 1;

        return 0;
}

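/*
 * Illustrative sketch, not part of the original file: callers such as
 * the DMA-remapping driver typically gate ATS use on both the device
 * capability and a matching ATSR. The helper name is hypothetical, and
 * PCI_EXT_CAP_ID_ATS is assumed to be available in this tree.
 */
static int example_device_may_use_ats(struct pci_dev *dev)
{
        return pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ATS) &&
               dmar_find_matched_atsr_unit(dev);
}
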
#ifdef CONFIG_ACPI_NUMA
static int __init
dmar_parse_one_rhsa(struct acpi_dmar_header *header)
{
        struct acpi_dmar_rhsa *rhsa;
        struct dmar_drhd_unit *drhd;

        rhsa = (struct acpi_dmar_rhsa *)header;
        for_each_drhd_unit(drhd) {
                if (drhd->reg_base_addr == rhsa->base_address) {
                        int node = acpi_map_pxm_to_node(rhsa->proximity_domain);

                        if (!node_online(node))
                                node = -1;
                        drhd->iommu->node = node;
                        return 0;
                }
        }
        WARN(1, "Your BIOS is broken; RHSA refers to non-existent DMAR unit at %llx\n"
             "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
             rhsa->base_address,
             dmi_get_system_info(DMI_BIOS_VENDOR),
             dmi_get_system_info(DMI_BIOS_VERSION),
             dmi_get_system_info(DMI_PRODUCT_VERSION));

        return 0;
}
#endif

static void __init
dmar_table_print_dmar_entry(struct acpi_dmar_header *header)
{
        struct acpi_dmar_hardware_unit *drhd;
        struct acpi_dmar_reserved_memory *rmrr;
        struct acpi_dmar_atsr *atsr;
        struct acpi_dmar_rhsa *rhsa;

        switch (header->type) {
        case ACPI_DMAR_TYPE_HARDWARE_UNIT:
                drhd = container_of(header, struct acpi_dmar_hardware_unit,
                                    header);
                printk(KERN_INFO PREFIX
                       "DRHD base: %#016Lx flags: %#x\n",
                       (unsigned long long)drhd->address, drhd->flags);
                break;
        case ACPI_DMAR_TYPE_RESERVED_MEMORY:
                rmrr = container_of(header, struct acpi_dmar_reserved_memory,
                                    header);
                printk(KERN_INFO PREFIX
                       "RMRR base: %#016Lx end: %#016Lx\n",
                       (unsigned long long)rmrr->base_address,
                       (unsigned long long)rmrr->end_address);
                break;
        case ACPI_DMAR_TYPE_ATSR:
                atsr = container_of(header, struct acpi_dmar_atsr, header);
                printk(KERN_INFO PREFIX "ATSR flags: %#x\n", atsr->flags);
                break;
        case ACPI_DMAR_HARDWARE_AFFINITY:
                rhsa = container_of(header, struct acpi_dmar_rhsa, header);
                printk(KERN_INFO PREFIX "RHSA base: %#016Lx proximity domain: %#x\n",
                       (unsigned long long)rhsa->base_address,
                       rhsa->proximity_domain);
                break;
        }
}

/**
 * dmar_table_detect - checks to see if the platform supports DMAR devices
 */
static int __init dmar_table_detect(void)
{
        acpi_status status = AE_OK;

        /* If we can find the DMAR table, then there are DMAR devices. */
        status = acpi_get_table_with_size(ACPI_SIG_DMAR, 0,
                                (struct acpi_table_header **)&dmar_tbl,
                                &dmar_tbl_size);

        if (ACPI_SUCCESS(status) && !dmar_tbl) {
                printk(KERN_WARNING PREFIX "Unable to map DMAR\n");
                status = AE_NOT_FOUND;
        }

        return (ACPI_SUCCESS(status) ? 1 : 0);
}

/**
 * parse_dmar_table - parses the DMA reporting table
 */
static int __init
parse_dmar_table(void)
{
        struct acpi_table_dmar *dmar;
        struct acpi_dmar_header *entry_header;
        int ret = 0;

        /*
         * Do it again; the earlier dmar_tbl mapping could have been
         * done with a fixed map.
         */
        dmar_table_detect();

        /*
         * ACPI tables may not be DMA protected by tboot, so use the DMAR
         * copy SINIT saved in SinitMleData in the TXT heap (which is DMA
         * protected).
         */
        dmar_tbl = tboot_get_dmar_table(dmar_tbl);

        dmar = (struct acpi_table_dmar *)dmar_tbl;
        if (!dmar)
                return -ENODEV;

        if (dmar->width < PAGE_SHIFT - 1) {
                printk(KERN_WARNING PREFIX "Invalid DMAR haw\n");
                return -EINVAL;
        }

        printk(KERN_INFO PREFIX "Host address width %d\n",
               dmar->width + 1);

        entry_header = (struct acpi_dmar_header *)(dmar + 1);
        while (((unsigned long)entry_header) <
                        (((unsigned long)dmar) + dmar_tbl->length)) {
                /* Avoid looping forever on bad ACPI tables */
                if (entry_header->length == 0) {
                        printk(KERN_WARNING PREFIX
                                "Invalid 0-length structure\n");
                        ret = -EINVAL;
                        break;
                }

                dmar_table_print_dmar_entry(entry_header);

                switch (entry_header->type) {
                case ACPI_DMAR_TYPE_HARDWARE_UNIT:
                        ret = dmar_parse_one_drhd(entry_header);
                        break;
                case ACPI_DMAR_TYPE_RESERVED_MEMORY:
                        ret = dmar_parse_one_rmrr(entry_header);
                        break;
                case ACPI_DMAR_TYPE_ATSR:
                        ret = dmar_parse_one_atsr(entry_header);
                        break;
                case ACPI_DMAR_HARDWARE_AFFINITY:
#ifdef CONFIG_ACPI_NUMA
                        ret = dmar_parse_one_rhsa(entry_header);
#endif
                        break;
                default:
                        printk(KERN_WARNING PREFIX
                                "Unknown DMAR structure type %d\n",
                                entry_header->type);
                        ret = 0; /* for forward compatibility */
                        break;
                }
                if (ret)
                        break;

                entry_header = ((void *)entry_header + entry_header->length);
        }
        return ret;
}

int dmar_pci_device_match(struct pci_dev *devices[], int cnt,
                          struct pci_dev *dev)
{
        int index;

        while (dev) {
                for (index = 0; index < cnt; index++)
                        if (dev == devices[index])
                                return 1;

                /* Check our parent */
                dev = dev->bus->self;
        }

        return 0;
}

struct dmar_drhd_unit *
dmar_find_matched_drhd_unit(struct pci_dev *dev)
{
        struct dmar_drhd_unit *dmaru = NULL;
        struct acpi_dmar_hardware_unit *drhd;

        list_for_each_entry(dmaru, &dmar_drhd_units, list) {
                drhd = container_of(dmaru->hdr,
                                    struct acpi_dmar_hardware_unit,
                                    header);

                if (dmaru->include_all &&
                    drhd->segment == pci_domain_nr(dev->bus))
                        return dmaru;

                if (dmar_pci_device_match(dmaru->devices,
                                          dmaru->devices_cnt, dev))
                        return dmaru;
        }

        return NULL;
}

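/*
 * Illustrative sketch, not part of the original file: typical consumers
 * map a struct pci_dev to its IOMMU via the DRHD lookup above; the
 * helper name is hypothetical.
 */
static struct intel_iommu *example_device_to_iommu(struct pci_dev *pdev)
{
        struct dmar_drhd_unit *drhd = dmar_find_matched_drhd_unit(pdev);

        return drhd ? drhd->iommu : NULL;
}
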
int __init dmar_dev_scope_init(void)
{
        struct dmar_drhd_unit *drhd, *drhd_n;
        struct dmar_rmrr_unit *rmrr, *rmrr_n;
        struct dmar_atsr_unit *atsr, *atsr_n;
        int ret = -ENODEV;

        list_for_each_entry_safe(drhd, drhd_n, &dmar_drhd_units, list) {
                ret = dmar_parse_dev(drhd);
                if (ret)
                        return ret;
        }

        list_for_each_entry_safe(rmrr, rmrr_n, &dmar_rmrr_units, list) {
                ret = rmrr_parse_dev(rmrr);
                if (ret)
                        return ret;
        }

        list_for_each_entry_safe(atsr, atsr_n, &dmar_atsr_units, list) {
                ret = atsr_parse_dev(atsr);
                if (ret)
                        return ret;
        }

        return ret;
}

int __init dmar_table_init(void)
{
        static int dmar_table_initialized;
        int ret;

        if (dmar_table_initialized)
                return 0;

        dmar_table_initialized = 1;

        ret = parse_dmar_table();
        if (ret) {
                if (ret != -ENODEV)
                        printk(KERN_INFO PREFIX "Failed to parse DMAR table\n");
                return ret;
        }

        if (list_empty(&dmar_drhd_units)) {
                printk(KERN_INFO PREFIX "No DMAR devices found\n");
                return -ENODEV;
        }

        if (list_empty(&dmar_rmrr_units))
                printk(KERN_INFO PREFIX "No RMRR found\n");

        if (list_empty(&dmar_atsr_units))
                printk(KERN_INFO PREFIX "No ATSR found\n");

        return 0;
}

static int __init check_zero_address(void)
{
        struct acpi_table_dmar *dmar;
        struct acpi_dmar_header *entry_header;
        struct acpi_dmar_hardware_unit *drhd;

        dmar = (struct acpi_table_dmar *)dmar_tbl;
        entry_header = (struct acpi_dmar_header *)(dmar + 1);

        while (((unsigned long)entry_header) <
                        (((unsigned long)dmar) + dmar_tbl->length)) {
                /* Avoid looping forever on bad ACPI tables */
                if (entry_header->length == 0) {
                        printk(KERN_WARNING PREFIX
                                "Invalid 0-length structure\n");
                        return 0;
                }

                if (entry_header->type == ACPI_DMAR_TYPE_HARDWARE_UNIT) {
                        drhd = (void *)entry_header;
                        if (!drhd->address) {
                                /* Promote an attitude of violence to a BIOS engineer today */
                                WARN(1, "Your BIOS is broken; DMAR reported at address zero!\n"
                                     "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
                                     dmi_get_system_info(DMI_BIOS_VENDOR),
                                     dmi_get_system_info(DMI_BIOS_VERSION),
                                     dmi_get_system_info(DMI_PRODUCT_VERSION));
                                return 0;
                        }
                }

                entry_header = ((void *)entry_header + entry_header->length);
        }
        return 1;
}

void __init detect_intel_iommu(void)
{
        int ret;

        ret = dmar_table_detect();
        if (ret)
                ret = check_zero_address();
        {
#ifdef CONFIG_INTR_REMAP
                struct acpi_table_dmar *dmar;
                /*
                 * For now we disable DMA-remapping when interrupt
                 * remapping is enabled. Once support for queued
                 * invalidation of the IOTLB is added, this will no
                 * longer be needed.
                 */
                dmar = (struct acpi_table_dmar *) dmar_tbl;
                if (ret && cpu_has_x2apic && dmar->flags & 0x1)
                        printk(KERN_INFO
                               "Queued invalidation will be enabled to support "
                               "x2apic and Intr-remapping.\n");
#endif
                if (ret && !no_iommu && !iommu_detected && !dmar_disabled)
                        iommu_detected = 1;

                if (ret)
                        x86_init.iommu.iommu_init = intel_iommu_init;
        }
        early_acpi_os_unmap_memory(dmar_tbl, dmar_tbl_size);
        dmar_tbl = NULL;
}

int alloc_iommu(struct dmar_drhd_unit *drhd)
{
        struct intel_iommu *iommu;
        int map_size;
        u32 ver;
        static int iommu_allocated = 0;
        int agaw = 0;
        int msagaw = 0;

        iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);
        if (!iommu)
                return -ENOMEM;

        iommu->seq_id = iommu_allocated++;
        sprintf(iommu->name, "dmar%d", iommu->seq_id);

        iommu->reg = ioremap(drhd->reg_base_addr, VTD_PAGE_SIZE);
        if (!iommu->reg) {
                printk(KERN_ERR "IOMMU: can't map the region\n");
                goto error;
        }
        iommu->cap = dmar_readq(iommu->reg + DMAR_CAP_REG);
        iommu->ecap = dmar_readq(iommu->reg + DMAR_ECAP_REG);

        if (iommu->cap == (uint64_t)-1 && iommu->ecap == (uint64_t)-1) {
                /* Promote an attitude of violence to a BIOS engineer today */
                WARN(1, "Your BIOS is broken; DMAR reported at address %llx returns all ones!\n"
                     "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
                     drhd->reg_base_addr,
                     dmi_get_system_info(DMI_BIOS_VENDOR),
                     dmi_get_system_info(DMI_BIOS_VERSION),
                     dmi_get_system_info(DMI_PRODUCT_VERSION));
                goto err_unmap;
        }

        agaw = iommu_calculate_agaw(iommu);
        if (agaw < 0) {
                printk(KERN_ERR
                       "Cannot get a valid agaw for iommu (seq_id = %d)\n",
                       iommu->seq_id);
                goto err_unmap;
        }
        msagaw = iommu_calculate_max_sagaw(iommu);
        if (msagaw < 0) {
                printk(KERN_ERR
                       "Cannot get a valid max agaw for iommu (seq_id = %d)\n",
                       iommu->seq_id);
                goto err_unmap;
        }
        iommu->agaw = agaw;
        iommu->msagaw = msagaw;

        iommu->node = -1;

        /* the registers might be more than one page */
        map_size = max_t(int, ecap_max_iotlb_offset(iommu->ecap),
                         cap_max_fault_reg_offset(iommu->cap));
        map_size = VTD_PAGE_ALIGN(map_size);
        if (map_size > VTD_PAGE_SIZE) {
                iounmap(iommu->reg);
                iommu->reg = ioremap(drhd->reg_base_addr, map_size);
                if (!iommu->reg) {
                        printk(KERN_ERR "IOMMU: can't map the region\n");
                        goto error;
                }
        }

        ver = readl(iommu->reg + DMAR_VER_REG);
        pr_info("IOMMU %llx: ver %d:%d cap %llx ecap %llx\n",
                (unsigned long long)drhd->reg_base_addr,
                DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver),
                (unsigned long long)iommu->cap,
                (unsigned long long)iommu->ecap);

        spin_lock_init(&iommu->register_lock);

        drhd->iommu = iommu;
        return 0;

err_unmap:
        iounmap(iommu->reg);
error:
        kfree(iommu);
        return -1;
}

void free_iommu(struct intel_iommu *iommu)
{
        if (!iommu)
                return;

        free_dmar_iommu(iommu);

        if (iommu->reg)
                iounmap(iommu->reg);
        kfree(iommu);
}

/*
 * Reclaim all the submitted descriptors which have completed their work.
 */
static inline void reclaim_free_desc(struct q_inval *qi)
{
        while (qi->desc_status[qi->free_tail] == QI_DONE ||
               qi->desc_status[qi->free_tail] == QI_ABORT) {
                qi->desc_status[qi->free_tail] = QI_FREE;
                qi->free_tail = (qi->free_tail + 1) % QI_LENGTH;
                qi->free_cnt++;
        }
}

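/*
 * Illustrative sketch, not part of the original file: the invalidation
 * queue is a fixed ring of QI_LENGTH slots, so every index advance is
 * taken modulo QI_LENGTH; the hypothetical helper below shows the
 * wrap-around arithmetic used throughout this file.
 */
static inline int example_qi_next(int index)
{
        return (index + 1) % QI_LENGTH; /* QI_LENGTH - 1 wraps to 0 */
}
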
static int qi_check_fault(struct intel_iommu *iommu, int index)
{
        u32 fault;
        int head, tail;
        struct q_inval *qi = iommu->qi;
        int wait_index = (index + 1) % QI_LENGTH;

        if (qi->desc_status[wait_index] == QI_ABORT)
                return -EAGAIN;

        fault = readl(iommu->reg + DMAR_FSTS_REG);

        /*
         * If IQE happens, the head points to the descriptor associated
         * with the error. No new descriptors are fetched until the IQE
         * is cleared.
         */
        if (fault & DMA_FSTS_IQE) {
                head = readl(iommu->reg + DMAR_IQH_REG);
                if ((head >> DMAR_IQ_SHIFT) == index) {
                        printk(KERN_ERR "VT-d detected invalid descriptor: "
                               "low=%llx, high=%llx\n",
                               (unsigned long long)qi->desc[index].low,
                               (unsigned long long)qi->desc[index].high);
                        memcpy(&qi->desc[index], &qi->desc[wait_index],
                               sizeof(struct qi_desc));
                        __iommu_flush_cache(iommu, &qi->desc[index],
                                            sizeof(struct qi_desc));
                        writel(DMA_FSTS_IQE, iommu->reg + DMAR_FSTS_REG);
                        return -EINVAL;
                }
        }

        /*
         * If ITE happens, all pending wait_desc commands are aborted.
         * No new descriptors are fetched until the ITE is cleared.
         */
        if (fault & DMA_FSTS_ITE) {
                head = readl(iommu->reg + DMAR_IQH_REG);
                head = ((head >> DMAR_IQ_SHIFT) - 1 + QI_LENGTH) % QI_LENGTH;
                head |= 1;
                tail = readl(iommu->reg + DMAR_IQT_REG);
                tail = ((tail >> DMAR_IQ_SHIFT) - 1 + QI_LENGTH) % QI_LENGTH;

                writel(DMA_FSTS_ITE, iommu->reg + DMAR_FSTS_REG);

                do {
                        if (qi->desc_status[head] == QI_IN_USE)
                                qi->desc_status[head] = QI_ABORT;
                        head = (head - 2 + QI_LENGTH) % QI_LENGTH;
                } while (head != tail);

                if (qi->desc_status[wait_index] == QI_ABORT)
                        return -EAGAIN;
        }

        if (fault & DMA_FSTS_ICE)
                writel(DMA_FSTS_ICE, iommu->reg + DMAR_FSTS_REG);

        return 0;
}

/*
 * Submit the queued invalidation descriptor to the remapping
 * hardware unit and wait for its completion.
 */
int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu)
{
        int rc;
        struct q_inval *qi = iommu->qi;
        struct qi_desc *hw, wait_desc;
        int wait_index, index;
        unsigned long flags;

        if (!qi)
                return 0;

        hw = qi->desc;

restart:
        rc = 0;

        spin_lock_irqsave(&qi->q_lock, flags);
        while (qi->free_cnt < 3) {
                spin_unlock_irqrestore(&qi->q_lock, flags);
                cpu_relax();
                spin_lock_irqsave(&qi->q_lock, flags);
        }

        index = qi->free_head;
        wait_index = (index + 1) % QI_LENGTH;

        qi->desc_status[index] = qi->desc_status[wait_index] = QI_IN_USE;

        hw[index] = *desc;

        wait_desc.low = QI_IWD_STATUS_DATA(QI_DONE) |
                        QI_IWD_STATUS_WRITE | QI_IWD_TYPE;
        wait_desc.high = virt_to_phys(&qi->desc_status[wait_index]);

        hw[wait_index] = wait_desc;

        __iommu_flush_cache(iommu, &hw[index], sizeof(struct qi_desc));
        __iommu_flush_cache(iommu, &hw[wait_index], sizeof(struct qi_desc));

        qi->free_head = (qi->free_head + 2) % QI_LENGTH;
        qi->free_cnt -= 2;

        /*
         * Update the HW tail register indicating the presence of
         * new descriptors.
         */
        writel(qi->free_head << DMAR_IQ_SHIFT, iommu->reg + DMAR_IQT_REG);

        while (qi->desc_status[wait_index] != QI_DONE) {
                /*
                 * We leave interrupts disabled to prevent the interrupt
                 * context from queueing another command while one is
                 * already submitted and waiting for completion on this
                 * CPU; otherwise the interrupt context could wait
                 * indefinitely for free slots in the queue and deadlock.
                 */
                rc = qi_check_fault(iommu, index);
                if (rc)
                        break;

                spin_unlock(&qi->q_lock);
                cpu_relax();
                spin_lock(&qi->q_lock);
        }

        qi->desc_status[index] = QI_DONE;

        reclaim_free_desc(qi);
        spin_unlock_irqrestore(&qi->q_lock, flags);

        if (rc == -EAGAIN)
                goto restart;

        return rc;
}

/*
 * Flush the global interrupt entry cache.
 */
void qi_global_iec(struct intel_iommu *iommu)
{
        struct qi_desc desc;

        desc.low = QI_IEC_TYPE;
        desc.high = 0;

        /* should never fail */
        qi_submit_sync(&desc, iommu);
}

void qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid, u8 fm,
                      u64 type)
{
        struct qi_desc desc;

        desc.low = QI_CC_FM(fm) | QI_CC_SID(sid) | QI_CC_DID(did)
                        | QI_CC_GRAN(type) | QI_CC_TYPE;
        desc.high = 0;

        qi_submit_sync(&desc, iommu);
}

void qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
                    unsigned int size_order, u64 type)
{
        u8 dw = 0, dr = 0;
        struct qi_desc desc;
        int ih = 0;

        if (cap_write_drain(iommu->cap))
                dw = 1;

        if (cap_read_drain(iommu->cap))
                dr = 1;

        desc.low = QI_IOTLB_DID(did) | QI_IOTLB_DR(dr) | QI_IOTLB_DW(dw)
                | QI_IOTLB_GRAN(type) | QI_IOTLB_TYPE;
        desc.high = QI_IOTLB_ADDR(addr) | QI_IOTLB_IH(ih)
                | QI_IOTLB_AM(size_order);

        qi_submit_sync(&desc, iommu);
}

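/*
 * Illustrative worked example, not part of the original file:
 * size_order is the number of 4K pages expressed as a power of two.
 * Flushing a 32K (eight-page) range for an arbitrary domain id 5 would
 * look roughly like:
 *
 *      qi_flush_iotlb(iommu, 5, 0x100000, 3, DMA_TLB_PSI_FLUSH);
 *
 * The granularity constant is the one DMA-remapping callers are
 * expected to pass; treat the exact call site as an assumption.
 */
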
void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 qdep,
                        u64 addr, unsigned mask)
{
        struct qi_desc desc;

        if (mask) {
                BUG_ON(addr & ((1 << (VTD_PAGE_SHIFT + mask)) - 1));
                addr |= (1 << (VTD_PAGE_SHIFT + mask - 1)) - 1;
                desc.high = QI_DEV_IOTLB_ADDR(addr) | QI_DEV_IOTLB_SIZE;
        } else
                desc.high = QI_DEV_IOTLB_ADDR(addr);

        if (qdep >= QI_DEV_IOTLB_MAX_INVS)
                qdep = 0;

        desc.low = QI_DEV_IOTLB_SID(sid) | QI_DEV_IOTLB_QDEP(qdep) |
                   QI_DEV_IOTLB_TYPE;

        qi_submit_sync(&desc, iommu);
}

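/*
 * Illustrative worked example, not part of the original file: for a
 * ranged device-IOTLB invalidation of 2^mask pages, the size is
 * encoded in the address bits themselves. With mask = 2 (four 4K
 * pages) and a suitably aligned addr of 0x10000:
 *
 *      addr |= (1 << (VTD_PAGE_SHIFT + 2 - 1)) - 1;    becomes 0x11fff
 *
 * i.e. all bits below the size bit are set, and QI_DEV_IOTLB_SIZE
 * marks the request as ranged rather than single-page.
 */
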
/*
 * Disable Queued Invalidation interface.
 */
void dmar_disable_qi(struct intel_iommu *iommu)
{
        unsigned long flags;
        u32 sts;
        cycles_t start_time = get_cycles();

        if (!ecap_qis(iommu->ecap))
                return;

        spin_lock_irqsave(&iommu->register_lock, flags);

        sts = readl(iommu->reg + DMAR_GSTS_REG);
        if (!(sts & DMA_GSTS_QIES))
                goto end;

        /*
         * Give the hardware a chance to complete the pending
         * invalidation requests.
         */
        while ((readl(iommu->reg + DMAR_IQT_REG) !=
                readl(iommu->reg + DMAR_IQH_REG)) &&
               (DMAR_OPERATION_TIMEOUT > (get_cycles() - start_time)))
                cpu_relax();

        iommu->gcmd &= ~DMA_GCMD_QIE;
        writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

        IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl,
                      !(sts & DMA_GSTS_QIES), sts);
end:
        spin_unlock_irqrestore(&iommu->register_lock, flags);
}

/*
 * Enable queued invalidation.
 */
static void __dmar_enable_qi(struct intel_iommu *iommu)
{
        u32 sts;
        unsigned long flags;
        struct q_inval *qi = iommu->qi;

        qi->free_head = qi->free_tail = 0;
        qi->free_cnt = QI_LENGTH;

        spin_lock_irqsave(&iommu->register_lock, flags);

        /* write zero to the tail reg */
        writel(0, iommu->reg + DMAR_IQT_REG);

        dmar_writeq(iommu->reg + DMAR_IQA_REG, virt_to_phys(qi->desc));

        iommu->gcmd |= DMA_GCMD_QIE;
        writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

        /* Make sure the hardware completes it */
        IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl, (sts & DMA_GSTS_QIES), sts);

        spin_unlock_irqrestore(&iommu->register_lock, flags);
}

/*
 * Enable Queued Invalidation interface. This is a must to support
 * interrupt-remapping. Also used by DMA-remapping, which replaces
 * register based IOTLB invalidation.
 */
int dmar_enable_qi(struct intel_iommu *iommu)
{
        struct q_inval *qi;
        struct page *desc_page;

        if (!ecap_qis(iommu->ecap))
                return -ENOENT;

        /*
         * Queued invalidation is already set up and enabled.
         */
        if (iommu->qi)
                return 0;

        iommu->qi = kmalloc(sizeof(*qi), GFP_ATOMIC);
        if (!iommu->qi)
                return -ENOMEM;

        qi = iommu->qi;

        desc_page = alloc_pages_node(iommu->node, GFP_ATOMIC | __GFP_ZERO, 0);
        if (!desc_page) {
                kfree(qi);
                iommu->qi = NULL;
                return -ENOMEM;
        }

        qi->desc = page_address(desc_page);

        qi->desc_status = kmalloc(QI_LENGTH * sizeof(int), GFP_ATOMIC);
        if (!qi->desc_status) {
                free_page((unsigned long) qi->desc);
                kfree(qi);
                iommu->qi = NULL;
                return -ENOMEM;
        }

        qi->free_head = qi->free_tail = 0;
        qi->free_cnt = QI_LENGTH;

        spin_lock_init(&qi->q_lock);

        __dmar_enable_qi(iommu);

        return 0;
}

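/*
 * Illustrative sketch, not part of the original file: a typical
 * bring-up sequence pairs queued invalidation with fault reporting.
 * The function names are real, but this helper and the ordering shown
 * are an assumption.
 */
static int example_iommu_bringup(struct intel_iommu *iommu)
{
        int ret = dmar_enable_qi(iommu);

        if (ret)
                return ret;
        return dmar_set_interrupt(iommu);
}
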
/* iommu interrupt handling. Most of it is MSI-like. */

enum faulttype {
        DMA_REMAP,
        INTR_REMAP,
        UNKNOWN,
};

static const char *dma_remap_fault_reasons[] =
{
        "Software",
        "Present bit in root entry is clear",
        "Present bit in context entry is clear",
        "Invalid context entry",
        "Access beyond MGAW",
        "PTE Write access is not set",
        "PTE Read access is not set",
        "Next page table ptr is invalid",
        "Root table address invalid",
        "Context table ptr is invalid",
        "non-zero reserved fields in RTP",
        "non-zero reserved fields in CTP",
        "non-zero reserved fields in PTE",
};

static const char *intr_remap_fault_reasons[] =
{
        "Detected reserved fields in the decoded interrupt-remapped request",
        "Interrupt index exceeded the interrupt-remapping table size",
        "Present field in the IRTE entry is clear",
        "Error accessing interrupt-remapping table pointed by IRTA_REG",
        "Detected reserved fields in the IRTE entry",
        "Blocked a compatibility format interrupt request",
        "Blocked an interrupt request due to source-id verification failure",
};

#define MAX_FAULT_REASON_IDX    (ARRAY_SIZE(dma_remap_fault_reasons) - 1)

const char *dmar_get_fault_reason(u8 fault_reason, int *fault_type)
{
        if (fault_reason >= 0x20 && (fault_reason < 0x20 +
                                     ARRAY_SIZE(intr_remap_fault_reasons))) {
                *fault_type = INTR_REMAP;
                return intr_remap_fault_reasons[fault_reason - 0x20];
        } else if (fault_reason < ARRAY_SIZE(dma_remap_fault_reasons)) {
                *fault_type = DMA_REMAP;
                return dma_remap_fault_reasons[fault_reason];
        } else {
                *fault_type = UNKNOWN;
                return "Unknown";
        }
}

void dmar_msi_unmask(unsigned int irq)
{
        struct intel_iommu *iommu = get_irq_data(irq);
        unsigned long flag;

        /* unmask it */
        spin_lock_irqsave(&iommu->register_lock, flag);
        writel(0, iommu->reg + DMAR_FECTL_REG);
        /* Read a register to force the posted write to be flushed */
        readl(iommu->reg + DMAR_FECTL_REG);
        spin_unlock_irqrestore(&iommu->register_lock, flag);
}

void dmar_msi_mask(unsigned int irq)
{
        struct intel_iommu *iommu = get_irq_data(irq);
        unsigned long flag;

        /* mask it */
        spin_lock_irqsave(&iommu->register_lock, flag);
        writel(DMA_FECTL_IM, iommu->reg + DMAR_FECTL_REG);
        /* Read a register to force the posted write to be flushed */
        readl(iommu->reg + DMAR_FECTL_REG);
        spin_unlock_irqrestore(&iommu->register_lock, flag);
}

void dmar_msi_write(int irq, struct msi_msg *msg)
{
        struct intel_iommu *iommu = get_irq_data(irq);
        unsigned long flag;

        spin_lock_irqsave(&iommu->register_lock, flag);
        writel(msg->data, iommu->reg + DMAR_FEDATA_REG);
        writel(msg->address_lo, iommu->reg + DMAR_FEADDR_REG);
        writel(msg->address_hi, iommu->reg + DMAR_FEUADDR_REG);
        spin_unlock_irqrestore(&iommu->register_lock, flag);
}

void dmar_msi_read(int irq, struct msi_msg *msg)
{
        struct intel_iommu *iommu = get_irq_data(irq);
        unsigned long flag;

        spin_lock_irqsave(&iommu->register_lock, flag);
        msg->data = readl(iommu->reg + DMAR_FEDATA_REG);
        msg->address_lo = readl(iommu->reg + DMAR_FEADDR_REG);
        msg->address_hi = readl(iommu->reg + DMAR_FEUADDR_REG);
        spin_unlock_irqrestore(&iommu->register_lock, flag);
}

static int dmar_fault_do_one(struct intel_iommu *iommu, int type,
                u8 fault_reason, u16 source_id, unsigned long long addr)
{
        const char *reason;
        int fault_type;

        reason = dmar_get_fault_reason(fault_reason, &fault_type);

        if (fault_type == INTR_REMAP)
                printk(KERN_ERR "INTR-REMAP: Request device [%02x:%02x.%d] "
                       "fault index %llx\n"
                       "INTR-REMAP:[fault reason %02d] %s\n",
                       (source_id >> 8), PCI_SLOT(source_id & 0xFF),
                       PCI_FUNC(source_id & 0xFF), addr >> 48,
                       fault_reason, reason);
        else
                printk(KERN_ERR
                       "DMAR:[%s] Request device [%02x:%02x.%d] "
                       "fault addr %llx\n"
                       "DMAR:[fault reason %02d] %s\n",
                       (type ? "DMA Read" : "DMA Write"),
                       (source_id >> 8), PCI_SLOT(source_id & 0xFF),
                       PCI_FUNC(source_id & 0xFF), addr, fault_reason, reason);
        return 0;
}

#define PRIMARY_FAULT_REG_LEN (16)
irqreturn_t dmar_fault(int irq, void *dev_id)
{
        struct intel_iommu *iommu = dev_id;
        int reg, fault_index;
        u32 fault_status;
        unsigned long flag;

        spin_lock_irqsave(&iommu->register_lock, flag);
        fault_status = readl(iommu->reg + DMAR_FSTS_REG);
        if (fault_status)
                printk(KERN_ERR "DRHD: handling fault status reg %x\n",
                       fault_status);

        /* TBD: ignore advanced fault log currently */
        if (!(fault_status & DMA_FSTS_PPF))
                goto clear_rest;

        fault_index = dma_fsts_fault_record_index(fault_status);
        reg = cap_fault_reg_offset(iommu->cap);
        while (1) {
                u8 fault_reason;
                u16 source_id;
                u64 guest_addr;
                int type;
                u32 data;

                /* highest 32 bits */
                data = readl(iommu->reg + reg +
                             fault_index * PRIMARY_FAULT_REG_LEN + 12);
                if (!(data & DMA_FRCD_F))
                        break;

                fault_reason = dma_frcd_fault_reason(data);
                type = dma_frcd_type(data);

                data = readl(iommu->reg + reg +
                             fault_index * PRIMARY_FAULT_REG_LEN + 8);
                source_id = dma_frcd_source_id(data);

                guest_addr = dmar_readq(iommu->reg + reg +
                                fault_index * PRIMARY_FAULT_REG_LEN);
                guest_addr = dma_frcd_page_addr(guest_addr);
                /* clear the fault */
                writel(DMA_FRCD_F, iommu->reg + reg +
                        fault_index * PRIMARY_FAULT_REG_LEN + 12);

                spin_unlock_irqrestore(&iommu->register_lock, flag);

                dmar_fault_do_one(iommu, type, fault_reason,
                                  source_id, guest_addr);

                fault_index++;
                if (fault_index >= cap_num_fault_regs(iommu->cap))
                        fault_index = 0;
                spin_lock_irqsave(&iommu->register_lock, flag);
        }
clear_rest:
        /* clear all the other faults */
        fault_status = readl(iommu->reg + DMAR_FSTS_REG);
        writel(fault_status, iommu->reg + DMAR_FSTS_REG);

        spin_unlock_irqrestore(&iommu->register_lock, flag);
        return IRQ_HANDLED;
}

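/*
 * Illustrative note, not part of the original file: each primary fault
 * record is PRIMARY_FAULT_REG_LEN (16) bytes. The 64-bit faulting page
 * address sits at offset 0, the source-id word at offset 8, and the
 * status word (fault reason, type, and the F bit) at offset 12, which
 * is why the reads and the DMA_FRCD_F clear above use those offsets.
 */
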
int dmar_set_interrupt(struct intel_iommu *iommu)
{
        int irq, ret;

        /*
         * Check if the fault interrupt is already initialized.
         */
        if (iommu->irq)
                return 0;

        irq = create_irq();
        if (!irq) {
                printk(KERN_ERR "IOMMU: no free vectors\n");
                return -EINVAL;
        }

        set_irq_data(irq, iommu);
        iommu->irq = irq;

        ret = arch_setup_dmar_msi(irq);
        if (ret) {
                set_irq_data(irq, NULL);
                iommu->irq = 0;
                destroy_irq(irq);
                return ret;
        }

        ret = request_irq(irq, dmar_fault, 0, iommu->name, iommu);
        if (ret)
                printk(KERN_ERR "IOMMU: can't request irq\n");
        return ret;
}

int __init enable_drhd_fault_handling(void)
{
        struct dmar_drhd_unit *drhd;

        /*
         * Enable fault control interrupt.
         */
        for_each_drhd_unit(drhd) {
                int ret;
                struct intel_iommu *iommu = drhd->iommu;
                ret = dmar_set_interrupt(iommu);

                if (ret) {
                        printk(KERN_ERR "DRHD %Lx: failed to enable fault "
                               "interrupt, ret %d\n",
                               (unsigned long long)drhd->reg_base_addr, ret);
                        return -1;
                }
        }

        return 0;
}

/*
 * Re-enable Queued Invalidation interface.
 */
int dmar_reenable_qi(struct intel_iommu *iommu)
{
        if (!ecap_qis(iommu->ecap))
                return -ENOENT;

        if (!iommu->qi)
                return -ENOENT;

        /*
         * First disable queued invalidation.
         */
        dmar_disable_qi(iommu);
        /*
         * Then enable queued invalidation again. Since there are no
         * pending invalidation requests now, it's safe to re-enable
         * queued invalidation.
         */
        __dmar_enable_qi(iommu);

        return 0;
}

/*
 * Check interrupt remapping support in the DMAR table description.
 */
int dmar_ir_support(void)
{
        struct acpi_table_dmar *dmar;

        dmar = (struct acpi_table_dmar *)dmar_tbl;
        if (!dmar)
                return 0;
        return dmar->flags & 0x1;
}
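
/*
 * Illustrative sketch, not part of the original file: interrupt-
 * remapping setup is expected to consult this flag only after the DMAR
 * table has been parsed; the helper name is hypothetical.
 */
static inline int example_intr_remapping_possible(void)
{
        return dmar_table_init() == 0 && dmar_ir_support();
}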