2 * Copyright © 2006-2014 Intel Corporation.
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * Authors: David Woodhouse <dwmw2@infradead.org>,
14 * Ashok Raj <ashok.raj@intel.com>,
15 * Shaohua Li <shaohua.li@intel.com>,
16 * Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>,
17 * Fenghua Yu <fenghua.yu@intel.com>
20 #include <linux/init.h>
21 #include <linux/bitmap.h>
22 #include <linux/debugfs.h>
23 #include <linux/export.h>
24 #include <linux/slab.h>
25 #include <linux/irq.h>
26 #include <linux/interrupt.h>
27 #include <linux/spinlock.h>
28 #include <linux/pci.h>
29 #include <linux/dmar.h>
30 #include <linux/dma-mapping.h>
31 #include <linux/mempool.h>
32 #include <linux/memory.h>
33 #include <linux/timer.h>
34 #include <linux/iova.h>
35 #include <linux/iommu.h>
36 #include <linux/intel-iommu.h>
37 #include <linux/syscore_ops.h>
38 #include <linux/tboot.h>
39 #include <linux/dmi.h>
40 #include <linux/pci-ats.h>
41 #include <linux/memblock.h>
42 #include <linux/dma-contiguous.h>
43 #include <asm/irq_remapping.h>
44 #include <asm/cacheflush.h>
45 #include <asm/iommu.h>
47 #include "irq_remapping.h"
49 #define ROOT_SIZE VTD_PAGE_SIZE
50 #define CONTEXT_SIZE VTD_PAGE_SIZE
52 #define IS_GFX_DEVICE(pdev) ((pdev->class >> 16) == PCI_BASE_CLASS_DISPLAY)
53 #define IS_ISA_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_BRIDGE_ISA)
54 #define IS_AZALIA(pdev) ((pdev)->vendor == 0x8086 && (pdev)->device == 0x3a3e)
56 #define IOAPIC_RANGE_START (0xfee00000)
57 #define IOAPIC_RANGE_END (0xfeefffff)
58 #define IOVA_START_ADDR (0x1000)
60 #define DEFAULT_DOMAIN_ADDRESS_WIDTH 48
62 #define MAX_AGAW_WIDTH 64
63 #define MAX_AGAW_PFN_WIDTH (MAX_AGAW_WIDTH - VTD_PAGE_SHIFT)
65 #define __DOMAIN_MAX_PFN(gaw) ((((uint64_t)1) << (gaw-VTD_PAGE_SHIFT)) - 1)
66 #define __DOMAIN_MAX_ADDR(gaw) ((((uint64_t)1) << gaw) - 1)
68 /* We limit DOMAIN_MAX_PFN to fit in an unsigned long, and DOMAIN_MAX_ADDR
69 to match. That way, we can use 'unsigned long' for PFNs with impunity. */
70 #define DOMAIN_MAX_PFN(gaw) ((unsigned long) min_t(uint64_t, \
71 __DOMAIN_MAX_PFN(gaw), (unsigned long)-1))
72 #define DOMAIN_MAX_ADDR(gaw) (((uint64_t)__DOMAIN_MAX_PFN(gaw)) << VTD_PAGE_SHIFT)
74 #define IOVA_PFN(addr) ((addr) >> PAGE_SHIFT)
75 #define DMA_32BIT_PFN IOVA_PFN(DMA_BIT_MASK(32))
76 #define DMA_64BIT_PFN IOVA_PFN(DMA_BIT_MASK(64))
78 /* page table handling */
79 #define LEVEL_STRIDE (9)
80 #define LEVEL_MASK (((u64)1 << LEVEL_STRIDE) - 1)
83 * This bitmap is used to advertise the page sizes our hardware supports
84 * to the IOMMU core, which will then use this information to split
85 * physically contiguous memory regions it is mapping into page sizes that we support.
88 * Traditionally the IOMMU core just handed us the mappings directly,
89 * after making sure the size is an order of a 4KiB page and that the
90 * mapping has natural alignment.
92 * To retain this behavior, we currently advertise that we support
93 * all page sizes that are an order of 4KiB.
95 * If at some point we'd like to utilize the IOMMU core's new behavior,
96 * we could change this to advertise the real page sizes we support.
98 #define INTEL_IOMMU_PGSIZES (~0xFFFUL)
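/*
 * Illustrative sketch (not part of the original driver): with
 * INTEL_IOMMU_PGSIZES set to ~0xFFFUL every bit from bit 12 upwards is set,
 * so every power-of-two size that is a multiple of 4KiB is advertised to the
 * IOMMU core.  The helper below only demonstrates how such a bitmap is
 * interpreted; its name is hypothetical.
 */
static inline bool example_pgsize_advertised(unsigned long size)
{
	/* power of two, and its bit is set in the advertised bitmap */
	return size && !(size & (size - 1)) && (size & INTEL_IOMMU_PGSIZES);
}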
100 static inline int agaw_to_level(int agaw)
105 static inline int agaw_to_width(int agaw)
107 return min_t(int, 30 + agaw * LEVEL_STRIDE, MAX_AGAW_WIDTH);
110 static inline int width_to_agaw(int width)
112 return DIV_ROUND_UP(width - 30, LEVEL_STRIDE);
115 static inline unsigned int level_to_offset_bits(int level)
117 return (level - 1) * LEVEL_STRIDE;
120 static inline int pfn_level_offset(unsigned long pfn, int level)
122 return (pfn >> level_to_offset_bits(level)) & LEVEL_MASK;
125 static inline unsigned long level_mask(int level)
127 return -1UL << level_to_offset_bits(level);
130 static inline unsigned long level_size(int level)
132 return 1UL << level_to_offset_bits(level);
135 static inline unsigned long align_to_level(unsigned long pfn, int level)
137 return (pfn + level_size(level) - 1) & level_mask(level);
140 static inline unsigned long lvl_to_nr_pages(unsigned int lvl)
142 return 1 << min_t(int, (lvl - 1) * LEVEL_STRIDE, MAX_AGAW_PFN_WIDTH);
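/*
 * Worked example (illustrative only), assuming the usual agaw-to-level
 * mapping of level == agaw + 2: a 48-bit address width gives
 * width_to_agaw(48) == 2 and a 4-level page table.  A DMA PFN then
 * decomposes into four 9-bit indexes, level 4 using PFN bits 27-35 down to
 * level 1 using bits 0-8.  The helper name below is hypothetical.
 */
static inline void example_decompose_dma_pfn(unsigned long pfn,
					     unsigned int idx[4])
{
	int level;

	for (level = 4; level >= 1; level--)
		idx[level - 1] = pfn_level_offset(pfn, level);
}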
145 /* VT-d pages must always be _smaller_ than MM pages. Otherwise things
146 are never going to work. */
147 static inline unsigned long dma_to_mm_pfn(unsigned long dma_pfn)
149 return dma_pfn >> (PAGE_SHIFT - VTD_PAGE_SHIFT);
152 static inline unsigned long mm_to_dma_pfn(unsigned long mm_pfn)
154 return mm_pfn << (PAGE_SHIFT - VTD_PAGE_SHIFT);
156 static inline unsigned long page_to_dma_pfn(struct page *pg)
158 return mm_to_dma_pfn(page_to_pfn(pg));
160 static inline unsigned long virt_to_dma_pfn(void *p)
162 return page_to_dma_pfn(virt_to_page(p));
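/*
 * Illustrative note (not part of the original driver): on x86 PAGE_SHIFT and
 * VTD_PAGE_SHIFT are both 12, so the MM and DMA PFN spaces coincide and the
 * conversions above are identity operations; they only shift when MM pages
 * are larger than the 4KiB VT-d page.  The helper name is hypothetical.
 */
static inline unsigned long example_dma_pfns_per_mm_page(void)
{
	return 1UL << (PAGE_SHIFT - VTD_PAGE_SHIFT);
}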
165 /* global iommu list, set NULL for ignored DMAR units */
166 static struct intel_iommu **g_iommus;
168 static void __init check_tylersburg_isoch(void);
169 static int rwbf_quirk;
172 * set to 1 to panic the kernel if VT-d cannot be successfully enabled
173 * (used when kernel is launched w/ TXT)
175 static int force_on = 0;
180 * 12-63: Context Ptr (12 - (haw-1))
187 #define ROOT_ENTRY_NR (VTD_PAGE_SIZE/sizeof(struct root_entry))
188 static inline bool root_present(struct root_entry *root)
190 return (root->val & 1);
192 static inline void set_root_present(struct root_entry *root)
196 static inline void set_root_value(struct root_entry *root, unsigned long value)
198 root->val |= value & VTD_PAGE_MASK;
201 static inline struct context_entry *
202 get_context_addr_from_root(struct root_entry *root)
204 return (struct context_entry *)
205 (root_present(root)?phys_to_virt(
206 root->val & VTD_PAGE_MASK) :
213 * 1: fault processing disable
214 * 2-3: translation type
215 * 12-63: address space root
221 struct context_entry {
226 static inline bool context_present(struct context_entry *context)
228 return (context->lo & 1);
230 static inline void context_set_present(struct context_entry *context)
235 static inline void context_set_fault_enable(struct context_entry *context)
237 context->lo &= (((u64)-1) << 2) | 1;
240 static inline void context_set_translation_type(struct context_entry *context,
243 context->lo &= (((u64)-1) << 4) | 3;
244 context->lo |= (value & 3) << 2;
247 static inline void context_set_address_root(struct context_entry *context,
250 context->lo |= value & VTD_PAGE_MASK;
253 static inline void context_set_address_width(struct context_entry *context,
256 context->hi |= value & 7;
259 static inline void context_set_domain_id(struct context_entry *context,
262 context->hi |= (value & ((1 << 16) - 1)) << 8;
265 static inline void context_clear_entry(struct context_entry *context)
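/*
 * Illustrative sketch (not part of the original driver): how the setters
 * above combine to build a present, fault-enabled, multi-level context
 * entry.  The function name and argument values are hypothetical; the real
 * sequence lives in domain_context_mapping_one() further down.
 */
static inline void example_compose_context_entry(struct context_entry *ce,
						 u64 pgtable_phys,
						 int domain_id, int agaw)
{
	context_clear_entry(ce);
	context_set_domain_id(ce, domain_id);
	context_set_address_width(ce, agaw);
	context_set_address_root(ce, pgtable_phys);
	context_set_translation_type(ce, CONTEXT_TT_MULTI_LEVEL);
	context_set_fault_enable(ce);
	context_set_present(ce);
}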
278 * 12-63: Host physical address
284 static inline void dma_clear_pte(struct dma_pte *pte)
289 static inline u64 dma_pte_addr(struct dma_pte *pte)
292 return pte->val & VTD_PAGE_MASK;
294 /* Must have a full atomic 64-bit read */
295 return __cmpxchg64(&pte->val, 0ULL, 0ULL) & VTD_PAGE_MASK;
299 static inline bool dma_pte_present(struct dma_pte *pte)
301 return (pte->val & 3) != 0;
304 static inline bool dma_pte_superpage(struct dma_pte *pte)
306 return (pte->val & DMA_PTE_LARGE_PAGE);
309 static inline int first_pte_in_page(struct dma_pte *pte)
311 return !((unsigned long)pte & ~VTD_PAGE_MASK);
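/*
 * Worked example (illustrative only): a struct dma_pte is 8 bytes, so one
 * 4KiB page-table page holds 512 entries.  first_pte_in_page() is therefore
 * true exactly when the low 12 bits of the PTE pointer are zero, i.e. for
 * entry 0 of a page-table page.
 */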
315 * This domain is a static identity mapping domain.
316 * 1. This domain creates a static 1:1 mapping to all usable memory.
317 * 2. It maps to each iommu if successful.
318 * 3. Each iommu maps to this domain if successful.
320 static struct dmar_domain *si_domain;
321 static int hw_pass_through = 1;
323 /* domain represents a virtual machine; more than one device
324 * across iommus may be owned by one domain, e.g. a kvm guest.
326 #define DOMAIN_FLAG_VIRTUAL_MACHINE (1 << 0)
328 /* si_domain contains multiple devices */
329 #define DOMAIN_FLAG_STATIC_IDENTITY (1 << 1)
331 /* define the limit of IOMMUs supported in each domain */
333 # define IOMMU_UNITS_SUPPORTED MAX_IO_APICS
335 # define IOMMU_UNITS_SUPPORTED 64
339 int id; /* domain id */
340 int nid; /* node id */
341 DECLARE_BITMAP(iommu_bmp, IOMMU_UNITS_SUPPORTED);
342 /* bitmap of iommus this domain uses*/
344 struct list_head devices; /* all devices' list */
345 struct iova_domain iovad; /* iova's that belong to this domain */
347 struct dma_pte *pgd; /* virtual address */
348 int gaw; /* max guest address width */
350 /* adjusted guest address width, 0 is level 2 30-bit */
353 int flags; /* flags to find out type of domain */
355 int iommu_coherency;/* indicate coherency of iommu access */
356 int iommu_snooping; /* indicate snooping control feature*/
357 int iommu_count; /* reference count of iommu */
358 int iommu_superpage;/* Level of superpages supported:
359 0 == 4KiB (no superpages), 1 == 2MiB,
360 2 == 1GiB, 3 == 512GiB, 4 == 1TiB */
361 spinlock_t iommu_lock; /* protect iommu set in domain */
362 u64 max_addr; /* maximum mapped address */
365 /* PCI domain-device relationship */
366 struct device_domain_info {
367 struct list_head link; /* link to domain siblings */
368 struct list_head global; /* link to global list */
369 u8 bus; /* PCI bus number */
370 u8 devfn; /* PCI devfn number */
371 struct device *dev; /* it's NULL for PCIe-to-PCI bridge */
372 struct intel_iommu *iommu; /* IOMMU used by this device */
373 struct dmar_domain *domain; /* pointer to domain */
376 struct dmar_rmrr_unit {
377 struct list_head list; /* list of rmrr units */
378 struct acpi_dmar_header *hdr; /* ACPI header */
379 u64 base_address; /* reserved base address*/
380 u64 end_address; /* reserved end address */
381 struct dmar_dev_scope *devices; /* target devices */
382 int devices_cnt; /* target device count */
385 struct dmar_atsr_unit {
386 struct list_head list; /* list of ATSR units */
387 struct acpi_dmar_header *hdr; /* ACPI header */
388 struct dmar_dev_scope *devices; /* target devices */
389 int devices_cnt; /* target device count */
390 u8 include_all:1; /* include all ports */
393 static LIST_HEAD(dmar_atsr_units);
394 static LIST_HEAD(dmar_rmrr_units);
396 #define for_each_rmrr_units(rmrr) \
397 list_for_each_entry(rmrr, &dmar_rmrr_units, list)
399 static void flush_unmaps_timeout(unsigned long data);
401 static DEFINE_TIMER(unmap_timer, flush_unmaps_timeout, 0, 0);
403 #define HIGH_WATER_MARK 250
404 struct deferred_flush_tables {
406 struct iova *iova[HIGH_WATER_MARK];
407 struct dmar_domain *domain[HIGH_WATER_MARK];
408 struct page *freelist[HIGH_WATER_MARK];
411 static struct deferred_flush_tables *deferred_flush;
413 /* bitmap for indexing intel_iommus */
414 static int g_num_of_iommus;
416 static DEFINE_SPINLOCK(async_umap_flush_lock);
417 static LIST_HEAD(unmaps_to_do);
420 static long list_size;
422 static void domain_exit(struct dmar_domain *domain);
423 static void domain_remove_dev_info(struct dmar_domain *domain);
424 static void domain_remove_one_dev_info(struct dmar_domain *domain,
426 static void iommu_detach_dependent_devices(struct intel_iommu *iommu,
428 static int domain_detach_iommu(struct dmar_domain *domain,
429 struct intel_iommu *iommu);
431 #ifdef CONFIG_INTEL_IOMMU_DEFAULT_ON
432 int dmar_disabled = 0;
434 int dmar_disabled = 1;
435 #endif /*CONFIG_INTEL_IOMMU_DEFAULT_ON*/
437 int intel_iommu_enabled = 0;
438 EXPORT_SYMBOL_GPL(intel_iommu_enabled);
440 static int dmar_map_gfx = 1;
441 static int dmar_forcedac;
442 static int intel_iommu_strict;
443 static int intel_iommu_superpage = 1;
445 int intel_iommu_gfx_mapped;
446 EXPORT_SYMBOL_GPL(intel_iommu_gfx_mapped);
448 #define DUMMY_DEVICE_DOMAIN_INFO ((struct device_domain_info *)(-1))
449 static DEFINE_SPINLOCK(device_domain_lock);
450 static LIST_HEAD(device_domain_list);
452 static const struct iommu_ops intel_iommu_ops;
454 static int __init intel_iommu_setup(char *str)
459 if (!strncmp(str, "on", 2)) {
461 printk(KERN_INFO "Intel-IOMMU: enabled\n");
462 } else if (!strncmp(str, "off", 3)) {
464 printk(KERN_INFO "Intel-IOMMU: disabled\n");
465 } else if (!strncmp(str, "igfx_off", 8)) {
468 "Intel-IOMMU: disable GFX device mapping\n");
469 } else if (!strncmp(str, "forcedac", 8)) {
471 "Intel-IOMMU: Forcing DAC for PCI devices\n");
473 } else if (!strncmp(str, "strict", 6)) {
475 "Intel-IOMMU: disable batched IOTLB flush\n");
476 intel_iommu_strict = 1;
477 } else if (!strncmp(str, "sp_off", 6)) {
479 "Intel-IOMMU: disable supported super page\n");
480 intel_iommu_superpage = 0;
483 str += strcspn(str, ",");
489 __setup("intel_iommu=", intel_iommu_setup);
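/*
 * Usage example (illustrative, not from the original source): the options
 * parsed above are comma separated on the kernel command line, e.g.
 *
 *	intel_iommu=on,strict,sp_off
 *
 * which enables the IOMMU, disables batched IOTLB flushing and disables
 * superpage support.
 */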
491 static struct kmem_cache *iommu_domain_cache;
492 static struct kmem_cache *iommu_devinfo_cache;
493 static struct kmem_cache *iommu_iova_cache;
495 static inline void *alloc_pgtable_page(int node)
500 page = alloc_pages_node(node, GFP_ATOMIC | __GFP_ZERO, 0);
502 vaddr = page_address(page);
506 static inline void free_pgtable_page(void *vaddr)
508 free_page((unsigned long)vaddr);
511 static inline void *alloc_domain_mem(void)
513 return kmem_cache_alloc(iommu_domain_cache, GFP_ATOMIC);
516 static void free_domain_mem(void *vaddr)
518 kmem_cache_free(iommu_domain_cache, vaddr);
521 static inline void * alloc_devinfo_mem(void)
523 return kmem_cache_alloc(iommu_devinfo_cache, GFP_ATOMIC);
526 static inline void free_devinfo_mem(void *vaddr)
528 kmem_cache_free(iommu_devinfo_cache, vaddr);
531 struct iova *alloc_iova_mem(void)
533 return kmem_cache_alloc(iommu_iova_cache, GFP_ATOMIC);
536 void free_iova_mem(struct iova *iova)
538 kmem_cache_free(iommu_iova_cache, iova);
541 static inline int domain_type_is_vm(struct dmar_domain *domain)
543 return domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE;
546 static inline int domain_type_is_vm_or_si(struct dmar_domain *domain)
548 return domain->flags & (DOMAIN_FLAG_VIRTUAL_MACHINE |
549 DOMAIN_FLAG_STATIC_IDENTITY);
552 static int __iommu_calculate_agaw(struct intel_iommu *iommu, int max_gaw)
557 sagaw = cap_sagaw(iommu->cap);
558 for (agaw = width_to_agaw(max_gaw);
560 if (test_bit(agaw, &sagaw))
568 * Calculate max SAGAW for each iommu.
570 int iommu_calculate_max_sagaw(struct intel_iommu *iommu)
572 return __iommu_calculate_agaw(iommu, MAX_AGAW_WIDTH);
576 * calculate agaw for each iommu.
577 * "SAGAW" may be different across iommus, use a default agaw, and
578 * get a supported less agaw for iommus that don't support the default agaw.
580 int iommu_calculate_agaw(struct intel_iommu *iommu)
582 return __iommu_calculate_agaw(iommu, DEFAULT_DOMAIN_ADDRESS_WIDTH);
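/*
 * Worked example (illustrative only): SAGAW bit 1 means 39-bit/3-level and
 * bit 2 means 48-bit/4-level support.  If cap_sagaw() reports 0x4, then
 * __iommu_calculate_agaw(iommu, 48) starts at width_to_agaw(48) == 2, finds
 * bit 2 set and returns agaw 2, while a request for a 39-bit width walks
 * down from agaw 1, finds nothing set and fails.
 */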
585 /* This function only returns a single iommu in a domain */
586 static struct intel_iommu *domain_get_iommu(struct dmar_domain *domain)
590 /* si_domain and vm domain should not get here. */
591 BUG_ON(domain_type_is_vm_or_si(domain));
592 iommu_id = find_first_bit(domain->iommu_bmp, g_num_of_iommus);
593 if (iommu_id < 0 || iommu_id >= g_num_of_iommus)
596 return g_iommus[iommu_id];
599 static void domain_update_iommu_coherency(struct dmar_domain *domain)
601 struct dmar_drhd_unit *drhd;
602 struct intel_iommu *iommu;
605 domain->iommu_coherency = 1;
607 for_each_set_bit(i, domain->iommu_bmp, g_num_of_iommus) {
609 if (!ecap_coherent(g_iommus[i]->ecap)) {
610 domain->iommu_coherency = 0;
617 /* No hardware attached; use lowest common denominator */
619 for_each_active_iommu(iommu, drhd) {
620 if (!ecap_coherent(iommu->ecap)) {
621 domain->iommu_coherency = 0;
628 static void domain_update_iommu_snooping(struct dmar_domain *domain)
632 domain->iommu_snooping = 1;
634 for_each_set_bit(i, domain->iommu_bmp, g_num_of_iommus) {
635 if (!ecap_sc_support(g_iommus[i]->ecap)) {
636 domain->iommu_snooping = 0;
642 static void domain_update_iommu_superpage(struct dmar_domain *domain)
644 struct dmar_drhd_unit *drhd;
645 struct intel_iommu *iommu = NULL;
648 if (!intel_iommu_superpage) {
649 domain->iommu_superpage = 0;
653 /* set iommu_superpage to the smallest common denominator */
655 for_each_active_iommu(iommu, drhd) {
656 mask &= cap_super_page_val(iommu->cap);
663 domain->iommu_superpage = fls(mask);
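/*
 * Worked example (illustrative only): cap_super_page_val() is a bitmask with
 * bit 0 meaning 2MiB and bit 1 meaning 1GiB support.  If every active IOMMU
 * reports 0x1 the accumulated mask stays 0x1 and fls(0x1) == 1, limiting the
 * domain to 2MiB superpages; any IOMMU reporting 0 forces iommu_superpage to
 * 0, i.e. 4KiB pages only.
 */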
666 /* Some capabilities may be different across iommus */
667 static void domain_update_iommu_cap(struct dmar_domain *domain)
669 domain_update_iommu_coherency(domain);
670 domain_update_iommu_snooping(domain);
671 domain_update_iommu_superpage(domain);
674 static struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devfn)
676 struct dmar_drhd_unit *drhd = NULL;
677 struct intel_iommu *iommu;
679 struct pci_dev *ptmp, *pdev = NULL;
683 if (dev_is_pci(dev)) {
684 pdev = to_pci_dev(dev);
685 segment = pci_domain_nr(pdev->bus);
686 } else if (ACPI_COMPANION(dev))
687 dev = &ACPI_COMPANION(dev)->dev;
690 for_each_active_iommu(iommu, drhd) {
691 if (pdev && segment != drhd->segment)
694 for_each_active_dev_scope(drhd->devices,
695 drhd->devices_cnt, i, tmp) {
697 *bus = drhd->devices[i].bus;
698 *devfn = drhd->devices[i].devfn;
702 if (!pdev || !dev_is_pci(tmp))
705 ptmp = to_pci_dev(tmp);
706 if (ptmp->subordinate &&
707 ptmp->subordinate->number <= pdev->bus->number &&
708 ptmp->subordinate->busn_res.end >= pdev->bus->number)
712 if (pdev && drhd->include_all) {
714 *bus = pdev->bus->number;
715 *devfn = pdev->devfn;
726 static void domain_flush_cache(struct dmar_domain *domain,
727 void *addr, int size)
729 if (!domain->iommu_coherency)
730 clflush_cache_range(addr, size);
733 /* Gets context entry for a given bus and devfn */
734 static struct context_entry * device_to_context_entry(struct intel_iommu *iommu,
737 struct root_entry *root;
738 struct context_entry *context;
739 unsigned long phy_addr;
742 spin_lock_irqsave(&iommu->lock, flags);
743 root = &iommu->root_entry[bus];
744 context = get_context_addr_from_root(root);
746 context = (struct context_entry *)
747 alloc_pgtable_page(iommu->node);
749 spin_unlock_irqrestore(&iommu->lock, flags);
752 __iommu_flush_cache(iommu, (void *)context, CONTEXT_SIZE);
753 phy_addr = virt_to_phys((void *)context);
754 set_root_value(root, phy_addr);
755 set_root_present(root);
756 __iommu_flush_cache(iommu, root, sizeof(*root));
758 spin_unlock_irqrestore(&iommu->lock, flags);
759 return &context[devfn];
762 static int device_context_mapped(struct intel_iommu *iommu, u8 bus, u8 devfn)
764 struct root_entry *root;
765 struct context_entry *context;
769 spin_lock_irqsave(&iommu->lock, flags);
770 root = &iommu->root_entry[bus];
771 context = get_context_addr_from_root(root);
776 ret = context_present(&context[devfn]);
778 spin_unlock_irqrestore(&iommu->lock, flags);
782 static void clear_context_table(struct intel_iommu *iommu, u8 bus, u8 devfn)
784 struct root_entry *root;
785 struct context_entry *context;
788 spin_lock_irqsave(&iommu->lock, flags);
789 root = &iommu->root_entry[bus];
790 context = get_context_addr_from_root(root);
792 context_clear_entry(&context[devfn]);
793 __iommu_flush_cache(iommu, &context[devfn], \
796 spin_unlock_irqrestore(&iommu->lock, flags);
799 static void free_context_table(struct intel_iommu *iommu)
801 struct root_entry *root;
804 struct context_entry *context;
806 spin_lock_irqsave(&iommu->lock, flags);
807 if (!iommu->root_entry) {
810 for (i = 0; i < ROOT_ENTRY_NR; i++) {
811 root = &iommu->root_entry[i];
812 context = get_context_addr_from_root(root);
814 free_pgtable_page(context);
816 free_pgtable_page(iommu->root_entry);
817 iommu->root_entry = NULL;
819 spin_unlock_irqrestore(&iommu->lock, flags);
822 static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
823 unsigned long pfn, int *target_level)
825 int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
826 struct dma_pte *parent, *pte = NULL;
827 int level = agaw_to_level(domain->agaw);
830 BUG_ON(!domain->pgd);
832 if (addr_width < BITS_PER_LONG && pfn >> addr_width)
833 /* Address beyond IOMMU's addressing capabilities. */
836 parent = domain->pgd;
841 offset = pfn_level_offset(pfn, level);
842 pte = &parent[offset];
843 if (!*target_level && (dma_pte_superpage(pte) || !dma_pte_present(pte)))
845 if (level == *target_level)
848 if (!dma_pte_present(pte)) {
851 tmp_page = alloc_pgtable_page(domain->nid);
856 domain_flush_cache(domain, tmp_page, VTD_PAGE_SIZE);
857 pteval = ((uint64_t)virt_to_dma_pfn(tmp_page) << VTD_PAGE_SHIFT) | DMA_PTE_READ | DMA_PTE_WRITE;
858 if (cmpxchg64(&pte->val, 0ULL, pteval))
859 /* Someone else set it while we were thinking; use theirs. */
860 free_pgtable_page(tmp_page);
862 domain_flush_cache(domain, pte, sizeof(*pte));
867 parent = phys_to_virt(dma_pte_addr(pte));
872 *target_level = level;
878 /* return the address's pte at a specific level */
879 static struct dma_pte *dma_pfn_level_pte(struct dmar_domain *domain,
881 int level, int *large_page)
883 struct dma_pte *parent, *pte = NULL;
884 int total = agaw_to_level(domain->agaw);
887 parent = domain->pgd;
888 while (level <= total) {
889 offset = pfn_level_offset(pfn, total);
890 pte = &parent[offset];
894 if (!dma_pte_present(pte)) {
899 if (dma_pte_superpage(pte)) {
904 parent = phys_to_virt(dma_pte_addr(pte));
910 /* clear last level pte, a tlb flush should follow */
911 static void dma_pte_clear_range(struct dmar_domain *domain,
912 unsigned long start_pfn,
913 unsigned long last_pfn)
915 int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
916 unsigned int large_page = 1;
917 struct dma_pte *first_pte, *pte;
919 BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width);
920 BUG_ON(addr_width < BITS_PER_LONG && last_pfn >> addr_width);
921 BUG_ON(start_pfn > last_pfn);
923 /* we don't need a lock here; nobody else touches the iova range */
926 first_pte = pte = dma_pfn_level_pte(domain, start_pfn, 1, &large_page);
928 start_pfn = align_to_level(start_pfn + 1, large_page + 1);
933 start_pfn += lvl_to_nr_pages(large_page);
935 } while (start_pfn <= last_pfn && !first_pte_in_page(pte));
937 domain_flush_cache(domain, first_pte,
938 (void *)pte - (void *)first_pte);
940 } while (start_pfn && start_pfn <= last_pfn);
943 static void dma_pte_free_level(struct dmar_domain *domain, int level,
944 struct dma_pte *pte, unsigned long pfn,
945 unsigned long start_pfn, unsigned long last_pfn)
947 pfn = max(start_pfn, pfn);
948 pte = &pte[pfn_level_offset(pfn, level)];
951 unsigned long level_pfn;
952 struct dma_pte *level_pte;
954 if (!dma_pte_present(pte) || dma_pte_superpage(pte))
957 level_pfn = pfn & level_mask(level - 1);
958 level_pte = phys_to_virt(dma_pte_addr(pte));
961 dma_pte_free_level(domain, level - 1, level_pte,
962 level_pfn, start_pfn, last_pfn);
964 /* If range covers entire pagetable, free it */
965 if (!(start_pfn > level_pfn ||
966 last_pfn < level_pfn + level_size(level) - 1)) {
968 domain_flush_cache(domain, pte, sizeof(*pte));
969 free_pgtable_page(level_pte);
972 pfn += level_size(level);
973 } while (!first_pte_in_page(++pte) && pfn <= last_pfn);
976 /* free page table pages. last level pte should already be cleared */
977 static void dma_pte_free_pagetable(struct dmar_domain *domain,
978 unsigned long start_pfn,
979 unsigned long last_pfn)
981 int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
983 BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width);
984 BUG_ON(addr_width < BITS_PER_LONG && last_pfn >> addr_width);
985 BUG_ON(start_pfn > last_pfn);
987 /* We don't need a lock here; nobody else touches the iova range */
988 dma_pte_free_level(domain, agaw_to_level(domain->agaw),
989 domain->pgd, 0, start_pfn, last_pfn);
992 if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) {
993 free_pgtable_page(domain->pgd);
998 /* When a page at a given level is being unlinked from its parent, we don't
999 need to *modify* it at all. All we need to do is make a list of all the
1000 pages which can be freed just as soon as we've flushed the IOTLB and we
1001 know the hardware page-walk will no longer touch them.
1002 The 'pte' argument is the *parent* PTE, pointing to the page that is to be freed. */
1004 static struct page *dma_pte_list_pagetables(struct dmar_domain *domain,
1005 int level, struct dma_pte *pte,
1006 struct page *freelist)
1010 pg = pfn_to_page(dma_pte_addr(pte) >> PAGE_SHIFT);
1011 pg->freelist = freelist;
1017 pte = page_address(pg);
1019 if (dma_pte_present(pte) && !dma_pte_superpage(pte))
1020 freelist = dma_pte_list_pagetables(domain, level - 1,
1023 } while (!first_pte_in_page(pte));
1028 static struct page *dma_pte_clear_level(struct dmar_domain *domain, int level,
1029 struct dma_pte *pte, unsigned long pfn,
1030 unsigned long start_pfn,
1031 unsigned long last_pfn,
1032 struct page *freelist)
1034 struct dma_pte *first_pte = NULL, *last_pte = NULL;
1036 pfn = max(start_pfn, pfn);
1037 pte = &pte[pfn_level_offset(pfn, level)];
1040 unsigned long level_pfn;
1042 if (!dma_pte_present(pte))
1045 level_pfn = pfn & level_mask(level);
1047 /* If range covers entire pagetable, free it */
1048 if (start_pfn <= level_pfn &&
1049 last_pfn >= level_pfn + level_size(level) - 1) {
1050 /* These subordinate page tables are going away entirely. Don't
1051 bother to clear them; we're just going to *free* them. */
1052 if (level > 1 && !dma_pte_superpage(pte))
1053 freelist = dma_pte_list_pagetables(domain, level - 1, pte, freelist);
1059 } else if (level > 1) {
1060 /* Recurse down into a level that isn't *entirely* obsolete */
1061 freelist = dma_pte_clear_level(domain, level - 1,
1062 phys_to_virt(dma_pte_addr(pte)),
1063 level_pfn, start_pfn, last_pfn,
1067 pfn += level_size(level);
1068 } while (!first_pte_in_page(++pte) && pfn <= last_pfn);
1071 domain_flush_cache(domain, first_pte,
1072 (void *)++last_pte - (void *)first_pte);
1077 /* We can't just free the pages because the IOMMU may still be walking
1078 the page tables, and may have cached the intermediate levels. The
1079 pages can only be freed after the IOTLB flush has been done. */
1080 struct page *domain_unmap(struct dmar_domain *domain,
1081 unsigned long start_pfn,
1082 unsigned long last_pfn)
1084 int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
1085 struct page *freelist = NULL;
1087 BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width);
1088 BUG_ON(addr_width < BITS_PER_LONG && last_pfn >> addr_width);
1089 BUG_ON(start_pfn > last_pfn);
1091 /* we don't need a lock here; nobody else touches the iova range */
1092 freelist = dma_pte_clear_level(domain, agaw_to_level(domain->agaw),
1093 domain->pgd, 0, start_pfn, last_pfn, NULL);
1096 if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) {
1097 struct page *pgd_page = virt_to_page(domain->pgd);
1098 pgd_page->freelist = freelist;
1099 freelist = pgd_page;
1107 void dma_free_pagelist(struct page *freelist)
1111 while ((pg = freelist)) {
1112 freelist = pg->freelist;
1113 free_pgtable_page(page_address(pg));
1117 /* iommu handling */
1118 static int iommu_alloc_root_entry(struct intel_iommu *iommu)
1120 struct root_entry *root;
1121 unsigned long flags;
1123 root = (struct root_entry *)alloc_pgtable_page(iommu->node);
1127 __iommu_flush_cache(iommu, root, ROOT_SIZE);
1129 spin_lock_irqsave(&iommu->lock, flags);
1130 iommu->root_entry = root;
1131 spin_unlock_irqrestore(&iommu->lock, flags);
1136 static void iommu_set_root_entry(struct intel_iommu *iommu)
1142 addr = iommu->root_entry;
1144 raw_spin_lock_irqsave(&iommu->register_lock, flag);
1145 dmar_writeq(iommu->reg + DMAR_RTADDR_REG, virt_to_phys(addr));
1147 writel(iommu->gcmd | DMA_GCMD_SRTP, iommu->reg + DMAR_GCMD_REG);
1149 /* Make sure hardware completes it */
1150 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
1151 readl, (sts & DMA_GSTS_RTPS), sts);
1153 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1156 static void iommu_flush_write_buffer(struct intel_iommu *iommu)
1161 if (!rwbf_quirk && !cap_rwbf(iommu->cap))
1164 raw_spin_lock_irqsave(&iommu->register_lock, flag);
1165 writel(iommu->gcmd | DMA_GCMD_WBF, iommu->reg + DMAR_GCMD_REG);
1167 /* Make sure hardware completes it */
1168 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
1169 readl, (!(val & DMA_GSTS_WBFS)), val);
1171 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1174 /* return value determines if we need a write buffer flush */
1175 static void __iommu_flush_context(struct intel_iommu *iommu,
1176 u16 did, u16 source_id, u8 function_mask,
1183 case DMA_CCMD_GLOBAL_INVL:
1184 val = DMA_CCMD_GLOBAL_INVL;
1186 case DMA_CCMD_DOMAIN_INVL:
1187 val = DMA_CCMD_DOMAIN_INVL|DMA_CCMD_DID(did);
1189 case DMA_CCMD_DEVICE_INVL:
1190 val = DMA_CCMD_DEVICE_INVL|DMA_CCMD_DID(did)
1191 | DMA_CCMD_SID(source_id) | DMA_CCMD_FM(function_mask);
1196 val |= DMA_CCMD_ICC;
1198 raw_spin_lock_irqsave(&iommu->register_lock, flag);
1199 dmar_writeq(iommu->reg + DMAR_CCMD_REG, val);
1201 /* Make sure hardware completes it */
1202 IOMMU_WAIT_OP(iommu, DMAR_CCMD_REG,
1203 dmar_readq, (!(val & DMA_CCMD_ICC)), val);
1205 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1208 /* return value determines if we need a write buffer flush */
1209 static void __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did,
1210 u64 addr, unsigned int size_order, u64 type)
1212 int tlb_offset = ecap_iotlb_offset(iommu->ecap);
1213 u64 val = 0, val_iva = 0;
1217 case DMA_TLB_GLOBAL_FLUSH:
1218 /* global flush doesn't need to set IVA_REG */
1219 val = DMA_TLB_GLOBAL_FLUSH|DMA_TLB_IVT;
1221 case DMA_TLB_DSI_FLUSH:
1222 val = DMA_TLB_DSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
1224 case DMA_TLB_PSI_FLUSH:
1225 val = DMA_TLB_PSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
1226 /* IH bit is passed in as part of address */
1227 val_iva = size_order | addr;
1232 /* Note: set drain read/write */
1235 * This is probably meant to be extra secure. It looks like we can
1236 * ignore it without any impact.
1238 if (cap_read_drain(iommu->cap))
1239 val |= DMA_TLB_READ_DRAIN;
1241 if (cap_write_drain(iommu->cap))
1242 val |= DMA_TLB_WRITE_DRAIN;
1244 raw_spin_lock_irqsave(&iommu->register_lock, flag);
1245 /* Note: Only uses first TLB reg currently */
1247 dmar_writeq(iommu->reg + tlb_offset, val_iva);
1248 dmar_writeq(iommu->reg + tlb_offset + 8, val);
1250 /* Make sure hardware completes it */
1251 IOMMU_WAIT_OP(iommu, tlb_offset + 8,
1252 dmar_readq, (!(val & DMA_TLB_IVT)), val);
1254 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1256 /* check IOTLB invalidation granularity */
1257 if (DMA_TLB_IAIG(val) == 0)
1258 printk(KERN_ERR "IOMMU: flush IOTLB failed\n");
1259 if (DMA_TLB_IAIG(val) != DMA_TLB_IIRG(type))
1260 pr_debug("IOMMU: tlb flush request %Lx, actual %Lx\n",
1261 (unsigned long long)DMA_TLB_IIRG(type),
1262 (unsigned long long)DMA_TLB_IAIG(val));
1265 static struct device_domain_info *
1266 iommu_support_dev_iotlb (struct dmar_domain *domain, struct intel_iommu *iommu,
1270 unsigned long flags;
1271 struct device_domain_info *info;
1272 struct pci_dev *pdev;
1274 if (!ecap_dev_iotlb_support(iommu->ecap))
1280 spin_lock_irqsave(&device_domain_lock, flags);
1281 list_for_each_entry(info, &domain->devices, link)
1282 if (info->iommu == iommu && info->bus == bus &&
1283 info->devfn == devfn) {
1287 spin_unlock_irqrestore(&device_domain_lock, flags);
1289 if (!found || !info->dev || !dev_is_pci(info->dev))
1292 pdev = to_pci_dev(info->dev);
1294 if (!pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ATS))
1297 if (!dmar_find_matched_atsr_unit(pdev))
1303 static void iommu_enable_dev_iotlb(struct device_domain_info *info)
1305 if (!info || !dev_is_pci(info->dev))
1308 pci_enable_ats(to_pci_dev(info->dev), VTD_PAGE_SHIFT);
1311 static void iommu_disable_dev_iotlb(struct device_domain_info *info)
1313 if (!info->dev || !dev_is_pci(info->dev) ||
1314 !pci_ats_enabled(to_pci_dev(info->dev)))
1317 pci_disable_ats(to_pci_dev(info->dev));
1320 static void iommu_flush_dev_iotlb(struct dmar_domain *domain,
1321 u64 addr, unsigned mask)
1324 unsigned long flags;
1325 struct device_domain_info *info;
1327 spin_lock_irqsave(&device_domain_lock, flags);
1328 list_for_each_entry(info, &domain->devices, link) {
1329 struct pci_dev *pdev;
1330 if (!info->dev || !dev_is_pci(info->dev))
1333 pdev = to_pci_dev(info->dev);
1334 if (!pci_ats_enabled(pdev))
1337 sid = info->bus << 8 | info->devfn;
1338 qdep = pci_ats_queue_depth(pdev);
1339 qi_flush_dev_iotlb(info->iommu, sid, qdep, addr, mask);
1341 spin_unlock_irqrestore(&device_domain_lock, flags);
1344 static void iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did,
1345 unsigned long pfn, unsigned int pages, int ih, int map)
1347 unsigned int mask = ilog2(__roundup_pow_of_two(pages));
1348 uint64_t addr = (uint64_t)pfn << VTD_PAGE_SHIFT;
1355 * Fallback to domain selective flush if no PSI support or the size is too big.
1357 * PSI requires page size to be 2 ^ x, and the base address is naturally
1358 * aligned to the size
1360 if (!cap_pgsel_inv(iommu->cap) || mask > cap_max_amask_val(iommu->cap))
1361 iommu->flush.flush_iotlb(iommu, did, 0, 0,
1364 iommu->flush.flush_iotlb(iommu, did, addr | ih, mask,
1368 * In caching mode, changes of pages from non-present to present require
1369 * flush. However, device IOTLB doesn't need to be flushed in this case.
1371 if (!cap_caching_mode(iommu->cap) || !map)
1372 iommu_flush_dev_iotlb(iommu->domains[did], addr, mask);
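/*
 * Worked example (illustrative only): a request to flush 3 pages is rounded
 * up to the next power of two, so mask = ilog2(4) == 2 and the page-selective
 * invalidation covers a naturally aligned block of 2^2 = 4 pages containing
 * the requested range.
 */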
1375 static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu)
1378 unsigned long flags;
1380 raw_spin_lock_irqsave(&iommu->register_lock, flags);
1381 pmen = readl(iommu->reg + DMAR_PMEN_REG);
1382 pmen &= ~DMA_PMEN_EPM;
1383 writel(pmen, iommu->reg + DMAR_PMEN_REG);
1385 /* wait for the protected region status bit to clear */
1386 IOMMU_WAIT_OP(iommu, DMAR_PMEN_REG,
1387 readl, !(pmen & DMA_PMEN_PRS), pmen);
1389 raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
1392 static int iommu_enable_translation(struct intel_iommu *iommu)
1395 unsigned long flags;
1397 raw_spin_lock_irqsave(&iommu->register_lock, flags);
1398 iommu->gcmd |= DMA_GCMD_TE;
1399 writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
1401 /* Make sure hardware completes it */
1402 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
1403 readl, (sts & DMA_GSTS_TES), sts);
1405 raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
1409 static int iommu_disable_translation(struct intel_iommu *iommu)
1414 raw_spin_lock_irqsave(&iommu->register_lock, flag);
1415 iommu->gcmd &= ~DMA_GCMD_TE;
1416 writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
1418 /* Make sure hardware completes it */
1419 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
1420 readl, (!(sts & DMA_GSTS_TES)), sts);
1422 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1427 static int iommu_init_domains(struct intel_iommu *iommu)
1429 unsigned long ndomains;
1430 unsigned long nlongs;
1432 ndomains = cap_ndoms(iommu->cap);
1433 pr_debug("IOMMU%d: Number of Domains supported <%ld>\n",
1434 iommu->seq_id, ndomains);
1435 nlongs = BITS_TO_LONGS(ndomains);
1437 spin_lock_init(&iommu->lock);
1439 /* TBD: there might be 64K domains,
1440 * consider other allocation for future chips
1442 iommu->domain_ids = kcalloc(nlongs, sizeof(unsigned long), GFP_KERNEL);
1443 if (!iommu->domain_ids) {
1444 pr_err("IOMMU%d: allocating domain id array failed\n",
1448 iommu->domains = kcalloc(ndomains, sizeof(struct dmar_domain *),
1450 if (!iommu->domains) {
1451 pr_err("IOMMU%d: allocating domain array failed\n",
1453 kfree(iommu->domain_ids);
1454 iommu->domain_ids = NULL;
1459 * if Caching mode is set, then invalid translations are tagged
1460 * with domainid 0. Hence we need to pre-allocate it.
1462 if (cap_caching_mode(iommu->cap))
1463 set_bit(0, iommu->domain_ids);
1467 static void free_dmar_iommu(struct intel_iommu *iommu)
1469 struct dmar_domain *domain;
1472 if ((iommu->domains) && (iommu->domain_ids)) {
1473 for_each_set_bit(i, iommu->domain_ids, cap_ndoms(iommu->cap)) {
1475 * Domain id 0 is reserved for invalid translation
1476 * if hardware supports caching mode.
1478 if (cap_caching_mode(iommu->cap) && i == 0)
1481 domain = iommu->domains[i];
1482 clear_bit(i, iommu->domain_ids);
1483 if (domain_detach_iommu(domain, iommu) == 0)
1484 domain_exit(domain);
1488 if (iommu->gcmd & DMA_GCMD_TE)
1489 iommu_disable_translation(iommu);
1491 kfree(iommu->domains);
1492 kfree(iommu->domain_ids);
1493 iommu->domains = NULL;
1494 iommu->domain_ids = NULL;
1496 g_iommus[iommu->seq_id] = NULL;
1498 /* free context mapping */
1499 free_context_table(iommu);
1502 static struct dmar_domain *alloc_domain(int flags)
1504 /* domain id for virtual machine, it won't be set in context */
1505 static atomic_t vm_domid = ATOMIC_INIT(0);
1506 struct dmar_domain *domain;
1508 domain = alloc_domain_mem();
1512 memset(domain, 0, sizeof(*domain));
1514 domain->flags = flags;
1515 spin_lock_init(&domain->iommu_lock);
1516 INIT_LIST_HEAD(&domain->devices);
1517 if (flags & DOMAIN_FLAG_VIRTUAL_MACHINE)
1518 domain->id = atomic_inc_return(&vm_domid);
1523 static int __iommu_attach_domain(struct dmar_domain *domain,
1524 struct intel_iommu *iommu)
1527 unsigned long ndomains;
1529 ndomains = cap_ndoms(iommu->cap);
1530 num = find_first_zero_bit(iommu->domain_ids, ndomains);
1531 if (num < ndomains) {
1532 set_bit(num, iommu->domain_ids);
1533 iommu->domains[num] = domain;
1541 static int iommu_attach_domain(struct dmar_domain *domain,
1542 struct intel_iommu *iommu)
1545 unsigned long flags;
1547 spin_lock_irqsave(&iommu->lock, flags);
1548 num = __iommu_attach_domain(domain, iommu);
1549 spin_unlock_irqrestore(&iommu->lock, flags);
1551 pr_err("IOMMU: no free domain ids\n");
1556 static int iommu_attach_vm_domain(struct dmar_domain *domain,
1557 struct intel_iommu *iommu)
1560 unsigned long ndomains;
1562 ndomains = cap_ndoms(iommu->cap);
1563 for_each_set_bit(num, iommu->domain_ids, ndomains)
1564 if (iommu->domains[num] == domain)
1567 return __iommu_attach_domain(domain, iommu);
1570 static void iommu_detach_domain(struct dmar_domain *domain,
1571 struct intel_iommu *iommu)
1573 unsigned long flags;
1576 spin_lock_irqsave(&iommu->lock, flags);
1577 if (domain_type_is_vm_or_si(domain)) {
1578 ndomains = cap_ndoms(iommu->cap);
1579 for_each_set_bit(num, iommu->domain_ids, ndomains) {
1580 if (iommu->domains[num] == domain) {
1581 clear_bit(num, iommu->domain_ids);
1582 iommu->domains[num] = NULL;
1587 clear_bit(domain->id, iommu->domain_ids);
1588 iommu->domains[domain->id] = NULL;
1590 spin_unlock_irqrestore(&iommu->lock, flags);
1593 static void domain_attach_iommu(struct dmar_domain *domain,
1594 struct intel_iommu *iommu)
1596 unsigned long flags;
1598 spin_lock_irqsave(&domain->iommu_lock, flags);
1599 if (!test_and_set_bit(iommu->seq_id, domain->iommu_bmp)) {
1600 domain->iommu_count++;
1601 if (domain->iommu_count == 1)
1602 domain->nid = iommu->node;
1603 domain_update_iommu_cap(domain);
1605 spin_unlock_irqrestore(&domain->iommu_lock, flags);
1608 static int domain_detach_iommu(struct dmar_domain *domain,
1609 struct intel_iommu *iommu)
1611 unsigned long flags;
1612 int count = INT_MAX;
1614 spin_lock_irqsave(&domain->iommu_lock, flags);
1615 if (test_and_clear_bit(iommu->seq_id, domain->iommu_bmp)) {
1616 count = --domain->iommu_count;
1617 domain_update_iommu_cap(domain);
1619 spin_unlock_irqrestore(&domain->iommu_lock, flags);
1624 static struct iova_domain reserved_iova_list;
1625 static struct lock_class_key reserved_rbtree_key;
1627 static int dmar_init_reserved_ranges(void)
1629 struct pci_dev *pdev = NULL;
1633 init_iova_domain(&reserved_iova_list, DMA_32BIT_PFN);
1635 lockdep_set_class(&reserved_iova_list.iova_rbtree_lock,
1636 &reserved_rbtree_key);
1638 /* IOAPIC ranges shouldn't be accessed by DMA */
1639 iova = reserve_iova(&reserved_iova_list, IOVA_PFN(IOAPIC_RANGE_START),
1640 IOVA_PFN(IOAPIC_RANGE_END));
1642 printk(KERN_ERR "Reserve IOAPIC range failed\n");
1646 /* Reserve all PCI MMIO to avoid peer-to-peer access */
1647 for_each_pci_dev(pdev) {
1650 for (i = 0; i < PCI_NUM_RESOURCES; i++) {
1651 r = &pdev->resource[i];
1652 if (!r->flags || !(r->flags & IORESOURCE_MEM))
1654 iova = reserve_iova(&reserved_iova_list,
1658 printk(KERN_ERR "Reserve iova failed\n");
1666 static void domain_reserve_special_ranges(struct dmar_domain *domain)
1668 copy_reserved_iova(&reserved_iova_list, &domain->iovad);
1671 static inline int guestwidth_to_adjustwidth(int gaw)
1674 int r = (gaw - 12) % 9;
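/*
 * Worked example (illustrative only): the adjusted width rounds (gaw - 12) up
 * to a multiple of the 9-bit level stride.  A 40-bit guest width gives
 * r = (40 - 12) % 9 == 1 and is rounded up to 48 bits, while 48 itself has
 * r == 0 and is returned unchanged.
 */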
1685 static int domain_init(struct dmar_domain *domain, int guest_width)
1687 struct intel_iommu *iommu;
1688 int adjust_width, agaw;
1689 unsigned long sagaw;
1691 init_iova_domain(&domain->iovad, DMA_32BIT_PFN);
1692 domain_reserve_special_ranges(domain);
1694 /* calculate AGAW */
1695 iommu = domain_get_iommu(domain);
1696 if (guest_width > cap_mgaw(iommu->cap))
1697 guest_width = cap_mgaw(iommu->cap);
1698 domain->gaw = guest_width;
1699 adjust_width = guestwidth_to_adjustwidth(guest_width);
1700 agaw = width_to_agaw(adjust_width);
1701 sagaw = cap_sagaw(iommu->cap);
1702 if (!test_bit(agaw, &sagaw)) {
1703 /* hardware doesn't support it, choose a bigger one */
1704 pr_debug("IOMMU: hardware doesn't support agaw %d\n", agaw);
1705 agaw = find_next_bit(&sagaw, 5, agaw);
1709 domain->agaw = agaw;
1711 if (ecap_coherent(iommu->ecap))
1712 domain->iommu_coherency = 1;
1714 domain->iommu_coherency = 0;
1716 if (ecap_sc_support(iommu->ecap))
1717 domain->iommu_snooping = 1;
1719 domain->iommu_snooping = 0;
1721 if (intel_iommu_superpage)
1722 domain->iommu_superpage = fls(cap_super_page_val(iommu->cap));
1724 domain->iommu_superpage = 0;
1726 domain->nid = iommu->node;
1728 /* always allocate the top pgd */
1729 domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
1732 __iommu_flush_cache(iommu, domain->pgd, PAGE_SIZE);
1736 static void domain_exit(struct dmar_domain *domain)
1738 struct dmar_drhd_unit *drhd;
1739 struct intel_iommu *iommu;
1740 struct page *freelist = NULL;
1742 /* Domain 0 is reserved, so don't process it */
1746 /* Flush any lazy unmaps that may reference this domain */
1747 if (!intel_iommu_strict)
1748 flush_unmaps_timeout(0);
1750 /* remove associated devices */
1751 domain_remove_dev_info(domain);
1754 put_iova_domain(&domain->iovad);
1756 freelist = domain_unmap(domain, 0, DOMAIN_MAX_PFN(domain->gaw));
1758 /* clear attached or cached domains */
1760 for_each_active_iommu(iommu, drhd)
1761 iommu_detach_domain(domain, iommu);
1764 dma_free_pagelist(freelist);
1766 free_domain_mem(domain);
1769 static int domain_context_mapping_one(struct dmar_domain *domain,
1770 struct intel_iommu *iommu,
1771 u8 bus, u8 devfn, int translation)
1773 struct context_entry *context;
1774 unsigned long flags;
1775 struct dma_pte *pgd;
1778 struct device_domain_info *info = NULL;
1780 pr_debug("Set context mapping for %02x:%02x.%d\n",
1781 bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
1783 BUG_ON(!domain->pgd);
1784 BUG_ON(translation != CONTEXT_TT_PASS_THROUGH &&
1785 translation != CONTEXT_TT_MULTI_LEVEL);
1787 context = device_to_context_entry(iommu, bus, devfn);
1790 spin_lock_irqsave(&iommu->lock, flags);
1791 if (context_present(context)) {
1792 spin_unlock_irqrestore(&iommu->lock, flags);
1799 if (domain_type_is_vm_or_si(domain)) {
1800 if (domain_type_is_vm(domain)) {
1801 id = iommu_attach_vm_domain(domain, iommu);
1803 spin_unlock_irqrestore(&iommu->lock, flags);
1804 pr_err("IOMMU: no free domain ids\n");
1809 /* Skip top levels of page tables for
1810 * an iommu which has a smaller agaw than the default.
1811 * Unnecessary for PT mode.
1813 if (translation != CONTEXT_TT_PASS_THROUGH) {
1814 for (agaw = domain->agaw; agaw != iommu->agaw; agaw--) {
1815 pgd = phys_to_virt(dma_pte_addr(pgd));
1816 if (!dma_pte_present(pgd)) {
1817 spin_unlock_irqrestore(&iommu->lock, flags);
1824 context_set_domain_id(context, id);
1826 if (translation != CONTEXT_TT_PASS_THROUGH) {
1827 info = iommu_support_dev_iotlb(domain, iommu, bus, devfn);
1828 translation = info ? CONTEXT_TT_DEV_IOTLB :
1829 CONTEXT_TT_MULTI_LEVEL;
1832 * In pass through mode, AW must be programmed to indicate the largest
1833 * AGAW value supported by hardware. And ASR is ignored by hardware.
1835 if (unlikely(translation == CONTEXT_TT_PASS_THROUGH))
1836 context_set_address_width(context, iommu->msagaw);
1838 context_set_address_root(context, virt_to_phys(pgd));
1839 context_set_address_width(context, iommu->agaw);
1842 context_set_translation_type(context, translation);
1843 context_set_fault_enable(context);
1844 context_set_present(context);
1845 domain_flush_cache(domain, context, sizeof(*context));
1848 * It's a non-present to present mapping. If hardware doesn't cache
1849 * non-present entries we only need to flush the write-buffer. If it
1850 * _does_ cache non-present entries, then it does so in the special
1851 * domain #0, which we have to flush:
1853 if (cap_caching_mode(iommu->cap)) {
1854 iommu->flush.flush_context(iommu, 0,
1855 (((u16)bus) << 8) | devfn,
1856 DMA_CCMD_MASK_NOBIT,
1857 DMA_CCMD_DEVICE_INVL);
1858 iommu->flush.flush_iotlb(iommu, id, 0, 0, DMA_TLB_DSI_FLUSH);
1860 iommu_flush_write_buffer(iommu);
1862 iommu_enable_dev_iotlb(info);
1863 spin_unlock_irqrestore(&iommu->lock, flags);
1865 domain_attach_iommu(domain, iommu);
1870 struct domain_context_mapping_data {
1871 struct dmar_domain *domain;
1872 struct intel_iommu *iommu;
1876 static int domain_context_mapping_cb(struct pci_dev *pdev,
1877 u16 alias, void *opaque)
1879 struct domain_context_mapping_data *data = opaque;
1881 return domain_context_mapping_one(data->domain, data->iommu,
1882 PCI_BUS_NUM(alias), alias & 0xff,
1887 domain_context_mapping(struct dmar_domain *domain, struct device *dev,
1890 struct intel_iommu *iommu;
1892 struct domain_context_mapping_data data;
1894 iommu = device_to_iommu(dev, &bus, &devfn);
1898 if (!dev_is_pci(dev))
1899 return domain_context_mapping_one(domain, iommu, bus, devfn,
1902 data.domain = domain;
1904 data.translation = translation;
1906 return pci_for_each_dma_alias(to_pci_dev(dev),
1907 &domain_context_mapping_cb, &data);
1910 static int domain_context_mapped_cb(struct pci_dev *pdev,
1911 u16 alias, void *opaque)
1913 struct intel_iommu *iommu = opaque;
1915 return !device_context_mapped(iommu, PCI_BUS_NUM(alias), alias & 0xff);
1918 static int domain_context_mapped(struct device *dev)
1920 struct intel_iommu *iommu;
1923 iommu = device_to_iommu(dev, &bus, &devfn);
1927 if (!dev_is_pci(dev))
1928 return device_context_mapped(iommu, bus, devfn);
1930 return !pci_for_each_dma_alias(to_pci_dev(dev),
1931 domain_context_mapped_cb, iommu);
1934 /* Returns a number of VTD pages, but aligned to MM page size */
1935 static inline unsigned long aligned_nrpages(unsigned long host_addr,
1938 host_addr &= ~PAGE_MASK;
1939 return PAGE_ALIGN(host_addr + size) >> VTD_PAGE_SHIFT;
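/*
 * Worked example (illustrative only): a 0x1000-byte buffer starting at page
 * offset 0x200 spans two 4KiB pages, so aligned_nrpages(0x200, 0x1000)
 * returns PAGE_ALIGN(0x1200) >> VTD_PAGE_SHIFT == 2 VT-d pages.
 */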
1942 /* Return largest possible superpage level for a given mapping */
1943 static inline int hardware_largepage_caps(struct dmar_domain *domain,
1944 unsigned long iov_pfn,
1945 unsigned long phy_pfn,
1946 unsigned long pages)
1948 int support, level = 1;
1949 unsigned long pfnmerge;
1951 support = domain->iommu_superpage;
1953 /* To use a large page, the virtual *and* physical addresses
1954 must be aligned to 2MiB/1GiB/etc. Lower bits set in either
1955 of them will mean we have to use smaller pages. So just
1956 merge them and check both at once. */
1957 pfnmerge = iov_pfn | phy_pfn;
1959 while (support && !(pfnmerge & ~VTD_STRIDE_MASK)) {
1960 pages >>= VTD_STRIDE_SHIFT;
1963 pfnmerge >>= VTD_STRIDE_SHIFT;
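/*
 * Worked example (illustrative only): iov_pfn 0x40000 and phy_pfn 0x80001
 * merge to 0xc0001; the low 9 bits are non-zero, so the two addresses are
 * not both 2MiB aligned and only 4KiB pages (level 1) can be used.  If both
 * PFNs were multiples of 512 and at least 512 pages remained to map, level 2
 * (a 2MiB superpage) would be chosen instead.
 */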
1970 static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
1971 struct scatterlist *sg, unsigned long phys_pfn,
1972 unsigned long nr_pages, int prot)
1974 struct dma_pte *first_pte = NULL, *pte = NULL;
1975 phys_addr_t uninitialized_var(pteval);
1976 int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
1977 unsigned long sg_res;
1978 unsigned int largepage_lvl = 0;
1979 unsigned long lvl_pages = 0;
1981 BUG_ON(addr_width < BITS_PER_LONG && (iov_pfn + nr_pages - 1) >> addr_width);
1983 if ((prot & (DMA_PTE_READ|DMA_PTE_WRITE)) == 0)
1986 prot &= DMA_PTE_READ | DMA_PTE_WRITE | DMA_PTE_SNP;
1991 sg_res = nr_pages + 1;
1992 pteval = ((phys_addr_t)phys_pfn << VTD_PAGE_SHIFT) | prot;
1995 while (nr_pages > 0) {
1999 sg_res = aligned_nrpages(sg->offset, sg->length);
2000 sg->dma_address = ((dma_addr_t)iov_pfn << VTD_PAGE_SHIFT) + sg->offset;
2001 sg->dma_length = sg->length;
2002 pteval = page_to_phys(sg_page(sg)) | prot;
2003 phys_pfn = pteval >> VTD_PAGE_SHIFT;
2007 largepage_lvl = hardware_largepage_caps(domain, iov_pfn, phys_pfn, sg_res);
2009 first_pte = pte = pfn_to_dma_pte(domain, iov_pfn, &largepage_lvl);
2012 /* It is a large page */
2013 if (largepage_lvl > 1) {
2014 pteval |= DMA_PTE_LARGE_PAGE;
2015 /* Ensure that old small page tables are removed to make room
2016 for superpage, if they exist. */
2017 dma_pte_clear_range(domain, iov_pfn,
2018 iov_pfn + lvl_to_nr_pages(largepage_lvl) - 1);
2019 dma_pte_free_pagetable(domain, iov_pfn,
2020 iov_pfn + lvl_to_nr_pages(largepage_lvl) - 1);
2022 pteval &= ~(uint64_t)DMA_PTE_LARGE_PAGE;
2026 /* We don't need a lock here, nobody else
2027 * touches the iova range
2029 tmp = cmpxchg64_local(&pte->val, 0ULL, pteval);
2031 static int dumps = 5;
2032 printk(KERN_CRIT "ERROR: DMA PTE for vPFN 0x%lx already set (to %llx not %llx)\n",
2033 iov_pfn, tmp, (unsigned long long)pteval);
2036 debug_dma_dump_mappings(NULL);
2041 lvl_pages = lvl_to_nr_pages(largepage_lvl);
2043 BUG_ON(nr_pages < lvl_pages);
2044 BUG_ON(sg_res < lvl_pages);
2046 nr_pages -= lvl_pages;
2047 iov_pfn += lvl_pages;
2048 phys_pfn += lvl_pages;
2049 pteval += lvl_pages * VTD_PAGE_SIZE;
2050 sg_res -= lvl_pages;
2052 /* If the next PTE would be the first in a new page, then we
2053 need to flush the cache on the entries we've just written.
2054 And then we'll need to recalculate 'pte', so clear it and
2055 let it get set again in the if (!pte) block above.
2057 If we're done (!nr_pages) we need to flush the cache too.
2059 Also if we've been setting superpages, we may need to
2060 recalculate 'pte' and switch back to smaller pages for the
2061 end of the mapping, if the trailing size is not enough to
2062 use another superpage (i.e. sg_res < lvl_pages). */
2064 if (!nr_pages || first_pte_in_page(pte) ||
2065 (largepage_lvl > 1 && sg_res < lvl_pages)) {
2066 domain_flush_cache(domain, first_pte,
2067 (void *)pte - (void *)first_pte);
2071 if (!sg_res && nr_pages)
2077 static inline int domain_sg_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
2078 struct scatterlist *sg, unsigned long nr_pages,
2081 return __domain_mapping(domain, iov_pfn, sg, 0, nr_pages, prot);
2084 static inline int domain_pfn_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
2085 unsigned long phys_pfn, unsigned long nr_pages,
2088 return __domain_mapping(domain, iov_pfn, NULL, phys_pfn, nr_pages, prot);
2091 static void iommu_detach_dev(struct intel_iommu *iommu, u8 bus, u8 devfn)
2096 clear_context_table(iommu, bus, devfn);
2097 iommu->flush.flush_context(iommu, 0, 0, 0,
2098 DMA_CCMD_GLOBAL_INVL);
2099 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
2102 static inline void unlink_domain_info(struct device_domain_info *info)
2104 assert_spin_locked(&device_domain_lock);
2105 list_del(&info->link);
2106 list_del(&info->global);
2108 info->dev->archdata.iommu = NULL;
2111 static void domain_remove_dev_info(struct dmar_domain *domain)
2113 struct device_domain_info *info, *tmp;
2114 unsigned long flags;
2116 spin_lock_irqsave(&device_domain_lock, flags);
2117 list_for_each_entry_safe(info, tmp, &domain->devices, link) {
2118 unlink_domain_info(info);
2119 spin_unlock_irqrestore(&device_domain_lock, flags);
2121 iommu_disable_dev_iotlb(info);
2122 iommu_detach_dev(info->iommu, info->bus, info->devfn);
2124 if (domain_type_is_vm(domain)) {
2125 iommu_detach_dependent_devices(info->iommu, info->dev);
2126 domain_detach_iommu(domain, info->iommu);
2129 free_devinfo_mem(info);
2130 spin_lock_irqsave(&device_domain_lock, flags);
2132 spin_unlock_irqrestore(&device_domain_lock, flags);
2137 * Note: we use struct device->archdata.iommu to store the info
2139 static struct dmar_domain *find_domain(struct device *dev)
2141 struct device_domain_info *info;
2143 /* No lock here, assumes no domain exit in normal case */
2144 info = dev->archdata.iommu;
2146 return info->domain;
2150 static inline struct device_domain_info *
2151 dmar_search_domain_by_dev_info(int segment, int bus, int devfn)
2153 struct device_domain_info *info;
2155 list_for_each_entry(info, &device_domain_list, global)
2156 if (info->iommu->segment == segment && info->bus == bus &&
2157 info->devfn == devfn)
2163 static struct dmar_domain *dmar_insert_dev_info(struct intel_iommu *iommu,
2166 struct dmar_domain *domain)
2168 struct dmar_domain *found = NULL;
2169 struct device_domain_info *info;
2170 unsigned long flags;
2172 info = alloc_devinfo_mem();
2177 info->devfn = devfn;
2179 info->domain = domain;
2180 info->iommu = iommu;
2182 spin_lock_irqsave(&device_domain_lock, flags);
2184 found = find_domain(dev);
2186 struct device_domain_info *info2;
2187 info2 = dmar_search_domain_by_dev_info(iommu->segment, bus, devfn);
2189 found = info2->domain;
2192 spin_unlock_irqrestore(&device_domain_lock, flags);
2193 free_devinfo_mem(info);
2194 /* Caller must free the original domain */
2198 list_add(&info->link, &domain->devices);
2199 list_add(&info->global, &device_domain_list);
2201 dev->archdata.iommu = info;
2202 spin_unlock_irqrestore(&device_domain_lock, flags);
2207 static int get_last_alias(struct pci_dev *pdev, u16 alias, void *opaque)
2209 *(u16 *)opaque = alias;
2213 /* domain is initialized */
2214 static struct dmar_domain *get_domain_for_dev(struct device *dev, int gaw)
2216 struct dmar_domain *domain, *tmp;
2217 struct intel_iommu *iommu;
2218 struct device_domain_info *info;
2220 unsigned long flags;
2223 domain = find_domain(dev);
2227 iommu = device_to_iommu(dev, &bus, &devfn);
2231 if (dev_is_pci(dev)) {
2232 struct pci_dev *pdev = to_pci_dev(dev);
2234 pci_for_each_dma_alias(pdev, get_last_alias, &dma_alias);
2236 spin_lock_irqsave(&device_domain_lock, flags);
2237 info = dmar_search_domain_by_dev_info(pci_domain_nr(pdev->bus),
2238 PCI_BUS_NUM(dma_alias),
2241 iommu = info->iommu;
2242 domain = info->domain;
2244 spin_unlock_irqrestore(&device_domain_lock, flags);
2246 /* DMA alias already has a domain, use it */
2251 /* Allocate and initialize new domain for the device */
2252 domain = alloc_domain(0);
2255 domain->id = iommu_attach_domain(domain, iommu);
2256 if (domain->id < 0) {
2257 free_domain_mem(domain);
2260 domain_attach_iommu(domain, iommu);
2261 if (domain_init(domain, gaw)) {
2262 domain_exit(domain);
2266 /* register PCI DMA alias device */
2267 if (dev_is_pci(dev)) {
2268 tmp = dmar_insert_dev_info(iommu, PCI_BUS_NUM(dma_alias),
2269 dma_alias & 0xff, NULL, domain);
2271 if (!tmp || tmp != domain) {
2272 domain_exit(domain);
2281 tmp = dmar_insert_dev_info(iommu, bus, devfn, dev, domain);
2283 if (!tmp || tmp != domain) {
2284 domain_exit(domain);
2291 static int iommu_identity_mapping;
2292 #define IDENTMAP_ALL 1
2293 #define IDENTMAP_GFX 2
2294 #define IDENTMAP_AZALIA 4
2296 static int iommu_domain_identity_map(struct dmar_domain *domain,
2297 unsigned long long start,
2298 unsigned long long end)
2300 unsigned long first_vpfn = start >> VTD_PAGE_SHIFT;
2301 unsigned long last_vpfn = end >> VTD_PAGE_SHIFT;
2303 if (!reserve_iova(&domain->iovad, dma_to_mm_pfn(first_vpfn),
2304 dma_to_mm_pfn(last_vpfn))) {
2305 printk(KERN_ERR "IOMMU: reserve iova failed\n");
2309 pr_debug("Mapping reserved region %llx-%llx for domain %d\n",
2310 start, end, domain->id);
2312 * RMRR range might overlap with a physical memory range; clear it first.
2315 dma_pte_clear_range(domain, first_vpfn, last_vpfn);
2317 return domain_pfn_mapping(domain, first_vpfn, first_vpfn,
2318 last_vpfn - first_vpfn + 1,
2319 DMA_PTE_READ|DMA_PTE_WRITE);
2322 static int iommu_prepare_identity_map(struct device *dev,
2323 unsigned long long start,
2324 unsigned long long end)
2326 struct dmar_domain *domain;
2329 domain = get_domain_for_dev(dev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
2333 /* For _hardware_ passthrough, don't bother. But for software
2334 passthrough, we do it anyway -- it may indicate a memory
2335 range which is reserved in E820, and so didn't get set
2336 up to start with in si_domain */
2337 if (domain == si_domain && hw_pass_through) {
2338 printk("Ignoring identity map for HW passthrough device %s [0x%Lx - 0x%Lx]\n",
2339 dev_name(dev), start, end);
2344 "IOMMU: Setting identity map for device %s [0x%Lx - 0x%Lx]\n",
2345 dev_name(dev), start, end);
2348 WARN(1, "Your BIOS is broken; RMRR ends before it starts!\n"
2349 "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
2350 dmi_get_system_info(DMI_BIOS_VENDOR),
2351 dmi_get_system_info(DMI_BIOS_VERSION),
2352 dmi_get_system_info(DMI_PRODUCT_VERSION));
2357 if (end >> agaw_to_width(domain->agaw)) {
2358 WARN(1, "Your BIOS is broken; RMRR exceeds permitted address width (%d bits)\n"
2359 "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
2360 agaw_to_width(domain->agaw),
2361 dmi_get_system_info(DMI_BIOS_VENDOR),
2362 dmi_get_system_info(DMI_BIOS_VERSION),
2363 dmi_get_system_info(DMI_PRODUCT_VERSION));
2368 ret = iommu_domain_identity_map(domain, start, end);
2372 /* context entry init */
2373 ret = domain_context_mapping(domain, dev, CONTEXT_TT_MULTI_LEVEL);
2380 domain_exit(domain);
2384 static inline int iommu_prepare_rmrr_dev(struct dmar_rmrr_unit *rmrr,
2387 if (dev->archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO)
2389 return iommu_prepare_identity_map(dev, rmrr->base_address,
2393 #ifdef CONFIG_INTEL_IOMMU_FLOPPY_WA
2394 static inline void iommu_prepare_isa(void)
2396 struct pci_dev *pdev;
2399 pdev = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, NULL);
2403 printk(KERN_INFO "IOMMU: Prepare 0-16MiB unity mapping for LPC\n");
2404 ret = iommu_prepare_identity_map(&pdev->dev, 0, 16*1024*1024 - 1);
2407 printk(KERN_ERR "IOMMU: Failed to create 0-16MiB identity map; "
2408 "floppy might not work\n");
2413 static inline void iommu_prepare_isa(void)
2417 #endif /* !CONFIG_INTEL_IOMMU_FLOPPY_WA */
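/*
 * Background for the floppy workaround above (a sketch of the rationale, not
 * spelled out in the code): legacy floppy controllers do ISA DMA to addresses
 * below 16MiB and are only visible to the IOMMU via the LPC/ISA bridge's
 * requester ID, so the bridge gets a 0-16MiB identity map to keep floppy DMA
 * working once translation is enabled.
 */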
2419 static int md_domain_init(struct dmar_domain *domain, int guest_width);
2421 static int __init si_domain_init(int hw)
2423 struct dmar_drhd_unit *drhd;
2424 struct intel_iommu *iommu;
2428 si_domain = alloc_domain(DOMAIN_FLAG_STATIC_IDENTITY);
2432 for_each_active_iommu(iommu, drhd) {
2433 ret = iommu_attach_domain(si_domain, iommu);
2435 domain_exit(si_domain);
2438 si_domain->id = ret;
2440 } else if (si_domain->id != ret) {
2441 domain_exit(si_domain);
2444 domain_attach_iommu(si_domain, iommu);
2447 if (md_domain_init(si_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
2448 domain_exit(si_domain);
2452 pr_debug("IOMMU: identity mapping domain is domain %d\n",
2458 for_each_online_node(nid) {
2459 unsigned long start_pfn, end_pfn;
2462 for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
2463 ret = iommu_domain_identity_map(si_domain,
2464 PFN_PHYS(start_pfn), PFN_PHYS(end_pfn));
2473 static int identity_mapping(struct device *dev)
2475 struct device_domain_info *info;
2477 if (likely(!iommu_identity_mapping))
2480 info = dev->archdata.iommu;
2481 if (info && info != DUMMY_DEVICE_DOMAIN_INFO)
2482 return (info->domain == si_domain);
2487 static int domain_add_dev_info(struct dmar_domain *domain,
2488 struct device *dev, int translation)
2490 struct dmar_domain *ndomain;
2491 struct intel_iommu *iommu;
2495 iommu = device_to_iommu(dev, &bus, &devfn);
2499 ndomain = dmar_insert_dev_info(iommu, bus, devfn, dev, domain);
2500 if (ndomain != domain)
2503 ret = domain_context_mapping(domain, dev, translation);
2505 domain_remove_one_dev_info(domain, dev);
2512 static bool device_has_rmrr(struct device *dev)
2514 struct dmar_rmrr_unit *rmrr;
2519 for_each_rmrr_units(rmrr) {
2521 * Return TRUE if this RMRR contains the device that is passed in.
2524 for_each_active_dev_scope(rmrr->devices,
2525 rmrr->devices_cnt, i, tmp)
2535 static int iommu_should_identity_map(struct device *dev, int startup)
2538 if (dev_is_pci(dev)) {
2539 struct pci_dev *pdev = to_pci_dev(dev);
2542 * We want to prevent any device associated with an RMRR from
2543 * getting placed into the SI Domain. This is done because
2544 * problems exist when devices are moved in and out of domains
2545 * and their respective RMRR info is lost. We exempt USB devices
2546 * from this process due to their usage of RMRRs that are known
2547 * to not be needed after BIOS hand-off to OS.
2549 if (device_has_rmrr(dev) &&
2550 (pdev->class >> 8) != PCI_CLASS_SERIAL_USB)
2553 if ((iommu_identity_mapping & IDENTMAP_AZALIA) && IS_AZALIA(pdev))
2556 if ((iommu_identity_mapping & IDENTMAP_GFX) && IS_GFX_DEVICE(pdev))
2559 if (!(iommu_identity_mapping & IDENTMAP_ALL))
2563 * We want to start off with all devices in the 1:1 domain, and
2564 * take them out later if we find they can't access all of memory.
2566 * However, we can't do this for PCI devices behind bridges,
2567 * because all PCI devices behind the same bridge will end up
2568 * with the same source-id on their transactions.
2570 * Practically speaking, we can't change things around for these
2571 * devices at run-time, because we can't be sure there'll be no
2572 * DMA transactions in flight for any of their siblings.
2574 * So PCI devices (unless they're on the root bus) as well as
2575 * their parent PCI-PCI or PCIe-PCI bridges must be left _out_ of
2576 * the 1:1 domain, just in _case_ one of their siblings turns out
2577 * not to be able to map all of memory.
2579 if (!pci_is_pcie(pdev)) {
2580 if (!pci_is_root_bus(pdev->bus))
2582 if (pdev->class >> 8 == PCI_CLASS_BRIDGE_PCI)
2584 } else if (pci_pcie_type(pdev) == PCI_EXP_TYPE_PCI_BRIDGE)
2587 if (device_has_rmrr(dev))
2592 * At boot time, we don't yet know if devices will be 64-bit capable.
2593 * Assume that they will -- if they turn out not to be, then we can
2594 * take them out of the 1:1 domain later.
2598 * If the device's dma_mask is less than the system's memory
2599 * size then this is not a candidate for identity mapping.
2601 u64 dma_mask = *dev->dma_mask;
2603 if (dev->coherent_dma_mask &&
2604 dev->coherent_dma_mask < dma_mask)
2605 dma_mask = dev->coherent_dma_mask;
2607 return dma_mask >= dma_get_required_mask(dev);
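/*
 * For example, a device with a 32-bit dma_mask on a machine with more than
 * 4GiB of RAM has dma_get_required_mask() > DMA_BIT_MASK(32), so the check
 * above fails and the device is left out of (or removed from) the identity
 * domain and given translated IOVAs under its mask instead.
 */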
2613 static int __init dev_prepare_static_identity_mapping(struct device *dev, int hw)
2617 if (!iommu_should_identity_map(dev, 1))
2620 ret = domain_add_dev_info(si_domain, dev,
2621 hw ? CONTEXT_TT_PASS_THROUGH :
2622 CONTEXT_TT_MULTI_LEVEL);
2624 pr_info("IOMMU: %s identity mapping for device %s\n",
2625 hw ? "hardware" : "software", dev_name(dev));
2626 else if (ret == -ENODEV)
2627 /* device not associated with an iommu */
2634 static int __init iommu_prepare_static_identity_mapping(int hw)
2636 struct pci_dev *pdev = NULL;
2637 struct dmar_drhd_unit *drhd;
2638 struct intel_iommu *iommu;
2643 ret = si_domain_init(hw);
2647 for_each_pci_dev(pdev) {
2648 ret = dev_prepare_static_identity_mapping(&pdev->dev, hw);
2653 for_each_active_iommu(iommu, drhd)
2654 for_each_active_dev_scope(drhd->devices, drhd->devices_cnt, i, dev) {
2655 struct acpi_device_physical_node *pn;
2656 struct acpi_device *adev;
2658 if (dev->bus != &acpi_bus_type)
2661 adev = to_acpi_device(dev);
2662 mutex_lock(&adev->physical_node_lock);
2663 list_for_each_entry(pn, &adev->physical_node_list, node) {
2664 ret = dev_prepare_static_identity_mapping(pn->dev, hw);
2668 mutex_unlock(&adev->physical_node_lock);
2676 static int __init init_dmars(void)
2678 struct dmar_drhd_unit *drhd;
2679 struct dmar_rmrr_unit *rmrr;
2681 struct intel_iommu *iommu;
2687 * initialize and program root entry to not present
2690 for_each_drhd_unit(drhd) {
2692 * lock not needed as this is only incremented in the single
2693 * threaded kernel __init code path; all other accesses are read-only
2696 if (g_num_of_iommus < IOMMU_UNITS_SUPPORTED) {
2700 printk_once(KERN_ERR "intel-iommu: exceeded %d IOMMUs\n",
2701 IOMMU_UNITS_SUPPORTED);
2704 g_iommus = kcalloc(g_num_of_iommus, sizeof(struct intel_iommu *),
2707 printk(KERN_ERR "Allocating global iommu array failed\n");
2712 deferred_flush = kzalloc(g_num_of_iommus *
2713 sizeof(struct deferred_flush_tables), GFP_KERNEL);
2714 if (!deferred_flush) {
2719 for_each_active_iommu(iommu, drhd) {
2720 g_iommus[iommu->seq_id] = iommu;
2722 ret = iommu_init_domains(iommu);
2728 * we could share the same root & context tables
2729 * among all IOMMUs; need to split it later.
2731 ret = iommu_alloc_root_entry(iommu);
2733 printk(KERN_ERR "IOMMU: allocate root entry failed\n");
2736 if (!ecap_pass_through(iommu->ecap))
2737 hw_pass_through = 0;
2741 * Start from a sane IOMMU hardware state.
2743 for_each_active_iommu(iommu, drhd) {
2745 * If the queued invalidation is already initialized by us
2746 * (for example, while enabling interrupt-remapping) then
2747 * we already have things rolling from a sane state.
2753 * Clear any previous faults.
2755 dmar_fault(-1, iommu);
2757 * Disable queued invalidation if supported and already enabled
2758 * before OS handover.
2760 dmar_disable_qi(iommu);
2763 for_each_active_iommu(iommu, drhd) {
2764 if (dmar_enable_qi(iommu)) {
2766 * Queued Invalidate not enabled, use Register Based
2769 iommu->flush.flush_context = __iommu_flush_context;
2770 iommu->flush.flush_iotlb = __iommu_flush_iotlb;
2771 printk(KERN_INFO "IOMMU %d 0x%Lx: using Register based "
2774 (unsigned long long)drhd->reg_base_addr);
2776 iommu->flush.flush_context = qi_flush_context;
2777 iommu->flush.flush_iotlb = qi_flush_iotlb;
2778 printk(KERN_INFO "IOMMU %d 0x%Lx: using Queued "
2781 (unsigned long long)drhd->reg_base_addr);
2785 if (iommu_pass_through)
2786 iommu_identity_mapping |= IDENTMAP_ALL;
2788 #ifdef CONFIG_INTEL_IOMMU_BROKEN_GFX_WA
2789 iommu_identity_mapping |= IDENTMAP_GFX;
2792 check_tylersburg_isoch();
2795 * If pass-through is not set or not enabled, set up context entries for
2796 * identity mappings for RMRR, GFX and ISA, and may fall back to static
2797 * identity mapping if iommu_identity_mapping is set.
2799 if (iommu_identity_mapping) {
2800 ret = iommu_prepare_static_identity_mapping(hw_pass_through);
2802 printk(KERN_CRIT "Failed to setup IOMMU pass-through\n");
2808 * for each dev attached to rmrr
2810 * locate drhd for dev, alloc domain for dev
2811 * allocate free domain
2812 * allocate page table entries for rmrr
2813 * if context not allocated for bus
2814 * allocate and init context
2815 * set present in root table for this bus
2816 * init context with domain, translation etc
2820 printk(KERN_INFO "IOMMU: Setting RMRR:\n");
2821 for_each_rmrr_units(rmrr) {
2822 /* some BIOSes list non-existent devices in the DMAR table */
2823 for_each_active_dev_scope(rmrr->devices, rmrr->devices_cnt,
2825 ret = iommu_prepare_rmrr_dev(rmrr, dev);
2828 "IOMMU: mapping reserved region failed\n");
2832 iommu_prepare_isa();
2837 * global invalidate context cache
2838 * global invalidate iotlb
2839 * enable translation
2841 for_each_iommu(iommu, drhd) {
2842 if (drhd->ignored) {
2844 * we always have to disable PMRs or DMA may fail on this device
2848 iommu_disable_protect_mem_regions(iommu);
2852 iommu_flush_write_buffer(iommu);
2854 ret = dmar_set_interrupt(iommu);
2858 iommu_set_root_entry(iommu);
2860 iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
2861 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
2863 ret = iommu_enable_translation(iommu);
2867 iommu_disable_protect_mem_regions(iommu);
2873 for_each_active_iommu(iommu, drhd)
2874 free_dmar_iommu(iommu);
2875 kfree(deferred_flush);
2882 /* This takes a number of _MM_ pages, not VTD pages */
2883 static struct iova *intel_alloc_iova(struct device *dev,
2884 struct dmar_domain *domain,
2885 unsigned long nrpages, uint64_t dma_mask)
2887 struct iova *iova = NULL;
2889 /* Restrict dma_mask to the width that the iommu can handle */
2890 dma_mask = min_t(uint64_t, DOMAIN_MAX_ADDR(domain->gaw), dma_mask);
2892 if (!dmar_forcedac && dma_mask > DMA_BIT_MASK(32)) {
2894 * First try to allocate an io virtual address in
2895 * DMA_BIT_MASK(32) and if that fails then try allocating
2898 iova = alloc_iova(&domain->iovad, nrpages,
2899 IOVA_PFN(DMA_BIT_MASK(32)), 1);
2903 iova = alloc_iova(&domain->iovad, nrpages, IOVA_PFN(dma_mask), 1);
2904 if (unlikely(!iova)) {
2905 printk(KERN_ERR "Allocating %ld-page iova for %s failed",
2906 nrpages, dev_name(dev));
2913 static struct dmar_domain *__get_valid_domain_for_dev(struct device *dev)
2915 struct dmar_domain *domain;
2918 domain = get_domain_for_dev(dev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
2920 printk(KERN_ERR "Allocating domain for %s failed",
2925 /* make sure context mapping is ok */
2926 if (unlikely(!domain_context_mapped(dev))) {
2927 ret = domain_context_mapping(domain, dev, CONTEXT_TT_MULTI_LEVEL);
2929 printk(KERN_ERR "Domain context map for %s failed",
2938 static inline struct dmar_domain *get_valid_domain_for_dev(struct device *dev)
2940 struct device_domain_info *info;
2942 /* No lock here, assumes no domain exit in normal case */
2943 info = dev->archdata.iommu;
2945 return info->domain;
2947 return __get_valid_domain_for_dev(dev);
2950 static int iommu_dummy(struct device *dev)
2952 return dev->archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO;
2955 /* Check if the dev needs to go through non-identity map and unmap process.*/
2956 static int iommu_no_mapping(struct device *dev)
2960 if (iommu_dummy(dev))
2963 if (!iommu_identity_mapping)
2966 found = identity_mapping(dev);
2968 if (iommu_should_identity_map(dev, 0))
2972 * 32 bit DMA device is removed from si_domain and falls back
2973 * to non-identity mapping.
2975 domain_remove_one_dev_info(si_domain, dev);
2976 printk(KERN_INFO "32bit %s uses non-identity mapping\n",
2982 * If a 64 bit DMA device was detached from a VM, the device
2983 * is put back into si_domain for identity mapping.
2985 if (iommu_should_identity_map(dev, 0)) {
2987 ret = domain_add_dev_info(si_domain, dev,
2989 CONTEXT_TT_PASS_THROUGH :
2990 CONTEXT_TT_MULTI_LEVEL);
2992 printk(KERN_INFO "64bit %s uses identity mapping\n",
3002 static dma_addr_t __intel_map_single(struct device *dev, phys_addr_t paddr,
3003 size_t size, int dir, u64 dma_mask)
3005 struct dmar_domain *domain;
3006 phys_addr_t start_paddr;
3010 struct intel_iommu *iommu;
3011 unsigned long paddr_pfn = paddr >> PAGE_SHIFT;
3013 BUG_ON(dir == DMA_NONE);
3015 if (iommu_no_mapping(dev))
3018 domain = get_valid_domain_for_dev(dev);
3022 iommu = domain_get_iommu(domain);
3023 size = aligned_nrpages(paddr, size);
3025 iova = intel_alloc_iova(dev, domain, dma_to_mm_pfn(size), dma_mask);
3030 * Check if DMAR supports zero-length reads on write-only mappings
3033 if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL || \
3034 !cap_zlr(iommu->cap))
3035 prot |= DMA_PTE_READ;
3036 if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
3037 prot |= DMA_PTE_WRITE;
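/*
 * Note on the cap_zlr check above: if the IOMMU cannot handle zero-length
 * reads on write-only mappings, DMA_PTE_READ is set even for DMA_FROM_DEVICE
 * mappings so that zero-length read transactions do not fault.
 */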
3039 * paddr to (paddr + size) might span a partial page, so map the whole
3040 * page. Note: if two parts of one page are mapped separately, we
3041 * might have two guest addresses mapping to the same host paddr, but this
3042 * is not a big problem
3044 ret = domain_pfn_mapping(domain, mm_to_dma_pfn(iova->pfn_lo),
3045 mm_to_dma_pfn(paddr_pfn), size, prot);
3049 /* it's a non-present to present mapping. Only flush if caching mode */
3050 if (cap_caching_mode(iommu->cap))
3051 iommu_flush_iotlb_psi(iommu, domain->id, mm_to_dma_pfn(iova->pfn_lo), size, 0, 1);
3053 iommu_flush_write_buffer(iommu);
3055 start_paddr = (phys_addr_t)iova->pfn_lo << PAGE_SHIFT;
3056 start_paddr += paddr & ~PAGE_MASK;
3061 __free_iova(&domain->iovad, iova);
3062 printk(KERN_ERR "Device %s request: %zx@%llx dir %d --- failed\n",
3063 dev_name(dev), size, (unsigned long long)paddr, dir);
3067 static dma_addr_t intel_map_page(struct device *dev, struct page *page,
3068 unsigned long offset, size_t size,
3069 enum dma_data_direction dir,
3070 struct dma_attrs *attrs)
3072 return __intel_map_single(dev, page_to_phys(page) + offset, size,
3073 dir, *dev->dma_mask);
3076 static void flush_unmaps(void)
3082 /* just flush them all */
3083 for (i = 0; i < g_num_of_iommus; i++) {
3084 struct intel_iommu *iommu = g_iommus[i];
3088 if (!deferred_flush[i].next)
3091 /* In caching mode, global flushes make emulation expensive */
3092 if (!cap_caching_mode(iommu->cap))
3093 iommu->flush.flush_iotlb(iommu, 0, 0, 0,
3094 DMA_TLB_GLOBAL_FLUSH);
3095 for (j = 0; j < deferred_flush[i].next; j++) {
3097 struct iova *iova = deferred_flush[i].iova[j];
3098 struct dmar_domain *domain = deferred_flush[i].domain[j];
3100 /* On real hardware multiple invalidations are expensive */
3101 if (cap_caching_mode(iommu->cap))
3102 iommu_flush_iotlb_psi(iommu, domain->id,
3103 iova->pfn_lo, iova->pfn_hi - iova->pfn_lo + 1,
3104 !deferred_flush[i].freelist[j], 0);
3106 mask = ilog2(mm_to_dma_pfn(iova->pfn_hi - iova->pfn_lo + 1));
3107 iommu_flush_dev_iotlb(deferred_flush[i].domain[j],
3108 (uint64_t)iova->pfn_lo << PAGE_SHIFT, mask);
3110 __free_iova(&deferred_flush[i].domain[j]->iovad, iova);
3111 if (deferred_flush[i].freelist[j])
3112 dma_free_pagelist(deferred_flush[i].freelist[j]);
3114 deferred_flush[i].next = 0;
3120 static void flush_unmaps_timeout(unsigned long data)
3122 unsigned long flags;
3124 spin_lock_irqsave(&async_umap_flush_lock, flags);
3126 spin_unlock_irqrestore(&async_umap_flush_lock, flags);
3129 static void add_unmap(struct dmar_domain *dom, struct iova *iova, struct page *freelist)
3131 unsigned long flags;
3133 struct intel_iommu *iommu;
3135 spin_lock_irqsave(&async_umap_flush_lock, flags);
3136 if (list_size == HIGH_WATER_MARK)
3139 iommu = domain_get_iommu(dom);
3140 iommu_id = iommu->seq_id;
3142 next = deferred_flush[iommu_id].next;
3143 deferred_flush[iommu_id].domain[next] = dom;
3144 deferred_flush[iommu_id].iova[next] = iova;
3145 deferred_flush[iommu_id].freelist[next] = freelist;
3146 deferred_flush[iommu_id].next++;
3149 mod_timer(&unmap_timer, jiffies + msecs_to_jiffies(10));
3153 spin_unlock_irqrestore(&async_umap_flush_lock, flags);
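/*
 * add_unmap() batches IOVA/IOTLB teardown per IOMMU: entries accumulate in
 * deferred_flush[] and are released by flush_unmaps() either when the queue
 * reaches HIGH_WATER_MARK or when the 10ms unmap_timer fires. This trades a
 * short window of stale IOTLB entries for far fewer flush operations; the
 * intel_iommu_strict paths in the unmap routines bypass this batching.
 */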
3156 static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
3157 size_t size, enum dma_data_direction dir,
3158 struct dma_attrs *attrs)
3160 struct dmar_domain *domain;
3161 unsigned long start_pfn, last_pfn;
3163 struct intel_iommu *iommu;
3164 struct page *freelist;
3166 if (iommu_no_mapping(dev))
3169 domain = find_domain(dev);
3172 iommu = domain_get_iommu(domain);
3174 iova = find_iova(&domain->iovad, IOVA_PFN(dev_addr));
3175 if (WARN_ONCE(!iova, "Driver unmaps unmatched page at PFN %llx\n",
3176 (unsigned long long)dev_addr))
3179 start_pfn = mm_to_dma_pfn(iova->pfn_lo);
3180 last_pfn = mm_to_dma_pfn(iova->pfn_hi + 1) - 1;
3182 pr_debug("Device %s unmapping: pfn %lx-%lx\n",
3183 dev_name(dev), start_pfn, last_pfn);
3185 freelist = domain_unmap(domain, start_pfn, last_pfn);
3187 if (intel_iommu_strict) {
3188 iommu_flush_iotlb_psi(iommu, domain->id, start_pfn,
3189 last_pfn - start_pfn + 1, !freelist, 0);
3191 __free_iova(&domain->iovad, iova);
3192 dma_free_pagelist(freelist);
3194 add_unmap(domain, iova, freelist);
3196 * queue up the release of the unmap to save roughly 1/6th of the
3197 * CPU time used up by the iotlb flush operation...
3202 static void *intel_alloc_coherent(struct device *dev, size_t size,
3203 dma_addr_t *dma_handle, gfp_t flags,
3204 struct dma_attrs *attrs)
3206 struct page *page = NULL;
3209 size = PAGE_ALIGN(size);
3210 order = get_order(size);
3212 if (!iommu_no_mapping(dev))
3213 flags &= ~(GFP_DMA | GFP_DMA32);
3214 else if (dev->coherent_dma_mask < dma_get_required_mask(dev)) {
3215 if (dev->coherent_dma_mask < DMA_BIT_MASK(32))
3221 if (flags & __GFP_WAIT) {
3222 unsigned int count = size >> PAGE_SHIFT;
3224 page = dma_alloc_from_contiguous(dev, count, order);
3225 if (page && iommu_no_mapping(dev) &&
3226 page_to_phys(page) + size > dev->coherent_dma_mask) {
3227 dma_release_from_contiguous(dev, page, count);
3233 page = alloc_pages(flags, order);
3236 memset(page_address(page), 0, size);
3238 *dma_handle = __intel_map_single(dev, page_to_phys(page), size,
3240 dev->coherent_dma_mask);
3242 return page_address(page);
3243 if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT))
3244 __free_pages(page, order);
3249 static void intel_free_coherent(struct device *dev, size_t size, void *vaddr,
3250 dma_addr_t dma_handle, struct dma_attrs *attrs)
3253 struct page *page = virt_to_page(vaddr);
3255 size = PAGE_ALIGN(size);
3256 order = get_order(size);
3258 intel_unmap_page(dev, dma_handle, size, DMA_BIDIRECTIONAL, NULL);
3259 if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT))
3260 __free_pages(page, order);
3263 static void intel_unmap_sg(struct device *dev, struct scatterlist *sglist,
3264 int nelems, enum dma_data_direction dir,
3265 struct dma_attrs *attrs)
3267 struct dmar_domain *domain;
3268 unsigned long start_pfn, last_pfn;
3270 struct intel_iommu *iommu;
3271 struct page *freelist;
3273 if (iommu_no_mapping(dev))
3276 domain = find_domain(dev);
3279 iommu = domain_get_iommu(domain);
3281 iova = find_iova(&domain->iovad, IOVA_PFN(sglist[0].dma_address));
3282 if (WARN_ONCE(!iova, "Driver unmaps unmatched sglist at PFN %llx\n",
3283 (unsigned long long)sglist[0].dma_address))
3286 start_pfn = mm_to_dma_pfn(iova->pfn_lo);
3287 last_pfn = mm_to_dma_pfn(iova->pfn_hi + 1) - 1;
3289 freelist = domain_unmap(domain, start_pfn, last_pfn);
3291 if (intel_iommu_strict) {
3292 iommu_flush_iotlb_psi(iommu, domain->id, start_pfn,
3293 last_pfn - start_pfn + 1, !freelist, 0);
3295 __free_iova(&domain->iovad, iova);
3296 dma_free_pagelist(freelist);
3298 add_unmap(domain, iova, freelist);
3300 * queue up the release of the unmap to save roughly 1/6th of the
3301 * CPU time used up by the iotlb flush operation...
3306 static int intel_nontranslate_map_sg(struct device *hddev,
3307 struct scatterlist *sglist, int nelems, int dir)
3310 struct scatterlist *sg;
3312 for_each_sg(sglist, sg, nelems, i) {
3313 BUG_ON(!sg_page(sg));
3314 sg->dma_address = page_to_phys(sg_page(sg)) + sg->offset;
3315 sg->dma_length = sg->length;
3320 static int intel_map_sg(struct device *dev, struct scatterlist *sglist, int nelems,
3321 enum dma_data_direction dir, struct dma_attrs *attrs)
3324 struct dmar_domain *domain;
3327 struct iova *iova = NULL;
3329 struct scatterlist *sg;
3330 unsigned long start_vpfn;
3331 struct intel_iommu *iommu;
3333 BUG_ON(dir == DMA_NONE);
3334 if (iommu_no_mapping(dev))
3335 return intel_nontranslate_map_sg(dev, sglist, nelems, dir);
3337 domain = get_valid_domain_for_dev(dev);
3341 iommu = domain_get_iommu(domain);
3343 for_each_sg(sglist, sg, nelems, i)
3344 size += aligned_nrpages(sg->offset, sg->length);
3346 iova = intel_alloc_iova(dev, domain, dma_to_mm_pfn(size),
3349 sglist->dma_length = 0;
3354 * Check if DMAR supports zero-length reads on write-only mappings
3357 if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL || \
3358 !cap_zlr(iommu->cap))
3359 prot |= DMA_PTE_READ;
3360 if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
3361 prot |= DMA_PTE_WRITE;
3363 start_vpfn = mm_to_dma_pfn(iova->pfn_lo);
3365 ret = domain_sg_mapping(domain, start_vpfn, sglist, size, prot);
3366 if (unlikely(ret)) {
3367 /* clear the page */
3368 dma_pte_clear_range(domain, start_vpfn,
3369 start_vpfn + size - 1);
3370 /* free page tables */
3371 dma_pte_free_pagetable(domain, start_vpfn,
3372 start_vpfn + size - 1);
3374 __free_iova(&domain->iovad, iova);
3378 /* it's a non-present to present mapping. Only flush if caching mode */
3379 if (cap_caching_mode(iommu->cap))
3380 iommu_flush_iotlb_psi(iommu, domain->id, start_vpfn, size, 0, 1);
3382 iommu_flush_write_buffer(iommu);
3387 static int intel_mapping_error(struct device *dev, dma_addr_t dma_addr)
3392 struct dma_map_ops intel_dma_ops = {
3393 .alloc = intel_alloc_coherent,
3394 .free = intel_free_coherent,
3395 .map_sg = intel_map_sg,
3396 .unmap_sg = intel_unmap_sg,
3397 .map_page = intel_map_page,
3398 .unmap_page = intel_unmap_page,
3399 .mapping_error = intel_mapping_error,
3402 static inline int iommu_domain_cache_init(void)
3406 iommu_domain_cache = kmem_cache_create("iommu_domain",
3407 sizeof(struct dmar_domain),
3412 if (!iommu_domain_cache) {
3413 printk(KERN_ERR "Couldn't create iommu_domain cache\n");
3420 static inline int iommu_devinfo_cache_init(void)
3424 iommu_devinfo_cache = kmem_cache_create("iommu_devinfo",
3425 sizeof(struct device_domain_info),
3429 if (!iommu_devinfo_cache) {
3430 printk(KERN_ERR "Couldn't create devinfo cache\n");
3437 static inline int iommu_iova_cache_init(void)
3441 iommu_iova_cache = kmem_cache_create("iommu_iova",
3442 sizeof(struct iova),
3446 if (!iommu_iova_cache) {
3447 printk(KERN_ERR "Couldn't create iova cache\n");
3454 static int __init iommu_init_mempool(void)
3457 ret = iommu_iova_cache_init();
3461 ret = iommu_domain_cache_init();
3465 ret = iommu_devinfo_cache_init();
3469 kmem_cache_destroy(iommu_domain_cache);
3471 kmem_cache_destroy(iommu_iova_cache);
3476 static void __init iommu_exit_mempool(void)
3478 kmem_cache_destroy(iommu_devinfo_cache);
3479 kmem_cache_destroy(iommu_domain_cache);
3480 kmem_cache_destroy(iommu_iova_cache);
3484 static void quirk_ioat_snb_local_iommu(struct pci_dev *pdev)
3486 struct dmar_drhd_unit *drhd;
3490 /* We know that this device on this chipset has its own IOMMU.
3491 * If we find it under a different IOMMU, then the BIOS is lying
3492 * to us. Hope that the IOMMU for this device is actually
3493 * disabled, and it needs no translation...
3495 rc = pci_bus_read_config_dword(pdev->bus, PCI_DEVFN(0, 0), 0xb0, &vtbar);
3497 /* "can't" happen */
3498 dev_info(&pdev->dev, "failed to run vt-d quirk\n");
3501 vtbar &= 0xffff0000;
3503 /* we know that this iommu should be at offset 0xa000 from vtbar */
3504 drhd = dmar_find_matched_drhd_unit(pdev);
3505 if (WARN_TAINT_ONCE(!drhd || drhd->reg_base_addr - vtbar != 0xa000,
3506 TAINT_FIRMWARE_WORKAROUND,
3507 "BIOS assigned incorrect VT-d unit for Intel(R) QuickData Technology device\n"))
3508 pdev->dev.archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
3510 DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB, quirk_ioat_snb_local_iommu);
3512 static void __init init_no_remapping_devices(void)
3514 struct dmar_drhd_unit *drhd;
3518 for_each_drhd_unit(drhd) {
3519 if (!drhd->include_all) {
3520 for_each_active_dev_scope(drhd->devices,
3521 drhd->devices_cnt, i, dev)
3523 /* ignore DMAR unit if no devices exist */
3524 if (i == drhd->devices_cnt)
3529 for_each_active_drhd_unit(drhd) {
3530 if (drhd->include_all)
3533 for_each_active_dev_scope(drhd->devices,
3534 drhd->devices_cnt, i, dev)
3535 if (!dev_is_pci(dev) || !IS_GFX_DEVICE(to_pci_dev(dev)))
3537 if (i < drhd->devices_cnt)
3540 /* This IOMMU has *only* gfx devices. Either bypass it or
3541 set the gfx_mapped flag, as appropriate */
3543 intel_iommu_gfx_mapped = 1;
3546 for_each_active_dev_scope(drhd->devices,
3547 drhd->devices_cnt, i, dev)
3548 dev->archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
3553 #ifdef CONFIG_SUSPEND
3554 static int init_iommu_hw(void)
3556 struct dmar_drhd_unit *drhd;
3557 struct intel_iommu *iommu = NULL;
3559 for_each_active_iommu(iommu, drhd)
3561 dmar_reenable_qi(iommu);
3563 for_each_iommu(iommu, drhd) {
3564 if (drhd->ignored) {
3566 * we always have to disable PMRs or DMA may fail on this device
3570 iommu_disable_protect_mem_regions(iommu);
3574 iommu_flush_write_buffer(iommu);
3576 iommu_set_root_entry(iommu);
3578 iommu->flush.flush_context(iommu, 0, 0, 0,
3579 DMA_CCMD_GLOBAL_INVL);
3580 iommu->flush.flush_iotlb(iommu, 0, 0, 0,
3581 DMA_TLB_GLOBAL_FLUSH);
3582 if (iommu_enable_translation(iommu))
3584 iommu_disable_protect_mem_regions(iommu);
3590 static void iommu_flush_all(void)
3592 struct dmar_drhd_unit *drhd;
3593 struct intel_iommu *iommu;
3595 for_each_active_iommu(iommu, drhd) {
3596 iommu->flush.flush_context(iommu, 0, 0, 0,
3597 DMA_CCMD_GLOBAL_INVL);
3598 iommu->flush.flush_iotlb(iommu, 0, 0, 0,
3599 DMA_TLB_GLOBAL_FLUSH);
3603 static int iommu_suspend(void)
3605 struct dmar_drhd_unit *drhd;
3606 struct intel_iommu *iommu = NULL;
3609 for_each_active_iommu(iommu, drhd) {
3610 iommu->iommu_state = kzalloc(sizeof(u32) * MAX_SR_DMAR_REGS,
3612 if (!iommu->iommu_state)
3618 for_each_active_iommu(iommu, drhd) {
3619 iommu_disable_translation(iommu);
3621 raw_spin_lock_irqsave(&iommu->register_lock, flag);
3623 iommu->iommu_state[SR_DMAR_FECTL_REG] =
3624 readl(iommu->reg + DMAR_FECTL_REG);
3625 iommu->iommu_state[SR_DMAR_FEDATA_REG] =
3626 readl(iommu->reg + DMAR_FEDATA_REG);
3627 iommu->iommu_state[SR_DMAR_FEADDR_REG] =
3628 readl(iommu->reg + DMAR_FEADDR_REG);
3629 iommu->iommu_state[SR_DMAR_FEUADDR_REG] =
3630 readl(iommu->reg + DMAR_FEUADDR_REG);
3632 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
3637 for_each_active_iommu(iommu, drhd)
3638 kfree(iommu->iommu_state);
3643 static void iommu_resume(void)
3645 struct dmar_drhd_unit *drhd;
3646 struct intel_iommu *iommu = NULL;
3649 if (init_iommu_hw()) {
3651 panic("tboot: IOMMU setup failed, DMAR can not resume!\n");
3653 WARN(1, "IOMMU setup failed, DMAR can not resume!\n");
3657 for_each_active_iommu(iommu, drhd) {
3659 raw_spin_lock_irqsave(&iommu->register_lock, flag);
3661 writel(iommu->iommu_state[SR_DMAR_FECTL_REG],
3662 iommu->reg + DMAR_FECTL_REG);
3663 writel(iommu->iommu_state[SR_DMAR_FEDATA_REG],
3664 iommu->reg + DMAR_FEDATA_REG);
3665 writel(iommu->iommu_state[SR_DMAR_FEADDR_REG],
3666 iommu->reg + DMAR_FEADDR_REG);
3667 writel(iommu->iommu_state[SR_DMAR_FEUADDR_REG],
3668 iommu->reg + DMAR_FEUADDR_REG);
3670 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
3673 for_each_active_iommu(iommu, drhd)
3674 kfree(iommu->iommu_state);
3677 static struct syscore_ops iommu_syscore_ops = {
3678 .resume = iommu_resume,
3679 .suspend = iommu_suspend,
3682 static void __init init_iommu_pm_ops(void)
3684 register_syscore_ops(&iommu_syscore_ops);
3688 static inline void init_iommu_pm_ops(void) {}
3689 #endif /* CONFIG_SUSPEND */
3692 int __init dmar_parse_one_rmrr(struct acpi_dmar_header *header)
3694 struct acpi_dmar_reserved_memory *rmrr;
3695 struct dmar_rmrr_unit *rmrru;
3697 rmrru = kzalloc(sizeof(*rmrru), GFP_KERNEL);
3701 rmrru->hdr = header;
3702 rmrr = (struct acpi_dmar_reserved_memory *)header;
3703 rmrru->base_address = rmrr->base_address;
3704 rmrru->end_address = rmrr->end_address;
3705 rmrru->devices = dmar_alloc_dev_scope((void *)(rmrr + 1),
3706 ((void *)rmrr) + rmrr->header.length,
3707 &rmrru->devices_cnt);
3708 if (rmrru->devices_cnt && rmrru->devices == NULL) {
3713 list_add(&rmrru->list, &dmar_rmrr_units);
3718 int __init dmar_parse_one_atsr(struct acpi_dmar_header *hdr)
3720 struct acpi_dmar_atsr *atsr;
3721 struct dmar_atsr_unit *atsru;
3723 atsr = container_of(hdr, struct acpi_dmar_atsr, header);
3724 atsru = kzalloc(sizeof(*atsru), GFP_KERNEL);
3729 atsru->include_all = atsr->flags & 0x1;
3730 if (!atsru->include_all) {
3731 atsru->devices = dmar_alloc_dev_scope((void *)(atsr + 1),
3732 (void *)atsr + atsr->header.length,
3733 &atsru->devices_cnt);
3734 if (atsru->devices_cnt && atsru->devices == NULL) {
3740 list_add_rcu(&atsru->list, &dmar_atsr_units);
3745 static void intel_iommu_free_atsr(struct dmar_atsr_unit *atsru)
3747 dmar_free_dev_scope(&atsru->devices, &atsru->devices_cnt);
3751 static void intel_iommu_free_dmars(void)
3753 struct dmar_rmrr_unit *rmrru, *rmrr_n;
3754 struct dmar_atsr_unit *atsru, *atsr_n;
3756 list_for_each_entry_safe(rmrru, rmrr_n, &dmar_rmrr_units, list) {
3757 list_del(&rmrru->list);
3758 dmar_free_dev_scope(&rmrru->devices, &rmrru->devices_cnt);
3762 list_for_each_entry_safe(atsru, atsr_n, &dmar_atsr_units, list) {
3763 list_del(&atsru->list);
3764 intel_iommu_free_atsr(atsru);
3768 int dmar_find_matched_atsr_unit(struct pci_dev *dev)
3771 struct pci_bus *bus;
3772 struct pci_dev *bridge = NULL;
3774 struct acpi_dmar_atsr *atsr;
3775 struct dmar_atsr_unit *atsru;
3777 dev = pci_physfn(dev);
3778 for (bus = dev->bus; bus; bus = bus->parent) {
3780 if (!bridge || !pci_is_pcie(bridge) ||
3781 pci_pcie_type(bridge) == PCI_EXP_TYPE_PCI_BRIDGE)
3783 if (pci_pcie_type(bridge) == PCI_EXP_TYPE_ROOT_PORT)
3790 list_for_each_entry_rcu(atsru, &dmar_atsr_units, list) {
3791 atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
3792 if (atsr->segment != pci_domain_nr(dev->bus))
3795 for_each_dev_scope(atsru->devices, atsru->devices_cnt, i, tmp)
3796 if (tmp == &bridge->dev)
3799 if (atsru->include_all)
3809 int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info *info)
3812 struct dmar_rmrr_unit *rmrru;
3813 struct dmar_atsr_unit *atsru;
3814 struct acpi_dmar_atsr *atsr;
3815 struct acpi_dmar_reserved_memory *rmrr;
3817 if (!intel_iommu_enabled && system_state != SYSTEM_BOOTING)
3820 list_for_each_entry(rmrru, &dmar_rmrr_units, list) {
3821 rmrr = container_of(rmrru->hdr,
3822 struct acpi_dmar_reserved_memory, header);
3823 if (info->event == BUS_NOTIFY_ADD_DEVICE) {
3824 ret = dmar_insert_dev_scope(info, (void *)(rmrr + 1),
3825 ((void *)rmrr) + rmrr->header.length,
3826 rmrr->segment, rmrru->devices,
3827 rmrru->devices_cnt);
3830 } else if (info->event == BUS_NOTIFY_DEL_DEVICE) {
3831 dmar_remove_dev_scope(info, rmrr->segment,
3832 rmrru->devices, rmrru->devices_cnt);
3836 list_for_each_entry(atsru, &dmar_atsr_units, list) {
3837 if (atsru->include_all)
3840 atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
3841 if (info->event == BUS_NOTIFY_ADD_DEVICE) {
3842 ret = dmar_insert_dev_scope(info, (void *)(atsr + 1),
3843 (void *)atsr + atsr->header.length,
3844 atsr->segment, atsru->devices,
3845 atsru->devices_cnt);
3850 } else if (info->event == BUS_NOTIFY_DEL_DEVICE) {
3851 if (dmar_remove_dev_scope(info, atsr->segment,
3852 atsru->devices, atsru->devices_cnt))
3861 * Here we only respond to the action of a device being unbound from its driver.
3863 * A newly added device is not attached to its DMAR domain here yet; that happens
3864 * when the device is mapped to an iova.
3866 static int device_notifier(struct notifier_block *nb,
3867 unsigned long action, void *data)
3869 struct device *dev = data;
3870 struct dmar_domain *domain;
3872 if (iommu_dummy(dev))
3875 if (action != BUS_NOTIFY_UNBOUND_DRIVER &&
3876 action != BUS_NOTIFY_DEL_DEVICE)
3879 domain = find_domain(dev);
3883 down_read(&dmar_global_lock);
3884 domain_remove_one_dev_info(domain, dev);
3885 if (!domain_type_is_vm_or_si(domain) && list_empty(&domain->devices))
3886 domain_exit(domain);
3887 up_read(&dmar_global_lock);
3892 static struct notifier_block device_nb = {
3893 .notifier_call = device_notifier,
3896 static int intel_iommu_memory_notifier(struct notifier_block *nb,
3897 unsigned long val, void *v)
3899 struct memory_notify *mhp = v;
3900 unsigned long long start, end;
3901 unsigned long start_vpfn, last_vpfn;
3904 case MEM_GOING_ONLINE:
3905 start = mhp->start_pfn << PAGE_SHIFT;
3906 end = ((mhp->start_pfn + mhp->nr_pages) << PAGE_SHIFT) - 1;
3907 if (iommu_domain_identity_map(si_domain, start, end)) {
3908 pr_warn("dmar: failed to build identity map for [%llx-%llx]\n",
3915 case MEM_CANCEL_ONLINE:
3916 start_vpfn = mm_to_dma_pfn(mhp->start_pfn);
3917 last_vpfn = mm_to_dma_pfn(mhp->start_pfn + mhp->nr_pages - 1);
3918 while (start_vpfn <= last_vpfn) {
3920 struct dmar_drhd_unit *drhd;
3921 struct intel_iommu *iommu;
3922 struct page *freelist;
3924 iova = find_iova(&si_domain->iovad, start_vpfn);
3926 pr_debug("dmar: failed to get IOVA for PFN %lx\n",
3931 iova = split_and_remove_iova(&si_domain->iovad, iova,
3932 start_vpfn, last_vpfn);
3934 pr_warn("dmar: failed to split IOVA PFN [%lx-%lx]\n",
3935 start_vpfn, last_vpfn);
3939 freelist = domain_unmap(si_domain, iova->pfn_lo,
3943 for_each_active_iommu(iommu, drhd)
3944 iommu_flush_iotlb_psi(iommu, si_domain->id,
3946 iova->pfn_hi - iova->pfn_lo + 1,
3949 dma_free_pagelist(freelist);
3951 start_vpfn = iova->pfn_hi + 1;
3952 free_iova_mem(iova);
3960 static struct notifier_block intel_iommu_memory_nb = {
3961 .notifier_call = intel_iommu_memory_notifier,
3966 static ssize_t intel_iommu_show_version(struct device *dev,
3967 struct device_attribute *attr,
3970 struct intel_iommu *iommu = dev_get_drvdata(dev);
3971 u32 ver = readl(iommu->reg + DMAR_VER_REG);
3972 return sprintf(buf, "%d:%d\n",
3973 DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver));
3975 static DEVICE_ATTR(version, S_IRUGO, intel_iommu_show_version, NULL);
3977 static ssize_t intel_iommu_show_address(struct device *dev,
3978 struct device_attribute *attr,
3981 struct intel_iommu *iommu = dev_get_drvdata(dev);
3982 return sprintf(buf, "%llx\n", iommu->reg_phys);
3984 static DEVICE_ATTR(address, S_IRUGO, intel_iommu_show_address, NULL);
3986 static ssize_t intel_iommu_show_cap(struct device *dev,
3987 struct device_attribute *attr,
3990 struct intel_iommu *iommu = dev_get_drvdata(dev);
3991 return sprintf(buf, "%llx\n", iommu->cap);
3993 static DEVICE_ATTR(cap, S_IRUGO, intel_iommu_show_cap, NULL);
3995 static ssize_t intel_iommu_show_ecap(struct device *dev,
3996 struct device_attribute *attr,
3999 struct intel_iommu *iommu = dev_get_drvdata(dev);
4000 return sprintf(buf, "%llx\n", iommu->ecap);
4002 static DEVICE_ATTR(ecap, S_IRUGO, intel_iommu_show_ecap, NULL);
4004 static struct attribute *intel_iommu_attrs[] = {
4005 &dev_attr_version.attr,
4006 &dev_attr_address.attr,
4008 &dev_attr_ecap.attr,
4012 static struct attribute_group intel_iommu_group = {
4013 .name = "intel-iommu",
4014 .attrs = intel_iommu_attrs,
4017 const struct attribute_group *intel_iommu_groups[] = {
4022 int __init intel_iommu_init(void)
4025 struct dmar_drhd_unit *drhd;
4026 struct intel_iommu *iommu;
4028 /* VT-d is required for a TXT/tboot launch, so enforce that */
4029 force_on = tboot_force_iommu();
4031 if (iommu_init_mempool()) {
4033 panic("tboot: Failed to initialize iommu memory\n");
4037 down_write(&dmar_global_lock);
4038 if (dmar_table_init()) {
4040 panic("tboot: Failed to initialize DMAR table\n");
4045 * Disable translation if already enabled prior to OS handover.
4047 for_each_active_iommu(iommu, drhd)
4048 if (iommu->gcmd & DMA_GCMD_TE)
4049 iommu_disable_translation(iommu);
4051 if (dmar_dev_scope_init() < 0) {
4053 panic("tboot: Failed to initialize DMAR device scope\n");
4057 if (no_iommu || dmar_disabled)
4060 if (list_empty(&dmar_rmrr_units))
4061 printk(KERN_INFO "DMAR: No RMRR found\n");
4063 if (list_empty(&dmar_atsr_units))
4064 printk(KERN_INFO "DMAR: No ATSR found\n");
4066 if (dmar_init_reserved_ranges()) {
4068 panic("tboot: Failed to reserve iommu ranges\n");
4069 goto out_free_reserved_range;
4072 init_no_remapping_devices();
4077 panic("tboot: Failed to initialize DMARs\n");
4078 printk(KERN_ERR "IOMMU: dmar init failed\n");
4079 goto out_free_reserved_range;
4081 up_write(&dmar_global_lock);
4083 "PCI-DMA: Intel(R) Virtualization Technology for Directed I/O\n");
4085 init_timer(&unmap_timer);
4086 #ifdef CONFIG_SWIOTLB
4089 dma_ops = &intel_dma_ops;
4091 init_iommu_pm_ops();
4093 for_each_active_iommu(iommu, drhd)
4094 iommu->iommu_dev = iommu_device_create(NULL, iommu,
4098 bus_set_iommu(&pci_bus_type, &intel_iommu_ops);
4099 bus_register_notifier(&pci_bus_type, &device_nb);
4100 if (si_domain && !hw_pass_through)
4101 register_memory_notifier(&intel_iommu_memory_nb);
4103 intel_iommu_enabled = 1;
4107 out_free_reserved_range:
4108 put_iova_domain(&reserved_iova_list);
4110 intel_iommu_free_dmars();
4111 up_write(&dmar_global_lock);
4112 iommu_exit_mempool();
4116 static int iommu_detach_dev_cb(struct pci_dev *pdev, u16 alias, void *opaque)
4118 struct intel_iommu *iommu = opaque;
4120 iommu_detach_dev(iommu, PCI_BUS_NUM(alias), alias & 0xff);
4125 * NB - intel-iommu lacks any sort of reference counting for the users of
4126 * dependent devices. If multiple endpoints have intersecting dependent
4127 * devices, unbinding the driver from any one of them will possibly leave
4128 * the others unable to operate.
4130 static void iommu_detach_dependent_devices(struct intel_iommu *iommu,
4133 if (!iommu || !dev || !dev_is_pci(dev))
4136 pci_for_each_dma_alias(to_pci_dev(dev), &iommu_detach_dev_cb, iommu);
4139 static void domain_remove_one_dev_info(struct dmar_domain *domain,
4142 struct device_domain_info *info, *tmp;
4143 struct intel_iommu *iommu;
4144 unsigned long flags;
4148 iommu = device_to_iommu(dev, &bus, &devfn);
4152 spin_lock_irqsave(&device_domain_lock, flags);
4153 list_for_each_entry_safe(info, tmp, &domain->devices, link) {
4154 if (info->iommu == iommu && info->bus == bus &&
4155 info->devfn == devfn) {
4156 unlink_domain_info(info);
4157 spin_unlock_irqrestore(&device_domain_lock, flags);
4159 iommu_disable_dev_iotlb(info);
4160 iommu_detach_dev(iommu, info->bus, info->devfn);
4161 iommu_detach_dependent_devices(iommu, dev);
4162 free_devinfo_mem(info);
4164 spin_lock_irqsave(&device_domain_lock, flags);
4172 /* if there are no other devices under the same iommu
4173 * owned by this domain, clear this iommu in iommu_bmp and
4174 * update the iommu count and coherency
4176 if (info->iommu == iommu)
4180 spin_unlock_irqrestore(&device_domain_lock, flags);
4183 domain_detach_iommu(domain, iommu);
4184 if (!domain_type_is_vm_or_si(domain))
4185 iommu_detach_domain(domain, iommu);
4189 static int md_domain_init(struct dmar_domain *domain, int guest_width)
4193 init_iova_domain(&domain->iovad, DMA_32BIT_PFN);
4194 domain_reserve_special_ranges(domain);
4196 /* calculate AGAW */
4197 domain->gaw = guest_width;
4198 adjust_width = guestwidth_to_adjustwidth(guest_width);
4199 domain->agaw = width_to_agaw(adjust_width);
4201 domain->iommu_coherency = 0;
4202 domain->iommu_snooping = 0;
4203 domain->iommu_superpage = 0;
4204 domain->max_addr = 0;
4206 /* always allocate the top pgd */
4207 domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
4210 domain_flush_cache(domain, domain->pgd, PAGE_SIZE);
4214 static int intel_iommu_domain_init(struct iommu_domain *domain)
4216 struct dmar_domain *dmar_domain;
4218 dmar_domain = alloc_domain(DOMAIN_FLAG_VIRTUAL_MACHINE);
4221 "intel_iommu_domain_init: dmar_domain == NULL\n");
4224 if (md_domain_init(dmar_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
4226 "intel_iommu_domain_init() failed\n");
4227 domain_exit(dmar_domain);
4230 domain_update_iommu_cap(dmar_domain);
4231 domain->priv = dmar_domain;
4233 domain->geometry.aperture_start = 0;
4234 domain->geometry.aperture_end = __DOMAIN_MAX_ADDR(dmar_domain->gaw);
4235 domain->geometry.force_aperture = true;
4240 static void intel_iommu_domain_destroy(struct iommu_domain *domain)
4242 struct dmar_domain *dmar_domain = domain->priv;
4244 domain->priv = NULL;
4245 domain_exit(dmar_domain);
4248 static int intel_iommu_attach_device(struct iommu_domain *domain,
4251 struct dmar_domain *dmar_domain = domain->priv;
4252 struct intel_iommu *iommu;
4256 /* normally dev is not mapped */
4257 if (unlikely(domain_context_mapped(dev))) {
4258 struct dmar_domain *old_domain;
4260 old_domain = find_domain(dev);
4262 if (domain_type_is_vm_or_si(dmar_domain))
4263 domain_remove_one_dev_info(old_domain, dev);
4265 domain_remove_dev_info(old_domain);
4269 iommu = device_to_iommu(dev, &bus, &devfn);
4273 /* check if this iommu agaw is sufficient for max mapped address */
4274 addr_width = agaw_to_width(iommu->agaw);
4275 if (addr_width > cap_mgaw(iommu->cap))
4276 addr_width = cap_mgaw(iommu->cap);
4278 if (dmar_domain->max_addr > (1LL << addr_width)) {
4279 printk(KERN_ERR "%s: iommu width (%d) is not "
4280 "sufficient for the mapped address (%llx)\n",
4281 __func__, addr_width, dmar_domain->max_addr);
4284 dmar_domain->gaw = addr_width;
4287 * Knock out extra levels of page tables if necessary
4289 while (iommu->agaw < dmar_domain->agaw) {
4290 struct dma_pte *pte;
4292 pte = dmar_domain->pgd;
4293 if (dma_pte_present(pte)) {
4294 dmar_domain->pgd = (struct dma_pte *)
4295 phys_to_virt(dma_pte_addr(pte));
4296 free_pgtable_page(pte);
4298 dmar_domain->agaw--;
4301 return domain_add_dev_info(dmar_domain, dev, CONTEXT_TT_MULTI_LEVEL);
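/*
 * Note on the attach path above: a VM domain's page table may have been
 * built with more levels (a larger agaw) than this IOMMU supports, so the
 * unused top-level tables are peeled off one at a time until the domain's
 * agaw matches what the hardware can walk.
 */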
4304 static void intel_iommu_detach_device(struct iommu_domain *domain,
4307 struct dmar_domain *dmar_domain = domain->priv;
4309 domain_remove_one_dev_info(dmar_domain, dev);
4312 static int intel_iommu_map(struct iommu_domain *domain,
4313 unsigned long iova, phys_addr_t hpa,
4314 size_t size, int iommu_prot)
4316 struct dmar_domain *dmar_domain = domain->priv;
4321 if (iommu_prot & IOMMU_READ)
4322 prot |= DMA_PTE_READ;
4323 if (iommu_prot & IOMMU_WRITE)
4324 prot |= DMA_PTE_WRITE;
4325 if ((iommu_prot & IOMMU_CACHE) && dmar_domain->iommu_snooping)
4326 prot |= DMA_PTE_SNP;
4328 max_addr = iova + size;
4329 if (dmar_domain->max_addr < max_addr) {
4332 /* check if minimum agaw is sufficient for mapped address */
4333 end = __DOMAIN_MAX_ADDR(dmar_domain->gaw) + 1;
4334 if (end < max_addr) {
4335 printk(KERN_ERR "%s: iommu width (%d) is not "
4336 "sufficient for the mapped address (%llx)\n",
4337 __func__, dmar_domain->gaw, max_addr);
4340 dmar_domain->max_addr = max_addr;
4342 /* Round up size to next multiple of PAGE_SIZE, if it and
4343 the low bits of hpa would take us onto the next page */
4344 size = aligned_nrpages(hpa, size);
4345 ret = domain_pfn_mapping(dmar_domain, iova >> VTD_PAGE_SHIFT,
4346 hpa >> VTD_PAGE_SHIFT, size, prot);
4350 static size_t intel_iommu_unmap(struct iommu_domain *domain,
4351 unsigned long iova, size_t size)
4353 struct dmar_domain *dmar_domain = domain->priv;
4354 struct page *freelist = NULL;
4355 struct intel_iommu *iommu;
4356 unsigned long start_pfn, last_pfn;
4357 unsigned int npages;
4358 int iommu_id, num, ndomains, level = 0;
4360 /* Cope with horrid API which requires us to unmap more than the
4361 size argument if it happens to be a large-page mapping. */
4362 if (!pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, &level))
4365 if (size < VTD_PAGE_SIZE << level_to_offset_bits(level))
4366 size = VTD_PAGE_SIZE << level_to_offset_bits(level);
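/*
 * For example, if the IOVA falls inside a 2MiB superpage PTE (level 2),
 * level_to_offset_bits() yields 9 and the size is rounded up to 2MiB so the
 * whole superpage is unmapped, as the comment above requires.
 */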
4368 start_pfn = iova >> VTD_PAGE_SHIFT;
4369 last_pfn = (iova + size - 1) >> VTD_PAGE_SHIFT;
4371 freelist = domain_unmap(dmar_domain, start_pfn, last_pfn);
4373 npages = last_pfn - start_pfn + 1;
4375 for_each_set_bit(iommu_id, dmar_domain->iommu_bmp, g_num_of_iommus) {
4376 iommu = g_iommus[iommu_id];
4379 * find bit position of dmar_domain
4381 ndomains = cap_ndoms(iommu->cap);
4382 for_each_set_bit(num, iommu->domain_ids, ndomains) {
4383 if (iommu->domains[num] == dmar_domain)
4384 iommu_flush_iotlb_psi(iommu, num, start_pfn,
4385 npages, !freelist, 0);
4390 dma_free_pagelist(freelist);
4392 if (dmar_domain->max_addr == iova + size)
4393 dmar_domain->max_addr = iova;
4398 static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
4401 struct dmar_domain *dmar_domain = domain->priv;
4402 struct dma_pte *pte;
4406 pte = pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, &level);
4408 phys = dma_pte_addr(pte);
4413 static int intel_iommu_domain_has_cap(struct iommu_domain *domain,
4416 struct dmar_domain *dmar_domain = domain->priv;
4418 if (cap == IOMMU_CAP_CACHE_COHERENCY)
4419 return dmar_domain->iommu_snooping;
4420 if (cap == IOMMU_CAP_INTR_REMAP)
4421 return irq_remapping_enabled;
4426 static int intel_iommu_add_device(struct device *dev)
4428 struct intel_iommu *iommu;
4429 struct iommu_group *group;
4432 iommu = device_to_iommu(dev, &bus, &devfn);
4436 iommu_device_link(iommu->iommu_dev, dev);
4438 group = iommu_group_get_for_dev(dev);
4441 return PTR_ERR(group);
4443 iommu_group_put(group);
4447 static void intel_iommu_remove_device(struct device *dev)
4449 struct intel_iommu *iommu;
4452 iommu = device_to_iommu(dev, &bus, &devfn);
4456 iommu_group_remove_device(dev);
4458 iommu_device_unlink(iommu->iommu_dev, dev);
4461 static const struct iommu_ops intel_iommu_ops = {
4462 .domain_init = intel_iommu_domain_init,
4463 .domain_destroy = intel_iommu_domain_destroy,
4464 .attach_dev = intel_iommu_attach_device,
4465 .detach_dev = intel_iommu_detach_device,
4466 .map = intel_iommu_map,
4467 .unmap = intel_iommu_unmap,
4468 .iova_to_phys = intel_iommu_iova_to_phys,
4469 .domain_has_cap = intel_iommu_domain_has_cap,
4470 .add_device = intel_iommu_add_device,
4471 .remove_device = intel_iommu_remove_device,
4472 .pgsize_bitmap = INTEL_IOMMU_PGSIZES,
4475 static void quirk_iommu_g4x_gfx(struct pci_dev *dev)
4477 /* G4x/GM45 integrated gfx dmar support is totally busted. */
4478 printk(KERN_INFO "DMAR: Disabling IOMMU for graphics on this chipset\n");
4482 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_g4x_gfx);
4483 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e00, quirk_iommu_g4x_gfx);
4484 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e10, quirk_iommu_g4x_gfx);
4485 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e20, quirk_iommu_g4x_gfx);
4486 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e30, quirk_iommu_g4x_gfx);
4487 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e40, quirk_iommu_g4x_gfx);
4488 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e90, quirk_iommu_g4x_gfx);
4490 static void quirk_iommu_rwbf(struct pci_dev *dev)
4493 * Mobile 4 Series Chipset neglects to set RWBF capability,
4494 * but needs it. Same seems to hold for the desktop versions.
4496 printk(KERN_INFO "DMAR: Forcing write-buffer flush capability\n");
4500 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_rwbf);
4501 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e00, quirk_iommu_rwbf);
4502 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e10, quirk_iommu_rwbf);
4503 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e20, quirk_iommu_rwbf);
4504 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e30, quirk_iommu_rwbf);
4505 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e40, quirk_iommu_rwbf);
4506 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e90, quirk_iommu_rwbf);
4509 #define GGC_MEMORY_SIZE_MASK (0xf << 8)
4510 #define GGC_MEMORY_SIZE_NONE (0x0 << 8)
4511 #define GGC_MEMORY_SIZE_1M (0x1 << 8)
4512 #define GGC_MEMORY_SIZE_2M (0x3 << 8)
4513 #define GGC_MEMORY_VT_ENABLED (0x8 << 8)
4514 #define GGC_MEMORY_SIZE_2M_VT (0x9 << 8)
4515 #define GGC_MEMORY_SIZE_3M_VT (0xa << 8)
4516 #define GGC_MEMORY_SIZE_4M_VT (0xb << 8)
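/*
 * GGC is the graphics control register read from the quirked host-bridge
 * device's config space below; the values above describe how much stolen
 * memory the BIOS set aside, and GGC_MEMORY_VT_ENABLED indicates a shadow
 * GTT usable under VT-d was allocated. Without it the quirk disables
 * translation for the integrated graphics device entirely.
 */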
4518 static void quirk_calpella_no_shadow_gtt(struct pci_dev *dev)
4522 if (pci_read_config_word(dev, GGC, &ggc))
4525 if (!(ggc & GGC_MEMORY_VT_ENABLED)) {
4526 printk(KERN_INFO "DMAR: BIOS has allocated no shadow GTT; disabling IOMMU for graphics\n");
4528 } else if (dmar_map_gfx) {
4529 /* we have to ensure the gfx device is idle before we flush */
4530 printk(KERN_INFO "DMAR: Disabling batched IOTLB flush on Ironlake\n");
4531 intel_iommu_strict = 1;
4534 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0040, quirk_calpella_no_shadow_gtt);
4535 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0044, quirk_calpella_no_shadow_gtt);
4536 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0062, quirk_calpella_no_shadow_gtt);
4537 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x006a, quirk_calpella_no_shadow_gtt);
4539 /* On Tylersburg chipsets, some BIOSes have been known to enable the
4540 ISOCH DMAR unit for the Azalia sound device, but not give it any
4541 TLB entries, which causes it to deadlock. Check for that. We do
4542 this in a function called from init_dmars(), instead of in a PCI
4543 quirk, because we don't want to print the obnoxious "BIOS broken"
4544 message if VT-d is actually disabled.
4546 static void __init check_tylersburg_isoch(void)
4548 struct pci_dev *pdev;
4549 uint32_t vtisochctrl;
4551 /* If there's no Azalia in the system anyway, forget it. */
4552 pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x3a3e, NULL);
4557 /* System Management Registers. Might be hidden, in which case
4558 we can't do the sanity check. But that's OK, because the
4559 known-broken BIOSes _don't_ actually hide it, so far. */
4560 pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x342e, NULL);
4564 if (pci_read_config_dword(pdev, 0x188, &vtisochctrl)) {
4571 /* If Azalia DMA is routed to the non-isoch DMAR unit, fine. */
4572 if (vtisochctrl & 1)
4575 /* Drop all bits other than the number of TLB entries */
4576 vtisochctrl &= 0x1c;
4578 /* If we have the recommended number of TLB entries (16), fine. */
4579 if (vtisochctrl == 0x10)
4582 /* Zero TLB entries? You get to ride the short bus to school. */
4584 WARN(1, "Your BIOS is broken; DMA routed to ISOCH DMAR unit but no TLB space.\n"
4585 "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
4586 dmi_get_system_info(DMI_BIOS_VENDOR),
4587 dmi_get_system_info(DMI_BIOS_VERSION),
4588 dmi_get_system_info(DMI_PRODUCT_VERSION));
4589 iommu_identity_mapping |= IDENTMAP_AZALIA;
4593 printk(KERN_WARNING "DMAR: Recommended number of TLB entries for the ISOCH unit is 16; your BIOS set %d\n",