2 * Copyright (c) 2006, Intel Corporation.
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * You should have received a copy of the GNU General Public License along with
14 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
15 * Place - Suite 330, Boston, MA 02111-1307 USA.
17 * Copyright (C) 2006-2008 Intel Corporation
18 * Author: Ashok Raj <ashok.raj@intel.com>
19 * Author: Shaohua Li <shaohua.li@intel.com>
20 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
21 * Author: Fenghua Yu <fenghua.yu@intel.com>
24 #include <linux/init.h>
25 #include <linux/bitmap.h>
26 #include <linux/debugfs.h>
27 #include <linux/export.h>
28 #include <linux/slab.h>
29 #include <linux/irq.h>
30 #include <linux/interrupt.h>
31 #include <linux/spinlock.h>
32 #include <linux/pci.h>
33 #include <linux/dmar.h>
34 #include <linux/dma-mapping.h>
35 #include <linux/mempool.h>
36 #include <linux/timer.h>
37 #include <linux/iova.h>
38 #include <linux/iommu.h>
39 #include <linux/intel-iommu.h>
40 #include <linux/syscore_ops.h>
41 #include <linux/tboot.h>
42 #include <linux/dmi.h>
43 #include <linux/pci-ats.h>
44 #include <linux/memblock.h>
45 #include <asm/irq_remapping.h>
46 #include <asm/cacheflush.h>
47 #include <asm/iommu.h>
49 #define ROOT_SIZE VTD_PAGE_SIZE
50 #define CONTEXT_SIZE VTD_PAGE_SIZE
52 #define IS_GFX_DEVICE(pdev) ((pdev->class >> 16) == PCI_BASE_CLASS_DISPLAY)
53 #define IS_ISA_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_BRIDGE_ISA)
54 #define IS_AZALIA(pdev) ((pdev)->vendor == 0x8086 && (pdev)->device == 0x3a3e)
56 #define IOAPIC_RANGE_START (0xfee00000)
57 #define IOAPIC_RANGE_END (0xfeefffff)
58 #define IOVA_START_ADDR (0x1000)
60 #define DEFAULT_DOMAIN_ADDRESS_WIDTH 48
62 #define MAX_AGAW_WIDTH 64
64 #define __DOMAIN_MAX_PFN(gaw) ((((uint64_t)1) << (gaw-VTD_PAGE_SHIFT)) - 1)
65 #define __DOMAIN_MAX_ADDR(gaw) ((((uint64_t)1) << gaw) - 1)
67 /* We limit DOMAIN_MAX_PFN to fit in an unsigned long, and DOMAIN_MAX_ADDR
68 to match. That way, we can use 'unsigned long' for PFNs with impunity. */
69 #define DOMAIN_MAX_PFN(gaw) ((unsigned long) min_t(uint64_t, \
70 __DOMAIN_MAX_PFN(gaw), (unsigned long)-1))
71 #define DOMAIN_MAX_ADDR(gaw) (((uint64_t)__DOMAIN_MAX_PFN(gaw)) << VTD_PAGE_SHIFT)
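/*
 * Worked example (added note, not part of the original source): with the
 * default 48-bit guest address width, __DOMAIN_MAX_PFN(48) is
 * (1ULL << 36) - 1 == 0xFFFFFFFFF, so DOMAIN_MAX_ADDR(48) is
 * 0xFFFFFFFFF000, the base of the last 4KiB page below the 2^48 boundary.
 * On 32-bit kernels DOMAIN_MAX_PFN is clamped to ULONG_MAX so that PFN
 * arithmetic stays within 'unsigned long'.
 */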
73 #define IOVA_PFN(addr) ((addr) >> PAGE_SHIFT)
74 #define DMA_32BIT_PFN IOVA_PFN(DMA_BIT_MASK(32))
75 #define DMA_64BIT_PFN IOVA_PFN(DMA_BIT_MASK(64))
77 /* page table handling */
78 #define LEVEL_STRIDE (9)
79 #define LEVEL_MASK (((u64)1 << LEVEL_STRIDE) - 1)
82 * This bitmap is used to advertise the page sizes our hardware supports
83 * to the IOMMU core, which will then use this information to split
84 * physically contiguous memory regions it is mapping into page sizes
87 * Traditionally the IOMMU core just handed us the mappings directly,
88 * after making sure the size is a power-of-two multiple of 4KiB and that the
89 * mapping has natural alignment.
91 * To retain this behavior, we currently advertise that we support
92 * all page sizes that are a power-of-two multiple of 4KiB.
94 * If at some point we'd like to utilize the IOMMU core's new behavior,
95 * we could change this to advertise the real page sizes we support.
97 #define INTEL_IOMMU_PGSIZES (~0xFFFUL)
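/*
 * Added illustration: ~0xFFFUL sets every bit from bit 12 upward, so the
 * IOMMU core may hand us any naturally aligned, power-of-two-sized region
 * of at least 4KiB; the real hardware page sizes (4KiB/2MiB/1GiB) are
 * chosen internally when the mappings are actually written out.
 */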
99 static inline int agaw_to_level(int agaw)
104 static inline int agaw_to_width(int agaw)
106 return 30 + agaw * LEVEL_STRIDE;
109 static inline int width_to_agaw(int width)
111 return (width - 30) / LEVEL_STRIDE;
114 static inline unsigned int level_to_offset_bits(int level)
116 return (level - 1) * LEVEL_STRIDE;
119 static inline int pfn_level_offset(unsigned long pfn, int level)
121 return (pfn >> level_to_offset_bits(level)) & LEVEL_MASK;
124 static inline unsigned long level_mask(int level)
126 return -1UL << level_to_offset_bits(level);
129 static inline unsigned long level_size(int level)
131 return 1UL << level_to_offset_bits(level);
134 static inline unsigned long align_to_level(unsigned long pfn, int level)
136 return (pfn + level_size(level) - 1) & level_mask(level);
139 static inline unsigned long lvl_to_nr_pages(unsigned int lvl)
141 return 1 << ((lvl - 1) * LEVEL_STRIDE);
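/*
 * Added sanity-check example using the helpers above: a 48-bit adjusted
 * width gives width_to_agaw(48) == 2, and level_size(1) == 1 page (4KiB),
 * level_size(2) == 512 pages (2MiB), level_size(3) == 512*512 pages
 * (1GiB), matching the 9-bit stride of each page-table level.
 */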
144 /* VT-d pages must always be _smaller_ than MM pages. Otherwise things
145 are never going to work. */
146 static inline unsigned long dma_to_mm_pfn(unsigned long dma_pfn)
148 return dma_pfn >> (PAGE_SHIFT - VTD_PAGE_SHIFT);
151 static inline unsigned long mm_to_dma_pfn(unsigned long mm_pfn)
153 return mm_pfn << (PAGE_SHIFT - VTD_PAGE_SHIFT);
155 static inline unsigned long page_to_dma_pfn(struct page *pg)
157 return mm_to_dma_pfn(page_to_pfn(pg));
159 static inline unsigned long virt_to_dma_pfn(void *p)
161 return page_to_dma_pfn(virt_to_page(p));
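/*
 * Added example: on x86, PAGE_SHIFT == VTD_PAGE_SHIFT == 12, so these
 * conversions are identity operations; on a (hypothetical) configuration
 * with 64KiB MM pages one mm_pfn would map to 16 consecutive dma_pfns,
 * which is why VT-d pages must never be larger than MM pages.
 */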
164 /* global iommu list, set NULL for ignored DMAR units */
165 static struct intel_iommu **g_iommus;
167 static void __init check_tylersburg_isoch(void);
168 static int rwbf_quirk;
171 * set to 1 to panic the kernel if VT-d cannot be enabled successfully
172 * (used when the kernel is launched with TXT)
174 static int force_on = 0;
179 * 12-63: Context Ptr (12 - (haw-1))
186 #define ROOT_ENTRY_NR (VTD_PAGE_SIZE/sizeof(struct root_entry))
187 static inline bool root_present(struct root_entry *root)
189 return (root->val & 1);
191 static inline void set_root_present(struct root_entry *root)
195 static inline void set_root_value(struct root_entry *root, unsigned long value)
197 root->val |= value & VTD_PAGE_MASK;
200 static inline struct context_entry *
201 get_context_addr_from_root(struct root_entry *root)
203 return (struct context_entry *)
204 (root_present(root)?phys_to_virt(
205 root->val & VTD_PAGE_MASK) :
212 * 1: fault processing disable
213 * 2-3: translation type
214 * 12-63: address space root
220 struct context_entry {
225 static inline bool context_present(struct context_entry *context)
227 return (context->lo & 1);
229 static inline void context_set_present(struct context_entry *context)
234 static inline void context_set_fault_enable(struct context_entry *context)
236 context->lo &= (((u64)-1) << 2) | 1;
239 static inline void context_set_translation_type(struct context_entry *context,
242 context->lo &= (((u64)-1) << 4) | 3;
243 context->lo |= (value & 3) << 2;
246 static inline void context_set_address_root(struct context_entry *context,
249 context->lo |= value & VTD_PAGE_MASK;
252 static inline void context_set_address_width(struct context_entry *context,
255 context->hi |= value & 7;
258 static inline void context_set_domain_id(struct context_entry *context,
261 context->hi |= (value & ((1 << 16) - 1)) << 8;
264 static inline void context_clear_entry(struct context_entry *context)
277 * 12-63: Host physical address
283 static inline void dma_clear_pte(struct dma_pte *pte)
288 static inline void dma_set_pte_readable(struct dma_pte *pte)
290 pte->val |= DMA_PTE_READ;
293 static inline void dma_set_pte_writable(struct dma_pte *pte)
295 pte->val |= DMA_PTE_WRITE;
298 static inline void dma_set_pte_snp(struct dma_pte *pte)
300 pte->val |= DMA_PTE_SNP;
303 static inline void dma_set_pte_prot(struct dma_pte *pte, unsigned long prot)
305 pte->val = (pte->val & ~3) | (prot & 3);
308 static inline u64 dma_pte_addr(struct dma_pte *pte)
311 return pte->val & VTD_PAGE_MASK;
313 /* Must have a full atomic 64-bit read */
314 return __cmpxchg64(&pte->val, 0ULL, 0ULL) & VTD_PAGE_MASK;
318 static inline void dma_set_pte_pfn(struct dma_pte *pte, unsigned long pfn)
320 pte->val |= (uint64_t)pfn << VTD_PAGE_SHIFT;
323 static inline bool dma_pte_present(struct dma_pte *pte)
325 return (pte->val & 3) != 0;
328 static inline bool dma_pte_superpage(struct dma_pte *pte)
330 return (pte->val & (1 << 7));
333 static inline int first_pte_in_page(struct dma_pte *pte)
335 return !((unsigned long)pte & ~VTD_PAGE_MASK);
339 * This domain is a static identity mapping domain.
340 * 1. This domain creates a static 1:1 mapping of all usable memory.
341 * 2. It maps to each iommu if successful.
342 * 3. Each iommu maps to this domain if successful.
344 static struct dmar_domain *si_domain;
345 static int hw_pass_through = 1;
347 /* devices under the same p2p bridge are owned in one domain */
348 #define DOMAIN_FLAG_P2P_MULTIPLE_DEVICES (1 << 0)
350 /* domain represents a virtual machine; more than one device
351 * across iommus may be owned in one domain, e.g. kvm guest.
353 #define DOMAIN_FLAG_VIRTUAL_MACHINE (1 << 1)
355 /* si_domain contains multiple devices */
356 #define DOMAIN_FLAG_STATIC_IDENTITY (1 << 2)
358 /* define the limit of IOMMUs supported in each domain */
360 # define IOMMU_UNITS_SUPPORTED MAX_IO_APICS
362 # define IOMMU_UNITS_SUPPORTED 64
366 int id; /* domain id */
367 int nid; /* node id */
368 DECLARE_BITMAP(iommu_bmp, IOMMU_UNITS_SUPPORTED);
369 /* bitmap of iommus this domain uses */
371 struct list_head devices; /* all devices' list */
372 struct iova_domain iovad; /* iova's that belong to this domain */
374 struct dma_pte *pgd; /* virtual address */
375 int gaw; /* max guest address width */
377 /* adjusted guest address width, 0 is level 2 30-bit */
380 int flags; /* flags to find out type of domain */
382 int iommu_coherency;/* indicate coherency of iommu access */
383 int iommu_snooping; /* indicate snooping control feature*/
384 int iommu_count; /* reference count of iommu */
385 int iommu_superpage;/* Level of superpages supported:
386 0 == 4KiB (no superpages), 1 == 2MiB,
387 2 == 1GiB, 3 == 512GiB, 4 == 1TiB */
388 spinlock_t iommu_lock; /* protect iommu set in domain */
389 u64 max_addr; /* maximum mapped address */
392 /* PCI domain-device relationship */
393 struct device_domain_info {
394 struct list_head link; /* link to domain siblings */
395 struct list_head global; /* link to global list */
396 int segment; /* PCI domain */
397 u8 bus; /* PCI bus number */
398 u8 devfn; /* PCI devfn number */
399 struct pci_dev *dev; /* it's NULL for PCIe-to-PCI bridge */
400 struct intel_iommu *iommu; /* IOMMU used by this device */
401 struct dmar_domain *domain; /* pointer to domain */
404 static void flush_unmaps_timeout(unsigned long data);
406 DEFINE_TIMER(unmap_timer, flush_unmaps_timeout, 0, 0);
408 #define HIGH_WATER_MARK 250
409 struct deferred_flush_tables {
411 struct iova *iova[HIGH_WATER_MARK];
412 struct dmar_domain *domain[HIGH_WATER_MARK];
415 static struct deferred_flush_tables *deferred_flush;
417 /* bitmap for indexing intel_iommus */
418 static int g_num_of_iommus;
420 static DEFINE_SPINLOCK(async_umap_flush_lock);
421 static LIST_HEAD(unmaps_to_do);
424 static long list_size;
426 static void domain_remove_dev_info(struct dmar_domain *domain);
428 #ifdef CONFIG_INTEL_IOMMU_DEFAULT_ON
429 int dmar_disabled = 0;
431 int dmar_disabled = 1;
432 #endif /*CONFIG_INTEL_IOMMU_DEFAULT_ON*/
434 int intel_iommu_enabled = 0;
435 EXPORT_SYMBOL_GPL(intel_iommu_enabled);
437 static int dmar_map_gfx = 1;
438 static int dmar_forcedac;
439 static int intel_iommu_strict;
440 static int intel_iommu_superpage = 1;
442 int intel_iommu_gfx_mapped;
443 EXPORT_SYMBOL_GPL(intel_iommu_gfx_mapped);
445 #define DUMMY_DEVICE_DOMAIN_INFO ((struct device_domain_info *)(-1))
446 static DEFINE_SPINLOCK(device_domain_lock);
447 static LIST_HEAD(device_domain_list);
449 static struct iommu_ops intel_iommu_ops;
451 static int __init intel_iommu_setup(char *str)
456 if (!strncmp(str, "on", 2)) {
458 printk(KERN_INFO "Intel-IOMMU: enabled\n");
459 } else if (!strncmp(str, "off", 3)) {
461 printk(KERN_INFO "Intel-IOMMU: disabled\n");
462 } else if (!strncmp(str, "igfx_off", 8)) {
465 "Intel-IOMMU: disable GFX device mapping\n");
466 } else if (!strncmp(str, "forcedac", 8)) {
468 "Intel-IOMMU: Forcing DAC for PCI devices\n");
470 } else if (!strncmp(str, "strict", 6)) {
472 "Intel-IOMMU: disable batched IOTLB flush\n");
473 intel_iommu_strict = 1;
474 } else if (!strncmp(str, "sp_off", 6)) {
476 "Intel-IOMMU: disable supported super page\n");
477 intel_iommu_superpage = 0;
480 str += strcspn(str, ",");
486 __setup("intel_iommu=", intel_iommu_setup);
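/*
 * Usage note (added): the options above can be combined on the kernel
 * command line as a comma-separated list, e.g. something like
 * "intel_iommu=on,strict"; intel_iommu_setup() walks the list with
 * strcspn() and applies each recognised keyword in turn.
 */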
488 static struct kmem_cache *iommu_domain_cache;
489 static struct kmem_cache *iommu_devinfo_cache;
490 static struct kmem_cache *iommu_iova_cache;
492 static inline void *alloc_pgtable_page(int node)
497 page = alloc_pages_node(node, GFP_ATOMIC | __GFP_ZERO, 0);
499 vaddr = page_address(page);
503 static inline void free_pgtable_page(void *vaddr)
505 free_page((unsigned long)vaddr);
508 static inline void *alloc_domain_mem(void)
510 return kmem_cache_alloc(iommu_domain_cache, GFP_ATOMIC);
513 static void free_domain_mem(void *vaddr)
515 kmem_cache_free(iommu_domain_cache, vaddr);
518 static inline void * alloc_devinfo_mem(void)
520 return kmem_cache_alloc(iommu_devinfo_cache, GFP_ATOMIC);
523 static inline void free_devinfo_mem(void *vaddr)
525 kmem_cache_free(iommu_devinfo_cache, vaddr);
528 struct iova *alloc_iova_mem(void)
530 return kmem_cache_alloc(iommu_iova_cache, GFP_ATOMIC);
533 void free_iova_mem(struct iova *iova)
535 kmem_cache_free(iommu_iova_cache, iova);
539 static int __iommu_calculate_agaw(struct intel_iommu *iommu, int max_gaw)
544 sagaw = cap_sagaw(iommu->cap);
545 for (agaw = width_to_agaw(max_gaw);
547 if (test_bit(agaw, &sagaw))
555 * Calculate max SAGAW for each iommu.
557 int iommu_calculate_max_sagaw(struct intel_iommu *iommu)
559 return __iommu_calculate_agaw(iommu, MAX_AGAW_WIDTH);
563 * Calculate agaw for each iommu.
564 * "SAGAW" may be different across iommus; use a default agaw, and
565 * fall back to a smaller supported agaw for iommus that don't support the default.
567 int iommu_calculate_agaw(struct intel_iommu *iommu)
569 return __iommu_calculate_agaw(iommu, DEFAULT_DOMAIN_ADDRESS_WIDTH);
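/*
 * Added example: with DEFAULT_DOMAIN_ADDRESS_WIDTH == 48 the preferred
 * agaw is width_to_agaw(48) == 2 (a 4-level table); if SAGAW does not
 * advertise that value, __iommu_calculate_agaw() walks downward and
 * settles on the next smaller agaw the hardware reports as supported.
 */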
572 /* This function only returns a single iommu in a domain */
573 static struct intel_iommu *domain_get_iommu(struct dmar_domain *domain)
577 /* si_domain and vm domain should not get here. */
578 BUG_ON(domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE);
579 BUG_ON(domain->flags & DOMAIN_FLAG_STATIC_IDENTITY);
581 iommu_id = find_first_bit(domain->iommu_bmp, g_num_of_iommus);
582 if (iommu_id < 0 || iommu_id >= g_num_of_iommus)
585 return g_iommus[iommu_id];
588 static void domain_update_iommu_coherency(struct dmar_domain *domain)
592 domain->iommu_coherency = 1;
594 for_each_set_bit(i, domain->iommu_bmp, g_num_of_iommus) {
595 if (!ecap_coherent(g_iommus[i]->ecap)) {
596 domain->iommu_coherency = 0;
602 static void domain_update_iommu_snooping(struct dmar_domain *domain)
606 domain->iommu_snooping = 1;
608 for_each_set_bit(i, domain->iommu_bmp, g_num_of_iommus) {
609 if (!ecap_sc_support(g_iommus[i]->ecap)) {
610 domain->iommu_snooping = 0;
616 static void domain_update_iommu_superpage(struct dmar_domain *domain)
618 struct dmar_drhd_unit *drhd;
619 struct intel_iommu *iommu = NULL;
622 if (!intel_iommu_superpage) {
623 domain->iommu_superpage = 0;
627 /* set iommu_superpage to the smallest common denominator */
628 for_each_active_iommu(iommu, drhd) {
629 mask &= cap_super_page_val(iommu->cap);
634 domain->iommu_superpage = fls(mask);
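/*
 * Added example: cap_super_page_val() reports 2MiB support in bit 0 and
 * 1GiB support in bit 1, so if the AND across all active iommus leaves
 * only 0x1, fls(0x1) == 1 and iommu_superpage == 1, i.e. 2MiB superpages
 * only (see the field comment in struct dmar_domain).
 */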
637 /* Some capabilities may be different across iommus */
638 static void domain_update_iommu_cap(struct dmar_domain *domain)
640 domain_update_iommu_coherency(domain);
641 domain_update_iommu_snooping(domain);
642 domain_update_iommu_superpage(domain);
645 static struct intel_iommu *device_to_iommu(int segment, u8 bus, u8 devfn)
647 struct dmar_drhd_unit *drhd = NULL;
650 for_each_drhd_unit(drhd) {
653 if (segment != drhd->segment)
656 for (i = 0; i < drhd->devices_cnt; i++) {
657 if (drhd->devices[i] &&
658 drhd->devices[i]->bus->number == bus &&
659 drhd->devices[i]->devfn == devfn)
661 if (drhd->devices[i] &&
662 drhd->devices[i]->subordinate &&
663 drhd->devices[i]->subordinate->number <= bus &&
664 drhd->devices[i]->subordinate->subordinate >= bus)
668 if (drhd->include_all)
675 static void domain_flush_cache(struct dmar_domain *domain,
676 void *addr, int size)
678 if (!domain->iommu_coherency)
679 clflush_cache_range(addr, size);
682 /* Gets context entry for a given bus and devfn */
683 static struct context_entry * device_to_context_entry(struct intel_iommu *iommu,
686 struct root_entry *root;
687 struct context_entry *context;
688 unsigned long phy_addr;
691 spin_lock_irqsave(&iommu->lock, flags);
692 root = &iommu->root_entry[bus];
693 context = get_context_addr_from_root(root);
695 context = (struct context_entry *)
696 alloc_pgtable_page(iommu->node);
698 spin_unlock_irqrestore(&iommu->lock, flags);
701 __iommu_flush_cache(iommu, (void *)context, CONTEXT_SIZE);
702 phy_addr = virt_to_phys((void *)context);
703 set_root_value(root, phy_addr);
704 set_root_present(root);
705 __iommu_flush_cache(iommu, root, sizeof(*root));
707 spin_unlock_irqrestore(&iommu->lock, flags);
708 return &context[devfn];
711 static int device_context_mapped(struct intel_iommu *iommu, u8 bus, u8 devfn)
713 struct root_entry *root;
714 struct context_entry *context;
718 spin_lock_irqsave(&iommu->lock, flags);
719 root = &iommu->root_entry[bus];
720 context = get_context_addr_from_root(root);
725 ret = context_present(&context[devfn]);
727 spin_unlock_irqrestore(&iommu->lock, flags);
731 static void clear_context_table(struct intel_iommu *iommu, u8 bus, u8 devfn)
733 struct root_entry *root;
734 struct context_entry *context;
737 spin_lock_irqsave(&iommu->lock, flags);
738 root = &iommu->root_entry[bus];
739 context = get_context_addr_from_root(root);
741 context_clear_entry(&context[devfn]);
742 __iommu_flush_cache(iommu, &context[devfn], \
745 spin_unlock_irqrestore(&iommu->lock, flags);
748 static void free_context_table(struct intel_iommu *iommu)
750 struct root_entry *root;
753 struct context_entry *context;
755 spin_lock_irqsave(&iommu->lock, flags);
756 if (!iommu->root_entry) {
759 for (i = 0; i < ROOT_ENTRY_NR; i++) {
760 root = &iommu->root_entry[i];
761 context = get_context_addr_from_root(root);
763 free_pgtable_page(context);
765 free_pgtable_page(iommu->root_entry);
766 iommu->root_entry = NULL;
768 spin_unlock_irqrestore(&iommu->lock, flags);
771 static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
772 unsigned long pfn, int target_level)
774 int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
775 struct dma_pte *parent, *pte = NULL;
776 int level = agaw_to_level(domain->agaw);
779 BUG_ON(!domain->pgd);
780 BUG_ON(addr_width < BITS_PER_LONG && pfn >> addr_width);
781 parent = domain->pgd;
786 offset = pfn_level_offset(pfn, level);
787 pte = &parent[offset];
788 if (!target_level && (dma_pte_superpage(pte) || !dma_pte_present(pte)))
790 if (level == target_level)
793 if (!dma_pte_present(pte)) {
796 tmp_page = alloc_pgtable_page(domain->nid);
801 domain_flush_cache(domain, tmp_page, VTD_PAGE_SIZE);
802 pteval = ((uint64_t)virt_to_dma_pfn(tmp_page) << VTD_PAGE_SHIFT) | DMA_PTE_READ | DMA_PTE_WRITE;
803 if (cmpxchg64(&pte->val, 0ULL, pteval)) {
804 /* Someone else set it while we were thinking; use theirs. */
805 free_pgtable_page(tmp_page);
808 domain_flush_cache(domain, pte, sizeof(*pte));
811 parent = phys_to_virt(dma_pte_addr(pte));
819 /* return the pte for an address at a specific level */
820 static struct dma_pte *dma_pfn_level_pte(struct dmar_domain *domain,
822 int level, int *large_page)
824 struct dma_pte *parent, *pte = NULL;
825 int total = agaw_to_level(domain->agaw);
828 parent = domain->pgd;
829 while (level <= total) {
830 offset = pfn_level_offset(pfn, total);
831 pte = &parent[offset];
835 if (!dma_pte_present(pte)) {
840 if (pte->val & DMA_PTE_LARGE_PAGE) {
845 parent = phys_to_virt(dma_pte_addr(pte));
851 /* clear last level pte; a tlb flush should follow */
852 static int dma_pte_clear_range(struct dmar_domain *domain,
853 unsigned long start_pfn,
854 unsigned long last_pfn)
856 int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
857 unsigned int large_page = 1;
858 struct dma_pte *first_pte, *pte;
861 BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width);
862 BUG_ON(addr_width < BITS_PER_LONG && last_pfn >> addr_width);
863 BUG_ON(start_pfn > last_pfn);
865 /* we don't need lock here; nobody else touches the iova range */
868 first_pte = pte = dma_pfn_level_pte(domain, start_pfn, 1, &large_page);
870 start_pfn = align_to_level(start_pfn + 1, large_page + 1);
875 start_pfn += lvl_to_nr_pages(large_page);
877 } while (start_pfn <= last_pfn && !first_pte_in_page(pte));
879 domain_flush_cache(domain, first_pte,
880 (void *)pte - (void *)first_pte);
882 } while (start_pfn && start_pfn <= last_pfn);
884 order = (large_page - 1) * 9;
888 /* free page table pages. last level pte should already be cleared */
889 static void dma_pte_free_pagetable(struct dmar_domain *domain,
890 unsigned long start_pfn,
891 unsigned long last_pfn)
893 int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
894 struct dma_pte *first_pte, *pte;
895 int total = agaw_to_level(domain->agaw);
900 BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width);
901 BUG_ON(addr_width < BITS_PER_LONG && last_pfn >> addr_width);
902 BUG_ON(start_pfn > last_pfn);
904 /* We don't need lock here; nobody else touches the iova range */
906 while (level <= total) {
907 tmp = align_to_level(start_pfn, level);
909 /* If we can't even clear one PTE at this level, we're done */
910 if (tmp + level_size(level) - 1 > last_pfn)
915 first_pte = pte = dma_pfn_level_pte(domain, tmp, level, &large_page);
916 if (large_page > level)
917 level = large_page + 1;
919 tmp = align_to_level(tmp + 1, level + 1);
923 if (dma_pte_present(pte)) {
924 free_pgtable_page(phys_to_virt(dma_pte_addr(pte)));
928 tmp += level_size(level);
929 } while (!first_pte_in_page(pte) &&
930 tmp + level_size(level) - 1 <= last_pfn);
932 domain_flush_cache(domain, first_pte,
933 (void *)pte - (void *)first_pte);
935 } while (tmp && tmp + level_size(level) - 1 <= last_pfn);
939 if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) {
940 free_pgtable_page(domain->pgd);
946 static int iommu_alloc_root_entry(struct intel_iommu *iommu)
948 struct root_entry *root;
951 root = (struct root_entry *)alloc_pgtable_page(iommu->node);
955 __iommu_flush_cache(iommu, root, ROOT_SIZE);
957 spin_lock_irqsave(&iommu->lock, flags);
958 iommu->root_entry = root;
959 spin_unlock_irqrestore(&iommu->lock, flags);
964 static void iommu_set_root_entry(struct intel_iommu *iommu)
970 addr = iommu->root_entry;
972 raw_spin_lock_irqsave(&iommu->register_lock, flag);
973 dmar_writeq(iommu->reg + DMAR_RTADDR_REG, virt_to_phys(addr));
975 writel(iommu->gcmd | DMA_GCMD_SRTP, iommu->reg + DMAR_GCMD_REG);
977 /* Make sure hardware completes it */
978 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
979 readl, (sts & DMA_GSTS_RTPS), sts);
981 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
984 static void iommu_flush_write_buffer(struct intel_iommu *iommu)
989 if (!rwbf_quirk && !cap_rwbf(iommu->cap))
992 raw_spin_lock_irqsave(&iommu->register_lock, flag);
993 writel(iommu->gcmd | DMA_GCMD_WBF, iommu->reg + DMAR_GCMD_REG);
995 /* Make sure hardware completes it */
996 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
997 readl, (!(val & DMA_GSTS_WBFS)), val);
999 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1002 /* return value determines whether we need a write buffer flush */
1003 static void __iommu_flush_context(struct intel_iommu *iommu,
1004 u16 did, u16 source_id, u8 function_mask,
1011 case DMA_CCMD_GLOBAL_INVL:
1012 val = DMA_CCMD_GLOBAL_INVL;
1014 case DMA_CCMD_DOMAIN_INVL:
1015 val = DMA_CCMD_DOMAIN_INVL|DMA_CCMD_DID(did);
1017 case DMA_CCMD_DEVICE_INVL:
1018 val = DMA_CCMD_DEVICE_INVL|DMA_CCMD_DID(did)
1019 | DMA_CCMD_SID(source_id) | DMA_CCMD_FM(function_mask);
1024 val |= DMA_CCMD_ICC;
1026 raw_spin_lock_irqsave(&iommu->register_lock, flag);
1027 dmar_writeq(iommu->reg + DMAR_CCMD_REG, val);
1029 /* Make sure hardware completes it */
1030 IOMMU_WAIT_OP(iommu, DMAR_CCMD_REG,
1031 dmar_readq, (!(val & DMA_CCMD_ICC)), val);
1033 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1036 /* return value determines whether we need a write buffer flush */
1037 static void __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did,
1038 u64 addr, unsigned int size_order, u64 type)
1040 int tlb_offset = ecap_iotlb_offset(iommu->ecap);
1041 u64 val = 0, val_iva = 0;
1045 case DMA_TLB_GLOBAL_FLUSH:
1046 /* global flush doesn't need to set IVA_REG */
1047 val = DMA_TLB_GLOBAL_FLUSH|DMA_TLB_IVT;
1049 case DMA_TLB_DSI_FLUSH:
1050 val = DMA_TLB_DSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
1052 case DMA_TLB_PSI_FLUSH:
1053 val = DMA_TLB_PSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
1054 /* Note: always flush non-leaf currently */
1055 val_iva = size_order | addr;
1060 /* Note: set drain read/write */
1063 * This is probably only for extra safety; it looks like we can
1064 * ignore it without any impact.
1066 if (cap_read_drain(iommu->cap))
1067 val |= DMA_TLB_READ_DRAIN;
1069 if (cap_write_drain(iommu->cap))
1070 val |= DMA_TLB_WRITE_DRAIN;
1072 raw_spin_lock_irqsave(&iommu->register_lock, flag);
1073 /* Note: Only uses first TLB reg currently */
1075 dmar_writeq(iommu->reg + tlb_offset, val_iva);
1076 dmar_writeq(iommu->reg + tlb_offset + 8, val);
1078 /* Make sure hardware completes it */
1079 IOMMU_WAIT_OP(iommu, tlb_offset + 8,
1080 dmar_readq, (!(val & DMA_TLB_IVT)), val);
1082 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1084 /* check IOTLB invalidation granularity */
1085 if (DMA_TLB_IAIG(val) == 0)
1086 printk(KERN_ERR"IOMMU: flush IOTLB failed\n");
1087 if (DMA_TLB_IAIG(val) != DMA_TLB_IIRG(type))
1088 pr_debug("IOMMU: tlb flush request %Lx, actual %Lx\n",
1089 (unsigned long long)DMA_TLB_IIRG(type),
1090 (unsigned long long)DMA_TLB_IAIG(val));
1093 static struct device_domain_info *iommu_support_dev_iotlb(
1094 struct dmar_domain *domain, int segment, u8 bus, u8 devfn)
1097 unsigned long flags;
1098 struct device_domain_info *info;
1099 struct intel_iommu *iommu = device_to_iommu(segment, bus, devfn);
1101 if (!ecap_dev_iotlb_support(iommu->ecap))
1107 spin_lock_irqsave(&device_domain_lock, flags);
1108 list_for_each_entry(info, &domain->devices, link)
1109 if (info->bus == bus && info->devfn == devfn) {
1113 spin_unlock_irqrestore(&device_domain_lock, flags);
1115 if (!found || !info->dev)
1118 if (!pci_find_ext_capability(info->dev, PCI_EXT_CAP_ID_ATS))
1121 if (!dmar_find_matched_atsr_unit(info->dev))
1124 info->iommu = iommu;
1129 static void iommu_enable_dev_iotlb(struct device_domain_info *info)
1134 pci_enable_ats(info->dev, VTD_PAGE_SHIFT);
1137 static void iommu_disable_dev_iotlb(struct device_domain_info *info)
1139 if (!info->dev || !pci_ats_enabled(info->dev))
1142 pci_disable_ats(info->dev);
1145 static void iommu_flush_dev_iotlb(struct dmar_domain *domain,
1146 u64 addr, unsigned mask)
1149 unsigned long flags;
1150 struct device_domain_info *info;
1152 spin_lock_irqsave(&device_domain_lock, flags);
1153 list_for_each_entry(info, &domain->devices, link) {
1154 if (!info->dev || !pci_ats_enabled(info->dev))
1157 sid = info->bus << 8 | info->devfn;
1158 qdep = pci_ats_queue_depth(info->dev);
1159 qi_flush_dev_iotlb(info->iommu, sid, qdep, addr, mask);
1161 spin_unlock_irqrestore(&device_domain_lock, flags);
1164 static void iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did,
1165 unsigned long pfn, unsigned int pages, int map)
1167 unsigned int mask = ilog2(__roundup_pow_of_two(pages));
1168 uint64_t addr = (uint64_t)pfn << VTD_PAGE_SHIFT;
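	/*
	 * Added example: flushing 3 pages rounds up to 4, so mask == 2 and the
	 * hardware invalidates a naturally aligned 4-page (16KiB) region that
	 * covers the request, per the PSI alignment rule noted below.
	 */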
1173 * Fall back to domain-selective flush if there is no PSI support or the size is too big.
1175 * PSI requires the page size to be 2^x, and the base address to be naturally
1176 * aligned to the size
1178 if (!cap_pgsel_inv(iommu->cap) || mask > cap_max_amask_val(iommu->cap))
1179 iommu->flush.flush_iotlb(iommu, did, 0, 0,
1182 iommu->flush.flush_iotlb(iommu, did, addr, mask,
1186 * In caching mode, changes of pages from non-present to present require
1187 * a flush. However, the device IOTLB doesn't need to be flushed in this case.
1189 if (!cap_caching_mode(iommu->cap) || !map)
1190 iommu_flush_dev_iotlb(iommu->domains[did], addr, mask);
1193 static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu)
1196 unsigned long flags;
1198 raw_spin_lock_irqsave(&iommu->register_lock, flags);
1199 pmen = readl(iommu->reg + DMAR_PMEN_REG);
1200 pmen &= ~DMA_PMEN_EPM;
1201 writel(pmen, iommu->reg + DMAR_PMEN_REG);
1203 /* wait for the protected region status bit to clear */
1204 IOMMU_WAIT_OP(iommu, DMAR_PMEN_REG,
1205 readl, !(pmen & DMA_PMEN_PRS), pmen);
1207 raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
1210 static int iommu_enable_translation(struct intel_iommu *iommu)
1213 unsigned long flags;
1215 raw_spin_lock_irqsave(&iommu->register_lock, flags);
1216 iommu->gcmd |= DMA_GCMD_TE;
1217 writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
1219 /* Make sure hardware completes it */
1220 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
1221 readl, (sts & DMA_GSTS_TES), sts);
1223 raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
1227 static int iommu_disable_translation(struct intel_iommu *iommu)
1232 raw_spin_lock_irqsave(&iommu->register_lock, flag);
1233 iommu->gcmd &= ~DMA_GCMD_TE;
1234 writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
1236 /* Make sure hardware completes it */
1237 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
1238 readl, (!(sts & DMA_GSTS_TES)), sts);
1240 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1245 static int iommu_init_domains(struct intel_iommu *iommu)
1247 unsigned long ndomains;
1248 unsigned long nlongs;
1250 ndomains = cap_ndoms(iommu->cap);
1251 pr_debug("IOMMU %d: Number of Domains supported <%ld>\n", iommu->seq_id,
1253 nlongs = BITS_TO_LONGS(ndomains);
1255 spin_lock_init(&iommu->lock);
1257 /* TBD: there might be 64K domains,
1258 * consider other allocation schemes for future chips
1260 iommu->domain_ids = kcalloc(nlongs, sizeof(unsigned long), GFP_KERNEL);
1261 if (!iommu->domain_ids) {
1262 printk(KERN_ERR "Allocating domain id array failed\n");
1265 iommu->domains = kcalloc(ndomains, sizeof(struct dmar_domain *),
1267 if (!iommu->domains) {
1268 printk(KERN_ERR "Allocating domain array failed\n");
1273 * if Caching mode is set, then invalid translations are tagged
1274 * with domain id 0. Hence we need to pre-allocate it.
1276 if (cap_caching_mode(iommu->cap))
1277 set_bit(0, iommu->domain_ids);
1282 static void domain_exit(struct dmar_domain *domain);
1283 static void vm_domain_exit(struct dmar_domain *domain);
1285 void free_dmar_iommu(struct intel_iommu *iommu)
1287 struct dmar_domain *domain;
1289 unsigned long flags;
1291 if ((iommu->domains) && (iommu->domain_ids)) {
1292 for_each_set_bit(i, iommu->domain_ids, cap_ndoms(iommu->cap)) {
1293 domain = iommu->domains[i];
1294 clear_bit(i, iommu->domain_ids);
1296 spin_lock_irqsave(&domain->iommu_lock, flags);
1297 if (--domain->iommu_count == 0) {
1298 if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE)
1299 vm_domain_exit(domain);
1301 domain_exit(domain);
1303 spin_unlock_irqrestore(&domain->iommu_lock, flags);
1307 if (iommu->gcmd & DMA_GCMD_TE)
1308 iommu_disable_translation(iommu);
1311 irq_set_handler_data(iommu->irq, NULL);
1312 /* This will mask the irq */
1313 free_irq(iommu->irq, iommu);
1314 destroy_irq(iommu->irq);
1317 kfree(iommu->domains);
1318 kfree(iommu->domain_ids);
1320 g_iommus[iommu->seq_id] = NULL;
1322 /* if all iommus are freed, free g_iommus */
1323 for (i = 0; i < g_num_of_iommus; i++) {
1328 if (i == g_num_of_iommus)
1331 /* free context mapping */
1332 free_context_table(iommu);
1335 static struct dmar_domain *alloc_domain(void)
1337 struct dmar_domain *domain;
1339 domain = alloc_domain_mem();
1344 memset(domain->iommu_bmp, 0, sizeof(domain->iommu_bmp));
1350 static int iommu_attach_domain(struct dmar_domain *domain,
1351 struct intel_iommu *iommu)
1354 unsigned long ndomains;
1355 unsigned long flags;
1357 ndomains = cap_ndoms(iommu->cap);
1359 spin_lock_irqsave(&iommu->lock, flags);
1361 num = find_first_zero_bit(iommu->domain_ids, ndomains);
1362 if (num >= ndomains) {
1363 spin_unlock_irqrestore(&iommu->lock, flags);
1364 printk(KERN_ERR "IOMMU: no free domain ids\n");
1369 set_bit(num, iommu->domain_ids);
1370 set_bit(iommu->seq_id, domain->iommu_bmp);
1371 iommu->domains[num] = domain;
1372 spin_unlock_irqrestore(&iommu->lock, flags);
1377 static void iommu_detach_domain(struct dmar_domain *domain,
1378 struct intel_iommu *iommu)
1380 unsigned long flags;
1384 spin_lock_irqsave(&iommu->lock, flags);
1385 ndomains = cap_ndoms(iommu->cap);
1386 for_each_set_bit(num, iommu->domain_ids, ndomains) {
1387 if (iommu->domains[num] == domain) {
1394 clear_bit(num, iommu->domain_ids);
1395 clear_bit(iommu->seq_id, domain->iommu_bmp);
1396 iommu->domains[num] = NULL;
1398 spin_unlock_irqrestore(&iommu->lock, flags);
1401 static struct iova_domain reserved_iova_list;
1402 static struct lock_class_key reserved_rbtree_key;
1404 static int dmar_init_reserved_ranges(void)
1406 struct pci_dev *pdev = NULL;
1410 init_iova_domain(&reserved_iova_list, DMA_32BIT_PFN);
1412 lockdep_set_class(&reserved_iova_list.iova_rbtree_lock,
1413 &reserved_rbtree_key);
1415 /* IOAPIC ranges shouldn't be accessed by DMA */
1416 iova = reserve_iova(&reserved_iova_list, IOVA_PFN(IOAPIC_RANGE_START),
1417 IOVA_PFN(IOAPIC_RANGE_END));
1419 printk(KERN_ERR "Reserve IOAPIC range failed\n");
1423 /* Reserve all PCI MMIO to avoid peer-to-peer access */
1424 for_each_pci_dev(pdev) {
1427 for (i = 0; i < PCI_NUM_RESOURCES; i++) {
1428 r = &pdev->resource[i];
1429 if (!r->flags || !(r->flags & IORESOURCE_MEM))
1431 iova = reserve_iova(&reserved_iova_list,
1435 printk(KERN_ERR "Reserve iova failed\n");
1443 static void domain_reserve_special_ranges(struct dmar_domain *domain)
1445 copy_reserved_iova(&reserved_iova_list, &domain->iovad);
1448 static inline int guestwidth_to_adjustwidth(int gaw)
1451 int r = (gaw - 12) % 9;
1462 static int domain_init(struct dmar_domain *domain, int guest_width)
1464 struct intel_iommu *iommu;
1465 int adjust_width, agaw;
1466 unsigned long sagaw;
1468 init_iova_domain(&domain->iovad, DMA_32BIT_PFN);
1469 spin_lock_init(&domain->iommu_lock);
1471 domain_reserve_special_ranges(domain);
1473 /* calculate AGAW */
1474 iommu = domain_get_iommu(domain);
1475 if (guest_width > cap_mgaw(iommu->cap))
1476 guest_width = cap_mgaw(iommu->cap);
1477 domain->gaw = guest_width;
1478 adjust_width = guestwidth_to_adjustwidth(guest_width);
1479 agaw = width_to_agaw(adjust_width);
1480 sagaw = cap_sagaw(iommu->cap);
1481 if (!test_bit(agaw, &sagaw)) {
1482 /* hardware doesn't support it, choose a bigger one */
1483 pr_debug("IOMMU: hardware doesn't support agaw %d\n", agaw);
1484 agaw = find_next_bit(&sagaw, 5, agaw);
1488 domain->agaw = agaw;
1489 INIT_LIST_HEAD(&domain->devices);
1491 if (ecap_coherent(iommu->ecap))
1492 domain->iommu_coherency = 1;
1494 domain->iommu_coherency = 0;
1496 if (ecap_sc_support(iommu->ecap))
1497 domain->iommu_snooping = 1;
1499 domain->iommu_snooping = 0;
1501 domain->iommu_superpage = fls(cap_super_page_val(iommu->cap));
1502 domain->iommu_count = 1;
1503 domain->nid = iommu->node;
1505 /* always allocate the top pgd */
1506 domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
1509 __iommu_flush_cache(iommu, domain->pgd, PAGE_SIZE);
1513 static void domain_exit(struct dmar_domain *domain)
1515 struct dmar_drhd_unit *drhd;
1516 struct intel_iommu *iommu;
1518 /* Domain 0 is reserved, so don't process it */
1522 /* Flush any lazy unmaps that may reference this domain */
1523 if (!intel_iommu_strict)
1524 flush_unmaps_timeout(0);
1526 domain_remove_dev_info(domain);
1528 put_iova_domain(&domain->iovad);
1531 dma_pte_clear_range(domain, 0, DOMAIN_MAX_PFN(domain->gaw));
1533 /* free page tables */
1534 dma_pte_free_pagetable(domain, 0, DOMAIN_MAX_PFN(domain->gaw));
1536 for_each_active_iommu(iommu, drhd)
1537 if (test_bit(iommu->seq_id, domain->iommu_bmp))
1538 iommu_detach_domain(domain, iommu);
1540 free_domain_mem(domain);
1543 static int domain_context_mapping_one(struct dmar_domain *domain, int segment,
1544 u8 bus, u8 devfn, int translation)
1546 struct context_entry *context;
1547 unsigned long flags;
1548 struct intel_iommu *iommu;
1549 struct dma_pte *pgd;
1551 unsigned long ndomains;
1554 struct device_domain_info *info = NULL;
1556 pr_debug("Set context mapping for %02x:%02x.%d\n",
1557 bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
1559 BUG_ON(!domain->pgd);
1560 BUG_ON(translation != CONTEXT_TT_PASS_THROUGH &&
1561 translation != CONTEXT_TT_MULTI_LEVEL);
1563 iommu = device_to_iommu(segment, bus, devfn);
1567 context = device_to_context_entry(iommu, bus, devfn);
1570 spin_lock_irqsave(&iommu->lock, flags);
1571 if (context_present(context)) {
1572 spin_unlock_irqrestore(&iommu->lock, flags);
1579 if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE ||
1580 domain->flags & DOMAIN_FLAG_STATIC_IDENTITY) {
1583 /* find an available domain id for this device in iommu */
1584 ndomains = cap_ndoms(iommu->cap);
1585 for_each_set_bit(num, iommu->domain_ids, ndomains) {
1586 if (iommu->domains[num] == domain) {
1594 num = find_first_zero_bit(iommu->domain_ids, ndomains);
1595 if (num >= ndomains) {
1596 spin_unlock_irqrestore(&iommu->lock, flags);
1597 printk(KERN_ERR "IOMMU: no free domain ids\n");
1601 set_bit(num, iommu->domain_ids);
1602 iommu->domains[num] = domain;
1606 /* Skip top levels of page tables for an
1607 * iommu which has a smaller agaw than the default.
1608 * Unnecessary for PT mode.
1610 if (translation != CONTEXT_TT_PASS_THROUGH) {
1611 for (agaw = domain->agaw; agaw != iommu->agaw; agaw--) {
1612 pgd = phys_to_virt(dma_pte_addr(pgd));
1613 if (!dma_pte_present(pgd)) {
1614 spin_unlock_irqrestore(&iommu->lock, flags);
1621 context_set_domain_id(context, id);
1623 if (translation != CONTEXT_TT_PASS_THROUGH) {
1624 info = iommu_support_dev_iotlb(domain, segment, bus, devfn);
1625 translation = info ? CONTEXT_TT_DEV_IOTLB :
1626 CONTEXT_TT_MULTI_LEVEL;
1629 * In pass through mode, AW must be programmed to indicate the largest
1630 * AGAW value supported by hardware. And ASR is ignored by hardware.
1632 if (unlikely(translation == CONTEXT_TT_PASS_THROUGH))
1633 context_set_address_width(context, iommu->msagaw);
1635 context_set_address_root(context, virt_to_phys(pgd));
1636 context_set_address_width(context, iommu->agaw);
1639 context_set_translation_type(context, translation);
1640 context_set_fault_enable(context);
1641 context_set_present(context);
1642 domain_flush_cache(domain, context, sizeof(*context));
1645 * It's a non-present to present mapping. If hardware doesn't cache
1646 * non-present entries we only need to flush the write-buffer. If it
1647 * _does_ cache non-present entries, then it does so in the special
1648 * domain #0, which we have to flush:
1650 if (cap_caching_mode(iommu->cap)) {
1651 iommu->flush.flush_context(iommu, 0,
1652 (((u16)bus) << 8) | devfn,
1653 DMA_CCMD_MASK_NOBIT,
1654 DMA_CCMD_DEVICE_INVL);
1655 iommu->flush.flush_iotlb(iommu, domain->id, 0, 0, DMA_TLB_DSI_FLUSH);
1657 iommu_flush_write_buffer(iommu);
1659 iommu_enable_dev_iotlb(info);
1660 spin_unlock_irqrestore(&iommu->lock, flags);
1662 spin_lock_irqsave(&domain->iommu_lock, flags);
1663 if (!test_and_set_bit(iommu->seq_id, domain->iommu_bmp)) {
1664 domain->iommu_count++;
1665 if (domain->iommu_count == 1)
1666 domain->nid = iommu->node;
1667 domain_update_iommu_cap(domain);
1669 spin_unlock_irqrestore(&domain->iommu_lock, flags);
1674 domain_context_mapping(struct dmar_domain *domain, struct pci_dev *pdev,
1678 struct pci_dev *tmp, *parent;
1680 ret = domain_context_mapping_one(domain, pci_domain_nr(pdev->bus),
1681 pdev->bus->number, pdev->devfn,
1686 /* dependent device mapping */
1687 tmp = pci_find_upstream_pcie_bridge(pdev);
1690 /* Secondary interface's bus number and devfn 0 */
1691 parent = pdev->bus->self;
1692 while (parent != tmp) {
1693 ret = domain_context_mapping_one(domain,
1694 pci_domain_nr(parent->bus),
1695 parent->bus->number,
1696 parent->devfn, translation);
1699 parent = parent->bus->self;
1701 if (pci_is_pcie(tmp)) /* this is a PCIe-to-PCI bridge */
1702 return domain_context_mapping_one(domain,
1703 pci_domain_nr(tmp->subordinate),
1704 tmp->subordinate->number, 0,
1706 else /* this is a legacy PCI bridge */
1707 return domain_context_mapping_one(domain,
1708 pci_domain_nr(tmp->bus),
1714 static int domain_context_mapped(struct pci_dev *pdev)
1717 struct pci_dev *tmp, *parent;
1718 struct intel_iommu *iommu;
1720 iommu = device_to_iommu(pci_domain_nr(pdev->bus), pdev->bus->number,
1725 ret = device_context_mapped(iommu, pdev->bus->number, pdev->devfn);
1728 /* dependent device mapping */
1729 tmp = pci_find_upstream_pcie_bridge(pdev);
1732 /* Secondary interface's bus number and devfn 0 */
1733 parent = pdev->bus->self;
1734 while (parent != tmp) {
1735 ret = device_context_mapped(iommu, parent->bus->number,
1739 parent = parent->bus->self;
1741 if (pci_is_pcie(tmp))
1742 return device_context_mapped(iommu, tmp->subordinate->number,
1745 return device_context_mapped(iommu, tmp->bus->number,
1749 /* Returns the number of VT-d pages, aligned up to the MM page size */
1750 static inline unsigned long aligned_nrpages(unsigned long host_addr,
1753 host_addr &= ~PAGE_MASK;
1754 return PAGE_ALIGN(host_addr + size) >> VTD_PAGE_SHIFT;
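/*
 * Added example: a buffer starting at offset 0x800 within its MM page,
 * with size 0x1000, spans two 4KiB VT-d pages, and indeed
 * PAGE_ALIGN(0x800 + 0x1000) >> VTD_PAGE_SHIFT == 2.
 */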
1757 /* Return largest possible superpage level for a given mapping */
1758 static inline int hardware_largepage_caps(struct dmar_domain *domain,
1759 unsigned long iov_pfn,
1760 unsigned long phy_pfn,
1761 unsigned long pages)
1763 int support, level = 1;
1764 unsigned long pfnmerge;
1766 support = domain->iommu_superpage;
1768 /* To use a large page, the virtual *and* physical addresses
1769 must be aligned to 2MiB/1GiB/etc. Lower bits set in either
1770 of them will mean we have to use smaller pages. So just
1771 merge them and check both at once. */
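/*
 * Added example (roughly, per the loop below): if both pfns have their
 * low 9 bits clear (2MiB aligned) and at least 512 pages remain, the
 * first pass bumps the level to 2 so a 2MiB superpage PTE can be used;
 * a misaligned bit in either pfn keeps the level at 1 (4KiB).
 */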
1772 pfnmerge = iov_pfn | phy_pfn;
1774 while (support && !(pfnmerge & ~VTD_STRIDE_MASK)) {
1775 pages >>= VTD_STRIDE_SHIFT;
1778 pfnmerge >>= VTD_STRIDE_SHIFT;
1785 static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
1786 struct scatterlist *sg, unsigned long phys_pfn,
1787 unsigned long nr_pages, int prot)
1789 struct dma_pte *first_pte = NULL, *pte = NULL;
1790 phys_addr_t uninitialized_var(pteval);
1791 int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
1792 unsigned long sg_res;
1793 unsigned int largepage_lvl = 0;
1794 unsigned long lvl_pages = 0;
1796 BUG_ON(addr_width < BITS_PER_LONG && (iov_pfn + nr_pages - 1) >> addr_width);
1798 if ((prot & (DMA_PTE_READ|DMA_PTE_WRITE)) == 0)
1801 prot &= DMA_PTE_READ | DMA_PTE_WRITE | DMA_PTE_SNP;
1806 sg_res = nr_pages + 1;
1807 pteval = ((phys_addr_t)phys_pfn << VTD_PAGE_SHIFT) | prot;
1810 while (nr_pages > 0) {
1814 sg_res = aligned_nrpages(sg->offset, sg->length);
1815 sg->dma_address = ((dma_addr_t)iov_pfn << VTD_PAGE_SHIFT) + sg->offset;
1816 sg->dma_length = sg->length;
1817 pteval = page_to_phys(sg_page(sg)) | prot;
1818 phys_pfn = pteval >> VTD_PAGE_SHIFT;
1822 largepage_lvl = hardware_largepage_caps(domain, iov_pfn, phys_pfn, sg_res);
1824 first_pte = pte = pfn_to_dma_pte(domain, iov_pfn, largepage_lvl);
1827 /* It is a large page */
1828 if (largepage_lvl > 1)
1829 pteval |= DMA_PTE_LARGE_PAGE;
1831 pteval &= ~(uint64_t)DMA_PTE_LARGE_PAGE;
1834 /* We don't need lock here, nobody else
1835 * touches the iova range
1837 tmp = cmpxchg64_local(&pte->val, 0ULL, pteval);
1839 static int dumps = 5;
1840 printk(KERN_CRIT "ERROR: DMA PTE for vPFN 0x%lx already set (to %llx not %llx)\n",
1841 iov_pfn, tmp, (unsigned long long)pteval);
1844 debug_dma_dump_mappings(NULL);
1849 lvl_pages = lvl_to_nr_pages(largepage_lvl);
1851 BUG_ON(nr_pages < lvl_pages);
1852 BUG_ON(sg_res < lvl_pages);
1854 nr_pages -= lvl_pages;
1855 iov_pfn += lvl_pages;
1856 phys_pfn += lvl_pages;
1857 pteval += lvl_pages * VTD_PAGE_SIZE;
1858 sg_res -= lvl_pages;
1860 /* If the next PTE would be the first in a new page, then we
1861 need to flush the cache on the entries we've just written.
1862 And then we'll need to recalculate 'pte', so clear it and
1863 let it get set again in the if (!pte) block above.
1865 If we're done (!nr_pages) we need to flush the cache too.
1867 Also if we've been setting superpages, we may need to
1868 recalculate 'pte' and switch back to smaller pages for the
1869 end of the mapping, if the trailing size is not enough to
1870 use another superpage (i.e. sg_res < lvl_pages). */
1872 if (!nr_pages || first_pte_in_page(pte) ||
1873 (largepage_lvl > 1 && sg_res < lvl_pages)) {
1874 domain_flush_cache(domain, first_pte,
1875 (void *)pte - (void *)first_pte);
1879 if (!sg_res && nr_pages)
1885 static inline int domain_sg_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
1886 struct scatterlist *sg, unsigned long nr_pages,
1889 return __domain_mapping(domain, iov_pfn, sg, 0, nr_pages, prot);
1892 static inline int domain_pfn_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
1893 unsigned long phys_pfn, unsigned long nr_pages,
1896 return __domain_mapping(domain, iov_pfn, NULL, phys_pfn, nr_pages, prot);
1899 static void iommu_detach_dev(struct intel_iommu *iommu, u8 bus, u8 devfn)
1904 clear_context_table(iommu, bus, devfn);
1905 iommu->flush.flush_context(iommu, 0, 0, 0,
1906 DMA_CCMD_GLOBAL_INVL);
1907 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
1910 static void domain_remove_dev_info(struct dmar_domain *domain)
1912 struct device_domain_info *info;
1913 unsigned long flags;
1914 struct intel_iommu *iommu;
1916 spin_lock_irqsave(&device_domain_lock, flags);
1917 while (!list_empty(&domain->devices)) {
1918 info = list_entry(domain->devices.next,
1919 struct device_domain_info, link);
1920 list_del(&info->link);
1921 list_del(&info->global);
1923 info->dev->dev.archdata.iommu = NULL;
1924 spin_unlock_irqrestore(&device_domain_lock, flags);
1926 iommu_disable_dev_iotlb(info);
1927 iommu = device_to_iommu(info->segment, info->bus, info->devfn);
1928 iommu_detach_dev(iommu, info->bus, info->devfn);
1929 free_devinfo_mem(info);
1931 spin_lock_irqsave(&device_domain_lock, flags);
1933 spin_unlock_irqrestore(&device_domain_lock, flags);
1938 * Note: we use struct pci_dev->dev.archdata.iommu to store the info
1940 static struct dmar_domain *
1941 find_domain(struct pci_dev *pdev)
1943 struct device_domain_info *info;
1945 /* No lock here, assumes no domain exit in normal case */
1946 info = pdev->dev.archdata.iommu;
1948 return info->domain;
1952 /* domain is initialized */
1953 static struct dmar_domain *get_domain_for_dev(struct pci_dev *pdev, int gaw)
1955 struct dmar_domain *domain, *found = NULL;
1956 struct intel_iommu *iommu;
1957 struct dmar_drhd_unit *drhd;
1958 struct device_domain_info *info, *tmp;
1959 struct pci_dev *dev_tmp;
1960 unsigned long flags;
1961 int bus = 0, devfn = 0;
1965 domain = find_domain(pdev);
1969 segment = pci_domain_nr(pdev->bus);
1971 dev_tmp = pci_find_upstream_pcie_bridge(pdev);
1973 if (pci_is_pcie(dev_tmp)) {
1974 bus = dev_tmp->subordinate->number;
1977 bus = dev_tmp->bus->number;
1978 devfn = dev_tmp->devfn;
1980 spin_lock_irqsave(&device_domain_lock, flags);
1981 list_for_each_entry(info, &device_domain_list, global) {
1982 if (info->segment == segment &&
1983 info->bus == bus && info->devfn == devfn) {
1984 found = info->domain;
1988 spin_unlock_irqrestore(&device_domain_lock, flags);
1989 /* pcie-pci bridge already has a domain, use it */
1996 domain = alloc_domain();
2000 /* Allocate new domain for the device */
2001 drhd = dmar_find_matched_drhd_unit(pdev);
2003 printk(KERN_ERR "IOMMU: can't find DMAR for device %s\n",
2007 iommu = drhd->iommu;
2009 ret = iommu_attach_domain(domain, iommu);
2011 free_domain_mem(domain);
2015 if (domain_init(domain, gaw)) {
2016 domain_exit(domain);
2020 /* register pcie-to-pci device */
2022 info = alloc_devinfo_mem();
2024 domain_exit(domain);
2027 info->segment = segment;
2029 info->devfn = devfn;
2031 info->domain = domain;
2032 /* This domain is shared by devices under p2p bridge */
2033 domain->flags |= DOMAIN_FLAG_P2P_MULTIPLE_DEVICES;
2035 /* pcie-to-pci bridge already has a domain, use it */
2037 spin_lock_irqsave(&device_domain_lock, flags);
2038 list_for_each_entry(tmp, &device_domain_list, global) {
2039 if (tmp->segment == segment &&
2040 tmp->bus == bus && tmp->devfn == devfn) {
2041 found = tmp->domain;
2046 spin_unlock_irqrestore(&device_domain_lock, flags);
2047 free_devinfo_mem(info);
2048 domain_exit(domain);
2051 list_add(&info->link, &domain->devices);
2052 list_add(&info->global, &device_domain_list);
2053 spin_unlock_irqrestore(&device_domain_lock, flags);
2058 info = alloc_devinfo_mem();
2061 info->segment = segment;
2062 info->bus = pdev->bus->number;
2063 info->devfn = pdev->devfn;
2065 info->domain = domain;
2066 spin_lock_irqsave(&device_domain_lock, flags);
2067 /* somebody is fast */
2068 found = find_domain(pdev);
2069 if (found != NULL) {
2070 spin_unlock_irqrestore(&device_domain_lock, flags);
2071 if (found != domain) {
2072 domain_exit(domain);
2075 free_devinfo_mem(info);
2078 list_add(&info->link, &domain->devices);
2079 list_add(&info->global, &device_domain_list);
2080 pdev->dev.archdata.iommu = info;
2081 spin_unlock_irqrestore(&device_domain_lock, flags);
2084 /* recheck it here, maybe others set it */
2085 return find_domain(pdev);
2088 static int iommu_identity_mapping;
2089 #define IDENTMAP_ALL 1
2090 #define IDENTMAP_GFX 2
2091 #define IDENTMAP_AZALIA 4
2093 static int iommu_domain_identity_map(struct dmar_domain *domain,
2094 unsigned long long start,
2095 unsigned long long end)
2097 unsigned long first_vpfn = start >> VTD_PAGE_SHIFT;
2098 unsigned long last_vpfn = end >> VTD_PAGE_SHIFT;
2100 if (!reserve_iova(&domain->iovad, dma_to_mm_pfn(first_vpfn),
2101 dma_to_mm_pfn(last_vpfn))) {
2102 printk(KERN_ERR "IOMMU: reserve iova failed\n");
2106 pr_debug("Mapping reserved region %llx-%llx for domain %d\n",
2107 start, end, domain->id);
2109 * RMRR range might have overlap with physical memory range,
2112 dma_pte_clear_range(domain, first_vpfn, last_vpfn);
2114 return domain_pfn_mapping(domain, first_vpfn, first_vpfn,
2115 last_vpfn - first_vpfn + 1,
2116 DMA_PTE_READ|DMA_PTE_WRITE);
2119 static int iommu_prepare_identity_map(struct pci_dev *pdev,
2120 unsigned long long start,
2121 unsigned long long end)
2123 struct dmar_domain *domain;
2126 domain = get_domain_for_dev(pdev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
2130 /* For _hardware_ passthrough, don't bother. But for software
2131 passthrough, we do it anyway -- it may indicate a memory
2132 range which is reserved in E820, and so didn't get set
2133 up to start with in si_domain */
2134 if (domain == si_domain && hw_pass_through) {
2135 printk("Ignoring identity map for HW passthrough device %s [0x%Lx - 0x%Lx]\n",
2136 pci_name(pdev), start, end);
2141 "IOMMU: Setting identity map for device %s [0x%Lx - 0x%Lx]\n",
2142 pci_name(pdev), start, end);
2145 WARN(1, "Your BIOS is broken; RMRR ends before it starts!\n"
2146 "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
2147 dmi_get_system_info(DMI_BIOS_VENDOR),
2148 dmi_get_system_info(DMI_BIOS_VERSION),
2149 dmi_get_system_info(DMI_PRODUCT_VERSION));
2154 if (end >> agaw_to_width(domain->agaw)) {
2155 WARN(1, "Your BIOS is broken; RMRR exceeds permitted address width (%d bits)\n"
2156 "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
2157 agaw_to_width(domain->agaw),
2158 dmi_get_system_info(DMI_BIOS_VENDOR),
2159 dmi_get_system_info(DMI_BIOS_VERSION),
2160 dmi_get_system_info(DMI_PRODUCT_VERSION));
2165 ret = iommu_domain_identity_map(domain, start, end);
2169 /* context entry init */
2170 ret = domain_context_mapping(domain, pdev, CONTEXT_TT_MULTI_LEVEL);
2177 domain_exit(domain);
2181 static inline int iommu_prepare_rmrr_dev(struct dmar_rmrr_unit *rmrr,
2182 struct pci_dev *pdev)
2184 if (pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO)
2186 return iommu_prepare_identity_map(pdev, rmrr->base_address,
2190 #ifdef CONFIG_INTEL_IOMMU_FLOPPY_WA
2191 static inline void iommu_prepare_isa(void)
2193 struct pci_dev *pdev;
2196 pdev = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, NULL);
2200 printk(KERN_INFO "IOMMU: Prepare 0-16MiB unity mapping for LPC\n");
2201 ret = iommu_prepare_identity_map(pdev, 0, 16*1024*1024 - 1);
2204 printk(KERN_ERR "IOMMU: Failed to create 0-16MiB identity map; "
2205 "floppy might not work\n");
2209 static inline void iommu_prepare_isa(void)
2213 #endif /* !CONFIG_INTEL_IOMMU_FLOPPY_WA */
2215 static int md_domain_init(struct dmar_domain *domain, int guest_width);
2217 static int __init si_domain_init(int hw)
2219 struct dmar_drhd_unit *drhd;
2220 struct intel_iommu *iommu;
2223 si_domain = alloc_domain();
2227 pr_debug("Identity mapping domain is domain %d\n", si_domain->id);
2229 for_each_active_iommu(iommu, drhd) {
2230 ret = iommu_attach_domain(si_domain, iommu);
2232 domain_exit(si_domain);
2237 if (md_domain_init(si_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
2238 domain_exit(si_domain);
2242 si_domain->flags = DOMAIN_FLAG_STATIC_IDENTITY;
2247 for_each_online_node(nid) {
2248 unsigned long start_pfn, end_pfn;
2251 for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
2252 ret = iommu_domain_identity_map(si_domain,
2253 PFN_PHYS(start_pfn), PFN_PHYS(end_pfn));
2262 static void domain_remove_one_dev_info(struct dmar_domain *domain,
2263 struct pci_dev *pdev);
2264 static int identity_mapping(struct pci_dev *pdev)
2266 struct device_domain_info *info;
2268 if (likely(!iommu_identity_mapping))
2271 info = pdev->dev.archdata.iommu;
2272 if (info && info != DUMMY_DEVICE_DOMAIN_INFO)
2273 return (info->domain == si_domain);
2278 static int domain_add_dev_info(struct dmar_domain *domain,
2279 struct pci_dev *pdev,
2282 struct device_domain_info *info;
2283 unsigned long flags;
2286 info = alloc_devinfo_mem();
2290 ret = domain_context_mapping(domain, pdev, translation);
2292 free_devinfo_mem(info);
2296 info->segment = pci_domain_nr(pdev->bus);
2297 info->bus = pdev->bus->number;
2298 info->devfn = pdev->devfn;
2300 info->domain = domain;
2302 spin_lock_irqsave(&device_domain_lock, flags);
2303 list_add(&info->link, &domain->devices);
2304 list_add(&info->global, &device_domain_list);
2305 pdev->dev.archdata.iommu = info;
2306 spin_unlock_irqrestore(&device_domain_lock, flags);
2311 static int iommu_should_identity_map(struct pci_dev *pdev, int startup)
2313 if ((iommu_identity_mapping & IDENTMAP_AZALIA) && IS_AZALIA(pdev))
2316 if ((iommu_identity_mapping & IDENTMAP_GFX) && IS_GFX_DEVICE(pdev))
2319 if (!(iommu_identity_mapping & IDENTMAP_ALL))
2323 * We want to start off with all devices in the 1:1 domain, and
2324 * take them out later if we find they can't access all of memory.
2326 * However, we can't do this for PCI devices behind bridges,
2327 * because all PCI devices behind the same bridge will end up
2328 * with the same source-id on their transactions.
2330 * Practically speaking, we can't change things around for these
2331 * devices at run-time, because we can't be sure there'll be no
2332 * DMA transactions in flight for any of their siblings.
2334 * So PCI devices (unless they're on the root bus) as well as
2335 * their parent PCI-PCI or PCIe-PCI bridges must be left _out_ of
2336 * the 1:1 domain, just in _case_ one of their siblings turns out
2337 * not to be able to map all of memory.
2339 if (!pci_is_pcie(pdev)) {
2340 if (!pci_is_root_bus(pdev->bus))
2342 if (pdev->class >> 8 == PCI_CLASS_BRIDGE_PCI)
2344 } else if (pdev->pcie_type == PCI_EXP_TYPE_PCI_BRIDGE)
2348 * At boot time, we don't yet know if devices will be 64-bit capable.
2349 * Assume that they will -- if they turn out not to be, then we can
2350 * take them out of the 1:1 domain later.
2354 * If the device's dma_mask is less than the system's memory
2355 * size then this is not a candidate for identity mapping.
2357 u64 dma_mask = pdev->dma_mask;
2359 if (pdev->dev.coherent_dma_mask &&
2360 pdev->dev.coherent_dma_mask < dma_mask)
2361 dma_mask = pdev->dev.coherent_dma_mask;
2363 return dma_mask >= dma_get_required_mask(&pdev->dev);
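/*
 * Added example: a device limited to 32-bit DMA on a machine with more
 * than 4GiB of RAM fails this check (dma_get_required_mask() exceeds its
 * dma_mask) and is therefore kept out of, or later removed from, the
 * identity domain, since a 1:1 mapping of high memory would be
 * unreachable for it.
 */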
2369 static int __init iommu_prepare_static_identity_mapping(int hw)
2371 struct pci_dev *pdev = NULL;
2374 ret = si_domain_init(hw);
2378 for_each_pci_dev(pdev) {
2379 if (iommu_should_identity_map(pdev, 1)) {
2380 ret = domain_add_dev_info(si_domain, pdev,
2381 hw ? CONTEXT_TT_PASS_THROUGH :
2382 CONTEXT_TT_MULTI_LEVEL);
2384 /* device not associated with an iommu */
2389 pr_info("IOMMU: %s identity mapping for device %s\n",
2390 hw ? "hardware" : "software", pci_name(pdev));
2397 static int __init init_dmars(void)
2399 struct dmar_drhd_unit *drhd;
2400 struct dmar_rmrr_unit *rmrr;
2401 struct pci_dev *pdev;
2402 struct intel_iommu *iommu;
2408 * initialize and program root entry to not present
2411 for_each_drhd_unit(drhd) {
2413 * lock not needed as this is only incremented in the single-
2414 * threaded kernel __init code path; all other accesses are read-only.
2417 if (g_num_of_iommus < IOMMU_UNITS_SUPPORTED) {
2421 printk_once(KERN_ERR "intel-iommu: exceeded %d IOMMUs\n",
2422 IOMMU_UNITS_SUPPORTED);
2425 g_iommus = kcalloc(g_num_of_iommus, sizeof(struct intel_iommu *),
2428 printk(KERN_ERR "Allocating global iommu array failed\n");
2433 deferred_flush = kzalloc(g_num_of_iommus *
2434 sizeof(struct deferred_flush_tables), GFP_KERNEL);
2435 if (!deferred_flush) {
2440 for_each_drhd_unit(drhd) {
2444 iommu = drhd->iommu;
2445 g_iommus[iommu->seq_id] = iommu;
2447 ret = iommu_init_domains(iommu);
2453 * we could share the same root & context tables
2454 * among all IOMMUs; need to split them later.
2456 ret = iommu_alloc_root_entry(iommu);
2458 printk(KERN_ERR "IOMMU: allocate root entry failed\n");
2461 if (!ecap_pass_through(iommu->ecap))
2462 hw_pass_through = 0;
2466 * Start from a sane iommu hardware state.
2468 for_each_drhd_unit(drhd) {
2472 iommu = drhd->iommu;
2475 * If the queued invalidation is already initialized by us
2476 * (for example, while enabling interrupt-remapping) then
2477 * things are already rolling from a sane state.
2483 * Clear any previous faults.
2485 dmar_fault(-1, iommu);
2487 * Disable queued invalidation if supported and already enabled
2488 * before OS handover.
2490 dmar_disable_qi(iommu);
2493 for_each_drhd_unit(drhd) {
2497 iommu = drhd->iommu;
2499 if (dmar_enable_qi(iommu)) {
2501 * Queued Invalidation not enabled, use Register Based Invalidation
2504 iommu->flush.flush_context = __iommu_flush_context;
2505 iommu->flush.flush_iotlb = __iommu_flush_iotlb;
2506 printk(KERN_INFO "IOMMU %d 0x%Lx: using Register based "
2509 (unsigned long long)drhd->reg_base_addr);
2511 iommu->flush.flush_context = qi_flush_context;
2512 iommu->flush.flush_iotlb = qi_flush_iotlb;
2513 printk(KERN_INFO "IOMMU %d 0x%Lx: using Queued "
2516 (unsigned long long)drhd->reg_base_addr);
2520 if (iommu_pass_through)
2521 iommu_identity_mapping |= IDENTMAP_ALL;
2523 #ifdef CONFIG_INTEL_IOMMU_BROKEN_GFX_WA
2524 iommu_identity_mapping |= IDENTMAP_GFX;
2527 check_tylersburg_isoch();
2530 * If pass-through is not set or not enabled, set up context entries for
2531 * identity mappings for rmrr, gfx, and isa, and possibly fall back to static
2532 * identity mapping if iommu_identity_mapping is set.
2534 if (iommu_identity_mapping) {
2535 ret = iommu_prepare_static_identity_mapping(hw_pass_through);
2537 printk(KERN_CRIT "Failed to setup IOMMU pass-through\n");
2543 * for each dev attached to rmrr
2545 * locate drhd for dev, alloc domain for dev
2546 * allocate free domain
2547 * allocate page table entries for rmrr
2548 * if context not allocated for bus
2549 * allocate and init context
2550 * set present in root table for this bus
2551 * init context with domain, translation etc
2555 printk(KERN_INFO "IOMMU: Setting RMRR:\n");
2556 for_each_rmrr_units(rmrr) {
2557 for (i = 0; i < rmrr->devices_cnt; i++) {
2558 pdev = rmrr->devices[i];
2560 * some BIOSes list non-existent devices in the DMAR table
2565 ret = iommu_prepare_rmrr_dev(rmrr, pdev);
2568 "IOMMU: mapping reserved region failed\n");
2572 iommu_prepare_isa();
2577 * global invalidate context cache
2578 * global invalidate iotlb
2579 * enable translation
2581 for_each_drhd_unit(drhd) {
2582 if (drhd->ignored) {
2584 * we always have to disable PMRs or DMA may fail on this device
2588 iommu_disable_protect_mem_regions(drhd->iommu);
2591 iommu = drhd->iommu;
2593 iommu_flush_write_buffer(iommu);
2595 ret = dmar_set_interrupt(iommu);
2599 iommu_set_root_entry(iommu);
2601 iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
2602 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
2604 ret = iommu_enable_translation(iommu);
2608 iommu_disable_protect_mem_regions(iommu);
2613 for_each_drhd_unit(drhd) {
2616 iommu = drhd->iommu;
2623 /* This takes a number of _MM_ pages, not VTD pages */
2624 static struct iova *intel_alloc_iova(struct device *dev,
2625 struct dmar_domain *domain,
2626 unsigned long nrpages, uint64_t dma_mask)
2628 struct pci_dev *pdev = to_pci_dev(dev);
2629 struct iova *iova = NULL;
2631 /* Restrict dma_mask to the width that the iommu can handle */
2632 dma_mask = min_t(uint64_t, DOMAIN_MAX_ADDR(domain->gaw), dma_mask);
2634 if (!dmar_forcedac && dma_mask > DMA_BIT_MASK(32)) {
2636 * First try to allocate an io virtual address in
2637 * DMA_BIT_MASK(32) and if that fails then try allocating
2640 iova = alloc_iova(&domain->iovad, nrpages,
2641 IOVA_PFN(DMA_BIT_MASK(32)), 1);
2645 iova = alloc_iova(&domain->iovad, nrpages, IOVA_PFN(dma_mask), 1);
2646 if (unlikely(!iova)) {
2647 printk(KERN_ERR "Allocating %ld-page iova for %s failed",
2648 nrpages, pci_name(pdev));
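/*
 * For reference (a sketch, not a redefinition): the conversion between the
 * _MM_ pages taken by intel_alloc_iova() above and the 4KiB VT-d pages used
 * by the page-table code is the same shift performed by the
 * mm_to_dma_pfn()/dma_to_mm_pfn() helpers declared earlier in this file.
 * The names below are hypothetical copies kept purely as illustration.
 */
static inline unsigned long example_mm_to_dma_pfn(unsigned long mm_pfn)
{
	return mm_pfn << (PAGE_SHIFT - VTD_PAGE_SHIFT);
}

static inline unsigned long example_dma_to_mm_pfn(unsigned long dma_pfn)
{
	return dma_pfn >> (PAGE_SHIFT - VTD_PAGE_SHIFT);
}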
2655 static struct dmar_domain *__get_valid_domain_for_dev(struct pci_dev *pdev)
2657 struct dmar_domain *domain;
2660 domain = get_domain_for_dev(pdev,
2661 DEFAULT_DOMAIN_ADDRESS_WIDTH);
2664 "Allocating domain for %s failed", pci_name(pdev));
2668 /* make sure context mapping is ok */
2669 if (unlikely(!domain_context_mapped(pdev))) {
2670 ret = domain_context_mapping(domain, pdev,
2671 CONTEXT_TT_MULTI_LEVEL);
2674 "Domain context map for %s failed",
2683 static inline struct dmar_domain *get_valid_domain_for_dev(struct pci_dev *dev)
2685 struct device_domain_info *info;
2687 /* No lock here, assumes no domain exit in normal case */
2688 info = dev->dev.archdata.iommu;
2690 return info->domain;
2692 return __get_valid_domain_for_dev(dev);
2695 static int iommu_dummy(struct pci_dev *pdev)
2697 return pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO;
2700 /* Check if the pdev needs to go through non-identity map and unmap process.*/
2701 static int iommu_no_mapping(struct device *dev)
2703 struct pci_dev *pdev;
2706 if (unlikely(dev->bus != &pci_bus_type))
2709 pdev = to_pci_dev(dev);
2710 if (iommu_dummy(pdev))
2713 if (!iommu_identity_mapping)
2716 found = identity_mapping(pdev);
2718 if (iommu_should_identity_map(pdev, 0))
2722 * The 32-bit DMA device is removed from si_domain and falls back
2723 * to non-identity mapping.
2725 domain_remove_one_dev_info(si_domain, pdev);
2726 printk(KERN_INFO "32bit %s uses non-identity mapping\n",
2732 * If a 64-bit DMA device was detached from a VM, the device
2733 * is put back into si_domain for identity mapping.
2735 if (iommu_should_identity_map(pdev, 0)) {
2737 ret = domain_add_dev_info(si_domain, pdev,
2739 CONTEXT_TT_PASS_THROUGH :
2740 CONTEXT_TT_MULTI_LEVEL);
2742 printk(KERN_INFO "64bit %s uses identity mapping\n",
2752 static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr,
2753 size_t size, int dir, u64 dma_mask)
2755 struct pci_dev *pdev = to_pci_dev(hwdev);
2756 struct dmar_domain *domain;
2757 phys_addr_t start_paddr;
2761 struct intel_iommu *iommu;
2762 unsigned long paddr_pfn = paddr >> PAGE_SHIFT;
2764 BUG_ON(dir == DMA_NONE);
2766 if (iommu_no_mapping(hwdev))
2769 domain = get_valid_domain_for_dev(pdev);
2773 iommu = domain_get_iommu(domain);
2774 size = aligned_nrpages(paddr, size);
2776 iova = intel_alloc_iova(hwdev, domain, dma_to_mm_pfn(size), dma_mask);
2781 * Check if DMAR supports zero-length reads on write-only mappings
2784 if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL || \
2785 !cap_zlr(iommu->cap))
2786 prot |= DMA_PTE_READ;
2787 if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
2788 prot |= DMA_PTE_WRITE;
2790 * The range paddr .. (paddr + size) might cover only part of a page, so we
2791 * map the whole page. Note: if two parts of one page are mapped separately,
2792 * we might end up with two guest addresses mapping to the same host paddr,
2793 * but this is not a big problem.
2795 ret = domain_pfn_mapping(domain, mm_to_dma_pfn(iova->pfn_lo),
2796 mm_to_dma_pfn(paddr_pfn), size, prot);
2800 /* it's a non-present to present mapping. Only flush if caching mode */
2801 if (cap_caching_mode(iommu->cap))
2802 iommu_flush_iotlb_psi(iommu, domain->id, mm_to_dma_pfn(iova->pfn_lo), size, 1);
2804 iommu_flush_write_buffer(iommu);
2806 start_paddr = (phys_addr_t)iova->pfn_lo << PAGE_SHIFT;
2807 start_paddr += paddr & ~PAGE_MASK;
2812 __free_iova(&domain->iovad, iova);
2813 printk(KERN_ERR "Device %s request: %zx@%llx dir %d --- failed\n",
2814 pci_name(pdev), size, (unsigned long long)paddr, dir);
2818 static dma_addr_t intel_map_page(struct device *dev, struct page *page,
2819 unsigned long offset, size_t size,
2820 enum dma_data_direction dir,
2821 struct dma_attrs *attrs)
2823 return __intel_map_single(dev, page_to_phys(page) + offset, size,
2824 dir, to_pci_dev(dev)->dma_mask);
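/*
 * Usage sketch (hypothetical caller, not part of this driver): once
 * intel_dma_ops is installed in intel_iommu_init(), drivers reach
 * intel_map_page()/intel_unmap_page() through the generic DMA API.
 */
static int example_driver_map_one_page(struct device *dev, struct page *page)
{
	dma_addr_t handle;

	handle = dma_map_page(dev, page, 0, PAGE_SIZE, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, handle))
		return -ENOMEM;

	/* ... hand 'handle' to the hardware and wait for completion ... */

	dma_unmap_page(dev, handle, PAGE_SIZE, DMA_TO_DEVICE);
	return 0;
}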
2827 static void flush_unmaps(void)
2833 /* just flush them all */
2834 for (i = 0; i < g_num_of_iommus; i++) {
2835 struct intel_iommu *iommu = g_iommus[i];
2839 if (!deferred_flush[i].next)
2842 /* In caching mode, global flushes turn emulation expensive */
2843 if (!cap_caching_mode(iommu->cap))
2844 iommu->flush.flush_iotlb(iommu, 0, 0, 0,
2845 DMA_TLB_GLOBAL_FLUSH);
2846 for (j = 0; j < deferred_flush[i].next; j++) {
2848 struct iova *iova = deferred_flush[i].iova[j];
2849 struct dmar_domain *domain = deferred_flush[i].domain[j];
2851 /* On real hardware multiple invalidations are expensive */
2852 if (cap_caching_mode(iommu->cap))
2853 iommu_flush_iotlb_psi(iommu, domain->id,
2854 iova->pfn_lo, iova->pfn_hi - iova->pfn_lo + 1, 0);
2856 mask = ilog2(mm_to_dma_pfn(iova->pfn_hi - iova->pfn_lo + 1));
2857 iommu_flush_dev_iotlb(deferred_flush[i].domain[j],
2858 (uint64_t)iova->pfn_lo << PAGE_SHIFT, mask);
2860 __free_iova(&deferred_flush[i].domain[j]->iovad, iova);
2862 deferred_flush[i].next = 0;
2868 static void flush_unmaps_timeout(unsigned long data)
2870 unsigned long flags;
2872 spin_lock_irqsave(&async_umap_flush_lock, flags);
2874 spin_unlock_irqrestore(&async_umap_flush_lock, flags);
2877 static void add_unmap(struct dmar_domain *dom, struct iova *iova)
2879 unsigned long flags;
2881 struct intel_iommu *iommu;
2883 spin_lock_irqsave(&async_umap_flush_lock, flags);
2884 if (list_size == HIGH_WATER_MARK)
2887 iommu = domain_get_iommu(dom);
2888 iommu_id = iommu->seq_id;
2890 next = deferred_flush[iommu_id].next;
2891 deferred_flush[iommu_id].domain[next] = dom;
2892 deferred_flush[iommu_id].iova[next] = iova;
2893 deferred_flush[iommu_id].next++;
2896 mod_timer(&unmap_timer, jiffies + msecs_to_jiffies(10));
2900 spin_unlock_irqrestore(&async_umap_flush_lock, flags);
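/*
 * For reference (declared earlier in this file; shown here only as a
 * sketch): the deferred-unmap path above is driven by a timer bound to
 * flush_unmaps_timeout(), roughly
 *
 *	static DEFINE_TIMER(unmap_timer, flush_unmaps_timeout, 0, 0);
 *
 * add_unmap() batches IOVAs per IOMMU: it flushes the pending batch as soon
 * as HIGH_WATER_MARK entries are queued, and otherwise arms a ~10ms timer
 * (if one is not already pending) so one IOTLB flush covers many unmaps.
 */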
2903 static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
2904 size_t size, enum dma_data_direction dir,
2905 struct dma_attrs *attrs)
2907 struct pci_dev *pdev = to_pci_dev(dev);
2908 struct dmar_domain *domain;
2909 unsigned long start_pfn, last_pfn;
2911 struct intel_iommu *iommu;
2913 if (iommu_no_mapping(dev))
2916 domain = find_domain(pdev);
2919 iommu = domain_get_iommu(domain);
2921 iova = find_iova(&domain->iovad, IOVA_PFN(dev_addr));
2922 if (WARN_ONCE(!iova, "Driver unmaps unmatched page at PFN %llx\n",
2923 (unsigned long long)dev_addr))
2926 start_pfn = mm_to_dma_pfn(iova->pfn_lo);
2927 last_pfn = mm_to_dma_pfn(iova->pfn_hi + 1) - 1;
2929 pr_debug("Device %s unmapping: pfn %lx-%lx\n",
2930 pci_name(pdev), start_pfn, last_pfn);
2932 /* clear the whole page */
2933 dma_pte_clear_range(domain, start_pfn, last_pfn);
2935 /* free page tables */
2936 dma_pte_free_pagetable(domain, start_pfn, last_pfn);
2938 if (intel_iommu_strict) {
2939 iommu_flush_iotlb_psi(iommu, domain->id, start_pfn,
2940 last_pfn - start_pfn + 1, 0);
2942 __free_iova(&domain->iovad, iova);
2944 add_unmap(domain, iova);
2946 * queue up the release of the unmap to save the 1/6th of the
2947 * cpu used up by the iotlb flush operation...
2952 static void *intel_alloc_coherent(struct device *hwdev, size_t size,
2953 dma_addr_t *dma_handle, gfp_t flags,
2954 struct dma_attrs *attrs)
2959 size = PAGE_ALIGN(size);
2960 order = get_order(size);
2962 if (!iommu_no_mapping(hwdev))
2963 flags &= ~(GFP_DMA | GFP_DMA32);
2964 else if (hwdev->coherent_dma_mask < dma_get_required_mask(hwdev)) {
2965 if (hwdev->coherent_dma_mask < DMA_BIT_MASK(32))
2971 vaddr = (void *)__get_free_pages(flags, order);
2974 memset(vaddr, 0, size);
2976 *dma_handle = __intel_map_single(hwdev, virt_to_bus(vaddr), size,
2978 hwdev->coherent_dma_mask);
2981 free_pages((unsigned long)vaddr, order);
2985 static void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr,
2986 dma_addr_t dma_handle, struct dma_attrs *attrs)
2990 size = PAGE_ALIGN(size);
2991 order = get_order(size);
2993 intel_unmap_page(hwdev, dma_handle, size, DMA_BIDIRECTIONAL, NULL);
2994 free_pages((unsigned long)vaddr, order);
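/*
 * Usage sketch (hypothetical caller): intel_alloc_coherent() and
 * intel_free_coherent() above back the generic coherent DMA API, e.g.:
 */
static void *example_alloc_ring(struct device *dev, size_t bytes,
				dma_addr_t *dma)
{
	void *ring = dma_alloc_coherent(dev, bytes, dma, GFP_KERNEL);

	/* released later with dma_free_coherent(dev, bytes, ring, *dma) */
	return ring;
}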
2997 static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
2998 int nelems, enum dma_data_direction dir,
2999 struct dma_attrs *attrs)
3001 struct pci_dev *pdev = to_pci_dev(hwdev);
3002 struct dmar_domain *domain;
3003 unsigned long start_pfn, last_pfn;
3005 struct intel_iommu *iommu;
3007 if (iommu_no_mapping(hwdev))
3010 domain = find_domain(pdev);
3013 iommu = domain_get_iommu(domain);
3015 iova = find_iova(&domain->iovad, IOVA_PFN(sglist[0].dma_address));
3016 if (WARN_ONCE(!iova, "Driver unmaps unmatched sglist at PFN %llx\n",
3017 (unsigned long long)sglist[0].dma_address))
3020 start_pfn = mm_to_dma_pfn(iova->pfn_lo);
3021 last_pfn = mm_to_dma_pfn(iova->pfn_hi + 1) - 1;
3023 /* clear the whole page */
3024 dma_pte_clear_range(domain, start_pfn, last_pfn);
3026 /* free page tables */
3027 dma_pte_free_pagetable(domain, start_pfn, last_pfn);
3029 if (intel_iommu_strict) {
3030 iommu_flush_iotlb_psi(iommu, domain->id, start_pfn,
3031 last_pfn - start_pfn + 1, 0);
3033 __free_iova(&domain->iovad, iova);
3035 add_unmap(domain, iova);
3037 * queue up the release of the unmap to save the 1/6th of the
3038 * cpu used up by the iotlb flush operation...
3043 static int intel_nontranslate_map_sg(struct device *hddev,
3044 struct scatterlist *sglist, int nelems, int dir)
3047 struct scatterlist *sg;
3049 for_each_sg(sglist, sg, nelems, i) {
3050 BUG_ON(!sg_page(sg));
3051 sg->dma_address = page_to_phys(sg_page(sg)) + sg->offset;
3052 sg->dma_length = sg->length;
3057 static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems,
3058 enum dma_data_direction dir, struct dma_attrs *attrs)
3061 struct pci_dev *pdev = to_pci_dev(hwdev);
3062 struct dmar_domain *domain;
3065 struct iova *iova = NULL;
3067 struct scatterlist *sg;
3068 unsigned long start_vpfn;
3069 struct intel_iommu *iommu;
3071 BUG_ON(dir == DMA_NONE);
3072 if (iommu_no_mapping(hwdev))
3073 return intel_nontranslate_map_sg(hwdev, sglist, nelems, dir);
3075 domain = get_valid_domain_for_dev(pdev);
3079 iommu = domain_get_iommu(domain);
3081 for_each_sg(sglist, sg, nelems, i)
3082 size += aligned_nrpages(sg->offset, sg->length);
3084 iova = intel_alloc_iova(hwdev, domain, dma_to_mm_pfn(size),
3087 sglist->dma_length = 0;
3092 * Check if DMAR supports zero-length reads on write-only mappings
3095 if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL || \
3096 !cap_zlr(iommu->cap))
3097 prot |= DMA_PTE_READ;
3098 if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
3099 prot |= DMA_PTE_WRITE;
3101 start_vpfn = mm_to_dma_pfn(iova->pfn_lo);
3103 ret = domain_sg_mapping(domain, start_vpfn, sglist, size, prot);
3104 if (unlikely(ret)) {
3105 /* clear the page */
3106 dma_pte_clear_range(domain, start_vpfn,
3107 start_vpfn + size - 1);
3108 /* free page tables */
3109 dma_pte_free_pagetable(domain, start_vpfn,
3110 start_vpfn + size - 1);
3112 __free_iova(&domain->iovad, iova);
3116 /* it's a non-present to present mapping. Only flush if caching mode */
3117 if (cap_caching_mode(iommu->cap))
3118 iommu_flush_iotlb_psi(iommu, domain->id, start_vpfn, size, 1);
3120 iommu_flush_write_buffer(iommu);
3125 static int intel_mapping_error(struct device *dev, dma_addr_t dma_addr)
3130 struct dma_map_ops intel_dma_ops = {
3131 .alloc = intel_alloc_coherent,
3132 .free = intel_free_coherent,
3133 .map_sg = intel_map_sg,
3134 .unmap_sg = intel_unmap_sg,
3135 .map_page = intel_map_page,
3136 .unmap_page = intel_unmap_page,
3137 .mapping_error = intel_mapping_error,
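/*
 * Usage sketch (hypothetical caller): scatter-gather requests reach
 * intel_map_sg()/intel_unmap_sg() through the generic DMA API once the
 * intel_dma_ops table above is installed as the global dma_ops.
 */
static int example_driver_map_sg(struct device *dev, struct scatterlist *sgl,
				 int nents)
{
	int mapped = dma_map_sg(dev, sgl, nents, DMA_FROM_DEVICE);

	if (!mapped)
		return -ENOMEM;

	/* ... program the device using sg_dma_address()/sg_dma_len() ... */

	dma_unmap_sg(dev, sgl, nents, DMA_FROM_DEVICE);
	return 0;
}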
3140 static inline int iommu_domain_cache_init(void)
3144 iommu_domain_cache = kmem_cache_create("iommu_domain",
3145 sizeof(struct dmar_domain),
3150 if (!iommu_domain_cache) {
3151 printk(KERN_ERR "Couldn't create iommu_domain cache\n");
3158 static inline int iommu_devinfo_cache_init(void)
3162 iommu_devinfo_cache = kmem_cache_create("iommu_devinfo",
3163 sizeof(struct device_domain_info),
3167 if (!iommu_devinfo_cache) {
3168 printk(KERN_ERR "Couldn't create devinfo cache\n");
3175 static inline int iommu_iova_cache_init(void)
3179 iommu_iova_cache = kmem_cache_create("iommu_iova",
3180 sizeof(struct iova),
3184 if (!iommu_iova_cache) {
3185 printk(KERN_ERR "Couldn't create iova cache\n");
3192 static int __init iommu_init_mempool(void)
3195 ret = iommu_iova_cache_init();
3199 ret = iommu_domain_cache_init();
3203 ret = iommu_devinfo_cache_init();
3207 kmem_cache_destroy(iommu_domain_cache);
3209 kmem_cache_destroy(iommu_iova_cache);
3214 static void __init iommu_exit_mempool(void)
3216 kmem_cache_destroy(iommu_devinfo_cache);
3217 kmem_cache_destroy(iommu_domain_cache);
3218 kmem_cache_destroy(iommu_iova_cache);
3222 static void quirk_ioat_snb_local_iommu(struct pci_dev *pdev)
3224 struct dmar_drhd_unit *drhd;
3228 /* We know that this device on this chipset has its own IOMMU.
3229 * If we find it under a different IOMMU, then the BIOS is lying
3230 * to us. Hope that the IOMMU for this device is actually
3231 * disabled, and it needs no translation...
3233 rc = pci_bus_read_config_dword(pdev->bus, PCI_DEVFN(0, 0), 0xb0, &vtbar);
3235 /* "can't" happen */
3236 dev_info(&pdev->dev, "failed to run vt-d quirk\n");
3239 vtbar &= 0xffff0000;
3241 /* we know that this iommu should be at offset 0xa000 from vtbar */
3242 drhd = dmar_find_matched_drhd_unit(pdev);
3243 if (WARN_TAINT_ONCE(!drhd || drhd->reg_base_addr - vtbar != 0xa000,
3244 TAINT_FIRMWARE_WORKAROUND,
3245 "BIOS assigned incorrect VT-d unit for Intel(R) QuickData Technology device\n"))
3246 pdev->dev.archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
3248 DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB, quirk_ioat_snb_local_iommu);
3250 static void __init init_no_remapping_devices(void)
3252 struct dmar_drhd_unit *drhd;
3254 for_each_drhd_unit(drhd) {
3255 if (!drhd->include_all) {
3257 for (i = 0; i < drhd->devices_cnt; i++)
3258 if (drhd->devices[i] != NULL)
3260 /* ignore DMAR unit if no pci devices exist */
3261 if (i == drhd->devices_cnt)
3266 for_each_drhd_unit(drhd) {
3268 if (drhd->ignored || drhd->include_all)
3271 for (i = 0; i < drhd->devices_cnt; i++)
3272 if (drhd->devices[i] &&
3273 !IS_GFX_DEVICE(drhd->devices[i]))
3276 if (i < drhd->devices_cnt)
3279 /* This IOMMU has *only* gfx devices. Either bypass it or
3280 set the gfx_mapped flag, as appropriate */
3282 intel_iommu_gfx_mapped = 1;
3285 for (i = 0; i < drhd->devices_cnt; i++) {
3286 if (!drhd->devices[i])
3288 drhd->devices[i]->dev.archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
3294 #ifdef CONFIG_SUSPEND
3295 static int init_iommu_hw(void)
3297 struct dmar_drhd_unit *drhd;
3298 struct intel_iommu *iommu = NULL;
3300 for_each_active_iommu(iommu, drhd)
3302 dmar_reenable_qi(iommu);
3304 for_each_iommu(iommu, drhd) {
3305 if (drhd->ignored) {
3307 * we always have to disable PMRs or DMA may fail on this device
3311 iommu_disable_protect_mem_regions(iommu);
3315 iommu_flush_write_buffer(iommu);
3317 iommu_set_root_entry(iommu);
3319 iommu->flush.flush_context(iommu, 0, 0, 0,
3320 DMA_CCMD_GLOBAL_INVL);
3321 iommu->flush.flush_iotlb(iommu, 0, 0, 0,
3322 DMA_TLB_GLOBAL_FLUSH);
3323 if (iommu_enable_translation(iommu))
3325 iommu_disable_protect_mem_regions(iommu);
3331 static void iommu_flush_all(void)
3333 struct dmar_drhd_unit *drhd;
3334 struct intel_iommu *iommu;
3336 for_each_active_iommu(iommu, drhd) {
3337 iommu->flush.flush_context(iommu, 0, 0, 0,
3338 DMA_CCMD_GLOBAL_INVL);
3339 iommu->flush.flush_iotlb(iommu, 0, 0, 0,
3340 DMA_TLB_GLOBAL_FLUSH);
3344 static int iommu_suspend(void)
3346 struct dmar_drhd_unit *drhd;
3347 struct intel_iommu *iommu = NULL;
3350 for_each_active_iommu(iommu, drhd) {
3351 iommu->iommu_state = kzalloc(sizeof(u32) * MAX_SR_DMAR_REGS,
3353 if (!iommu->iommu_state)
3359 for_each_active_iommu(iommu, drhd) {
3360 iommu_disable_translation(iommu);
3362 raw_spin_lock_irqsave(&iommu->register_lock, flag);
3364 iommu->iommu_state[SR_DMAR_FECTL_REG] =
3365 readl(iommu->reg + DMAR_FECTL_REG);
3366 iommu->iommu_state[SR_DMAR_FEDATA_REG] =
3367 readl(iommu->reg + DMAR_FEDATA_REG);
3368 iommu->iommu_state[SR_DMAR_FEADDR_REG] =
3369 readl(iommu->reg + DMAR_FEADDR_REG);
3370 iommu->iommu_state[SR_DMAR_FEUADDR_REG] =
3371 readl(iommu->reg + DMAR_FEUADDR_REG);
3373 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
3378 for_each_active_iommu(iommu, drhd)
3379 kfree(iommu->iommu_state);
3384 static void iommu_resume(void)
3386 struct dmar_drhd_unit *drhd;
3387 struct intel_iommu *iommu = NULL;
3390 if (init_iommu_hw()) {
3392 panic("tboot: IOMMU setup failed, DMAR can not resume!\n");
3394 WARN(1, "IOMMU setup failed, DMAR can not resume!\n");
3398 for_each_active_iommu(iommu, drhd) {
3400 raw_spin_lock_irqsave(&iommu->register_lock, flag);
3402 writel(iommu->iommu_state[SR_DMAR_FECTL_REG],
3403 iommu->reg + DMAR_FECTL_REG);
3404 writel(iommu->iommu_state[SR_DMAR_FEDATA_REG],
3405 iommu->reg + DMAR_FEDATA_REG);
3406 writel(iommu->iommu_state[SR_DMAR_FEADDR_REG],
3407 iommu->reg + DMAR_FEADDR_REG);
3408 writel(iommu->iommu_state[SR_DMAR_FEUADDR_REG],
3409 iommu->reg + DMAR_FEUADDR_REG);
3411 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
3414 for_each_active_iommu(iommu, drhd)
3415 kfree(iommu->iommu_state);
3418 static struct syscore_ops iommu_syscore_ops = {
3419 .resume = iommu_resume,
3420 .suspend = iommu_suspend,
3423 static void __init init_iommu_pm_ops(void)
3425 register_syscore_ops(&iommu_syscore_ops);
3429 static inline void init_iommu_pm_ops(void) {}
3430 #endif /* CONFIG_PM */
3432 LIST_HEAD(dmar_rmrr_units);
3434 static void __init dmar_register_rmrr_unit(struct dmar_rmrr_unit *rmrr)
3436 list_add(&rmrr->list, &dmar_rmrr_units);
3440 int __init dmar_parse_one_rmrr(struct acpi_dmar_header *header)
3442 struct acpi_dmar_reserved_memory *rmrr;
3443 struct dmar_rmrr_unit *rmrru;
3445 rmrru = kzalloc(sizeof(*rmrru), GFP_KERNEL);
3449 rmrru->hdr = header;
3450 rmrr = (struct acpi_dmar_reserved_memory *)header;
3451 rmrru->base_address = rmrr->base_address;
3452 rmrru->end_address = rmrr->end_address;
3454 dmar_register_rmrr_unit(rmrru);
3459 rmrr_parse_dev(struct dmar_rmrr_unit *rmrru)
3461 struct acpi_dmar_reserved_memory *rmrr;
3464 rmrr = (struct acpi_dmar_reserved_memory *) rmrru->hdr;
3465 ret = dmar_parse_dev_scope((void *)(rmrr + 1),
3466 ((void *)rmrr) + rmrr->header.length,
3467 &rmrru->devices_cnt, &rmrru->devices, rmrr->segment);
3469 if (ret || (rmrru->devices_cnt == 0)) {
3470 list_del(&rmrru->list);
3476 static LIST_HEAD(dmar_atsr_units);
3478 int __init dmar_parse_one_atsr(struct acpi_dmar_header *hdr)
3480 struct acpi_dmar_atsr *atsr;
3481 struct dmar_atsr_unit *atsru;
3483 atsr = container_of(hdr, struct acpi_dmar_atsr, header);
3484 atsru = kzalloc(sizeof(*atsru), GFP_KERNEL);
3489 atsru->include_all = atsr->flags & 0x1;
3491 list_add(&atsru->list, &dmar_atsr_units);
3496 static int __init atsr_parse_dev(struct dmar_atsr_unit *atsru)
3499 struct acpi_dmar_atsr *atsr;
3501 if (atsru->include_all)
3504 atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
3505 rc = dmar_parse_dev_scope((void *)(atsr + 1),
3506 (void *)atsr + atsr->header.length,
3507 &atsru->devices_cnt, &atsru->devices,
3509 if (rc || !atsru->devices_cnt) {
3510 list_del(&atsru->list);
3517 int dmar_find_matched_atsr_unit(struct pci_dev *dev)
3520 struct pci_bus *bus;
3521 struct acpi_dmar_atsr *atsr;
3522 struct dmar_atsr_unit *atsru;
3524 dev = pci_physfn(dev);
3526 list_for_each_entry(atsru, &dmar_atsr_units, list) {
3527 atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
3528 if (atsr->segment == pci_domain_nr(dev->bus))
3535 for (bus = dev->bus; bus; bus = bus->parent) {
3536 struct pci_dev *bridge = bus->self;
3538 if (!bridge || !pci_is_pcie(bridge) ||
3539 bridge->pcie_type == PCI_EXP_TYPE_PCI_BRIDGE)
3542 if (bridge->pcie_type == PCI_EXP_TYPE_ROOT_PORT) {
3543 for (i = 0; i < atsru->devices_cnt; i++)
3544 if (atsru->devices[i] == bridge)
3550 if (atsru->include_all)
3556 int __init dmar_parse_rmrr_atsr_dev(void)
3558 struct dmar_rmrr_unit *rmrr, *rmrr_n;
3559 struct dmar_atsr_unit *atsr, *atsr_n;
3562 list_for_each_entry_safe(rmrr, rmrr_n, &dmar_rmrr_units, list) {
3563 ret = rmrr_parse_dev(rmrr);
3568 list_for_each_entry_safe(atsr, atsr_n, &dmar_atsr_units, list) {
3569 ret = atsr_parse_dev(atsr);
3578 * Here we only respond to the action of a device being unbound from its driver.
3580 * A newly added device is not attached to its DMAR domain here yet; that will
3581 * happen when the device is first mapped to an iova.
3583 static int device_notifier(struct notifier_block *nb,
3584 unsigned long action, void *data)
3586 struct device *dev = data;
3587 struct pci_dev *pdev = to_pci_dev(dev);
3588 struct dmar_domain *domain;
3590 if (iommu_no_mapping(dev))
3593 domain = find_domain(pdev);
3597 if (action == BUS_NOTIFY_UNBOUND_DRIVER && !iommu_pass_through) {
3598 domain_remove_one_dev_info(domain, pdev);
3600 if (!(domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE) &&
3601 !(domain->flags & DOMAIN_FLAG_STATIC_IDENTITY) &&
3602 list_empty(&domain->devices))
3603 domain_exit(domain);
3609 static struct notifier_block device_nb = {
3610 .notifier_call = device_notifier,
3613 int __init intel_iommu_init(void)
3617 /* VT-d is required for a TXT/tboot launch, so enforce that */
3618 force_on = tboot_force_iommu();
3620 if (dmar_table_init()) {
3622 panic("tboot: Failed to initialize DMAR table\n");
3626 if (dmar_dev_scope_init() < 0) {
3628 panic("tboot: Failed to initialize DMAR device scope\n");
3632 if (no_iommu || dmar_disabled)
3635 if (iommu_init_mempool()) {
3637 panic("tboot: Failed to initialize iommu memory\n");
3641 if (list_empty(&dmar_rmrr_units))
3642 printk(KERN_INFO "DMAR: No RMRR found\n");
3644 if (list_empty(&dmar_atsr_units))
3645 printk(KERN_INFO "DMAR: No ATSR found\n");
3647 if (dmar_init_reserved_ranges()) {
3649 panic("tboot: Failed to reserve iommu ranges\n");
3653 init_no_remapping_devices();
3658 panic("tboot: Failed to initialize DMARs\n");
3659 printk(KERN_ERR "IOMMU: dmar init failed\n");
3660 put_iova_domain(&reserved_iova_list);
3661 iommu_exit_mempool();
3665 "PCI-DMA: Intel(R) Virtualization Technology for Directed I/O\n");
3667 init_timer(&unmap_timer);
3668 #ifdef CONFIG_SWIOTLB
3671 dma_ops = &intel_dma_ops;
3673 init_iommu_pm_ops();
3675 bus_set_iommu(&pci_bus_type, &intel_iommu_ops);
3677 bus_register_notifier(&pci_bus_type, &device_nb);
3679 intel_iommu_enabled = 1;
3684 static void iommu_detach_dependent_devices(struct intel_iommu *iommu,
3685 struct pci_dev *pdev)
3687 struct pci_dev *tmp, *parent;
3689 if (!iommu || !pdev)
3692 /* dependent device detach */
3693 tmp = pci_find_upstream_pcie_bridge(pdev);
3694 /* Secondary interface's bus number and devfn 0 */
3696 parent = pdev->bus->self;
3697 while (parent != tmp) {
3698 iommu_detach_dev(iommu, parent->bus->number,
3700 parent = parent->bus->self;
3702 if (pci_is_pcie(tmp)) /* this is a PCIe-to-PCI bridge */
3703 iommu_detach_dev(iommu,
3704 tmp->subordinate->number, 0);
3705 else /* this is a legacy PCI bridge */
3706 iommu_detach_dev(iommu, tmp->bus->number,
3711 static void domain_remove_one_dev_info(struct dmar_domain *domain,
3712 struct pci_dev *pdev)
3714 struct device_domain_info *info;
3715 struct intel_iommu *iommu;
3716 unsigned long flags;
3718 struct list_head *entry, *tmp;
3720 iommu = device_to_iommu(pci_domain_nr(pdev->bus), pdev->bus->number,
3725 spin_lock_irqsave(&device_domain_lock, flags);
3726 list_for_each_safe(entry, tmp, &domain->devices) {
3727 info = list_entry(entry, struct device_domain_info, link);
3728 if (info->segment == pci_domain_nr(pdev->bus) &&
3729 info->bus == pdev->bus->number &&
3730 info->devfn == pdev->devfn) {
3731 list_del(&info->link);
3732 list_del(&info->global);
3734 info->dev->dev.archdata.iommu = NULL;
3735 spin_unlock_irqrestore(&device_domain_lock, flags);
3737 iommu_disable_dev_iotlb(info);
3738 iommu_detach_dev(iommu, info->bus, info->devfn);
3739 iommu_detach_dependent_devices(iommu, pdev);
3740 free_devinfo_mem(info);
3742 spin_lock_irqsave(&device_domain_lock, flags);
3750 /* if there are no other devices under the same iommu
3751 * owned by this domain, clear this iommu from iommu_bmp and
3752 * update the iommu count and coherency
3754 if (iommu == device_to_iommu(info->segment, info->bus,
3759 spin_unlock_irqrestore(&device_domain_lock, flags);
3762 unsigned long tmp_flags;
3763 spin_lock_irqsave(&domain->iommu_lock, tmp_flags);
3764 clear_bit(iommu->seq_id, domain->iommu_bmp);
3765 domain->iommu_count--;
3766 domain_update_iommu_cap(domain);
3767 spin_unlock_irqrestore(&domain->iommu_lock, tmp_flags);
3769 if (!(domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE) &&
3770 !(domain->flags & DOMAIN_FLAG_STATIC_IDENTITY)) {
3771 spin_lock_irqsave(&iommu->lock, tmp_flags);
3772 clear_bit(domain->id, iommu->domain_ids);
3773 iommu->domains[domain->id] = NULL;
3774 spin_unlock_irqrestore(&iommu->lock, tmp_flags);
3779 static void vm_domain_remove_all_dev_info(struct dmar_domain *domain)
3781 struct device_domain_info *info;
3782 struct intel_iommu *iommu;
3783 unsigned long flags1, flags2;
3785 spin_lock_irqsave(&device_domain_lock, flags1);
3786 while (!list_empty(&domain->devices)) {
3787 info = list_entry(domain->devices.next,
3788 struct device_domain_info, link);
3789 list_del(&info->link);
3790 list_del(&info->global);
3792 info->dev->dev.archdata.iommu = NULL;
3794 spin_unlock_irqrestore(&device_domain_lock, flags1);
3796 iommu_disable_dev_iotlb(info);
3797 iommu = device_to_iommu(info->segment, info->bus, info->devfn);
3798 iommu_detach_dev(iommu, info->bus, info->devfn);
3799 iommu_detach_dependent_devices(iommu, info->dev);
3801 /* clear this iommu in iommu_bmp, update iommu count
3804 spin_lock_irqsave(&domain->iommu_lock, flags2);
3805 if (test_and_clear_bit(iommu->seq_id,
3806 domain->iommu_bmp)) {
3807 domain->iommu_count--;
3808 domain_update_iommu_cap(domain);
3810 spin_unlock_irqrestore(&domain->iommu_lock, flags2);
3812 free_devinfo_mem(info);
3813 spin_lock_irqsave(&device_domain_lock, flags1);
3815 spin_unlock_irqrestore(&device_domain_lock, flags1);
3818 /* domain id for virtual machines; it won't be set in context entries */
3819 static unsigned long vm_domid;
3821 static struct dmar_domain *iommu_alloc_vm_domain(void)
3823 struct dmar_domain *domain;
3825 domain = alloc_domain_mem();
3829 domain->id = vm_domid++;
3831 memset(domain->iommu_bmp, 0, sizeof(domain->iommu_bmp));
3832 domain->flags = DOMAIN_FLAG_VIRTUAL_MACHINE;
3837 static int md_domain_init(struct dmar_domain *domain, int guest_width)
3841 init_iova_domain(&domain->iovad, DMA_32BIT_PFN);
3842 spin_lock_init(&domain->iommu_lock);
3844 domain_reserve_special_ranges(domain);
3846 /* calculate AGAW */
3847 domain->gaw = guest_width;
3848 adjust_width = guestwidth_to_adjustwidth(guest_width);
3849 domain->agaw = width_to_agaw(adjust_width);
3851 INIT_LIST_HEAD(&domain->devices);
3853 domain->iommu_count = 0;
3854 domain->iommu_coherency = 0;
3855 domain->iommu_snooping = 0;
3856 domain->iommu_superpage = 0;
3857 domain->max_addr = 0;
3860 /* always allocate the top pgd */
3861 domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
3864 domain_flush_cache(domain, domain->pgd, PAGE_SIZE);
3868 static void iommu_free_vm_domain(struct dmar_domain *domain)
3870 unsigned long flags;
3871 struct dmar_drhd_unit *drhd;
3872 struct intel_iommu *iommu;
3874 unsigned long ndomains;
3876 for_each_drhd_unit(drhd) {
3879 iommu = drhd->iommu;
3881 ndomains = cap_ndoms(iommu->cap);
3882 for_each_set_bit(i, iommu->domain_ids, ndomains) {
3883 if (iommu->domains[i] == domain) {
3884 spin_lock_irqsave(&iommu->lock, flags);
3885 clear_bit(i, iommu->domain_ids);
3886 iommu->domains[i] = NULL;
3887 spin_unlock_irqrestore(&iommu->lock, flags);
3894 static void vm_domain_exit(struct dmar_domain *domain)
3896 /* Domain 0 is reserved, so don't process it */
3900 vm_domain_remove_all_dev_info(domain);
3902 put_iova_domain(&domain->iovad);
3905 dma_pte_clear_range(domain, 0, DOMAIN_MAX_PFN(domain->gaw));
3907 /* free page tables */
3908 dma_pte_free_pagetable(domain, 0, DOMAIN_MAX_PFN(domain->gaw));
3910 iommu_free_vm_domain(domain);
3911 free_domain_mem(domain);
3914 static int intel_iommu_domain_init(struct iommu_domain *domain)
3916 struct dmar_domain *dmar_domain;
3918 dmar_domain = iommu_alloc_vm_domain();
3921 "intel_iommu_domain_init: dmar_domain == NULL\n");
3924 if (md_domain_init(dmar_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
3926 "intel_iommu_domain_init() failed\n");
3927 vm_domain_exit(dmar_domain);
3930 domain_update_iommu_cap(dmar_domain);
3931 domain->priv = dmar_domain;
3936 static void intel_iommu_domain_destroy(struct iommu_domain *domain)
3938 struct dmar_domain *dmar_domain = domain->priv;
3940 domain->priv = NULL;
3941 vm_domain_exit(dmar_domain);
3944 static int intel_iommu_attach_device(struct iommu_domain *domain,
3947 struct dmar_domain *dmar_domain = domain->priv;
3948 struct pci_dev *pdev = to_pci_dev(dev);
3949 struct intel_iommu *iommu;
3952 /* normally pdev is not mapped */
3953 if (unlikely(domain_context_mapped(pdev))) {
3954 struct dmar_domain *old_domain;
3956 old_domain = find_domain(pdev);
3958 if (dmar_domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE ||
3959 dmar_domain->flags & DOMAIN_FLAG_STATIC_IDENTITY)
3960 domain_remove_one_dev_info(old_domain, pdev);
3962 domain_remove_dev_info(old_domain);
3966 iommu = device_to_iommu(pci_domain_nr(pdev->bus), pdev->bus->number,
3971 /* check if this iommu agaw is sufficient for max mapped address */
3972 addr_width = agaw_to_width(iommu->agaw);
3973 if (addr_width > cap_mgaw(iommu->cap))
3974 addr_width = cap_mgaw(iommu->cap);
3976 if (dmar_domain->max_addr > (1LL << addr_width)) {
3977 printk(KERN_ERR "%s: iommu width (%d) is not "
3978 "sufficient for the mapped address (%llx)\n",
3979 __func__, addr_width, dmar_domain->max_addr);
3982 dmar_domain->gaw = addr_width;
3985 * Knock out extra levels of page tables if necessary
3987 while (iommu->agaw < dmar_domain->agaw) {
3988 struct dma_pte *pte;
3990 pte = dmar_domain->pgd;
3991 if (dma_pte_present(pte)) {
3992 dmar_domain->pgd = (struct dma_pte *)
3993 phys_to_virt(dma_pte_addr(pte));
3994 free_pgtable_page(pte);
3996 dmar_domain->agaw--;
3999 return domain_add_dev_info(dmar_domain, pdev, CONTEXT_TT_MULTI_LEVEL);
4002 static void intel_iommu_detach_device(struct iommu_domain *domain,
4005 struct dmar_domain *dmar_domain = domain->priv;
4006 struct pci_dev *pdev = to_pci_dev(dev);
4008 domain_remove_one_dev_info(dmar_domain, pdev);
4011 static int intel_iommu_map(struct iommu_domain *domain,
4012 unsigned long iova, phys_addr_t hpa,
4013 size_t size, int iommu_prot)
4015 struct dmar_domain *dmar_domain = domain->priv;
4020 if (iommu_prot & IOMMU_READ)
4021 prot |= DMA_PTE_READ;
4022 if (iommu_prot & IOMMU_WRITE)
4023 prot |= DMA_PTE_WRITE;
4024 if ((iommu_prot & IOMMU_CACHE) && dmar_domain->iommu_snooping)
4025 prot |= DMA_PTE_SNP;
4027 max_addr = iova + size;
4028 if (dmar_domain->max_addr < max_addr) {
4031 /* check if minimum agaw is sufficient for mapped address */
4032 end = __DOMAIN_MAX_ADDR(dmar_domain->gaw) + 1;
4033 if (end < max_addr) {
4034 printk(KERN_ERR "%s: iommu width (%d) is not "
4035 "sufficient for the mapped address (%llx)\n",
4036 __func__, dmar_domain->gaw, max_addr);
4039 dmar_domain->max_addr = max_addr;
4041 /* Round up size to next multiple of PAGE_SIZE, if it and
4042 the low bits of hpa would take us onto the next page */
4043 size = aligned_nrpages(hpa, size);
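/*
 * Worked example (illustration only): on a kernel with 4KiB pages, if hpa
 * lies 0xe00 bytes into a page and size is 0x400 bytes, the range spills
 * onto the next page, so aligned_nrpages() returns 2 and both pages are
 * mapped below.
 */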
4044 ret = domain_pfn_mapping(dmar_domain, iova >> VTD_PAGE_SHIFT,
4045 hpa >> VTD_PAGE_SHIFT, size, prot);
4049 static size_t intel_iommu_unmap(struct iommu_domain *domain,
4050 unsigned long iova, size_t size)
4052 struct dmar_domain *dmar_domain = domain->priv;
4055 order = dma_pte_clear_range(dmar_domain, iova >> VTD_PAGE_SHIFT,
4056 (iova + size - 1) >> VTD_PAGE_SHIFT);
4058 if (dmar_domain->max_addr == iova + size)
4059 dmar_domain->max_addr = iova;
4061 return PAGE_SIZE << order;
4064 static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
4067 struct dmar_domain *dmar_domain = domain->priv;
4068 struct dma_pte *pte;
4071 pte = pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, 0);
4073 phys = dma_pte_addr(pte);
4078 static int intel_iommu_domain_has_cap(struct iommu_domain *domain,
4081 struct dmar_domain *dmar_domain = domain->priv;
4083 if (cap == IOMMU_CAP_CACHE_COHERENCY)
4084 return dmar_domain->iommu_snooping;
4085 if (cap == IOMMU_CAP_INTR_REMAP)
4086 return irq_remapping_enabled;
4092 * Group numbers are arbitrary. Devices with the same group number
4093 * indicate that the iommu cannot differentiate between them. To avoid
4094 * tracking used groups we just use the seg|bus|devfn of the lowest
4095 * level at which we're able to differentiate devices.
4097 static int intel_iommu_device_group(struct device *dev, unsigned int *groupid)
4099 struct pci_dev *pdev = to_pci_dev(dev);
4100 struct pci_dev *bridge;
4110 if (iommu_no_mapping(dev))
4113 id.pci.segment = pci_domain_nr(pdev->bus);
4114 id.pci.bus = pdev->bus->number;
4115 id.pci.devfn = pdev->devfn;
4117 if (!device_to_iommu(id.pci.segment, id.pci.bus, id.pci.devfn))
4120 bridge = pci_find_upstream_pcie_bridge(pdev);
4122 if (pci_is_pcie(bridge)) {
4123 id.pci.bus = bridge->subordinate->number;
4126 id.pci.bus = bridge->bus->number;
4127 id.pci.devfn = bridge->devfn;
4131 if (!pdev->is_virtfn && iommu_group_mf)
4132 id.pci.devfn = PCI_DEVFN(PCI_SLOT(id.pci.devfn), 0);
4134 *groupid = id.group;
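/*
 * For illustration only: the id union used above simply packs segment, bus
 * and devfn into a single 32-bit group number.  An equivalent explicit
 * packing (hypothetical helper; the byte order within the union does not
 * matter since group numbers are arbitrary) would be:
 */
static inline u32 example_pack_group_id(u16 segment, u8 bus, u8 devfn)
{
	return ((u32)segment << 16) | ((u32)bus << 8) | devfn;
}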
4139 static struct iommu_ops intel_iommu_ops = {
4140 .domain_init = intel_iommu_domain_init,
4141 .domain_destroy = intel_iommu_domain_destroy,
4142 .attach_dev = intel_iommu_attach_device,
4143 .detach_dev = intel_iommu_detach_device,
4144 .map = intel_iommu_map,
4145 .unmap = intel_iommu_unmap,
4146 .iova_to_phys = intel_iommu_iova_to_phys,
4147 .domain_has_cap = intel_iommu_domain_has_cap,
4148 .device_group = intel_iommu_device_group,
4149 .pgsize_bitmap = INTEL_IOMMU_PGSIZES,
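/*
 * Usage sketch (hypothetical caller, e.g. a device-assignment path): the
 * ops table above is consumed through the generic IOMMU API, which
 * intel_iommu_init() registers for the PCI bus via bus_set_iommu().
 */
static int example_assign_device(struct device *dev)
{
	struct iommu_domain *domain;
	int ret;

	domain = iommu_domain_alloc(&pci_bus_type);
	if (!domain)
		return -ENOMEM;

	ret = iommu_attach_device(domain, dev);
	if (ret)
		goto out_free;

	/* identity-map the first 16MiB for the guest, read/write */
	ret = iommu_map(domain, 0, 0, 16 << 20, IOMMU_READ | IOMMU_WRITE);
	if (ret)
		goto out_detach;

	/* ... run the guest ... */

	iommu_unmap(domain, 0, 16 << 20);
out_detach:
	iommu_detach_device(domain, dev);
out_free:
	iommu_domain_free(domain);
	return ret;
}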
4152 static void __devinit quirk_iommu_rwbf(struct pci_dev *dev)
4155 * Mobile 4 Series Chipset neglects to set RWBF capability, but needs it.
4158 printk(KERN_INFO "DMAR: Forcing write-buffer flush capability\n");
4161 /* https://bugzilla.redhat.com/show_bug.cgi?id=538163 */
4162 if (dev->revision == 0x07) {
4163 printk(KERN_INFO "DMAR: Disabling IOMMU for graphics on this chipset\n");
4168 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_rwbf);
4171 #define GGC_MEMORY_SIZE_MASK (0xf << 8)
4172 #define GGC_MEMORY_SIZE_NONE (0x0 << 8)
4173 #define GGC_MEMORY_SIZE_1M (0x1 << 8)
4174 #define GGC_MEMORY_SIZE_2M (0x3 << 8)
4175 #define GGC_MEMORY_VT_ENABLED (0x8 << 8)
4176 #define GGC_MEMORY_SIZE_2M_VT (0x9 << 8)
4177 #define GGC_MEMORY_SIZE_3M_VT (0xa << 8)
4178 #define GGC_MEMORY_SIZE_4M_VT (0xb << 8)
4180 static void __devinit quirk_calpella_no_shadow_gtt(struct pci_dev *dev)
4184 if (pci_read_config_word(dev, GGC, &ggc))
4187 if (!(ggc & GGC_MEMORY_VT_ENABLED)) {
4188 printk(KERN_INFO "DMAR: BIOS has allocated no shadow GTT; disabling IOMMU for graphics\n");
4190 } else if (dmar_map_gfx) {
4191 /* we have to ensure the gfx device is idle before we flush */
4192 printk(KERN_INFO "DMAR: Disabling batched IOTLB flush on Ironlake\n");
4193 intel_iommu_strict = 1;
4196 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0040, quirk_calpella_no_shadow_gtt);
4197 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0044, quirk_calpella_no_shadow_gtt);
4198 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0062, quirk_calpella_no_shadow_gtt);
4199 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x006a, quirk_calpella_no_shadow_gtt);
4201 /* On Tylersburg chipsets, some BIOSes have been known to enable the
4202 ISOCH DMAR unit for the Azalia sound device, but not give it any
4203 TLB entries, which causes it to deadlock. Check for that. We do
4204 this in a function called from init_dmars(), instead of in a PCI
4205 quirk, because we don't want to print the obnoxious "BIOS broken"
4206 message if VT-d is actually disabled.
4208 static void __init check_tylersburg_isoch(void)
4210 struct pci_dev *pdev;
4211 uint32_t vtisochctrl;
4213 /* If there's no Azalia in the system anyway, forget it. */
4214 pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x3a3e, NULL);
4219 /* System Management Registers. Might be hidden, in which case
4220 we can't do the sanity check. But that's OK, because the
4221 known-broken BIOSes _don't_ actually hide it, so far. */
4222 pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x342e, NULL);
4226 if (pci_read_config_dword(pdev, 0x188, &vtisochctrl)) {
4233 /* If Azalia DMA is routed to the non-isoch DMAR unit, fine. */
4234 if (vtisochctrl & 1)
4237 /* Drop all bits other than the number of TLB entries */
4238 vtisochctrl &= 0x1c;
4240 /* If we have the recommended number of TLB entries (16), fine. */
4241 if (vtisochctrl == 0x10)
4244 /* Zero TLB entries? You get to ride the short bus to school. */
4246 WARN(1, "Your BIOS is broken; DMA routed to ISOCH DMAR unit but no TLB space.\n"
4247 "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
4248 dmi_get_system_info(DMI_BIOS_VENDOR),
4249 dmi_get_system_info(DMI_BIOS_VERSION),
4250 dmi_get_system_info(DMI_PRODUCT_VERSION));
4251 iommu_identity_mapping |= IDENTMAP_AZALIA;
4255 printk(KERN_WARNING "DMAR: Recommended TLB entries for ISOCH unit is 16; your BIOS set %d\n",