2 * Copyright (c) 2006, Intel Corporation.
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * You should have received a copy of the GNU General Public License along with
14 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
15 * Place - Suite 330, Boston, MA 02111-1307 USA.
17 * Copyright (C) 2006-2008 Intel Corporation
18 * Author: Ashok Raj <ashok.raj@intel.com>
19 * Author: Shaohua Li <shaohua.li@intel.com>
20 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
21 * Author: Fenghua Yu <fenghua.yu@intel.com>
24 #include <linux/init.h>
25 #include <linux/bitmap.h>
26 #include <linux/debugfs.h>
27 #include <linux/slab.h>
28 #include <linux/irq.h>
29 #include <linux/interrupt.h>
30 #include <linux/spinlock.h>
31 #include <linux/pci.h>
32 #include <linux/dmar.h>
33 #include <linux/dma-mapping.h>
34 #include <linux/mempool.h>
35 #include <linux/timer.h>
36 #include <linux/iova.h>
37 #include <linux/iommu.h>
38 #include <linux/intel-iommu.h>
39 #include <linux/sysdev.h>
40 #include <asm/cacheflush.h>
41 #include <asm/iommu.h>
44 #define ROOT_SIZE VTD_PAGE_SIZE
45 #define CONTEXT_SIZE VTD_PAGE_SIZE
47 #define IS_GFX_DEVICE(pdev) ((pdev->class >> 16) == PCI_BASE_CLASS_DISPLAY)
48 #define IS_ISA_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_BRIDGE_ISA)
50 #define IOAPIC_RANGE_START (0xfee00000)
51 #define IOAPIC_RANGE_END (0xfeefffff)
52 #define IOVA_START_ADDR (0x1000)
54 #define DEFAULT_DOMAIN_ADDRESS_WIDTH 48
56 #define MAX_AGAW_WIDTH 64
58 #define DOMAIN_MAX_ADDR(gaw) ((((u64)1) << gaw) - 1)
59 #define DOMAIN_MAX_PFN(gaw) ((((u64)1) << (gaw-VTD_PAGE_SHIFT)) - 1)
61 #define IOVA_PFN(addr) ((addr) >> PAGE_SHIFT)
62 #define DMA_32BIT_PFN IOVA_PFN(DMA_BIT_MASK(32))
63 #define DMA_64BIT_PFN IOVA_PFN(DMA_BIT_MASK(64))
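/*
 * Worked example of the address macros above (illustrative only,
 * assuming 4KiB pages, i.e. PAGE_SHIFT == VTD_PAGE_SHIFT == 12):
 *
 *   DOMAIN_MAX_ADDR(48) == (1ULL << 48) - 1 == 0xffffffffffff
 *   DOMAIN_MAX_PFN(48)  == (1ULL << 36) - 1 == 0xfffffffff
 *   DMA_32BIT_PFN       == 0xffffffff >> 12 == 0xfffff
 */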
66 /* VT-d pages must always be _smaller_ than MM pages. Otherwise things
67 are never going to work. */
68 static inline unsigned long dma_to_mm_pfn(unsigned long dma_pfn)
70 return dma_pfn >> (PAGE_SHIFT - VTD_PAGE_SHIFT);
73 static inline unsigned long mm_to_dma_pfn(unsigned long mm_pfn)
75 return mm_pfn << (PAGE_SHIFT - VTD_PAGE_SHIFT);
77 static inline unsigned long page_to_dma_pfn(struct page *pg)
79 return mm_to_dma_pfn(page_to_pfn(pg));
81 static inline unsigned long virt_to_dma_pfn(void *p)
83 return page_to_dma_pfn(virt_to_page(p));
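/*
 * Illustration of the pfn conversions above: with the common 4KiB MM
 * page (PAGE_SHIFT == 12) the shift is zero and dma pfn == mm pfn.
 * On a hypothetical arch with 16KiB MM pages (PAGE_SHIFT == 14), one
 * MM page spans four VT-d pages, so mm_to_dma_pfn(3) == 12 and
 * dma_to_mm_pfn(13) == 3.
 */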
86 /* global iommu list, set NULL for ignored DMAR units */
87 static struct intel_iommu **g_iommus;
89 static int rwbf_quirk;
94 * 12-63: Context Ptr (12 - (haw-1))
101 #define ROOT_ENTRY_NR (VTD_PAGE_SIZE/sizeof(struct root_entry))
102 static inline bool root_present(struct root_entry *root)
104 return (root->val & 1);
106 static inline void set_root_present(struct root_entry *root)
110 static inline void set_root_value(struct root_entry *root, unsigned long value)
112 root->val |= value & VTD_PAGE_MASK;
115 static inline struct context_entry *
116 get_context_addr_from_root(struct root_entry *root)
118 return root_present(root) ?
119 (struct context_entry *)phys_to_virt(root->val & VTD_PAGE_MASK) :
120 NULL;
127 * 1: fault processing disable
128 * 2-3: translation type
129 * 12-63: address space root
135 struct context_entry {
140 static inline bool context_present(struct context_entry *context)
142 return (context->lo & 1);
144 static inline void context_set_present(struct context_entry *context)
149 static inline void context_set_fault_enable(struct context_entry *context)
151 context->lo &= (((u64)-1) << 2) | 1;
154 static inline void context_set_translation_type(struct context_entry *context,
157 context->lo &= (((u64)-1) << 4) | 3;
158 context->lo |= (value & 3) << 2;
161 static inline void context_set_address_root(struct context_entry *context,
164 context->lo |= value & VTD_PAGE_MASK;
167 static inline void context_set_address_width(struct context_entry *context,
170 context->hi |= value & 7;
173 static inline void context_set_domain_id(struct context_entry *context,
176 context->hi |= (value & ((1 << 16) - 1)) << 8;
179 static inline void context_clear_entry(struct context_entry *context)
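/*
 * Putting the context-entry accessors above together (illustrative
 * sketch, values hypothetical): for domain id 42, a page-table root at
 * physical address P and address width value 2 (4-level table), the
 * entry ends up roughly as:
 *
 *   lo == (P & VTD_PAGE_MASK) | (translation type << 2) | 1 (present)
 *   hi == (42 << 8) | 2
 */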
192 * 12-63: Host physical address
198 static inline void dma_clear_pte(struct dma_pte *pte)
203 static inline void dma_set_pte_readable(struct dma_pte *pte)
205 pte->val |= DMA_PTE_READ;
208 static inline void dma_set_pte_writable(struct dma_pte *pte)
210 pte->val |= DMA_PTE_WRITE;
213 static inline void dma_set_pte_snp(struct dma_pte *pte)
215 pte->val |= DMA_PTE_SNP;
218 static inline void dma_set_pte_prot(struct dma_pte *pte, unsigned long prot)
220 pte->val = (pte->val & ~3) | (prot & 3);
223 static inline u64 dma_pte_addr(struct dma_pte *pte)
226 return pte->val & VTD_PAGE_MASK;
228 /* Must have a full atomic 64-bit read */
229 return __cmpxchg64(pte, 0ULL, 0ULL) & VTD_PAGE_MASK;
233 static inline void dma_set_pte_pfn(struct dma_pte *pte, unsigned long pfn)
235 pte->val |= (uint64_t)pfn << VTD_PAGE_SHIFT;
238 static inline bool dma_pte_present(struct dma_pte *pte)
240 return (pte->val & 3) != 0;
243 static inline int first_pte_in_page(struct dma_pte *pte)
245 return !((unsigned long)pte & ~VTD_PAGE_MASK);
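/*
 * Sketch of why this test works: a page-table page holds
 * VTD_PAGE_SIZE / sizeof(struct dma_pte) == 4096 / 8 == 512 entries,
 * so a pte pointer sits at the start of its page exactly when its low
 * 12 bits are zero - which is what the ~VTD_PAGE_MASK check tests.
 */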
249 * This domain is a static identity mapping domain.
250 * 1. This domain creates a static 1:1 mapping to all usable memory.
251 * 2. It maps to each iommu if successful.
252 * 3. Each iommu maps to this domain if successful.
254 struct dmar_domain *si_domain;
256 /* devices under the same p2p bridge are owned in one domain */
257 #define DOMAIN_FLAG_P2P_MULTIPLE_DEVICES (1 << 0)
259 /* domain represents a virtual machine, more than one device
260 * across iommus may be owned in one domain, e.g. kvm guest.
262 #define DOMAIN_FLAG_VIRTUAL_MACHINE (1 << 1)
264 /* si_domain contains multiple devices */
265 #define DOMAIN_FLAG_STATIC_IDENTITY (1 << 2)
268 int id; /* domain id */
269 unsigned long iommu_bmp; /* bitmap of iommus this domain uses*/
271 struct list_head devices; /* all devices' list */
272 struct iova_domain iovad; /* iova's that belong to this domain */
274 struct dma_pte *pgd; /* virtual address */
275 int gaw; /* max guest address width */
277 /* adjusted guest address width, 0 is level 2 30-bit */
280 int flags; /* flags to find out type of domain */
282 int iommu_coherency;/* indicate coherency of iommu access */
283 int iommu_snooping; /* indicate snooping control feature*/
284 int iommu_count; /* reference count of iommu */
285 spinlock_t iommu_lock; /* protect iommu set in domain */
286 u64 max_addr; /* maximum mapped address */
289 /* PCI domain-device relationship */
290 struct device_domain_info {
291 struct list_head link; /* link to domain siblings */
292 struct list_head global; /* link to global list */
293 int segment; /* PCI domain */
294 u8 bus; /* PCI bus number */
295 u8 devfn; /* PCI devfn number */
296 struct pci_dev *dev; /* it's NULL for PCIE-to-PCI bridge */
297 struct intel_iommu *iommu; /* IOMMU used by this device */
298 struct dmar_domain *domain; /* pointer to domain */
301 static void flush_unmaps_timeout(unsigned long data);
303 DEFINE_TIMER(unmap_timer, flush_unmaps_timeout, 0, 0);
305 #define HIGH_WATER_MARK 250
306 struct deferred_flush_tables {
308 struct iova *iova[HIGH_WATER_MARK];
309 struct dmar_domain *domain[HIGH_WATER_MARK];
312 static struct deferred_flush_tables *deferred_flush;
314 /* bitmap for indexing intel_iommus */
315 static int g_num_of_iommus;
317 static DEFINE_SPINLOCK(async_umap_flush_lock);
318 static LIST_HEAD(unmaps_to_do);
321 static long list_size;
323 static void domain_remove_dev_info(struct dmar_domain *domain);
325 #ifdef CONFIG_DMAR_DEFAULT_ON
326 int dmar_disabled = 0;
328 int dmar_disabled = 1;
329 #endif /*CONFIG_DMAR_DEFAULT_ON*/
331 static int __initdata dmar_map_gfx = 1;
332 static int dmar_forcedac;
333 static int intel_iommu_strict;
335 #define DUMMY_DEVICE_DOMAIN_INFO ((struct device_domain_info *)(-1))
336 static DEFINE_SPINLOCK(device_domain_lock);
337 static LIST_HEAD(device_domain_list);
339 static struct iommu_ops intel_iommu_ops;
341 static int __init intel_iommu_setup(char *str)
346 if (!strncmp(str, "on", 2)) {
348 printk(KERN_INFO "Intel-IOMMU: enabled\n");
349 } else if (!strncmp(str, "off", 3)) {
351 printk(KERN_INFO "Intel-IOMMU: disabled\n");
352 } else if (!strncmp(str, "igfx_off", 8)) {
355 "Intel-IOMMU: disable GFX device mapping\n");
356 } else if (!strncmp(str, "forcedac", 8)) {
358 "Intel-IOMMU: Forcing DAC for PCI devices\n");
360 } else if (!strncmp(str, "strict", 6)) {
362 "Intel-IOMMU: disable batched IOTLB flush\n");
363 intel_iommu_strict = 1;
366 str += strcspn(str, ",");
372 __setup("intel_iommu=", intel_iommu_setup);
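/*
 * Boot command line usage, a sketch matching the parser above (options
 * can be chained with commas):
 *
 *   intel_iommu=on
 *   intel_iommu=on,strict
 *   intel_iommu=igfx_off,forcedac
 *   intel_iommu=off
 */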
374 static struct kmem_cache *iommu_domain_cache;
375 static struct kmem_cache *iommu_devinfo_cache;
376 static struct kmem_cache *iommu_iova_cache;
378 static inline void *iommu_kmem_cache_alloc(struct kmem_cache *cachep)
383 /* trying to avoid low memory issues */
384 flags = current->flags & PF_MEMALLOC;
385 current->flags |= PF_MEMALLOC;
386 vaddr = kmem_cache_alloc(cachep, GFP_ATOMIC);
387 current->flags &= (~PF_MEMALLOC | flags);
392 static inline void *alloc_pgtable_page(void)
397 /* trying to avoid low memory issues */
398 flags = current->flags & PF_MEMALLOC;
399 current->flags |= PF_MEMALLOC;
400 vaddr = (void *)get_zeroed_page(GFP_ATOMIC);
401 current->flags &= (~PF_MEMALLOC | flags);
405 static inline void free_pgtable_page(void *vaddr)
407 free_page((unsigned long)vaddr);
410 static inline void *alloc_domain_mem(void)
412 return iommu_kmem_cache_alloc(iommu_domain_cache);
415 static void free_domain_mem(void *vaddr)
417 kmem_cache_free(iommu_domain_cache, vaddr);
420 static inline void * alloc_devinfo_mem(void)
422 return iommu_kmem_cache_alloc(iommu_devinfo_cache);
425 static inline void free_devinfo_mem(void *vaddr)
427 kmem_cache_free(iommu_devinfo_cache, vaddr);
430 struct iova *alloc_iova_mem(void)
432 return iommu_kmem_cache_alloc(iommu_iova_cache);
435 void free_iova_mem(struct iova *iova)
437 kmem_cache_free(iommu_iova_cache, iova);
441 static inline int width_to_agaw(int width);
443 static int __iommu_calculate_agaw(struct intel_iommu *iommu, int max_gaw)
448 sagaw = cap_sagaw(iommu->cap);
449 for (agaw = width_to_agaw(max_gaw);
451 if (test_bit(agaw, &sagaw))
459 * Calculate max SAGAW for each iommu.
461 int iommu_calculate_max_sagaw(struct intel_iommu *iommu)
463 return __iommu_calculate_agaw(iommu, MAX_AGAW_WIDTH);
467 * calculate agaw for each iommu.
468 * "SAGAW" may be different across iommus, use a default agaw, and
469 * get a supported less agaw for iommus that don't support the default agaw.
471 int iommu_calculate_agaw(struct intel_iommu *iommu)
473 return __iommu_calculate_agaw(iommu, DEFAULT_DOMAIN_ADDRESS_WIDTH);
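/*
 * Worked example (illustrative): DEFAULT_DOMAIN_ADDRESS_WIDTH == 48
 * gives width_to_agaw(48) == (48 - 30) / 9 == 2. If cap_sagaw() only
 * advertises agaw 1 (39-bit, 3-level tables), the loop in
 * __iommu_calculate_agaw() searches downward from 2 and returns 1.
 */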
476 /* This function only returns a single iommu in a domain */
477 static struct intel_iommu *domain_get_iommu(struct dmar_domain *domain)
481 /* si_domain and vm domain should not get here. */
482 BUG_ON(domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE);
483 BUG_ON(domain->flags & DOMAIN_FLAG_STATIC_IDENTITY);
485 iommu_id = find_first_bit(&domain->iommu_bmp, g_num_of_iommus);
486 if (iommu_id < 0 || iommu_id >= g_num_of_iommus)
489 return g_iommus[iommu_id];
492 static void domain_update_iommu_coherency(struct dmar_domain *domain)
496 domain->iommu_coherency = 1;
498 i = find_first_bit(&domain->iommu_bmp, g_num_of_iommus);
499 for (; i < g_num_of_iommus; ) {
500 if (!ecap_coherent(g_iommus[i]->ecap)) {
501 domain->iommu_coherency = 0;
504 i = find_next_bit(&domain->iommu_bmp, g_num_of_iommus, i+1);
508 static void domain_update_iommu_snooping(struct dmar_domain *domain)
512 domain->iommu_snooping = 1;
514 i = find_first_bit(&domain->iommu_bmp, g_num_of_iommus);
515 for (; i < g_num_of_iommus; ) {
516 if (!ecap_sc_support(g_iommus[i]->ecap)) {
517 domain->iommu_snooping = 0;
520 i = find_next_bit(&domain->iommu_bmp, g_num_of_iommus, i+1);
524 /* Some capabilities may be different across iommus */
525 static void domain_update_iommu_cap(struct dmar_domain *domain)
527 domain_update_iommu_coherency(domain);
528 domain_update_iommu_snooping(domain);
531 static struct intel_iommu *device_to_iommu(int segment, u8 bus, u8 devfn)
533 struct dmar_drhd_unit *drhd = NULL;
536 for_each_drhd_unit(drhd) {
539 if (segment != drhd->segment)
542 for (i = 0; i < drhd->devices_cnt; i++) {
543 if (drhd->devices[i] &&
544 drhd->devices[i]->bus->number == bus &&
545 drhd->devices[i]->devfn == devfn)
547 if (drhd->devices[i] &&
548 drhd->devices[i]->subordinate &&
549 drhd->devices[i]->subordinate->number <= bus &&
550 drhd->devices[i]->subordinate->subordinate >= bus)
554 if (drhd->include_all)
561 static void domain_flush_cache(struct dmar_domain *domain,
562 void *addr, int size)
564 if (!domain->iommu_coherency)
565 clflush_cache_range(addr, size);
568 /* Gets context entry for a given bus and devfn */
569 static struct context_entry * device_to_context_entry(struct intel_iommu *iommu,
572 struct root_entry *root;
573 struct context_entry *context;
574 unsigned long phy_addr;
577 spin_lock_irqsave(&iommu->lock, flags);
578 root = &iommu->root_entry[bus];
579 context = get_context_addr_from_root(root);
581 context = (struct context_entry *)alloc_pgtable_page();
583 spin_unlock_irqrestore(&iommu->lock, flags);
586 __iommu_flush_cache(iommu, (void *)context, CONTEXT_SIZE);
587 phy_addr = virt_to_phys((void *)context);
588 set_root_value(root, phy_addr);
589 set_root_present(root);
590 __iommu_flush_cache(iommu, root, sizeof(*root));
592 spin_unlock_irqrestore(&iommu->lock, flags);
593 return &context[devfn];
596 static int device_context_mapped(struct intel_iommu *iommu, u8 bus, u8 devfn)
598 struct root_entry *root;
599 struct context_entry *context;
603 spin_lock_irqsave(&iommu->lock, flags);
604 root = &iommu->root_entry[bus];
605 context = get_context_addr_from_root(root);
610 ret = context_present(&context[devfn]);
612 spin_unlock_irqrestore(&iommu->lock, flags);
616 static void clear_context_table(struct intel_iommu *iommu, u8 bus, u8 devfn)
618 struct root_entry *root;
619 struct context_entry *context;
622 spin_lock_irqsave(&iommu->lock, flags);
623 root = &iommu->root_entry[bus];
624 context = get_context_addr_from_root(root);
626 context_clear_entry(&context[devfn]);
627 __iommu_flush_cache(iommu, &context[devfn], \
630 spin_unlock_irqrestore(&iommu->lock, flags);
633 static void free_context_table(struct intel_iommu *iommu)
635 struct root_entry *root;
638 struct context_entry *context;
640 spin_lock_irqsave(&iommu->lock, flags);
641 if (!iommu->root_entry) {
644 for (i = 0; i < ROOT_ENTRY_NR; i++) {
645 root = &iommu->root_entry[i];
646 context = get_context_addr_from_root(root);
648 free_pgtable_page(context);
650 free_pgtable_page(iommu->root_entry);
651 iommu->root_entry = NULL;
653 spin_unlock_irqrestore(&iommu->lock, flags);
656 /* page table handling */
657 #define LEVEL_STRIDE (9)
658 #define LEVEL_MASK (((u64)1 << LEVEL_STRIDE) - 1)
660 static inline int agaw_to_level(int agaw)
665 static inline int agaw_to_width(int agaw)
667 return 30 + agaw * LEVEL_STRIDE;
671 static inline int width_to_agaw(int width)
673 return (width - 30) / LEVEL_STRIDE;
676 static inline unsigned int level_to_offset_bits(int level)
678 return (level - 1) * LEVEL_STRIDE;
681 static inline int pfn_level_offset(unsigned long pfn, int level)
683 return (pfn >> level_to_offset_bits(level)) & LEVEL_MASK;
686 static inline unsigned long level_mask(int level)
688 return -1UL << level_to_offset_bits(level);
691 static inline unsigned long level_size(int level)
693 return 1UL << level_to_offset_bits(level);
696 static inline unsigned long align_to_level(unsigned long pfn, int level)
698 return (pfn + level_size(level) - 1) & level_mask(level);
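/*
 * Level arithmetic, worked through (LEVEL_STRIDE == 9, 4KiB VT-d
 * pages): a level-1 entry maps one page, a level-2 entry maps
 * level_size(2) == 512 pages (2MiB), level 3 maps 1GiB. For
 * pfn 0x12345: pfn_level_offset(0x12345, 2) == (0x12345 >> 9) & 0x1ff
 * == 0x91, and align_to_level(0x12345, 2) rounds up to 0x12400.
 */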
701 static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
704 int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
705 struct dma_pte *parent, *pte = NULL;
706 int level = agaw_to_level(domain->agaw);
709 BUG_ON(!domain->pgd);
710 BUG_ON(addr_width < BITS_PER_LONG && pfn >> addr_width);
711 parent = domain->pgd;
716 offset = pfn_level_offset(pfn, level);
717 pte = &parent[offset];
721 if (!dma_pte_present(pte)) {
724 tmp_page = alloc_pgtable_page();
729 domain_flush_cache(domain, tmp_page, VTD_PAGE_SIZE);
730 pteval = (virt_to_dma_pfn(tmp_page) << VTD_PAGE_SHIFT) | DMA_PTE_READ | DMA_PTE_WRITE;
731 if (cmpxchg64(&pte->val, 0ULL, pteval)) {
732 /* Someone else set it while we were thinking; use theirs. */
733 free_pgtable_page(tmp_page);
736 domain_flush_cache(domain, pte, sizeof(*pte));
739 parent = phys_to_virt(dma_pte_addr(pte));
746 /* return the pte for an address at the specified level */
747 static struct dma_pte *dma_pfn_level_pte(struct dmar_domain *domain,
751 struct dma_pte *parent, *pte = NULL;
752 int total = agaw_to_level(domain->agaw);
755 parent = domain->pgd;
756 while (level <= total) {
757 offset = pfn_level_offset(pfn, total);
758 pte = &parent[offset];
762 if (!dma_pte_present(pte))
764 parent = phys_to_virt(dma_pte_addr(pte));
770 /* clear last level pte; a tlb flush should follow */
771 static void dma_pte_clear_range(struct dmar_domain *domain,
772 unsigned long start_pfn,
773 unsigned long last_pfn)
775 int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
776 struct dma_pte *first_pte, *pte;
778 BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width);
779 BUG_ON(addr_width < BITS_PER_LONG && last_pfn >> addr_width);
781 /* we don't need lock here; nobody else touches the iova range */
782 while (start_pfn <= last_pfn) {
783 first_pte = pte = dma_pfn_level_pte(domain, start_pfn, 1);
785 start_pfn = align_to_level(start_pfn + 1, 2);
792 } while (start_pfn <= last_pfn && !first_pte_in_page(pte));
794 domain_flush_cache(domain, first_pte,
795 (void *)pte - (void *)first_pte);
799 /* free page table pages. last level pte should already be cleared */
800 static void dma_pte_free_pagetable(struct dmar_domain *domain,
801 unsigned long start_pfn,
802 unsigned long last_pfn)
804 int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
805 struct dma_pte *first_pte, *pte;
806 int total = agaw_to_level(domain->agaw);
810 BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width);
811 BUG_ON(addr_width < BITS_PER_LONG && last_pfn >> addr_width);
813 /* We don't need lock here; nobody else touches the iova range */
815 while (level <= total) {
816 tmp = align_to_level(start_pfn, level);
818 /* If we can't even clear one PTE at this level, we're done */
819 if (tmp + level_size(level) - 1 > last_pfn)
822 while (tmp + level_size(level) - 1 <= last_pfn) {
823 first_pte = pte = dma_pfn_level_pte(domain, tmp, level);
825 tmp = align_to_level(tmp + 1, level + 1);
829 free_pgtable_page(phys_to_virt(dma_pte_addr(pte)));
832 tmp += level_size(level);
833 } while (!first_pte_in_page(pte) &&
834 tmp + level_size(level) - 1 <= last_pfn);
836 domain_flush_cache(domain, first_pte,
837 (void *)pte - (void *)first_pte);
843 if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) {
844 free_pgtable_page(domain->pgd);
850 static int iommu_alloc_root_entry(struct intel_iommu *iommu)
852 struct root_entry *root;
855 root = (struct root_entry *)alloc_pgtable_page();
859 __iommu_flush_cache(iommu, root, ROOT_SIZE);
861 spin_lock_irqsave(&iommu->lock, flags);
862 iommu->root_entry = root;
863 spin_unlock_irqrestore(&iommu->lock, flags);
868 static void iommu_set_root_entry(struct intel_iommu *iommu)
874 addr = iommu->root_entry;
876 spin_lock_irqsave(&iommu->register_lock, flag);
877 dmar_writeq(iommu->reg + DMAR_RTADDR_REG, virt_to_phys(addr));
879 writel(iommu->gcmd | DMA_GCMD_SRTP, iommu->reg + DMAR_GCMD_REG);
881 /* Make sure hardware completes it */
882 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
883 readl, (sts & DMA_GSTS_RTPS), sts);
885 spin_unlock_irqrestore(&iommu->register_lock, flag);
888 static void iommu_flush_write_buffer(struct intel_iommu *iommu)
893 if (!rwbf_quirk && !cap_rwbf(iommu->cap))
896 spin_lock_irqsave(&iommu->register_lock, flag);
897 writel(iommu->gcmd | DMA_GCMD_WBF, iommu->reg + DMAR_GCMD_REG);
899 /* Make sure hardware completes it */
900 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
901 readl, (!(val & DMA_GSTS_WBFS)), val);
903 spin_unlock_irqrestore(&iommu->register_lock, flag);
906 /* return value determines if we need a write buffer flush */
907 static void __iommu_flush_context(struct intel_iommu *iommu,
908 u16 did, u16 source_id, u8 function_mask,
915 case DMA_CCMD_GLOBAL_INVL:
916 val = DMA_CCMD_GLOBAL_INVL;
918 case DMA_CCMD_DOMAIN_INVL:
919 val = DMA_CCMD_DOMAIN_INVL|DMA_CCMD_DID(did);
921 case DMA_CCMD_DEVICE_INVL:
922 val = DMA_CCMD_DEVICE_INVL|DMA_CCMD_DID(did)
923 | DMA_CCMD_SID(source_id) | DMA_CCMD_FM(function_mask);
930 spin_lock_irqsave(&iommu->register_lock, flag);
931 dmar_writeq(iommu->reg + DMAR_CCMD_REG, val);
933 /* Make sure hardware completes it */
934 IOMMU_WAIT_OP(iommu, DMAR_CCMD_REG,
935 dmar_readq, (!(val & DMA_CCMD_ICC)), val);
937 spin_unlock_irqrestore(&iommu->register_lock, flag);
940 /* return value determines if we need a write buffer flush */
941 static void __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did,
942 u64 addr, unsigned int size_order, u64 type)
944 int tlb_offset = ecap_iotlb_offset(iommu->ecap);
945 u64 val = 0, val_iva = 0;
949 case DMA_TLB_GLOBAL_FLUSH:
950 /* global flush doesn't need to set IVA_REG */
951 val = DMA_TLB_GLOBAL_FLUSH|DMA_TLB_IVT;
953 case DMA_TLB_DSI_FLUSH:
954 val = DMA_TLB_DSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
956 case DMA_TLB_PSI_FLUSH:
957 val = DMA_TLB_PSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
958 /* Note: always flush non-leaf currently */
959 val_iva = size_order | addr;
964 /* Note: set drain read/write */
967 * This is probably meant to be super secure. Looks like we can
968 * ignore it without any impact.
970 if (cap_read_drain(iommu->cap))
971 val |= DMA_TLB_READ_DRAIN;
973 if (cap_write_drain(iommu->cap))
974 val |= DMA_TLB_WRITE_DRAIN;
976 spin_lock_irqsave(&iommu->register_lock, flag);
977 /* Note: Only uses first TLB reg currently */
979 dmar_writeq(iommu->reg + tlb_offset, val_iva);
980 dmar_writeq(iommu->reg + tlb_offset + 8, val);
982 /* Make sure hardware completes it */
983 IOMMU_WAIT_OP(iommu, tlb_offset + 8,
984 dmar_readq, (!(val & DMA_TLB_IVT)), val);
986 spin_unlock_irqrestore(&iommu->register_lock, flag);
988 /* check IOTLB invalidation granularity */
989 if (DMA_TLB_IAIG(val) == 0)
990 printk(KERN_ERR"IOMMU: flush IOTLB failed\n");
991 if (DMA_TLB_IAIG(val) != DMA_TLB_IIRG(type))
992 pr_debug("IOMMU: tlb flush request %Lx, actual %Lx\n",
993 (unsigned long long)DMA_TLB_IIRG(type),
994 (unsigned long long)DMA_TLB_IAIG(val));
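/*
 * Example of the encoding above (illustrative): a domain-selective
 * flush of domain 5 writes DMA_TLB_DSI_FLUSH | DMA_TLB_IVT |
 * DMA_TLB_DID(5) into the second 64-bit IOTLB register; the IAIG
 * field read back afterwards reports the granularity the hardware
 * actually performed.
 */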
997 static struct device_domain_info *iommu_support_dev_iotlb(
998 struct dmar_domain *domain, int segment, u8 bus, u8 devfn)
1001 unsigned long flags;
1002 struct device_domain_info *info;
1003 struct intel_iommu *iommu = device_to_iommu(segment, bus, devfn);
1005 if (!ecap_dev_iotlb_support(iommu->ecap))
1011 spin_lock_irqsave(&device_domain_lock, flags);
1012 list_for_each_entry(info, &domain->devices, link)
1013 if (info->bus == bus && info->devfn == devfn) {
1017 spin_unlock_irqrestore(&device_domain_lock, flags);
1019 if (!found || !info->dev)
1022 if (!pci_find_ext_capability(info->dev, PCI_EXT_CAP_ID_ATS))
1025 if (!dmar_find_matched_atsr_unit(info->dev))
1028 info->iommu = iommu;
1033 static void iommu_enable_dev_iotlb(struct device_domain_info *info)
1038 pci_enable_ats(info->dev, VTD_PAGE_SHIFT);
1041 static void iommu_disable_dev_iotlb(struct device_domain_info *info)
1043 if (!info->dev || !pci_ats_enabled(info->dev))
1046 pci_disable_ats(info->dev);
1049 static void iommu_flush_dev_iotlb(struct dmar_domain *domain,
1050 u64 addr, unsigned mask)
1053 unsigned long flags;
1054 struct device_domain_info *info;
1056 spin_lock_irqsave(&device_domain_lock, flags);
1057 list_for_each_entry(info, &domain->devices, link) {
1058 if (!info->dev || !pci_ats_enabled(info->dev))
1061 sid = info->bus << 8 | info->devfn;
1062 qdep = pci_ats_queue_depth(info->dev);
1063 qi_flush_dev_iotlb(info->iommu, sid, qdep, addr, mask);
1065 spin_unlock_irqrestore(&device_domain_lock, flags);
1068 static void iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did,
1069 unsigned long pfn, unsigned int pages)
1071 unsigned int mask = ilog2(__roundup_pow_of_two(pages));
1072 uint64_t addr = (uint64_t)pfn << VTD_PAGE_SHIFT;
1077 * Fallback to domain selective flush if no PSI support or the size
1078 * is too big.
1079 * PSI requires the page size to be 2 ^ x, and the base address to be
1080 * naturally aligned to that size.
1082 if (!cap_pgsel_inv(iommu->cap) || mask > cap_max_amask_val(iommu->cap))
1083 iommu->flush.flush_iotlb(iommu, did, 0, 0,
1086 iommu->flush.flush_iotlb(iommu, did, addr, mask,
1090 * In caching mode, domain ID 0 is reserved for non-present to present
1091 * mapping flush. Device IOTLB doesn't need to be flushed in this case.
1093 if (!cap_caching_mode(iommu->cap) || did)
1094 iommu_flush_dev_iotlb(iommu->domains[did], addr, mask);
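/*
 * Example (illustrative): a PSI flush of 3 pages rounds up to
 * __roundup_pow_of_two(3) == 4, so mask == ilog2(4) == 2 and the
 * hardware invalidates a naturally aligned 4-page range (PSI only
 * accepts power-of-two, naturally aligned regions).
 */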
1097 static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu)
1100 unsigned long flags;
1102 spin_lock_irqsave(&iommu->register_lock, flags);
1103 pmen = readl(iommu->reg + DMAR_PMEN_REG);
1104 pmen &= ~DMA_PMEN_EPM;
1105 writel(pmen, iommu->reg + DMAR_PMEN_REG);
1107 /* wait for the protected region status bit to clear */
1108 IOMMU_WAIT_OP(iommu, DMAR_PMEN_REG,
1109 readl, !(pmen & DMA_PMEN_PRS), pmen);
1111 spin_unlock_irqrestore(&iommu->register_lock, flags);
1114 static int iommu_enable_translation(struct intel_iommu *iommu)
1117 unsigned long flags;
1119 spin_lock_irqsave(&iommu->register_lock, flags);
1120 iommu->gcmd |= DMA_GCMD_TE;
1121 writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
1123 /* Make sure hardware completes it */
1124 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
1125 readl, (sts & DMA_GSTS_TES), sts);
1127 spin_unlock_irqrestore(&iommu->register_lock, flags);
1131 static int iommu_disable_translation(struct intel_iommu *iommu)
1136 spin_lock_irqsave(&iommu->register_lock, flag);
1137 iommu->gcmd &= ~DMA_GCMD_TE;
1138 writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
1140 /* Make sure hardware completes it */
1141 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
1142 readl, (!(sts & DMA_GSTS_TES)), sts);
1144 spin_unlock_irqrestore(&iommu->register_lock, flag);
1149 static int iommu_init_domains(struct intel_iommu *iommu)
1151 unsigned long ndomains;
1152 unsigned long nlongs;
1154 ndomains = cap_ndoms(iommu->cap);
1155 pr_debug("Number of Domains supported <%ld>\n", ndomains);
1156 nlongs = BITS_TO_LONGS(ndomains);
1158 /* TBD: there might be 64K domains,
1159 * consider other allocation for future chips
1161 iommu->domain_ids = kcalloc(nlongs, sizeof(unsigned long), GFP_KERNEL);
1162 if (!iommu->domain_ids) {
1163 printk(KERN_ERR "Allocating domain id array failed\n");
1166 iommu->domains = kcalloc(ndomains, sizeof(struct dmar_domain *),
1168 if (!iommu->domains) {
1169 printk(KERN_ERR "Allocating domain array failed\n");
1170 kfree(iommu->domain_ids);
1174 spin_lock_init(&iommu->lock);
1177 * if Caching mode is set, then invalid translations are tagged
1178 * with domain id 0. Hence we need to pre-allocate it.
1180 if (cap_caching_mode(iommu->cap))
1181 set_bit(0, iommu->domain_ids);
1186 static void domain_exit(struct dmar_domain *domain);
1187 static void vm_domain_exit(struct dmar_domain *domain);
1189 void free_dmar_iommu(struct intel_iommu *iommu)
1191 struct dmar_domain *domain;
1193 unsigned long flags;
1195 i = find_first_bit(iommu->domain_ids, cap_ndoms(iommu->cap));
1196 for (; i < cap_ndoms(iommu->cap); ) {
1197 domain = iommu->domains[i];
1198 clear_bit(i, iommu->domain_ids);
1200 spin_lock_irqsave(&domain->iommu_lock, flags);
1201 if (--domain->iommu_count == 0) {
1202 if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE)
1203 vm_domain_exit(domain);
1205 domain_exit(domain);
1207 spin_unlock_irqrestore(&domain->iommu_lock, flags);
1209 i = find_next_bit(iommu->domain_ids,
1210 cap_ndoms(iommu->cap), i+1);
1213 if (iommu->gcmd & DMA_GCMD_TE)
1214 iommu_disable_translation(iommu);
1217 set_irq_data(iommu->irq, NULL);
1218 /* This will mask the irq */
1219 free_irq(iommu->irq, iommu);
1220 destroy_irq(iommu->irq);
1223 kfree(iommu->domains);
1224 kfree(iommu->domain_ids);
1226 g_iommus[iommu->seq_id] = NULL;
1228 /* if all iommus are freed, free g_iommus */
1229 for (i = 0; i < g_num_of_iommus; i++) {
1234 if (i == g_num_of_iommus)
1237 /* free context mapping */
1238 free_context_table(iommu);
1241 static struct dmar_domain *alloc_domain(void)
1243 struct dmar_domain *domain;
1245 domain = alloc_domain_mem();
1249 memset(&domain->iommu_bmp, 0, sizeof(unsigned long));
1255 static int iommu_attach_domain(struct dmar_domain *domain,
1256 struct intel_iommu *iommu)
1259 unsigned long ndomains;
1260 unsigned long flags;
1262 ndomains = cap_ndoms(iommu->cap);
1264 spin_lock_irqsave(&iommu->lock, flags);
1266 num = find_first_zero_bit(iommu->domain_ids, ndomains);
1267 if (num >= ndomains) {
1268 spin_unlock_irqrestore(&iommu->lock, flags);
1269 printk(KERN_ERR "IOMMU: no free domain ids\n");
1274 set_bit(num, iommu->domain_ids);
1275 set_bit(iommu->seq_id, &domain->iommu_bmp);
1276 iommu->domains[num] = domain;
1277 spin_unlock_irqrestore(&iommu->lock, flags);
1282 static void iommu_detach_domain(struct dmar_domain *domain,
1283 struct intel_iommu *iommu)
1285 unsigned long flags;
1289 spin_lock_irqsave(&iommu->lock, flags);
1290 ndomains = cap_ndoms(iommu->cap);
1291 num = find_first_bit(iommu->domain_ids, ndomains);
1292 for (; num < ndomains; ) {
1293 if (iommu->domains[num] == domain) {
1297 num = find_next_bit(iommu->domain_ids,
1298 cap_ndoms(iommu->cap), num+1);
1302 clear_bit(num, iommu->domain_ids);
1303 clear_bit(iommu->seq_id, &domain->iommu_bmp);
1304 iommu->domains[num] = NULL;
1306 spin_unlock_irqrestore(&iommu->lock, flags);
1309 static struct iova_domain reserved_iova_list;
1310 static struct lock_class_key reserved_alloc_key;
1311 static struct lock_class_key reserved_rbtree_key;
1313 static void dmar_init_reserved_ranges(void)
1315 struct pci_dev *pdev = NULL;
1319 init_iova_domain(&reserved_iova_list, DMA_32BIT_PFN);
1321 lockdep_set_class(&reserved_iova_list.iova_alloc_lock,
1322 &reserved_alloc_key);
1323 lockdep_set_class(&reserved_iova_list.iova_rbtree_lock,
1324 &reserved_rbtree_key);
1326 /* IOAPIC ranges shouldn't be accessed by DMA */
1327 iova = reserve_iova(&reserved_iova_list, IOVA_PFN(IOAPIC_RANGE_START),
1328 IOVA_PFN(IOAPIC_RANGE_END));
1330 printk(KERN_ERR "Reserve IOAPIC range failed\n");
1332 /* Reserve all PCI MMIO to avoid peer-to-peer access */
1333 for_each_pci_dev(pdev) {
1336 for (i = 0; i < PCI_NUM_RESOURCES; i++) {
1337 r = &pdev->resource[i];
1338 if (!r->flags || !(r->flags & IORESOURCE_MEM))
1340 iova = reserve_iova(&reserved_iova_list,
1344 printk(KERN_ERR "Reserve iova failed\n");
1350 static void domain_reserve_special_ranges(struct dmar_domain *domain)
1352 copy_reserved_iova(&reserved_iova_list, &domain->iovad);
1355 static inline int guestwidth_to_adjustwidth(int gaw)
1358 int r = (gaw - 12) % 9;
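/*
 * Worked example: gaw == 48 gives r == (48 - 12) % 9 == 0, so 48 is
 * already 12 page-offset bits plus a whole number of 9-bit strides
 * and is used as-is; gaw == 40 gives r == 1 and gets rounded up to
 * the next supported width, 48.
 */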
1369 static int domain_init(struct dmar_domain *domain, int guest_width)
1371 struct intel_iommu *iommu;
1372 int adjust_width, agaw;
1373 unsigned long sagaw;
1375 init_iova_domain(&domain->iovad, DMA_32BIT_PFN);
1376 spin_lock_init(&domain->iommu_lock);
1378 domain_reserve_special_ranges(domain);
1380 /* calculate AGAW */
1381 iommu = domain_get_iommu(domain);
1382 if (guest_width > cap_mgaw(iommu->cap))
1383 guest_width = cap_mgaw(iommu->cap);
1384 domain->gaw = guest_width;
1385 adjust_width = guestwidth_to_adjustwidth(guest_width);
1386 agaw = width_to_agaw(adjust_width);
1387 sagaw = cap_sagaw(iommu->cap);
1388 if (!test_bit(agaw, &sagaw)) {
1389 /* hardware doesn't support it, choose a bigger one */
1390 pr_debug("IOMMU: hardware doesn't support agaw %d\n", agaw);
1391 agaw = find_next_bit(&sagaw, 5, agaw);
1395 domain->agaw = agaw;
1396 INIT_LIST_HEAD(&domain->devices);
1398 if (ecap_coherent(iommu->ecap))
1399 domain->iommu_coherency = 1;
1401 domain->iommu_coherency = 0;
1403 if (ecap_sc_support(iommu->ecap))
1404 domain->iommu_snooping = 1;
1406 domain->iommu_snooping = 0;
1408 domain->iommu_count = 1;
1410 /* always allocate the top pgd */
1411 domain->pgd = (struct dma_pte *)alloc_pgtable_page();
1414 __iommu_flush_cache(iommu, domain->pgd, PAGE_SIZE);
1418 static void domain_exit(struct dmar_domain *domain)
1420 struct dmar_drhd_unit *drhd;
1421 struct intel_iommu *iommu;
1423 /* Domain 0 is reserved, so don't process it */
1427 domain_remove_dev_info(domain);
1429 put_iova_domain(&domain->iovad);
1432 dma_pte_clear_range(domain, 0, DOMAIN_MAX_PFN(domain->gaw));
1434 /* free page tables */
1435 dma_pte_free_pagetable(domain, 0, DOMAIN_MAX_PFN(domain->gaw));
1437 for_each_active_iommu(iommu, drhd)
1438 if (test_bit(iommu->seq_id, &domain->iommu_bmp))
1439 iommu_detach_domain(domain, iommu);
1441 free_domain_mem(domain);
1444 static int domain_context_mapping_one(struct dmar_domain *domain, int segment,
1445 u8 bus, u8 devfn, int translation)
1447 struct context_entry *context;
1448 unsigned long flags;
1449 struct intel_iommu *iommu;
1450 struct dma_pte *pgd;
1452 unsigned long ndomains;
1455 struct device_domain_info *info = NULL;
1457 pr_debug("Set context mapping for %02x:%02x.%d\n",
1458 bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
1460 BUG_ON(!domain->pgd);
1461 BUG_ON(translation != CONTEXT_TT_PASS_THROUGH &&
1462 translation != CONTEXT_TT_MULTI_LEVEL);
1464 iommu = device_to_iommu(segment, bus, devfn);
1468 context = device_to_context_entry(iommu, bus, devfn);
1471 spin_lock_irqsave(&iommu->lock, flags);
1472 if (context_present(context)) {
1473 spin_unlock_irqrestore(&iommu->lock, flags);
1480 if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE ||
1481 domain->flags & DOMAIN_FLAG_STATIC_IDENTITY) {
1484 /* find an available domain id for this device in iommu */
1485 ndomains = cap_ndoms(iommu->cap);
1486 num = find_first_bit(iommu->domain_ids, ndomains);
1487 for (; num < ndomains; ) {
1488 if (iommu->domains[num] == domain) {
1493 num = find_next_bit(iommu->domain_ids,
1494 cap_ndoms(iommu->cap), num+1);
1498 num = find_first_zero_bit(iommu->domain_ids, ndomains);
1499 if (num >= ndomains) {
1500 spin_unlock_irqrestore(&iommu->lock, flags);
1501 printk(KERN_ERR "IOMMU: no free domain ids\n");
1505 set_bit(num, iommu->domain_ids);
1506 set_bit(iommu->seq_id, &domain->iommu_bmp);
1507 iommu->domains[num] = domain;
1511 /* Skip top levels of page tables for
1512 * iommu which has less agaw than default.
1514 for (agaw = domain->agaw; agaw != iommu->agaw; agaw--) {
1515 pgd = phys_to_virt(dma_pte_addr(pgd));
1516 if (!dma_pte_present(pgd)) {
1517 spin_unlock_irqrestore(&iommu->lock, flags);
1523 context_set_domain_id(context, id);
1525 if (translation != CONTEXT_TT_PASS_THROUGH) {
1526 info = iommu_support_dev_iotlb(domain, segment, bus, devfn);
1527 translation = info ? CONTEXT_TT_DEV_IOTLB :
1528 CONTEXT_TT_MULTI_LEVEL;
1531 * In pass through mode, AW must be programmed to indicate the largest
1532 * AGAW value supported by hardware. And ASR is ignored by hardware.
1534 if (unlikely(translation == CONTEXT_TT_PASS_THROUGH))
1535 context_set_address_width(context, iommu->msagaw);
1537 context_set_address_root(context, virt_to_phys(pgd));
1538 context_set_address_width(context, iommu->agaw);
1541 context_set_translation_type(context, translation);
1542 context_set_fault_enable(context);
1543 context_set_present(context);
1544 domain_flush_cache(domain, context, sizeof(*context));
1547 * It's a non-present to present mapping. If hardware doesn't cache
1548 * non-present entries we only need to flush the write-buffer. If it
1549 * _does_ cache non-present entries, then it does so in the special
1550 * domain #0, which we have to flush:
1552 if (cap_caching_mode(iommu->cap)) {
1553 iommu->flush.flush_context(iommu, 0,
1554 (((u16)bus) << 8) | devfn,
1555 DMA_CCMD_MASK_NOBIT,
1556 DMA_CCMD_DEVICE_INVL);
1557 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_DSI_FLUSH);
1559 iommu_flush_write_buffer(iommu);
1561 iommu_enable_dev_iotlb(info);
1562 spin_unlock_irqrestore(&iommu->lock, flags);
1564 spin_lock_irqsave(&domain->iommu_lock, flags);
1565 if (!test_and_set_bit(iommu->seq_id, &domain->iommu_bmp)) {
1566 domain->iommu_count++;
1567 domain_update_iommu_cap(domain);
1569 spin_unlock_irqrestore(&domain->iommu_lock, flags);
1574 domain_context_mapping(struct dmar_domain *domain, struct pci_dev *pdev,
1578 struct pci_dev *tmp, *parent;
1580 ret = domain_context_mapping_one(domain, pci_domain_nr(pdev->bus),
1581 pdev->bus->number, pdev->devfn,
1586 /* dependent device mapping */
1587 tmp = pci_find_upstream_pcie_bridge(pdev);
1590 /* Secondary interface's bus number and devfn 0 */
1591 parent = pdev->bus->self;
1592 while (parent != tmp) {
1593 ret = domain_context_mapping_one(domain,
1594 pci_domain_nr(parent->bus),
1595 parent->bus->number,
1596 parent->devfn, translation);
1599 parent = parent->bus->self;
1601 if (tmp->is_pcie) /* this is a PCIE-to-PCI bridge */
1602 return domain_context_mapping_one(domain,
1603 pci_domain_nr(tmp->subordinate),
1604 tmp->subordinate->number, 0,
1606 else /* this is a legacy PCI bridge */
1607 return domain_context_mapping_one(domain,
1608 pci_domain_nr(tmp->bus),
1614 static int domain_context_mapped(struct pci_dev *pdev)
1617 struct pci_dev *tmp, *parent;
1618 struct intel_iommu *iommu;
1620 iommu = device_to_iommu(pci_domain_nr(pdev->bus), pdev->bus->number,
1625 ret = device_context_mapped(iommu, pdev->bus->number, pdev->devfn);
1628 /* dependent device mapping */
1629 tmp = pci_find_upstream_pcie_bridge(pdev);
1632 /* Secondary interface's bus number and devfn 0 */
1633 parent = pdev->bus->self;
1634 while (parent != tmp) {
1635 ret = device_context_mapped(iommu, parent->bus->number,
1639 parent = parent->bus->self;
1642 return device_context_mapped(iommu, tmp->subordinate->number,
1645 return device_context_mapped(iommu, tmp->bus->number,
1649 static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
1650 struct scatterlist *sg, unsigned long phys_pfn,
1651 unsigned long nr_pages, int prot)
1653 struct dma_pte *first_pte = NULL, *pte = NULL;
1654 phys_addr_t uninitialized_var(pteval);
1655 int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
1656 unsigned long sg_res;
1658 BUG_ON(addr_width < BITS_PER_LONG && (iov_pfn + nr_pages - 1) >> addr_width);
1660 if ((prot & (DMA_PTE_READ|DMA_PTE_WRITE)) == 0)
1663 prot &= DMA_PTE_READ | DMA_PTE_WRITE | DMA_PTE_SNP;
1668 sg_res = nr_pages + 1;
1669 pteval = ((phys_addr_t)phys_pfn << VTD_PAGE_SHIFT) | prot;
1672 while (nr_pages--) {
1676 sg_res = (sg->offset + sg->length + VTD_PAGE_SIZE - 1) >> VTD_PAGE_SHIFT;
1677 sg->dma_address = ((dma_addr_t)iov_pfn << VTD_PAGE_SHIFT) + sg->offset;
1678 sg->dma_length = sg->length;
1679 pteval = page_to_phys(sg_page(sg)) | prot;
1682 first_pte = pte = pfn_to_dma_pte(domain, iov_pfn);
1686 /* We don't need lock here, nobody else
1687 * touches the iova range
1689 tmp = cmpxchg64_local(&pte->val, 0ULL, pteval);
1691 static int dumps = 5;
1692 printk(KERN_CRIT "ERROR: DMA PTE for vPFN 0x%lx already set (to %llx not %llx)\n",
1693 iov_pfn, tmp, (unsigned long long)pteval);
1696 debug_dma_dump_mappings(NULL);
1701 if (!nr_pages || first_pte_in_page(pte)) {
1702 domain_flush_cache(domain, first_pte,
1703 (void *)pte - (void *)first_pte);
1707 pteval += VTD_PAGE_SIZE;
1715 static inline int domain_sg_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
1716 struct scatterlist *sg, unsigned long nr_pages,
1719 return __domain_mapping(domain, iov_pfn, sg, 0, nr_pages, prot);
1722 static inline int domain_pfn_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
1723 unsigned long phys_pfn, unsigned long nr_pages,
1726 return __domain_mapping(domain, iov_pfn, NULL, phys_pfn, nr_pages, prot);
1729 static void iommu_detach_dev(struct intel_iommu *iommu, u8 bus, u8 devfn)
1734 clear_context_table(iommu, bus, devfn);
1735 iommu->flush.flush_context(iommu, 0, 0, 0,
1736 DMA_CCMD_GLOBAL_INVL);
1737 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
1740 static void domain_remove_dev_info(struct dmar_domain *domain)
1742 struct device_domain_info *info;
1743 unsigned long flags;
1744 struct intel_iommu *iommu;
1746 spin_lock_irqsave(&device_domain_lock, flags);
1747 while (!list_empty(&domain->devices)) {
1748 info = list_entry(domain->devices.next,
1749 struct device_domain_info, link);
1750 list_del(&info->link);
1751 list_del(&info->global);
1753 info->dev->dev.archdata.iommu = NULL;
1754 spin_unlock_irqrestore(&device_domain_lock, flags);
1756 iommu_disable_dev_iotlb(info);
1757 iommu = device_to_iommu(info->segment, info->bus, info->devfn);
1758 iommu_detach_dev(iommu, info->bus, info->devfn);
1759 free_devinfo_mem(info);
1761 spin_lock_irqsave(&device_domain_lock, flags);
1763 spin_unlock_irqrestore(&device_domain_lock, flags);
1768 * Note: we use struct pci_dev->dev.archdata.iommu to store the info
1770 static struct dmar_domain *
1771 find_domain(struct pci_dev *pdev)
1773 struct device_domain_info *info;
1775 /* No lock here, assumes no domain exit in normal case */
1776 info = pdev->dev.archdata.iommu;
1778 return info->domain;
1782 /* domain is initialized */
1783 static struct dmar_domain *get_domain_for_dev(struct pci_dev *pdev, int gaw)
1785 struct dmar_domain *domain, *found = NULL;
1786 struct intel_iommu *iommu;
1787 struct dmar_drhd_unit *drhd;
1788 struct device_domain_info *info, *tmp;
1789 struct pci_dev *dev_tmp;
1790 unsigned long flags;
1791 int bus = 0, devfn = 0;
1795 domain = find_domain(pdev);
1799 segment = pci_domain_nr(pdev->bus);
1801 dev_tmp = pci_find_upstream_pcie_bridge(pdev);
1803 if (dev_tmp->is_pcie) {
1804 bus = dev_tmp->subordinate->number;
1807 bus = dev_tmp->bus->number;
1808 devfn = dev_tmp->devfn;
1810 spin_lock_irqsave(&device_domain_lock, flags);
1811 list_for_each_entry(info, &device_domain_list, global) {
1812 if (info->segment == segment &&
1813 info->bus == bus && info->devfn == devfn) {
1814 found = info->domain;
1818 spin_unlock_irqrestore(&device_domain_lock, flags);
1819 /* pcie-pci bridge already has a domain, use it */
1826 domain = alloc_domain();
1830 /* Allocate new domain for the device */
1831 drhd = dmar_find_matched_drhd_unit(pdev);
1833 printk(KERN_ERR "IOMMU: can't find DMAR for device %s\n",
1837 iommu = drhd->iommu;
1839 ret = iommu_attach_domain(domain, iommu);
1841 domain_exit(domain);
1845 if (domain_init(domain, gaw)) {
1846 domain_exit(domain);
1850 /* register pcie-to-pci device */
1852 info = alloc_devinfo_mem();
1854 domain_exit(domain);
1857 info->segment = segment;
1859 info->devfn = devfn;
1861 info->domain = domain;
1862 /* This domain is shared by devices under p2p bridge */
1863 domain->flags |= DOMAIN_FLAG_P2P_MULTIPLE_DEVICES;
1865 /* pcie-to-pci bridge already has a domain, use it */
1867 spin_lock_irqsave(&device_domain_lock, flags);
1868 list_for_each_entry(tmp, &device_domain_list, global) {
1869 if (tmp->segment == segment &&
1870 tmp->bus == bus && tmp->devfn == devfn) {
1871 found = tmp->domain;
1876 free_devinfo_mem(info);
1877 domain_exit(domain);
1880 list_add(&info->link, &domain->devices);
1881 list_add(&info->global, &device_domain_list);
1883 spin_unlock_irqrestore(&device_domain_lock, flags);
1887 info = alloc_devinfo_mem();
1890 info->segment = segment;
1891 info->bus = pdev->bus->number;
1892 info->devfn = pdev->devfn;
1894 info->domain = domain;
1895 spin_lock_irqsave(&device_domain_lock, flags);
1896 /* somebody is fast */
1897 found = find_domain(pdev);
1898 if (found != NULL) {
1899 spin_unlock_irqrestore(&device_domain_lock, flags);
1900 if (found != domain) {
1901 domain_exit(domain);
1904 free_devinfo_mem(info);
1907 list_add(&info->link, &domain->devices);
1908 list_add(&info->global, &device_domain_list);
1909 pdev->dev.archdata.iommu = info;
1910 spin_unlock_irqrestore(&device_domain_lock, flags);
1913 /* recheck it here, maybe others set it */
1914 return find_domain(pdev);
1917 static int iommu_identity_mapping;
1919 static int iommu_domain_identity_map(struct dmar_domain *domain,
1920 unsigned long long start,
1921 unsigned long long end)
1923 unsigned long first_vpfn = start >> VTD_PAGE_SHIFT;
1924 unsigned long last_vpfn = end >> VTD_PAGE_SHIFT;
1926 if (!reserve_iova(&domain->iovad, dma_to_mm_pfn(first_vpfn),
1927 dma_to_mm_pfn(last_vpfn))) {
1928 printk(KERN_ERR "IOMMU: reserve iova failed\n");
1932 pr_debug("Mapping reserved region %llx-%llx for domain %d\n",
1933 start, end, domain->id);
1935 * RMRR range might have overlap with physical memory range,
1938 dma_pte_clear_range(domain, first_vpfn, last_vpfn);
1940 return domain_pfn_mapping(domain, first_vpfn, first_vpfn,
1941 last_vpfn - first_vpfn + 1,
1942 DMA_PTE_READ|DMA_PTE_WRITE);
1945 static int iommu_prepare_identity_map(struct pci_dev *pdev,
1946 unsigned long long start,
1947 unsigned long long end)
1949 struct dmar_domain *domain;
1953 "IOMMU: Setting identity map for device %s [0x%Lx - 0x%Lx]\n",
1954 pci_name(pdev), start, end);
1956 domain = get_domain_for_dev(pdev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
1960 ret = iommu_domain_identity_map(domain, start, end);
1964 /* context entry init */
1965 ret = domain_context_mapping(domain, pdev, CONTEXT_TT_MULTI_LEVEL);
1972 domain_exit(domain);
1976 static inline int iommu_prepare_rmrr_dev(struct dmar_rmrr_unit *rmrr,
1977 struct pci_dev *pdev)
1979 if (pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO)
1981 return iommu_prepare_identity_map(pdev, rmrr->base_address,
1982 rmrr->end_address + 1);
1985 #ifdef CONFIG_DMAR_FLOPPY_WA
1986 static inline void iommu_prepare_isa(void)
1988 struct pci_dev *pdev;
1991 pdev = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, NULL);
1995 printk(KERN_INFO "IOMMU: Prepare 0-16MiB unity mapping for LPC\n");
1996 ret = iommu_prepare_identity_map(pdev, 0, 16*1024*1024);
1999 printk(KERN_ERR "IOMMU: Failed to create 0-16MiB identity map; "
2000 "floppy might not work\n");
2004 static inline void iommu_prepare_isa(void)
2008 #endif /* !CONFIG_DMAR_FLOPPY_WA */
2010 /* Initialize each context entry as pass through. */
2011 static int __init init_context_pass_through(void)
2013 struct pci_dev *pdev = NULL;
2014 struct dmar_domain *domain;
2017 for_each_pci_dev(pdev) {
2018 domain = get_domain_for_dev(pdev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
2019 ret = domain_context_mapping(domain, pdev,
2020 CONTEXT_TT_PASS_THROUGH);
2027 static int md_domain_init(struct dmar_domain *domain, int guest_width);
2029 static int __init si_domain_work_fn(unsigned long start_pfn,
2030 unsigned long end_pfn, void *datax)
2034 *ret = iommu_domain_identity_map(si_domain,
2035 (uint64_t)start_pfn << PAGE_SHIFT,
2036 (uint64_t)end_pfn << PAGE_SHIFT);
2041 static int si_domain_init(void)
2043 struct dmar_drhd_unit *drhd;
2044 struct intel_iommu *iommu;
2047 si_domain = alloc_domain();
2051 pr_debug("Identity mapping domain is domain %d\n", si_domain->id);
2053 for_each_active_iommu(iommu, drhd) {
2054 ret = iommu_attach_domain(si_domain, iommu);
2056 domain_exit(si_domain);
2061 if (md_domain_init(si_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
2062 domain_exit(si_domain);
2066 si_domain->flags = DOMAIN_FLAG_STATIC_IDENTITY;
2068 for_each_online_node(nid) {
2069 work_with_active_regions(nid, si_domain_work_fn, &ret);
2077 static void domain_remove_one_dev_info(struct dmar_domain *domain,
2078 struct pci_dev *pdev);
2079 static int identity_mapping(struct pci_dev *pdev)
2081 struct device_domain_info *info;
2083 if (likely(!iommu_identity_mapping))
2087 list_for_each_entry(info, &si_domain->devices, link)
2088 if (info->dev == pdev)
2093 static int domain_add_dev_info(struct dmar_domain *domain,
2094 struct pci_dev *pdev)
2096 struct device_domain_info *info;
2097 unsigned long flags;
2099 info = alloc_devinfo_mem();
2103 info->segment = pci_domain_nr(pdev->bus);
2104 info->bus = pdev->bus->number;
2105 info->devfn = pdev->devfn;
2107 info->domain = domain;
2109 spin_lock_irqsave(&device_domain_lock, flags);
2110 list_add(&info->link, &domain->devices);
2111 list_add(&info->global, &device_domain_list);
2112 pdev->dev.archdata.iommu = info;
2113 spin_unlock_irqrestore(&device_domain_lock, flags);
2118 static int iommu_prepare_static_identity_mapping(void)
2120 struct pci_dev *pdev = NULL;
2123 ret = si_domain_init();
2127 for_each_pci_dev(pdev) {
2128 printk(KERN_INFO "IOMMU: identity mapping for device %s\n",
2131 ret = domain_context_mapping(si_domain, pdev,
2132 CONTEXT_TT_MULTI_LEVEL);
2135 ret = domain_add_dev_info(si_domain, pdev);
2143 int __init init_dmars(void)
2145 struct dmar_drhd_unit *drhd;
2146 struct dmar_rmrr_unit *rmrr;
2147 struct pci_dev *pdev;
2148 struct intel_iommu *iommu;
2150 int pass_through = 1;
2153 * In case pass through cannot be enabled, iommu tries to use identity
2156 if (iommu_pass_through)
2157 iommu_identity_mapping = 1;
2162 * initialize and program root entry to not present
2165 for_each_drhd_unit(drhd) {
2168 * lock not needed as this is only incremented in the single-
2169 * threaded kernel __init code path; all other accesses are read-only
2174 g_iommus = kcalloc(g_num_of_iommus, sizeof(struct intel_iommu *),
2177 printk(KERN_ERR "Allocating global iommu array failed\n");
2182 deferred_flush = kzalloc(g_num_of_iommus *
2183 sizeof(struct deferred_flush_tables), GFP_KERNEL);
2184 if (!deferred_flush) {
2190 for_each_drhd_unit(drhd) {
2194 iommu = drhd->iommu;
2195 g_iommus[iommu->seq_id] = iommu;
2197 ret = iommu_init_domains(iommu);
2203 * we could share the same root & context tables
2204 * among all IOMMUs. Need to split it later.
2206 ret = iommu_alloc_root_entry(iommu);
2208 printk(KERN_ERR "IOMMU: allocate root entry failed\n");
2211 if (!ecap_pass_through(iommu->ecap))
2214 if (iommu_pass_through)
2215 if (!pass_through) {
2217 "Pass Through is not supported by hardware.\n");
2218 iommu_pass_through = 0;
2222 * Start from a sane iommu hardware state.
2224 for_each_drhd_unit(drhd) {
2228 iommu = drhd->iommu;
2231 * If the queued invalidation is already initialized by us
2232 * (for example, while enabling interrupt-remapping) then
2233 * things are already rolling from a sane state.
2239 * Clear any previous faults.
2241 dmar_fault(-1, iommu);
2243 * Disable queued invalidation if supported and already enabled
2244 * before OS handover.
2246 dmar_disable_qi(iommu);
2249 for_each_drhd_unit(drhd) {
2253 iommu = drhd->iommu;
2255 if (dmar_enable_qi(iommu)) {
2257 * Queued Invalidate not enabled, use Register Based
2260 iommu->flush.flush_context = __iommu_flush_context;
2261 iommu->flush.flush_iotlb = __iommu_flush_iotlb;
2262 printk(KERN_INFO "IOMMU 0x%Lx: using Register based "
2264 (unsigned long long)drhd->reg_base_addr);
2266 iommu->flush.flush_context = qi_flush_context;
2267 iommu->flush.flush_iotlb = qi_flush_iotlb;
2268 printk(KERN_INFO "IOMMU 0x%Lx: using Queued "
2270 (unsigned long long)drhd->reg_base_addr);
2275 * If pass through is set and enabled, context entries of all pci
2276 * devices are initialized with the pass through translation type.
2278 if (iommu_pass_through) {
2279 ret = init_context_pass_through();
2281 printk(KERN_ERR "IOMMU: Pass through init failed.\n");
2282 iommu_pass_through = 0;
2287 * If pass through is not set or not enabled, set up context entries for
2288 * identity mappings for rmrr, gfx, and isa, and possibly fall back to
2289 * static identity mapping if iommu_identity_mapping is set.
2291 if (!iommu_pass_through) {
2292 if (iommu_identity_mapping)
2293 iommu_prepare_static_identity_mapping();
2296 * for each dev attached to rmrr
2298 * locate drhd for dev, alloc domain for dev
2299 * allocate free domain
2300 * allocate page table entries for rmrr
2301 * if context not allocated for bus
2302 * allocate and init context
2303 * set present in root table for this bus
2304 * init context with domain, translation etc
2308 printk(KERN_INFO "IOMMU: Setting RMRR:\n");
2309 for_each_rmrr_units(rmrr) {
2310 for (i = 0; i < rmrr->devices_cnt; i++) {
2311 pdev = rmrr->devices[i];
2313 * some BIOSes list non-existent devices in the DMAR
2318 ret = iommu_prepare_rmrr_dev(rmrr, pdev);
2321 "IOMMU: mapping reserved region failed\n");
2325 iommu_prepare_isa();
2331 * global invalidate context cache
2332 * global invalidate iotlb
2333 * enable translation
2335 for_each_drhd_unit(drhd) {
2338 iommu = drhd->iommu;
2340 iommu_flush_write_buffer(iommu);
2342 ret = dmar_set_interrupt(iommu);
2346 iommu_set_root_entry(iommu);
2348 iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
2349 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
2350 iommu_disable_protect_mem_regions(iommu);
2352 ret = iommu_enable_translation(iommu);
2359 for_each_drhd_unit(drhd) {
2362 iommu = drhd->iommu;
2369 static inline unsigned long aligned_nrpages(unsigned long host_addr,
2372 host_addr &= ~PAGE_MASK;
2373 host_addr += size + PAGE_SIZE - 1;
2375 return host_addr >> VTD_PAGE_SHIFT;
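/*
 * Example (4KiB pages): a 3-byte buffer at host address 0x1fff keeps
 * offset 0xfff, adds 3 + 4095, and (0xfff + 0x1002) >> 12 == 2 - the
 * buffer straddles a page boundary, so two VT-d pages are needed.
 */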
2378 static struct iova *intel_alloc_iova(struct device *dev,
2379 struct dmar_domain *domain,
2380 unsigned long nrpages, uint64_t dma_mask)
2382 struct pci_dev *pdev = to_pci_dev(dev);
2383 struct iova *iova = NULL;
2385 /* Restrict dma_mask to the width that the iommu can handle */
2386 dma_mask = min_t(uint64_t, DOMAIN_MAX_ADDR(domain->gaw), dma_mask);
2388 if (!dmar_forcedac && dma_mask > DMA_BIT_MASK(32)) {
2390 * First try to allocate an io virtual address in
2391 * DMA_BIT_MASK(32) and if that fails then try allocating
2394 iova = alloc_iova(&domain->iovad, nrpages,
2395 IOVA_PFN(DMA_BIT_MASK(32)), 1);
2399 iova = alloc_iova(&domain->iovad, nrpages, IOVA_PFN(dma_mask), 1);
2400 if (unlikely(!iova)) {
2401 printk(KERN_ERR "Allocating %ld-page iova for %s failed",
2402 nrpages, pci_name(pdev));
2409 static struct dmar_domain *
2410 get_valid_domain_for_dev(struct pci_dev *pdev)
2412 struct dmar_domain *domain;
2415 domain = get_domain_for_dev(pdev,
2416 DEFAULT_DOMAIN_ADDRESS_WIDTH);
2419 "Allocating domain for %s failed", pci_name(pdev));
2423 /* make sure context mapping is ok */
2424 if (unlikely(!domain_context_mapped(pdev))) {
2425 ret = domain_context_mapping(domain, pdev,
2426 CONTEXT_TT_MULTI_LEVEL);
2429 "Domain context map for %s failed",
2438 static int iommu_dummy(struct pci_dev *pdev)
2440 return pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO;
2443 /* Check if the pdev needs to go through the non-identity map and unmap process. */
2444 static int iommu_no_mapping(struct pci_dev *pdev)
2448 if (!iommu_identity_mapping)
2449 return iommu_dummy(pdev);
2451 found = identity_mapping(pdev);
2453 if (pdev->dma_mask > DMA_BIT_MASK(32))
2457 * a 32 bit DMA device is removed from si_domain and falls back
2458 * to non-identity mapping.
2460 domain_remove_one_dev_info(si_domain, pdev);
2461 printk(KERN_INFO "32bit %s uses non-identity mapping\n",
2467 * In case a 64 bit DMA device is detached from a vm, the device
2468 * is put into si_domain for identity mapping.
2470 if (pdev->dma_mask > DMA_BIT_MASK(32)) {
2472 ret = domain_add_dev_info(si_domain, pdev);
2474 printk(KERN_INFO "64bit %s uses identity mapping\n",
2481 return iommu_dummy(pdev);
2484 static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr,
2485 size_t size, int dir, u64 dma_mask)
2487 struct pci_dev *pdev = to_pci_dev(hwdev);
2488 struct dmar_domain *domain;
2489 phys_addr_t start_paddr;
2493 struct intel_iommu *iommu;
2495 BUG_ON(dir == DMA_NONE);
2497 if (iommu_no_mapping(pdev))
2500 domain = get_valid_domain_for_dev(pdev);
2504 iommu = domain_get_iommu(domain);
2505 size = aligned_nrpages(paddr, size);
2507 iova = intel_alloc_iova(hwdev, domain, size, pdev->dma_mask);
2512 * Check if DMAR supports zero-length reads on write only
2515 if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL || \
2516 !cap_zlr(iommu->cap))
2517 prot |= DMA_PTE_READ;
2518 if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
2519 prot |= DMA_PTE_WRITE;
2521 * paddr - (paddr + size) might be a partial page; we should map the
2522 * whole page. Note: if two parts of one page are separately mapped, we
2523 * might have two guest_addr mappings to the same host paddr, but this
2524 * is not a big problem
2526 ret = domain_pfn_mapping(domain, mm_to_dma_pfn(iova->pfn_lo),
2527 paddr >> VTD_PAGE_SHIFT, size, prot);
2531 /* it's a non-present to present mapping. Only flush if caching mode */
2532 if (cap_caching_mode(iommu->cap))
2533 iommu_flush_iotlb_psi(iommu, 0, mm_to_dma_pfn(iova->pfn_lo), size);
2535 iommu_flush_write_buffer(iommu);
2537 start_paddr = (phys_addr_t)iova->pfn_lo << PAGE_SHIFT;
2538 start_paddr += paddr & ~PAGE_MASK;
2543 __free_iova(&domain->iovad, iova);
2544 printk(KERN_ERR"Device %s request: %zx@%llx dir %d --- failed\n",
2545 pci_name(pdev), size, (unsigned long long)paddr, dir);
static dma_addr_t intel_map_page(struct device *dev, struct page *page,
				 unsigned long offset, size_t size,
				 enum dma_data_direction dir,
				 struct dma_attrs *attrs)
{
	return __intel_map_single(dev, page_to_phys(page) + offset, size,
				  dir, to_pci_dev(dev)->dma_mask);
}
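/*
 * Example, for orientation only: a PCI driver never calls intel_map_page()
 * directly; it goes through the generic DMA API, which dispatches through
 * struct dma_map_ops (installed by intel_iommu_init() below).  The names
 * my_pdev and my_page are hypothetical:
 *
 *	dma_addr_t handle;
 *
 *	handle = dma_map_page(&my_pdev->dev, my_page, 0,
 *			      PAGE_SIZE, DMA_TO_DEVICE);
 *	if (dma_mapping_error(&my_pdev->dev, handle))
 *		return -ENOMEM;
 *	... program 'handle' into the device and start the transfer ...
 *	dma_unmap_page(&my_pdev->dev, handle, PAGE_SIZE, DMA_TO_DEVICE);
 */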
static void flush_unmaps(void)
{
	int i, j;

	timer_on = 0;

	/* just flush them all */
	for (i = 0; i < g_num_of_iommus; i++) {
		struct intel_iommu *iommu = g_iommus[i];
		if (!iommu)
			continue;

		if (!deferred_flush[i].next)
			continue;

		iommu->flush.flush_iotlb(iommu, 0, 0, 0,
					 DMA_TLB_GLOBAL_FLUSH);
		for (j = 0; j < deferred_flush[i].next; j++) {
			unsigned long mask;
			struct iova *iova = deferred_flush[i].iova[j];

			mask = (iova->pfn_hi - iova->pfn_lo + 1) << PAGE_SHIFT;
			mask = ilog2(mask >> VTD_PAGE_SHIFT);
			iommu_flush_dev_iotlb(deferred_flush[i].domain[j],
					iova->pfn_lo << PAGE_SHIFT, mask);
			__free_iova(&deferred_flush[i].domain[j]->iovad, iova);
		}
		deferred_flush[i].next = 0;
	}

	list_size = 0;
}

static void flush_unmaps_timeout(unsigned long data)
{
	unsigned long flags;

	spin_lock_irqsave(&async_umap_flush_lock, flags);
	flush_unmaps();
	spin_unlock_irqrestore(&async_umap_flush_lock, flags);
}

static void add_unmap(struct dmar_domain *dom, struct iova *iova)
{
	unsigned long flags;
	int next, iommu_id;
	struct intel_iommu *iommu;

	spin_lock_irqsave(&async_umap_flush_lock, flags);
	if (list_size == HIGH_WATER_MARK)
		flush_unmaps();

	iommu = domain_get_iommu(dom);
	iommu_id = iommu->seq_id;

	next = deferred_flush[iommu_id].next;
	deferred_flush[iommu_id].domain[next] = dom;
	deferred_flush[iommu_id].iova[next] = iova;
	deferred_flush[iommu_id].next++;

	if (!timer_on) {
		mod_timer(&unmap_timer, jiffies + msecs_to_jiffies(10));
		timer_on = 1;
	}
	list_size++;
	spin_unlock_irqrestore(&async_umap_flush_lock, flags);
}
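/*
 * Timeline of the deferred-unmap machinery above (a summary of existing
 * behaviour, not new behaviour): intel_unmap_page() below calls
 * add_unmap() instead of flushing synchronously; the first queued entry
 * arms unmap_timer for ~10ms; either the timer callback
 * (flush_unmaps_timeout) or hitting HIGH_WATER_MARK drains every
 * per-iommu queue with one global IOTLB flush, and only then are the
 * IOVAs handed back to the allocator.  Until that flush the device can
 * still reach the stale translation; "intel_iommu=strict" disables the
 * batching to close that window.
 */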
static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
			     size_t size, enum dma_data_direction dir,
			     struct dma_attrs *attrs)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct dmar_domain *domain;
	unsigned long start_pfn, last_pfn;
	struct iova *iova;
	struct intel_iommu *iommu;

	if (iommu_no_mapping(pdev))
		return;

	domain = find_domain(pdev);
	BUG_ON(!domain);

	iommu = domain_get_iommu(domain);

	iova = find_iova(&domain->iovad, IOVA_PFN(dev_addr));
	if (WARN_ONCE(!iova, "Driver unmaps unmatched page at PFN %llx\n",
		      (unsigned long long)dev_addr))
		return;

	start_pfn = mm_to_dma_pfn(iova->pfn_lo);
	last_pfn = mm_to_dma_pfn(iova->pfn_hi + 1) - 1;

	pr_debug("Device %s unmapping: pfn %lx-%lx\n",
		 pci_name(pdev), start_pfn, last_pfn);

	/* clear the whole page */
	dma_pte_clear_range(domain, start_pfn, last_pfn);

	/* free page tables */
	dma_pte_free_pagetable(domain, start_pfn, last_pfn);

	if (intel_iommu_strict) {
		iommu_flush_iotlb_psi(iommu, domain->id, start_pfn,
				      last_pfn - start_pfn + 1);
		/* free iova */
		__free_iova(&domain->iovad, iova);
	} else {
		add_unmap(domain, iova);
		/*
		 * queue up the release of the unmap to save the 1/6th of the
		 * cpu used up by the iotlb flush operation...
		 */
	}
}

static void intel_unmap_single(struct device *dev, dma_addr_t dev_addr,
			       size_t size, int dir)
{
	intel_unmap_page(dev, dev_addr, size, dir, NULL);
}
static void *intel_alloc_coherent(struct device *hwdev, size_t size,
				  dma_addr_t *dma_handle, gfp_t flags)
{
	void *vaddr;
	int order;

	size = PAGE_ALIGN(size);
	order = get_order(size);
	flags &= ~(GFP_DMA | GFP_DMA32);

	vaddr = (void *)__get_free_pages(flags, order);
	if (!vaddr)
		return NULL;
	memset(vaddr, 0, size);

	*dma_handle = __intel_map_single(hwdev, virt_to_bus(vaddr), size,
					 DMA_BIDIRECTIONAL,
					 hwdev->coherent_dma_mask);
	if (*dma_handle)
		return vaddr;
	free_pages((unsigned long)vaddr, order);
	return NULL;
}

static void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr,
				dma_addr_t dma_handle)
{
	int order;

	size = PAGE_ALIGN(size);
	order = get_order(size);

	intel_unmap_single(hwdev, dma_handle, size, DMA_BIDIRECTIONAL);
	free_pages((unsigned long)vaddr, order);
}
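/*
 * Example, for orientation only: coherent ("consistent") allocations
 * reach the two functions above via the generic API.  my_pdev and
 * RING_BYTES are hypothetical:
 *
 *	dma_addr_t ring_dma;
 *	void *ring;
 *
 *	ring = dma_alloc_coherent(&my_pdev->dev, RING_BYTES,
 *				  &ring_dma, GFP_KERNEL);
 *	if (!ring)
 *		return -ENOMEM;
 *	...
 *	dma_free_coherent(&my_pdev->dev, RING_BYTES, ring, ring_dma);
 *
 * Note that intel_alloc_coherent() honours hwdev->coherent_dma_mask
 * rather than the streaming dma_mask when picking the IOVA.
 */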
static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
			   int nelems, enum dma_data_direction dir,
			   struct dma_attrs *attrs)
{
	struct pci_dev *pdev = to_pci_dev(hwdev);
	struct dmar_domain *domain;
	unsigned long start_pfn, last_pfn;
	struct iova *iova;
	struct intel_iommu *iommu;

	if (iommu_no_mapping(pdev))
		return;

	domain = find_domain(pdev);
	BUG_ON(!domain);

	iommu = domain_get_iommu(domain);

	iova = find_iova(&domain->iovad, IOVA_PFN(sglist[0].dma_address));
	if (WARN_ONCE(!iova, "Driver unmaps unmatched sglist at PFN %llx\n",
		      (unsigned long long)sglist[0].dma_address))
		return;

	start_pfn = mm_to_dma_pfn(iova->pfn_lo);
	last_pfn = mm_to_dma_pfn(iova->pfn_hi + 1) - 1;

	/* clear the whole page */
	dma_pte_clear_range(domain, start_pfn, last_pfn);

	/* free page tables */
	dma_pte_free_pagetable(domain, start_pfn, last_pfn);

	iommu_flush_iotlb_psi(iommu, domain->id, start_pfn,
			      (last_pfn - start_pfn + 1));

	/* free iova */
	__free_iova(&domain->iovad, iova);
}

static int intel_nontranslate_map_sg(struct device *hddev,
	struct scatterlist *sglist, int nelems, int dir)
{
	int i;
	struct scatterlist *sg;

	for_each_sg(sglist, sg, nelems, i) {
		BUG_ON(!sg_page(sg));
		sg->dma_address = page_to_phys(sg_page(sg)) + sg->offset;
		sg->dma_length = sg->length;
	}
	return nelems;
}
static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems,
			enum dma_data_direction dir, struct dma_attrs *attrs)
{
	int i;
	struct pci_dev *pdev = to_pci_dev(hwdev);
	struct dmar_domain *domain;
	size_t size = 0;
	int prot = 0;
	size_t offset_pfn = 0;
	struct iova *iova = NULL;
	int ret;
	struct scatterlist *sg;
	unsigned long start_vpfn;
	struct intel_iommu *iommu;

	BUG_ON(dir == DMA_NONE);
	if (iommu_no_mapping(pdev))
		return intel_nontranslate_map_sg(hwdev, sglist, nelems, dir);

	domain = get_valid_domain_for_dev(pdev);
	if (!domain)
		return 0;

	iommu = domain_get_iommu(domain);

	for_each_sg(sglist, sg, nelems, i)
		size += aligned_nrpages(sg->offset, sg->length);

	iova = intel_alloc_iova(hwdev, domain, size, pdev->dma_mask);
	if (!iova) {
		sglist->dma_length = 0;
		return 0;
	}

	/*
	 * Check if DMAR supports zero-length reads on write-only
	 * mappings.
	 */
	if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL ||
	    !cap_zlr(iommu->cap))
		prot |= DMA_PTE_READ;
	if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
		prot |= DMA_PTE_WRITE;

	start_vpfn = mm_to_dma_pfn(iova->pfn_lo);

	ret = domain_sg_mapping(domain, start_vpfn, sglist, mm_to_dma_pfn(size), prot);
	if (unlikely(ret)) {
		/* clear the pages */
		dma_pte_clear_range(domain, start_vpfn,
				    start_vpfn + size - 1);
		/* free page tables */
		dma_pte_free_pagetable(domain, start_vpfn,
				       start_vpfn + size - 1);
		/* free iova */
		__free_iova(&domain->iovad, iova);
		return 0;
	}

	/* it's a non-present to present mapping. Only flush if caching mode */
	if (cap_caching_mode(iommu->cap))
		iommu_flush_iotlb_psi(iommu, 0, start_vpfn, offset_pfn);
	else
		iommu_flush_write_buffer(iommu);

	return nelems;
}
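/*
 * Example, for orientation only: scatter-gather mapping as a driver
 * would drive it (my_pdev, my_sg, MY_NENTS and program_desc() are
 * hypothetical).  intel_map_sg() allocates one contiguous IOVA range
 * for the whole list, so the entries come back with consecutive bus
 * addresses:
 *
 *	int i, count;
 *	struct scatterlist *sg;
 *
 *	count = dma_map_sg(&my_pdev->dev, my_sg, MY_NENTS, DMA_FROM_DEVICE);
 *	if (!count)
 *		return -ENOMEM;
 *	for_each_sg(my_sg, sg, count, i)
 *		program_desc(my_pdev, sg_dma_address(sg), sg_dma_len(sg));
 *	...
 *	dma_unmap_sg(&my_pdev->dev, my_sg, MY_NENTS, DMA_FROM_DEVICE);
 */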
static int intel_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return !dma_addr;
}

struct dma_map_ops intel_dma_ops = {
	.alloc_coherent = intel_alloc_coherent,
	.free_coherent = intel_free_coherent,
	.map_sg = intel_map_sg,
	.unmap_sg = intel_unmap_sg,
	.map_page = intel_map_page,
	.unmap_page = intel_unmap_page,
	.mapping_error = intel_mapping_error,
};
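/*
 * How the table above takes effect (assumes the x86 dma-mapping glue at
 * this point in the tree): intel_iommu_init() assigns the global dma_ops
 * pointer to &intel_dma_ops, and the inline dma_map_page()/dma_map_sg()
 * wrappers resolve their target through get_dma_ops(dev), so streaming
 * DMA mappings on remapped devices funnel into this file.
 */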
static inline int iommu_domain_cache_init(void)
{
	int ret = 0;

	iommu_domain_cache = kmem_cache_create("iommu_domain",
					       sizeof(struct dmar_domain),
					       0,
					       SLAB_HWCACHE_ALIGN,
					       NULL);
	if (!iommu_domain_cache) {
		printk(KERN_ERR "Couldn't create iommu_domain cache\n");
		ret = -ENOMEM;
	}

	return ret;
}

static inline int iommu_devinfo_cache_init(void)
{
	int ret = 0;

	iommu_devinfo_cache = kmem_cache_create("iommu_devinfo",
						sizeof(struct device_domain_info),
						0,
						SLAB_HWCACHE_ALIGN,
						NULL);
	if (!iommu_devinfo_cache) {
		printk(KERN_ERR "Couldn't create devinfo cache\n");
		ret = -ENOMEM;
	}

	return ret;
}

static inline int iommu_iova_cache_init(void)
{
	int ret = 0;

	iommu_iova_cache = kmem_cache_create("iommu_iova",
					     sizeof(struct iova),
					     0,
					     SLAB_HWCACHE_ALIGN,
					     NULL);
	if (!iommu_iova_cache) {
		printk(KERN_ERR "Couldn't create iova cache\n");
		ret = -ENOMEM;
	}

	return ret;
}
static int __init iommu_init_mempool(void)
{
	int ret;

	ret = iommu_iova_cache_init();
	if (ret)
		return ret;

	ret = iommu_domain_cache_init();
	if (ret)
		goto domain_error;

	ret = iommu_devinfo_cache_init();
	if (!ret)
		return ret;

	kmem_cache_destroy(iommu_domain_cache);
domain_error:
	kmem_cache_destroy(iommu_iova_cache);

	return -ENOMEM;
}

static void __init iommu_exit_mempool(void)
{
	kmem_cache_destroy(iommu_devinfo_cache);
	kmem_cache_destroy(iommu_domain_cache);
	kmem_cache_destroy(iommu_iova_cache);
}
static void __init init_no_remapping_devices(void)
{
	struct dmar_drhd_unit *drhd;

	for_each_drhd_unit(drhd) {
		if (!drhd->include_all) {
			int i;

			for (i = 0; i < drhd->devices_cnt; i++)
				if (drhd->devices[i] != NULL)
					break;
			/* ignore DMAR unit if no pci devices exist */
			if (i == drhd->devices_cnt)
				drhd->ignored = 1;
		}
	}

	if (dmar_map_gfx)
		return;

	for_each_drhd_unit(drhd) {
		int i;

		if (drhd->ignored || drhd->include_all)
			continue;

		for (i = 0; i < drhd->devices_cnt; i++)
			if (drhd->devices[i] &&
			    !IS_GFX_DEVICE(drhd->devices[i]))
				break;

		if (i < drhd->devices_cnt)
			continue;

		/* bypass IOMMU if it is just for gfx devices */
		drhd->ignored = 1;
		for (i = 0; i < drhd->devices_cnt; i++) {
			if (!drhd->devices[i])
				continue;
			drhd->devices[i]->dev.archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
		}
	}
}
#ifdef CONFIG_SUSPEND
static int init_iommu_hw(void)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu = NULL;

	for_each_active_iommu(iommu, drhd)
		if (iommu->qi)
			dmar_reenable_qi(iommu);

	for_each_active_iommu(iommu, drhd) {
		iommu_flush_write_buffer(iommu);

		iommu_set_root_entry(iommu);

		iommu->flush.flush_context(iommu, 0, 0, 0,
					   DMA_CCMD_GLOBAL_INVL);
		iommu->flush.flush_iotlb(iommu, 0, 0, 0,
					 DMA_TLB_GLOBAL_FLUSH);
		iommu_disable_protect_mem_regions(iommu);
		iommu_enable_translation(iommu);
	}

	return 0;
}

static void iommu_flush_all(void)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;

	for_each_active_iommu(iommu, drhd) {
		iommu->flush.flush_context(iommu, 0, 0, 0,
					   DMA_CCMD_GLOBAL_INVL);
		iommu->flush.flush_iotlb(iommu, 0, 0, 0,
					 DMA_TLB_GLOBAL_FLUSH);
	}
}
static int iommu_suspend(struct sys_device *dev, pm_message_t state)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu = NULL;
	unsigned long flag;

	for_each_active_iommu(iommu, drhd) {
		iommu->iommu_state = kzalloc(sizeof(u32) * MAX_SR_DMAR_REGS,
					     GFP_ATOMIC);
		if (!iommu->iommu_state)
			goto nomem;
	}

	iommu_flush_all();

	for_each_active_iommu(iommu, drhd) {
		iommu_disable_translation(iommu);

		spin_lock_irqsave(&iommu->register_lock, flag);

		iommu->iommu_state[SR_DMAR_FECTL_REG] =
			readl(iommu->reg + DMAR_FECTL_REG);
		iommu->iommu_state[SR_DMAR_FEDATA_REG] =
			readl(iommu->reg + DMAR_FEDATA_REG);
		iommu->iommu_state[SR_DMAR_FEADDR_REG] =
			readl(iommu->reg + DMAR_FEADDR_REG);
		iommu->iommu_state[SR_DMAR_FEUADDR_REG] =
			readl(iommu->reg + DMAR_FEUADDR_REG);

		spin_unlock_irqrestore(&iommu->register_lock, flag);
	}
	return 0;

nomem:
	for_each_active_iommu(iommu, drhd)
		kfree(iommu->iommu_state);

	return -ENOMEM;
}

static int iommu_resume(struct sys_device *dev)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu = NULL;
	unsigned long flag;

	if (init_iommu_hw()) {
		WARN(1, "IOMMU setup failed, DMAR can not resume!\n");
		return -EIO;
	}

	for_each_active_iommu(iommu, drhd) {
		spin_lock_irqsave(&iommu->register_lock, flag);

		writel(iommu->iommu_state[SR_DMAR_FECTL_REG],
		       iommu->reg + DMAR_FECTL_REG);
		writel(iommu->iommu_state[SR_DMAR_FEDATA_REG],
		       iommu->reg + DMAR_FEDATA_REG);
		writel(iommu->iommu_state[SR_DMAR_FEADDR_REG],
		       iommu->reg + DMAR_FEADDR_REG);
		writel(iommu->iommu_state[SR_DMAR_FEUADDR_REG],
		       iommu->reg + DMAR_FEUADDR_REG);

		spin_unlock_irqrestore(&iommu->register_lock, flag);
	}

	for_each_active_iommu(iommu, drhd)
		kfree(iommu->iommu_state);

	return 0;
}
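/*
 * Ordering note for the pair above: iommu_suspend() saves the four fault
 * event registers only after translation is disabled, and iommu_resume()
 * re-initializes the hardware via init_iommu_hw() (root entry, global
 * context/IOTLB invalidation, re-enable) before writing the saved
 * FECTL/FEDATA/FEADDR/FEUADDR values back.
 */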
static struct sysdev_class iommu_sysclass = {
	.name		= "iommu",
	.resume		= iommu_resume,
	.suspend	= iommu_suspend,
};

static struct sys_device device_iommu = {
	.id	= 0,
	.cls	= &iommu_sysclass,
};

static int __init init_iommu_sysfs(void)
{
	int error;

	error = sysdev_class_register(&iommu_sysclass);
	if (error)
		return error;

	error = sysdev_register(&device_iommu);
	if (error)
		sysdev_class_unregister(&iommu_sysclass);

	return error;
}

#else
static int __init init_iommu_sysfs(void)
{
	return 0;
}
#endif	/* CONFIG_SUSPEND */
int __init intel_iommu_init(void)
{
	int ret = 0;

	if (dmar_table_init())
		return -ENODEV;

	if (dmar_dev_scope_init())
		return -ENODEV;

	/*
	 * Check the need for DMA-remapping initialization now.
	 * Above initialization will also be used by Interrupt-remapping.
	 */
	if (no_iommu || (swiotlb && !iommu_pass_through) || dmar_disabled)
		return -ENODEV;

	iommu_init_mempool();
	dmar_init_reserved_ranges();

	init_no_remapping_devices();

	ret = init_dmars();
	if (ret) {
		printk(KERN_ERR "IOMMU: dmar init failed\n");
		put_iova_domain(&reserved_iova_list);
		iommu_exit_mempool();
		return ret;
	}
	printk(KERN_INFO
	       "PCI-DMA: Intel(R) Virtualization Technology for Directed I/O\n");

	init_timer(&unmap_timer);
	force_iommu = 1;

	if (!iommu_pass_through) {
		printk(KERN_INFO
		       "Multi-level page-table translation for DMAR.\n");
		dma_ops = &intel_dma_ops;
	} else
		printk(KERN_INFO
		       "DMAR: Pass through translation for DMAR.\n");

	init_iommu_sysfs();

	register_iommu(&intel_iommu_ops);

	return 0;
}
static void iommu_detach_dependent_devices(struct intel_iommu *iommu,
					   struct pci_dev *pdev)
{
	struct pci_dev *tmp, *parent;

	if (!iommu || !pdev)
		return;

	/* dependent device detach */
	tmp = pci_find_upstream_pcie_bridge(pdev);
	/* Secondary interface's bus number and devfn 0 */
	if (tmp) {
		parent = pdev->bus->self;
		while (parent != tmp) {
			iommu_detach_dev(iommu, parent->bus->number,
					 parent->devfn);
			parent = parent->bus->self;
		}
		if (tmp->is_pcie) /* this is a PCIE-to-PCI bridge */
			iommu_detach_dev(iommu,
				tmp->subordinate->number, 0);
		else /* this is a legacy PCI bridge */
			iommu_detach_dev(iommu, tmp->bus->number,
					 tmp->devfn);
	}
}
static void domain_remove_one_dev_info(struct dmar_domain *domain,
				       struct pci_dev *pdev)
{
	struct device_domain_info *info;
	struct intel_iommu *iommu;
	unsigned long flags;
	int found = 0;
	struct list_head *entry, *tmp;

	iommu = device_to_iommu(pci_domain_nr(pdev->bus), pdev->bus->number,
				pdev->devfn);
	if (!iommu)
		return;

	spin_lock_irqsave(&device_domain_lock, flags);
	list_for_each_safe(entry, tmp, &domain->devices) {
		info = list_entry(entry, struct device_domain_info, link);
		/* No need to compare PCI domain; it has to be the same */
		if (info->bus == pdev->bus->number &&
		    info->devfn == pdev->devfn) {
			list_del(&info->link);
			list_del(&info->global);
			if (info->dev)
				info->dev->dev.archdata.iommu = NULL;
			spin_unlock_irqrestore(&device_domain_lock, flags);

			iommu_disable_dev_iotlb(info);
			iommu_detach_dev(iommu, info->bus, info->devfn);
			iommu_detach_dependent_devices(iommu, pdev);
			free_devinfo_mem(info);

			spin_lock_irqsave(&device_domain_lock, flags);

			if (found)
				break;
			else
				continue;
		}

		/* if there are no other devices under the same iommu
		 * owned by this domain, clear this iommu in iommu_bmp
		 * and update the iommu count and coherency
		 */
		if (iommu == device_to_iommu(info->segment, info->bus,
					     info->devfn))
			found = 1;
	}

	if (found == 0) {
		unsigned long tmp_flags;

		spin_lock_irqsave(&domain->iommu_lock, tmp_flags);
		clear_bit(iommu->seq_id, &domain->iommu_bmp);
		domain->iommu_count--;
		domain_update_iommu_cap(domain);
		spin_unlock_irqrestore(&domain->iommu_lock, tmp_flags);
	}

	spin_unlock_irqrestore(&device_domain_lock, flags);
}
static void vm_domain_remove_all_dev_info(struct dmar_domain *domain)
{
	struct device_domain_info *info;
	struct intel_iommu *iommu;
	unsigned long flags1, flags2;

	spin_lock_irqsave(&device_domain_lock, flags1);
	while (!list_empty(&domain->devices)) {
		info = list_entry(domain->devices.next,
				  struct device_domain_info, link);
		list_del(&info->link);
		list_del(&info->global);
		if (info->dev)
			info->dev->dev.archdata.iommu = NULL;

		spin_unlock_irqrestore(&device_domain_lock, flags1);

		iommu_disable_dev_iotlb(info);
		iommu = device_to_iommu(info->segment, info->bus, info->devfn);
		iommu_detach_dev(iommu, info->bus, info->devfn);
		iommu_detach_dependent_devices(iommu, info->dev);

		/* clear this iommu in iommu_bmp, update iommu count
		 * and capabilities
		 */
		spin_lock_irqsave(&domain->iommu_lock, flags2);
		if (test_and_clear_bit(iommu->seq_id,
				       &domain->iommu_bmp)) {
			domain->iommu_count--;
			domain_update_iommu_cap(domain);
		}
		spin_unlock_irqrestore(&domain->iommu_lock, flags2);

		free_devinfo_mem(info);
		spin_lock_irqsave(&device_domain_lock, flags1);
	}
	spin_unlock_irqrestore(&device_domain_lock, flags1);
}
/* domain id for virtual machine, it won't be set in context */
static unsigned long vm_domid;

static int vm_domain_min_agaw(struct dmar_domain *domain)
{
	int i;
	int min_agaw = domain->agaw;

	i = find_first_bit(&domain->iommu_bmp, g_num_of_iommus);
	for (; i < g_num_of_iommus; ) {
		if (min_agaw > g_iommus[i]->agaw)
			min_agaw = g_iommus[i]->agaw;

		i = find_next_bit(&domain->iommu_bmp, g_num_of_iommus, i+1);
	}

	return min_agaw;
}
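/*
 * Worked example (hypothetical numbers): a VM domain spanning one IOMMU
 * using 4-level tables (agaw 2, i.e. 48-bit width) and one using 3-level
 * tables (agaw 1, i.e. 39-bit width) gets min_agaw = 1 from the loop
 * above, so intel_iommu_map_range() below will refuse mappings whose end
 * address needs more than 39 bits.
 */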
static struct dmar_domain *iommu_alloc_vm_domain(void)
{
	struct dmar_domain *domain;

	domain = alloc_domain_mem();
	if (!domain)
		return NULL;

	domain->id = vm_domid++;
	memset(&domain->iommu_bmp, 0, sizeof(unsigned long));
	domain->flags = DOMAIN_FLAG_VIRTUAL_MACHINE;

	return domain;
}

static int md_domain_init(struct dmar_domain *domain, int guest_width)
{
	int adjust_width;

	init_iova_domain(&domain->iovad, DMA_32BIT_PFN);
	spin_lock_init(&domain->iommu_lock);

	domain_reserve_special_ranges(domain);

	/* calculate AGAW */
	domain->gaw = guest_width;
	adjust_width = guestwidth_to_adjustwidth(guest_width);
	domain->agaw = width_to_agaw(adjust_width);

	INIT_LIST_HEAD(&domain->devices);

	domain->iommu_count = 0;
	domain->iommu_coherency = 0;
	domain->max_addr = 0;

	/* always allocate the top pgd */
	domain->pgd = (struct dma_pte *)alloc_pgtable_page();
	if (!domain->pgd)
		return -ENOMEM;
	domain_flush_cache(domain, domain->pgd, PAGE_SIZE);
	return 0;
}
static void iommu_free_vm_domain(struct dmar_domain *domain)
{
	unsigned long flags;
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;
	unsigned long i;
	unsigned long ndomains;

	for_each_drhd_unit(drhd) {
		if (drhd->ignored)
			continue;
		iommu = drhd->iommu;

		ndomains = cap_ndoms(iommu->cap);
		i = find_first_bit(iommu->domain_ids, ndomains);
		for (; i < ndomains; ) {
			if (iommu->domains[i] == domain) {
				spin_lock_irqsave(&iommu->lock, flags);
				clear_bit(i, iommu->domain_ids);
				iommu->domains[i] = NULL;
				spin_unlock_irqrestore(&iommu->lock, flags);
				break;
			}
			i = find_next_bit(iommu->domain_ids, ndomains, i+1);
		}
	}
}
static void vm_domain_exit(struct dmar_domain *domain)
{
	/* Domain 0 is reserved, so don't process it */
	if (!domain)
		return;

	vm_domain_remove_all_dev_info(domain);
	/* destroy iovas */
	put_iova_domain(&domain->iovad);

	/* clear ptes */
	dma_pte_clear_range(domain, 0, DOMAIN_MAX_PFN(domain->gaw));

	/* free page tables */
	dma_pte_free_pagetable(domain, 0, DOMAIN_MAX_PFN(domain->gaw));

	iommu_free_vm_domain(domain);
	free_domain_mem(domain);
}
static int intel_iommu_domain_init(struct iommu_domain *domain)
{
	struct dmar_domain *dmar_domain;

	dmar_domain = iommu_alloc_vm_domain();
	if (!dmar_domain) {
		printk(KERN_ERR
		       "intel_iommu_domain_init: dmar_domain == NULL\n");
		return -ENOMEM;
	}
	if (md_domain_init(dmar_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
		printk(KERN_ERR
		       "intel_iommu_domain_init() failed\n");
		vm_domain_exit(dmar_domain);
		return -ENOMEM;
	}
	domain->priv = dmar_domain;

	return 0;
}

static void intel_iommu_domain_destroy(struct iommu_domain *domain)
{
	struct dmar_domain *dmar_domain = domain->priv;

	domain->priv = NULL;
	vm_domain_exit(dmar_domain);
}
static int intel_iommu_attach_device(struct iommu_domain *domain,
				     struct device *dev)
{
	struct dmar_domain *dmar_domain = domain->priv;
	struct pci_dev *pdev = to_pci_dev(dev);
	struct intel_iommu *iommu;
	int addr_width;
	u64 end;
	int ret;

	/* normally pdev is not mapped */
	if (unlikely(domain_context_mapped(pdev))) {
		struct dmar_domain *old_domain;

		old_domain = find_domain(pdev);
		if (old_domain) {
			if (dmar_domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE ||
			    dmar_domain->flags & DOMAIN_FLAG_STATIC_IDENTITY)
				domain_remove_one_dev_info(old_domain, pdev);
			else
				domain_remove_dev_info(old_domain);
		}
	}

	iommu = device_to_iommu(pci_domain_nr(pdev->bus), pdev->bus->number,
				pdev->devfn);
	if (!iommu)
		return -ENODEV;

	/* check if this iommu agaw is sufficient for max mapped address */
	addr_width = agaw_to_width(iommu->agaw);
	end = DOMAIN_MAX_ADDR(addr_width);
	end = end & VTD_PAGE_MASK;
	if (end < dmar_domain->max_addr) {
		printk(KERN_ERR "%s: iommu agaw (%d) is not "
		       "sufficient for the mapped address (%llx)\n",
		       __func__, iommu->agaw, dmar_domain->max_addr);
		return -EFAULT;
	}

	ret = domain_add_dev_info(dmar_domain, pdev);
	if (ret)
		return ret;

	return domain_context_mapping(dmar_domain, pdev, CONTEXT_TT_MULTI_LEVEL);
}

static void intel_iommu_detach_device(struct iommu_domain *domain,
				      struct device *dev)
{
	struct dmar_domain *dmar_domain = domain->priv;
	struct pci_dev *pdev = to_pci_dev(dev);

	domain_remove_one_dev_info(dmar_domain, pdev);
}
static int intel_iommu_map_range(struct iommu_domain *domain,
				 unsigned long iova, phys_addr_t hpa,
				 size_t size, int iommu_prot)
{
	struct dmar_domain *dmar_domain = domain->priv;
	u64 max_addr;
	int addr_width;
	int prot = 0;
	int ret;

	if (iommu_prot & IOMMU_READ)
		prot |= DMA_PTE_READ;
	if (iommu_prot & IOMMU_WRITE)
		prot |= DMA_PTE_WRITE;
	if ((iommu_prot & IOMMU_CACHE) && dmar_domain->iommu_snooping)
		prot |= DMA_PTE_SNP;

	max_addr = iova + size;
	if (dmar_domain->max_addr < max_addr) {
		int min_agaw;
		u64 end;

		/* check if minimum agaw is sufficient for mapped address */
		min_agaw = vm_domain_min_agaw(dmar_domain);
		addr_width = agaw_to_width(min_agaw);
		end = DOMAIN_MAX_ADDR(addr_width);
		end = end & VTD_PAGE_MASK;
		if (end < max_addr) {
			printk(KERN_ERR "%s: iommu agaw (%d) is not "
			       "sufficient for the mapped address (%llx)\n",
			       __func__, min_agaw, max_addr);
			return -EFAULT;
		}
		dmar_domain->max_addr = max_addr;
	}
	/* Round up size to the next multiple of PAGE_SIZE, if it and
	   the low bits of hpa would take us onto the next page */
	size = aligned_nrpages(hpa, size);
	ret = domain_pfn_mapping(dmar_domain, iova >> VTD_PAGE_SHIFT,
				 hpa >> VTD_PAGE_SHIFT, size, prot);
	return ret;
}
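/*
 * Illustrative call (dom and pg are hypothetical): asking for a 16KB
 * read/write, snoop-coherent window at IOVA 0x100000 via
 *
 *	iommu_map_range(dom, 0x100000, page_to_phys(pg), 16384,
 *			IOMMU_READ | IOMMU_WRITE | IOMMU_CACHE);
 *
 * reaches the function above with iommu_prot translated to DMA_PTE_READ |
 * DMA_PTE_WRITE (plus DMA_PTE_SNP only when the domain supports
 * snooping), and size rounded up to four VT-d pages.
 */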
static void intel_iommu_unmap_range(struct iommu_domain *domain,
				    unsigned long iova, size_t size)
{
	struct dmar_domain *dmar_domain = domain->priv;

	dma_pte_clear_range(dmar_domain, iova >> VTD_PAGE_SHIFT,
			    (iova + size - 1) >> VTD_PAGE_SHIFT);

	if (dmar_domain->max_addr == iova + size)
		dmar_domain->max_addr = iova;
}

static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
					    unsigned long iova)
{
	struct dmar_domain *dmar_domain = domain->priv;
	struct dma_pte *pte;
	u64 phys = 0;

	pte = pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT);
	if (pte)
		phys = dma_pte_addr(pte);

	return phys;
}

static int intel_iommu_domain_has_cap(struct iommu_domain *domain,
				      unsigned long cap)
{
	struct dmar_domain *dmar_domain = domain->priv;

	if (cap == IOMMU_CAP_CACHE_COHERENCY)
		return dmar_domain->iommu_snooping;

	return 0;
}
static struct iommu_ops intel_iommu_ops = {
	.domain_init	= intel_iommu_domain_init,
	.domain_destroy = intel_iommu_domain_destroy,
	.attach_dev	= intel_iommu_attach_device,
	.detach_dev	= intel_iommu_detach_device,
	.map		= intel_iommu_map_range,
	.unmap		= intel_iommu_unmap_range,
	.iova_to_phys	= intel_iommu_iova_to_phys,
	.domain_has_cap = intel_iommu_domain_has_cap,
};
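/*
 * Consumer sketch: how KVM-style code would exercise the ops table above
 * through the generic include/linux/iommu.h wrappers.  dom, my_pdev, gpa,
 * hpa, size and prot are hypothetical:
 *
 *	struct iommu_domain *dom = iommu_domain_alloc();
 *
 *	if (!dom)
 *		return -ENOMEM;
 *	if (iommu_domain_has_cap(dom, IOMMU_CAP_CACHE_COHERENCY))
 *		prot |= IOMMU_CACHE;
 *	r = iommu_attach_device(dom, &my_pdev->dev);
 *	r = iommu_map_range(dom, gpa, hpa, size, prot);
 *	...
 *	iommu_unmap_range(dom, gpa, size);
 *	iommu_detach_device(dom, &my_pdev->dev);
 *	iommu_domain_free(dom);
 */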
static void __devinit quirk_iommu_rwbf(struct pci_dev *dev)
{
	/*
	 * Mobile 4 Series Chipset neglects to set RWBF capability,
	 * but needs it:
	 */
	printk(KERN_INFO "DMAR: Forcing write-buffer flush capability\n");
	rwbf_quirk = 1;
}

DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_rwbf);