2 * Copyright © 2006-2014 Intel Corporation.
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * Authors: David Woodhouse <dwmw2@infradead.org>,
14 * Ashok Raj <ashok.raj@intel.com>,
15 * Shaohua Li <shaohua.li@intel.com>,
16 * Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>,
17 * Fenghua Yu <fenghua.yu@intel.com>
18 * Joerg Roedel <jroedel@suse.de>
21 #define pr_fmt(fmt) "DMAR: " fmt
23 #include <linux/init.h>
24 #include <linux/bitmap.h>
25 #include <linux/debugfs.h>
26 #include <linux/export.h>
27 #include <linux/slab.h>
28 #include <linux/irq.h>
29 #include <linux/interrupt.h>
30 #include <linux/spinlock.h>
31 #include <linux/pci.h>
32 #include <linux/dmar.h>
33 #include <linux/dma-mapping.h>
34 #include <linux/mempool.h>
35 #include <linux/memory.h>
36 #include <linux/timer.h>
37 #include <linux/iova.h>
38 #include <linux/iommu.h>
39 #include <linux/intel-iommu.h>
40 #include <linux/syscore_ops.h>
41 #include <linux/tboot.h>
42 #include <linux/dmi.h>
43 #include <linux/pci-ats.h>
44 #include <linux/memblock.h>
45 #include <linux/dma-contiguous.h>
46 #include <linux/crash_dump.h>
47 #include <asm/irq_remapping.h>
48 #include <asm/cacheflush.h>
49 #include <asm/iommu.h>
51 #include "irq_remapping.h"
53 #define ROOT_SIZE VTD_PAGE_SIZE
54 #define CONTEXT_SIZE VTD_PAGE_SIZE
56 #define IS_GFX_DEVICE(pdev) ((pdev->class >> 16) == PCI_BASE_CLASS_DISPLAY)
57 #define IS_USB_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_SERIAL_USB)
58 #define IS_ISA_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_BRIDGE_ISA)
59 #define IS_AZALIA(pdev) ((pdev)->vendor == 0x8086 && (pdev)->device == 0x3a3e)
61 #define IOAPIC_RANGE_START (0xfee00000)
62 #define IOAPIC_RANGE_END (0xfeefffff)
63 #define IOVA_START_ADDR (0x1000)
65 #define DEFAULT_DOMAIN_ADDRESS_WIDTH 48
67 #define MAX_AGAW_WIDTH 64
68 #define MAX_AGAW_PFN_WIDTH (MAX_AGAW_WIDTH - VTD_PAGE_SHIFT)
70 #define __DOMAIN_MAX_PFN(gaw) ((((uint64_t)1) << (gaw-VTD_PAGE_SHIFT)) - 1)
71 #define __DOMAIN_MAX_ADDR(gaw) ((((uint64_t)1) << gaw) - 1)
73 /* We limit DOMAIN_MAX_PFN to fit in an unsigned long, and DOMAIN_MAX_ADDR
74 to match. That way, we can use 'unsigned long' for PFNs with impunity. */
75 #define DOMAIN_MAX_PFN(gaw) ((unsigned long) min_t(uint64_t, \
76 __DOMAIN_MAX_PFN(gaw), (unsigned long)-1))
77 #define DOMAIN_MAX_ADDR(gaw) (((uint64_t)__DOMAIN_MAX_PFN(gaw)) << VTD_PAGE_SHIFT)
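/*
 * Worked example (illustrative only): for the default 48-bit guest address
 * width, __DOMAIN_MAX_PFN(48) == (1ULL << 36) - 1, so DOMAIN_MAX_PFN(48) is
 * 0xFFFFFFFFF on 64-bit kernels and DOMAIN_MAX_ADDR(48) is 0xFFFFFFFFF000.
 */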
79 /* IO virtual address start page frame number */
80 #define IOVA_START_PFN (1)
82 #define IOVA_PFN(addr) ((addr) >> PAGE_SHIFT)
83 #define DMA_32BIT_PFN IOVA_PFN(DMA_BIT_MASK(32))
84 #define DMA_64BIT_PFN IOVA_PFN(DMA_BIT_MASK(64))
86 /* page table handling */
87 #define LEVEL_STRIDE (9)
88 #define LEVEL_MASK (((u64)1 << LEVEL_STRIDE) - 1)
91 * This bitmap is used to advertise the page sizes our hardware supports
92 * to the IOMMU core, which will then use this information to split
93 * physically contiguous memory regions it is mapping into page sizes
96 * Traditionally the IOMMU core just handed us the mappings directly,
97 * after making sure the size is a power-of-two multiple of 4KiB and that
98 * the mapping has natural alignment.
100 * To retain this behavior, we currently advertise that we support
101 * all page sizes that are a power-of-two multiple of 4KiB.
103 * If at some point we'd like to utilize the IOMMU core's new behavior,
104 * we could change this to advertise the real page sizes we support.
106 #define INTEL_IOMMU_PGSIZES (~0xFFFUL)
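/*
 * Illustrative sketch (hypothetical helper, not used by the driver): with
 * the bitmap above, every power-of-two size of at least 4KiB has its bit
 * set, so a size is advertised as supported iff it is 4KiB << n.
 */
static inline bool intel_iommu_pgsize_supported_example(unsigned long size)
{
	/* power-of-two check, then look the size's bit up in the bitmap */
	return size && !(size & (size - 1)) && (size & INTEL_IOMMU_PGSIZES);
}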
108 static inline int agaw_to_level(int agaw)
113 static inline int agaw_to_width(int agaw)
115 return min_t(int, 30 + agaw * LEVEL_STRIDE, MAX_AGAW_WIDTH);
118 static inline int width_to_agaw(int width)
120 return DIV_ROUND_UP(width - 30, LEVEL_STRIDE);
123 static inline unsigned int level_to_offset_bits(int level)
125 return (level - 1) * LEVEL_STRIDE;
128 static inline int pfn_level_offset(unsigned long pfn, int level)
130 return (pfn >> level_to_offset_bits(level)) & LEVEL_MASK;
133 static inline unsigned long level_mask(int level)
135 return -1UL << level_to_offset_bits(level);
138 static inline unsigned long level_size(int level)
140 return 1UL << level_to_offset_bits(level);
143 static inline unsigned long align_to_level(unsigned long pfn, int level)
145 return (pfn + level_size(level) - 1) & level_mask(level);
148 static inline unsigned long lvl_to_nr_pages(unsigned int lvl)
150 return 1 << min_t(int, (lvl - 1) * LEVEL_STRIDE, MAX_AGAW_PFN_WIDTH);
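/*
 * Worked example (illustrative): with a 4-level table (agaw == 2), DMA pfn
 * 0x12345678 is indexed by pfn_level_offset() as
 *	level 4: (0x12345678 >> 27) & 0x1ff = 0x002
 *	level 3: (0x12345678 >> 18) & 0x1ff = 0x08d
 *	level 2: (0x12345678 >>  9) & 0x1ff = 0x02b
 *	level 1:  0x12345678        & 0x1ff = 0x078
 */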
153 /* VT-d pages must always be _smaller_ than MM pages. Otherwise things
154 are never going to work. */
155 static inline unsigned long dma_to_mm_pfn(unsigned long dma_pfn)
157 return dma_pfn >> (PAGE_SHIFT - VTD_PAGE_SHIFT);
160 static inline unsigned long mm_to_dma_pfn(unsigned long mm_pfn)
162 return mm_pfn << (PAGE_SHIFT - VTD_PAGE_SHIFT);
164 static inline unsigned long page_to_dma_pfn(struct page *pg)
166 return mm_to_dma_pfn(page_to_pfn(pg));
168 static inline unsigned long virt_to_dma_pfn(void *p)
170 return page_to_dma_pfn(virt_to_page(p));
173 /* global iommu list, set NULL for ignored DMAR units */
174 static struct intel_iommu **g_iommus;
176 static void __init check_tylersburg_isoch(void);
177 static int rwbf_quirk;
180 * set to 1 to panic the kernel if VT-d cannot be enabled successfully
181 * (used when the kernel is launched with TXT)
183 static int force_on = 0;
188 * 12-63: Context Ptr (12 - (haw-1))
195 #define ROOT_ENTRY_NR (VTD_PAGE_SIZE/sizeof(struct root_entry))
198 * Take a root_entry and return the Lower Context Table Pointer (LCTP)
201 static phys_addr_t root_entry_lctp(struct root_entry *re)
206 return re->lo & VTD_PAGE_MASK;
210 * Take a root_entry and return the Upper Context Table Pointer (UCTP)
213 static phys_addr_t root_entry_uctp(struct root_entry *re)
218 return re->hi & VTD_PAGE_MASK;
223 * 1: fault processing disable
224 * 2-3: translation type
225 * 12-63: address space root
231 struct context_entry {
236 static inline void context_clear_pasid_enable(struct context_entry *context)
238 context->lo &= ~(1ULL << 11);
241 static inline bool context_pasid_enabled(struct context_entry *context)
243 return !!(context->lo & (1ULL << 11));
246 static inline void context_set_copied(struct context_entry *context)
248 context->hi |= (1ull << 3);
251 static inline bool context_copied(struct context_entry *context)
253 return !!(context->hi & (1ULL << 3));
256 static inline bool __context_present(struct context_entry *context)
258 return (context->lo & 1);
261 static inline bool context_present(struct context_entry *context)
263 return context_pasid_enabled(context) ?
264 __context_present(context) :
265 __context_present(context) && !context_copied(context);
268 static inline void context_set_present(struct context_entry *context)
273 static inline void context_set_fault_enable(struct context_entry *context)
275 context->lo &= (((u64)-1) << 2) | 1;
278 static inline void context_set_translation_type(struct context_entry *context,
281 context->lo &= (((u64)-1) << 4) | 3;
282 context->lo |= (value & 3) << 2;
285 static inline void context_set_address_root(struct context_entry *context,
288 context->lo &= ~VTD_PAGE_MASK;
289 context->lo |= value & VTD_PAGE_MASK;
292 static inline void context_set_address_width(struct context_entry *context,
295 context->hi |= value & 7;
298 static inline void context_set_domain_id(struct context_entry *context,
301 context->hi |= (value & ((1 << 16) - 1)) << 8;
304 static inline int context_domain_id(struct context_entry *c)
306 return((c->hi >> 8) & 0xffff);
309 static inline void context_clear_entry(struct context_entry *context)
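/*
 * Worked example (illustrative, assuming CONTEXT_TT_MULTI_LEVEL == 0): a
 * present multi-level entry for domain-id 42 with a 4-level (agaw == 2)
 * table at physical address 0x12345000 is built by the helpers above as
 *	lo = 0x12345000 | (0 << 2) | 1 = 0x0000000012345001
 *	hi = (42 << 8) | 2             = 0x0000000000002a02
 */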
322 * 12-63: Host physical address
328 static inline void dma_clear_pte(struct dma_pte *pte)
333 static inline u64 dma_pte_addr(struct dma_pte *pte)
336 return pte->val & VTD_PAGE_MASK;
338 /* Must have a full atomic 64-bit read */
339 return __cmpxchg64(&pte->val, 0ULL, 0ULL) & VTD_PAGE_MASK;
343 static inline bool dma_pte_present(struct dma_pte *pte)
345 return (pte->val & 3) != 0;
348 static inline bool dma_pte_superpage(struct dma_pte *pte)
350 return (pte->val & DMA_PTE_LARGE_PAGE);
353 static inline int first_pte_in_page(struct dma_pte *pte)
355 return !((unsigned long)pte & ~VTD_PAGE_MASK);
359 * This domain is a statically mapped identity domain.
360 * 1. This domain creates a static 1:1 mapping of all usable memory.
361 * 2. It maps to each iommu if successful.
362 * 3. Each iommu maps to this domain if successful.
364 static struct dmar_domain *si_domain;
365 static int hw_pass_through = 1;
368 * Domain represents a virtual machine; more than one device
369 * across iommus may be owned by one domain, e.g. a kvm guest.
371 #define DOMAIN_FLAG_VIRTUAL_MACHINE (1 << 0)
373 /* si_domain contains multiple devices */
374 #define DOMAIN_FLAG_STATIC_IDENTITY (1 << 1)
376 #define for_each_domain_iommu(idx, domain) \
377 for (idx = 0; idx < g_num_of_iommus; idx++) \
378 if (domain->iommu_refcnt[idx])
381 int nid; /* node id */
383 unsigned iommu_refcnt[DMAR_UNITS_SUPPORTED];
384 /* Refcount of devices per iommu */
387 u16 iommu_did[DMAR_UNITS_SUPPORTED];
388 /* Domain ids per IOMMU. Use u16 since
389 * domain ids are 16 bits wide according
390 * to VT-d spec, section 9.3 */
392 struct list_head devices; /* all devices' list */
393 struct iova_domain iovad; /* iova's that belong to this domain */
395 struct dma_pte *pgd; /* virtual address */
396 int gaw; /* max guest address width */
398 /* adjusted guest address width, 0 is level 2 30-bit */
401 int flags; /* flags to find out type of domain */
403 int iommu_coherency;/* indicate coherency of iommu access */
404 int iommu_snooping; /* indicate snooping control feature*/
405 int iommu_count; /* reference count of iommu */
406 int iommu_superpage;/* Level of superpages supported:
407 0 == 4KiB (no superpages), 1 == 2MiB,
408 2 == 1GiB, 3 == 512GiB, 4 == 1TiB */
409 spinlock_t iommu_lock; /* protect iommu set in domain */
410 u64 max_addr; /* maximum mapped address */
412 struct iommu_domain domain; /* generic domain data structure for
416 /* PCI domain-device relationship */
417 struct device_domain_info {
418 struct list_head link; /* link to domain siblings */
419 struct list_head global; /* link to global list */
420 u8 bus; /* PCI bus number */
421 u8 devfn; /* PCI devfn number */
422 struct device *dev; /* it's NULL for PCIe-to-PCI bridge */
423 struct intel_iommu *iommu; /* IOMMU used by this device */
424 struct dmar_domain *domain; /* pointer to domain */
427 struct dmar_rmrr_unit {
428 struct list_head list; /* list of rmrr units */
429 struct acpi_dmar_header *hdr; /* ACPI header */
430 u64 base_address; /* reserved base address*/
431 u64 end_address; /* reserved end address */
432 struct dmar_dev_scope *devices; /* target devices */
433 int devices_cnt; /* target device count */
436 struct dmar_atsr_unit {
437 struct list_head list; /* list of ATSR units */
438 struct acpi_dmar_header *hdr; /* ACPI header */
439 struct dmar_dev_scope *devices; /* target devices */
440 int devices_cnt; /* target device count */
441 u8 include_all:1; /* include all ports */
444 static LIST_HEAD(dmar_atsr_units);
445 static LIST_HEAD(dmar_rmrr_units);
447 #define for_each_rmrr_units(rmrr) \
448 list_for_each_entry(rmrr, &dmar_rmrr_units, list)
450 static void flush_unmaps_timeout(unsigned long data);
452 static DEFINE_TIMER(unmap_timer, flush_unmaps_timeout, 0, 0);
454 #define HIGH_WATER_MARK 250
455 struct deferred_flush_tables {
457 struct iova *iova[HIGH_WATER_MARK];
458 struct dmar_domain *domain[HIGH_WATER_MARK];
459 struct page *freelist[HIGH_WATER_MARK];
462 static struct deferred_flush_tables *deferred_flush;
464 /* bitmap for indexing intel_iommus */
465 static int g_num_of_iommus;
467 static DEFINE_SPINLOCK(async_umap_flush_lock);
468 static LIST_HEAD(unmaps_to_do);
471 static long list_size;
473 static void domain_exit(struct dmar_domain *domain);
474 static void domain_remove_dev_info(struct dmar_domain *domain);
475 static void domain_remove_one_dev_info(struct dmar_domain *domain,
477 static void iommu_detach_dependent_devices(struct intel_iommu *iommu,
479 static int domain_detach_iommu(struct dmar_domain *domain,
480 struct intel_iommu *iommu);
482 #ifdef CONFIG_INTEL_IOMMU_DEFAULT_ON
483 int dmar_disabled = 0;
485 int dmar_disabled = 1;
486 #endif /*CONFIG_INTEL_IOMMU_DEFAULT_ON*/
488 int intel_iommu_enabled = 0;
489 EXPORT_SYMBOL_GPL(intel_iommu_enabled);
491 static int dmar_map_gfx = 1;
492 static int dmar_forcedac;
493 static int intel_iommu_strict;
494 static int intel_iommu_superpage = 1;
495 static int intel_iommu_ecs = 1;
497 /* We only actually use ECS when PASID support (on the new bit 40)
498 * is also advertised. Some early implementations — the ones with
499 * PASID support on bit 28 — have issues even when we *only* use
500 * extended root/context tables. */
501 #define ecs_enabled(iommu) (intel_iommu_ecs && ecap_ecs(iommu->ecap) && \
502 ecap_pasid(iommu->ecap))
504 int intel_iommu_gfx_mapped;
505 EXPORT_SYMBOL_GPL(intel_iommu_gfx_mapped);
507 #define DUMMY_DEVICE_DOMAIN_INFO ((struct device_domain_info *)(-1))
508 static DEFINE_SPINLOCK(device_domain_lock);
509 static LIST_HEAD(device_domain_list);
511 static const struct iommu_ops intel_iommu_ops;
513 static bool translation_pre_enabled(struct intel_iommu *iommu)
515 return (iommu->flags & VTD_FLAG_TRANS_PRE_ENABLED);
518 static void clear_translation_pre_enabled(struct intel_iommu *iommu)
520 iommu->flags &= ~VTD_FLAG_TRANS_PRE_ENABLED;
523 static void init_translation_status(struct intel_iommu *iommu)
527 gsts = readl(iommu->reg + DMAR_GSTS_REG);
528 if (gsts & DMA_GSTS_TES)
529 iommu->flags |= VTD_FLAG_TRANS_PRE_ENABLED;
532 /* Convert generic 'struct iommu_domain' to private struct dmar_domain */
533 static struct dmar_domain *to_dmar_domain(struct iommu_domain *dom)
535 return container_of(dom, struct dmar_domain, domain);
538 static int __init intel_iommu_setup(char *str)
543 if (!strncmp(str, "on", 2)) {
545 pr_info("IOMMU enabled\n");
546 } else if (!strncmp(str, "off", 3)) {
548 pr_info("IOMMU disabled\n");
549 } else if (!strncmp(str, "igfx_off", 8)) {
551 pr_info("Disable GFX device mapping\n");
552 } else if (!strncmp(str, "forcedac", 8)) {
553 pr_info("Forcing DAC for PCI devices\n");
555 } else if (!strncmp(str, "strict", 6)) {
556 pr_info("Disable batched IOTLB flush\n");
557 intel_iommu_strict = 1;
558 } else if (!strncmp(str, "sp_off", 6)) {
559 pr_info("Disable supported super page\n");
560 intel_iommu_superpage = 0;
561 } else if (!strncmp(str, "ecs_off", 7)) {
563 "Intel-IOMMU: disable extended context table support\n");
567 str += strcspn(str, ",");
573 __setup("intel_iommu=", intel_iommu_setup);
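/*
 * Example usage (illustrative): options may be combined, comma separated,
 * on the kernel command line, e.g.
 *
 *	intel_iommu=on,strict,sp_off
 *
 * and the strcspn() above advances the parser from one token to the next.
 */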
575 static struct kmem_cache *iommu_domain_cache;
576 static struct kmem_cache *iommu_devinfo_cache;
578 static struct dmar_domain* get_iommu_domain(struct intel_iommu *iommu, u16 did)
580 struct dmar_domain **domains;
583 domains = iommu->domains[idx];
587 return domains[did & 0xff];
590 static void set_iommu_domain(struct intel_iommu *iommu, u16 did,
591 struct dmar_domain *domain)
593 struct dmar_domain **domains;
596 if (!iommu->domains[idx]) {
597 size_t size = 256 * sizeof(struct dmar_domain *);
598 iommu->domains[idx] = kzalloc(size, GFP_ATOMIC);
601 domains = iommu->domains[idx];
602 if (WARN_ON(!domains))
605 domains[did & 0xff] = domain;
608 static inline void *alloc_pgtable_page(int node)
613 page = alloc_pages_node(node, GFP_ATOMIC | __GFP_ZERO, 0);
615 vaddr = page_address(page);
619 static inline void free_pgtable_page(void *vaddr)
621 free_page((unsigned long)vaddr);
624 static inline void *alloc_domain_mem(void)
626 return kmem_cache_alloc(iommu_domain_cache, GFP_ATOMIC);
629 static void free_domain_mem(void *vaddr)
631 kmem_cache_free(iommu_domain_cache, vaddr);
634 static inline void * alloc_devinfo_mem(void)
636 return kmem_cache_alloc(iommu_devinfo_cache, GFP_ATOMIC);
639 static inline void free_devinfo_mem(void *vaddr)
641 kmem_cache_free(iommu_devinfo_cache, vaddr);
644 static inline int domain_type_is_vm(struct dmar_domain *domain)
646 return domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE;
649 static inline int domain_type_is_si(struct dmar_domain *domain)
651 return domain->flags & DOMAIN_FLAG_STATIC_IDENTITY;
654 static inline int domain_type_is_vm_or_si(struct dmar_domain *domain)
656 return domain->flags & (DOMAIN_FLAG_VIRTUAL_MACHINE |
657 DOMAIN_FLAG_STATIC_IDENTITY);
660 static inline int domain_pfn_supported(struct dmar_domain *domain,
663 int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
665 return !(addr_width < BITS_PER_LONG && pfn >> addr_width);
668 static int __iommu_calculate_agaw(struct intel_iommu *iommu, int max_gaw)
673 sagaw = cap_sagaw(iommu->cap);
674 for (agaw = width_to_agaw(max_gaw);
676 if (test_bit(agaw, &sagaw))
684 * Calculate max SAGAW for each iommu.
686 int iommu_calculate_max_sagaw(struct intel_iommu *iommu)
688 return __iommu_calculate_agaw(iommu, MAX_AGAW_WIDTH);
692 * Calculate agaw for each iommu.
693 * "SAGAW" may be different across iommus; use a default agaw, and
694 * get a lower supported agaw for iommus that don't support the default agaw.
696 int iommu_calculate_agaw(struct intel_iommu *iommu)
698 return __iommu_calculate_agaw(iommu, DEFAULT_DOMAIN_ADDRESS_WIDTH);
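/*
 * Worked example (illustrative): with DEFAULT_DOMAIN_ADDRESS_WIDTH == 48,
 * width_to_agaw(48) == DIV_ROUND_UP(48 - 30, 9) == 2, i.e. a 4-level page
 * table; if bit 2 of SAGAW is clear, __iommu_calculate_agaw() falls back to
 * the next lower agaw the hardware does advertise.
 */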
701 /* This function only returns a single iommu in a domain */
702 static struct intel_iommu *domain_get_iommu(struct dmar_domain *domain)
706 /* si_domain and vm domain should not get here. */
707 BUG_ON(domain_type_is_vm_or_si(domain));
708 for_each_domain_iommu(iommu_id, domain)
711 if (iommu_id < 0 || iommu_id >= g_num_of_iommus)
714 return g_iommus[iommu_id];
717 static void domain_update_iommu_coherency(struct dmar_domain *domain)
719 struct dmar_drhd_unit *drhd;
720 struct intel_iommu *iommu;
724 domain->iommu_coherency = 1;
726 for_each_domain_iommu(i, domain) {
728 if (!ecap_coherent(g_iommus[i]->ecap)) {
729 domain->iommu_coherency = 0;
736 /* No hardware attached; use lowest common denominator */
738 for_each_active_iommu(iommu, drhd) {
739 if (!ecap_coherent(iommu->ecap)) {
740 domain->iommu_coherency = 0;
747 static int domain_update_iommu_snooping(struct intel_iommu *skip)
749 struct dmar_drhd_unit *drhd;
750 struct intel_iommu *iommu;
754 for_each_active_iommu(iommu, drhd) {
756 if (!ecap_sc_support(iommu->ecap)) {
767 static int domain_update_iommu_superpage(struct intel_iommu *skip)
769 struct dmar_drhd_unit *drhd;
770 struct intel_iommu *iommu;
773 if (!intel_iommu_superpage) {
777 /* set iommu_superpage to the smallest common denominator */
779 for_each_active_iommu(iommu, drhd) {
781 mask &= cap_super_page_val(iommu->cap);
791 /* Some capabilities may be different across iommus */
792 static void domain_update_iommu_cap(struct dmar_domain *domain)
794 domain_update_iommu_coherency(domain);
795 domain->iommu_snooping = domain_update_iommu_snooping(NULL);
796 domain->iommu_superpage = domain_update_iommu_superpage(NULL);
799 static inline struct context_entry *iommu_context_addr(struct intel_iommu *iommu,
800 u8 bus, u8 devfn, int alloc)
802 struct root_entry *root = &iommu->root_entry[bus];
803 struct context_entry *context;
806 if (ecs_enabled(iommu)) {
815 context = phys_to_virt(*entry & VTD_PAGE_MASK);
817 unsigned long phy_addr;
821 context = alloc_pgtable_page(iommu->node);
825 __iommu_flush_cache(iommu, (void *)context, CONTEXT_SIZE);
826 phy_addr = virt_to_phys((void *)context);
827 *entry = phy_addr | 1;
828 __iommu_flush_cache(iommu, entry, sizeof(*entry));
830 return &context[devfn];
833 static int iommu_dummy(struct device *dev)
835 return dev->archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO;
838 static struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devfn)
840 struct dmar_drhd_unit *drhd = NULL;
841 struct intel_iommu *iommu;
843 struct pci_dev *ptmp, *pdev = NULL;
847 if (iommu_dummy(dev))
850 if (dev_is_pci(dev)) {
851 pdev = to_pci_dev(dev);
852 segment = pci_domain_nr(pdev->bus);
853 } else if (has_acpi_companion(dev))
854 dev = &ACPI_COMPANION(dev)->dev;
857 for_each_active_iommu(iommu, drhd) {
858 if (pdev && segment != drhd->segment)
861 for_each_active_dev_scope(drhd->devices,
862 drhd->devices_cnt, i, tmp) {
864 *bus = drhd->devices[i].bus;
865 *devfn = drhd->devices[i].devfn;
869 if (!pdev || !dev_is_pci(tmp))
872 ptmp = to_pci_dev(tmp);
873 if (ptmp->subordinate &&
874 ptmp->subordinate->number <= pdev->bus->number &&
875 ptmp->subordinate->busn_res.end >= pdev->bus->number)
879 if (pdev && drhd->include_all) {
881 *bus = pdev->bus->number;
882 *devfn = pdev->devfn;
893 static void domain_flush_cache(struct dmar_domain *domain,
894 void *addr, int size)
896 if (!domain->iommu_coherency)
897 clflush_cache_range(addr, size);
900 static int device_context_mapped(struct intel_iommu *iommu, u8 bus, u8 devfn)
902 struct context_entry *context;
906 spin_lock_irqsave(&iommu->lock, flags);
907 context = iommu_context_addr(iommu, bus, devfn, 0);
909 ret = context_present(context);
910 spin_unlock_irqrestore(&iommu->lock, flags);
914 static void clear_context_table(struct intel_iommu *iommu, u8 bus, u8 devfn)
916 struct context_entry *context;
919 spin_lock_irqsave(&iommu->lock, flags);
920 context = iommu_context_addr(iommu, bus, devfn, 0);
922 context_clear_entry(context);
923 __iommu_flush_cache(iommu, context, sizeof(*context));
925 spin_unlock_irqrestore(&iommu->lock, flags);
928 static void free_context_table(struct intel_iommu *iommu)
932 struct context_entry *context;
934 spin_lock_irqsave(&iommu->lock, flags);
935 if (!iommu->root_entry) {
938 for (i = 0; i < ROOT_ENTRY_NR; i++) {
939 context = iommu_context_addr(iommu, i, 0, 0);
941 free_pgtable_page(context);
943 if (!ecs_enabled(iommu))
946 context = iommu_context_addr(iommu, i, 0x80, 0);
948 free_pgtable_page(context);
951 free_pgtable_page(iommu->root_entry);
952 iommu->root_entry = NULL;
954 spin_unlock_irqrestore(&iommu->lock, flags);
957 static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
958 unsigned long pfn, int *target_level)
960 struct dma_pte *parent, *pte = NULL;
961 int level = agaw_to_level(domain->agaw);
964 BUG_ON(!domain->pgd);
966 if (!domain_pfn_supported(domain, pfn))
967 /* Address beyond IOMMU's addressing capabilities. */
970 parent = domain->pgd;
975 offset = pfn_level_offset(pfn, level);
976 pte = &parent[offset];
977 if (!*target_level && (dma_pte_superpage(pte) || !dma_pte_present(pte)))
979 if (level == *target_level)
982 if (!dma_pte_present(pte)) {
985 tmp_page = alloc_pgtable_page(domain->nid);
990 domain_flush_cache(domain, tmp_page, VTD_PAGE_SIZE);
991 pteval = ((uint64_t)virt_to_dma_pfn(tmp_page) << VTD_PAGE_SHIFT) | DMA_PTE_READ | DMA_PTE_WRITE;
992 if (cmpxchg64(&pte->val, 0ULL, pteval))
993 /* Someone else set it while we were thinking; use theirs. */
994 free_pgtable_page(tmp_page);
996 domain_flush_cache(domain, pte, sizeof(*pte));
1001 parent = phys_to_virt(dma_pte_addr(pte));
1006 *target_level = level;
1012 /* return address's pte at specific level */
1013 static struct dma_pte *dma_pfn_level_pte(struct dmar_domain *domain,
1015 int level, int *large_page)
1017 struct dma_pte *parent, *pte = NULL;
1018 int total = agaw_to_level(domain->agaw);
1021 parent = domain->pgd;
1022 while (level <= total) {
1023 offset = pfn_level_offset(pfn, total);
1024 pte = &parent[offset];
1028 if (!dma_pte_present(pte)) {
1029 *large_page = total;
1033 if (dma_pte_superpage(pte)) {
1034 *large_page = total;
1038 parent = phys_to_virt(dma_pte_addr(pte));
1044 /* clear the last level pte; a tlb flush should follow */
1045 static void dma_pte_clear_range(struct dmar_domain *domain,
1046 unsigned long start_pfn,
1047 unsigned long last_pfn)
1049 unsigned int large_page = 1;
1050 struct dma_pte *first_pte, *pte;
1052 BUG_ON(!domain_pfn_supported(domain, start_pfn));
1053 BUG_ON(!domain_pfn_supported(domain, last_pfn));
1054 BUG_ON(start_pfn > last_pfn);
1056 /* we don't need lock here; nobody else touches the iova range */
1059 first_pte = pte = dma_pfn_level_pte(domain, start_pfn, 1, &large_page);
1061 start_pfn = align_to_level(start_pfn + 1, large_page + 1);
1066 start_pfn += lvl_to_nr_pages(large_page);
1068 } while (start_pfn <= last_pfn && !first_pte_in_page(pte));
1070 domain_flush_cache(domain, first_pte,
1071 (void *)pte - (void *)first_pte);
1073 } while (start_pfn && start_pfn <= last_pfn);
1076 static void dma_pte_free_level(struct dmar_domain *domain, int level,
1077 struct dma_pte *pte, unsigned long pfn,
1078 unsigned long start_pfn, unsigned long last_pfn)
1080 pfn = max(start_pfn, pfn);
1081 pte = &pte[pfn_level_offset(pfn, level)];
1084 unsigned long level_pfn;
1085 struct dma_pte *level_pte;
1087 if (!dma_pte_present(pte) || dma_pte_superpage(pte))
1090 level_pfn = pfn & level_mask(level - 1);
1091 level_pte = phys_to_virt(dma_pte_addr(pte));
1094 dma_pte_free_level(domain, level - 1, level_pte,
1095 level_pfn, start_pfn, last_pfn);
1097 /* If range covers entire pagetable, free it */
1098 if (!(start_pfn > level_pfn ||
1099 last_pfn < level_pfn + level_size(level) - 1)) {
1101 domain_flush_cache(domain, pte, sizeof(*pte));
1102 free_pgtable_page(level_pte);
1105 pfn += level_size(level);
1106 } while (!first_pte_in_page(++pte) && pfn <= last_pfn);
1109 /* Free page table pages. The last level ptes should already be cleared. */
1110 static void dma_pte_free_pagetable(struct dmar_domain *domain,
1111 unsigned long start_pfn,
1112 unsigned long last_pfn)
1114 BUG_ON(!domain_pfn_supported(domain, start_pfn));
1115 BUG_ON(!domain_pfn_supported(domain, last_pfn));
1116 BUG_ON(start_pfn > last_pfn);
1118 dma_pte_clear_range(domain, start_pfn, last_pfn);
1120 /* We don't need lock here; nobody else touches the iova range */
1121 dma_pte_free_level(domain, agaw_to_level(domain->agaw),
1122 domain->pgd, 0, start_pfn, last_pfn);
1125 if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) {
1126 free_pgtable_page(domain->pgd);
1131 /* When a page at a given level is being unlinked from its parent, we don't
1132 need to *modify* it at all. All we need to do is make a list of all the
1133 pages which can be freed just as soon as we've flushed the IOTLB and we
1134 know the hardware page-walk will no longer touch them.
1135 The 'pte' argument is the *parent* PTE, pointing to the page that is to
1137 static struct page *dma_pte_list_pagetables(struct dmar_domain *domain,
1138 int level, struct dma_pte *pte,
1139 struct page *freelist)
1143 pg = pfn_to_page(dma_pte_addr(pte) >> PAGE_SHIFT);
1144 pg->freelist = freelist;
1150 pte = page_address(pg);
1152 if (dma_pte_present(pte) && !dma_pte_superpage(pte))
1153 freelist = dma_pte_list_pagetables(domain, level - 1,
1156 } while (!first_pte_in_page(pte));
1161 static struct page *dma_pte_clear_level(struct dmar_domain *domain, int level,
1162 struct dma_pte *pte, unsigned long pfn,
1163 unsigned long start_pfn,
1164 unsigned long last_pfn,
1165 struct page *freelist)
1167 struct dma_pte *first_pte = NULL, *last_pte = NULL;
1169 pfn = max(start_pfn, pfn);
1170 pte = &pte[pfn_level_offset(pfn, level)];
1173 unsigned long level_pfn;
1175 if (!dma_pte_present(pte))
1178 level_pfn = pfn & level_mask(level);
1180 /* If range covers entire pagetable, free it */
1181 if (start_pfn <= level_pfn &&
1182 last_pfn >= level_pfn + level_size(level) - 1) {
1183 /* These subordinate page tables are going away entirely. Don't
1184 bother to clear them; we're just going to *free* them. */
1185 if (level > 1 && !dma_pte_superpage(pte))
1186 freelist = dma_pte_list_pagetables(domain, level - 1, pte, freelist);
1192 } else if (level > 1) {
1193 /* Recurse down into a level that isn't *entirely* obsolete */
1194 freelist = dma_pte_clear_level(domain, level - 1,
1195 phys_to_virt(dma_pte_addr(pte)),
1196 level_pfn, start_pfn, last_pfn,
1200 pfn += level_size(level);
1201 } while (!first_pte_in_page(++pte) && pfn <= last_pfn);
1204 domain_flush_cache(domain, first_pte,
1205 (void *)++last_pte - (void *)first_pte);
1210 /* We can't just free the pages because the IOMMU may still be walking
1211 the page tables, and may have cached the intermediate levels. The
1212 pages can only be freed after the IOTLB flush has been done. */
1213 struct page *domain_unmap(struct dmar_domain *domain,
1214 unsigned long start_pfn,
1215 unsigned long last_pfn)
1217 struct page *freelist = NULL;
1219 BUG_ON(!domain_pfn_supported(domain, start_pfn));
1220 BUG_ON(!domain_pfn_supported(domain, last_pfn));
1221 BUG_ON(start_pfn > last_pfn);
1223 /* we don't need lock here; nobody else touches the iova range */
1224 freelist = dma_pte_clear_level(domain, agaw_to_level(domain->agaw),
1225 domain->pgd, 0, start_pfn, last_pfn, NULL);
1228 if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) {
1229 struct page *pgd_page = virt_to_page(domain->pgd);
1230 pgd_page->freelist = freelist;
1231 freelist = pgd_page;
1239 void dma_free_pagelist(struct page *freelist)
1243 while ((pg = freelist)) {
1244 freelist = pg->freelist;
1245 free_pgtable_page(page_address(pg));
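/*
 * Illustrative ordering (sketch; variable names are schematic): pages on a
 * list returned by domain_unmap() may still be referenced by in-flight
 * hardware page walks, so free them only after the IOTLB flush, e.g.
 *
 *	freelist = domain_unmap(domain, start_pfn, last_pfn);
 *	iommu_flush_iotlb_psi(iommu, domain, start_pfn, nrpages, ...);
 *	dma_free_pagelist(freelist);
 */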
1249 /* iommu handling */
1250 static int iommu_alloc_root_entry(struct intel_iommu *iommu)
1252 struct root_entry *root;
1253 unsigned long flags;
1255 root = (struct root_entry *)alloc_pgtable_page(iommu->node);
1257 pr_err("Allocating root entry for %s failed\n",
1262 __iommu_flush_cache(iommu, root, ROOT_SIZE);
1264 spin_lock_irqsave(&iommu->lock, flags);
1265 iommu->root_entry = root;
1266 spin_unlock_irqrestore(&iommu->lock, flags);
1271 static void iommu_set_root_entry(struct intel_iommu *iommu)
1277 addr = virt_to_phys(iommu->root_entry);
1278 if (ecs_enabled(iommu))
1279 addr |= DMA_RTADDR_RTT;
1281 raw_spin_lock_irqsave(&iommu->register_lock, flag);
1282 dmar_writeq(iommu->reg + DMAR_RTADDR_REG, addr);
1284 writel(iommu->gcmd | DMA_GCMD_SRTP, iommu->reg + DMAR_GCMD_REG);
1286 /* Make sure hardware complete it */
1287 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
1288 readl, (sts & DMA_GSTS_RTPS), sts);
1290 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1293 static void iommu_flush_write_buffer(struct intel_iommu *iommu)
1298 if (!rwbf_quirk && !cap_rwbf(iommu->cap))
1301 raw_spin_lock_irqsave(&iommu->register_lock, flag);
1302 writel(iommu->gcmd | DMA_GCMD_WBF, iommu->reg + DMAR_GCMD_REG);
1304 /* Make sure hardware complete it */
1305 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
1306 readl, (!(val & DMA_GSTS_WBFS)), val);
1308 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1311 /* return value determines if we need a write buffer flush */
1312 static void __iommu_flush_context(struct intel_iommu *iommu,
1313 u16 did, u16 source_id, u8 function_mask,
1320 case DMA_CCMD_GLOBAL_INVL:
1321 val = DMA_CCMD_GLOBAL_INVL;
1323 case DMA_CCMD_DOMAIN_INVL:
1324 val = DMA_CCMD_DOMAIN_INVL|DMA_CCMD_DID(did);
1326 case DMA_CCMD_DEVICE_INVL:
1327 val = DMA_CCMD_DEVICE_INVL|DMA_CCMD_DID(did)
1328 | DMA_CCMD_SID(source_id) | DMA_CCMD_FM(function_mask);
1333 val |= DMA_CCMD_ICC;
1335 raw_spin_lock_irqsave(&iommu->register_lock, flag);
1336 dmar_writeq(iommu->reg + DMAR_CCMD_REG, val);
1338 /* Make sure hardware complete it */
1339 IOMMU_WAIT_OP(iommu, DMAR_CCMD_REG,
1340 dmar_readq, (!(val & DMA_CCMD_ICC)), val);
1342 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1345 /* return value determines if we need a write buffer flush */
1346 static void __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did,
1347 u64 addr, unsigned int size_order, u64 type)
1349 int tlb_offset = ecap_iotlb_offset(iommu->ecap);
1350 u64 val = 0, val_iva = 0;
1354 case DMA_TLB_GLOBAL_FLUSH:
1355 /* global flush doesn't need to set IVA_REG */
1356 val = DMA_TLB_GLOBAL_FLUSH|DMA_TLB_IVT;
1358 case DMA_TLB_DSI_FLUSH:
1359 val = DMA_TLB_DSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
1361 case DMA_TLB_PSI_FLUSH:
1362 val = DMA_TLB_PSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
1363 /* IH bit is passed in as part of address */
1364 val_iva = size_order | addr;
1369 /* Note: set drain read/write */
1372 * This is probably only there to be extra safe. Looks like we can
1373 * ignore it without any impact.
1375 if (cap_read_drain(iommu->cap))
1376 val |= DMA_TLB_READ_DRAIN;
1378 if (cap_write_drain(iommu->cap))
1379 val |= DMA_TLB_WRITE_DRAIN;
1381 raw_spin_lock_irqsave(&iommu->register_lock, flag);
1382 /* Note: Only uses first TLB reg currently */
1384 dmar_writeq(iommu->reg + tlb_offset, val_iva);
1385 dmar_writeq(iommu->reg + tlb_offset + 8, val);
1387 /* Make sure hardware complete it */
1388 IOMMU_WAIT_OP(iommu, tlb_offset + 8,
1389 dmar_readq, (!(val & DMA_TLB_IVT)), val);
1391 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1393 /* check IOTLB invalidation granularity */
1394 if (DMA_TLB_IAIG(val) == 0)
1395 pr_err("Flush IOTLB failed\n");
1396 if (DMA_TLB_IAIG(val) != DMA_TLB_IIRG(type))
1397 pr_debug("TLB flush request %Lx, actual %Lx\n",
1398 (unsigned long long)DMA_TLB_IIRG(type),
1399 (unsigned long long)DMA_TLB_IAIG(val));
1402 static struct device_domain_info *
1403 iommu_support_dev_iotlb (struct dmar_domain *domain, struct intel_iommu *iommu,
1407 unsigned long flags;
1408 struct device_domain_info *info;
1409 struct pci_dev *pdev;
1411 if (!ecap_dev_iotlb_support(iommu->ecap))
1417 spin_lock_irqsave(&device_domain_lock, flags);
1418 list_for_each_entry(info, &domain->devices, link)
1419 if (info->iommu == iommu && info->bus == bus &&
1420 info->devfn == devfn) {
1424 spin_unlock_irqrestore(&device_domain_lock, flags);
1426 if (!found || !info->dev || !dev_is_pci(info->dev))
1429 pdev = to_pci_dev(info->dev);
1431 if (!pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ATS))
1434 if (!dmar_find_matched_atsr_unit(pdev))
1440 static void iommu_enable_dev_iotlb(struct device_domain_info *info)
1442 if (!info || !dev_is_pci(info->dev))
1445 pci_enable_ats(to_pci_dev(info->dev), VTD_PAGE_SHIFT);
1448 static void iommu_disable_dev_iotlb(struct device_domain_info *info)
1450 if (!info->dev || !dev_is_pci(info->dev) ||
1451 !pci_ats_enabled(to_pci_dev(info->dev)))
1454 pci_disable_ats(to_pci_dev(info->dev));
1457 static void iommu_flush_dev_iotlb(struct dmar_domain *domain,
1458 u64 addr, unsigned mask)
1461 unsigned long flags;
1462 struct device_domain_info *info;
1464 spin_lock_irqsave(&device_domain_lock, flags);
1465 list_for_each_entry(info, &domain->devices, link) {
1466 struct pci_dev *pdev;
1467 if (!info->dev || !dev_is_pci(info->dev))
1470 pdev = to_pci_dev(info->dev);
1471 if (!pci_ats_enabled(pdev))
1474 sid = info->bus << 8 | info->devfn;
1475 qdep = pci_ats_queue_depth(pdev);
1476 qi_flush_dev_iotlb(info->iommu, sid, qdep, addr, mask);
1478 spin_unlock_irqrestore(&device_domain_lock, flags);
1481 static void iommu_flush_iotlb_psi(struct intel_iommu *iommu,
1482 struct dmar_domain *domain,
1483 unsigned long pfn, unsigned int pages,
1486 unsigned int mask = ilog2(__roundup_pow_of_two(pages));
1487 uint64_t addr = (uint64_t)pfn << VTD_PAGE_SHIFT;
1488 u16 did = domain->iommu_did[iommu->seq_id];
1495 * Fall back to domain-selective flush if there is no PSI support or the size is
1497 * PSI requires the page size to be 2^x, and the base address to be naturally
1498 * aligned to that size
1500 if (!cap_pgsel_inv(iommu->cap) || mask > cap_max_amask_val(iommu->cap))
1501 iommu->flush.flush_iotlb(iommu, did, 0, 0,
1504 iommu->flush.flush_iotlb(iommu, did, addr | ih, mask,
1508 * In caching mode, changes of pages from non-present to present require
1509 * a flush. However, the device IOTLB doesn't need to be flushed in this case.
1511 if (!cap_caching_mode(iommu->cap) || !map)
1512 iommu_flush_dev_iotlb(get_iommu_domain(iommu, did),
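/*
 * Worked example (illustrative): for pages == 5 the mask above is
 * ilog2(__roundup_pow_of_two(5)) == ilog2(8) == 3, i.e. an aligned 8-page
 * (32KiB) region is invalidated; if that mask exceeds the hardware's MAMV
 * (cap_max_amask_val), the code falls back to a domain-selective flush.
 */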
1516 static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu)
1519 unsigned long flags;
1521 raw_spin_lock_irqsave(&iommu->register_lock, flags);
1522 pmen = readl(iommu->reg + DMAR_PMEN_REG);
1523 pmen &= ~DMA_PMEN_EPM;
1524 writel(pmen, iommu->reg + DMAR_PMEN_REG);
1526 /* wait for the protected region status bit to clear */
1527 IOMMU_WAIT_OP(iommu, DMAR_PMEN_REG,
1528 readl, !(pmen & DMA_PMEN_PRS), pmen);
1530 raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
1533 static void iommu_enable_translation(struct intel_iommu *iommu)
1536 unsigned long flags;
1538 raw_spin_lock_irqsave(&iommu->register_lock, flags);
1539 iommu->gcmd |= DMA_GCMD_TE;
1540 writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
1542 /* Make sure hardware complete it */
1543 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
1544 readl, (sts & DMA_GSTS_TES), sts);
1546 raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
1549 static void iommu_disable_translation(struct intel_iommu *iommu)
1554 raw_spin_lock_irqsave(&iommu->register_lock, flag);
1555 iommu->gcmd &= ~DMA_GCMD_TE;
1556 writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
1558 /* Make sure hardware complete it */
1559 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
1560 readl, (!(sts & DMA_GSTS_TES)), sts);
1562 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1566 static int iommu_init_domains(struct intel_iommu *iommu)
1568 u32 ndomains, nlongs;
1571 ndomains = cap_ndoms(iommu->cap);
1572 pr_debug("%s: Number of Domains supported <%d>\n",
1573 iommu->name, ndomains);
1574 nlongs = BITS_TO_LONGS(ndomains);
1576 spin_lock_init(&iommu->lock);
1578 iommu->domain_ids = kcalloc(nlongs, sizeof(unsigned long), GFP_KERNEL);
1579 if (!iommu->domain_ids) {
1580 pr_err("%s: Allocating domain id array failed\n",
1585 size = ((ndomains >> 8) + 1) * sizeof(struct dmar_domain **);
1586 iommu->domains = kzalloc(size, GFP_KERNEL);
1588 if (iommu->domains) {
1589 size = 256 * sizeof(struct dmar_domain *);
1590 iommu->domains[0] = kzalloc(size, GFP_KERNEL);
1593 if (!iommu->domains || !iommu->domains[0]) {
1594 pr_err("%s: Allocating domain array failed\n",
1596 kfree(iommu->domain_ids);
1597 kfree(iommu->domains);
1598 iommu->domain_ids = NULL;
1599 iommu->domains = NULL;
1606 * If Caching mode is set, then invalid translations are tagged
1607 * with domain-id 0, hence we need to pre-allocate it. We also
1608 * use domain-id 0 as a marker for non-allocated domain-id, so
1609 * make sure it is not used for a real domain.
1611 set_bit(0, iommu->domain_ids);
1616 static void disable_dmar_iommu(struct intel_iommu *iommu)
1618 struct device_domain_info *info, *tmp;
1620 if (!iommu->domains || !iommu->domain_ids)
1623 list_for_each_entry_safe(info, tmp, &device_domain_list, global) {
1624 struct dmar_domain *domain;
1626 if (info->iommu != iommu)
1629 if (!info->dev || !info->domain)
1632 domain = info->domain;
1634 domain_remove_one_dev_info(domain, info->dev);
1636 if (!domain_type_is_vm_or_si(domain))
1637 domain_exit(domain);
1640 if (iommu->gcmd & DMA_GCMD_TE)
1641 iommu_disable_translation(iommu);
1644 static void free_dmar_iommu(struct intel_iommu *iommu)
1646 if ((iommu->domains) && (iommu->domain_ids)) {
1647 int elems = (cap_ndoms(iommu->cap) >> 8) + 1;
1650 for (i = 0; i < elems; i++)
1651 kfree(iommu->domains[i]);
1652 kfree(iommu->domains);
1653 kfree(iommu->domain_ids);
1654 iommu->domains = NULL;
1655 iommu->domain_ids = NULL;
1658 g_iommus[iommu->seq_id] = NULL;
1660 /* free context mapping */
1661 free_context_table(iommu);
1664 static struct dmar_domain *alloc_domain(int flags)
1666 struct dmar_domain *domain;
1668 domain = alloc_domain_mem();
1672 memset(domain, 0, sizeof(*domain));
1674 domain->flags = flags;
1675 spin_lock_init(&domain->iommu_lock);
1676 INIT_LIST_HEAD(&domain->devices);
1681 static int __iommu_attach_domain(struct dmar_domain *domain,
1682 struct intel_iommu *iommu)
1685 unsigned long ndomains;
1687 num = domain->iommu_did[iommu->seq_id];
1691 ndomains = cap_ndoms(iommu->cap);
1692 num = find_first_zero_bit(iommu->domain_ids, ndomains);
1694 if (num < ndomains) {
1695 set_bit(num, iommu->domain_ids);
1696 set_iommu_domain(iommu, num, domain);
1697 domain->iommu_did[iommu->seq_id] = num;
1703 pr_err("%s: No free domain ids\n", iommu->name);
1708 static int iommu_attach_domain(struct dmar_domain *domain,
1709 struct intel_iommu *iommu)
1712 unsigned long flags;
1714 spin_lock_irqsave(&iommu->lock, flags);
1715 num = __iommu_attach_domain(domain, iommu);
1716 spin_unlock_irqrestore(&iommu->lock, flags);
1721 static void iommu_detach_domain(struct dmar_domain *domain,
1722 struct intel_iommu *iommu)
1724 unsigned long flags;
1727 spin_lock_irqsave(&iommu->lock, flags);
1729 num = domain->iommu_did[iommu->seq_id];
1734 clear_bit(num, iommu->domain_ids);
1735 set_iommu_domain(iommu, num, NULL);
1737 spin_unlock_irqrestore(&iommu->lock, flags);
1740 static void domain_attach_iommu(struct dmar_domain *domain,
1741 struct intel_iommu *iommu)
1743 unsigned long flags;
1745 spin_lock_irqsave(&domain->iommu_lock, flags);
1746 domain->iommu_refcnt[iommu->seq_id] += 1;
1747 domain->iommu_count += 1;
1748 if (domain->iommu_refcnt[iommu->seq_id] == 1) {
1749 domain->nid = iommu->node;
1750 domain_update_iommu_cap(domain);
1752 spin_unlock_irqrestore(&domain->iommu_lock, flags);
1755 static int domain_detach_iommu(struct dmar_domain *domain,
1756 struct intel_iommu *iommu)
1758 unsigned long flags;
1759 int count = INT_MAX;
1761 spin_lock_irqsave(&domain->iommu_lock, flags);
1762 domain->iommu_refcnt[iommu->seq_id] -= 1;
1763 count = --domain->iommu_count;
1764 if (domain->iommu_refcnt[iommu->seq_id] == 0) {
1765 domain_update_iommu_cap(domain);
1766 domain->iommu_did[iommu->seq_id] = 0;
1768 spin_unlock_irqrestore(&domain->iommu_lock, flags);
1773 static struct iova_domain reserved_iova_list;
1774 static struct lock_class_key reserved_rbtree_key;
1776 static int dmar_init_reserved_ranges(void)
1778 struct pci_dev *pdev = NULL;
1782 init_iova_domain(&reserved_iova_list, VTD_PAGE_SIZE, IOVA_START_PFN,
1785 lockdep_set_class(&reserved_iova_list.iova_rbtree_lock,
1786 &reserved_rbtree_key);
1788 /* IOAPIC ranges shouldn't be accessed by DMA */
1789 iova = reserve_iova(&reserved_iova_list, IOVA_PFN(IOAPIC_RANGE_START),
1790 IOVA_PFN(IOAPIC_RANGE_END));
1792 pr_err("Reserve IOAPIC range failed\n");
1796 /* Reserve all PCI MMIO to avoid peer-to-peer access */
1797 for_each_pci_dev(pdev) {
1800 for (i = 0; i < PCI_NUM_RESOURCES; i++) {
1801 r = &pdev->resource[i];
1802 if (!r->flags || !(r->flags & IORESOURCE_MEM))
1804 iova = reserve_iova(&reserved_iova_list,
1808 pr_err("Reserve iova failed\n");
1816 static void domain_reserve_special_ranges(struct dmar_domain *domain)
1818 copy_reserved_iova(&reserved_iova_list, &domain->iovad);
1821 static inline int guestwidth_to_adjustwidth(int gaw)
1824 int r = (gaw - 12) % 9;
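/*
 * Worked example (illustrative): the adjustment rounds the width up so that
 * whole 9-bit levels sit above the 12-bit page offset, e.g. a guest width
 * of 36 becomes 39 (r == 6), while 39 and 48 are already level-aligned and
 * stay unchanged.
 */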
1835 static int domain_init(struct dmar_domain *domain, int guest_width)
1837 struct intel_iommu *iommu;
1838 int adjust_width, agaw;
1839 unsigned long sagaw;
1841 init_iova_domain(&domain->iovad, VTD_PAGE_SIZE, IOVA_START_PFN,
1843 domain_reserve_special_ranges(domain);
1845 /* calculate AGAW */
1846 iommu = domain_get_iommu(domain);
1847 if (guest_width > cap_mgaw(iommu->cap))
1848 guest_width = cap_mgaw(iommu->cap);
1849 domain->gaw = guest_width;
1850 adjust_width = guestwidth_to_adjustwidth(guest_width);
1851 agaw = width_to_agaw(adjust_width);
1852 sagaw = cap_sagaw(iommu->cap);
1853 if (!test_bit(agaw, &sagaw)) {
1854 /* hardware doesn't support it, choose a bigger one */
1855 pr_debug("Hardware doesn't support agaw %d\n", agaw);
1856 agaw = find_next_bit(&sagaw, 5, agaw);
1860 domain->agaw = agaw;
1862 if (ecap_coherent(iommu->ecap))
1863 domain->iommu_coherency = 1;
1865 domain->iommu_coherency = 0;
1867 if (ecap_sc_support(iommu->ecap))
1868 domain->iommu_snooping = 1;
1870 domain->iommu_snooping = 0;
1872 if (intel_iommu_superpage)
1873 domain->iommu_superpage = fls(cap_super_page_val(iommu->cap));
1875 domain->iommu_superpage = 0;
1877 domain->nid = iommu->node;
1879 /* always allocate the top pgd */
1880 domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
1883 __iommu_flush_cache(iommu, domain->pgd, PAGE_SIZE);
1887 static void domain_exit(struct dmar_domain *domain)
1889 struct page *freelist = NULL;
1892 /* Domain 0 is reserved, so don't process it */
1896 /* Flush any lazy unmaps that may reference this domain */
1897 if (!intel_iommu_strict)
1898 flush_unmaps_timeout(0);
1900 /* remove associated devices */
1901 domain_remove_dev_info(domain);
1904 put_iova_domain(&domain->iovad);
1906 freelist = domain_unmap(domain, 0, DOMAIN_MAX_PFN(domain->gaw));
1908 /* clear attached or cached domains */
1910 for_each_domain_iommu(i, domain)
1911 iommu_detach_domain(domain, g_iommus[i]);
1914 dma_free_pagelist(freelist);
1916 free_domain_mem(domain);
1919 static int domain_context_mapping_one(struct dmar_domain *domain,
1920 struct intel_iommu *iommu,
1923 int translation = CONTEXT_TT_MULTI_LEVEL;
1924 struct device_domain_info *info = NULL;
1925 struct context_entry *context;
1926 unsigned long flags;
1927 struct dma_pte *pgd;
1931 if (hw_pass_through && domain_type_is_si(domain))
1932 translation = CONTEXT_TT_PASS_THROUGH;
1934 pr_debug("Set context mapping for %02x:%02x.%d\n",
1935 bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
1937 BUG_ON(!domain->pgd);
1939 spin_lock_irqsave(&iommu->lock, flags);
1940 context = iommu_context_addr(iommu, bus, devfn, 1);
1941 spin_unlock_irqrestore(&iommu->lock, flags);
1944 spin_lock_irqsave(&iommu->lock, flags);
1945 if (context_present(context)) {
1946 spin_unlock_irqrestore(&iommu->lock, flags);
1952 id = __iommu_attach_domain(domain, iommu);
1954 spin_unlock_irqrestore(&iommu->lock, flags);
1955 pr_err("%s: No free domain ids\n", iommu->name);
1959 context_clear_entry(context);
1960 context_set_domain_id(context, id);
1963 * Skip top levels of page tables for iommus which have a smaller agaw
1964 * than the default. Unnecessary for PT mode.
1966 if (translation != CONTEXT_TT_PASS_THROUGH) {
1967 for (agaw = domain->agaw; agaw != iommu->agaw; agaw--) {
1968 pgd = phys_to_virt(dma_pte_addr(pgd));
1969 if (!dma_pte_present(pgd)) {
1970 spin_unlock_irqrestore(&iommu->lock, flags);
1975 info = iommu_support_dev_iotlb(domain, iommu, bus, devfn);
1976 translation = info ? CONTEXT_TT_DEV_IOTLB :
1977 CONTEXT_TT_MULTI_LEVEL;
1979 context_set_address_root(context, virt_to_phys(pgd));
1980 context_set_address_width(context, iommu->agaw);
1983 * In pass through mode, AW must be programmed to
1984 * indicate the largest AGAW value supported by
1985 * hardware. And ASR is ignored by hardware.
1987 context_set_address_width(context, iommu->msagaw);
1990 context_set_translation_type(context, translation);
1991 context_set_fault_enable(context);
1992 context_set_present(context);
1993 domain_flush_cache(domain, context, sizeof(*context));
1996 * It's a non-present to present mapping. If hardware doesn't cache
1997 * non-present entries we only need to flush the write-buffer. If it
1998 * _does_ cache non-present entries, then it does so in the special
1999 * domain #0, which we have to flush:
2001 if (cap_caching_mode(iommu->cap)) {
2002 iommu->flush.flush_context(iommu, 0,
2003 (((u16)bus) << 8) | devfn,
2004 DMA_CCMD_MASK_NOBIT,
2005 DMA_CCMD_DEVICE_INVL);
2006 iommu->flush.flush_iotlb(iommu, id, 0, 0, DMA_TLB_DSI_FLUSH);
2008 iommu_flush_write_buffer(iommu);
2010 iommu_enable_dev_iotlb(info);
2011 spin_unlock_irqrestore(&iommu->lock, flags);
2013 domain_attach_iommu(domain, iommu);
2018 struct domain_context_mapping_data {
2019 struct dmar_domain *domain;
2020 struct intel_iommu *iommu;
2023 static int domain_context_mapping_cb(struct pci_dev *pdev,
2024 u16 alias, void *opaque)
2026 struct domain_context_mapping_data *data = opaque;
2028 return domain_context_mapping_one(data->domain, data->iommu,
2029 PCI_BUS_NUM(alias), alias & 0xff);
2033 domain_context_mapping(struct dmar_domain *domain, struct device *dev)
2035 struct intel_iommu *iommu;
2037 struct domain_context_mapping_data data;
2039 iommu = device_to_iommu(dev, &bus, &devfn);
2043 if (!dev_is_pci(dev))
2044 return domain_context_mapping_one(domain, iommu, bus, devfn);
2046 data.domain = domain;
2049 return pci_for_each_dma_alias(to_pci_dev(dev),
2050 &domain_context_mapping_cb, &data);
2053 static int domain_context_mapped_cb(struct pci_dev *pdev,
2054 u16 alias, void *opaque)
2056 struct intel_iommu *iommu = opaque;
2058 return !device_context_mapped(iommu, PCI_BUS_NUM(alias), alias & 0xff);
2061 static int domain_context_mapped(struct device *dev)
2063 struct intel_iommu *iommu;
2066 iommu = device_to_iommu(dev, &bus, &devfn);
2070 if (!dev_is_pci(dev))
2071 return device_context_mapped(iommu, bus, devfn);
2073 return !pci_for_each_dma_alias(to_pci_dev(dev),
2074 domain_context_mapped_cb, iommu);
2077 /* Returns a number of VTD pages, but aligned to MM page size */
2078 static inline unsigned long aligned_nrpages(unsigned long host_addr,
2081 host_addr &= ~PAGE_MASK;
2082 return PAGE_ALIGN(host_addr + size) >> VTD_PAGE_SHIFT;
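/*
 * Worked example (illustrative, assuming 4KiB MM pages): host_addr == 0x1234
 * and size == 0x2000 gives PAGE_ALIGN(0x234 + 0x2000) >> VTD_PAGE_SHIFT ==
 * 0x3000 >> 12 == 3 VT-d pages.
 */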
2085 /* Return largest possible superpage level for a given mapping */
2086 static inline int hardware_largepage_caps(struct dmar_domain *domain,
2087 unsigned long iov_pfn,
2088 unsigned long phy_pfn,
2089 unsigned long pages)
2091 int support, level = 1;
2092 unsigned long pfnmerge;
2094 support = domain->iommu_superpage;
2096 /* To use a large page, the virtual *and* physical addresses
2097 must be aligned to 2MiB/1GiB/etc. Lower bits set in either
2098 of them will mean we have to use smaller pages. So just
2099 merge them and check both at once. */
2100 pfnmerge = iov_pfn | phy_pfn;
2102 while (support && !(pfnmerge & ~VTD_STRIDE_MASK)) {
2103 pages >>= VTD_STRIDE_SHIFT;
2106 pfnmerge >>= VTD_STRIDE_SHIFT;
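/*
 * Worked example (illustrative): with only 2MiB superpages supported
 * (domain->iommu_superpage == 1), iov_pfn == 0x200, phy_pfn == 0x400 and
 * pages == 512 give pfnmerge == 0x600, whose low 9 bits are clear, so the
 * loop settles on level 2 and a single 2MiB page is used.
 */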
2113 static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
2114 struct scatterlist *sg, unsigned long phys_pfn,
2115 unsigned long nr_pages, int prot)
2117 struct dma_pte *first_pte = NULL, *pte = NULL;
2118 phys_addr_t uninitialized_var(pteval);
2119 unsigned long sg_res = 0;
2120 unsigned int largepage_lvl = 0;
2121 unsigned long lvl_pages = 0;
2123 BUG_ON(!domain_pfn_supported(domain, iov_pfn + nr_pages - 1));
2125 if ((prot & (DMA_PTE_READ|DMA_PTE_WRITE)) == 0)
2128 prot &= DMA_PTE_READ | DMA_PTE_WRITE | DMA_PTE_SNP;
2132 pteval = ((phys_addr_t)phys_pfn << VTD_PAGE_SHIFT) | prot;
2135 while (nr_pages > 0) {
2139 sg_res = aligned_nrpages(sg->offset, sg->length);
2140 sg->dma_address = ((dma_addr_t)iov_pfn << VTD_PAGE_SHIFT) + sg->offset;
2141 sg->dma_length = sg->length;
2142 pteval = page_to_phys(sg_page(sg)) | prot;
2143 phys_pfn = pteval >> VTD_PAGE_SHIFT;
2147 largepage_lvl = hardware_largepage_caps(domain, iov_pfn, phys_pfn, sg_res);
2149 first_pte = pte = pfn_to_dma_pte(domain, iov_pfn, &largepage_lvl);
2152 /* It is a large page */
2153 if (largepage_lvl > 1) {
2154 pteval |= DMA_PTE_LARGE_PAGE;
2155 lvl_pages = lvl_to_nr_pages(largepage_lvl);
2157 * Ensure that old small page tables are
2158 * removed to make room for superpage,
2161 dma_pte_free_pagetable(domain, iov_pfn,
2162 iov_pfn + lvl_pages - 1);
2164 pteval &= ~(uint64_t)DMA_PTE_LARGE_PAGE;
2168 /* We don't need lock here, nobody else
2169 * touches the iova range
2171 tmp = cmpxchg64_local(&pte->val, 0ULL, pteval);
2173 static int dumps = 5;
2174 pr_crit("ERROR: DMA PTE for vPFN 0x%lx already set (to %llx not %llx)\n",
2175 iov_pfn, tmp, (unsigned long long)pteval);
2178 debug_dma_dump_mappings(NULL);
2183 lvl_pages = lvl_to_nr_pages(largepage_lvl);
2185 BUG_ON(nr_pages < lvl_pages);
2186 BUG_ON(sg_res < lvl_pages);
2188 nr_pages -= lvl_pages;
2189 iov_pfn += lvl_pages;
2190 phys_pfn += lvl_pages;
2191 pteval += lvl_pages * VTD_PAGE_SIZE;
2192 sg_res -= lvl_pages;
2194 /* If the next PTE would be the first in a new page, then we
2195 need to flush the cache on the entries we've just written.
2196 And then we'll need to recalculate 'pte', so clear it and
2197 let it get set again in the if (!pte) block above.
2199 If we're done (!nr_pages) we need to flush the cache too.
2201 Also if we've been setting superpages, we may need to
2202 recalculate 'pte' and switch back to smaller pages for the
2203 end of the mapping, if the trailing size is not enough to
2204 use another superpage (i.e. sg_res < lvl_pages). */
2206 if (!nr_pages || first_pte_in_page(pte) ||
2207 (largepage_lvl > 1 && sg_res < lvl_pages)) {
2208 domain_flush_cache(domain, first_pte,
2209 (void *)pte - (void *)first_pte);
2213 if (!sg_res && nr_pages)
2219 static inline int domain_sg_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
2220 struct scatterlist *sg, unsigned long nr_pages,
2223 return __domain_mapping(domain, iov_pfn, sg, 0, nr_pages, prot);
2226 static inline int domain_pfn_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
2227 unsigned long phys_pfn, unsigned long nr_pages,
2230 return __domain_mapping(domain, iov_pfn, NULL, phys_pfn, nr_pages, prot);
2233 static void iommu_detach_dev(struct intel_iommu *iommu, u8 bus, u8 devfn)
2238 clear_context_table(iommu, bus, devfn);
2239 iommu->flush.flush_context(iommu, 0, 0, 0,
2240 DMA_CCMD_GLOBAL_INVL);
2241 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
2244 static inline void unlink_domain_info(struct device_domain_info *info)
2246 assert_spin_locked(&device_domain_lock);
2247 list_del(&info->link);
2248 list_del(&info->global);
2250 info->dev->archdata.iommu = NULL;
2253 static void domain_remove_dev_info(struct dmar_domain *domain)
2255 struct device_domain_info *info, *tmp;
2257 list_for_each_entry_safe(info, tmp, &domain->devices, link)
2258 domain_remove_one_dev_info(domain, info->dev);
2263 * Note: we use struct device->archdata.iommu to store the info
2265 static struct dmar_domain *find_domain(struct device *dev)
2267 struct device_domain_info *info;
2269 /* No lock here, assumes no domain exit in normal case */
2270 info = dev->archdata.iommu;
2272 return info->domain;
2276 static inline struct device_domain_info *
2277 dmar_search_domain_by_dev_info(int segment, int bus, int devfn)
2279 struct device_domain_info *info;
2281 list_for_each_entry(info, &device_domain_list, global)
2282 if (info->iommu->segment == segment && info->bus == bus &&
2283 info->devfn == devfn)
2289 static struct dmar_domain *dmar_insert_dev_info(struct intel_iommu *iommu,
2292 struct dmar_domain *domain)
2294 struct dmar_domain *found = NULL;
2295 struct device_domain_info *info;
2296 unsigned long flags;
2298 info = alloc_devinfo_mem();
2303 info->devfn = devfn;
2305 info->domain = domain;
2306 info->iommu = iommu;
2308 spin_lock_irqsave(&device_domain_lock, flags);
2310 found = find_domain(dev);
2312 struct device_domain_info *info2;
2313 info2 = dmar_search_domain_by_dev_info(iommu->segment, bus, devfn);
2315 found = info2->domain;
2318 spin_unlock_irqrestore(&device_domain_lock, flags);
2319 free_devinfo_mem(info);
2320 /* Caller must free the original domain */
2324 list_add(&info->link, &domain->devices);
2325 list_add(&info->global, &device_domain_list);
2327 dev->archdata.iommu = info;
2328 spin_unlock_irqrestore(&device_domain_lock, flags);
2333 static int get_last_alias(struct pci_dev *pdev, u16 alias, void *opaque)
2335 *(u16 *)opaque = alias;
2339 /* domain is initialized */
2340 static struct dmar_domain *get_domain_for_dev(struct device *dev, int gaw)
2342 struct dmar_domain *domain, *tmp;
2343 struct intel_iommu *iommu;
2344 struct device_domain_info *info;
2346 unsigned long flags;
2349 domain = find_domain(dev);
2353 iommu = device_to_iommu(dev, &bus, &devfn);
2357 if (dev_is_pci(dev)) {
2358 struct pci_dev *pdev = to_pci_dev(dev);
2360 pci_for_each_dma_alias(pdev, get_last_alias, &dma_alias);
2362 spin_lock_irqsave(&device_domain_lock, flags);
2363 info = dmar_search_domain_by_dev_info(pci_domain_nr(pdev->bus),
2364 PCI_BUS_NUM(dma_alias),
2367 iommu = info->iommu;
2368 domain = info->domain;
2370 spin_unlock_irqrestore(&device_domain_lock, flags);
2372 /* DMA alias already has a domain; use it */
2377 /* Allocate and initialize new domain for the device */
2378 domain = alloc_domain(0);
2381 if (iommu_attach_domain(domain, iommu) < 0) {
2382 free_domain_mem(domain);
2385 domain_attach_iommu(domain, iommu);
2386 if (domain_init(domain, gaw)) {
2387 domain_exit(domain);
2391 /* register PCI DMA alias device */
2392 if (dev_is_pci(dev)) {
2393 tmp = dmar_insert_dev_info(iommu, PCI_BUS_NUM(dma_alias),
2394 dma_alias & 0xff, NULL, domain);
2396 if (!tmp || tmp != domain) {
2397 domain_exit(domain);
2406 tmp = dmar_insert_dev_info(iommu, bus, devfn, dev, domain);
2408 if (!tmp || tmp != domain) {
2409 domain_exit(domain);
2416 static int iommu_identity_mapping;
2417 #define IDENTMAP_ALL 1
2418 #define IDENTMAP_GFX 2
2419 #define IDENTMAP_AZALIA 4
2421 static int iommu_domain_identity_map(struct dmar_domain *domain,
2422 unsigned long long start,
2423 unsigned long long end)
2425 unsigned long first_vpfn = start >> VTD_PAGE_SHIFT;
2426 unsigned long last_vpfn = end >> VTD_PAGE_SHIFT;
2428 if (!reserve_iova(&domain->iovad, dma_to_mm_pfn(first_vpfn),
2429 dma_to_mm_pfn(last_vpfn))) {
2430 pr_err("Reserving iova failed\n");
2434 pr_debug("Mapping reserved region %llx-%llx\n", start, end);
2436 * The RMRR range might overlap with a physical memory range,
2439 dma_pte_clear_range(domain, first_vpfn, last_vpfn);
2441 return domain_pfn_mapping(domain, first_vpfn, first_vpfn,
2442 last_vpfn - first_vpfn + 1,
2443 DMA_PTE_READ|DMA_PTE_WRITE);
2446 static int iommu_prepare_identity_map(struct device *dev,
2447 unsigned long long start,
2448 unsigned long long end)
2450 struct dmar_domain *domain;
2453 domain = get_domain_for_dev(dev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
2457 /* For _hardware_ passthrough, don't bother. But for software
2458 passthrough, we do it anyway -- it may indicate a memory
2459 range which is reserved in E820 and so didn't get set
2460 up to start with in si_domain */
2461 if (domain == si_domain && hw_pass_through) {
2462 pr_warn("Ignoring identity map for HW passthrough device %s [0x%Lx - 0x%Lx]\n",
2463 dev_name(dev), start, end);
2467 pr_info("Setting identity map for device %s [0x%Lx - 0x%Lx]\n",
2468 dev_name(dev), start, end);
2471 WARN(1, "Your BIOS is broken; RMRR ends before it starts!\n"
2472 "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
2473 dmi_get_system_info(DMI_BIOS_VENDOR),
2474 dmi_get_system_info(DMI_BIOS_VERSION),
2475 dmi_get_system_info(DMI_PRODUCT_VERSION));
2480 if (end >> agaw_to_width(domain->agaw)) {
2481 WARN(1, "Your BIOS is broken; RMRR exceeds permitted address width (%d bits)\n"
2482 "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
2483 agaw_to_width(domain->agaw),
2484 dmi_get_system_info(DMI_BIOS_VENDOR),
2485 dmi_get_system_info(DMI_BIOS_VERSION),
2486 dmi_get_system_info(DMI_PRODUCT_VERSION));
2491 ret = iommu_domain_identity_map(domain, start, end);
2495 /* context entry init */
2496 ret = domain_context_mapping(domain, dev);
2503 domain_exit(domain);
2507 static inline int iommu_prepare_rmrr_dev(struct dmar_rmrr_unit *rmrr,
2510 if (dev->archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO)
2512 return iommu_prepare_identity_map(dev, rmrr->base_address,
2516 #ifdef CONFIG_INTEL_IOMMU_FLOPPY_WA
2517 static inline void iommu_prepare_isa(void)
2519 struct pci_dev *pdev;
2522 pdev = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, NULL);
2526 pr_info("Prepare 0-16MiB unity mapping for LPC\n");
2527 ret = iommu_prepare_identity_map(&pdev->dev, 0, 16*1024*1024 - 1);
2530 pr_err("Failed to create 0-16MiB identity map - floppy might not work\n");
2535 static inline void iommu_prepare_isa(void)
2539 #endif /* !CONFIG_INTEL_IOMMU_FLOPPY_WA */
2541 static int md_domain_init(struct dmar_domain *domain, int guest_width);
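/*
 * Set up the static identity (si) domain: allocate it and 1:1-map the RAM
 * ranges of every online node into it.
 */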
2543 static int __init si_domain_init(int hw)
2547 si_domain = alloc_domain(DOMAIN_FLAG_STATIC_IDENTITY);
2551 if (md_domain_init(si_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
2552 domain_exit(si_domain);
2556 pr_debug("Identity mapping domain allocated\n");
2561 for_each_online_node(nid) {
2562 unsigned long start_pfn, end_pfn;
2565 for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
2566 ret = iommu_domain_identity_map(si_domain,
2567 PFN_PHYS(start_pfn), PFN_PHYS(end_pfn));
2576 static int identity_mapping(struct device *dev)
2578 struct device_domain_info *info;
2580 if (likely(!iommu_identity_mapping))
2583 info = dev->archdata.iommu;
2584 if (info && info != DUMMY_DEVICE_DOMAIN_INFO)
2585 return (info->domain == si_domain);
2590 static int domain_add_dev_info(struct dmar_domain *domain, struct device *dev)
2592 struct dmar_domain *ndomain;
2593 struct intel_iommu *iommu;
2597 iommu = device_to_iommu(dev, &bus, &devfn);
2601 ndomain = dmar_insert_dev_info(iommu, bus, devfn, dev, domain);
2602 if (ndomain != domain)
2605 ret = domain_context_mapping(domain, dev);
2607 domain_remove_one_dev_info(domain, dev);
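/* Return true if the device scope of any RMRR includes @dev. */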
2614 static bool device_has_rmrr(struct device *dev)
2616 struct dmar_rmrr_unit *rmrr;
2621 for_each_rmrr_units(rmrr) {
2623 * Return TRUE if this RMRR contains the device that
2626 for_each_active_dev_scope(rmrr->devices,
2627 rmrr->devices_cnt, i, tmp)
2638 * There are a couple cases where we need to restrict the functionality of
2639 * devices associated with RMRRs. The first is when evaluating a device for
2640 * identity mapping because problems exist when devices are moved in and out
2641 * of domains and their respective RMRR information is lost. This means that
2642 * a device with associated RMRRs will never be in a "passthrough" domain.
2643 * The second is use of the device through the IOMMU API. This interface
2644 * expects to have full control of the IOVA space for the device. We cannot
2645 * satisfy both the requirement that RMRR access is maintained and have an
2646 * unencumbered IOVA space. We also have no ability to quiesce the device's
2647 * use of the RMRR space or even inform the IOMMU API user of the restriction.
2648 * We therefore prevent devices associated with an RMRR from participating in
2649 * the IOMMU API, which eliminates them from device assignment.
2651 * In both cases we assume that PCI USB devices with RMRRs have them largely
2652 * for historical reasons and that the RMRR space is not actively used post
2653 * boot. This exclusion may change if vendors begin to abuse it.
2655 * The same exception is made for graphics devices, with the requirement that
2656 * any use of the RMRR regions will be torn down before assigning the device
2659 static bool device_is_rmrr_locked(struct device *dev)
2661 if (!device_has_rmrr(dev))
2664 if (dev_is_pci(dev)) {
2665 struct pci_dev *pdev = to_pci_dev(dev);
2667 if (IS_USB_DEVICE(pdev) || IS_GFX_DEVICE(pdev))
2674 static int iommu_should_identity_map(struct device *dev, int startup)
2677 if (dev_is_pci(dev)) {
2678 struct pci_dev *pdev = to_pci_dev(dev);
2680 if (device_is_rmrr_locked(dev))
2683 if ((iommu_identity_mapping & IDENTMAP_AZALIA) && IS_AZALIA(pdev))
2686 if ((iommu_identity_mapping & IDENTMAP_GFX) && IS_GFX_DEVICE(pdev))
2689 if (!(iommu_identity_mapping & IDENTMAP_ALL))
2693 * We want to start off with all devices in the 1:1 domain, and
2694 * take them out later if we find they can't access all of memory.
2696 * However, we can't do this for PCI devices behind bridges,
2697 * because all PCI devices behind the same bridge will end up
2698 * with the same source-id on their transactions.
2700 * Practically speaking, we can't change things around for these
2701 * devices at run-time, because we can't be sure there'll be no
2702 * DMA transactions in flight for any of their siblings.
2704 * So PCI devices (unless they're on the root bus) as well as
2705 * their parent PCI-PCI or PCIe-PCI bridges must be left _out_ of
2706 * the 1:1 domain, just in _case_ one of their siblings turns out
2707 * not to be able to map all of memory.
2709 if (!pci_is_pcie(pdev)) {
2710 if (!pci_is_root_bus(pdev->bus))
2712 if (pdev->class >> 8 == PCI_CLASS_BRIDGE_PCI)
2714 } else if (pci_pcie_type(pdev) == PCI_EXP_TYPE_PCI_BRIDGE)
2717 if (device_has_rmrr(dev))
2722 * At boot time, we don't yet know if devices will be 64-bit capable.
2723 * Assume that they will -- if they turn out not to be, then we can
2724 * take them out of the 1:1 domain later.
2728 * If the device's dma_mask is less than the system's memory
2729 * size then this is not a candidate for identity mapping.
2731 u64 dma_mask = *dev->dma_mask;
2733 if (dev->coherent_dma_mask &&
2734 dev->coherent_dma_mask < dma_mask)
2735 dma_mask = dev->coherent_dma_mask;
2737 return dma_mask >= dma_get_required_mask(dev);
2743 static int __init dev_prepare_static_identity_mapping(struct device *dev, int hw)
2747 if (!iommu_should_identity_map(dev, 1))
2750 ret = domain_add_dev_info(si_domain, dev);
2752 pr_info("%s identity mapping for device %s\n",
2753 hw ? "Hardware" : "Software", dev_name(dev));
2754 else if (ret == -ENODEV)
2755 /* device not associated with an iommu */
2762 static int __init iommu_prepare_static_identity_mapping(int hw)
2764 struct pci_dev *pdev = NULL;
2765 struct dmar_drhd_unit *drhd;
2766 struct intel_iommu *iommu;
2771 for_each_pci_dev(pdev) {
2772 ret = dev_prepare_static_identity_mapping(&pdev->dev, hw);
2777 for_each_active_iommu(iommu, drhd)
2778 for_each_active_dev_scope(drhd->devices, drhd->devices_cnt, i, dev) {
2779 struct acpi_device_physical_node *pn;
2780 struct acpi_device *adev;
2782 if (dev->bus != &acpi_bus_type)
2785 adev = to_acpi_device(dev);
2786 mutex_lock(&adev->physical_node_lock);
2787 list_for_each_entry(pn, &adev->physical_node_list, node) {
2788 ret = dev_prepare_static_identity_mapping(pn->dev, hw);
2792 mutex_unlock(&adev->physical_node_lock);
2800 static void intel_iommu_init_qi(struct intel_iommu *iommu)
2803 * Start from a sane iommu hardware state.
2804 * If queued invalidation was already initialized by us
2805 * (for example, while enabling interrupt-remapping) then
2806 * things are already rolling from a sane state.
2810 * Clear any previous faults.
2812 dmar_fault(-1, iommu);
2814 * Disable queued invalidation if supported and already enabled
2815 * before OS handover.
2817 dmar_disable_qi(iommu);
2820 if (dmar_enable_qi(iommu)) {
2822 * Queued invalidation not enabled, use register-based invalidation
2824 iommu->flush.flush_context = __iommu_flush_context;
2825 iommu->flush.flush_iotlb = __iommu_flush_iotlb;
2826 pr_info("%s: Using Register based invalidation\n",
2829 iommu->flush.flush_context = qi_flush_context;
2830 iommu->flush.flush_iotlb = qi_flush_iotlb;
2831 pr_info("%s: Using Queued invalidation\n", iommu->name);
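/*
 * kdump support: copy one bus's context table from the old kernel's root
 * entry.  Domain IDs found in use are reserved in iommu->domain_ids, and
 * every copied entry is marked (with PASIDs disabled) so that this kernel
 * can recognise inherited entries later.
 */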
2835 static int copy_context_table(struct intel_iommu *iommu,
2836 struct root_entry *old_re,
2837 struct context_entry **tbl,
2840 struct context_entry *old_ce = NULL, *new_ce = NULL, ce;
2841 int tbl_idx, pos = 0, idx, devfn, ret = 0, did;
2842 phys_addr_t old_ce_phys;
2844 tbl_idx = ext ? bus * 2 : bus;
2846 for (devfn = 0; devfn < 256; devfn++) {
2847 /* First calculate the correct index */
2848 idx = (ext ? devfn * 2 : devfn) % 256;
2851 /* First save what we may have and clean up */
2853 tbl[tbl_idx] = new_ce;
2854 __iommu_flush_cache(iommu, new_ce,
2864 old_ce_phys = root_entry_lctp(old_re);
2866 old_ce_phys = root_entry_uctp(old_re);
2869 if (ext && devfn == 0) {
2870 /* No LCTP, try UCTP */
2879 old_ce = ioremap_cache(old_ce_phys, PAGE_SIZE);
2883 new_ce = alloc_pgtable_page(iommu->node);
2890 /* Now copy the context entry */
2893 if (!__context_present(&ce))
2896 did = context_domain_id(&ce);
2897 if (did >= 0 && did < cap_ndoms(iommu->cap))
2898 set_bit(did, iommu->domain_ids);
2901 * We need a marker for copied context entries. This
2902 * marker needs to work for the old format as well as
2903 * for extended context entries.
2905 * Bit 67 of the context entry is used. In the old
2906 * format this bit is available to software, in the
2907 * extended format it is the PGE bit, but PGE is ignored
2908 * by HW if PASIDs are disabled (and thus still
2911 * So disable PASIDs first and then mark the entry
2912 * copied. This means that we don't copy PASID
2913 * translations from the old kernel, but this is fine as
2914 * faults there are not fatal.
2916 context_clear_pasid_enable(&ce);
2917 context_set_copied(&ce);
2922 tbl[tbl_idx + pos] = new_ce;
2924 __iommu_flush_cache(iommu, new_ce, VTD_PAGE_SIZE);
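/*
 * kdump support: take over the root/context tables left behind by the
 * previous kernel so that DMA mappings set up before the crash keep
 * working until the device drivers take over again.
 */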
2933 static int copy_translation_tables(struct intel_iommu *iommu)
2935 struct context_entry **ctxt_tbls;
2936 struct root_entry *old_rt;
2937 phys_addr_t old_rt_phys;
2938 int ctxt_table_entries;
2939 unsigned long flags;
2944 rtaddr_reg = dmar_readq(iommu->reg + DMAR_RTADDR_REG);
2945 ext = !!(rtaddr_reg & DMA_RTADDR_RTT);
2946 new_ext = !!ecap_ecs(iommu->ecap);
2949 * The RTT bit can only be changed when translation is disabled,
2950 * but disabling translation would open a window for data
2951 * corruption. So bail out and don't copy anything if we would
2952 * have to change the bit.
2957 old_rt_phys = rtaddr_reg & VTD_PAGE_MASK;
2961 old_rt = ioremap_cache(old_rt_phys, PAGE_SIZE);
2965 /* This is too big for the stack - allocate it from slab */
2966 ctxt_table_entries = ext ? 512 : 256;
2968 ctxt_tbls = kzalloc(ctxt_table_entries * sizeof(void *), GFP_KERNEL);
2972 for (bus = 0; bus < 256; bus++) {
2973 ret = copy_context_table(iommu, &old_rt[bus],
2974 ctxt_tbls, bus, ext);
2976 pr_err("%s: Failed to copy context table for bus %d\n",
2982 spin_lock_irqsave(&iommu->lock, flags);
2984 /* Context tables are copied, now write them to the root_entry table */
2985 for (bus = 0; bus < 256; bus++) {
2986 int idx = ext ? bus * 2 : bus;
2989 if (ctxt_tbls[idx]) {
2990 val = virt_to_phys(ctxt_tbls[idx]) | 1;
2991 iommu->root_entry[bus].lo = val;
2994 if (!ext || !ctxt_tbls[idx + 1])
2997 val = virt_to_phys(ctxt_tbls[idx + 1]) | 1;
2998 iommu->root_entry[bus].hi = val;
3001 spin_unlock_irqrestore(&iommu->lock, flags);
3005 __iommu_flush_cache(iommu, iommu->root_entry, PAGE_SIZE);
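/*
 * Boot-time setup: allocate the per-IOMMU bookkeeping, create (or, on
 * kdump, copy) the root/context tables, build the identity and RMRR
 * mappings, and enable translation on every active DMAR unit.
 */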
3015 static int __init init_dmars(void)
3017 struct dmar_drhd_unit *drhd;
3018 struct dmar_rmrr_unit *rmrr;
3019 bool copied_tables = false;
3021 struct intel_iommu *iommu;
3027 * initialize and program root entry to not present
3030 for_each_drhd_unit(drhd) {
3032 * lock not needed as this is only incremented in the single
3033 * threaded kernel __init code path; all other accesses are read
3036 if (g_num_of_iommus < DMAR_UNITS_SUPPORTED) {
3040 pr_err_once("Exceeded %d IOMMUs\n", DMAR_UNITS_SUPPORTED);
3043 /* Preallocate enough resources for IOMMU hot-addition */
3044 if (g_num_of_iommus < DMAR_UNITS_SUPPORTED)
3045 g_num_of_iommus = DMAR_UNITS_SUPPORTED;
3047 g_iommus = kcalloc(g_num_of_iommus, sizeof(struct intel_iommu *),
3050 pr_err("Allocating global iommu array failed\n");
3055 deferred_flush = kzalloc(g_num_of_iommus *
3056 sizeof(struct deferred_flush_tables), GFP_KERNEL);
3057 if (!deferred_flush) {
3062 for_each_active_iommu(iommu, drhd) {
3063 g_iommus[iommu->seq_id] = iommu;
3065 intel_iommu_init_qi(iommu);
3067 ret = iommu_init_domains(iommu);
3071 init_translation_status(iommu);
3073 if (translation_pre_enabled(iommu) && !is_kdump_kernel()) {
3074 iommu_disable_translation(iommu);
3075 clear_translation_pre_enabled(iommu);
3076 pr_warn("Translation was enabled for %s but we are not in kdump mode\n",
3082 * we could share the same root & context tables
3083 * among all IOMMUs. Need to split them later.
3085 ret = iommu_alloc_root_entry(iommu);
3089 if (translation_pre_enabled(iommu)) {
3090 pr_info("Translation already enabled - trying to copy translation structures\n");
3092 ret = copy_translation_tables(iommu);
3095 * We found the IOMMU with translation
3096 * enabled - but failed to copy over the
3097 * old root-entry table. Try to proceed
3098 * by disabling translation now and
3099 * allocating a clean root-entry table.
3100 * This might cause DMAR faults, but
3101 * probably the dump will still succeed.
3103 pr_err("Failed to copy translation tables from previous kernel for %s\n",
3105 iommu_disable_translation(iommu);
3106 clear_translation_pre_enabled(iommu);
3108 pr_info("Copied translation tables from previous kernel for %s\n",
3110 copied_tables = true;
3114 iommu_flush_write_buffer(iommu);
3115 iommu_set_root_entry(iommu);
3116 iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
3117 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
3119 if (!ecap_pass_through(iommu->ecap))
3120 hw_pass_through = 0;
3123 if (iommu_pass_through)
3124 iommu_identity_mapping |= IDENTMAP_ALL;
3126 #ifdef CONFIG_INTEL_IOMMU_BROKEN_GFX_WA
3127 iommu_identity_mapping |= IDENTMAP_GFX;
3130 if (iommu_identity_mapping) {
3131 ret = si_domain_init(hw_pass_through);
3136 check_tylersburg_isoch();
3139 * If we copied translations from a previous kernel in the kdump
3140 * case, we can not assign the devices to domains now, as that
3141 * would eliminate the old mappings. So skip this part and defer
3142 * the assignment to device driver initialization time.
3148 * If pass through is not set or not enabled, set up context entries for
3149 * identity mappings for rmrr, gfx, and isa, possibly falling back to static
3150 * identity mapping if iommu_identity_mapping is set.
3152 if (iommu_identity_mapping) {
3153 ret = iommu_prepare_static_identity_mapping(hw_pass_through);
3155 pr_crit("Failed to setup IOMMU pass-through\n");
3161 * for each dev attached to rmrr
3163 * locate drhd for dev, alloc domain for dev
3164 * allocate free domain
3165 * allocate page table entries for rmrr
3166 * if context not allocated for bus
3167 * allocate and init context
3168 * set present in root table for this bus
3169 * init context with domain, translation etc
3173 pr_info("Setting RMRR:\n");
3174 for_each_rmrr_units(rmrr) {
3175 /* some BIOSes list non-existent devices in the DMAR table. */
3176 for_each_active_dev_scope(rmrr->devices, rmrr->devices_cnt,
3178 ret = iommu_prepare_rmrr_dev(rmrr, dev);
3180 pr_err("Mapping reserved region failed\n");
3184 iommu_prepare_isa();
3191 * global invalidate context cache
3192 * global invalidate iotlb
3193 * enable translation
3195 for_each_iommu(iommu, drhd) {
3196 if (drhd->ignored) {
3198 * we always have to disable PMRs or DMA may fail on
3202 iommu_disable_protect_mem_regions(iommu);
3206 iommu_flush_write_buffer(iommu);
3208 ret = dmar_set_interrupt(iommu);
3212 if (!translation_pre_enabled(iommu))
3213 iommu_enable_translation(iommu);
3215 iommu_disable_protect_mem_regions(iommu);
3221 for_each_active_iommu(iommu, drhd) {
3222 disable_dmar_iommu(iommu);
3223 free_dmar_iommu(iommu);
3225 kfree(deferred_flush);
3232 /* This takes a number of _MM_ pages, not VTD pages */
3233 static struct iova *intel_alloc_iova(struct device *dev,
3234 struct dmar_domain *domain,
3235 unsigned long nrpages, uint64_t dma_mask)
3237 struct iova *iova = NULL;
3239 /* Restrict dma_mask to the width that the iommu can handle */
3240 dma_mask = min_t(uint64_t, DOMAIN_MAX_ADDR(domain->gaw), dma_mask);
3242 if (!dmar_forcedac && dma_mask > DMA_BIT_MASK(32)) {
3244 * First try to allocate an io virtual address in
3245 * DMA_BIT_MASK(32) and if that fails then try allocating
3248 iova = alloc_iova(&domain->iovad, nrpages,
3249 IOVA_PFN(DMA_BIT_MASK(32)), 1);
3253 iova = alloc_iova(&domain->iovad, nrpages, IOVA_PFN(dma_mask), 1);
3254 if (unlikely(!iova)) {
3255 pr_err("Allocating %ld-page iova for %s failed\n",
3256 nrpages, dev_name(dev));
3263 static struct dmar_domain *__get_valid_domain_for_dev(struct device *dev)
3265 struct dmar_domain *domain;
3268 domain = get_domain_for_dev(dev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
3270 pr_err("Allocating domain for %s failed\n",
3275 /* make sure context mapping is ok */
3276 if (unlikely(!domain_context_mapped(dev))) {
3277 ret = domain_context_mapping(domain, dev);
3279 pr_err("Domain context map for %s failed\n",
3288 static inline struct dmar_domain *get_valid_domain_for_dev(struct device *dev)
3290 struct device_domain_info *info;
3292 /* No lock here, assumes no domain exit in normal case */
3293 info = dev->archdata.iommu;
3295 return info->domain;
3297 return __get_valid_domain_for_dev(dev);
3300 /* Check if the dev needs to go through non-identity map and unmap process.*/
3301 static int iommu_no_mapping(struct device *dev)
3305 if (iommu_dummy(dev))
3308 if (!iommu_identity_mapping)
3311 found = identity_mapping(dev);
3313 if (iommu_should_identity_map(dev, 0))
3317 * The 32 bit DMA device is removed from si_domain and falls back
3318 * to non-identity mapping.
3320 domain_remove_one_dev_info(si_domain, dev);
3321 pr_info("32bit %s uses non-identity mapping\n",
3327 * If a 64 bit DMA device has been detached from a VM, the device
3328 * is put back into si_domain for identity mapping.
3330 if (iommu_should_identity_map(dev, 0)) {
3332 ret = domain_add_dev_info(si_domain, dev);
3334 pr_info("64bit %s uses identity mapping\n",
3344 static dma_addr_t __intel_map_single(struct device *dev, phys_addr_t paddr,
3345 size_t size, int dir, u64 dma_mask)
3347 struct dmar_domain *domain;
3348 phys_addr_t start_paddr;
3352 struct intel_iommu *iommu;
3353 unsigned long paddr_pfn = paddr >> PAGE_SHIFT;
3355 BUG_ON(dir == DMA_NONE);
3357 if (iommu_no_mapping(dev))
3360 domain = get_valid_domain_for_dev(dev);
3364 iommu = domain_get_iommu(domain);
3365 size = aligned_nrpages(paddr, size);
3367 iova = intel_alloc_iova(dev, domain, dma_to_mm_pfn(size), dma_mask);
3372 * Check if DMAR supports zero-length reads on write only
3375 if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL || \
3376 !cap_zlr(iommu->cap))
3377 prot |= DMA_PTE_READ;
3378 if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
3379 prot |= DMA_PTE_WRITE;
3381 * paddr to (paddr + size) might span a partial page; we should map the whole
3382 * page. Note: if two parts of one page are mapped separately, we
3383 * might have two guest addresses mapping to the same host paddr, but this
3384 * is not a big problem
3386 ret = domain_pfn_mapping(domain, mm_to_dma_pfn(iova->pfn_lo),
3387 mm_to_dma_pfn(paddr_pfn), size, prot);
3391 /* it's a non-present to present mapping. Only flush if caching mode */
3392 if (cap_caching_mode(iommu->cap))
3393 iommu_flush_iotlb_psi(iommu, domain,
3394 mm_to_dma_pfn(iova->pfn_lo),
3397 iommu_flush_write_buffer(iommu);
3399 start_paddr = (phys_addr_t)iova->pfn_lo << PAGE_SHIFT;
3400 start_paddr += paddr & ~PAGE_MASK;
3405 __free_iova(&domain->iovad, iova);
3406 pr_err("Device %s request: %zx@%llx dir %d --- failed\n",
3407 dev_name(dev), size, (unsigned long long)paddr, dir);
3411 static dma_addr_t intel_map_page(struct device *dev, struct page *page,
3412 unsigned long offset, size_t size,
3413 enum dma_data_direction dir,
3414 struct dma_attrs *attrs)
3416 return __intel_map_single(dev, page_to_phys(page) + offset, size,
3417 dir, *dev->dma_mask);
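/*
 * Drain the per-IOMMU deferred_flush batches: invalidate the IOTLB (and
 * device IOTLBs), then free the batched IOVAs and their page freelists.
 */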
3420 static void flush_unmaps(void)
3426 /* just flush them all */
3427 for (i = 0; i < g_num_of_iommus; i++) {
3428 struct intel_iommu *iommu = g_iommus[i];
3432 if (!deferred_flush[i].next)
3435 /* In caching mode, global flushes turn emulation expensive */
3436 if (!cap_caching_mode(iommu->cap))
3437 iommu->flush.flush_iotlb(iommu, 0, 0, 0,
3438 DMA_TLB_GLOBAL_FLUSH);
3439 for (j = 0; j < deferred_flush[i].next; j++) {
3441 struct iova *iova = deferred_flush[i].iova[j];
3442 struct dmar_domain *domain = deferred_flush[i].domain[j];
3444 /* On real hardware multiple invalidations are expensive */
3445 if (cap_caching_mode(iommu->cap))
3446 iommu_flush_iotlb_psi(iommu, domain,
3447 iova->pfn_lo, iova_size(iova),
3448 !deferred_flush[i].freelist[j], 0);
3450 mask = ilog2(mm_to_dma_pfn(iova_size(iova)));
3451 iommu_flush_dev_iotlb(deferred_flush[i].domain[j],
3452 (uint64_t)iova->pfn_lo << PAGE_SHIFT, mask);
3454 __free_iova(&deferred_flush[i].domain[j]->iovad, iova);
3455 if (deferred_flush[i].freelist[j])
3456 dma_free_pagelist(deferred_flush[i].freelist[j]);
3458 deferred_flush[i].next = 0;
3464 static void flush_unmaps_timeout(unsigned long data)
3466 unsigned long flags;
3468 spin_lock_irqsave(&async_umap_flush_lock, flags);
3470 spin_unlock_irqrestore(&async_umap_flush_lock, flags);
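/*
 * Queue an unmapped IOVA (and its freed page-table pages) on the owning
 * IOMMU's deferred_flush table.  The batch is drained immediately once it
 * reaches HIGH_WATER_MARK, otherwise the unmap_timer drains it ~10ms later.
 */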
3473 static void add_unmap(struct dmar_domain *dom, struct iova *iova, struct page *freelist)
3475 unsigned long flags;
3477 struct intel_iommu *iommu;
3479 spin_lock_irqsave(&async_umap_flush_lock, flags);
3480 if (list_size == HIGH_WATER_MARK)
3483 iommu = domain_get_iommu(dom);
3484 iommu_id = iommu->seq_id;
3486 next = deferred_flush[iommu_id].next;
3487 deferred_flush[iommu_id].domain[next] = dom;
3488 deferred_flush[iommu_id].iova[next] = iova;
3489 deferred_flush[iommu_id].freelist[next] = freelist;
3490 deferred_flush[iommu_id].next++;
3493 mod_timer(&unmap_timer, jiffies + msecs_to_jiffies(10));
3497 spin_unlock_irqrestore(&async_umap_flush_lock, flags);
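/*
 * Tear down the DMA mapping at @dev_addr: look up its IOVA, clear the page
 * tables, and either flush the IOTLB right away (intel_iommu_strict) or
 * defer the flush and the IOVA release through add_unmap().
 */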
3500 static void intel_unmap(struct device *dev, dma_addr_t dev_addr)
3502 struct dmar_domain *domain;
3503 unsigned long start_pfn, last_pfn;
3505 struct intel_iommu *iommu;
3506 struct page *freelist;
3508 if (iommu_no_mapping(dev))
3511 domain = find_domain(dev);
3514 iommu = domain_get_iommu(domain);
3516 iova = find_iova(&domain->iovad, IOVA_PFN(dev_addr));
3517 if (WARN_ONCE(!iova, "Driver unmaps unmatched page at PFN %llx\n",
3518 (unsigned long long)dev_addr))
3521 start_pfn = mm_to_dma_pfn(iova->pfn_lo);
3522 last_pfn = mm_to_dma_pfn(iova->pfn_hi + 1) - 1;
3524 pr_debug("Device %s unmapping: pfn %lx-%lx\n",
3525 dev_name(dev), start_pfn, last_pfn);
3527 freelist = domain_unmap(domain, start_pfn, last_pfn);
3529 if (intel_iommu_strict) {
3530 iommu_flush_iotlb_psi(iommu, domain, start_pfn,
3531 last_pfn - start_pfn + 1, !freelist, 0);
3533 __free_iova(&domain->iovad, iova);
3534 dma_free_pagelist(freelist);
3536 add_unmap(domain, iova, freelist);
3538 * queue up the release of the unmap to save 1/6th of the
3539 * cpu used up by the iotlb flush operation...
3544 static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
3545 size_t size, enum dma_data_direction dir,
3546 struct dma_attrs *attrs)
3548 intel_unmap(dev, dev_addr);
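/*
 * dma_alloc_coherent() backend: allocate pages (preferring CMA when we may
 * sleep), zero them, and map them with __intel_map_single() against the
 * device's coherent DMA mask.
 */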
3551 static void *intel_alloc_coherent(struct device *dev, size_t size,
3552 dma_addr_t *dma_handle, gfp_t flags,
3553 struct dma_attrs *attrs)
3555 struct page *page = NULL;
3558 size = PAGE_ALIGN(size);
3559 order = get_order(size);
3561 if (!iommu_no_mapping(dev))
3562 flags &= ~(GFP_DMA | GFP_DMA32);
3563 else if (dev->coherent_dma_mask < dma_get_required_mask(dev)) {
3564 if (dev->coherent_dma_mask < DMA_BIT_MASK(32))
3570 if (flags & __GFP_WAIT) {
3571 unsigned int count = size >> PAGE_SHIFT;
3573 page = dma_alloc_from_contiguous(dev, count, order);
3574 if (page && iommu_no_mapping(dev) &&
3575 page_to_phys(page) + size > dev->coherent_dma_mask) {
3576 dma_release_from_contiguous(dev, page, count);
3582 page = alloc_pages(flags, order);
3585 memset(page_address(page), 0, size);
3587 *dma_handle = __intel_map_single(dev, page_to_phys(page), size,
3589 dev->coherent_dma_mask);
3591 return page_address(page);
3592 if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT))
3593 __free_pages(page, order);
3598 static void intel_free_coherent(struct device *dev, size_t size, void *vaddr,
3599 dma_addr_t dma_handle, struct dma_attrs *attrs)
3602 struct page *page = virt_to_page(vaddr);
3604 size = PAGE_ALIGN(size);
3605 order = get_order(size);
3607 intel_unmap(dev, dma_handle);
3608 if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT))
3609 __free_pages(page, order);
3612 static void intel_unmap_sg(struct device *dev, struct scatterlist *sglist,
3613 int nelems, enum dma_data_direction dir,
3614 struct dma_attrs *attrs)
3616 intel_unmap(dev, sglist[0].dma_address);
3619 static int intel_nontranslate_map_sg(struct device *hddev,
3620 struct scatterlist *sglist, int nelems, int dir)
3623 struct scatterlist *sg;
3625 for_each_sg(sglist, sg, nelems, i) {
3626 BUG_ON(!sg_page(sg));
3627 sg->dma_address = page_to_phys(sg_page(sg)) + sg->offset;
3628 sg->dma_length = sg->length;
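/*
 * Scatterlist mapping: allocate one IOVA range large enough for the whole
 * list, map every segment into it with domain_sg_mapping(), and flush the
 * IOTLB or write buffer as caching mode requires.
 */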
3633 static int intel_map_sg(struct device *dev, struct scatterlist *sglist, int nelems,
3634 enum dma_data_direction dir, struct dma_attrs *attrs)
3637 struct dmar_domain *domain;
3640 struct iova *iova = NULL;
3642 struct scatterlist *sg;
3643 unsigned long start_vpfn;
3644 struct intel_iommu *iommu;
3646 BUG_ON(dir == DMA_NONE);
3647 if (iommu_no_mapping(dev))
3648 return intel_nontranslate_map_sg(dev, sglist, nelems, dir);
3650 domain = get_valid_domain_for_dev(dev);
3654 iommu = domain_get_iommu(domain);
3656 for_each_sg(sglist, sg, nelems, i)
3657 size += aligned_nrpages(sg->offset, sg->length);
3659 iova = intel_alloc_iova(dev, domain, dma_to_mm_pfn(size),
3662 sglist->dma_length = 0;
3667 * Check if DMAR supports zero-length reads on write only
3670 if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL || \
3671 !cap_zlr(iommu->cap))
3672 prot |= DMA_PTE_READ;
3673 if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
3674 prot |= DMA_PTE_WRITE;
3676 start_vpfn = mm_to_dma_pfn(iova->pfn_lo);
3678 ret = domain_sg_mapping(domain, start_vpfn, sglist, size, prot);
3679 if (unlikely(ret)) {
3680 dma_pte_free_pagetable(domain, start_vpfn,
3681 start_vpfn + size - 1);
3682 __free_iova(&domain->iovad, iova);
3686 /* it's a non-present to present mapping. Only flush if caching mode */
3687 if (cap_caching_mode(iommu->cap))
3688 iommu_flush_iotlb_psi(iommu, domain, start_vpfn, size, 0, 1);
3690 iommu_flush_write_buffer(iommu);
3695 static int intel_mapping_error(struct device *dev, dma_addr_t dma_addr)
3700 struct dma_map_ops intel_dma_ops = {
3701 .alloc = intel_alloc_coherent,
3702 .free = intel_free_coherent,
3703 .map_sg = intel_map_sg,
3704 .unmap_sg = intel_unmap_sg,
3705 .map_page = intel_map_page,
3706 .unmap_page = intel_unmap_page,
3707 .mapping_error = intel_mapping_error,
3710 static inline int iommu_domain_cache_init(void)
3714 iommu_domain_cache = kmem_cache_create("iommu_domain",
3715 sizeof(struct dmar_domain),
3720 if (!iommu_domain_cache) {
3721 pr_err("Couldn't create iommu_domain cache\n");
3728 static inline int iommu_devinfo_cache_init(void)
3732 iommu_devinfo_cache = kmem_cache_create("iommu_devinfo",
3733 sizeof(struct device_domain_info),
3737 if (!iommu_devinfo_cache) {
3738 pr_err("Couldn't create devinfo cache\n");
3745 static int __init iommu_init_mempool(void)
3748 ret = iommu_iova_cache_init();
3752 ret = iommu_domain_cache_init();
3756 ret = iommu_devinfo_cache_init();
3760 kmem_cache_destroy(iommu_domain_cache);
3762 iommu_iova_cache_destroy();
3767 static void __init iommu_exit_mempool(void)
3769 kmem_cache_destroy(iommu_devinfo_cache);
3770 kmem_cache_destroy(iommu_domain_cache);
3771 iommu_iova_cache_destroy();
3774 static void quirk_ioat_snb_local_iommu(struct pci_dev *pdev)
3776 struct dmar_drhd_unit *drhd;
3780 /* We know that this device on this chipset has its own IOMMU.
3781 * If we find it under a different IOMMU, then the BIOS is lying
3782 * to us. Hope that the IOMMU for this device is actually
3783 * disabled, and it needs no translation...
3785 rc = pci_bus_read_config_dword(pdev->bus, PCI_DEVFN(0, 0), 0xb0, &vtbar);
3787 /* "can't" happen */
3788 dev_info(&pdev->dev, "failed to run vt-d quirk\n");
3791 vtbar &= 0xffff0000;
3793 /* we know that this iommu should be at offset 0xa000 from vtbar */
3794 drhd = dmar_find_matched_drhd_unit(pdev);
3795 if (WARN_TAINT_ONCE(!drhd || drhd->reg_base_addr - vtbar != 0xa000,
3796 TAINT_FIRMWARE_WORKAROUND,
3797 "BIOS assigned incorrect VT-d unit for Intel(R) QuickData Technology device\n"))
3798 pdev->dev.archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
3800 DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB, quirk_ioat_snb_local_iommu);
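/*
 * Mark DRHD units we will not use: units whose device scope turned out to
 * be empty and, unless graphics mapping is enabled, units covering only
 * graphics devices.  Devices under such units get DUMMY_DEVICE_DOMAIN_INFO
 * so the rest of the driver bypasses them.
 */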
3802 static void __init init_no_remapping_devices(void)
3804 struct dmar_drhd_unit *drhd;
3808 for_each_drhd_unit(drhd) {
3809 if (!drhd->include_all) {
3810 for_each_active_dev_scope(drhd->devices,
3811 drhd->devices_cnt, i, dev)
3813 /* ignore DMAR unit if no devices exist */
3814 if (i == drhd->devices_cnt)
3819 for_each_active_drhd_unit(drhd) {
3820 if (drhd->include_all)
3823 for_each_active_dev_scope(drhd->devices,
3824 drhd->devices_cnt, i, dev)
3825 if (!dev_is_pci(dev) || !IS_GFX_DEVICE(to_pci_dev(dev)))
3827 if (i < drhd->devices_cnt)
3830 /* This IOMMU has *only* gfx devices. Either bypass it or
3831 set the gfx_mapped flag, as appropriate */
3833 intel_iommu_gfx_mapped = 1;
3836 for_each_active_dev_scope(drhd->devices,
3837 drhd->devices_cnt, i, dev)
3838 dev->archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
3843 #ifdef CONFIG_SUSPEND
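/*
 * Suspend/resume support: iommu_suspend() saves the fault-event registers
 * and disables translation; iommu_resume() re-initialises the hardware via
 * init_iommu_hw() and restores those registers.
 */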
3844 static int init_iommu_hw(void)
3846 struct dmar_drhd_unit *drhd;
3847 struct intel_iommu *iommu = NULL;
3849 for_each_active_iommu(iommu, drhd)
3851 dmar_reenable_qi(iommu);
3853 for_each_iommu(iommu, drhd) {
3854 if (drhd->ignored) {
3856 * we always have to disable PMRs or DMA may fail on
3860 iommu_disable_protect_mem_regions(iommu);
3864 iommu_flush_write_buffer(iommu);
3866 iommu_set_root_entry(iommu);
3868 iommu->flush.flush_context(iommu, 0, 0, 0,
3869 DMA_CCMD_GLOBAL_INVL);
3870 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
3871 iommu_enable_translation(iommu);
3872 iommu_disable_protect_mem_regions(iommu);
3878 static void iommu_flush_all(void)
3880 struct dmar_drhd_unit *drhd;
3881 struct intel_iommu *iommu;
3883 for_each_active_iommu(iommu, drhd) {
3884 iommu->flush.flush_context(iommu, 0, 0, 0,
3885 DMA_CCMD_GLOBAL_INVL);
3886 iommu->flush.flush_iotlb(iommu, 0, 0, 0,
3887 DMA_TLB_GLOBAL_FLUSH);
3891 static int iommu_suspend(void)
3893 struct dmar_drhd_unit *drhd;
3894 struct intel_iommu *iommu = NULL;
3897 for_each_active_iommu(iommu, drhd) {
3898 iommu->iommu_state = kzalloc(sizeof(u32) * MAX_SR_DMAR_REGS,
3900 if (!iommu->iommu_state)
3906 for_each_active_iommu(iommu, drhd) {
3907 iommu_disable_translation(iommu);
3909 raw_spin_lock_irqsave(&iommu->register_lock, flag);
3911 iommu->iommu_state[SR_DMAR_FECTL_REG] =
3912 readl(iommu->reg + DMAR_FECTL_REG);
3913 iommu->iommu_state[SR_DMAR_FEDATA_REG] =
3914 readl(iommu->reg + DMAR_FEDATA_REG);
3915 iommu->iommu_state[SR_DMAR_FEADDR_REG] =
3916 readl(iommu->reg + DMAR_FEADDR_REG);
3917 iommu->iommu_state[SR_DMAR_FEUADDR_REG] =
3918 readl(iommu->reg + DMAR_FEUADDR_REG);
3920 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
3925 for_each_active_iommu(iommu, drhd)
3926 kfree(iommu->iommu_state);
3931 static void iommu_resume(void)
3933 struct dmar_drhd_unit *drhd;
3934 struct intel_iommu *iommu = NULL;
3937 if (init_iommu_hw()) {
3939 panic("tboot: IOMMU setup failed, DMAR can not resume!\n");
3941 WARN(1, "IOMMU setup failed, DMAR can not resume!\n");
3945 for_each_active_iommu(iommu, drhd) {
3947 raw_spin_lock_irqsave(&iommu->register_lock, flag);
3949 writel(iommu->iommu_state[SR_DMAR_FECTL_REG],
3950 iommu->reg + DMAR_FECTL_REG);
3951 writel(iommu->iommu_state[SR_DMAR_FEDATA_REG],
3952 iommu->reg + DMAR_FEDATA_REG);
3953 writel(iommu->iommu_state[SR_DMAR_FEADDR_REG],
3954 iommu->reg + DMAR_FEADDR_REG);
3955 writel(iommu->iommu_state[SR_DMAR_FEUADDR_REG],
3956 iommu->reg + DMAR_FEUADDR_REG);
3958 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
3961 for_each_active_iommu(iommu, drhd)
3962 kfree(iommu->iommu_state);
3965 static struct syscore_ops iommu_syscore_ops = {
3966 .resume = iommu_resume,
3967 .suspend = iommu_suspend,
3970 static void __init init_iommu_pm_ops(void)
3972 register_syscore_ops(&iommu_syscore_ops);
3976 static inline void init_iommu_pm_ops(void) {}
3977 #endif /* CONFIG_SUSPEND */
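/*
 * Parse an ACPI RMRR structure into a dmar_rmrr_unit, recording its address
 * range and device scope, and add it to the global dmar_rmrr_units list.
 */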
3980 int __init dmar_parse_one_rmrr(struct acpi_dmar_header *header, void *arg)
3982 struct acpi_dmar_reserved_memory *rmrr;
3983 struct dmar_rmrr_unit *rmrru;
3985 rmrru = kzalloc(sizeof(*rmrru), GFP_KERNEL);
3989 rmrru->hdr = header;
3990 rmrr = (struct acpi_dmar_reserved_memory *)header;
3991 rmrru->base_address = rmrr->base_address;
3992 rmrru->end_address = rmrr->end_address;
3993 rmrru->devices = dmar_alloc_dev_scope((void *)(rmrr + 1),
3994 ((void *)rmrr) + rmrr->header.length,
3995 &rmrru->devices_cnt);
3996 if (rmrru->devices_cnt && rmrru->devices == NULL) {
4001 list_add(&rmrru->list, &dmar_rmrr_units);
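/* Find an already-registered ATSR unit that matches @atsr exactly. */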
4006 static struct dmar_atsr_unit *dmar_find_atsr(struct acpi_dmar_atsr *atsr)
4008 struct dmar_atsr_unit *atsru;
4009 struct acpi_dmar_atsr *tmp;
4011 list_for_each_entry_rcu(atsru, &dmar_atsr_units, list) {
4012 tmp = (struct acpi_dmar_atsr *)atsru->hdr;
4013 if (atsr->segment != tmp->segment)
4015 if (atsr->header.length != tmp->header.length)
4017 if (memcmp(atsr, tmp, atsr->header.length) == 0)
4024 int dmar_parse_one_atsr(struct acpi_dmar_header *hdr, void *arg)
4026 struct acpi_dmar_atsr *atsr;
4027 struct dmar_atsr_unit *atsru;
4029 if (system_state != SYSTEM_BOOTING && !intel_iommu_enabled)
4032 atsr = container_of(hdr, struct acpi_dmar_atsr, header);
4033 atsru = dmar_find_atsr(atsr);
4037 atsru = kzalloc(sizeof(*atsru) + hdr->length, GFP_KERNEL);
4042 * If memory is allocated from slab by ACPI _DSM method, we need to
4043 * copy the memory content because the memory buffer will be freed
4046 atsru->hdr = (void *)(atsru + 1);
4047 memcpy(atsru->hdr, hdr, hdr->length);
4048 atsru->include_all = atsr->flags & 0x1;
4049 if (!atsru->include_all) {
4050 atsru->devices = dmar_alloc_dev_scope((void *)(atsr + 1),
4051 (void *)atsr + atsr->header.length,
4052 &atsru->devices_cnt);
4053 if (atsru->devices_cnt && atsru->devices == NULL) {
4059 list_add_rcu(&atsru->list, &dmar_atsr_units);
4064 static void intel_iommu_free_atsr(struct dmar_atsr_unit *atsru)
4066 dmar_free_dev_scope(&atsru->devices, &atsru->devices_cnt);
4070 int dmar_release_one_atsr(struct acpi_dmar_header *hdr, void *arg)
4072 struct acpi_dmar_atsr *atsr;
4073 struct dmar_atsr_unit *atsru;
4075 atsr = container_of(hdr, struct acpi_dmar_atsr, header);
4076 atsru = dmar_find_atsr(atsr);
4078 list_del_rcu(&atsru->list);
4080 intel_iommu_free_atsr(atsru);
4086 int dmar_check_one_atsr(struct acpi_dmar_header *hdr, void *arg)
4090 struct acpi_dmar_atsr *atsr;
4091 struct dmar_atsr_unit *atsru;
4093 atsr = container_of(hdr, struct acpi_dmar_atsr, header);
4094 atsru = dmar_find_atsr(atsr);
4098 if (!atsru->include_all && atsru->devices && atsru->devices_cnt)
4099 for_each_active_dev_scope(atsru->devices, atsru->devices_cnt,
4106 static int intel_iommu_add(struct dmar_drhd_unit *dmaru)
4109 struct intel_iommu *iommu = dmaru->iommu;
4111 if (g_iommus[iommu->seq_id])
4114 if (hw_pass_through && !ecap_pass_through(iommu->ecap)) {
4115 pr_warn("%s: Doesn't support hardware pass through.\n",
4119 if (!ecap_sc_support(iommu->ecap) &&
4120 domain_update_iommu_snooping(iommu)) {
4121 pr_warn("%s: Doesn't support snooping.\n",
4125 sp = domain_update_iommu_superpage(iommu) - 1;
4126 if (sp >= 0 && !(cap_super_page_val(iommu->cap) & (1 << sp))) {
4127 pr_warn("%s: Doesn't support large page.\n",
4133 * Disable translation if already enabled prior to OS handover.
4135 if (iommu->gcmd & DMA_GCMD_TE)
4136 iommu_disable_translation(iommu);
4138 g_iommus[iommu->seq_id] = iommu;
4139 ret = iommu_init_domains(iommu);
4141 ret = iommu_alloc_root_entry(iommu);
4145 if (dmaru->ignored) {
4147 * we always have to disable PMRs or DMA may fail on this device
4150 iommu_disable_protect_mem_regions(iommu);
4154 intel_iommu_init_qi(iommu);
4155 iommu_flush_write_buffer(iommu);
4156 ret = dmar_set_interrupt(iommu);
4160 iommu_set_root_entry(iommu);
4161 iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
4162 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
4163 iommu_enable_translation(iommu);
4165 iommu_disable_protect_mem_regions(iommu);
4169 disable_dmar_iommu(iommu);
4171 free_dmar_iommu(iommu);
4175 int dmar_iommu_hotplug(struct dmar_drhd_unit *dmaru, bool insert)
4178 struct intel_iommu *iommu = dmaru->iommu;
4180 if (!intel_iommu_enabled)
4186 ret = intel_iommu_add(dmaru);
4188 disable_dmar_iommu(iommu);
4189 free_dmar_iommu(iommu);
4195 static void intel_iommu_free_dmars(void)
4197 struct dmar_rmrr_unit *rmrru, *rmrr_n;
4198 struct dmar_atsr_unit *atsru, *atsr_n;
4200 list_for_each_entry_safe(rmrru, rmrr_n, &dmar_rmrr_units, list) {
4201 list_del(&rmrru->list);
4202 dmar_free_dev_scope(&rmrru->devices, &rmrru->devices_cnt);
4206 list_for_each_entry_safe(atsru, atsr_n, &dmar_atsr_units, list) {
4207 list_del(&atsru->list);
4208 intel_iommu_free_atsr(atsru);
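/*
 * Decide whether ATS may be used for @dev: walk up to the PCIe root port
 * above the device and check whether any ATSR unit for this segment either
 * lists that port in its device scope or is an include_all unit.
 */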
4212 int dmar_find_matched_atsr_unit(struct pci_dev *dev)
4215 struct pci_bus *bus;
4216 struct pci_dev *bridge = NULL;
4218 struct acpi_dmar_atsr *atsr;
4219 struct dmar_atsr_unit *atsru;
4221 dev = pci_physfn(dev);
4222 for (bus = dev->bus; bus; bus = bus->parent) {
4224 if (!bridge || !pci_is_pcie(bridge) ||
4225 pci_pcie_type(bridge) == PCI_EXP_TYPE_PCI_BRIDGE)
4227 if (pci_pcie_type(bridge) == PCI_EXP_TYPE_ROOT_PORT)
4234 list_for_each_entry_rcu(atsru, &dmar_atsr_units, list) {
4235 atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
4236 if (atsr->segment != pci_domain_nr(dev->bus))
4239 for_each_dev_scope(atsru->devices, atsru->devices_cnt, i, tmp)
4240 if (tmp == &bridge->dev)
4243 if (atsru->include_all)
4253 int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info *info)
4256 struct dmar_rmrr_unit *rmrru;
4257 struct dmar_atsr_unit *atsru;
4258 struct acpi_dmar_atsr *atsr;
4259 struct acpi_dmar_reserved_memory *rmrr;
4261 if (!intel_iommu_enabled && system_state != SYSTEM_BOOTING)
4264 list_for_each_entry(rmrru, &dmar_rmrr_units, list) {
4265 rmrr = container_of(rmrru->hdr,
4266 struct acpi_dmar_reserved_memory, header);
4267 if (info->event == BUS_NOTIFY_ADD_DEVICE) {
4268 ret = dmar_insert_dev_scope(info, (void *)(rmrr + 1),
4269 ((void *)rmrr) + rmrr->header.length,
4270 rmrr->segment, rmrru->devices,
4271 rmrru->devices_cnt);
4274 } else if (info->event == BUS_NOTIFY_DEL_DEVICE) {
4275 dmar_remove_dev_scope(info, rmrr->segment,
4276 rmrru->devices, rmrru->devices_cnt);
4280 list_for_each_entry(atsru, &dmar_atsr_units, list) {
4281 if (atsru->include_all)
4284 atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
4285 if (info->event == BUS_NOTIFY_ADD_DEVICE) {
4286 ret = dmar_insert_dev_scope(info, (void *)(atsr + 1),
4287 (void *)atsr + atsr->header.length,
4288 atsr->segment, atsru->devices,
4289 atsru->devices_cnt);
4294 } else if (info->event == BUS_NOTIFY_DEL_DEVICE) {
4295 if (dmar_remove_dev_scope(info, atsr->segment,
4296 atsru->devices, atsru->devices_cnt))
4305 * Here we only respond to the action of a device being unbound from its driver.
4307 * A newly added device is not attached to its DMAR domain here yet. That will happen
4308 * when the device is first mapped to an iova.
4310 static int device_notifier(struct notifier_block *nb,
4311 unsigned long action, void *data)
4313 struct device *dev = data;
4314 struct dmar_domain *domain;
4316 if (iommu_dummy(dev))
4319 if (action != BUS_NOTIFY_REMOVED_DEVICE)
4322 domain = find_domain(dev);
4326 down_read(&dmar_global_lock);
4327 domain_remove_one_dev_info(domain, dev);
4328 if (!domain_type_is_vm_or_si(domain) && list_empty(&domain->devices))
4329 domain_exit(domain);
4330 up_read(&dmar_global_lock);
4335 static struct notifier_block device_nb = {
4336 .notifier_call = device_notifier,
4339 static int intel_iommu_memory_notifier(struct notifier_block *nb,
4340 unsigned long val, void *v)
4342 struct memory_notify *mhp = v;
4343 unsigned long long start, end;
4344 unsigned long start_vpfn, last_vpfn;
4347 case MEM_GOING_ONLINE:
4348 start = mhp->start_pfn << PAGE_SHIFT;
4349 end = ((mhp->start_pfn + mhp->nr_pages) << PAGE_SHIFT) - 1;
4350 if (iommu_domain_identity_map(si_domain, start, end)) {
4351 pr_warn("Failed to build identity map for [%llx-%llx]\n",
4358 case MEM_CANCEL_ONLINE:
4359 start_vpfn = mm_to_dma_pfn(mhp->start_pfn);
4360 last_vpfn = mm_to_dma_pfn(mhp->start_pfn + mhp->nr_pages - 1);
4361 while (start_vpfn <= last_vpfn) {
4363 struct dmar_drhd_unit *drhd;
4364 struct intel_iommu *iommu;
4365 struct page *freelist;
4367 iova = find_iova(&si_domain->iovad, start_vpfn);
4369 pr_debug("Failed to get IOVA for PFN %lx\n",
4374 iova = split_and_remove_iova(&si_domain->iovad, iova,
4375 start_vpfn, last_vpfn);
4377 pr_warn("Failed to split IOVA PFN [%lx-%lx]\n",
4378 start_vpfn, last_vpfn);
4382 freelist = domain_unmap(si_domain, iova->pfn_lo,
4386 for_each_active_iommu(iommu, drhd)
4387 iommu_flush_iotlb_psi(iommu, si_domain,
4388 iova->pfn_lo, iova_size(iova),
4391 dma_free_pagelist(freelist);
4393 start_vpfn = iova->pfn_hi + 1;
4394 free_iova_mem(iova);
4402 static struct notifier_block intel_iommu_memory_nb = {
4403 .notifier_call = intel_iommu_memory_notifier,
4408 static ssize_t intel_iommu_show_version(struct device *dev,
4409 struct device_attribute *attr,
4412 struct intel_iommu *iommu = dev_get_drvdata(dev);
4413 u32 ver = readl(iommu->reg + DMAR_VER_REG);
4414 return sprintf(buf, "%d:%d\n",
4415 DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver));
4417 static DEVICE_ATTR(version, S_IRUGO, intel_iommu_show_version, NULL);
4419 static ssize_t intel_iommu_show_address(struct device *dev,
4420 struct device_attribute *attr,
4423 struct intel_iommu *iommu = dev_get_drvdata(dev);
4424 return sprintf(buf, "%llx\n", iommu->reg_phys);
4426 static DEVICE_ATTR(address, S_IRUGO, intel_iommu_show_address, NULL);
4428 static ssize_t intel_iommu_show_cap(struct device *dev,
4429 struct device_attribute *attr,
4432 struct intel_iommu *iommu = dev_get_drvdata(dev);
4433 return sprintf(buf, "%llx\n", iommu->cap);
4435 static DEVICE_ATTR(cap, S_IRUGO, intel_iommu_show_cap, NULL);
4437 static ssize_t intel_iommu_show_ecap(struct device *dev,
4438 struct device_attribute *attr,
4441 struct intel_iommu *iommu = dev_get_drvdata(dev);
4442 return sprintf(buf, "%llx\n", iommu->ecap);
4444 static DEVICE_ATTR(ecap, S_IRUGO, intel_iommu_show_ecap, NULL);
4446 static ssize_t intel_iommu_show_ndoms(struct device *dev,
4447 struct device_attribute *attr,
4450 struct intel_iommu *iommu = dev_get_drvdata(dev);
4451 return sprintf(buf, "%ld\n", cap_ndoms(iommu->cap));
4453 static DEVICE_ATTR(domains_supported, S_IRUGO, intel_iommu_show_ndoms, NULL);
4455 static ssize_t intel_iommu_show_ndoms_used(struct device *dev,
4456 struct device_attribute *attr,
4459 struct intel_iommu *iommu = dev_get_drvdata(dev);
4460 return sprintf(buf, "%d\n", bitmap_weight(iommu->domain_ids,
4461 cap_ndoms(iommu->cap)));
4463 static DEVICE_ATTR(domains_used, S_IRUGO, intel_iommu_show_ndoms_used, NULL);
4465 static struct attribute *intel_iommu_attrs[] = {
4466 &dev_attr_version.attr,
4467 &dev_attr_address.attr,
4469 &dev_attr_ecap.attr,
4470 &dev_attr_domains_supported.attr,
4471 &dev_attr_domains_used.attr,
4475 static struct attribute_group intel_iommu_group = {
4476 .name = "intel-iommu",
4477 .attrs = intel_iommu_attrs,
4480 const struct attribute_group *intel_iommu_groups[] = {
4485 int __init intel_iommu_init(void)
4488 struct dmar_drhd_unit *drhd;
4489 struct intel_iommu *iommu;
4491 /* VT-d is required for a TXT/tboot launch, so enforce that */
4492 force_on = tboot_force_iommu();
4494 if (iommu_init_mempool()) {
4496 panic("tboot: Failed to initialize iommu memory\n");
4500 down_write(&dmar_global_lock);
4501 if (dmar_table_init()) {
4503 panic("tboot: Failed to initialize DMAR table\n");
4507 if (dmar_dev_scope_init() < 0) {
4509 panic("tboot: Failed to initialize DMAR device scope\n");
4513 if (no_iommu || dmar_disabled)
4516 if (list_empty(&dmar_rmrr_units))
4517 pr_info("No RMRR found\n");
4519 if (list_empty(&dmar_atsr_units))
4520 pr_info("No ATSR found\n");
4522 if (dmar_init_reserved_ranges()) {
4524 panic("tboot: Failed to reserve iommu ranges\n");
4525 goto out_free_reserved_range;
4528 init_no_remapping_devices();
4533 panic("tboot: Failed to initialize DMARs\n");
4534 pr_err("Initialization failed\n");
4535 goto out_free_reserved_range;
4537 up_write(&dmar_global_lock);
4538 pr_info("Intel(R) Virtualization Technology for Directed I/O\n");
4540 init_timer(&unmap_timer);
4541 #ifdef CONFIG_SWIOTLB
4544 dma_ops = &intel_dma_ops;
4546 init_iommu_pm_ops();
4548 for_each_active_iommu(iommu, drhd)
4549 iommu->iommu_dev = iommu_device_create(NULL, iommu,
4553 bus_set_iommu(&pci_bus_type, &intel_iommu_ops);
4554 bus_register_notifier(&pci_bus_type, &device_nb);
4555 if (si_domain && !hw_pass_through)
4556 register_memory_notifier(&intel_iommu_memory_nb);
4558 intel_iommu_enabled = 1;
4562 out_free_reserved_range:
4563 put_iova_domain(&reserved_iova_list);
4565 intel_iommu_free_dmars();
4566 up_write(&dmar_global_lock);
4567 iommu_exit_mempool();
4571 static int iommu_detach_dev_cb(struct pci_dev *pdev, u16 alias, void *opaque)
4573 struct intel_iommu *iommu = opaque;
4575 iommu_detach_dev(iommu, PCI_BUS_NUM(alias), alias & 0xff);
4580 * NB - intel-iommu lacks any sort of reference counting for the users of
4581 * dependent devices. If multiple endpoints have intersecting dependent
4582 * devices, unbinding the driver from any one of them will possibly leave
4583 * the others unable to operate.
4585 static void iommu_detach_dependent_devices(struct intel_iommu *iommu,
4588 if (!iommu || !dev || !dev_is_pci(dev))
4591 pci_for_each_dma_alias(to_pci_dev(dev), &iommu_detach_dev_cb, iommu);
4594 static void domain_remove_one_dev_info(struct dmar_domain *domain,
4597 struct device_domain_info *info;
4598 struct intel_iommu *iommu;
4599 unsigned long flags;
4602 iommu = device_to_iommu(dev, &bus, &devfn);
4606 info = dev->archdata.iommu;
4611 spin_lock_irqsave(&device_domain_lock, flags);
4612 unlink_domain_info(info);
4613 spin_unlock_irqrestore(&device_domain_lock, flags);
4615 iommu_disable_dev_iotlb(info);
4616 iommu_detach_dev(iommu, info->bus, info->devfn);
4617 iommu_detach_dependent_devices(iommu, dev);
4618 free_devinfo_mem(info);
4619 domain_detach_iommu(domain, iommu);
4621 spin_lock_irqsave(&domain->iommu_lock, flags);
4622 if (!domain->iommu_refcnt[iommu->seq_id])
4623 iommu_detach_domain(domain, iommu);
4624 spin_unlock_irqrestore(&domain->iommu_lock, flags);
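/*
 * Shared initialisation for the static identity domain and IOMMU-API (VM)
 * domains: set up the IOVA allocator, reserved ranges, address widths and
 * the top-level page directory.
 */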
4627 static int md_domain_init(struct dmar_domain *domain, int guest_width)
4631 init_iova_domain(&domain->iovad, VTD_PAGE_SIZE, IOVA_START_PFN,
4633 domain_reserve_special_ranges(domain);
4635 /* calculate AGAW */
4636 domain->gaw = guest_width;
4637 adjust_width = guestwidth_to_adjustwidth(guest_width);
4638 domain->agaw = width_to_agaw(adjust_width);
4640 domain->iommu_coherency = 0;
4641 domain->iommu_snooping = 0;
4642 domain->iommu_superpage = 0;
4643 domain->max_addr = 0;
4645 /* always allocate the top pgd */
4646 domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
4649 domain_flush_cache(domain, domain->pgd, PAGE_SIZE);
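/*
 * IOMMU API: allocate an unmanaged (VM) domain with the default address
 * width and report its aperture geometry to the IOMMU core.
 */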
4653 static struct iommu_domain *intel_iommu_domain_alloc(unsigned type)
4655 struct dmar_domain *dmar_domain;
4656 struct iommu_domain *domain;
4658 if (type != IOMMU_DOMAIN_UNMANAGED)
4661 dmar_domain = alloc_domain(DOMAIN_FLAG_VIRTUAL_MACHINE);
4663 pr_err("Can't allocate dmar_domain\n");
4666 if (md_domain_init(dmar_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
4667 pr_err("Domain initialization failed\n");
4668 domain_exit(dmar_domain);
4671 domain_update_iommu_cap(dmar_domain);
4673 domain = &dmar_domain->domain;
4674 domain->geometry.aperture_start = 0;
4675 domain->geometry.aperture_end = __DOMAIN_MAX_ADDR(dmar_domain->gaw);
4676 domain->geometry.force_aperture = true;
4681 static void intel_iommu_domain_free(struct iommu_domain *domain)
4683 domain_exit(to_dmar_domain(domain));
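/*
 * IOMMU API attach: refuse devices locked down by RMRRs, detach the device
 * from whatever domain currently holds it, make sure this IOMMU's address
 * width covers what the domain has already mapped, then add the device.
 */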
4686 static int intel_iommu_attach_device(struct iommu_domain *domain,
4689 struct dmar_domain *dmar_domain = to_dmar_domain(domain);
4690 struct intel_iommu *iommu;
4694 if (device_is_rmrr_locked(dev)) {
4695 dev_warn(dev, "Device is ineligible for IOMMU domain attach due to platform RMRR requirement. Contact your platform vendor.\n");
4699 /* normally dev is not mapped */
4700 if (unlikely(domain_context_mapped(dev))) {
4701 struct dmar_domain *old_domain;
4703 old_domain = find_domain(dev);
4705 if (domain_type_is_vm_or_si(dmar_domain))
4706 domain_remove_one_dev_info(old_domain, dev);
4708 domain_remove_dev_info(old_domain);
4710 if (!domain_type_is_vm_or_si(old_domain) &&
4711 list_empty(&old_domain->devices))
4712 domain_exit(old_domain);
4716 iommu = device_to_iommu(dev, &bus, &devfn);
4720 /* check if this iommu agaw is sufficient for max mapped address */
4721 addr_width = agaw_to_width(iommu->agaw);
4722 if (addr_width > cap_mgaw(iommu->cap))
4723 addr_width = cap_mgaw(iommu->cap);
4725 if (dmar_domain->max_addr > (1LL << addr_width)) {
4726 pr_err("%s: iommu width (%d) is not sufficient for the mapped address (%llx)\n",
4727 __func__, addr_width, dmar_domain->max_addr);
4731 dmar_domain->gaw = addr_width;
4734 * Knock out extra levels of page tables if necessary
4736 while (iommu->agaw < dmar_domain->agaw) {
4737 struct dma_pte *pte;
4739 pte = dmar_domain->pgd;
4740 if (dma_pte_present(pte)) {
4741 dmar_domain->pgd = (struct dma_pte *)
4742 phys_to_virt(dma_pte_addr(pte));
4743 free_pgtable_page(pte);
4745 dmar_domain->agaw--;
4748 return domain_add_dev_info(dmar_domain, dev);
4751 static void intel_iommu_detach_device(struct iommu_domain *domain,
4754 domain_remove_one_dev_info(to_dmar_domain(domain), dev);
4757 static int intel_iommu_map(struct iommu_domain *domain,
4758 unsigned long iova, phys_addr_t hpa,
4759 size_t size, int iommu_prot)
4761 struct dmar_domain *dmar_domain = to_dmar_domain(domain);
4766 if (iommu_prot & IOMMU_READ)
4767 prot |= DMA_PTE_READ;
4768 if (iommu_prot & IOMMU_WRITE)
4769 prot |= DMA_PTE_WRITE;
4770 if ((iommu_prot & IOMMU_CACHE) && dmar_domain->iommu_snooping)
4771 prot |= DMA_PTE_SNP;
4773 max_addr = iova + size;
4774 if (dmar_domain->max_addr < max_addr) {
4777 /* check if minimum agaw is sufficient for mapped address */
4778 end = __DOMAIN_MAX_ADDR(dmar_domain->gaw) + 1;
4779 if (end < max_addr) {
4780 pr_err("%s: iommu width (%d) is not sufficient for the mapped address (%llx)\n",
4781 __func__, dmar_domain->gaw, max_addr);
4785 dmar_domain->max_addr = max_addr;
4787 /* Round up size to next multiple of PAGE_SIZE, if it and
4788 the low bits of hpa would take us onto the next page */
4789 size = aligned_nrpages(hpa, size);
4790 ret = domain_pfn_mapping(dmar_domain, iova >> VTD_PAGE_SHIFT,
4791 hpa >> VTD_PAGE_SHIFT, size, prot);
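/*
 * IOMMU API unmap: clear the page tables for the range (widening it to the
 * superpage boundary if a large-page PTE is hit), flush the IOTLB on every
 * IOMMU the domain is attached to, then free the unlinked page-table pages.
 */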
4795 static size_t intel_iommu_unmap(struct iommu_domain *domain,
4796 unsigned long iova, size_t size)
4798 struct dmar_domain *dmar_domain = to_dmar_domain(domain);
4799 struct page *freelist = NULL;
4800 struct intel_iommu *iommu;
4801 unsigned long start_pfn, last_pfn;
4802 unsigned int npages;
4803 int iommu_id, level = 0;
4805 /* Cope with horrid API which requires us to unmap more than the
4806 size argument if it happens to be a large-page mapping. */
4807 if (!pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, &level))
4810 if (size < VTD_PAGE_SIZE << level_to_offset_bits(level))
4811 size = VTD_PAGE_SIZE << level_to_offset_bits(level);
4813 start_pfn = iova >> VTD_PAGE_SHIFT;
4814 last_pfn = (iova + size - 1) >> VTD_PAGE_SHIFT;
4816 freelist = domain_unmap(dmar_domain, start_pfn, last_pfn);
4818 npages = last_pfn - start_pfn + 1;
4820 for_each_domain_iommu(iommu_id, dmar_domain) {
4821 iommu = g_iommus[iommu_id];
4823 iommu_flush_iotlb_psi(g_iommus[iommu_id], dmar_domain,
4824 start_pfn, npages, !freelist, 0);
4827 dma_free_pagelist(freelist);
4829 if (dmar_domain->max_addr == iova + size)
4830 dmar_domain->max_addr = iova;
4835 static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
4838 struct dmar_domain *dmar_domain = to_dmar_domain(domain);
4839 struct dma_pte *pte;
4843 pte = pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, &level);
4845 phys = dma_pte_addr(pte);
4850 static bool intel_iommu_capable(enum iommu_cap cap)
4852 if (cap == IOMMU_CAP_CACHE_COHERENCY)
4853 return domain_update_iommu_snooping(NULL) == 1;
4854 if (cap == IOMMU_CAP_INTR_REMAP)
4855 return irq_remapping_enabled == 1;
4860 static int intel_iommu_add_device(struct device *dev)
4862 struct intel_iommu *iommu;
4863 struct iommu_group *group;
4866 iommu = device_to_iommu(dev, &bus, &devfn);
4870 iommu_device_link(iommu->iommu_dev, dev);
4872 group = iommu_group_get_for_dev(dev);
4875 return PTR_ERR(group);
4877 iommu_group_put(group);
4881 static void intel_iommu_remove_device(struct device *dev)
4883 struct intel_iommu *iommu;
4886 iommu = device_to_iommu(dev, &bus, &devfn);
4890 iommu_group_remove_device(dev);
4892 iommu_device_unlink(iommu->iommu_dev, dev);
4895 static const struct iommu_ops intel_iommu_ops = {
4896 .capable = intel_iommu_capable,
4897 .domain_alloc = intel_iommu_domain_alloc,
4898 .domain_free = intel_iommu_domain_free,
4899 .attach_dev = intel_iommu_attach_device,
4900 .detach_dev = intel_iommu_detach_device,
4901 .map = intel_iommu_map,
4902 .unmap = intel_iommu_unmap,
4903 .map_sg = default_iommu_map_sg,
4904 .iova_to_phys = intel_iommu_iova_to_phys,
4905 .add_device = intel_iommu_add_device,
4906 .remove_device = intel_iommu_remove_device,
4907 .pgsize_bitmap = INTEL_IOMMU_PGSIZES,
4910 static void quirk_iommu_g4x_gfx(struct pci_dev *dev)
4912 /* G4x/GM45 integrated gfx dmar support is totally busted. */
4913 pr_info("Disabling IOMMU for graphics on this chipset\n");
4917 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_g4x_gfx);
4918 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e00, quirk_iommu_g4x_gfx);
4919 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e10, quirk_iommu_g4x_gfx);
4920 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e20, quirk_iommu_g4x_gfx);
4921 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e30, quirk_iommu_g4x_gfx);
4922 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e40, quirk_iommu_g4x_gfx);
4923 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e90, quirk_iommu_g4x_gfx);
4925 static void quirk_iommu_rwbf(struct pci_dev *dev)
4928 * Mobile 4 Series Chipset neglects to set RWBF capability,
4929 * but needs it. Same seems to hold for the desktop versions.
4931 pr_info("Forcing write-buffer flush capability\n");
4935 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_rwbf);
4936 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e00, quirk_iommu_rwbf);
4937 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e10, quirk_iommu_rwbf);
4938 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e20, quirk_iommu_rwbf);
4939 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e30, quirk_iommu_rwbf);
4940 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e40, quirk_iommu_rwbf);
4941 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e90, quirk_iommu_rwbf);
4944 #define GGC_MEMORY_SIZE_MASK (0xf << 8)
4945 #define GGC_MEMORY_SIZE_NONE (0x0 << 8)
4946 #define GGC_MEMORY_SIZE_1M (0x1 << 8)
4947 #define GGC_MEMORY_SIZE_2M (0x3 << 8)
4948 #define GGC_MEMORY_VT_ENABLED (0x8 << 8)
4949 #define GGC_MEMORY_SIZE_2M_VT (0x9 << 8)
4950 #define GGC_MEMORY_SIZE_3M_VT (0xa << 8)
4951 #define GGC_MEMORY_SIZE_4M_VT (0xb << 8)
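/*
 * Ironlake/Calpella: if the BIOS allocated no stolen memory for the shadow
 * GTT, graphics cannot be remapped at all, so disable IOMMU use for the
 * gfx device; otherwise force strict (unbatched) IOTLB flushing, since the
 * gfx device must be idle when its mappings are flushed on these chipsets.
 */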
4953 static void quirk_calpella_no_shadow_gtt(struct pci_dev *dev)
4957 if (pci_read_config_word(dev, GGC, &ggc))
4960 if (!(ggc & GGC_MEMORY_VT_ENABLED)) {
4961 pr_info("BIOS has allocated no shadow GTT; disabling IOMMU for graphics\n");
4963 } else if (dmar_map_gfx) {
4964 /* we have to ensure the gfx device is idle before we flush */
4965 pr_info("Disabling batched IOTLB flush on Ironlake\n");
4966 intel_iommu_strict = 1;
4969 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0040, quirk_calpella_no_shadow_gtt);
4970 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0044, quirk_calpella_no_shadow_gtt);
4971 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0062, quirk_calpella_no_shadow_gtt);
4972 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x006a, quirk_calpella_no_shadow_gtt);
4974 /* On Tylersburg chipsets, some BIOSes have been known to enable the
4975 ISOCH DMAR unit for the Azalia sound device, but not give it any
4976 TLB entries, which causes it to deadlock. Check for that. We do
4977 this in a function called from init_dmars(), instead of in a PCI
4978 quirk, because we don't want to print the obnoxious "BIOS broken"
4979 message if VT-d is actually disabled.
4981 static void __init check_tylersburg_isoch(void)
4983 struct pci_dev *pdev;
4984 uint32_t vtisochctrl;
4986 /* If there's no Azalia in the system anyway, forget it. */
4987 pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x3a3e, NULL);
4992 /* System Management Registers. Might be hidden, in which case
4993 we can't do the sanity check. But that's OK, because the
4994 known-broken BIOSes _don't_ actually hide it, so far. */
4995 pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x342e, NULL);
4999 if (pci_read_config_dword(pdev, 0x188, &vtisochctrl)) {
5006 /* If Azalia DMA is routed to the non-isoch DMAR unit, fine. */
5007 if (vtisochctrl & 1)
5010 /* Drop all bits other than the number of TLB entries */
5011 vtisochctrl &= 0x1c;
5013 /* If we have the recommended number of TLB entries (16), fine. */
5014 if (vtisochctrl == 0x10)
5017 /* Zero TLB entries? You get to ride the short bus to school. */
5019 WARN(1, "Your BIOS is broken; DMA routed to ISOCH DMAR unit but no TLB space.\n"
5020 "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
5021 dmi_get_system_info(DMI_BIOS_VENDOR),
5022 dmi_get_system_info(DMI_BIOS_VERSION),
5023 dmi_get_system_info(DMI_PRODUCT_VERSION));
5024 iommu_identity_mapping |= IDENTMAP_AZALIA;
5028 pr_warn("Recommended TLB entries for ISOCH unit is 16; your BIOS set %d\n",