2 * Copyright © 2006-2014 Intel Corporation.
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * Authors: David Woodhouse <dwmw2@infradead.org>,
14 * Ashok Raj <ashok.raj@intel.com>,
15 * Shaohua Li <shaohua.li@intel.com>,
16 * Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>,
17 * Fenghua Yu <fenghua.yu@intel.com>
18 * Joerg Roedel <jroedel@suse.de>
21 #define pr_fmt(fmt) "DMAR: " fmt
23 #include <linux/init.h>
24 #include <linux/bitmap.h>
25 #include <linux/debugfs.h>
26 #include <linux/export.h>
27 #include <linux/slab.h>
28 #include <linux/irq.h>
29 #include <linux/interrupt.h>
30 #include <linux/spinlock.h>
31 #include <linux/pci.h>
32 #include <linux/dmar.h>
33 #include <linux/dma-mapping.h>
34 #include <linux/mempool.h>
35 #include <linux/memory.h>
36 #include <linux/timer.h>
37 #include <linux/iova.h>
38 #include <linux/iommu.h>
39 #include <linux/intel-iommu.h>
40 #include <linux/syscore_ops.h>
41 #include <linux/tboot.h>
42 #include <linux/dmi.h>
43 #include <linux/pci-ats.h>
44 #include <linux/memblock.h>
45 #include <linux/dma-contiguous.h>
46 #include <linux/crash_dump.h>
47 #include <asm/irq_remapping.h>
48 #include <asm/cacheflush.h>
49 #include <asm/iommu.h>
51 #include "irq_remapping.h"
53 #define ROOT_SIZE VTD_PAGE_SIZE
54 #define CONTEXT_SIZE VTD_PAGE_SIZE
56 #define IS_GFX_DEVICE(pdev) ((pdev->class >> 16) == PCI_BASE_CLASS_DISPLAY)
57 #define IS_USB_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_SERIAL_USB)
58 #define IS_ISA_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_BRIDGE_ISA)
59 #define IS_AZALIA(pdev) ((pdev)->vendor == 0x8086 && (pdev)->device == 0x3a3e)
61 #define IOAPIC_RANGE_START (0xfee00000)
62 #define IOAPIC_RANGE_END (0xfeefffff)
63 #define IOVA_START_ADDR (0x1000)
65 #define DEFAULT_DOMAIN_ADDRESS_WIDTH 48
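/* 48 bits of address width corresponds to a 4-level page table (agaw 2):
 * a 12-bit page offset plus four 9-bit levels. */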
67 #define MAX_AGAW_WIDTH 64
68 #define MAX_AGAW_PFN_WIDTH (MAX_AGAW_WIDTH - VTD_PAGE_SHIFT)
70 #define __DOMAIN_MAX_PFN(gaw) ((((uint64_t)1) << (gaw-VTD_PAGE_SHIFT)) - 1)
71 #define __DOMAIN_MAX_ADDR(gaw) ((((uint64_t)1) << gaw) - 1)
73 /* We limit DOMAIN_MAX_PFN to fit in an unsigned long, and DOMAIN_MAX_ADDR
74 to match. That way, we can use 'unsigned long' for PFNs with impunity. */
75 #define DOMAIN_MAX_PFN(gaw) ((unsigned long) min_t(uint64_t, \
76 __DOMAIN_MAX_PFN(gaw), (unsigned long)-1))
77 #define DOMAIN_MAX_ADDR(gaw) (((uint64_t)__DOMAIN_MAX_PFN(gaw)) << VTD_PAGE_SHIFT)
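/* Example: with gaw == 48, __DOMAIN_MAX_PFN is 2^36 - 1. On a 32-bit kernel
 * that exceeds ULONG_MAX, so DOMAIN_MAX_PFN clamps it and PFN arithmetic can
 * safely stay in 'unsigned long'. */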
79 /* IO virtual address start page frame number */
80 #define IOVA_START_PFN (1)
82 #define IOVA_PFN(addr) ((addr) >> PAGE_SHIFT)
83 #define DMA_32BIT_PFN IOVA_PFN(DMA_BIT_MASK(32))
84 #define DMA_64BIT_PFN IOVA_PFN(DMA_BIT_MASK(64))
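/* With 4KiB pages, DMA_32BIT_PFN == 0xfffff: the last page frame reachable
 * through a 32-bit DMA mask. */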
86 /* page table handling */
87 #define LEVEL_STRIDE (9)
88 #define LEVEL_MASK (((u64)1 << LEVEL_STRIDE) - 1)
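/* Each page-table level resolves LEVEL_STRIDE == 9 bits of the PFN, i.e.
 * 512 eight-byte entries per 4KiB table page; LEVEL_MASK == 0x1ff. */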
91 * This bitmap is used to advertise the page sizes our hardware supports
92 * to the IOMMU core, which will then use this information to split
93 * physically contiguous memory regions it is mapping into page sizes that we support.
96 * Traditionally the IOMMU core just handed us the mappings directly,
97 * after making sure the size is a power-of-two multiple of a 4KiB page
98 * and that the mapping has natural alignment.
100 * To retain this behavior, we currently advertise that we support
101 * every power-of-two page size of 4KiB or larger.
103 * If at some point we'd like to utilize the IOMMU core's new behavior,
104 * we could change this to advertise the real page sizes we support.
106 #define INTEL_IOMMU_PGSIZES (~0xFFFUL)
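/* ~0xFFFUL sets every bit from bit 12 upwards, i.e. every power-of-two size
 * of 4KiB or larger is advertised. */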
108 static inline int agaw_to_level(int agaw)
113 static inline int agaw_to_width(int agaw)
115 return min_t(int, 30 + agaw * LEVEL_STRIDE, MAX_AGAW_WIDTH);
118 static inline int width_to_agaw(int width)
120 return DIV_ROUND_UP(width - 30, LEVEL_STRIDE);
123 static inline unsigned int level_to_offset_bits(int level)
125 return (level - 1) * LEVEL_STRIDE;
128 static inline int pfn_level_offset(unsigned long pfn, int level)
130 return (pfn >> level_to_offset_bits(level)) & LEVEL_MASK;
133 static inline unsigned long level_mask(int level)
135 return -1UL << level_to_offset_bits(level);
138 static inline unsigned long level_size(int level)
140 return 1UL << level_to_offset_bits(level);
143 static inline unsigned long align_to_level(unsigned long pfn, int level)
145 return (pfn + level_size(level) - 1) & level_mask(level);
148 static inline unsigned long lvl_to_nr_pages(unsigned int lvl)
150 return 1 << min_t(int, (lvl - 1) * LEVEL_STRIDE, MAX_AGAW_PFN_WIDTH);
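/* Example of the helpers above: a level-2 entry covers 1 << 9 == 512 4KiB
 * pages (2MiB), and pfn_level_offset(pfn, 2) == (pfn >> 9) & 0x1ff selects
 * the slot within the level-2 table. */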
153 /* VT-d pages must always be _smaller_ than MM pages. Otherwise things
154 are never going to work. */
155 static inline unsigned long dma_to_mm_pfn(unsigned long dma_pfn)
157 return dma_pfn >> (PAGE_SHIFT - VTD_PAGE_SHIFT);
160 static inline unsigned long mm_to_dma_pfn(unsigned long mm_pfn)
162 return mm_pfn << (PAGE_SHIFT - VTD_PAGE_SHIFT);
164 static inline unsigned long page_to_dma_pfn(struct page *pg)
166 return mm_to_dma_pfn(page_to_pfn(pg));
168 static inline unsigned long virt_to_dma_pfn(void *p)
170 return page_to_dma_pfn(virt_to_page(p));
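/* On x86, PAGE_SHIFT == VTD_PAGE_SHIFT == 12, so the mm/dma PFN conversions
 * above are identity; the shifts only matter if MM pages ever become larger
 * than VT-d pages. */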
173 /* global iommu list, set NULL for ignored DMAR units */
174 static struct intel_iommu **g_iommus;
176 static void __init check_tylersburg_isoch(void);
177 static int rwbf_quirk;
180 * set to 1 to panic the kernel if VT-d can't be enabled successfully
181 * (used when the kernel is launched with TXT)
183 static int force_on = 0;
188 * 12-63: Context Ptr (12 - (haw-1))
195 #define ROOT_ENTRY_NR (VTD_PAGE_SIZE/sizeof(struct root_entry))
198 * Take a root_entry and return the Lower Context Table Pointer (LCTP)
201 static phys_addr_t root_entry_lctp(struct root_entry *re)
206 return re->lo & VTD_PAGE_MASK;
210 * Take a root_entry and return the Upper Context Table Pointer (UCTP)
213 static phys_addr_t root_entry_uctp(struct root_entry *re)
218 return re->hi & VTD_PAGE_MASK;
223 * 1: fault processing disable
224 * 2-3: translation type
225 * 12-63: address space root
231 struct context_entry {
236 static inline void context_clear_pasid_enable(struct context_entry *context)
238 context->lo &= ~(1ULL << 11);
241 static inline bool context_pasid_enabled(struct context_entry *context)
243 return !!(context->lo & (1ULL << 11));
246 static inline void context_set_copied(struct context_entry *context)
248 context->hi |= (1ull << 3);
251 static inline bool context_copied(struct context_entry *context)
253 return !!(context->hi & (1ULL << 3));
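/* The 'copied' bit (bit 3 of the high word) marks context entries inherited
 * from a previous kernel (e.g. across kdump) while translation was already
 * enabled; see context_present() below for how such entries are treated. */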
256 static inline bool __context_present(struct context_entry *context)
258 return (context->lo & 1);
261 static inline bool context_present(struct context_entry *context)
263 return context_pasid_enabled(context) ?
264 __context_present(context) :
265 __context_present(context) && !context_copied(context);
268 static inline void context_set_present(struct context_entry *context)
273 static inline void context_set_fault_enable(struct context_entry *context)
275 context->lo &= (((u64)-1) << 2) | 1;
278 static inline void context_set_translation_type(struct context_entry *context,
281 context->lo &= (((u64)-1) << 4) | 3;
282 context->lo |= (value & 3) << 2;
285 static inline void context_set_address_root(struct context_entry *context,
288 context->lo &= ~VTD_PAGE_MASK;
289 context->lo |= value & VTD_PAGE_MASK;
292 static inline void context_set_address_width(struct context_entry *context,
295 context->hi |= value & 7;
298 static inline void context_set_domain_id(struct context_entry *context,
301 context->hi |= (value & ((1 << 16) - 1)) << 8;
304 static inline int context_domain_id(struct context_entry *c)
306 return((c->hi >> 8) & 0xffff);
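/* Summary of the context entry high word as used by the helpers above:
 * bits 0-2 hold the address width (AW), bits 8-23 hold the 16-bit domain id. */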
309 static inline void context_clear_entry(struct context_entry *context)
322 * 12-63: Host physical address
328 static inline void dma_clear_pte(struct dma_pte *pte)
333 static inline u64 dma_pte_addr(struct dma_pte *pte)
336 return pte->val & VTD_PAGE_MASK;
338 /* Must have a full atomic 64-bit read */
339 return __cmpxchg64(&pte->val, 0ULL, 0ULL) & VTD_PAGE_MASK;
343 static inline bool dma_pte_present(struct dma_pte *pte)
345 return (pte->val & 3) != 0;
348 static inline bool dma_pte_superpage(struct dma_pte *pte)
350 return (pte->val & DMA_PTE_LARGE_PAGE);
353 static inline int first_pte_in_page(struct dma_pte *pte)
355 return !((unsigned long)pte & ~VTD_PAGE_MASK);
359 * This domain is the static identity mapping domain.
360 * 1. This domain creates a static 1:1 mapping of all usable memory.
361 * 2. It maps to each iommu if successful.
362 * 3. Each iommu maps to this domain if successful.
364 static struct dmar_domain *si_domain;
365 static int hw_pass_through = 1;
368 * Domain represents a virtual machine; more than one device
369 * across iommus may be owned by one domain, e.g. a KVM guest.
371 #define DOMAIN_FLAG_VIRTUAL_MACHINE (1 << 0)
373 /* si_domain contains multiple devices */
374 #define DOMAIN_FLAG_STATIC_IDENTITY (1 << 1)
376 #define for_each_domain_iommu(idx, domain) \
377 for (idx = 0; idx < g_num_of_iommus; idx++) \
378 if (domain->iommu_refcnt[idx])
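/* for_each_domain_iommu() above iterates over the sequence ids of all IOMMUs
 * that currently have at least one device of this domain attached (i.e. a
 * nonzero per-iommu refcount). */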
381 int nid; /* node id */
383 unsigned iommu_refcnt[DMAR_UNITS_SUPPORTED];
384 /* Refcount of devices per iommu */
387 u16 iommu_did[DMAR_UNITS_SUPPORTED];
388 /* Domain ids per IOMMU. Use u16 since
389 * domain ids are 16 bit wide according
390 * to VT-d spec, section 9.3 */
392 struct list_head devices; /* all devices' list */
393 struct iova_domain iovad; /* iova's that belong to this domain */
395 struct dma_pte *pgd; /* virtual address */
396 int gaw; /* max guest address width */
398 /* adjusted guest address width, 0 is level 2 30-bit */
401 int flags; /* flags to find out type of domain */
403 int iommu_coherency;/* indicate coherency of iommu access */
404 int iommu_snooping; /* indicate snooping control feature*/
405 int iommu_count; /* reference count of iommu */
406 int iommu_superpage;/* Level of superpages supported:
407 0 == 4KiB (no superpages), 1 == 2MiB,
408 2 == 1GiB, 3 == 512GiB, 4 == 1TiB */
409 u64 max_addr; /* maximum mapped address */
411 struct iommu_domain domain; /* generic domain data structure for iommu core */
415 /* PCI domain-device relationship */
416 struct device_domain_info {
417 struct list_head link; /* link to domain siblings */
418 struct list_head global; /* link to global list */
419 u8 bus; /* PCI bus number */
420 u8 devfn; /* PCI devfn number */
421 struct device *dev; /* it's NULL for PCIe-to-PCI bridge */
422 struct intel_iommu *iommu; /* IOMMU used by this device */
423 struct dmar_domain *domain; /* pointer to domain */
426 struct dmar_rmrr_unit {
427 struct list_head list; /* list of rmrr units */
428 struct acpi_dmar_header *hdr; /* ACPI header */
429 u64 base_address; /* reserved base address*/
430 u64 end_address; /* reserved end address */
431 struct dmar_dev_scope *devices; /* target devices */
432 int devices_cnt; /* target device count */
435 struct dmar_atsr_unit {
436 struct list_head list; /* list of ATSR units */
437 struct acpi_dmar_header *hdr; /* ACPI header */
438 struct dmar_dev_scope *devices; /* target devices */
439 int devices_cnt; /* target device count */
440 u8 include_all:1; /* include all ports */
443 static LIST_HEAD(dmar_atsr_units);
444 static LIST_HEAD(dmar_rmrr_units);
446 #define for_each_rmrr_units(rmrr) \
447 list_for_each_entry(rmrr, &dmar_rmrr_units, list)
449 static void flush_unmaps_timeout(unsigned long data);
451 static DEFINE_TIMER(unmap_timer, flush_unmaps_timeout, 0, 0);
453 #define HIGH_WATER_MARK 250
454 struct deferred_flush_tables {
456 struct iova *iova[HIGH_WATER_MARK];
457 struct dmar_domain *domain[HIGH_WATER_MARK];
458 struct page *freelist[HIGH_WATER_MARK];
461 static struct deferred_flush_tables *deferred_flush;
463 /* bitmap for indexing intel_iommus */
464 static int g_num_of_iommus;
466 static DEFINE_SPINLOCK(async_umap_flush_lock);
467 static LIST_HEAD(unmaps_to_do);
470 static long list_size;
472 static void domain_exit(struct dmar_domain *domain);
473 static void domain_remove_dev_info(struct dmar_domain *domain);
474 static void dmar_remove_one_dev_info(struct dmar_domain *domain,
476 static void __dmar_remove_one_dev_info(struct device_domain_info *info);
477 static void domain_context_clear(struct intel_iommu *iommu,
479 static int domain_detach_iommu(struct dmar_domain *domain,
480 struct intel_iommu *iommu);
482 #ifdef CONFIG_INTEL_IOMMU_DEFAULT_ON
483 int dmar_disabled = 0;
485 int dmar_disabled = 1;
486 #endif /*CONFIG_INTEL_IOMMU_DEFAULT_ON*/
488 int intel_iommu_enabled = 0;
489 EXPORT_SYMBOL_GPL(intel_iommu_enabled);
491 static int dmar_map_gfx = 1;
492 static int dmar_forcedac;
493 static int intel_iommu_strict;
494 static int intel_iommu_superpage = 1;
495 static int intel_iommu_ecs = 1;
497 /* We only actually use ECS when PASID support (on the new bit 40)
498 * is also advertised. Some early implementations — the ones with
499 * PASID support on bit 28 — have issues even when we *only* use
500 * extended root/context tables. */
501 #define ecs_enabled(iommu) (intel_iommu_ecs && ecap_ecs(iommu->ecap) && \
502 ecap_pasid(iommu->ecap))
504 int intel_iommu_gfx_mapped;
505 EXPORT_SYMBOL_GPL(intel_iommu_gfx_mapped);
507 #define DUMMY_DEVICE_DOMAIN_INFO ((struct device_domain_info *)(-1))
508 static DEFINE_SPINLOCK(device_domain_lock);
509 static LIST_HEAD(device_domain_list);
511 static const struct iommu_ops intel_iommu_ops;
513 static bool translation_pre_enabled(struct intel_iommu *iommu)
515 return (iommu->flags & VTD_FLAG_TRANS_PRE_ENABLED);
518 static void clear_translation_pre_enabled(struct intel_iommu *iommu)
520 iommu->flags &= ~VTD_FLAG_TRANS_PRE_ENABLED;
523 static void init_translation_status(struct intel_iommu *iommu)
527 gsts = readl(iommu->reg + DMAR_GSTS_REG);
528 if (gsts & DMA_GSTS_TES)
529 iommu->flags |= VTD_FLAG_TRANS_PRE_ENABLED;
532 /* Convert a generic 'struct iommu_domain' to the private 'struct dmar_domain' */
533 static struct dmar_domain *to_dmar_domain(struct iommu_domain *dom)
535 return container_of(dom, struct dmar_domain, domain);
538 static int __init intel_iommu_setup(char *str)
543 if (!strncmp(str, "on", 2)) {
545 pr_info("IOMMU enabled\n");
546 } else if (!strncmp(str, "off", 3)) {
548 pr_info("IOMMU disabled\n");
549 } else if (!strncmp(str, "igfx_off", 8)) {
551 pr_info("Disable GFX device mapping\n");
552 } else if (!strncmp(str, "forcedac", 8)) {
553 pr_info("Forcing DAC for PCI devices\n");
555 } else if (!strncmp(str, "strict", 6)) {
556 pr_info("Disable batched IOTLB flush\n");
557 intel_iommu_strict = 1;
558 } else if (!strncmp(str, "sp_off", 6)) {
559 pr_info("Disable supported super page\n");
560 intel_iommu_superpage = 0;
561 } else if (!strncmp(str, "ecs_off", 7)) {
563 "Intel-IOMMU: disable extended context table support\n");
567 str += strcspn(str, ",");
573 __setup("intel_iommu=", intel_iommu_setup);
575 static struct kmem_cache *iommu_domain_cache;
576 static struct kmem_cache *iommu_devinfo_cache;
578 static struct dmar_domain* get_iommu_domain(struct intel_iommu *iommu, u16 did)
580 struct dmar_domain **domains;
583 domains = iommu->domains[idx];
587 return domains[did & 0xff];
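/* iommu->domains is a two-level array: the entry at did >> 8 points to a
 * page of 256 dmar_domain pointers, and did & 0xff selects the slot within
 * that page (see set_iommu_domain() below, which allocates pages on demand). */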
590 static void set_iommu_domain(struct intel_iommu *iommu, u16 did,
591 struct dmar_domain *domain)
593 struct dmar_domain **domains;
596 if (!iommu->domains[idx]) {
597 size_t size = 256 * sizeof(struct dmar_domain *);
598 iommu->domains[idx] = kzalloc(size, GFP_ATOMIC);
601 domains = iommu->domains[idx];
602 if (WARN_ON(!domains))
605 domains[did & 0xff] = domain;
608 static inline void *alloc_pgtable_page(int node)
613 page = alloc_pages_node(node, GFP_ATOMIC | __GFP_ZERO, 0);
615 vaddr = page_address(page);
619 static inline void free_pgtable_page(void *vaddr)
621 free_page((unsigned long)vaddr);
624 static inline void *alloc_domain_mem(void)
626 return kmem_cache_alloc(iommu_domain_cache, GFP_ATOMIC);
629 static void free_domain_mem(void *vaddr)
631 kmem_cache_free(iommu_domain_cache, vaddr);
634 static inline void * alloc_devinfo_mem(void)
636 return kmem_cache_alloc(iommu_devinfo_cache, GFP_ATOMIC);
639 static inline void free_devinfo_mem(void *vaddr)
641 kmem_cache_free(iommu_devinfo_cache, vaddr);
644 static inline int domain_type_is_vm(struct dmar_domain *domain)
646 return domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE;
649 static inline int domain_type_is_si(struct dmar_domain *domain)
651 return domain->flags & DOMAIN_FLAG_STATIC_IDENTITY;
654 static inline int domain_type_is_vm_or_si(struct dmar_domain *domain)
656 return domain->flags & (DOMAIN_FLAG_VIRTUAL_MACHINE |
657 DOMAIN_FLAG_STATIC_IDENTITY);
660 static inline int domain_pfn_supported(struct dmar_domain *domain,
663 int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
665 return !(addr_width < BITS_PER_LONG && pfn >> addr_width);
668 static int __iommu_calculate_agaw(struct intel_iommu *iommu, int max_gaw)
673 sagaw = cap_sagaw(iommu->cap);
674 for (agaw = width_to_agaw(max_gaw);
676 if (test_bit(agaw, &sagaw))
684 * Calculate max SAGAW for each iommu.
686 int iommu_calculate_max_sagaw(struct intel_iommu *iommu)
688 return __iommu_calculate_agaw(iommu, MAX_AGAW_WIDTH);
692 * Calculate agaw for each iommu.
693 * "SAGAW" may be different across iommus; use a default agaw, and
694 * fall back to a smaller supported agaw for iommus that don't support it.
696 int iommu_calculate_agaw(struct intel_iommu *iommu)
698 return __iommu_calculate_agaw(iommu, DEFAULT_DOMAIN_ADDRESS_WIDTH);
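/* __iommu_calculate_agaw() walks down from width_to_agaw(max_gaw) until it
 * finds an agaw advertised in the SAGAW capability field; e.g. hardware that
 * only supports 3-level (39-bit) tables yields agaw 1 even though the default
 * domain width is 48 bits. */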
701 /* This function only returns a single iommu in a domain */
702 static struct intel_iommu *domain_get_iommu(struct dmar_domain *domain)
706 /* si_domain and vm domain should not get here. */
707 BUG_ON(domain_type_is_vm_or_si(domain));
708 for_each_domain_iommu(iommu_id, domain)
711 if (iommu_id < 0 || iommu_id >= g_num_of_iommus)
714 return g_iommus[iommu_id];
717 static void domain_update_iommu_coherency(struct dmar_domain *domain)
719 struct dmar_drhd_unit *drhd;
720 struct intel_iommu *iommu;
724 domain->iommu_coherency = 1;
726 for_each_domain_iommu(i, domain) {
728 if (!ecap_coherent(g_iommus[i]->ecap)) {
729 domain->iommu_coherency = 0;
736 /* No hardware attached; use lowest common denominator */
738 for_each_active_iommu(iommu, drhd) {
739 if (!ecap_coherent(iommu->ecap)) {
740 domain->iommu_coherency = 0;
747 static int domain_update_iommu_snooping(struct intel_iommu *skip)
749 struct dmar_drhd_unit *drhd;
750 struct intel_iommu *iommu;
754 for_each_active_iommu(iommu, drhd) {
756 if (!ecap_sc_support(iommu->ecap)) {
767 static int domain_update_iommu_superpage(struct intel_iommu *skip)
769 struct dmar_drhd_unit *drhd;
770 struct intel_iommu *iommu;
773 if (!intel_iommu_superpage) {
777 /* set iommu_superpage to the smallest common denominator */
779 for_each_active_iommu(iommu, drhd) {
781 mask &= cap_super_page_val(iommu->cap);
791 /* Some capabilities may be different across iommus */
792 static void domain_update_iommu_cap(struct dmar_domain *domain)
794 domain_update_iommu_coherency(domain);
795 domain->iommu_snooping = domain_update_iommu_snooping(NULL);
796 domain->iommu_superpage = domain_update_iommu_superpage(NULL);
799 static inline struct context_entry *iommu_context_addr(struct intel_iommu *iommu,
800 u8 bus, u8 devfn, int alloc)
802 struct root_entry *root = &iommu->root_entry[bus];
803 struct context_entry *context;
806 if (ecs_enabled(iommu)) {
815 context = phys_to_virt(*entry & VTD_PAGE_MASK);
817 unsigned long phy_addr;
821 context = alloc_pgtable_page(iommu->node);
825 __iommu_flush_cache(iommu, (void *)context, CONTEXT_SIZE);
826 phy_addr = virt_to_phys((void *)context);
827 *entry = phy_addr | 1;
828 __iommu_flush_cache(iommu, entry, sizeof(*entry));
830 return &context[devfn];
833 static int iommu_dummy(struct device *dev)
835 return dev->archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO;
838 static struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devfn)
840 struct dmar_drhd_unit *drhd = NULL;
841 struct intel_iommu *iommu;
843 struct pci_dev *ptmp, *pdev = NULL;
847 if (iommu_dummy(dev))
850 if (dev_is_pci(dev)) {
851 pdev = to_pci_dev(dev);
852 segment = pci_domain_nr(pdev->bus);
853 } else if (has_acpi_companion(dev))
854 dev = &ACPI_COMPANION(dev)->dev;
857 for_each_active_iommu(iommu, drhd) {
858 if (pdev && segment != drhd->segment)
861 for_each_active_dev_scope(drhd->devices,
862 drhd->devices_cnt, i, tmp) {
864 *bus = drhd->devices[i].bus;
865 *devfn = drhd->devices[i].devfn;
869 if (!pdev || !dev_is_pci(tmp))
872 ptmp = to_pci_dev(tmp);
873 if (ptmp->subordinate &&
874 ptmp->subordinate->number <= pdev->bus->number &&
875 ptmp->subordinate->busn_res.end >= pdev->bus->number)
879 if (pdev && drhd->include_all) {
881 *bus = pdev->bus->number;
882 *devfn = pdev->devfn;
893 static void domain_flush_cache(struct dmar_domain *domain,
894 void *addr, int size)
896 if (!domain->iommu_coherency)
897 clflush_cache_range(addr, size);
900 static int device_context_mapped(struct intel_iommu *iommu, u8 bus, u8 devfn)
902 struct context_entry *context;
906 spin_lock_irqsave(&iommu->lock, flags);
907 context = iommu_context_addr(iommu, bus, devfn, 0);
909 ret = context_present(context);
910 spin_unlock_irqrestore(&iommu->lock, flags);
914 static void clear_context_table(struct intel_iommu *iommu, u8 bus, u8 devfn)
916 struct context_entry *context;
919 spin_lock_irqsave(&iommu->lock, flags);
920 context = iommu_context_addr(iommu, bus, devfn, 0);
922 context_clear_entry(context);
923 __iommu_flush_cache(iommu, context, sizeof(*context));
925 spin_unlock_irqrestore(&iommu->lock, flags);
928 static void free_context_table(struct intel_iommu *iommu)
932 struct context_entry *context;
934 spin_lock_irqsave(&iommu->lock, flags);
935 if (!iommu->root_entry) {
938 for (i = 0; i < ROOT_ENTRY_NR; i++) {
939 context = iommu_context_addr(iommu, i, 0, 0);
941 free_pgtable_page(context);
943 if (!ecs_enabled(iommu))
946 context = iommu_context_addr(iommu, i, 0x80, 0);
948 free_pgtable_page(context);
951 free_pgtable_page(iommu->root_entry);
952 iommu->root_entry = NULL;
954 spin_unlock_irqrestore(&iommu->lock, flags);
957 static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
958 unsigned long pfn, int *target_level)
960 struct dma_pte *parent, *pte = NULL;
961 int level = agaw_to_level(domain->agaw);
964 BUG_ON(!domain->pgd);
966 if (!domain_pfn_supported(domain, pfn))
967 /* Address beyond IOMMU's addressing capabilities. */
970 parent = domain->pgd;
975 offset = pfn_level_offset(pfn, level);
976 pte = &parent[offset];
977 if (!*target_level && (dma_pte_superpage(pte) || !dma_pte_present(pte)))
979 if (level == *target_level)
982 if (!dma_pte_present(pte)) {
985 tmp_page = alloc_pgtable_page(domain->nid);
990 domain_flush_cache(domain, tmp_page, VTD_PAGE_SIZE);
991 pteval = ((uint64_t)virt_to_dma_pfn(tmp_page) << VTD_PAGE_SHIFT) | DMA_PTE_READ | DMA_PTE_WRITE;
992 if (cmpxchg64(&pte->val, 0ULL, pteval))
993 /* Someone else set it while we were thinking; use theirs. */
994 free_pgtable_page(tmp_page);
996 domain_flush_cache(domain, pte, sizeof(*pte));
1001 parent = phys_to_virt(dma_pte_addr(pte));
1006 *target_level = level;
1012 /* return address's pte at specific level */
1013 static struct dma_pte *dma_pfn_level_pte(struct dmar_domain *domain,
1015 int level, int *large_page)
1017 struct dma_pte *parent, *pte = NULL;
1018 int total = agaw_to_level(domain->agaw);
1021 parent = domain->pgd;
1022 while (level <= total) {
1023 offset = pfn_level_offset(pfn, total);
1024 pte = &parent[offset];
1028 if (!dma_pte_present(pte)) {
1029 *large_page = total;
1033 if (dma_pte_superpage(pte)) {
1034 *large_page = total;
1038 parent = phys_to_virt(dma_pte_addr(pte));
1044 /* clear last level pte; a TLB flush should follow */
1045 static void dma_pte_clear_range(struct dmar_domain *domain,
1046 unsigned long start_pfn,
1047 unsigned long last_pfn)
1049 unsigned int large_page = 1;
1050 struct dma_pte *first_pte, *pte;
1052 BUG_ON(!domain_pfn_supported(domain, start_pfn));
1053 BUG_ON(!domain_pfn_supported(domain, last_pfn));
1054 BUG_ON(start_pfn > last_pfn);
1056 /* we don't need lock here; nobody else touches the iova range */
1059 first_pte = pte = dma_pfn_level_pte(domain, start_pfn, 1, &large_page);
1061 start_pfn = align_to_level(start_pfn + 1, large_page + 1);
1066 start_pfn += lvl_to_nr_pages(large_page);
1068 } while (start_pfn <= last_pfn && !first_pte_in_page(pte));
1070 domain_flush_cache(domain, first_pte,
1071 (void *)pte - (void *)first_pte);
1073 } while (start_pfn && start_pfn <= last_pfn);
1076 static void dma_pte_free_level(struct dmar_domain *domain, int level,
1077 struct dma_pte *pte, unsigned long pfn,
1078 unsigned long start_pfn, unsigned long last_pfn)
1080 pfn = max(start_pfn, pfn);
1081 pte = &pte[pfn_level_offset(pfn, level)];
1084 unsigned long level_pfn;
1085 struct dma_pte *level_pte;
1087 if (!dma_pte_present(pte) || dma_pte_superpage(pte))
1090 level_pfn = pfn & level_mask(level - 1);
1091 level_pte = phys_to_virt(dma_pte_addr(pte));
1094 dma_pte_free_level(domain, level - 1, level_pte,
1095 level_pfn, start_pfn, last_pfn);
1097 /* If range covers entire pagetable, free it */
1098 if (!(start_pfn > level_pfn ||
1099 last_pfn < level_pfn + level_size(level) - 1)) {
1101 domain_flush_cache(domain, pte, sizeof(*pte));
1102 free_pgtable_page(level_pte);
1105 pfn += level_size(level);
1106 } while (!first_pte_in_page(++pte) && pfn <= last_pfn);
1109 /* free page table pages. last level pte should already be cleared */
1110 static void dma_pte_free_pagetable(struct dmar_domain *domain,
1111 unsigned long start_pfn,
1112 unsigned long last_pfn)
1114 BUG_ON(!domain_pfn_supported(domain, start_pfn));
1115 BUG_ON(!domain_pfn_supported(domain, last_pfn));
1116 BUG_ON(start_pfn > last_pfn);
1118 dma_pte_clear_range(domain, start_pfn, last_pfn);
1120 /* We don't need lock here; nobody else touches the iova range */
1121 dma_pte_free_level(domain, agaw_to_level(domain->agaw),
1122 domain->pgd, 0, start_pfn, last_pfn);
1125 if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) {
1126 free_pgtable_page(domain->pgd);
1131 /* When a page at a given level is being unlinked from its parent, we don't
1132 need to *modify* it at all. All we need to do is make a list of all the
1133 pages which can be freed just as soon as we've flushed the IOTLB and we
1134 know the hardware page-walk will no longer touch them.
1135 The 'pte' argument is the *parent* PTE, pointing to the page that is to be freed. */
1137 static struct page *dma_pte_list_pagetables(struct dmar_domain *domain,
1138 int level, struct dma_pte *pte,
1139 struct page *freelist)
1143 pg = pfn_to_page(dma_pte_addr(pte) >> PAGE_SHIFT);
1144 pg->freelist = freelist;
1150 pte = page_address(pg);
1152 if (dma_pte_present(pte) && !dma_pte_superpage(pte))
1153 freelist = dma_pte_list_pagetables(domain, level - 1,
1156 } while (!first_pte_in_page(pte));
1161 static struct page *dma_pte_clear_level(struct dmar_domain *domain, int level,
1162 struct dma_pte *pte, unsigned long pfn,
1163 unsigned long start_pfn,
1164 unsigned long last_pfn,
1165 struct page *freelist)
1167 struct dma_pte *first_pte = NULL, *last_pte = NULL;
1169 pfn = max(start_pfn, pfn);
1170 pte = &pte[pfn_level_offset(pfn, level)];
1173 unsigned long level_pfn;
1175 if (!dma_pte_present(pte))
1178 level_pfn = pfn & level_mask(level);
1180 /* If range covers entire pagetable, free it */
1181 if (start_pfn <= level_pfn &&
1182 last_pfn >= level_pfn + level_size(level) - 1) {
1183 /* These subordinate page tables are going away entirely. Don't
1184 bother to clear them; we're just going to *free* them. */
1185 if (level > 1 && !dma_pte_superpage(pte))
1186 freelist = dma_pte_list_pagetables(domain, level - 1, pte, freelist);
1192 } else if (level > 1) {
1193 /* Recurse down into a level that isn't *entirely* obsolete */
1194 freelist = dma_pte_clear_level(domain, level - 1,
1195 phys_to_virt(dma_pte_addr(pte)),
1196 level_pfn, start_pfn, last_pfn,
1200 pfn += level_size(level);
1201 } while (!first_pte_in_page(++pte) && pfn <= last_pfn);
1204 domain_flush_cache(domain, first_pte,
1205 (void *)++last_pte - (void *)first_pte);
1210 /* We can't just free the pages because the IOMMU may still be walking
1211 the page tables, and may have cached the intermediate levels. The
1212 pages can only be freed after the IOTLB flush has been done. */
1213 struct page *domain_unmap(struct dmar_domain *domain,
1214 unsigned long start_pfn,
1215 unsigned long last_pfn)
1217 struct page *freelist = NULL;
1219 BUG_ON(!domain_pfn_supported(domain, start_pfn));
1220 BUG_ON(!domain_pfn_supported(domain, last_pfn));
1221 BUG_ON(start_pfn > last_pfn);
1223 /* we don't need lock here; nobody else touches the iova range */
1224 freelist = dma_pte_clear_level(domain, agaw_to_level(domain->agaw),
1225 domain->pgd, 0, start_pfn, last_pfn, NULL);
1228 if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) {
1229 struct page *pgd_page = virt_to_page(domain->pgd);
1230 pgd_page->freelist = freelist;
1231 freelist = pgd_page;
1239 void dma_free_pagelist(struct page *freelist)
1243 while ((pg = freelist)) {
1244 freelist = pg->freelist;
1245 free_pgtable_page(page_address(pg));
1249 /* iommu handling */
1250 static int iommu_alloc_root_entry(struct intel_iommu *iommu)
1252 struct root_entry *root;
1253 unsigned long flags;
1255 root = (struct root_entry *)alloc_pgtable_page(iommu->node);
1257 pr_err("Allocating root entry for %s failed\n",
1262 __iommu_flush_cache(iommu, root, ROOT_SIZE);
1264 spin_lock_irqsave(&iommu->lock, flags);
1265 iommu->root_entry = root;
1266 spin_unlock_irqrestore(&iommu->lock, flags);
1271 static void iommu_set_root_entry(struct intel_iommu *iommu)
1277 addr = virt_to_phys(iommu->root_entry);
1278 if (ecs_enabled(iommu))
1279 addr |= DMA_RTADDR_RTT;
1281 raw_spin_lock_irqsave(&iommu->register_lock, flag);
1282 dmar_writeq(iommu->reg + DMAR_RTADDR_REG, addr);
1284 writel(iommu->gcmd | DMA_GCMD_SRTP, iommu->reg + DMAR_GCMD_REG);
1286 /* Make sure hardware complete it */
1287 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
1288 readl, (sts & DMA_GSTS_RTPS), sts);
1290 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1293 static void iommu_flush_write_buffer(struct intel_iommu *iommu)
1298 if (!rwbf_quirk && !cap_rwbf(iommu->cap))
1301 raw_spin_lock_irqsave(&iommu->register_lock, flag);
1302 writel(iommu->gcmd | DMA_GCMD_WBF, iommu->reg + DMAR_GCMD_REG);
1304 /* Make sure hardware complete it */
1305 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
1306 readl, (!(val & DMA_GSTS_WBFS)), val);
1308 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1311 /* return value determines whether we need a write buffer flush */
1312 static void __iommu_flush_context(struct intel_iommu *iommu,
1313 u16 did, u16 source_id, u8 function_mask,
1320 case DMA_CCMD_GLOBAL_INVL:
1321 val = DMA_CCMD_GLOBAL_INVL;
1323 case DMA_CCMD_DOMAIN_INVL:
1324 val = DMA_CCMD_DOMAIN_INVL|DMA_CCMD_DID(did);
1326 case DMA_CCMD_DEVICE_INVL:
1327 val = DMA_CCMD_DEVICE_INVL|DMA_CCMD_DID(did)
1328 | DMA_CCMD_SID(source_id) | DMA_CCMD_FM(function_mask);
1333 val |= DMA_CCMD_ICC;
1335 raw_spin_lock_irqsave(&iommu->register_lock, flag);
1336 dmar_writeq(iommu->reg + DMAR_CCMD_REG, val);
1338 /* Make sure hardware complete it */
1339 IOMMU_WAIT_OP(iommu, DMAR_CCMD_REG,
1340 dmar_readq, (!(val & DMA_CCMD_ICC)), val);
1342 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1345 /* return value determines whether we need a write buffer flush */
1346 static void __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did,
1347 u64 addr, unsigned int size_order, u64 type)
1349 int tlb_offset = ecap_iotlb_offset(iommu->ecap);
1350 u64 val = 0, val_iva = 0;
1354 case DMA_TLB_GLOBAL_FLUSH:
1355 /* global flush doesn't need to set IVA_REG */
1356 val = DMA_TLB_GLOBAL_FLUSH|DMA_TLB_IVT;
1358 case DMA_TLB_DSI_FLUSH:
1359 val = DMA_TLB_DSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
1361 case DMA_TLB_PSI_FLUSH:
1362 val = DMA_TLB_PSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
1363 /* IH bit is passed in as part of address */
1364 val_iva = size_order | addr;
1369 /* Note: set drain read/write */
1372 * This is probably there to be super-secure. It looks like we can
1373 * ignore it without any impact.
1375 if (cap_read_drain(iommu->cap))
1376 val |= DMA_TLB_READ_DRAIN;
1378 if (cap_write_drain(iommu->cap))
1379 val |= DMA_TLB_WRITE_DRAIN;
1381 raw_spin_lock_irqsave(&iommu->register_lock, flag);
1382 /* Note: Only uses first TLB reg currently */
1384 dmar_writeq(iommu->reg + tlb_offset, val_iva);
1385 dmar_writeq(iommu->reg + tlb_offset + 8, val);
1387 /* Make sure hardware complete it */
1388 IOMMU_WAIT_OP(iommu, tlb_offset + 8,
1389 dmar_readq, (!(val & DMA_TLB_IVT)), val);
1391 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1393 /* check IOTLB invalidation granularity */
1394 if (DMA_TLB_IAIG(val) == 0)
1395 pr_err("Flush IOTLB failed\n");
1396 if (DMA_TLB_IAIG(val) != DMA_TLB_IIRG(type))
1397 pr_debug("TLB flush request %Lx, actual %Lx\n",
1398 (unsigned long long)DMA_TLB_IIRG(type),
1399 (unsigned long long)DMA_TLB_IAIG(val));
1402 static struct device_domain_info *
1403 iommu_support_dev_iotlb (struct dmar_domain *domain, struct intel_iommu *iommu,
1407 struct device_domain_info *info;
1408 struct pci_dev *pdev;
1410 assert_spin_locked(&device_domain_lock);
1412 if (!ecap_dev_iotlb_support(iommu->ecap))
1418 list_for_each_entry(info, &domain->devices, link)
1419 if (info->iommu == iommu && info->bus == bus &&
1420 info->devfn == devfn) {
1425 if (!found || !info->dev || !dev_is_pci(info->dev))
1428 pdev = to_pci_dev(info->dev);
1430 if (!pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ATS))
1433 if (!dmar_find_matched_atsr_unit(pdev))
1439 static void iommu_enable_dev_iotlb(struct device_domain_info *info)
1441 if (!info || !dev_is_pci(info->dev))
1444 pci_enable_ats(to_pci_dev(info->dev), VTD_PAGE_SHIFT);
1447 static void iommu_disable_dev_iotlb(struct device_domain_info *info)
1449 if (!info->dev || !dev_is_pci(info->dev) ||
1450 !pci_ats_enabled(to_pci_dev(info->dev)))
1453 pci_disable_ats(to_pci_dev(info->dev));
1456 static void iommu_flush_dev_iotlb(struct dmar_domain *domain,
1457 u64 addr, unsigned mask)
1460 unsigned long flags;
1461 struct device_domain_info *info;
1463 spin_lock_irqsave(&device_domain_lock, flags);
1464 list_for_each_entry(info, &domain->devices, link) {
1465 struct pci_dev *pdev;
1466 if (!info->dev || !dev_is_pci(info->dev))
1469 pdev = to_pci_dev(info->dev);
1470 if (!pci_ats_enabled(pdev))
1473 sid = info->bus << 8 | info->devfn;
1474 qdep = pci_ats_queue_depth(pdev);
1475 qi_flush_dev_iotlb(info->iommu, sid, qdep, addr, mask);
1477 spin_unlock_irqrestore(&device_domain_lock, flags);
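/* For the page-selective invalidation below, the mask is ilog2() of the page
 * count rounded up to a power of two; e.g. flushing 3 pages uses mask 2,
 * i.e. a naturally aligned 4-page region. */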
1480 static void iommu_flush_iotlb_psi(struct intel_iommu *iommu,
1481 struct dmar_domain *domain,
1482 unsigned long pfn, unsigned int pages,
1485 unsigned int mask = ilog2(__roundup_pow_of_two(pages));
1486 uint64_t addr = (uint64_t)pfn << VTD_PAGE_SHIFT;
1487 u16 did = domain->iommu_did[iommu->seq_id];
1494 * Fall back to domain-selective flush if there is no PSI support or the size is too big.
1496 * PSI requires the page size to be a power of two, and the base address
1497 * to be naturally aligned to that size.
1499 if (!cap_pgsel_inv(iommu->cap) || mask > cap_max_amask_val(iommu->cap))
1500 iommu->flush.flush_iotlb(iommu, did, 0, 0,
1503 iommu->flush.flush_iotlb(iommu, did, addr | ih, mask,
1507 * In caching mode, changes of pages from non-present to present require
1508 * a flush. However, the device IOTLB doesn't need to be flushed in this case.
1510 if (!cap_caching_mode(iommu->cap) || !map)
1511 iommu_flush_dev_iotlb(get_iommu_domain(iommu, did),
1515 static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu)
1518 unsigned long flags;
1520 raw_spin_lock_irqsave(&iommu->register_lock, flags);
1521 pmen = readl(iommu->reg + DMAR_PMEN_REG);
1522 pmen &= ~DMA_PMEN_EPM;
1523 writel(pmen, iommu->reg + DMAR_PMEN_REG);
1525 /* wait for the protected region status bit to clear */
1526 IOMMU_WAIT_OP(iommu, DMAR_PMEN_REG,
1527 readl, !(pmen & DMA_PMEN_PRS), pmen);
1529 raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
1532 static void iommu_enable_translation(struct intel_iommu *iommu)
1535 unsigned long flags;
1537 raw_spin_lock_irqsave(&iommu->register_lock, flags);
1538 iommu->gcmd |= DMA_GCMD_TE;
1539 writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
1541 /* Make sure hardware complete it */
1542 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
1543 readl, (sts & DMA_GSTS_TES), sts);
1545 raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
1548 static void iommu_disable_translation(struct intel_iommu *iommu)
1553 raw_spin_lock_irqsave(&iommu->register_lock, flag);
1554 iommu->gcmd &= ~DMA_GCMD_TE;
1555 writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
1557 /* Make sure hardware complete it */
1558 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
1559 readl, (!(sts & DMA_GSTS_TES)), sts);
1561 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1565 static int iommu_init_domains(struct intel_iommu *iommu)
1567 u32 ndomains, nlongs;
1570 ndomains = cap_ndoms(iommu->cap);
1571 pr_debug("%s: Number of Domains supported <%d>\n",
1572 iommu->name, ndomains);
1573 nlongs = BITS_TO_LONGS(ndomains);
1575 spin_lock_init(&iommu->lock);
1577 iommu->domain_ids = kcalloc(nlongs, sizeof(unsigned long), GFP_KERNEL);
1578 if (!iommu->domain_ids) {
1579 pr_err("%s: Allocating domain id array failed\n",
1584 size = ((ndomains >> 8) + 1) * sizeof(struct dmar_domain **);
1585 iommu->domains = kzalloc(size, GFP_KERNEL);
1587 if (iommu->domains) {
1588 size = 256 * sizeof(struct dmar_domain *);
1589 iommu->domains[0] = kzalloc(size, GFP_KERNEL);
1592 if (!iommu->domains || !iommu->domains[0]) {
1593 pr_err("%s: Allocating domain array failed\n",
1595 kfree(iommu->domain_ids);
1596 kfree(iommu->domains);
1597 iommu->domain_ids = NULL;
1598 iommu->domains = NULL;
1605 * If Caching mode is set, then invalid translations are tagged
1606 * with domain-id 0, hence we need to pre-allocate it. We also
1607 * use domain-id 0 as a marker for non-allocated domain-id, so
1608 * make sure it is not used for a real domain.
1610 set_bit(0, iommu->domain_ids);
1615 static void disable_dmar_iommu(struct intel_iommu *iommu)
1617 struct device_domain_info *info, *tmp;
1618 unsigned long flags;
1620 if (!iommu->domains || !iommu->domain_ids)
1623 spin_lock_irqsave(&device_domain_lock, flags);
1624 list_for_each_entry_safe(info, tmp, &device_domain_list, global) {
1625 struct dmar_domain *domain;
1627 if (info->iommu != iommu)
1630 if (!info->dev || !info->domain)
1633 domain = info->domain;
1635 dmar_remove_one_dev_info(domain, info->dev);
1637 if (!domain_type_is_vm_or_si(domain))
1638 domain_exit(domain);
1640 spin_unlock_irqrestore(&device_domain_lock, flags);
1642 if (iommu->gcmd & DMA_GCMD_TE)
1643 iommu_disable_translation(iommu);
1646 static void free_dmar_iommu(struct intel_iommu *iommu)
1648 if ((iommu->domains) && (iommu->domain_ids)) {
1649 int elems = (cap_ndoms(iommu->cap) >> 8) + 1;
1652 for (i = 0; i < elems; i++)
1653 kfree(iommu->domains[i]);
1654 kfree(iommu->domains);
1655 kfree(iommu->domain_ids);
1656 iommu->domains = NULL;
1657 iommu->domain_ids = NULL;
1660 g_iommus[iommu->seq_id] = NULL;
1662 /* free context mapping */
1663 free_context_table(iommu);
1666 static struct dmar_domain *alloc_domain(int flags)
1668 struct dmar_domain *domain;
1670 domain = alloc_domain_mem();
1674 memset(domain, 0, sizeof(*domain));
1676 domain->flags = flags;
1677 INIT_LIST_HEAD(&domain->devices);
682 /* Must be called with iommu->lock held */
1683 static int domain_attach_iommu(struct dmar_domain *domain,
1684 struct intel_iommu *iommu)
1686 unsigned long ndomains;
1689 assert_spin_locked(&device_domain_lock);
1690 assert_spin_locked(&iommu->lock);
1692 domain->iommu_refcnt[iommu->seq_id] += 1;
1693 domain->iommu_count += 1;
1694 if (domain->iommu_refcnt[iommu->seq_id] == 1) {
1695 ndomains = cap_ndoms(iommu->cap);
1696 num = find_first_zero_bit(iommu->domain_ids, ndomains);
1698 if (num >= ndomains) {
1699 pr_err("%s: No free domain ids\n", iommu->name);
1700 domain->iommu_refcnt[iommu->seq_id] -= 1;
1701 domain->iommu_count -= 1;
1705 set_bit(num, iommu->domain_ids);
1706 set_iommu_domain(iommu, num, domain);
1708 domain->iommu_did[iommu->seq_id] = num;
1709 domain->nid = iommu->node;
1711 domain_update_iommu_cap(domain);
1717 static int domain_detach_iommu(struct dmar_domain *domain,
1718 struct intel_iommu *iommu)
1720 int num, count = INT_MAX;
1722 assert_spin_locked(&device_domain_lock);
1723 assert_spin_locked(&iommu->lock);
1725 domain->iommu_refcnt[iommu->seq_id] -= 1;
1726 count = --domain->iommu_count;
1727 if (domain->iommu_refcnt[iommu->seq_id] == 0) {
1728 num = domain->iommu_did[iommu->seq_id];
1729 clear_bit(num, iommu->domain_ids);
1730 set_iommu_domain(iommu, num, NULL);
1732 domain_update_iommu_cap(domain);
1733 domain->iommu_did[iommu->seq_id] = 0;
1739 static struct iova_domain reserved_iova_list;
1740 static struct lock_class_key reserved_rbtree_key;
1742 static int dmar_init_reserved_ranges(void)
1744 struct pci_dev *pdev = NULL;
1748 init_iova_domain(&reserved_iova_list, VTD_PAGE_SIZE, IOVA_START_PFN,
1751 lockdep_set_class(&reserved_iova_list.iova_rbtree_lock,
1752 &reserved_rbtree_key);
1754 /* IOAPIC ranges shouldn't be accessed by DMA */
1755 iova = reserve_iova(&reserved_iova_list, IOVA_PFN(IOAPIC_RANGE_START),
1756 IOVA_PFN(IOAPIC_RANGE_END));
1758 pr_err("Reserve IOAPIC range failed\n");
1762 /* Reserve all PCI MMIO to avoid peer-to-peer access */
1763 for_each_pci_dev(pdev) {
1766 for (i = 0; i < PCI_NUM_RESOURCES; i++) {
1767 r = &pdev->resource[i];
1768 if (!r->flags || !(r->flags & IORESOURCE_MEM))
1770 iova = reserve_iova(&reserved_iova_list,
1774 pr_err("Reserve iova failed\n");
1782 static void domain_reserve_special_ranges(struct dmar_domain *domain)
1784 copy_reserved_iova(&reserved_iova_list, &domain->iovad);
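/* guestwidth_to_adjustwidth() rounds the guest width up so that the bits
 * above the 12-bit page offset form whole 9-bit levels; e.g. a 36-bit guest
 * width becomes 39 bits (a complete 3-level table). */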
1787 static inline int guestwidth_to_adjustwidth(int gaw)
1790 int r = (gaw - 12) % 9;
1801 static int domain_init(struct dmar_domain *domain, struct intel_iommu *iommu,
1804 int adjust_width, agaw;
1805 unsigned long sagaw;
1807 init_iova_domain(&domain->iovad, VTD_PAGE_SIZE, IOVA_START_PFN,
1809 domain_reserve_special_ranges(domain);
1811 /* calculate AGAW */
1812 if (guest_width > cap_mgaw(iommu->cap))
1813 guest_width = cap_mgaw(iommu->cap);
1814 domain->gaw = guest_width;
1815 adjust_width = guestwidth_to_adjustwidth(guest_width);
1816 agaw = width_to_agaw(adjust_width);
1817 sagaw = cap_sagaw(iommu->cap);
1818 if (!test_bit(agaw, &sagaw)) {
1819 /* hardware doesn't support it, choose a bigger one */
1820 pr_debug("Hardware doesn't support agaw %d\n", agaw);
1821 agaw = find_next_bit(&sagaw, 5, agaw);
1825 domain->agaw = agaw;
1827 if (ecap_coherent(iommu->ecap))
1828 domain->iommu_coherency = 1;
1830 domain->iommu_coherency = 0;
1832 if (ecap_sc_support(iommu->ecap))
1833 domain->iommu_snooping = 1;
1835 domain->iommu_snooping = 0;
1837 if (intel_iommu_superpage)
1838 domain->iommu_superpage = fls(cap_super_page_val(iommu->cap));
1840 domain->iommu_superpage = 0;
1842 domain->nid = iommu->node;
1844 /* always allocate the top pgd */
1845 domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
1848 __iommu_flush_cache(iommu, domain->pgd, PAGE_SIZE);
1852 static void domain_exit(struct dmar_domain *domain)
1854 struct page *freelist = NULL;
1856 /* Domain 0 is reserved, so don't process it */
1860 /* Flush any lazy unmaps that may reference this domain */
1861 if (!intel_iommu_strict)
1862 flush_unmaps_timeout(0);
1864 /* Remove associated devices and clear attached or cached domains */
1866 domain_remove_dev_info(domain);
1870 put_iova_domain(&domain->iovad);
1872 freelist = domain_unmap(domain, 0, DOMAIN_MAX_PFN(domain->gaw));
1874 dma_free_pagelist(freelist);
1876 free_domain_mem(domain);
1879 static int domain_context_mapping_one(struct dmar_domain *domain,
1880 struct intel_iommu *iommu,
1883 u16 did = domain->iommu_did[iommu->seq_id];
1884 int translation = CONTEXT_TT_MULTI_LEVEL;
1885 struct device_domain_info *info = NULL;
1886 struct context_entry *context;
1887 unsigned long flags;
1888 struct dma_pte *pgd;
1893 if (hw_pass_through && domain_type_is_si(domain))
1894 translation = CONTEXT_TT_PASS_THROUGH;
1896 pr_debug("Set context mapping for %02x:%02x.%d\n",
1897 bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
1899 BUG_ON(!domain->pgd);
1901 spin_lock_irqsave(&device_domain_lock, flags);
1902 spin_lock(&iommu->lock);
1905 context = iommu_context_addr(iommu, bus, devfn, 1);
1910 if (context_present(context))
1915 context_clear_entry(context);
1916 context_set_domain_id(context, did);
1919 * Skip top levels of page tables for iommus which have less agaw
1920 * than the default. Unnecessary for PT mode.
1922 if (translation != CONTEXT_TT_PASS_THROUGH) {
1923 for (agaw = domain->agaw; agaw != iommu->agaw; agaw--) {
1925 pgd = phys_to_virt(dma_pte_addr(pgd));
1926 if (!dma_pte_present(pgd))
1930 info = iommu_support_dev_iotlb(domain, iommu, bus, devfn);
1931 translation = info ? CONTEXT_TT_DEV_IOTLB :
1932 CONTEXT_TT_MULTI_LEVEL;
1934 context_set_address_root(context, virt_to_phys(pgd));
1935 context_set_address_width(context, iommu->agaw);
1938 * In pass through mode, AW must be programmed to
1939 * indicate the largest AGAW value supported by
1940 * hardware. And ASR is ignored by hardware.
1942 context_set_address_width(context, iommu->msagaw);
1945 context_set_translation_type(context, translation);
1946 context_set_fault_enable(context);
1947 context_set_present(context);
1948 domain_flush_cache(domain, context, sizeof(*context));
1951 * It's a non-present to present mapping. If hardware doesn't cache
1952 * non-present entries we only need to flush the write-buffer. If it
1953 * _does_ cache non-present entries, then it does so in the special
1954 * domain #0, which we have to flush:
1956 if (cap_caching_mode(iommu->cap)) {
1957 iommu->flush.flush_context(iommu, 0,
1958 (((u16)bus) << 8) | devfn,
1959 DMA_CCMD_MASK_NOBIT,
1960 DMA_CCMD_DEVICE_INVL);
1961 iommu->flush.flush_iotlb(iommu, did, 0, 0, DMA_TLB_DSI_FLUSH);
1963 iommu_flush_write_buffer(iommu);
1965 iommu_enable_dev_iotlb(info);
1970 spin_unlock(&iommu->lock);
1971 spin_unlock_irqrestore(&device_domain_lock, flags);
1976 struct domain_context_mapping_data {
1977 struct dmar_domain *domain;
1978 struct intel_iommu *iommu;
1981 static int domain_context_mapping_cb(struct pci_dev *pdev,
1982 u16 alias, void *opaque)
1984 struct domain_context_mapping_data *data = opaque;
1986 return domain_context_mapping_one(data->domain, data->iommu,
1987 PCI_BUS_NUM(alias), alias & 0xff);
1991 domain_context_mapping(struct dmar_domain *domain, struct device *dev)
1993 struct intel_iommu *iommu;
1995 struct domain_context_mapping_data data;
1997 iommu = device_to_iommu(dev, &bus, &devfn);
2001 if (!dev_is_pci(dev))
2002 return domain_context_mapping_one(domain, iommu, bus, devfn);
2004 data.domain = domain;
2007 return pci_for_each_dma_alias(to_pci_dev(dev),
2008 &domain_context_mapping_cb, &data);
2011 static int domain_context_mapped_cb(struct pci_dev *pdev,
2012 u16 alias, void *opaque)
2014 struct intel_iommu *iommu = opaque;
2016 return !device_context_mapped(iommu, PCI_BUS_NUM(alias), alias & 0xff);
2019 static int domain_context_mapped(struct device *dev)
2021 struct intel_iommu *iommu;
2024 iommu = device_to_iommu(dev, &bus, &devfn);
2028 if (!dev_is_pci(dev))
2029 return device_context_mapped(iommu, bus, devfn);
2031 return !pci_for_each_dma_alias(to_pci_dev(dev),
2032 domain_context_mapped_cb, iommu);
2035 /* Returns a number of VTD pages, but aligned to MM page size */
2036 static inline unsigned long aligned_nrpages(unsigned long host_addr,
2039 host_addr &= ~PAGE_MASK;
2040 return PAGE_ALIGN(host_addr + size) >> VTD_PAGE_SHIFT;
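/* Example: an 8KiB buffer starting at offset 0x234 within a page gives
 * aligned_nrpages(..., 0x2000) == 3 VT-d pages, since the buffer straddles
 * three 4KiB frames. */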
2043 /* Return largest possible superpage level for a given mapping */
2044 static inline int hardware_largepage_caps(struct dmar_domain *domain,
2045 unsigned long iov_pfn,
2046 unsigned long phy_pfn,
2047 unsigned long pages)
2049 int support, level = 1;
2050 unsigned long pfnmerge;
2052 support = domain->iommu_superpage;
2054 /* To use a large page, the virtual *and* physical addresses
2055 must be aligned to 2MiB/1GiB/etc. Lower bits set in either
2056 of them will mean we have to use smaller pages. So just
2057 merge them and check both at once. */
2058 pfnmerge = iov_pfn | phy_pfn;
2060 while (support && !(pfnmerge & ~VTD_STRIDE_MASK)) {
2061 pages >>= VTD_STRIDE_SHIFT;
2064 pfnmerge >>= VTD_STRIDE_SHIFT;
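/* Superpage level numbering: level 1 is an ordinary 4KiB PTE, level 2 covers
 * 2MiB and level 3 covers 1GiB. A larger level is only chosen while both PFNs
 * stay aligned to it and enough pages remain to fill it. */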
2071 static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
2072 struct scatterlist *sg, unsigned long phys_pfn,
2073 unsigned long nr_pages, int prot)
2075 struct dma_pte *first_pte = NULL, *pte = NULL;
2076 phys_addr_t uninitialized_var(pteval);
2077 unsigned long sg_res = 0;
2078 unsigned int largepage_lvl = 0;
2079 unsigned long lvl_pages = 0;
2081 BUG_ON(!domain_pfn_supported(domain, iov_pfn + nr_pages - 1));
2083 if ((prot & (DMA_PTE_READ|DMA_PTE_WRITE)) == 0)
2086 prot &= DMA_PTE_READ | DMA_PTE_WRITE | DMA_PTE_SNP;
2090 pteval = ((phys_addr_t)phys_pfn << VTD_PAGE_SHIFT) | prot;
2093 while (nr_pages > 0) {
2097 sg_res = aligned_nrpages(sg->offset, sg->length);
2098 sg->dma_address = ((dma_addr_t)iov_pfn << VTD_PAGE_SHIFT) + sg->offset;
2099 sg->dma_length = sg->length;
2100 pteval = page_to_phys(sg_page(sg)) | prot;
2101 phys_pfn = pteval >> VTD_PAGE_SHIFT;
2105 largepage_lvl = hardware_largepage_caps(domain, iov_pfn, phys_pfn, sg_res);
2107 first_pte = pte = pfn_to_dma_pte(domain, iov_pfn, &largepage_lvl);
2110 /* It is a large page */
2111 if (largepage_lvl > 1) {
2112 pteval |= DMA_PTE_LARGE_PAGE;
2113 lvl_pages = lvl_to_nr_pages(largepage_lvl);
2115 * Ensure that old small page tables are
2116 * removed to make room for superpage,
2119 dma_pte_free_pagetable(domain, iov_pfn,
2120 iov_pfn + lvl_pages - 1);
2122 pteval &= ~(uint64_t)DMA_PTE_LARGE_PAGE;
2126 /* We don't need lock here, nobody else
2127 * touches the iova range
2129 tmp = cmpxchg64_local(&pte->val, 0ULL, pteval);
2131 static int dumps = 5;
2132 pr_crit("ERROR: DMA PTE for vPFN 0x%lx already set (to %llx not %llx)\n",
2133 iov_pfn, tmp, (unsigned long long)pteval);
2136 debug_dma_dump_mappings(NULL);
2141 lvl_pages = lvl_to_nr_pages(largepage_lvl);
2143 BUG_ON(nr_pages < lvl_pages);
2144 BUG_ON(sg_res < lvl_pages);
2146 nr_pages -= lvl_pages;
2147 iov_pfn += lvl_pages;
2148 phys_pfn += lvl_pages;
2149 pteval += lvl_pages * VTD_PAGE_SIZE;
2150 sg_res -= lvl_pages;
2152 /* If the next PTE would be the first in a new page, then we
2153 need to flush the cache on the entries we've just written.
2154 And then we'll need to recalculate 'pte', so clear it and
2155 let it get set again in the if (!pte) block above.
2157 If we're done (!nr_pages) we need to flush the cache too.
2159 Also if we've been setting superpages, we may need to
2160 recalculate 'pte' and switch back to smaller pages for the
2161 end of the mapping, if the trailing size is not enough to
2162 use another superpage (i.e. sg_res < lvl_pages). */
2164 if (!nr_pages || first_pte_in_page(pte) ||
2165 (largepage_lvl > 1 && sg_res < lvl_pages)) {
2166 domain_flush_cache(domain, first_pte,
2167 (void *)pte - (void *)first_pte);
2171 if (!sg_res && nr_pages)
2177 static inline int domain_sg_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
2178 struct scatterlist *sg, unsigned long nr_pages,
2181 return __domain_mapping(domain, iov_pfn, sg, 0, nr_pages, prot);
2184 static inline int domain_pfn_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
2185 unsigned long phys_pfn, unsigned long nr_pages,
2188 return __domain_mapping(domain, iov_pfn, NULL, phys_pfn, nr_pages, prot);
2191 static void domain_context_clear_one(struct intel_iommu *iommu, u8 bus, u8 devfn)
2196 clear_context_table(iommu, bus, devfn);
2197 iommu->flush.flush_context(iommu, 0, 0, 0,
2198 DMA_CCMD_GLOBAL_INVL);
2199 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
2202 static inline void unlink_domain_info(struct device_domain_info *info)
2204 assert_spin_locked(&device_domain_lock);
2205 list_del(&info->link);
2206 list_del(&info->global);
2208 info->dev->archdata.iommu = NULL;
2211 static void domain_remove_dev_info(struct dmar_domain *domain)
2213 struct device_domain_info *info, *tmp;
2214 unsigned long flags;
2216 spin_lock_irqsave(&device_domain_lock, flags);
2217 list_for_each_entry_safe(info, tmp, &domain->devices, link)
2218 __dmar_remove_one_dev_info(info);
2219 spin_unlock_irqrestore(&device_domain_lock, flags);
2224 * Note: we use struct device->archdata.iommu to store the info
2226 static struct dmar_domain *find_domain(struct device *dev)
2228 struct device_domain_info *info;
2230 /* No lock here, assumes no domain exit in normal case */
2231 info = dev->archdata.iommu;
2233 return info->domain;
2237 static inline struct device_domain_info *
2238 dmar_search_domain_by_dev_info(int segment, int bus, int devfn)
2240 struct device_domain_info *info;
2242 list_for_each_entry(info, &device_domain_list, global)
2243 if (info->iommu->segment == segment && info->bus == bus &&
2244 info->devfn == devfn)
2250 static struct dmar_domain *dmar_insert_one_dev_info(struct intel_iommu *iommu,
2253 struct dmar_domain *domain)
2255 struct dmar_domain *found = NULL;
2256 struct device_domain_info *info;
2257 unsigned long flags;
2260 info = alloc_devinfo_mem();
2265 info->devfn = devfn;
2267 info->domain = domain;
2268 info->iommu = iommu;
2270 spin_lock_irqsave(&device_domain_lock, flags);
2272 found = find_domain(dev);
2274 struct device_domain_info *info2;
2275 info2 = dmar_search_domain_by_dev_info(iommu->segment, bus, devfn);
2277 found = info2->domain;
2280 spin_unlock_irqrestore(&device_domain_lock, flags);
2281 free_devinfo_mem(info);
2282 /* Caller must free the original domain */
2286 spin_lock(&iommu->lock);
2287 ret = domain_attach_iommu(domain, iommu);
2288 spin_unlock(&iommu->lock);
2291 spin_unlock_irqrestore(&device_domain_lock, flags);
2295 list_add(&info->link, &domain->devices);
2296 list_add(&info->global, &device_domain_list);
2298 dev->archdata.iommu = info;
2299 spin_unlock_irqrestore(&device_domain_lock, flags);
2301 if (dev && domain_context_mapping(domain, dev)) {
2302 pr_err("Domain context map for %s failed\n", dev_name(dev));
2303 dmar_remove_one_dev_info(domain, dev);
2310 static int get_last_alias(struct pci_dev *pdev, u16 alias, void *opaque)
2312 *(u16 *)opaque = alias;
2316 /* domain is initialized */
2317 static struct dmar_domain *get_domain_for_dev(struct device *dev, int gaw)
2319 struct device_domain_info *info = NULL;
2320 struct dmar_domain *domain, *tmp;
2321 struct intel_iommu *iommu;
2322 u16 req_id, dma_alias;
2323 unsigned long flags;
2326 domain = find_domain(dev);
2330 iommu = device_to_iommu(dev, &bus, &devfn);
2334 req_id = ((u16)bus << 8) | devfn;
2336 if (dev_is_pci(dev)) {
2337 struct pci_dev *pdev = to_pci_dev(dev);
2339 pci_for_each_dma_alias(pdev, get_last_alias, &dma_alias);
2341 spin_lock_irqsave(&device_domain_lock, flags);
2342 info = dmar_search_domain_by_dev_info(pci_domain_nr(pdev->bus),
2343 PCI_BUS_NUM(dma_alias),
2346 iommu = info->iommu;
2347 domain = info->domain;
2349 spin_unlock_irqrestore(&device_domain_lock, flags);
2351 /* The DMA alias already has a domain; use it */
2356 /* Allocate and initialize new domain for the device */
2357 domain = alloc_domain(0);
2360 if (domain_init(domain, iommu, gaw)) {
2361 domain_exit(domain);
2365 /* register PCI DMA alias device */
2366 if (req_id != dma_alias && dev_is_pci(dev)) {
2367 tmp = dmar_insert_one_dev_info(iommu, PCI_BUS_NUM(dma_alias),
2368 dma_alias & 0xff, NULL, domain);
2370 if (!tmp || tmp != domain) {
2371 domain_exit(domain);
2380 tmp = dmar_insert_one_dev_info(iommu, bus, devfn, dev, domain);
2382 if (!tmp || tmp != domain) {
2383 domain_exit(domain);
2390 static int iommu_identity_mapping;
2391 #define IDENTMAP_ALL 1
2392 #define IDENTMAP_GFX 2
2393 #define IDENTMAP_AZALIA 4
2395 static int iommu_domain_identity_map(struct dmar_domain *domain,
2396 unsigned long long start,
2397 unsigned long long end)
2399 unsigned long first_vpfn = start >> VTD_PAGE_SHIFT;
2400 unsigned long last_vpfn = end >> VTD_PAGE_SHIFT;
2402 if (!reserve_iova(&domain->iovad, dma_to_mm_pfn(first_vpfn),
2403 dma_to_mm_pfn(last_vpfn))) {
2404 pr_err("Reserving iova failed\n");
2408 pr_debug("Mapping reserved region %llx-%llx\n", start, end);
2410 * The RMRR range might overlap with the physical memory range; clear it first.
2413 dma_pte_clear_range(domain, first_vpfn, last_vpfn);
2415 return domain_pfn_mapping(domain, first_vpfn, first_vpfn,
2416 last_vpfn - first_vpfn + 1,
2417 DMA_PTE_READ|DMA_PTE_WRITE);
2420 static int iommu_prepare_identity_map(struct device *dev,
2421 unsigned long long start,
2422 unsigned long long end)
2424 struct dmar_domain *domain;
2427 domain = get_domain_for_dev(dev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
2431 /* For _hardware_ passthrough, don't bother. But for software
2432 passthrough, we do it anyway -- it may indicate a memory
2433 range which is reserved in E820, so which didn't get set
2434 up to start with in si_domain */
2435 if (domain == si_domain && hw_pass_through) {
2436 pr_warn("Ignoring identity map for HW passthrough device %s [0x%Lx - 0x%Lx]\n",
2437 dev_name(dev), start, end);
2441 pr_info("Setting identity map for device %s [0x%Lx - 0x%Lx]\n",
2442 dev_name(dev), start, end);
2445 WARN(1, "Your BIOS is broken; RMRR ends before it starts!\n"
2446 "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
2447 dmi_get_system_info(DMI_BIOS_VENDOR),
2448 dmi_get_system_info(DMI_BIOS_VERSION),
2449 dmi_get_system_info(DMI_PRODUCT_VERSION));
2454 if (end >> agaw_to_width(domain->agaw)) {
2455 WARN(1, "Your BIOS is broken; RMRR exceeds permitted address width (%d bits)\n"
2456 "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
2457 agaw_to_width(domain->agaw),
2458 dmi_get_system_info(DMI_BIOS_VENDOR),
2459 dmi_get_system_info(DMI_BIOS_VERSION),
2460 dmi_get_system_info(DMI_PRODUCT_VERSION));
2465 ret = iommu_domain_identity_map(domain, start, end);
2472 domain_exit(domain);
2476 static inline int iommu_prepare_rmrr_dev(struct dmar_rmrr_unit *rmrr,
2479 if (dev->archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO)
2481 return iommu_prepare_identity_map(dev, rmrr->base_address,
2485 #ifdef CONFIG_INTEL_IOMMU_FLOPPY_WA
2486 static inline void iommu_prepare_isa(void)
2488 struct pci_dev *pdev;
2491 pdev = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, NULL);
2495 pr_info("Prepare 0-16MiB unity mapping for LPC\n");
2496 ret = iommu_prepare_identity_map(&pdev->dev, 0, 16*1024*1024 - 1);
2499 pr_err("Failed to create 0-16MiB identity map - floppy might not work\n");
2504 static inline void iommu_prepare_isa(void)
2508 #endif /* !CONFIG_INTEL_IOMMU_FLOPPY_WA */
2510 static int md_domain_init(struct dmar_domain *domain, int guest_width);
2512 static int __init si_domain_init(int hw)
2516 si_domain = alloc_domain(DOMAIN_FLAG_STATIC_IDENTITY);
2520 if (md_domain_init(si_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
2521 domain_exit(si_domain);
2525 pr_debug("Identity mapping domain allocated\n");
2530 for_each_online_node(nid) {
2531 unsigned long start_pfn, end_pfn;
2534 for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
2535 ret = iommu_domain_identity_map(si_domain,
2536 PFN_PHYS(start_pfn), PFN_PHYS(end_pfn));
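/*
 * After si_domain_init() the static identity domain holds a 1:1 mapping
 * of every RAM range known at boot; memory added later is covered by
 * intel_iommu_memory_notifier() on MEM_GOING_ONLINE.
 */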
2545 static int identity_mapping(struct device *dev)
2547 struct device_domain_info *info;
2549 if (likely(!iommu_identity_mapping))
2552 info = dev->archdata.iommu;
2553 if (info && info != DUMMY_DEVICE_DOMAIN_INFO)
2554 return (info->domain == si_domain);
2559 static int domain_add_dev_info(struct dmar_domain *domain, struct device *dev)
2561 struct dmar_domain *ndomain;
2562 struct intel_iommu *iommu;
2565 iommu = device_to_iommu(dev, &bus, &devfn);
2569 ndomain = dmar_insert_one_dev_info(iommu, bus, devfn, dev, domain);
2570 if (ndomain != domain)
2576 static bool device_has_rmrr(struct device *dev)
2578 struct dmar_rmrr_unit *rmrr;
2583 for_each_rmrr_units(rmrr) {
2585 * Return TRUE if this RMRR contains the device we are looking for.
2588 for_each_active_dev_scope(rmrr->devices,
2589 rmrr->devices_cnt, i, tmp)
2600 * There are a couple cases where we need to restrict the functionality of
2601 * devices associated with RMRRs. The first is when evaluating a device for
2602 * identity mapping because problems exist when devices are moved in and out
2603 * of domains and their respective RMRR information is lost. This means that
2604 * a device with associated RMRRs will never be in a "passthrough" domain.
2605 * The second is use of the device through the IOMMU API. This interface
2606 * expects to have full control of the IOVA space for the device. We cannot
2607 * satisfy both the requirement that RMRR access is maintained and have an
2608 * unencumbered IOVA space. We also have no ability to quiesce the device's
2609 * use of the RMRR space or even inform the IOMMU API user of the restriction.
2610 * We therefore prevent devices associated with an RMRR from participating in
2611 * the IOMMU API, which eliminates them from device assignment.
2613 * In both cases we assume that PCI USB devices with RMRRs have them largely
2614 * for historical reasons and that the RMRR space is not actively used post
2615 * boot. This exclusion may change if vendors begin to abuse it.
2617 * The same exception is made for graphics devices, with the requirement that
2618 * any use of the RMRR regions will be torn down before assigning the device
2621 static bool device_is_rmrr_locked(struct device *dev)
2623 if (!device_has_rmrr(dev))
2626 if (dev_is_pci(dev)) {
2627 struct pci_dev *pdev = to_pci_dev(dev);
2629 if (IS_USB_DEVICE(pdev) || IS_GFX_DEVICE(pdev))
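/*
 * In short: device_is_rmrr_locked() is true for any RMRR-associated
 * device except USB and graphics devices, and such locked devices are
 * refused both identity mapping and IOMMU API attachment (see
 * intel_iommu_attach_device()).
 */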
2636 static int iommu_should_identity_map(struct device *dev, int startup)
2639 if (dev_is_pci(dev)) {
2640 struct pci_dev *pdev = to_pci_dev(dev);
2642 if (device_is_rmrr_locked(dev))
2645 if ((iommu_identity_mapping & IDENTMAP_AZALIA) && IS_AZALIA(pdev))
2648 if ((iommu_identity_mapping & IDENTMAP_GFX) && IS_GFX_DEVICE(pdev))
2651 if (!(iommu_identity_mapping & IDENTMAP_ALL))
2655 * We want to start off with all devices in the 1:1 domain, and
2656 * take them out later if we find they can't access all of memory.
2658 * However, we can't do this for PCI devices behind bridges,
2659 * because all PCI devices behind the same bridge will end up
2660 * with the same source-id on their transactions.
2662 * Practically speaking, we can't change things around for these
2663 * devices at run-time, because we can't be sure there'll be no
2664 * DMA transactions in flight for any of their siblings.
2666 * So PCI devices (unless they're on the root bus) as well as
2667 * their parent PCI-PCI or PCIe-PCI bridges must be left _out_ of
2668 * the 1:1 domain, just in _case_ one of their siblings turns out
2669 * not to be able to map all of memory.
2671 if (!pci_is_pcie(pdev)) {
2672 if (!pci_is_root_bus(pdev->bus))
2674 if (pdev->class >> 8 == PCI_CLASS_BRIDGE_PCI)
2676 } else if (pci_pcie_type(pdev) == PCI_EXP_TYPE_PCI_BRIDGE)
2679 if (device_has_rmrr(dev))
2684 * At boot time, we don't yet know if devices will be 64-bit capable.
2685 * Assume that they will -- if they turn out not to be, then we can
2686 * take them out of the 1:1 domain later.
2690 * If the device's dma_mask is less than the system's memory
2691 * size then this is not a candidate for identity mapping.
2693 u64 dma_mask = *dev->dma_mask;
2695 if (dev->coherent_dma_mask &&
2696 dev->coherent_dma_mask < dma_mask)
2697 dma_mask = dev->coherent_dma_mask;
2699 return dma_mask >= dma_get_required_mask(dev);
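/*
 * Decision order above, roughly: RMRR-locked devices never get the 1:1
 * domain (non-PCI devices with an RMRR are excluded as well); Azalia and
 * graphics devices qualify only when the matching IDENTMAP_* flag is set;
 * everything else requires IDENTMAP_ALL.  Conventional PCI devices behind
 * a bridge are excluded because siblings share a source-id.  At boot we
 * optimistically assume 64-bit capability; on later (non-startup) checks
 * the device's DMA mask must cover dma_get_required_mask() for it to stay
 * in the shared 1:1 domain.
 */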
2705 static int __init dev_prepare_static_identity_mapping(struct device *dev, int hw)
2709 if (!iommu_should_identity_map(dev, 1))
2712 ret = domain_add_dev_info(si_domain, dev);
2714 pr_info("%s identity mapping for device %s\n",
2715 hw ? "Hardware" : "Software", dev_name(dev));
2716 else if (ret == -ENODEV)
2717 /* device not associated with an iommu */
2724 static int __init iommu_prepare_static_identity_mapping(int hw)
2726 struct pci_dev *pdev = NULL;
2727 struct dmar_drhd_unit *drhd;
2728 struct intel_iommu *iommu;
2733 for_each_pci_dev(pdev) {
2734 ret = dev_prepare_static_identity_mapping(&pdev->dev, hw);
2739 for_each_active_iommu(iommu, drhd)
2740 for_each_active_dev_scope(drhd->devices, drhd->devices_cnt, i, dev) {
2741 struct acpi_device_physical_node *pn;
2742 struct acpi_device *adev;
2744 if (dev->bus != &acpi_bus_type)
2747 adev = to_acpi_device(dev);
2748 mutex_lock(&adev->physical_node_lock);
2749 list_for_each_entry(pn, &adev->physical_node_list, node) {
2750 ret = dev_prepare_static_identity_mapping(pn->dev, hw);
2754 mutex_unlock(&adev->physical_node_lock);
2762 static void intel_iommu_init_qi(struct intel_iommu *iommu)
2765 * Start from a sane iommu hardware state.
2766 * If the queued invalidation is already initialized by us
2767 * (for example, while enabling interrupt-remapping) then
2768 * things are already rolling from a sane state.
2772 * Clear any previous faults.
2774 dmar_fault(-1, iommu);
2776 * Disable queued invalidation if supported and already enabled
2777 * before OS handover.
2779 dmar_disable_qi(iommu);
2782 if (dmar_enable_qi(iommu)) {
2784 * Queued Invalidate not enabled, use Register Based Invalidate
2786 iommu->flush.flush_context = __iommu_flush_context;
2787 iommu->flush.flush_iotlb = __iommu_flush_iotlb;
2788 pr_info("%s: Using Register based invalidation\n",
2791 iommu->flush.flush_context = qi_flush_context;
2792 iommu->flush.flush_iotlb = qi_flush_iotlb;
2793 pr_info("%s: Using Queued invalidation\n", iommu->name);
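/*
 * Either way iommu->flush ends up usable: queued invalidation
 * (qi_flush_*) when dmar_enable_qi() succeeds, otherwise the slower
 * register-based __iommu_flush_* path.  Callers do not need to know
 * which mechanism is in use.
 */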
2797 static int copy_context_table(struct intel_iommu *iommu,
2798 struct root_entry *old_re,
2799 struct context_entry **tbl,
2802 struct context_entry *old_ce = NULL, *new_ce = NULL, ce;
2803 int tbl_idx, pos = 0, idx, devfn, ret = 0, did;
2804 phys_addr_t old_ce_phys;
2806 tbl_idx = ext ? bus * 2 : bus;
2808 for (devfn = 0; devfn < 256; devfn++) {
2809 /* First calculate the correct index */
2810 idx = (ext ? devfn * 2 : devfn) % 256;
2813 /* First save what we may have and clean up */
2815 tbl[tbl_idx] = new_ce;
2816 __iommu_flush_cache(iommu, new_ce,
2826 old_ce_phys = root_entry_lctp(old_re);
2828 old_ce_phys = root_entry_uctp(old_re);
2831 if (ext && devfn == 0) {
2832 /* No LCTP, try UCTP */
2841 old_ce = ioremap_cache(old_ce_phys, PAGE_SIZE);
2845 new_ce = alloc_pgtable_page(iommu->node);
2852 /* Now copy the context entry */
2855 if (!__context_present(&ce))
2858 did = context_domain_id(&ce);
2859 if (did >= 0 && did < cap_ndoms(iommu->cap))
2860 set_bit(did, iommu->domain_ids);
2863 * We need a marker for copied context entries. This
2864 * marker needs to work for the old format as well as
2865 * for extended context entries.
2867 * Bit 67 of the context entry is used. In the old
2868 * format this bit is available to software, in the
2869 * extended format it is the PGE bit, but PGE is ignored
2870 * by HW if PASIDs are disabled (and thus still
2873 * So disable PASIDs first and then mark the entry
2874 * copied. This means that we don't copy PASID
2875 * translations from the old kernel, but this is fine as
2876 * faults there are not fatal.
2878 context_clear_pasid_enable(&ce);
2879 context_set_copied(&ce);
2884 tbl[tbl_idx + pos] = new_ce;
2886 __iommu_flush_cache(iommu, new_ce, VTD_PAGE_SIZE);
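/*
 * Layout note for the copied tables: extended context entries are twice
 * the size of legacy ones, so each bus needs two context-table pages
 * (devfn 0-127 in the low table, 128-255 in the high table).  That is
 * why tbl_idx is bus * 2 and idx is (devfn * 2) % 256 above; roughly,
 * the copied entry for a devfn lives at
 *
 *	tbl[bus * 2 + devfn / 128][(devfn * 2) % 256]
 *
 * The bit-67 "copied" marker described above only works because PASIDs
 * are disabled in the entry first.
 */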
2895 static int copy_translation_tables(struct intel_iommu *iommu)
2897 struct context_entry **ctxt_tbls;
2898 struct root_entry *old_rt;
2899 phys_addr_t old_rt_phys;
2900 int ctxt_table_entries;
2901 unsigned long flags;
2906 rtaddr_reg = dmar_readq(iommu->reg + DMAR_RTADDR_REG);
2907 ext = !!(rtaddr_reg & DMA_RTADDR_RTT);
2908 new_ext = !!ecap_ecs(iommu->ecap);
2911 * The RTT bit can only be changed when translation is disabled,
2912 * but disabling translation means to open a window for data
2913 * corruption. So bail out and don't copy anything if we would
2914 * have to change the bit.
2919 old_rt_phys = rtaddr_reg & VTD_PAGE_MASK;
2923 old_rt = ioremap_cache(old_rt_phys, PAGE_SIZE);
2927 /* This is too big for the stack - allocate it from slab */
2928 ctxt_table_entries = ext ? 512 : 256;
2930 ctxt_tbls = kzalloc(ctxt_table_entries * sizeof(void *), GFP_KERNEL);
2934 for (bus = 0; bus < 256; bus++) {
2935 ret = copy_context_table(iommu, &old_rt[bus],
2936 ctxt_tbls, bus, ext);
2938 pr_err("%s: Failed to copy context table for bus %d\n",
2944 spin_lock_irqsave(&iommu->lock, flags);
2946 /* Context tables are copied, now write them to the root_entry table */
2947 for (bus = 0; bus < 256; bus++) {
2948 int idx = ext ? bus * 2 : bus;
2951 if (ctxt_tbls[idx]) {
2952 val = virt_to_phys(ctxt_tbls[idx]) | 1;
2953 iommu->root_entry[bus].lo = val;
2956 if (!ext || !ctxt_tbls[idx + 1])
2959 val = virt_to_phys(ctxt_tbls[idx + 1]) | 1;
2960 iommu->root_entry[bus].hi = val;
2963 spin_unlock_irqrestore(&iommu->lock, flags);
2967 __iommu_flush_cache(iommu, iommu->root_entry, PAGE_SIZE);
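/*
 * Summary of the kdump handover: the old kernel's root table is mapped
 * with ioremap_cache(), each context table is duplicated into pages
 * owned by this kernel, and only then is iommu->root_entry pointed at
 * the copies.  The RTT/ECS mismatch check above bails out early because
 * changing RTT would require disabling translation, which is exactly
 * what we are trying to avoid while the old kernel's devices may still
 * be doing DMA.
 */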
2977 static int __init init_dmars(void)
2979 struct dmar_drhd_unit *drhd;
2980 struct dmar_rmrr_unit *rmrr;
2981 bool copied_tables = false;
2983 struct intel_iommu *iommu;
2989 * initialize and program root entry to not present
2992 for_each_drhd_unit(drhd) {
2994 * lock not needed as this is only incremented in the single
2995 * threaded kernel __init code path; all other accesses are read-only
2998 if (g_num_of_iommus < DMAR_UNITS_SUPPORTED) {
3002 pr_err_once("Exceeded %d IOMMUs\n", DMAR_UNITS_SUPPORTED);
3005 /* Preallocate enough resources for IOMMU hot-addition */
3006 if (g_num_of_iommus < DMAR_UNITS_SUPPORTED)
3007 g_num_of_iommus = DMAR_UNITS_SUPPORTED;
3009 g_iommus = kcalloc(g_num_of_iommus, sizeof(struct intel_iommu *),
3012 pr_err("Allocating global iommu array failed\n");
3017 deferred_flush = kzalloc(g_num_of_iommus *
3018 sizeof(struct deferred_flush_tables), GFP_KERNEL);
3019 if (!deferred_flush) {
3024 for_each_active_iommu(iommu, drhd) {
3025 g_iommus[iommu->seq_id] = iommu;
3027 intel_iommu_init_qi(iommu);
3029 ret = iommu_init_domains(iommu);
3033 init_translation_status(iommu);
3035 if (translation_pre_enabled(iommu) && !is_kdump_kernel()) {
3036 iommu_disable_translation(iommu);
3037 clear_translation_pre_enabled(iommu);
3038 pr_warn("Translation was enabled for %s but we are not in kdump mode\n",
3044 * we could share the same root & context tables
3045 * among all IOMMUs; this needs to be split later.
3047 ret = iommu_alloc_root_entry(iommu);
3051 if (translation_pre_enabled(iommu)) {
3052 pr_info("Translation already enabled - trying to copy translation structures\n");
3054 ret = copy_translation_tables(iommu);
3057 * We found the IOMMU with translation
3058 * enabled - but failed to copy over the
3059 * old root-entry table. Try to proceed
3060 * by disabling translation now and
3061 * allocating a clean root-entry table.
3062 * This might cause DMAR faults, but
3063 * probably the dump will still succeed.
3065 pr_err("Failed to copy translation tables from previous kernel for %s\n",
3067 iommu_disable_translation(iommu);
3068 clear_translation_pre_enabled(iommu);
3070 pr_info("Copied translation tables from previous kernel for %s\n",
3072 copied_tables = true;
3076 iommu_flush_write_buffer(iommu);
3077 iommu_set_root_entry(iommu);
3078 iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
3079 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
3081 if (!ecap_pass_through(iommu->ecap))
3082 hw_pass_through = 0;
3085 if (iommu_pass_through)
3086 iommu_identity_mapping |= IDENTMAP_ALL;
3088 #ifdef CONFIG_INTEL_IOMMU_BROKEN_GFX_WA
3089 iommu_identity_mapping |= IDENTMAP_GFX;
3092 if (iommu_identity_mapping) {
3093 ret = si_domain_init(hw_pass_through);
3098 check_tylersburg_isoch();
3101 * If we copied translations from a previous kernel in the kdump
3102 * case, we can not assign the devices to domains now, as that
3103 * would eliminate the old mappings. So skip this part and defer
3104 * the assignment to device driver initialization time.
3110 * If pass through is not set or not enabled, setup context entries for
3111 * identity mappings for rmrr, gfx, and isa and may fall back to static
3112 * identity mapping if iommu_identity_mapping is set.
3114 if (iommu_identity_mapping) {
3115 ret = iommu_prepare_static_identity_mapping(hw_pass_through);
3117 pr_crit("Failed to setup IOMMU pass-through\n");
3123 * for each dev attached to rmrr
3125 * locate drhd for dev, alloc domain for dev
3126 * allocate free domain
3127 * allocate page table entries for rmrr
3128 * if context not allocated for bus
3129 * allocate and init context
3130 * set present in root table for this bus
3131 * init context with domain, translation etc
3135 pr_info("Setting RMRR:\n");
3136 for_each_rmrr_units(rmrr) {
3137 /* some BIOSes list non-existent devices in the DMAR table. */
3138 for_each_active_dev_scope(rmrr->devices, rmrr->devices_cnt,
3140 ret = iommu_prepare_rmrr_dev(rmrr, dev);
3142 pr_err("Mapping reserved region failed\n");
3146 iommu_prepare_isa();
3153 * global invalidate context cache
3154 * global invalidate iotlb
3155 * enable translation
3157 for_each_iommu(iommu, drhd) {
3158 if (drhd->ignored) {
3160 * we always have to disable PMRs or DMA may fail on
3164 iommu_disable_protect_mem_regions(iommu);
3168 iommu_flush_write_buffer(iommu);
3170 ret = dmar_set_interrupt(iommu);
3174 if (!translation_pre_enabled(iommu))
3175 iommu_enable_translation(iommu);
3177 iommu_disable_protect_mem_regions(iommu);
3183 for_each_active_iommu(iommu, drhd) {
3184 disable_dmar_iommu(iommu);
3185 free_dmar_iommu(iommu);
3187 kfree(deferred_flush);
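/*
 * init_dmars() recap: allocate the global iommu and deferred-flush
 * arrays, set up (or, in the kdump case, copy) root and context tables
 * for each IOMMU, build the static identity, RMRR and ISA mappings
 * unless tables were copied, then enable the fault interrupt and
 * translation.  The error path above unwinds per-IOMMU state with
 * disable_dmar_iommu()/free_dmar_iommu().
 */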
3194 /* This takes a number of _MM_ pages, not VTD pages */
3195 static struct iova *intel_alloc_iova(struct device *dev,
3196 struct dmar_domain *domain,
3197 unsigned long nrpages, uint64_t dma_mask)
3199 struct iova *iova = NULL;
3201 /* Restrict dma_mask to the width that the iommu can handle */
3202 dma_mask = min_t(uint64_t, DOMAIN_MAX_ADDR(domain->gaw), dma_mask);
3204 if (!dmar_forcedac && dma_mask > DMA_BIT_MASK(32)) {
3206 * First try to allocate an io virtual address in
3207 * DMA_BIT_MASK(32) and if that fails then try allocating
3210 iova = alloc_iova(&domain->iovad, nrpages,
3211 IOVA_PFN(DMA_BIT_MASK(32)), 1);
3215 iova = alloc_iova(&domain->iovad, nrpages, IOVA_PFN(dma_mask), 1);
3216 if (unlikely(!iova)) {
3217 pr_err("Allocating %ld-page iova for %s failed\n",
3218 nrpages, dev_name(dev));
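/*
 * Allocation policy: the mask is first clamped to what the domain can
 * address, and unless forcedac is set a device with a >32-bit DMA mask
 * still gets its IOVA from below 4GiB first; dual-address-cycle
 * addressing is reportedly slow or broken on some devices, so the full
 * mask is only used when the 32-bit space is exhausted.
 */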
3225 static struct dmar_domain *__get_valid_domain_for_dev(struct device *dev)
3227 struct dmar_domain *domain;
3229 domain = get_domain_for_dev(dev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
3231 pr_err("Allocating domain for %s failed\n",
3239 static inline struct dmar_domain *get_valid_domain_for_dev(struct device *dev)
3241 struct device_domain_info *info;
3243 /* No lock here, assumes no domain exit in normal case */
3244 info = dev->archdata.iommu;
3246 return info->domain;
3248 return __get_valid_domain_for_dev(dev);
3251 /* Check if the dev needs to go through non-identity map and unmap process.*/
3252 static int iommu_no_mapping(struct device *dev)
3256 if (iommu_dummy(dev))
3259 if (!iommu_identity_mapping)
3262 found = identity_mapping(dev);
3264 if (iommu_should_identity_map(dev, 0))
3268 * A 32-bit DMA device is removed from si_domain and falls back
3269 * to non-identity mapping.
3271 dmar_remove_one_dev_info(si_domain, dev);
3272 pr_info("32bit %s uses non-identity mapping\n",
3278 * If a 64-bit DMA device is detached from a VM, the device
3279 * is put back into si_domain for identity mapping.
3281 if (iommu_should_identity_map(dev, 0)) {
3283 ret = domain_add_dev_info(si_domain, dev);
3285 pr_info("64bit %s uses identity mapping\n",
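/*
 * So iommu_no_mapping() does double duty: it answers "should this device
 * bypass translation?" and at the same time lazily migrates devices into
 * or out of si_domain when their identity-mapping eligibility (e.g. the
 * DMA mask check in iommu_should_identity_map()) changes after boot.
 */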
3295 static dma_addr_t __intel_map_single(struct device *dev, phys_addr_t paddr,
3296 size_t size, int dir, u64 dma_mask)
3298 struct dmar_domain *domain;
3299 phys_addr_t start_paddr;
3303 struct intel_iommu *iommu;
3304 unsigned long paddr_pfn = paddr >> PAGE_SHIFT;
3306 BUG_ON(dir == DMA_NONE);
3308 if (iommu_no_mapping(dev))
3311 domain = get_valid_domain_for_dev(dev);
3315 iommu = domain_get_iommu(domain);
3316 size = aligned_nrpages(paddr, size);
3318 iova = intel_alloc_iova(dev, domain, dma_to_mm_pfn(size), dma_mask);
3323 * Check if DMAR supports zero-length reads on write only
3326 if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL || \
3327 !cap_zlr(iommu->cap))
3328 prot |= DMA_PTE_READ;
3329 if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
3330 prot |= DMA_PTE_WRITE;
3332 * paddr - (paddr + size) might be partial page, we should map the whole
3333 * page. Note: if two part of one page are separately mapped, we
3334 * might have two guest_addr mapping to the same host paddr, but this
3335 * is not a big problem
3337 ret = domain_pfn_mapping(domain, mm_to_dma_pfn(iova->pfn_lo),
3338 mm_to_dma_pfn(paddr_pfn), size, prot);
3342 /* it's a non-present to present mapping. Only flush if caching mode */
3343 if (cap_caching_mode(iommu->cap))
3344 iommu_flush_iotlb_psi(iommu, domain,
3345 mm_to_dma_pfn(iova->pfn_lo),
3348 iommu_flush_write_buffer(iommu);
3350 start_paddr = (phys_addr_t)iova->pfn_lo << PAGE_SHIFT;
3351 start_paddr += paddr & ~PAGE_MASK;
3356 __free_iova(&domain->iovad, iova);
3357 pr_err("Device %s request: %zx@%llx dir %d --- failed\n",
3358 dev_name(dev), size, (unsigned long long)paddr, dir);
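/*
 * Flush policy in __intel_map_single(): a not-present to present change
 * only needs an IOTLB flush when the IOMMU reports caching mode (i.e. it
 * may cache not-present entries, as emulated IOMMUs do); on real hardware
 * a write-buffer flush is sufficient for the new mapping to be visible.
 */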
3362 static dma_addr_t intel_map_page(struct device *dev, struct page *page,
3363 unsigned long offset, size_t size,
3364 enum dma_data_direction dir,
3365 struct dma_attrs *attrs)
3367 return __intel_map_single(dev, page_to_phys(page) + offset, size,
3368 dir, *dev->dma_mask);
3371 static void flush_unmaps(void)
3377 /* just flush them all */
3378 for (i = 0; i < g_num_of_iommus; i++) {
3379 struct intel_iommu *iommu = g_iommus[i];
3383 if (!deferred_flush[i].next)
3386 /* In caching mode, global flushes make emulation expensive */
3387 if (!cap_caching_mode(iommu->cap))
3388 iommu->flush.flush_iotlb(iommu, 0, 0, 0,
3389 DMA_TLB_GLOBAL_FLUSH);
3390 for (j = 0; j < deferred_flush[i].next; j++) {
3392 struct iova *iova = deferred_flush[i].iova[j];
3393 struct dmar_domain *domain = deferred_flush[i].domain[j];
3395 /* On real hardware multiple invalidations are expensive */
3396 if (cap_caching_mode(iommu->cap))
3397 iommu_flush_iotlb_psi(iommu, domain,
3398 iova->pfn_lo, iova_size(iova),
3399 !deferred_flush[i].freelist[j], 0);
3401 mask = ilog2(mm_to_dma_pfn(iova_size(iova)));
3402 iommu_flush_dev_iotlb(deferred_flush[i].domain[j],
3403 (uint64_t)iova->pfn_lo << PAGE_SHIFT, mask);
3405 __free_iova(&deferred_flush[i].domain[j]->iovad, iova);
3406 if (deferred_flush[i].freelist[j])
3407 dma_free_pagelist(deferred_flush[i].freelist[j]);
3409 deferred_flush[i].next = 0;
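/*
 * flush_unmaps() trades latency for throughput: freed IOVAs are batched
 * per IOMMU and released from the timer (or once HIGH_WATER_MARK is hit
 * in add_unmap()), so one global IOTLB flush covers many unmaps.  In
 * caching mode we flush per entry instead, because global flushes are
 * the expensive operation for an emulated IOMMU.
 */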
3415 static void flush_unmaps_timeout(unsigned long data)
3417 unsigned long flags;
3419 spin_lock_irqsave(&async_umap_flush_lock, flags);
3421 spin_unlock_irqrestore(&async_umap_flush_lock, flags);
3424 static void add_unmap(struct dmar_domain *dom, struct iova *iova, struct page *freelist)
3426 unsigned long flags;
3428 struct intel_iommu *iommu;
3430 spin_lock_irqsave(&async_umap_flush_lock, flags);
3431 if (list_size == HIGH_WATER_MARK)
3434 iommu = domain_get_iommu(dom);
3435 iommu_id = iommu->seq_id;
3437 next = deferred_flush[iommu_id].next;
3438 deferred_flush[iommu_id].domain[next] = dom;
3439 deferred_flush[iommu_id].iova[next] = iova;
3440 deferred_flush[iommu_id].freelist[next] = freelist;
3441 deferred_flush[iommu_id].next++;
3444 mod_timer(&unmap_timer, jiffies + msecs_to_jiffies(10));
3448 spin_unlock_irqrestore(&async_umap_flush_lock, flags);
3451 static void intel_unmap(struct device *dev, dma_addr_t dev_addr)
3453 struct dmar_domain *domain;
3454 unsigned long start_pfn, last_pfn;
3456 struct intel_iommu *iommu;
3457 struct page *freelist;
3459 if (iommu_no_mapping(dev))
3462 domain = find_domain(dev);
3465 iommu = domain_get_iommu(domain);
3467 iova = find_iova(&domain->iovad, IOVA_PFN(dev_addr));
3468 if (WARN_ONCE(!iova, "Driver unmaps unmatched page at PFN %llx\n",
3469 (unsigned long long)dev_addr))
3472 start_pfn = mm_to_dma_pfn(iova->pfn_lo);
3473 last_pfn = mm_to_dma_pfn(iova->pfn_hi + 1) - 1;
3475 pr_debug("Device %s unmapping: pfn %lx-%lx\n",
3476 dev_name(dev), start_pfn, last_pfn);
3478 freelist = domain_unmap(domain, start_pfn, last_pfn);
3480 if (intel_iommu_strict) {
3481 iommu_flush_iotlb_psi(iommu, domain, start_pfn,
3482 last_pfn - start_pfn + 1, !freelist, 0);
3484 __free_iova(&domain->iovad, iova);
3485 dma_free_pagelist(freelist);
3487 add_unmap(domain, iova, freelist);
3489 * queue up the release of the unmap to save the 1/6th of the
3490 * cpu used up by the iotlb flush operation...
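/*
 * intel_iommu_strict therefore selects between a synchronous unmap
 * (flush and free immediately, safest) and the deferred path above,
 * which leaves a short window where the device could still reach the
 * freed IOVA but costs far less CPU time.
 */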
3495 static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
3496 size_t size, enum dma_data_direction dir,
3497 struct dma_attrs *attrs)
3499 intel_unmap(dev, dev_addr);
3502 static void *intel_alloc_coherent(struct device *dev, size_t size,
3503 dma_addr_t *dma_handle, gfp_t flags,
3504 struct dma_attrs *attrs)
3506 struct page *page = NULL;
3509 size = PAGE_ALIGN(size);
3510 order = get_order(size);
3512 if (!iommu_no_mapping(dev))
3513 flags &= ~(GFP_DMA | GFP_DMA32);
3514 else if (dev->coherent_dma_mask < dma_get_required_mask(dev)) {
3515 if (dev->coherent_dma_mask < DMA_BIT_MASK(32))
3521 if (flags & __GFP_WAIT) {
3522 unsigned int count = size >> PAGE_SHIFT;
3524 page = dma_alloc_from_contiguous(dev, count, order);
3525 if (page && iommu_no_mapping(dev) &&
3526 page_to_phys(page) + size > dev->coherent_dma_mask) {
3527 dma_release_from_contiguous(dev, page, count);
3533 page = alloc_pages(flags, order);
3536 memset(page_address(page), 0, size);
3538 *dma_handle = __intel_map_single(dev, page_to_phys(page), size,
3540 dev->coherent_dma_mask);
3542 return page_address(page);
3543 if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT))
3544 __free_pages(page, order);
3549 static void intel_free_coherent(struct device *dev, size_t size, void *vaddr,
3550 dma_addr_t dma_handle, struct dma_attrs *attrs)
3553 struct page *page = virt_to_page(vaddr);
3555 size = PAGE_ALIGN(size);
3556 order = get_order(size);
3558 intel_unmap(dev, dma_handle);
3559 if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT))
3560 __free_pages(page, order);
3563 static void intel_unmap_sg(struct device *dev, struct scatterlist *sglist,
3564 int nelems, enum dma_data_direction dir,
3565 struct dma_attrs *attrs)
3567 intel_unmap(dev, sglist[0].dma_address);
3570 static int intel_nontranslate_map_sg(struct device *hddev,
3571 struct scatterlist *sglist, int nelems, int dir)
3574 struct scatterlist *sg;
3576 for_each_sg(sglist, sg, nelems, i) {
3577 BUG_ON(!sg_page(sg));
3578 sg->dma_address = page_to_phys(sg_page(sg)) + sg->offset;
3579 sg->dma_length = sg->length;
3584 static int intel_map_sg(struct device *dev, struct scatterlist *sglist, int nelems,
3585 enum dma_data_direction dir, struct dma_attrs *attrs)
3588 struct dmar_domain *domain;
3591 struct iova *iova = NULL;
3593 struct scatterlist *sg;
3594 unsigned long start_vpfn;
3595 struct intel_iommu *iommu;
3597 BUG_ON(dir == DMA_NONE);
3598 if (iommu_no_mapping(dev))
3599 return intel_nontranslate_map_sg(dev, sglist, nelems, dir);
3601 domain = get_valid_domain_for_dev(dev);
3605 iommu = domain_get_iommu(domain);
3607 for_each_sg(sglist, sg, nelems, i)
3608 size += aligned_nrpages(sg->offset, sg->length);
3610 iova = intel_alloc_iova(dev, domain, dma_to_mm_pfn(size),
3613 sglist->dma_length = 0;
3618 * Check if DMAR supports zero-length reads on write only
3621 if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL || \
3622 !cap_zlr(iommu->cap))
3623 prot |= DMA_PTE_READ;
3624 if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
3625 prot |= DMA_PTE_WRITE;
3627 start_vpfn = mm_to_dma_pfn(iova->pfn_lo);
3629 ret = domain_sg_mapping(domain, start_vpfn, sglist, size, prot);
3630 if (unlikely(ret)) {
3631 dma_pte_free_pagetable(domain, start_vpfn,
3632 start_vpfn + size - 1);
3633 __free_iova(&domain->iovad, iova);
3637 /* it's a non-present to present mapping. Only flush if caching mode */
3638 if (cap_caching_mode(iommu->cap))
3639 iommu_flush_iotlb_psi(iommu, domain, start_vpfn, size, 0, 1);
3641 iommu_flush_write_buffer(iommu);
3646 static int intel_mapping_error(struct device *dev, dma_addr_t dma_addr)
3651 struct dma_map_ops intel_dma_ops = {
3652 .alloc = intel_alloc_coherent,
3653 .free = intel_free_coherent,
3654 .map_sg = intel_map_sg,
3655 .unmap_sg = intel_unmap_sg,
3656 .map_page = intel_map_page,
3657 .unmap_page = intel_unmap_page,
3658 .mapping_error = intel_mapping_error,
3661 static inline int iommu_domain_cache_init(void)
3665 iommu_domain_cache = kmem_cache_create("iommu_domain",
3666 sizeof(struct dmar_domain),
3671 if (!iommu_domain_cache) {
3672 pr_err("Couldn't create iommu_domain cache\n");
3679 static inline int iommu_devinfo_cache_init(void)
3683 iommu_devinfo_cache = kmem_cache_create("iommu_devinfo",
3684 sizeof(struct device_domain_info),
3688 if (!iommu_devinfo_cache) {
3689 pr_err("Couldn't create devinfo cache\n");
3696 static int __init iommu_init_mempool(void)
3699 ret = iommu_iova_cache_init();
3703 ret = iommu_domain_cache_init();
3707 ret = iommu_devinfo_cache_init();
3711 kmem_cache_destroy(iommu_domain_cache);
3713 iommu_iova_cache_destroy();
3718 static void __init iommu_exit_mempool(void)
3720 kmem_cache_destroy(iommu_devinfo_cache);
3721 kmem_cache_destroy(iommu_domain_cache);
3722 iommu_iova_cache_destroy();
3725 static void quirk_ioat_snb_local_iommu(struct pci_dev *pdev)
3727 struct dmar_drhd_unit *drhd;
3731 /* We know that this device on this chipset has its own IOMMU.
3732 * If we find it under a different IOMMU, then the BIOS is lying
3733 * to us. Hope that the IOMMU for this device is actually
3734 * disabled, and it needs no translation...
3736 rc = pci_bus_read_config_dword(pdev->bus, PCI_DEVFN(0, 0), 0xb0, &vtbar);
3738 /* "can't" happen */
3739 dev_info(&pdev->dev, "failed to run vt-d quirk\n");
3742 vtbar &= 0xffff0000;
3744 /* we know that this iommu should be at offset 0xa000 from vtbar */
3745 drhd = dmar_find_matched_drhd_unit(pdev);
3746 if (WARN_TAINT_ONCE(!drhd || drhd->reg_base_addr - vtbar != 0xa000,
3747 TAINT_FIRMWARE_WORKAROUND,
3748 "BIOS assigned incorrect VT-d unit for Intel(R) QuickData Technology device\n"))
3749 pdev->dev.archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
3751 DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB, quirk_ioat_snb_local_iommu);
3753 static void __init init_no_remapping_devices(void)
3755 struct dmar_drhd_unit *drhd;
3759 for_each_drhd_unit(drhd) {
3760 if (!drhd->include_all) {
3761 for_each_active_dev_scope(drhd->devices,
3762 drhd->devices_cnt, i, dev)
3764 /* ignore DMAR unit if no devices exist */
3765 if (i == drhd->devices_cnt)
3770 for_each_active_drhd_unit(drhd) {
3771 if (drhd->include_all)
3774 for_each_active_dev_scope(drhd->devices,
3775 drhd->devices_cnt, i, dev)
3776 if (!dev_is_pci(dev) || !IS_GFX_DEVICE(to_pci_dev(dev)))
3778 if (i < drhd->devices_cnt)
3781 /* This IOMMU has *only* gfx devices. Either bypass it or
3782 set the gfx_mapped flag, as appropriate */
3784 intel_iommu_gfx_mapped = 1;
3787 for_each_active_dev_scope(drhd->devices,
3788 drhd->devices_cnt, i, dev)
3789 dev->archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
3794 #ifdef CONFIG_SUSPEND
3795 static int init_iommu_hw(void)
3797 struct dmar_drhd_unit *drhd;
3798 struct intel_iommu *iommu = NULL;
3800 for_each_active_iommu(iommu, drhd)
3802 dmar_reenable_qi(iommu);
3804 for_each_iommu(iommu, drhd) {
3805 if (drhd->ignored) {
3807 * we always have to disable PMRs or DMA may fail on
3811 iommu_disable_protect_mem_regions(iommu);
3815 iommu_flush_write_buffer(iommu);
3817 iommu_set_root_entry(iommu);
3819 iommu->flush.flush_context(iommu, 0, 0, 0,
3820 DMA_CCMD_GLOBAL_INVL);
3821 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
3822 iommu_enable_translation(iommu);
3823 iommu_disable_protect_mem_regions(iommu);
3829 static void iommu_flush_all(void)
3831 struct dmar_drhd_unit *drhd;
3832 struct intel_iommu *iommu;
3834 for_each_active_iommu(iommu, drhd) {
3835 iommu->flush.flush_context(iommu, 0, 0, 0,
3836 DMA_CCMD_GLOBAL_INVL);
3837 iommu->flush.flush_iotlb(iommu, 0, 0, 0,
3838 DMA_TLB_GLOBAL_FLUSH);
3842 static int iommu_suspend(void)
3844 struct dmar_drhd_unit *drhd;
3845 struct intel_iommu *iommu = NULL;
3848 for_each_active_iommu(iommu, drhd) {
3849 iommu->iommu_state = kzalloc(sizeof(u32) * MAX_SR_DMAR_REGS,
3851 if (!iommu->iommu_state)
3857 for_each_active_iommu(iommu, drhd) {
3858 iommu_disable_translation(iommu);
3860 raw_spin_lock_irqsave(&iommu->register_lock, flag);
3862 iommu->iommu_state[SR_DMAR_FECTL_REG] =
3863 readl(iommu->reg + DMAR_FECTL_REG);
3864 iommu->iommu_state[SR_DMAR_FEDATA_REG] =
3865 readl(iommu->reg + DMAR_FEDATA_REG);
3866 iommu->iommu_state[SR_DMAR_FEADDR_REG] =
3867 readl(iommu->reg + DMAR_FEADDR_REG);
3868 iommu->iommu_state[SR_DMAR_FEUADDR_REG] =
3869 readl(iommu->reg + DMAR_FEUADDR_REG);
3871 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
3876 for_each_active_iommu(iommu, drhd)
3877 kfree(iommu->iommu_state);
3882 static void iommu_resume(void)
3884 struct dmar_drhd_unit *drhd;
3885 struct intel_iommu *iommu = NULL;
3888 if (init_iommu_hw()) {
3890 panic("tboot: IOMMU setup failed, DMAR can not resume!\n");
3892 WARN(1, "IOMMU setup failed, DMAR can not resume!\n");
3896 for_each_active_iommu(iommu, drhd) {
3898 raw_spin_lock_irqsave(&iommu->register_lock, flag);
3900 writel(iommu->iommu_state[SR_DMAR_FECTL_REG],
3901 iommu->reg + DMAR_FECTL_REG);
3902 writel(iommu->iommu_state[SR_DMAR_FEDATA_REG],
3903 iommu->reg + DMAR_FEDATA_REG);
3904 writel(iommu->iommu_state[SR_DMAR_FEADDR_REG],
3905 iommu->reg + DMAR_FEADDR_REG);
3906 writel(iommu->iommu_state[SR_DMAR_FEUADDR_REG],
3907 iommu->reg + DMAR_FEUADDR_REG);
3909 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
3912 for_each_active_iommu(iommu, drhd)
3913 kfree(iommu->iommu_state);
3916 static struct syscore_ops iommu_syscore_ops = {
3917 .resume = iommu_resume,
3918 .suspend = iommu_suspend,
3921 static void __init init_iommu_pm_ops(void)
3923 register_syscore_ops(&iommu_syscore_ops);
3927 static inline void init_iommu_pm_ops(void) {}
3928 #endif /* CONFIG_PM */
3931 int __init dmar_parse_one_rmrr(struct acpi_dmar_header *header, void *arg)
3933 struct acpi_dmar_reserved_memory *rmrr;
3934 struct dmar_rmrr_unit *rmrru;
3936 rmrru = kzalloc(sizeof(*rmrru), GFP_KERNEL);
3940 rmrru->hdr = header;
3941 rmrr = (struct acpi_dmar_reserved_memory *)header;
3942 rmrru->base_address = rmrr->base_address;
3943 rmrru->end_address = rmrr->end_address;
3944 rmrru->devices = dmar_alloc_dev_scope((void *)(rmrr + 1),
3945 ((void *)rmrr) + rmrr->header.length,
3946 &rmrru->devices_cnt);
3947 if (rmrru->devices_cnt && rmrru->devices == NULL) {
3952 list_add(&rmrru->list, &dmar_rmrr_units);
3957 static struct dmar_atsr_unit *dmar_find_atsr(struct acpi_dmar_atsr *atsr)
3959 struct dmar_atsr_unit *atsru;
3960 struct acpi_dmar_atsr *tmp;
3962 list_for_each_entry_rcu(atsru, &dmar_atsr_units, list) {
3963 tmp = (struct acpi_dmar_atsr *)atsru->hdr;
3964 if (atsr->segment != tmp->segment)
3966 if (atsr->header.length != tmp->header.length)
3968 if (memcmp(atsr, tmp, atsr->header.length) == 0)
3975 int dmar_parse_one_atsr(struct acpi_dmar_header *hdr, void *arg)
3977 struct acpi_dmar_atsr *atsr;
3978 struct dmar_atsr_unit *atsru;
3980 if (system_state != SYSTEM_BOOTING && !intel_iommu_enabled)
3983 atsr = container_of(hdr, struct acpi_dmar_atsr, header);
3984 atsru = dmar_find_atsr(atsr);
3988 atsru = kzalloc(sizeof(*atsru) + hdr->length, GFP_KERNEL);
3993 * If memory is allocated from slab by ACPI _DSM method, we need to
3994 * copy the memory content because the memory buffer will be freed on exit.
3997 atsru->hdr = (void *)(atsru + 1);
3998 memcpy(atsru->hdr, hdr, hdr->length);
3999 atsru->include_all = atsr->flags & 0x1;
4000 if (!atsru->include_all) {
4001 atsru->devices = dmar_alloc_dev_scope((void *)(atsr + 1),
4002 (void *)atsr + atsr->header.length,
4003 &atsru->devices_cnt);
4004 if (atsru->devices_cnt && atsru->devices == NULL) {
4010 list_add_rcu(&atsru->list, &dmar_atsr_units);
4015 static void intel_iommu_free_atsr(struct dmar_atsr_unit *atsru)
4017 dmar_free_dev_scope(&atsru->devices, &atsru->devices_cnt);
4021 int dmar_release_one_atsr(struct acpi_dmar_header *hdr, void *arg)
4023 struct acpi_dmar_atsr *atsr;
4024 struct dmar_atsr_unit *atsru;
4026 atsr = container_of(hdr, struct acpi_dmar_atsr, header);
4027 atsru = dmar_find_atsr(atsr);
4029 list_del_rcu(&atsru->list);
4031 intel_iommu_free_atsr(atsru);
4037 int dmar_check_one_atsr(struct acpi_dmar_header *hdr, void *arg)
4041 struct acpi_dmar_atsr *atsr;
4042 struct dmar_atsr_unit *atsru;
4044 atsr = container_of(hdr, struct acpi_dmar_atsr, header);
4045 atsru = dmar_find_atsr(atsr);
4049 if (!atsru->include_all && atsru->devices && atsru->devices_cnt)
4050 for_each_active_dev_scope(atsru->devices, atsru->devices_cnt,
4057 static int intel_iommu_add(struct dmar_drhd_unit *dmaru)
4060 struct intel_iommu *iommu = dmaru->iommu;
4062 if (g_iommus[iommu->seq_id])
4065 if (hw_pass_through && !ecap_pass_through(iommu->ecap)) {
4066 pr_warn("%s: Doesn't support hardware pass through.\n",
4070 if (!ecap_sc_support(iommu->ecap) &&
4071 domain_update_iommu_snooping(iommu)) {
4072 pr_warn("%s: Doesn't support snooping.\n",
4076 sp = domain_update_iommu_superpage(iommu) - 1;
4077 if (sp >= 0 && !(cap_super_page_val(iommu->cap) & (1 << sp))) {
4078 pr_warn("%s: Doesn't support large page.\n",
4084 * Disable translation if already enabled prior to OS handover.
4086 if (iommu->gcmd & DMA_GCMD_TE)
4087 iommu_disable_translation(iommu);
4089 g_iommus[iommu->seq_id] = iommu;
4090 ret = iommu_init_domains(iommu);
4092 ret = iommu_alloc_root_entry(iommu);
4096 if (dmaru->ignored) {
4098 * we always have to disable PMRs or DMA may fail on this device
4101 iommu_disable_protect_mem_regions(iommu);
4105 intel_iommu_init_qi(iommu);
4106 iommu_flush_write_buffer(iommu);
4107 ret = dmar_set_interrupt(iommu);
4111 iommu_set_root_entry(iommu);
4112 iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
4113 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
4114 iommu_enable_translation(iommu);
4116 iommu_disable_protect_mem_regions(iommu);
4120 disable_dmar_iommu(iommu);
4122 free_dmar_iommu(iommu);
4126 int dmar_iommu_hotplug(struct dmar_drhd_unit *dmaru, bool insert)
4129 struct intel_iommu *iommu = dmaru->iommu;
4131 if (!intel_iommu_enabled)
4137 ret = intel_iommu_add(dmaru);
4139 disable_dmar_iommu(iommu);
4140 free_dmar_iommu(iommu);
4146 static void intel_iommu_free_dmars(void)
4148 struct dmar_rmrr_unit *rmrru, *rmrr_n;
4149 struct dmar_atsr_unit *atsru, *atsr_n;
4151 list_for_each_entry_safe(rmrru, rmrr_n, &dmar_rmrr_units, list) {
4152 list_del(&rmrru->list);
4153 dmar_free_dev_scope(&rmrru->devices, &rmrru->devices_cnt);
4157 list_for_each_entry_safe(atsru, atsr_n, &dmar_atsr_units, list) {
4158 list_del(&atsru->list);
4159 intel_iommu_free_atsr(atsru);
4163 int dmar_find_matched_atsr_unit(struct pci_dev *dev)
4166 struct pci_bus *bus;
4167 struct pci_dev *bridge = NULL;
4169 struct acpi_dmar_atsr *atsr;
4170 struct dmar_atsr_unit *atsru;
4172 dev = pci_physfn(dev);
4173 for (bus = dev->bus; bus; bus = bus->parent) {
4175 if (!bridge || !pci_is_pcie(bridge) ||
4176 pci_pcie_type(bridge) == PCI_EXP_TYPE_PCI_BRIDGE)
4178 if (pci_pcie_type(bridge) == PCI_EXP_TYPE_ROOT_PORT)
4185 list_for_each_entry_rcu(atsru, &dmar_atsr_units, list) {
4186 atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
4187 if (atsr->segment != pci_domain_nr(dev->bus))
4190 for_each_dev_scope(atsru->devices, atsru->devices_cnt, i, tmp)
4191 if (tmp == &bridge->dev)
4194 if (atsru->include_all)
4204 int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info *info)
4207 struct dmar_rmrr_unit *rmrru;
4208 struct dmar_atsr_unit *atsru;
4209 struct acpi_dmar_atsr *atsr;
4210 struct acpi_dmar_reserved_memory *rmrr;
4212 if (!intel_iommu_enabled && system_state != SYSTEM_BOOTING)
4215 list_for_each_entry(rmrru, &dmar_rmrr_units, list) {
4216 rmrr = container_of(rmrru->hdr,
4217 struct acpi_dmar_reserved_memory, header);
4218 if (info->event == BUS_NOTIFY_ADD_DEVICE) {
4219 ret = dmar_insert_dev_scope(info, (void *)(rmrr + 1),
4220 ((void *)rmrr) + rmrr->header.length,
4221 rmrr->segment, rmrru->devices,
4222 rmrru->devices_cnt);
4225 } else if (info->event == BUS_NOTIFY_DEL_DEVICE) {
4226 dmar_remove_dev_scope(info, rmrr->segment,
4227 rmrru->devices, rmrru->devices_cnt);
4231 list_for_each_entry(atsru, &dmar_atsr_units, list) {
4232 if (atsru->include_all)
4235 atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
4236 if (info->event == BUS_NOTIFY_ADD_DEVICE) {
4237 ret = dmar_insert_dev_scope(info, (void *)(atsr + 1),
4238 (void *)atsr + atsr->header.length,
4239 atsr->segment, atsru->devices,
4240 atsru->devices_cnt);
4245 } else if (info->event == BUS_NOTIFY_DEL_DEVICE) {
4246 if (dmar_remove_dev_scope(info, atsr->segment,
4247 atsru->devices, atsru->devices_cnt))
4256 * Here we only respond to the action of unbinding a device from its driver.
4258 * A newly added device is not attached to its DMAR domain here yet; that will
4259 * happen when the device is mapped to an iova.
4261 static int device_notifier(struct notifier_block *nb,
4262 unsigned long action, void *data)
4264 struct device *dev = data;
4265 struct dmar_domain *domain;
4267 if (iommu_dummy(dev))
4270 if (action != BUS_NOTIFY_REMOVED_DEVICE)
4273 domain = find_domain(dev);
4277 dmar_remove_one_dev_info(domain, dev);
4278 if (!domain_type_is_vm_or_si(domain) && list_empty(&domain->devices))
4279 domain_exit(domain);
4284 static struct notifier_block device_nb = {
4285 .notifier_call = device_notifier,
4288 static int intel_iommu_memory_notifier(struct notifier_block *nb,
4289 unsigned long val, void *v)
4291 struct memory_notify *mhp = v;
4292 unsigned long long start, end;
4293 unsigned long start_vpfn, last_vpfn;
4296 case MEM_GOING_ONLINE:
4297 start = mhp->start_pfn << PAGE_SHIFT;
4298 end = ((mhp->start_pfn + mhp->nr_pages) << PAGE_SHIFT) - 1;
4299 if (iommu_domain_identity_map(si_domain, start, end)) {
4300 pr_warn("Failed to build identity map for [%llx-%llx]\n",
4307 case MEM_CANCEL_ONLINE:
4308 start_vpfn = mm_to_dma_pfn(mhp->start_pfn);
4309 last_vpfn = mm_to_dma_pfn(mhp->start_pfn + mhp->nr_pages - 1);
4310 while (start_vpfn <= last_vpfn) {
4312 struct dmar_drhd_unit *drhd;
4313 struct intel_iommu *iommu;
4314 struct page *freelist;
4316 iova = find_iova(&si_domain->iovad, start_vpfn);
4318 pr_debug("Failed get IOVA for PFN %lx\n",
4323 iova = split_and_remove_iova(&si_domain->iovad, iova,
4324 start_vpfn, last_vpfn);
4326 pr_warn("Failed to split IOVA PFN [%lx-%lx]\n",
4327 start_vpfn, last_vpfn);
4331 freelist = domain_unmap(si_domain, iova->pfn_lo,
4335 for_each_active_iommu(iommu, drhd)
4336 iommu_flush_iotlb_psi(iommu, si_domain,
4337 iova->pfn_lo, iova_size(iova),
4340 dma_free_pagelist(freelist);
4342 start_vpfn = iova->pfn_hi + 1;
4343 free_iova_mem(iova);
4351 static struct notifier_block intel_iommu_memory_nb = {
4352 .notifier_call = intel_iommu_memory_notifier,
4357 static ssize_t intel_iommu_show_version(struct device *dev,
4358 struct device_attribute *attr,
4361 struct intel_iommu *iommu = dev_get_drvdata(dev);
4362 u32 ver = readl(iommu->reg + DMAR_VER_REG);
4363 return sprintf(buf, "%d:%d\n",
4364 DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver));
4366 static DEVICE_ATTR(version, S_IRUGO, intel_iommu_show_version, NULL);
4368 static ssize_t intel_iommu_show_address(struct device *dev,
4369 struct device_attribute *attr,
4372 struct intel_iommu *iommu = dev_get_drvdata(dev);
4373 return sprintf(buf, "%llx\n", iommu->reg_phys);
4375 static DEVICE_ATTR(address, S_IRUGO, intel_iommu_show_address, NULL);
4377 static ssize_t intel_iommu_show_cap(struct device *dev,
4378 struct device_attribute *attr,
4381 struct intel_iommu *iommu = dev_get_drvdata(dev);
4382 return sprintf(buf, "%llx\n", iommu->cap);
4384 static DEVICE_ATTR(cap, S_IRUGO, intel_iommu_show_cap, NULL);
4386 static ssize_t intel_iommu_show_ecap(struct device *dev,
4387 struct device_attribute *attr,
4390 struct intel_iommu *iommu = dev_get_drvdata(dev);
4391 return sprintf(buf, "%llx\n", iommu->ecap);
4393 static DEVICE_ATTR(ecap, S_IRUGO, intel_iommu_show_ecap, NULL);
4395 static ssize_t intel_iommu_show_ndoms(struct device *dev,
4396 struct device_attribute *attr,
4399 struct intel_iommu *iommu = dev_get_drvdata(dev);
4400 return sprintf(buf, "%ld\n", cap_ndoms(iommu->cap));
4402 static DEVICE_ATTR(domains_supported, S_IRUGO, intel_iommu_show_ndoms, NULL);
4404 static ssize_t intel_iommu_show_ndoms_used(struct device *dev,
4405 struct device_attribute *attr,
4408 struct intel_iommu *iommu = dev_get_drvdata(dev);
4409 return sprintf(buf, "%d\n", bitmap_weight(iommu->domain_ids,
4410 cap_ndoms(iommu->cap)));
4412 static DEVICE_ATTR(domains_used, S_IRUGO, intel_iommu_show_ndoms_used, NULL);
4414 static struct attribute *intel_iommu_attrs[] = {
4415 &dev_attr_version.attr,
4416 &dev_attr_address.attr,
4418 &dev_attr_ecap.attr,
4419 &dev_attr_domains_supported.attr,
4420 &dev_attr_domains_used.attr,
4424 static struct attribute_group intel_iommu_group = {
4425 .name = "intel-iommu",
4426 .attrs = intel_iommu_attrs,
4429 const struct attribute_group *intel_iommu_groups[] = {
4434 int __init intel_iommu_init(void)
4437 struct dmar_drhd_unit *drhd;
4438 struct intel_iommu *iommu;
4440 /* VT-d is required for a TXT/tboot launch, so enforce that */
4441 force_on = tboot_force_iommu();
4443 if (iommu_init_mempool()) {
4445 panic("tboot: Failed to initialize iommu memory\n");
4449 down_write(&dmar_global_lock);
4450 if (dmar_table_init()) {
4452 panic("tboot: Failed to initialize DMAR table\n");
4456 if (dmar_dev_scope_init() < 0) {
4458 panic("tboot: Failed to initialize DMAR device scope\n");
4462 if (no_iommu || dmar_disabled)
4465 if (list_empty(&dmar_rmrr_units))
4466 pr_info("No RMRR found\n");
4468 if (list_empty(&dmar_atsr_units))
4469 pr_info("No ATSR found\n");
4471 if (dmar_init_reserved_ranges()) {
4473 panic("tboot: Failed to reserve iommu ranges\n");
4474 goto out_free_reserved_range;
4477 init_no_remapping_devices();
4482 panic("tboot: Failed to initialize DMARs\n");
4483 pr_err("Initialization failed\n");
4484 goto out_free_reserved_range;
4486 up_write(&dmar_global_lock);
4487 pr_info("Intel(R) Virtualization Technology for Directed I/O\n");
4489 init_timer(&unmap_timer);
4490 #ifdef CONFIG_SWIOTLB
4493 dma_ops = &intel_dma_ops;
4495 init_iommu_pm_ops();
4497 for_each_active_iommu(iommu, drhd)
4498 iommu->iommu_dev = iommu_device_create(NULL, iommu,
4502 bus_set_iommu(&pci_bus_type, &intel_iommu_ops);
4503 bus_register_notifier(&pci_bus_type, &device_nb);
4504 if (si_domain && !hw_pass_through)
4505 register_memory_notifier(&intel_iommu_memory_nb);
4507 intel_iommu_enabled = 1;
4511 out_free_reserved_range:
4512 put_iova_domain(&reserved_iova_list);
4514 intel_iommu_free_dmars();
4515 up_write(&dmar_global_lock);
4516 iommu_exit_mempool();
4520 static int domain_context_clear_one_cb(struct pci_dev *pdev, u16 alias, void *opaque)
4522 struct intel_iommu *iommu = opaque;
4524 domain_context_clear_one(iommu, PCI_BUS_NUM(alias), alias & 0xff);
4529 * NB - intel-iommu lacks any sort of reference counting for the users of
4530 * dependent devices. If multiple endpoints have intersecting dependent
4531 * devices, unbinding the driver from any one of them will possibly leave
4532 * the others unable to operate.
4534 static void domain_context_clear(struct intel_iommu *iommu, struct device *dev)
4536 if (!iommu || !dev || !dev_is_pci(dev))
4539 pci_for_each_dma_alias(to_pci_dev(dev), &domain_context_clear_one_cb, iommu);
4542 static void __dmar_remove_one_dev_info(struct device_domain_info *info)
4544 struct intel_iommu *iommu;
4545 unsigned long flags;
4547 assert_spin_locked(&device_domain_lock);
4552 iommu = info->iommu;
4555 iommu_disable_dev_iotlb(info);
4556 domain_context_clear(iommu, info->dev);
4559 unlink_domain_info(info);
4561 spin_lock_irqsave(&iommu->lock, flags);
4562 domain_detach_iommu(info->domain, iommu);
4563 spin_unlock_irqrestore(&iommu->lock, flags);
4565 free_devinfo_mem(info);
4568 static void dmar_remove_one_dev_info(struct dmar_domain *domain,
4571 struct device_domain_info *info;
4572 unsigned long flags;
4574 spin_lock_irqsave(&device_domain_lock, flags);
4575 info = dev->archdata.iommu;
4576 __dmar_remove_one_dev_info(info);
4577 spin_unlock_irqrestore(&device_domain_lock, flags);
4580 static int md_domain_init(struct dmar_domain *domain, int guest_width)
4584 init_iova_domain(&domain->iovad, VTD_PAGE_SIZE, IOVA_START_PFN,
4586 domain_reserve_special_ranges(domain);
4588 /* calculate AGAW */
4589 domain->gaw = guest_width;
4590 adjust_width = guestwidth_to_adjustwidth(guest_width);
4591 domain->agaw = width_to_agaw(adjust_width);
4593 domain->iommu_coherency = 0;
4594 domain->iommu_snooping = 0;
4595 domain->iommu_superpage = 0;
4596 domain->max_addr = 0;
4598 /* always allocate the top pgd */
4599 domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
4602 domain_flush_cache(domain, domain->pgd, PAGE_SIZE);
4606 static struct iommu_domain *intel_iommu_domain_alloc(unsigned type)
4608 struct dmar_domain *dmar_domain;
4609 struct iommu_domain *domain;
4611 if (type != IOMMU_DOMAIN_UNMANAGED)
4614 dmar_domain = alloc_domain(DOMAIN_FLAG_VIRTUAL_MACHINE);
4616 pr_err("Can't allocate dmar_domain\n");
4619 if (md_domain_init(dmar_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
4620 pr_err("Domain initialization failed\n");
4621 domain_exit(dmar_domain);
4624 domain_update_iommu_cap(dmar_domain);
4626 domain = &dmar_domain->domain;
4627 domain->geometry.aperture_start = 0;
4628 domain->geometry.aperture_end = __DOMAIN_MAX_ADDR(dmar_domain->gaw);
4629 domain->geometry.force_aperture = true;
4634 static void intel_iommu_domain_free(struct iommu_domain *domain)
4636 domain_exit(to_dmar_domain(domain));
4639 static int intel_iommu_attach_device(struct iommu_domain *domain,
4642 struct dmar_domain *dmar_domain = to_dmar_domain(domain);
4643 struct intel_iommu *iommu;
4647 if (device_is_rmrr_locked(dev)) {
4648 dev_warn(dev, "Device is ineligible for IOMMU domain attach due to platform RMRR requirement. Contact your platform vendor.\n");
4652 /* normally dev is not mapped */
4653 if (unlikely(domain_context_mapped(dev))) {
4654 struct dmar_domain *old_domain;
4656 old_domain = find_domain(dev);
4659 dmar_remove_one_dev_info(old_domain, dev);
4662 if (!domain_type_is_vm_or_si(old_domain) &&
4663 list_empty(&old_domain->devices))
4664 domain_exit(old_domain);
4668 iommu = device_to_iommu(dev, &bus, &devfn);
4672 /* check if this iommu agaw is sufficient for max mapped address */
4673 addr_width = agaw_to_width(iommu->agaw);
4674 if (addr_width > cap_mgaw(iommu->cap))
4675 addr_width = cap_mgaw(iommu->cap);
4677 if (dmar_domain->max_addr > (1LL << addr_width)) {
4678 pr_err("%s: iommu width (%d) is not "
4679 "sufficient for the mapped address (%llx)\n",
4680 __func__, addr_width, dmar_domain->max_addr);
4683 dmar_domain->gaw = addr_width;
4686 * Knock out extra levels of page tables if necessary
4688 while (iommu->agaw < dmar_domain->agaw) {
4689 struct dma_pte *pte;
4691 pte = dmar_domain->pgd;
4692 if (dma_pte_present(pte)) {
4693 dmar_domain->pgd = (struct dma_pte *)
4694 phys_to_virt(dma_pte_addr(pte));
4695 free_pgtable_page(pte);
4697 dmar_domain->agaw--;
4700 return domain_add_dev_info(dmar_domain, dev);
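/*
 * The loop above trims a domain that was built with the default 48-bit
 * width down to what this IOMMU can walk: each iteration frees the old
 * top-level table and promotes the table referenced by its first entry
 * to be the new pgd, decrementing agaw as it goes.  This is safe because
 * max_addr has already been checked against the reduced address width.
 */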
4703 static void intel_iommu_detach_device(struct iommu_domain *domain,
4706 dmar_remove_one_dev_info(to_dmar_domain(domain), dev);
4709 static int intel_iommu_map(struct iommu_domain *domain,
4710 unsigned long iova, phys_addr_t hpa,
4711 size_t size, int iommu_prot)
4713 struct dmar_domain *dmar_domain = to_dmar_domain(domain);
4718 if (iommu_prot & IOMMU_READ)
4719 prot |= DMA_PTE_READ;
4720 if (iommu_prot & IOMMU_WRITE)
4721 prot |= DMA_PTE_WRITE;
4722 if ((iommu_prot & IOMMU_CACHE) && dmar_domain->iommu_snooping)
4723 prot |= DMA_PTE_SNP;
4725 max_addr = iova + size;
4726 if (dmar_domain->max_addr < max_addr) {
4729 /* check if minimum agaw is sufficient for mapped address */
4730 end = __DOMAIN_MAX_ADDR(dmar_domain->gaw) + 1;
4731 if (end < max_addr) {
4732 pr_err("%s: iommu width (%d) is not "
4733 "sufficient for the mapped address (%llx)\n",
4734 __func__, dmar_domain->gaw, max_addr);
4737 dmar_domain->max_addr = max_addr;
4739 /* Round up size to next multiple of PAGE_SIZE, if it and
4740 the low bits of hpa would take us onto the next page */
4741 size = aligned_nrpages(hpa, size);
4742 ret = domain_pfn_mapping(dmar_domain, iova >> VTD_PAGE_SHIFT,
4743 hpa >> VTD_PAGE_SHIFT, size, prot);
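/*
 * aligned_nrpages() example (illustrative numbers): hpa = 0x1234 with
 * size = 0x2000 spans three 4KiB VT-d pages once the low bits of hpa are
 * taken into account, so the request is rounded up to three pages before
 * domain_pfn_mapping() is called.
 */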
4747 static size_t intel_iommu_unmap(struct iommu_domain *domain,
4748 unsigned long iova, size_t size)
4750 struct dmar_domain *dmar_domain = to_dmar_domain(domain);
4751 struct page *freelist = NULL;
4752 struct intel_iommu *iommu;
4753 unsigned long start_pfn, last_pfn;
4754 unsigned int npages;
4755 int iommu_id, level = 0;
4757 /* Cope with horrid API which requires us to unmap more than the
4758 size argument if it happens to be a large-page mapping. */
4759 if (!pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, &level))
4762 if (size < VTD_PAGE_SIZE << level_to_offset_bits(level))
4763 size = VTD_PAGE_SIZE << level_to_offset_bits(level);
4765 start_pfn = iova >> VTD_PAGE_SHIFT;
4766 last_pfn = (iova + size - 1) >> VTD_PAGE_SHIFT;
4768 freelist = domain_unmap(dmar_domain, start_pfn, last_pfn);
4770 npages = last_pfn - start_pfn + 1;
4772 for_each_domain_iommu(iommu_id, dmar_domain) {
4773 iommu = g_iommus[iommu_id];
4775 iommu_flush_iotlb_psi(g_iommus[iommu_id], dmar_domain,
4776 start_pfn, npages, !freelist, 0);
4779 dma_free_pagelist(freelist);
4781 if (dmar_domain->max_addr == iova + size)
4782 dmar_domain->max_addr = iova;
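/*
 * Because of the level check above, one unmap call may tear down an
 * entire 2MiB or 1GiB superpage even if the caller passed a smaller
 * size; the size handed back to the IOMMU core reflects what was
 * actually unmapped.
 */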
4787 static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
4790 struct dmar_domain *dmar_domain = to_dmar_domain(domain);
4791 struct dma_pte *pte;
4795 pte = pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, &level);
4797 phys = dma_pte_addr(pte);
4802 static bool intel_iommu_capable(enum iommu_cap cap)
4804 if (cap == IOMMU_CAP_CACHE_COHERENCY)
4805 return domain_update_iommu_snooping(NULL) == 1;
4806 if (cap == IOMMU_CAP_INTR_REMAP)
4807 return irq_remapping_enabled == 1;
4812 static int intel_iommu_add_device(struct device *dev)
4814 struct intel_iommu *iommu;
4815 struct iommu_group *group;
4818 iommu = device_to_iommu(dev, &bus, &devfn);
4822 iommu_device_link(iommu->iommu_dev, dev);
4824 group = iommu_group_get_for_dev(dev);
4827 return PTR_ERR(group);
4829 iommu_group_put(group);
4833 static void intel_iommu_remove_device(struct device *dev)
4835 struct intel_iommu *iommu;
4838 iommu = device_to_iommu(dev, &bus, &devfn);
4842 iommu_group_remove_device(dev);
4844 iommu_device_unlink(iommu->iommu_dev, dev);
4847 static const struct iommu_ops intel_iommu_ops = {
4848 .capable = intel_iommu_capable,
4849 .domain_alloc = intel_iommu_domain_alloc,
4850 .domain_free = intel_iommu_domain_free,
4851 .attach_dev = intel_iommu_attach_device,
4852 .detach_dev = intel_iommu_detach_device,
4853 .map = intel_iommu_map,
4854 .unmap = intel_iommu_unmap,
4855 .map_sg = default_iommu_map_sg,
4856 .iova_to_phys = intel_iommu_iova_to_phys,
4857 .add_device = intel_iommu_add_device,
4858 .remove_device = intel_iommu_remove_device,
4859 .pgsize_bitmap = INTEL_IOMMU_PGSIZES,
4862 static void quirk_iommu_g4x_gfx(struct pci_dev *dev)
4864 /* G4x/GM45 integrated gfx dmar support is totally busted. */
4865 pr_info("Disabling IOMMU for graphics on this chipset\n");
4869 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_g4x_gfx);
4870 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e00, quirk_iommu_g4x_gfx);
4871 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e10, quirk_iommu_g4x_gfx);
4872 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e20, quirk_iommu_g4x_gfx);
4873 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e30, quirk_iommu_g4x_gfx);
4874 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e40, quirk_iommu_g4x_gfx);
4875 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e90, quirk_iommu_g4x_gfx);
4877 static void quirk_iommu_rwbf(struct pci_dev *dev)
4880 * Mobile 4 Series Chipset neglects to set RWBF capability,
4881 * but needs it. Same seems to hold for the desktop versions.
4883 pr_info("Forcing write-buffer flush capability\n");
4887 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_rwbf);
4888 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e00, quirk_iommu_rwbf);
4889 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e10, quirk_iommu_rwbf);
4890 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e20, quirk_iommu_rwbf);
4891 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e30, quirk_iommu_rwbf);
4892 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e40, quirk_iommu_rwbf);
4893 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e90, quirk_iommu_rwbf);
4896 #define GGC_MEMORY_SIZE_MASK (0xf << 8)
4897 #define GGC_MEMORY_SIZE_NONE (0x0 << 8)
4898 #define GGC_MEMORY_SIZE_1M (0x1 << 8)
4899 #define GGC_MEMORY_SIZE_2M (0x3 << 8)
4900 #define GGC_MEMORY_VT_ENABLED (0x8 << 8)
4901 #define GGC_MEMORY_SIZE_2M_VT (0x9 << 8)
4902 #define GGC_MEMORY_SIZE_3M_VT (0xa << 8)
4903 #define GGC_MEMORY_SIZE_4M_VT (0xb << 8)
4905 static void quirk_calpella_no_shadow_gtt(struct pci_dev *dev)
4909 if (pci_read_config_word(dev, GGC, &ggc))
4912 if (!(ggc & GGC_MEMORY_VT_ENABLED)) {
4913 pr_info("BIOS has allocated no shadow GTT; disabling IOMMU for graphics\n");
4915 } else if (dmar_map_gfx) {
4916 /* we have to ensure the gfx device is idle before we flush */
4917 pr_info("Disabling batched IOTLB flush on Ironlake\n");
4918 intel_iommu_strict = 1;
4921 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0040, quirk_calpella_no_shadow_gtt);
4922 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0044, quirk_calpella_no_shadow_gtt);
4923 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0062, quirk_calpella_no_shadow_gtt);
4924 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x006a, quirk_calpella_no_shadow_gtt);
4926 /* On Tylersburg chipsets, some BIOSes have been known to enable the
4927 ISOCH DMAR unit for the Azalia sound device, but not give it any
4928 TLB entries, which causes it to deadlock. Check for that. We do
4929 this in a function called from init_dmars(), instead of in a PCI
4930 quirk, because we don't want to print the obnoxious "BIOS broken"
4931 message if VT-d is actually disabled.
4933 static void __init check_tylersburg_isoch(void)
4935 struct pci_dev *pdev;
4936 uint32_t vtisochctrl;
4938 /* If there's no Azalia in the system anyway, forget it. */
4939 pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x3a3e, NULL);
4944 /* System Management Registers. Might be hidden, in which case
4945 we can't do the sanity check. But that's OK, because the
4946 known-broken BIOSes _don't_ actually hide it, so far. */
4947 pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x342e, NULL);
4951 if (pci_read_config_dword(pdev, 0x188, &vtisochctrl)) {
4958 /* If Azalia DMA is routed to the non-isoch DMAR unit, fine. */
4959 if (vtisochctrl & 1)
4962 /* Drop all bits other than the number of TLB entries */
4963 vtisochctrl &= 0x1c;
4965 /* If we have the recommended number of TLB entries (16), fine. */
4966 if (vtisochctrl == 0x10)
4969 /* Zero TLB entries? You get to ride the short bus to school. */
4971 WARN(1, "Your BIOS is broken; DMA routed to ISOCH DMAR unit but no TLB space.\n"
4972 "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
4973 dmi_get_system_info(DMI_BIOS_VENDOR),
4974 dmi_get_system_info(DMI_BIOS_VERSION),
4975 dmi_get_system_info(DMI_PRODUCT_VERSION));
4976 iommu_identity_mapping |= IDENTMAP_AZALIA;
4980 pr_warn("Recommended TLB entries for ISOCH unit is 16; your BIOS set %d\n",