iommu/vt-d: Only insert alias dev_info if there is an alias
[firefly-linux-kernel-4.4.55.git] drivers/iommu/intel-iommu.c
1 /*
2  * Copyright © 2006-2014 Intel Corporation.
3  *
4  * This program is free software; you can redistribute it and/or modify it
5  * under the terms and conditions of the GNU General Public License,
6  * version 2, as published by the Free Software Foundation.
7  *
8  * This program is distributed in the hope it will be useful, but WITHOUT
9  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
11  * more details.
12  *
13  * Authors: David Woodhouse <dwmw2@infradead.org>,
14  *          Ashok Raj <ashok.raj@intel.com>,
15  *          Shaohua Li <shaohua.li@intel.com>,
16  *          Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>,
17  *          Fenghua Yu <fenghua.yu@intel.com>
18  *          Joerg Roedel <jroedel@suse.de>
19  */
20
21 #define pr_fmt(fmt)     "DMAR: " fmt
22
23 #include <linux/init.h>
24 #include <linux/bitmap.h>
25 #include <linux/debugfs.h>
26 #include <linux/export.h>
27 #include <linux/slab.h>
28 #include <linux/irq.h>
29 #include <linux/interrupt.h>
30 #include <linux/spinlock.h>
31 #include <linux/pci.h>
32 #include <linux/dmar.h>
33 #include <linux/dma-mapping.h>
34 #include <linux/mempool.h>
35 #include <linux/memory.h>
36 #include <linux/timer.h>
37 #include <linux/iova.h>
38 #include <linux/iommu.h>
39 #include <linux/intel-iommu.h>
40 #include <linux/syscore_ops.h>
41 #include <linux/tboot.h>
42 #include <linux/dmi.h>
43 #include <linux/pci-ats.h>
44 #include <linux/memblock.h>
45 #include <linux/dma-contiguous.h>
46 #include <linux/crash_dump.h>
47 #include <asm/irq_remapping.h>
48 #include <asm/cacheflush.h>
49 #include <asm/iommu.h>
50
51 #include "irq_remapping.h"
52
53 #define ROOT_SIZE               VTD_PAGE_SIZE
54 #define CONTEXT_SIZE            VTD_PAGE_SIZE
55
56 #define IS_GFX_DEVICE(pdev) ((pdev->class >> 16) == PCI_BASE_CLASS_DISPLAY)
57 #define IS_USB_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_SERIAL_USB)
58 #define IS_ISA_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_BRIDGE_ISA)
59 #define IS_AZALIA(pdev) ((pdev)->vendor == 0x8086 && (pdev)->device == 0x3a3e)
60
61 #define IOAPIC_RANGE_START      (0xfee00000)
62 #define IOAPIC_RANGE_END        (0xfeefffff)
63 #define IOVA_START_ADDR         (0x1000)
64
65 #define DEFAULT_DOMAIN_ADDRESS_WIDTH 48
66
67 #define MAX_AGAW_WIDTH 64
68 #define MAX_AGAW_PFN_WIDTH      (MAX_AGAW_WIDTH - VTD_PAGE_SHIFT)
69
70 #define __DOMAIN_MAX_PFN(gaw)  ((((uint64_t)1) << (gaw-VTD_PAGE_SHIFT)) - 1)
71 #define __DOMAIN_MAX_ADDR(gaw) ((((uint64_t)1) << gaw) - 1)
72
73 /* We limit DOMAIN_MAX_PFN to fit in an unsigned long, and DOMAIN_MAX_ADDR
74    to match. That way, we can use 'unsigned long' for PFNs with impunity. */
75 #define DOMAIN_MAX_PFN(gaw)     ((unsigned long) min_t(uint64_t, \
76                                 __DOMAIN_MAX_PFN(gaw), (unsigned long)-1))
77 #define DOMAIN_MAX_ADDR(gaw)    (((uint64_t)__DOMAIN_MAX_PFN(gaw)) << VTD_PAGE_SHIFT)
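/*
 * Worked example: with gaw = 48 and VTD_PAGE_SHIFT = 12,
 * __DOMAIN_MAX_PFN(48) is (1ULL << 36) - 1, so DOMAIN_MAX_ADDR(48) is
 * ((1ULL << 36) - 1) << 12, the last mappable byte just below 256TiB.
 * On 32-bit builds the min_t() clamp keeps DOMAIN_MAX_PFN within an
 * unsigned long.
 */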
78
79 /* IO virtual address start page frame number */
80 #define IOVA_START_PFN          (1)
81
82 #define IOVA_PFN(addr)          ((addr) >> PAGE_SHIFT)
83 #define DMA_32BIT_PFN           IOVA_PFN(DMA_BIT_MASK(32))
84 #define DMA_64BIT_PFN           IOVA_PFN(DMA_BIT_MASK(64))
85
86 /* page table handling */
87 #define LEVEL_STRIDE            (9)
88 #define LEVEL_MASK              (((u64)1 << LEVEL_STRIDE) - 1)
89
90 /*
91  * This bitmap is used to advertise the page sizes our hardware supports
92  * to the IOMMU core, which will then use this information to split
93  * physically contiguous memory regions it is mapping into page sizes
94  * that we support.
95  *
96  * Traditionally the IOMMU core just handed us the mappings directly,
97  * after making sure the size is an order of a 4KiB page and that the
98  * mapping has natural alignment.
99  *
100  * To retain this behavior, we currently advertise that we support
101  * all page sizes that are an order of 4KiB.
102  *
103  * If at some point we'd like to utilize the IOMMU core's new behavior,
104  * we could change this to advertise the real page sizes we support.
105  */
106 #define INTEL_IOMMU_PGSIZES     (~0xFFFUL)
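/*
 * Illustration: ~0xFFFUL has every bit from 12 upwards set, so the
 * core believes 4KiB, 8KiB, 16KiB, ... are all supported and can hand
 * a naturally aligned, power-of-two sized region to this driver in a
 * single ->map() call, which is exactly the traditional behavior
 * described above.
 */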
107
108 static inline int agaw_to_level(int agaw)
109 {
110         return agaw + 2;
111 }
112
113 static inline int agaw_to_width(int agaw)
114 {
115         return min_t(int, 30 + agaw * LEVEL_STRIDE, MAX_AGAW_WIDTH);
116 }
117
118 static inline int width_to_agaw(int width)
119 {
120         return DIV_ROUND_UP(width - 30, LEVEL_STRIDE);
121 }
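/*
 * Example of the agaw arithmetic: a 48-bit address width gives
 * width_to_agaw(48) = DIV_ROUND_UP(18, 9) = 2, agaw_to_level(2) = 4
 * (a four-level page table), and agaw_to_width(2) = 30 + 18 = 48
 * round-trips back to the original width.
 */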
122
123 static inline unsigned int level_to_offset_bits(int level)
124 {
125         return (level - 1) * LEVEL_STRIDE;
126 }
127
128 static inline int pfn_level_offset(unsigned long pfn, int level)
129 {
130         return (pfn >> level_to_offset_bits(level)) & LEVEL_MASK;
131 }
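/*
 * Example: for pfn 0x12345, the level-1 index is
 * (0x12345 >> 0) & 0x1ff = 0x145 and the level-2 index is
 * (0x12345 >> 9) & 0x1ff = 0x91, i.e. each level consumes another
 * 9-bit slice of the page frame number.
 */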
132
133 static inline unsigned long level_mask(int level)
134 {
135         return -1UL << level_to_offset_bits(level);
136 }
137
138 static inline unsigned long level_size(int level)
139 {
140         return 1UL << level_to_offset_bits(level);
141 }
142
143 static inline unsigned long align_to_level(unsigned long pfn, int level)
144 {
145         return (pfn + level_size(level) - 1) & level_mask(level);
146 }
147
148 static inline unsigned long lvl_to_nr_pages(unsigned int lvl)
149 {
150         return  1 << min_t(int, (lvl - 1) * LEVEL_STRIDE, MAX_AGAW_PFN_WIDTH);
151 }
152
153 /* VT-d pages must always be _smaller_ than MM pages. Otherwise things
154    are never going to work. */
155 static inline unsigned long dma_to_mm_pfn(unsigned long dma_pfn)
156 {
157         return dma_pfn >> (PAGE_SHIFT - VTD_PAGE_SHIFT);
158 }
159
160 static inline unsigned long mm_to_dma_pfn(unsigned long mm_pfn)
161 {
162         return mm_pfn << (PAGE_SHIFT - VTD_PAGE_SHIFT);
163 }
164 static inline unsigned long page_to_dma_pfn(struct page *pg)
165 {
166         return mm_to_dma_pfn(page_to_pfn(pg));
167 }
168 static inline unsigned long virt_to_dma_pfn(void *p)
169 {
170         return page_to_dma_pfn(virt_to_page(p));
171 }
172
173 /* global iommu list, set NULL for ignored DMAR units */
174 static struct intel_iommu **g_iommus;
175
176 static void __init check_tylersburg_isoch(void);
177 static int rwbf_quirk;
178
179 /*
180  * set to 1 to panic the kernel if VT-d cannot be enabled successfully
181  * (used when the kernel is launched with TXT)
182  */
183 static int force_on = 0;
184
185 /*
186  * 0: Present
187  * 1-11: Reserved
188  * 12-63: Context Ptr (12 - (haw-1))
189  * 64-127: Reserved
190  */
191 struct root_entry {
192         u64     lo;
193         u64     hi;
194 };
195 #define ROOT_ENTRY_NR (VTD_PAGE_SIZE/sizeof(struct root_entry))
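/* With a 4KiB root table and 16-byte root entries this works out to
   256 entries, one per PCI bus number. */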
196
197 /*
198  * Take a root_entry and return the Lower Context Table Pointer (LCTP)
199  * if marked present.
200  */
201 static phys_addr_t root_entry_lctp(struct root_entry *re)
202 {
203         if (!(re->lo & 1))
204                 return 0;
205
206         return re->lo & VTD_PAGE_MASK;
207 }
208
209 /*
210  * Take a root_entry and return the Upper Context Table Pointer (UCTP)
211  * if marked present.
212  */
213 static phys_addr_t root_entry_uctp(struct root_entry *re)
214 {
215         if (!(re->hi & 1))
216                 return 0;
217
218         return re->hi & VTD_PAGE_MASK;
219 }
220 /*
221  * low 64 bits:
222  * 0: present
223  * 1: fault processing disable
224  * 2-3: translation type
225  * 12-63: address space root
226  * high 64 bits:
227  * 0-2: address width
228  * 3-6: aval
229  * 8-23: domain id
230  */
231 struct context_entry {
232         u64 lo;
233         u64 hi;
234 };
235
236 static inline void context_clear_pasid_enable(struct context_entry *context)
237 {
238         context->lo &= ~(1ULL << 11);
239 }
240
241 static inline bool context_pasid_enabled(struct context_entry *context)
242 {
243         return !!(context->lo & (1ULL << 11));
244 }
245
246 static inline void context_set_copied(struct context_entry *context)
247 {
248         context->hi |= (1ull << 3);
249 }
250
251 static inline bool context_copied(struct context_entry *context)
252 {
253         return !!(context->hi & (1ULL << 3));
254 }
255
256 static inline bool __context_present(struct context_entry *context)
257 {
258         return (context->lo & 1);
259 }
260
261 static inline bool context_present(struct context_entry *context)
262 {
263         return context_pasid_enabled(context) ?
264              __context_present(context) :
265              __context_present(context) && !context_copied(context);
266 }
267
268 static inline void context_set_present(struct context_entry *context)
269 {
270         context->lo |= 1;
271 }
272
273 static inline void context_set_fault_enable(struct context_entry *context)
274 {
275         context->lo &= (((u64)-1) << 2) | 1;
276 }
277
278 static inline void context_set_translation_type(struct context_entry *context,
279                                                 unsigned long value)
280 {
281         context->lo &= (((u64)-1) << 4) | 3;
282         context->lo |= (value & 3) << 2;
283 }
284
285 static inline void context_set_address_root(struct context_entry *context,
286                                             unsigned long value)
287 {
288         context->lo &= ~VTD_PAGE_MASK;
289         context->lo |= value & VTD_PAGE_MASK;
290 }
291
292 static inline void context_set_address_width(struct context_entry *context,
293                                              unsigned long value)
294 {
295         context->hi |= value & 7;
296 }
297
298 static inline void context_set_domain_id(struct context_entry *context,
299                                          unsigned long value)
300 {
301         context->hi |= (value & ((1 << 16) - 1)) << 8;
302 }
303
304 static inline int context_domain_id(struct context_entry *c)
305 {
306         return((c->hi >> 8) & 0xffff);
307 }
308
309 static inline void context_clear_entry(struct context_entry *context)
310 {
311         context->lo = 0;
312         context->hi = 0;
313 }
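/*
 * Illustrative sketch (not a verbatim copy of the mapping path later
 * in this file) of how the helpers above are combined when a context
 * entry is programmed:
 *
 *	context_clear_entry(context);
 *	context_set_domain_id(context, did);
 *	context_set_address_width(context, domain->agaw);
 *	context_set_address_root(context, virt_to_phys(domain->pgd));
 *	context_set_translation_type(context, CONTEXT_TT_MULTI_LEVEL);
 *	context_set_fault_enable(context);
 *	context_set_present(context);
 */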
314
315 /*
316  * 0: readable
317  * 1: writable
318  * 2-6: reserved
319  * 7: super page
320  * 8-10: available
321  * 11: snoop behavior
322  * 12-63: Host physical address
323  */
324 struct dma_pte {
325         u64 val;
326 };
327
328 static inline void dma_clear_pte(struct dma_pte *pte)
329 {
330         pte->val = 0;
331 }
332
333 static inline u64 dma_pte_addr(struct dma_pte *pte)
334 {
335 #ifdef CONFIG_64BIT
336         return pte->val & VTD_PAGE_MASK;
337 #else
338         /* Must have a full atomic 64-bit read */
339         return  __cmpxchg64(&pte->val, 0ULL, 0ULL) & VTD_PAGE_MASK;
340 #endif
341 }
342
343 static inline bool dma_pte_present(struct dma_pte *pte)
344 {
345         return (pte->val & 3) != 0;
346 }
347
348 static inline bool dma_pte_superpage(struct dma_pte *pte)
349 {
350         return (pte->val & DMA_PTE_LARGE_PAGE);
351 }
352
353 static inline int first_pte_in_page(struct dma_pte *pte)
354 {
355         return !((unsigned long)pte & ~VTD_PAGE_MASK);
356 }
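/*
 * Example: a leaf PTE mapping host page 0xabcd000 read/write holds
 * val = 0xabcd000 | DMA_PTE_READ | DMA_PTE_WRITE (bits 0 and 1), so
 * dma_pte_present() is true, dma_pte_addr() recovers 0xabcd000, and
 * dma_pte_superpage() tests DMA_PTE_LARGE_PAGE (bit 7), which is set
 * for 2MiB/1GiB mappings.
 */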
357
358 /*
359  * This domain is a static identity mapping domain.
360  *      1. This domain creates a static 1:1 mapping of all usable memory.
361  *      2. It maps to each iommu if successful.
362  *      3. Each iommu maps to this domain if successful.
363  */
364 static struct dmar_domain *si_domain;
365 static int hw_pass_through = 1;
366
367 /*
368  * Domain represents a virtual machine; more than one device
369  * across iommus may be owned by one domain, e.g. a kvm guest.
370  */
371 #define DOMAIN_FLAG_VIRTUAL_MACHINE     (1 << 0)
372
373 /* si_domain contains multiple devices */
374 #define DOMAIN_FLAG_STATIC_IDENTITY     (1 << 1)
375
376 #define for_each_domain_iommu(idx, domain)                      \
377         for (idx = 0; idx < g_num_of_iommus; idx++)             \
378                 if (domain->iommu_refcnt[idx])
379
380 struct dmar_domain {
381         int     nid;                    /* node id */
382
383         unsigned        iommu_refcnt[DMAR_UNITS_SUPPORTED];
384                                         /* Refcount of devices per iommu */
385
386
387         u16             iommu_did[DMAR_UNITS_SUPPORTED];
388                                         /* Domain ids per IOMMU. Use u16 since
389                                          * domain ids are 16 bit wide according
390                                          * to VT-d spec, section 9.3 */
391
392         struct list_head devices;       /* all devices' list */
393         struct iova_domain iovad;       /* iova's that belong to this domain */
394
395         struct dma_pte  *pgd;           /* virtual address */
396         int             gaw;            /* max guest address width */
397
398         /* adjusted guest address width, 0 is level 2 30-bit */
399         int             agaw;
400
401         int             flags;          /* flags to find out type of domain */
402
403         int             iommu_coherency;/* indicate coherency of iommu access */
404         int             iommu_snooping; /* indicate snooping control feature*/
405         int             iommu_count;    /* reference count of iommu */
406         int             iommu_superpage;/* Level of superpages supported:
407                                            0 == 4KiB (no superpages), 1 == 2MiB,
408                                            2 == 1GiB, 3 == 512GiB, 4 == 1TiB */
409         u64             max_addr;       /* maximum mapped address */
410
411         struct iommu_domain domain;     /* generic domain data structure for
412                                            iommu core */
413 };
414
415 /* PCI domain-device relationship */
416 struct device_domain_info {
417         struct list_head link;  /* link to domain siblings */
418         struct list_head global; /* link to global list */
419         u8 bus;                 /* PCI bus number */
420         u8 devfn;               /* PCI devfn number */
421         struct device *dev; /* it's NULL for PCIe-to-PCI bridge */
422         struct intel_iommu *iommu; /* IOMMU used by this device */
423         struct dmar_domain *domain; /* pointer to domain */
424 };
425
426 struct dmar_rmrr_unit {
427         struct list_head list;          /* list of rmrr units   */
428         struct acpi_dmar_header *hdr;   /* ACPI header          */
429         u64     base_address;           /* reserved base address*/
430         u64     end_address;            /* reserved end address */
431         struct dmar_dev_scope *devices; /* target devices */
432         int     devices_cnt;            /* target device count */
433 };
434
435 struct dmar_atsr_unit {
436         struct list_head list;          /* list of ATSR units */
437         struct acpi_dmar_header *hdr;   /* ACPI header */
438         struct dmar_dev_scope *devices; /* target devices */
439         int devices_cnt;                /* target device count */
440         u8 include_all:1;               /* include all ports */
441 };
442
443 static LIST_HEAD(dmar_atsr_units);
444 static LIST_HEAD(dmar_rmrr_units);
445
446 #define for_each_rmrr_units(rmrr) \
447         list_for_each_entry(rmrr, &dmar_rmrr_units, list)
448
449 static void flush_unmaps_timeout(unsigned long data);
450
451 static DEFINE_TIMER(unmap_timer,  flush_unmaps_timeout, 0, 0);
452
453 #define HIGH_WATER_MARK 250
454 struct deferred_flush_tables {
455         int next;
456         struct iova *iova[HIGH_WATER_MARK];
457         struct dmar_domain *domain[HIGH_WATER_MARK];
458         struct page *freelist[HIGH_WATER_MARK];
459 };
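/*
 * Sizing note: one of these tables is allocated per IOMMU unit later
 * in this file (allocation not shown in this excerpt), so up to
 * HIGH_WATER_MARK (250) deferred IOVAs can be queued per IOMMU before
 * a flush is forced and the backlog is freed.
 */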
460
461 static struct deferred_flush_tables *deferred_flush;
462
463 /* number of registered IOMMU units; used to size per-iommu arrays */
464 static int g_num_of_iommus;
465
466 static DEFINE_SPINLOCK(async_umap_flush_lock);
467 static LIST_HEAD(unmaps_to_do);
468
469 static int timer_on;
470 static long list_size;
471
472 static void domain_exit(struct dmar_domain *domain);
473 static void domain_remove_dev_info(struct dmar_domain *domain);
474 static void dmar_remove_one_dev_info(struct dmar_domain *domain,
475                                      struct device *dev);
476 static void __dmar_remove_one_dev_info(struct device_domain_info *info);
477 static void domain_context_clear(struct intel_iommu *iommu,
478                                  struct device *dev);
479 static int domain_detach_iommu(struct dmar_domain *domain,
480                                struct intel_iommu *iommu);
481
482 #ifdef CONFIG_INTEL_IOMMU_DEFAULT_ON
483 int dmar_disabled = 0;
484 #else
485 int dmar_disabled = 1;
486 #endif /*CONFIG_INTEL_IOMMU_DEFAULT_ON*/
487
488 int intel_iommu_enabled = 0;
489 EXPORT_SYMBOL_GPL(intel_iommu_enabled);
490
491 static int dmar_map_gfx = 1;
492 static int dmar_forcedac;
493 static int intel_iommu_strict;
494 static int intel_iommu_superpage = 1;
495 static int intel_iommu_ecs = 1;
496
497 /* We only actually use ECS when PASID support (on the new bit 40)
498  * is also advertised. Some early implementations — the ones with
499  * PASID support on bit 28 — have issues even when we *only* use
500  * extended root/context tables. */
501 #define ecs_enabled(iommu) (intel_iommu_ecs && ecap_ecs(iommu->ecap) && \
502                             ecap_pasid(iommu->ecap))
503
504 int intel_iommu_gfx_mapped;
505 EXPORT_SYMBOL_GPL(intel_iommu_gfx_mapped);
506
507 #define DUMMY_DEVICE_DOMAIN_INFO ((struct device_domain_info *)(-1))
508 static DEFINE_SPINLOCK(device_domain_lock);
509 static LIST_HEAD(device_domain_list);
510
511 static const struct iommu_ops intel_iommu_ops;
512
513 static bool translation_pre_enabled(struct intel_iommu *iommu)
514 {
515         return (iommu->flags & VTD_FLAG_TRANS_PRE_ENABLED);
516 }
517
518 static void clear_translation_pre_enabled(struct intel_iommu *iommu)
519 {
520         iommu->flags &= ~VTD_FLAG_TRANS_PRE_ENABLED;
521 }
522
523 static void init_translation_status(struct intel_iommu *iommu)
524 {
525         u32 gsts;
526
527         gsts = readl(iommu->reg + DMAR_GSTS_REG);
528         if (gsts & DMA_GSTS_TES)
529                 iommu->flags |= VTD_FLAG_TRANS_PRE_ENABLED;
530 }
531
532 /* Convert a generic 'struct iommu_domain' to the private 'struct dmar_domain' */
533 static struct dmar_domain *to_dmar_domain(struct iommu_domain *dom)
534 {
535         return container_of(dom, struct dmar_domain, domain);
536 }
537
538 static int __init intel_iommu_setup(char *str)
539 {
540         if (!str)
541                 return -EINVAL;
542         while (*str) {
543                 if (!strncmp(str, "on", 2)) {
544                         dmar_disabled = 0;
545                         pr_info("IOMMU enabled\n");
546                 } else if (!strncmp(str, "off", 3)) {
547                         dmar_disabled = 1;
548                         pr_info("IOMMU disabled\n");
549                 } else if (!strncmp(str, "igfx_off", 8)) {
550                         dmar_map_gfx = 0;
551                         pr_info("Disable GFX device mapping\n");
552                 } else if (!strncmp(str, "forcedac", 8)) {
553                         pr_info("Forcing DAC for PCI devices\n");
554                         dmar_forcedac = 1;
555                 } else if (!strncmp(str, "strict", 6)) {
556                         pr_info("Disable batched IOTLB flush\n");
557                         intel_iommu_strict = 1;
558                 } else if (!strncmp(str, "sp_off", 6)) {
559                         pr_info("Disable supported super page\n");
560                         intel_iommu_superpage = 0;
561                 } else if (!strncmp(str, "ecs_off", 7)) {
562                         printk(KERN_INFO
563                                 "Intel-IOMMU: disable extended context table support\n");
564                         intel_iommu_ecs = 0;
565                 }
566
567                 str += strcspn(str, ",");
568                 while (*str == ',')
569                         str++;
570         }
571         return 0;
572 }
573 __setup("intel_iommu=", intel_iommu_setup);
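/*
 * Usage example: options are comma separated on the kernel command
 * line, e.g.
 *
 *	intel_iommu=on,strict,igfx_off
 *
 * which enables the IOMMU, disables batched IOTLB flushing and leaves
 * graphics DMA identity mapped (untranslated).
 */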
574
575 static struct kmem_cache *iommu_domain_cache;
576 static struct kmem_cache *iommu_devinfo_cache;
577
578 static struct dmar_domain* get_iommu_domain(struct intel_iommu *iommu, u16 did)
579 {
580         struct dmar_domain **domains;
581         int idx = did >> 8;
582
583         domains = iommu->domains[idx];
584         if (!domains)
585                 return NULL;
586
587         return domains[did & 0xff];
588 }
589
590 static void set_iommu_domain(struct intel_iommu *iommu, u16 did,
591                              struct dmar_domain *domain)
592 {
593         struct dmar_domain **domains;
594         int idx = did >> 8;
595
596         if (!iommu->domains[idx]) {
597                 size_t size = 256 * sizeof(struct dmar_domain *);
598                 iommu->domains[idx] = kzalloc(size, GFP_ATOMIC);
599         }
600
601         domains = iommu->domains[idx];
602         if (WARN_ON(!domains))
603                 return;
604         else
605                 domains[did & 0xff] = domain;
606 }
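/*
 * The two helpers above implement a two-level lookup for the 16-bit
 * domain id: the top byte (did >> 8) selects a lazily allocated page
 * of 256 domain pointers and the low byte (did & 0xff) selects the
 * slot, so e.g. did 0x1234 lives in iommu->domains[0x12][0x34].
 */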
607
608 static inline void *alloc_pgtable_page(int node)
609 {
610         struct page *page;
611         void *vaddr = NULL;
612
613         page = alloc_pages_node(node, GFP_ATOMIC | __GFP_ZERO, 0);
614         if (page)
615                 vaddr = page_address(page);
616         return vaddr;
617 }
618
619 static inline void free_pgtable_page(void *vaddr)
620 {
621         free_page((unsigned long)vaddr);
622 }
623
624 static inline void *alloc_domain_mem(void)
625 {
626         return kmem_cache_alloc(iommu_domain_cache, GFP_ATOMIC);
627 }
628
629 static void free_domain_mem(void *vaddr)
630 {
631         kmem_cache_free(iommu_domain_cache, vaddr);
632 }
633
634 static inline void * alloc_devinfo_mem(void)
635 {
636         return kmem_cache_alloc(iommu_devinfo_cache, GFP_ATOMIC);
637 }
638
639 static inline void free_devinfo_mem(void *vaddr)
640 {
641         kmem_cache_free(iommu_devinfo_cache, vaddr);
642 }
643
644 static inline int domain_type_is_vm(struct dmar_domain *domain)
645 {
646         return domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE;
647 }
648
649 static inline int domain_type_is_si(struct dmar_domain *domain)
650 {
651         return domain->flags & DOMAIN_FLAG_STATIC_IDENTITY;
652 }
653
654 static inline int domain_type_is_vm_or_si(struct dmar_domain *domain)
655 {
656         return domain->flags & (DOMAIN_FLAG_VIRTUAL_MACHINE |
657                                 DOMAIN_FLAG_STATIC_IDENTITY);
658 }
659
660 static inline int domain_pfn_supported(struct dmar_domain *domain,
661                                        unsigned long pfn)
662 {
663         int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
664
665         return !(addr_width < BITS_PER_LONG && pfn >> addr_width);
666 }
667
668 static int __iommu_calculate_agaw(struct intel_iommu *iommu, int max_gaw)
669 {
670         unsigned long sagaw;
671         int agaw = -1;
672
673         sagaw = cap_sagaw(iommu->cap);
674         for (agaw = width_to_agaw(max_gaw);
675              agaw >= 0; agaw--) {
676                 if (test_bit(agaw, &sagaw))
677                         break;
678         }
679
680         return agaw;
681 }
682
683 /*
684  * Calculate max SAGAW for each iommu.
685  */
686 int iommu_calculate_max_sagaw(struct intel_iommu *iommu)
687 {
688         return __iommu_calculate_agaw(iommu, MAX_AGAW_WIDTH);
689 }
690
691 /*
692  * Calculate agaw for each iommu.
693  * "SAGAW" may be different across iommus; use a default agaw and fall
694  * back to a smaller supported agaw for iommus that don't support the default.
695  */
696 int iommu_calculate_agaw(struct intel_iommu *iommu)
697 {
698         return __iommu_calculate_agaw(iommu, DEFAULT_DOMAIN_ADDRESS_WIDTH);
699 }
700
701 /* This function only returns a single iommu in a domain */
702 static struct intel_iommu *domain_get_iommu(struct dmar_domain *domain)
703 {
704         int iommu_id;
705
706         /* si_domain and vm domain should not get here. */
707         BUG_ON(domain_type_is_vm_or_si(domain));
708         for_each_domain_iommu(iommu_id, domain)
709                 break;
710
711         if (iommu_id < 0 || iommu_id >= g_num_of_iommus)
712                 return NULL;
713
714         return g_iommus[iommu_id];
715 }
716
717 static void domain_update_iommu_coherency(struct dmar_domain *domain)
718 {
719         struct dmar_drhd_unit *drhd;
720         struct intel_iommu *iommu;
721         bool found = false;
722         int i;
723
724         domain->iommu_coherency = 1;
725
726         for_each_domain_iommu(i, domain) {
727                 found = true;
728                 if (!ecap_coherent(g_iommus[i]->ecap)) {
729                         domain->iommu_coherency = 0;
730                         break;
731                 }
732         }
733         if (found)
734                 return;
735
736         /* No hardware attached; use lowest common denominator */
737         rcu_read_lock();
738         for_each_active_iommu(iommu, drhd) {
739                 if (!ecap_coherent(iommu->ecap)) {
740                         domain->iommu_coherency = 0;
741                         break;
742                 }
743         }
744         rcu_read_unlock();
745 }
746
747 static int domain_update_iommu_snooping(struct intel_iommu *skip)
748 {
749         struct dmar_drhd_unit *drhd;
750         struct intel_iommu *iommu;
751         int ret = 1;
752
753         rcu_read_lock();
754         for_each_active_iommu(iommu, drhd) {
755                 if (iommu != skip) {
756                         if (!ecap_sc_support(iommu->ecap)) {
757                                 ret = 0;
758                                 break;
759                         }
760                 }
761         }
762         rcu_read_unlock();
763
764         return ret;
765 }
766
767 static int domain_update_iommu_superpage(struct intel_iommu *skip)
768 {
769         struct dmar_drhd_unit *drhd;
770         struct intel_iommu *iommu;
771         int mask = 0xf;
772
773         if (!intel_iommu_superpage) {
774                 return 0;
775         }
776
777         /* set iommu_superpage to the smallest common denominator */
778         rcu_read_lock();
779         for_each_active_iommu(iommu, drhd) {
780                 if (iommu != skip) {
781                         mask &= cap_super_page_val(iommu->cap);
782                         if (!mask)
783                                 break;
784                 }
785         }
786         rcu_read_unlock();
787
788         return fls(mask);
789 }
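/*
 * Example: cap_super_page_val() is a bitmask where bit 0 means 2MiB
 * and bit 1 means 1GiB pages. If every IOMMU reports 0x1, mask ends
 * up 0x1 and fls(mask) = 1, i.e. iommu_superpage = 1 (2MiB only); if
 * any IOMMU reports 0, the result is 0 and only 4KiB pages are used.
 */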
790
791 /* Some capabilities may be different across iommus */
792 static void domain_update_iommu_cap(struct dmar_domain *domain)
793 {
794         domain_update_iommu_coherency(domain);
795         domain->iommu_snooping = domain_update_iommu_snooping(NULL);
796         domain->iommu_superpage = domain_update_iommu_superpage(NULL);
797 }
798
799 static inline struct context_entry *iommu_context_addr(struct intel_iommu *iommu,
800                                                        u8 bus, u8 devfn, int alloc)
801 {
802         struct root_entry *root = &iommu->root_entry[bus];
803         struct context_entry *context;
804         u64 *entry;
805
806         entry = &root->lo;
807         if (ecs_enabled(iommu)) {
808                 if (devfn >= 0x80) {
809                         devfn -= 0x80;
810                         entry = &root->hi;
811                 }
812                 devfn *= 2;
813         }
814         if (*entry & 1)
815                 context = phys_to_virt(*entry & VTD_PAGE_MASK);
816         else {
817                 unsigned long phy_addr;
818                 if (!alloc)
819                         return NULL;
820
821                 context = alloc_pgtable_page(iommu->node);
822                 if (!context)
823                         return NULL;
824
825                 __iommu_flush_cache(iommu, (void *)context, CONTEXT_SIZE);
826                 phy_addr = virt_to_phys((void *)context);
827                 *entry = phy_addr | 1;
828                 __iommu_flush_cache(iommu, entry, sizeof(*entry));
829         }
830         return &context[devfn];
831 }
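/*
 * With extended context support (ecs_enabled()), a root entry still
 * covers one bus but is split in two: root->lo points to a context
 * table for devfn 0x00-0x7f and root->hi to one for devfn 0x80-0xff,
 * and each device consumes two consecutive 128-bit slots (hence
 * devfn *= 2) to form its 256-bit extended context entry.
 */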
832
833 static int iommu_dummy(struct device *dev)
834 {
835         return dev->archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO;
836 }
837
838 static struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devfn)
839 {
840         struct dmar_drhd_unit *drhd = NULL;
841         struct intel_iommu *iommu;
842         struct device *tmp;
843         struct pci_dev *ptmp, *pdev = NULL;
844         u16 segment = 0;
845         int i;
846
847         if (iommu_dummy(dev))
848                 return NULL;
849
850         if (dev_is_pci(dev)) {
851                 pdev = to_pci_dev(dev);
852                 segment = pci_domain_nr(pdev->bus);
853         } else if (has_acpi_companion(dev))
854                 dev = &ACPI_COMPANION(dev)->dev;
855
856         rcu_read_lock();
857         for_each_active_iommu(iommu, drhd) {
858                 if (pdev && segment != drhd->segment)
859                         continue;
860
861                 for_each_active_dev_scope(drhd->devices,
862                                           drhd->devices_cnt, i, tmp) {
863                         if (tmp == dev) {
864                                 *bus = drhd->devices[i].bus;
865                                 *devfn = drhd->devices[i].devfn;
866                                 goto out;
867                         }
868
869                         if (!pdev || !dev_is_pci(tmp))
870                                 continue;
871
872                         ptmp = to_pci_dev(tmp);
873                         if (ptmp->subordinate &&
874                             ptmp->subordinate->number <= pdev->bus->number &&
875                             ptmp->subordinate->busn_res.end >= pdev->bus->number)
876                                 goto got_pdev;
877                 }
878
879                 if (pdev && drhd->include_all) {
880                 got_pdev:
881                         *bus = pdev->bus->number;
882                         *devfn = pdev->devfn;
883                         goto out;
884                 }
885         }
886         iommu = NULL;
887  out:
888         rcu_read_unlock();
889
890         return iommu;
891 }
892
893 static void domain_flush_cache(struct dmar_domain *domain,
894                                void *addr, int size)
895 {
896         if (!domain->iommu_coherency)
897                 clflush_cache_range(addr, size);
898 }
899
900 static int device_context_mapped(struct intel_iommu *iommu, u8 bus, u8 devfn)
901 {
902         struct context_entry *context;
903         int ret = 0;
904         unsigned long flags;
905
906         spin_lock_irqsave(&iommu->lock, flags);
907         context = iommu_context_addr(iommu, bus, devfn, 0);
908         if (context)
909                 ret = context_present(context);
910         spin_unlock_irqrestore(&iommu->lock, flags);
911         return ret;
912 }
913
914 static void clear_context_table(struct intel_iommu *iommu, u8 bus, u8 devfn)
915 {
916         struct context_entry *context;
917         unsigned long flags;
918
919         spin_lock_irqsave(&iommu->lock, flags);
920         context = iommu_context_addr(iommu, bus, devfn, 0);
921         if (context) {
922                 context_clear_entry(context);
923                 __iommu_flush_cache(iommu, context, sizeof(*context));
924         }
925         spin_unlock_irqrestore(&iommu->lock, flags);
926 }
927
928 static void free_context_table(struct intel_iommu *iommu)
929 {
930         int i;
931         unsigned long flags;
932         struct context_entry *context;
933
934         spin_lock_irqsave(&iommu->lock, flags);
935         if (!iommu->root_entry) {
936                 goto out;
937         }
938         for (i = 0; i < ROOT_ENTRY_NR; i++) {
939                 context = iommu_context_addr(iommu, i, 0, 0);
940                 if (context)
941                         free_pgtable_page(context);
942
943                 if (!ecs_enabled(iommu))
944                         continue;
945
946                 context = iommu_context_addr(iommu, i, 0x80, 0);
947                 if (context)
948                         free_pgtable_page(context);
949
950         }
951         free_pgtable_page(iommu->root_entry);
952         iommu->root_entry = NULL;
953 out:
954         spin_unlock_irqrestore(&iommu->lock, flags);
955 }
956
957 static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
958                                       unsigned long pfn, int *target_level)
959 {
960         struct dma_pte *parent, *pte = NULL;
961         int level = agaw_to_level(domain->agaw);
962         int offset;
963
964         BUG_ON(!domain->pgd);
965
966         if (!domain_pfn_supported(domain, pfn))
967                 /* Address beyond IOMMU's addressing capabilities. */
968                 return NULL;
969
970         parent = domain->pgd;
971
972         while (1) {
973                 void *tmp_page;
974
975                 offset = pfn_level_offset(pfn, level);
976                 pte = &parent[offset];
977                 if (!*target_level && (dma_pte_superpage(pte) || !dma_pte_present(pte)))
978                         break;
979                 if (level == *target_level)
980                         break;
981
982                 if (!dma_pte_present(pte)) {
983                         uint64_t pteval;
984
985                         tmp_page = alloc_pgtable_page(domain->nid);
986
987                         if (!tmp_page)
988                                 return NULL;
989
990                         domain_flush_cache(domain, tmp_page, VTD_PAGE_SIZE);
991                         pteval = ((uint64_t)virt_to_dma_pfn(tmp_page) << VTD_PAGE_SHIFT) | DMA_PTE_READ | DMA_PTE_WRITE;
992                         if (cmpxchg64(&pte->val, 0ULL, pteval))
993                                 /* Someone else set it while we were thinking; use theirs. */
994                                 free_pgtable_page(tmp_page);
995                         else
996                                 domain_flush_cache(domain, pte, sizeof(*pte));
997                 }
998                 if (level == 1)
999                         break;
1000
1001                 parent = phys_to_virt(dma_pte_addr(pte));
1002                 level--;
1003         }
1004
1005         if (!*target_level)
1006                 *target_level = level;
1007
1008         return pte;
1009 }
1010
1011
1012 /* return address's pte at specific level */
1013 static struct dma_pte *dma_pfn_level_pte(struct dmar_domain *domain,
1014                                          unsigned long pfn,
1015                                          int level, int *large_page)
1016 {
1017         struct dma_pte *parent, *pte = NULL;
1018         int total = agaw_to_level(domain->agaw);
1019         int offset;
1020
1021         parent = domain->pgd;
1022         while (level <= total) {
1023                 offset = pfn_level_offset(pfn, total);
1024                 pte = &parent[offset];
1025                 if (level == total)
1026                         return pte;
1027
1028                 if (!dma_pte_present(pte)) {
1029                         *large_page = total;
1030                         break;
1031                 }
1032
1033                 if (dma_pte_superpage(pte)) {
1034                         *large_page = total;
1035                         return pte;
1036                 }
1037
1038                 parent = phys_to_virt(dma_pte_addr(pte));
1039                 total--;
1040         }
1041         return NULL;
1042 }
1043
1044 /* clear last level pte; a tlb flush should follow */
1045 static void dma_pte_clear_range(struct dmar_domain *domain,
1046                                 unsigned long start_pfn,
1047                                 unsigned long last_pfn)
1048 {
1049         unsigned int large_page = 1;
1050         struct dma_pte *first_pte, *pte;
1051
1052         BUG_ON(!domain_pfn_supported(domain, start_pfn));
1053         BUG_ON(!domain_pfn_supported(domain, last_pfn));
1054         BUG_ON(start_pfn > last_pfn);
1055
1056         /* we don't need lock here; nobody else touches the iova range */
1057         do {
1058                 large_page = 1;
1059                 first_pte = pte = dma_pfn_level_pte(domain, start_pfn, 1, &large_page);
1060                 if (!pte) {
1061                         start_pfn = align_to_level(start_pfn + 1, large_page + 1);
1062                         continue;
1063                 }
1064                 do {
1065                         dma_clear_pte(pte);
1066                         start_pfn += lvl_to_nr_pages(large_page);
1067                         pte++;
1068                 } while (start_pfn <= last_pfn && !first_pte_in_page(pte));
1069
1070                 domain_flush_cache(domain, first_pte,
1071                                    (void *)pte - (void *)first_pte);
1072
1073         } while (start_pfn && start_pfn <= last_pfn);
1074 }
1075
1076 static void dma_pte_free_level(struct dmar_domain *domain, int level,
1077                                struct dma_pte *pte, unsigned long pfn,
1078                                unsigned long start_pfn, unsigned long last_pfn)
1079 {
1080         pfn = max(start_pfn, pfn);
1081         pte = &pte[pfn_level_offset(pfn, level)];
1082
1083         do {
1084                 unsigned long level_pfn;
1085                 struct dma_pte *level_pte;
1086
1087                 if (!dma_pte_present(pte) || dma_pte_superpage(pte))
1088                         goto next;
1089
1090                 level_pfn = pfn & level_mask(level - 1);
1091                 level_pte = phys_to_virt(dma_pte_addr(pte));
1092
1093                 if (level > 2)
1094                         dma_pte_free_level(domain, level - 1, level_pte,
1095                                            level_pfn, start_pfn, last_pfn);
1096
1097                 /* If range covers entire pagetable, free it */
1098                 if (!(start_pfn > level_pfn ||
1099                       last_pfn < level_pfn + level_size(level) - 1)) {
1100                         dma_clear_pte(pte);
1101                         domain_flush_cache(domain, pte, sizeof(*pte));
1102                         free_pgtable_page(level_pte);
1103                 }
1104 next:
1105                 pfn += level_size(level);
1106         } while (!first_pte_in_page(++pte) && pfn <= last_pfn);
1107 }
1108
1109 /* free page table pages. last level pte should already be cleared */
1110 static void dma_pte_free_pagetable(struct dmar_domain *domain,
1111                                    unsigned long start_pfn,
1112                                    unsigned long last_pfn)
1113 {
1114         BUG_ON(!domain_pfn_supported(domain, start_pfn));
1115         BUG_ON(!domain_pfn_supported(domain, last_pfn));
1116         BUG_ON(start_pfn > last_pfn);
1117
1118         dma_pte_clear_range(domain, start_pfn, last_pfn);
1119
1120         /* We don't need lock here; nobody else touches the iova range */
1121         dma_pte_free_level(domain, agaw_to_level(domain->agaw),
1122                            domain->pgd, 0, start_pfn, last_pfn);
1123
1124         /* free pgd */
1125         if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) {
1126                 free_pgtable_page(domain->pgd);
1127                 domain->pgd = NULL;
1128         }
1129 }
1130
1131 /* When a page at a given level is being unlinked from its parent, we don't
1132    need to *modify* it at all. All we need to do is make a list of all the
1133    pages which can be freed just as soon as we've flushed the IOTLB and we
1134    know the hardware page-walk will no longer touch them.
1135    The 'pte' argument is the *parent* PTE, pointing to the page that is to
1136    be freed. */
1137 static struct page *dma_pte_list_pagetables(struct dmar_domain *domain,
1138                                             int level, struct dma_pte *pte,
1139                                             struct page *freelist)
1140 {
1141         struct page *pg;
1142
1143         pg = pfn_to_page(dma_pte_addr(pte) >> PAGE_SHIFT);
1144         pg->freelist = freelist;
1145         freelist = pg;
1146
1147         if (level == 1)
1148                 return freelist;
1149
1150         pte = page_address(pg);
1151         do {
1152                 if (dma_pte_present(pte) && !dma_pte_superpage(pte))
1153                         freelist = dma_pte_list_pagetables(domain, level - 1,
1154                                                            pte, freelist);
1155                 pte++;
1156         } while (!first_pte_in_page(pte));
1157
1158         return freelist;
1159 }
1160
1161 static struct page *dma_pte_clear_level(struct dmar_domain *domain, int level,
1162                                         struct dma_pte *pte, unsigned long pfn,
1163                                         unsigned long start_pfn,
1164                                         unsigned long last_pfn,
1165                                         struct page *freelist)
1166 {
1167         struct dma_pte *first_pte = NULL, *last_pte = NULL;
1168
1169         pfn = max(start_pfn, pfn);
1170         pte = &pte[pfn_level_offset(pfn, level)];
1171
1172         do {
1173                 unsigned long level_pfn;
1174
1175                 if (!dma_pte_present(pte))
1176                         goto next;
1177
1178                 level_pfn = pfn & level_mask(level);
1179
1180                 /* If range covers entire pagetable, free it */
1181                 if (start_pfn <= level_pfn &&
1182                     last_pfn >= level_pfn + level_size(level) - 1) {
1183                         /* These subordinate page tables are going away entirely. Don't
1184                            bother to clear them; we're just going to *free* them. */
1185                         if (level > 1 && !dma_pte_superpage(pte))
1186                                 freelist = dma_pte_list_pagetables(domain, level - 1, pte, freelist);
1187
1188                         dma_clear_pte(pte);
1189                         if (!first_pte)
1190                                 first_pte = pte;
1191                         last_pte = pte;
1192                 } else if (level > 1) {
1193                         /* Recurse down into a level that isn't *entirely* obsolete */
1194                         freelist = dma_pte_clear_level(domain, level - 1,
1195                                                        phys_to_virt(dma_pte_addr(pte)),
1196                                                        level_pfn, start_pfn, last_pfn,
1197                                                        freelist);
1198                 }
1199 next:
1200                 pfn += level_size(level);
1201         } while (!first_pte_in_page(++pte) && pfn <= last_pfn);
1202
1203         if (first_pte)
1204                 domain_flush_cache(domain, first_pte,
1205                                    (void *)++last_pte - (void *)first_pte);
1206
1207         return freelist;
1208 }
1209
1210 /* We can't just free the pages because the IOMMU may still be walking
1211    the page tables, and may have cached the intermediate levels. The
1212    pages can only be freed after the IOTLB flush has been done. */
1213 struct page *domain_unmap(struct dmar_domain *domain,
1214                           unsigned long start_pfn,
1215                           unsigned long last_pfn)
1216 {
1217         struct page *freelist = NULL;
1218
1219         BUG_ON(!domain_pfn_supported(domain, start_pfn));
1220         BUG_ON(!domain_pfn_supported(domain, last_pfn));
1221         BUG_ON(start_pfn > last_pfn);
1222
1223         /* we don't need lock here; nobody else touches the iova range */
1224         freelist = dma_pte_clear_level(domain, agaw_to_level(domain->agaw),
1225                                        domain->pgd, 0, start_pfn, last_pfn, NULL);
1226
1227         /* free pgd */
1228         if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) {
1229                 struct page *pgd_page = virt_to_page(domain->pgd);
1230                 pgd_page->freelist = freelist;
1231                 freelist = pgd_page;
1232
1233                 domain->pgd = NULL;
1234         }
1235
1236         return freelist;
1237 }
1238
1239 void dma_free_pagelist(struct page *freelist)
1240 {
1241         struct page *pg;
1242
1243         while ((pg = freelist)) {
1244                 freelist = pg->freelist;
1245                 free_pgtable_page(page_address(pg));
1246         }
1247 }
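/*
 * Sketch of the intended unmap sequence (illustrative; the real
 * callers live elsewhere in this file):
 *
 *	freelist = domain_unmap(domain, start_pfn, last_pfn);
 *	iommu_flush_iotlb_psi(iommu, domain, start_pfn,
 *			      last_pfn - start_pfn + 1, 0, 0);
 *	dma_free_pagelist(freelist);
 *
 * i.e. page-table pages go back to the allocator only after the IOTLB
 * and any cached intermediate entries have been invalidated.
 */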
1248
1249 /* iommu handling */
1250 static int iommu_alloc_root_entry(struct intel_iommu *iommu)
1251 {
1252         struct root_entry *root;
1253         unsigned long flags;
1254
1255         root = (struct root_entry *)alloc_pgtable_page(iommu->node);
1256         if (!root) {
1257                 pr_err("Allocating root entry for %s failed\n",
1258                         iommu->name);
1259                 return -ENOMEM;
1260         }
1261
1262         __iommu_flush_cache(iommu, root, ROOT_SIZE);
1263
1264         spin_lock_irqsave(&iommu->lock, flags);
1265         iommu->root_entry = root;
1266         spin_unlock_irqrestore(&iommu->lock, flags);
1267
1268         return 0;
1269 }
1270
1271 static void iommu_set_root_entry(struct intel_iommu *iommu)
1272 {
1273         u64 addr;
1274         u32 sts;
1275         unsigned long flag;
1276
1277         addr = virt_to_phys(iommu->root_entry);
1278         if (ecs_enabled(iommu))
1279                 addr |= DMA_RTADDR_RTT;
1280
1281         raw_spin_lock_irqsave(&iommu->register_lock, flag);
1282         dmar_writeq(iommu->reg + DMAR_RTADDR_REG, addr);
1283
1284         writel(iommu->gcmd | DMA_GCMD_SRTP, iommu->reg + DMAR_GCMD_REG);
1285
1286         /* Make sure hardware complete it */
1287         IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
1288                       readl, (sts & DMA_GSTS_RTPS), sts);
1289
1290         raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1291 }
1292
1293 static void iommu_flush_write_buffer(struct intel_iommu *iommu)
1294 {
1295         u32 val;
1296         unsigned long flag;
1297
1298         if (!rwbf_quirk && !cap_rwbf(iommu->cap))
1299                 return;
1300
1301         raw_spin_lock_irqsave(&iommu->register_lock, flag);
1302         writel(iommu->gcmd | DMA_GCMD_WBF, iommu->reg + DMAR_GCMD_REG);
1303
1304         /* Make sure hardware complete it */
1305         IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
1306                       readl, (!(val & DMA_GSTS_WBFS)), val);
1307
1308         raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1309 }
1310
1311 /* return value determines if we need a write buffer flush */
1312 static void __iommu_flush_context(struct intel_iommu *iommu,
1313                                   u16 did, u16 source_id, u8 function_mask,
1314                                   u64 type)
1315 {
1316         u64 val = 0;
1317         unsigned long flag;
1318
1319         switch (type) {
1320         case DMA_CCMD_GLOBAL_INVL:
1321                 val = DMA_CCMD_GLOBAL_INVL;
1322                 break;
1323         case DMA_CCMD_DOMAIN_INVL:
1324                 val = DMA_CCMD_DOMAIN_INVL|DMA_CCMD_DID(did);
1325                 break;
1326         case DMA_CCMD_DEVICE_INVL:
1327                 val = DMA_CCMD_DEVICE_INVL|DMA_CCMD_DID(did)
1328                         | DMA_CCMD_SID(source_id) | DMA_CCMD_FM(function_mask);
1329                 break;
1330         default:
1331                 BUG();
1332         }
1333         val |= DMA_CCMD_ICC;
1334
1335         raw_spin_lock_irqsave(&iommu->register_lock, flag);
1336         dmar_writeq(iommu->reg + DMAR_CCMD_REG, val);
1337
1338         /* Make sure hardware complete it */
1339         IOMMU_WAIT_OP(iommu, DMAR_CCMD_REG,
1340                 dmar_readq, (!(val & DMA_CCMD_ICC)), val);
1341
1342         raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1343 }
1344
1345 /* return value determines if we need a write buffer flush */
1346 static void __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did,
1347                                 u64 addr, unsigned int size_order, u64 type)
1348 {
1349         int tlb_offset = ecap_iotlb_offset(iommu->ecap);
1350         u64 val = 0, val_iva = 0;
1351         unsigned long flag;
1352
1353         switch (type) {
1354         case DMA_TLB_GLOBAL_FLUSH:
1355                 /* global flush doesn't need set IVA_REG */
1356                 val = DMA_TLB_GLOBAL_FLUSH|DMA_TLB_IVT;
1357                 break;
1358         case DMA_TLB_DSI_FLUSH:
1359                 val = DMA_TLB_DSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
1360                 break;
1361         case DMA_TLB_PSI_FLUSH:
1362                 val = DMA_TLB_PSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
1363                 /* IH bit is passed in as part of address */
1364                 val_iva = size_order | addr;
1365                 break;
1366         default:
1367                 BUG();
1368         }
1369         /* Note: set drain read/write */
1370 #if 0
1371         /*
1372          * This is probably meant to be extra safe. Looks like we can
1373          * ignore it without any impact.
1374          */
1375         if (cap_read_drain(iommu->cap))
1376                 val |= DMA_TLB_READ_DRAIN;
1377 #endif
1378         if (cap_write_drain(iommu->cap))
1379                 val |= DMA_TLB_WRITE_DRAIN;
1380
1381         raw_spin_lock_irqsave(&iommu->register_lock, flag);
1382         /* Note: Only uses first TLB reg currently */
1383         if (val_iva)
1384                 dmar_writeq(iommu->reg + tlb_offset, val_iva);
1385         dmar_writeq(iommu->reg + tlb_offset + 8, val);
1386
1387         /* Make sure hardware complete it */
1388         IOMMU_WAIT_OP(iommu, tlb_offset + 8,
1389                 dmar_readq, (!(val & DMA_TLB_IVT)), val);
1390
1391         raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1392
1393         /* check IOTLB invalidation granularity */
1394         if (DMA_TLB_IAIG(val) == 0)
1395                 pr_err("Flush IOTLB failed\n");
1396         if (DMA_TLB_IAIG(val) != DMA_TLB_IIRG(type))
1397                 pr_debug("TLB flush request %Lx, actual %Lx\n",
1398                         (unsigned long long)DMA_TLB_IIRG(type),
1399                         (unsigned long long)DMA_TLB_IAIG(val));
1400 }
1401
1402 static struct device_domain_info *
1403 iommu_support_dev_iotlb (struct dmar_domain *domain, struct intel_iommu *iommu,
1404                          u8 bus, u8 devfn)
1405 {
1406         bool found = false;
1407         struct device_domain_info *info;
1408         struct pci_dev *pdev;
1409
1410         assert_spin_locked(&device_domain_lock);
1411
1412         if (!ecap_dev_iotlb_support(iommu->ecap))
1413                 return NULL;
1414
1415         if (!iommu->qi)
1416                 return NULL;
1417
1418         list_for_each_entry(info, &domain->devices, link)
1419                 if (info->iommu == iommu && info->bus == bus &&
1420                     info->devfn == devfn) {
1421                         found = true;
1422                         break;
1423                 }
1424
1425         if (!found || !info->dev || !dev_is_pci(info->dev))
1426                 return NULL;
1427
1428         pdev = to_pci_dev(info->dev);
1429
1430         if (!pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ATS))
1431                 return NULL;
1432
1433         if (!dmar_find_matched_atsr_unit(pdev))
1434                 return NULL;
1435
1436         return info;
1437 }
1438
1439 static void iommu_enable_dev_iotlb(struct device_domain_info *info)
1440 {
1441         if (!info || !dev_is_pci(info->dev))
1442                 return;
1443
1444         pci_enable_ats(to_pci_dev(info->dev), VTD_PAGE_SHIFT);
1445 }
1446
1447 static void iommu_disable_dev_iotlb(struct device_domain_info *info)
1448 {
1449         if (!info->dev || !dev_is_pci(info->dev) ||
1450             !pci_ats_enabled(to_pci_dev(info->dev)))
1451                 return;
1452
1453         pci_disable_ats(to_pci_dev(info->dev));
1454 }
1455
1456 static void iommu_flush_dev_iotlb(struct dmar_domain *domain,
1457                                   u64 addr, unsigned mask)
1458 {
1459         u16 sid, qdep;
1460         unsigned long flags;
1461         struct device_domain_info *info;
1462
1463         spin_lock_irqsave(&device_domain_lock, flags);
1464         list_for_each_entry(info, &domain->devices, link) {
1465                 struct pci_dev *pdev;
1466                 if (!info->dev || !dev_is_pci(info->dev))
1467                         continue;
1468
1469                 pdev = to_pci_dev(info->dev);
1470                 if (!pci_ats_enabled(pdev))
1471                         continue;
1472
1473                 sid = info->bus << 8 | info->devfn;
1474                 qdep = pci_ats_queue_depth(pdev);
1475                 qi_flush_dev_iotlb(info->iommu, sid, qdep, addr, mask);
1476         }
1477         spin_unlock_irqrestore(&device_domain_lock, flags);
1478 }
1479
1480 static void iommu_flush_iotlb_psi(struct intel_iommu *iommu,
1481                                   struct dmar_domain *domain,
1482                                   unsigned long pfn, unsigned int pages,
1483                                   int ih, int map)
1484 {
1485         unsigned int mask = ilog2(__roundup_pow_of_two(pages));
1486         uint64_t addr = (uint64_t)pfn << VTD_PAGE_SHIFT;
1487         u16 did = domain->iommu_did[iommu->seq_id];
1488
1489         BUG_ON(pages == 0);
1490
1491         if (ih)
1492                 ih = 1 << 6;
1493         /*
1494          * Fall back to domain selective flush if there is no PSI support or the
1495          * size is too big.
1496          * PSI requires the page size to be a power of two and the base address
1497          * to be naturally aligned to that size.
1498          */
1499         if (!cap_pgsel_inv(iommu->cap) || mask > cap_max_amask_val(iommu->cap))
1500                 iommu->flush.flush_iotlb(iommu, did, 0, 0,
1501                                                 DMA_TLB_DSI_FLUSH);
1502         else
1503                 iommu->flush.flush_iotlb(iommu, did, addr | ih, mask,
1504                                                 DMA_TLB_PSI_FLUSH);
1505
1506         /*
1507          * In caching mode, changes of pages from non-present to present require
1508          * flush. However, device IOTLB doesn't need to be flushed in this case.
1509          */
1510         if (!cap_caching_mode(iommu->cap) || !map)
1511                 iommu_flush_dev_iotlb(get_iommu_domain(iommu, did),
1512                                       addr, mask);
1513 }
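/*
 * Example of the mask arithmetic above: flushing 5 pages gives
 * mask = ilog2(__roundup_pow_of_two(5)) = 3, so the hardware is asked
 * to invalidate an aligned 8-page (32KiB) granule covering the range.
 */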
1514
1515 static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu)
1516 {
1517         u32 pmen;
1518         unsigned long flags;
1519
1520         raw_spin_lock_irqsave(&iommu->register_lock, flags);
1521         pmen = readl(iommu->reg + DMAR_PMEN_REG);
1522         pmen &= ~DMA_PMEN_EPM;
1523         writel(pmen, iommu->reg + DMAR_PMEN_REG);
1524
1525         /* wait for the protected region status bit to clear */
1526         IOMMU_WAIT_OP(iommu, DMAR_PMEN_REG,
1527                 readl, !(pmen & DMA_PMEN_PRS), pmen);
1528
1529         raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
1530 }
1531
1532 static void iommu_enable_translation(struct intel_iommu *iommu)
1533 {
1534         u32 sts;
1535         unsigned long flags;
1536
1537         raw_spin_lock_irqsave(&iommu->register_lock, flags);
1538         iommu->gcmd |= DMA_GCMD_TE;
1539         writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
1540
1541         /* Make sure hardware complete it */
1542         IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
1543                       readl, (sts & DMA_GSTS_TES), sts);
1544
1545         raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
1546 }
1547
1548 static void iommu_disable_translation(struct intel_iommu *iommu)
1549 {
1550         u32 sts;
1551         unsigned long flag;
1552
1553         raw_spin_lock_irqsave(&iommu->register_lock, flag);
1554         iommu->gcmd &= ~DMA_GCMD_TE;
1555         writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
1556
1557         /* Make sure hardware complete it */
1558         IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
1559                       readl, (!(sts & DMA_GSTS_TES)), sts);
1560
1561         raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1562 }
1563
1564
1565 static int iommu_init_domains(struct intel_iommu *iommu)
1566 {
1567         u32 ndomains, nlongs;
1568         size_t size;
1569
1570         ndomains = cap_ndoms(iommu->cap);
1571         pr_debug("%s: Number of Domains supported <%d>\n",
1572                  iommu->name, ndomains);
1573         nlongs = BITS_TO_LONGS(ndomains);
1574
1575         spin_lock_init(&iommu->lock);
1576
1577         iommu->domain_ids = kcalloc(nlongs, sizeof(unsigned long), GFP_KERNEL);
1578         if (!iommu->domain_ids) {
1579                 pr_err("%s: Allocating domain id array failed\n",
1580                        iommu->name);
1581                 return -ENOMEM;
1582         }
1583
1584         size = ((ndomains >> 8) + 1) * sizeof(struct dmar_domain **);
1585         iommu->domains = kzalloc(size, GFP_KERNEL);
1586
1587         if (iommu->domains) {
1588                 size = 256 * sizeof(struct dmar_domain *);
1589                 iommu->domains[0] = kzalloc(size, GFP_KERNEL);
1590         }
1591
1592         if (!iommu->domains || !iommu->domains[0]) {
1593                 pr_err("%s: Allocating domain array failed\n",
1594                        iommu->name);
1595                 kfree(iommu->domain_ids);
1596                 kfree(iommu->domains);
1597                 iommu->domain_ids = NULL;
1598                 iommu->domains    = NULL;
1599                 return -ENOMEM;
1600         }
1601
1602
1603
1604         /*
1605          * If Caching mode is set, then invalid translations are tagged
1606          * with domain-id 0, hence we need to pre-allocate it. We also
1607          * use domain-id 0 as a marker for non-allocated domain-id, so
1608          * make sure it is not used for a real domain.
1609          */
1610         set_bit(0, iommu->domain_ids);
1611
1612         return 0;
1613 }
1614
1615 static void disable_dmar_iommu(struct intel_iommu *iommu)
1616 {
1617         struct device_domain_info *info, *tmp;
1618         unsigned long flags;
1619
1620         if (!iommu->domains || !iommu->domain_ids)
1621                 return;
1622
1623         spin_lock_irqsave(&device_domain_lock, flags);
1624         list_for_each_entry_safe(info, tmp, &device_domain_list, global) {
1625                 struct dmar_domain *domain;
1626
1627                 if (info->iommu != iommu)
1628                         continue;
1629
1630                 if (!info->dev || !info->domain)
1631                         continue;
1632
1633                 domain = info->domain;
1634
1635                 dmar_remove_one_dev_info(domain, info->dev);
1636
1637                 if (!domain_type_is_vm_or_si(domain))
1638                         domain_exit(domain);
1639         }
1640         spin_unlock_irqrestore(&device_domain_lock, flags);
1641
1642         if (iommu->gcmd & DMA_GCMD_TE)
1643                 iommu_disable_translation(iommu);
1644 }
1645
1646 static void free_dmar_iommu(struct intel_iommu *iommu)
1647 {
1648         if ((iommu->domains) && (iommu->domain_ids)) {
1649                 int elems = (cap_ndoms(iommu->cap) >> 8) + 1;
1650                 int i;
1651
1652                 for (i = 0; i < elems; i++)
1653                         kfree(iommu->domains[i]);
1654                 kfree(iommu->domains);
1655                 kfree(iommu->domain_ids);
1656                 iommu->domains = NULL;
1657                 iommu->domain_ids = NULL;
1658         }
1659
1660         g_iommus[iommu->seq_id] = NULL;
1661
1662         /* free context mapping */
1663         free_context_table(iommu);
1664 }
1665
1666 static struct dmar_domain *alloc_domain(int flags)
1667 {
1668         struct dmar_domain *domain;
1669
1670         domain = alloc_domain_mem();
1671         if (!domain)
1672                 return NULL;
1673
1674         memset(domain, 0, sizeof(*domain));
1675         domain->nid = -1;
1676         domain->flags = flags;
1677         INIT_LIST_HEAD(&domain->devices);
1678
1679         return domain;
1680 }
1681
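/*
 * Take a reference on @iommu for @domain.  On the first reference a
 * free domain id is allocated from iommu->domain_ids, recorded in
 * domain->iommu_did[] and registered via set_iommu_domain(); the
 * domain capabilities are then re-evaluated.
 */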
1682 /* Must be called with device_domain_lock and iommu->lock held */
1683 static int domain_attach_iommu(struct dmar_domain *domain,
1684                                struct intel_iommu *iommu)
1685 {
1686         unsigned long ndomains;
1687         int num;
1688
1689         assert_spin_locked(&device_domain_lock);
1690         assert_spin_locked(&iommu->lock);
1691
1692         domain->iommu_refcnt[iommu->seq_id] += 1;
1693         domain->iommu_count += 1;
1694         if (domain->iommu_refcnt[iommu->seq_id] == 1) {
1695                 ndomains = cap_ndoms(iommu->cap);
1696                 num      = find_first_zero_bit(iommu->domain_ids, ndomains);
1697
1698                 if (num >= ndomains) {
1699                         pr_err("%s: No free domain ids\n", iommu->name);
1700                         domain->iommu_refcnt[iommu->seq_id] -= 1;
1701                         domain->iommu_count -= 1;
1702                         return -ENOSPC;
1703                 }
1704
1705                 set_bit(num, iommu->domain_ids);
1706                 set_iommu_domain(iommu, num, domain);
1707
1708                 domain->iommu_did[iommu->seq_id] = num;
1709                 domain->nid                      = iommu->node;
1710
1711                 domain_update_iommu_cap(domain);
1712         }
1713
1714         return 0;
1715 }
1716
1717 static int domain_detach_iommu(struct dmar_domain *domain,
1718                                struct intel_iommu *iommu)
1719 {
1720         int num, count = INT_MAX;
1721
1722         assert_spin_locked(&device_domain_lock);
1723         assert_spin_locked(&iommu->lock);
1724
1725         domain->iommu_refcnt[iommu->seq_id] -= 1;
1726         count = --domain->iommu_count;
1727         if (domain->iommu_refcnt[iommu->seq_id] == 0) {
1728                 num = domain->iommu_did[iommu->seq_id];
1729                 clear_bit(num, iommu->domain_ids);
1730                 set_iommu_domain(iommu, num, NULL);
1731
1732                 domain_update_iommu_cap(domain);
1733                 domain->iommu_did[iommu->seq_id] = 0;
1734         }
1735
1736         return count;
1737 }
1738
1739 static struct iova_domain reserved_iova_list;
1740 static struct lock_class_key reserved_rbtree_key;
1741
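/*
 * Reserve IOVA ranges that must never be handed out for DMA: the
 * IOAPIC MMIO window and every PCI MMIO resource in the system.  The
 * resulting reserved_iova_list is copied into each new domain by
 * domain_reserve_special_ranges().
 */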
1742 static int dmar_init_reserved_ranges(void)
1743 {
1744         struct pci_dev *pdev = NULL;
1745         struct iova *iova;
1746         int i;
1747
1748         init_iova_domain(&reserved_iova_list, VTD_PAGE_SIZE, IOVA_START_PFN,
1749                         DMA_32BIT_PFN);
1750
1751         lockdep_set_class(&reserved_iova_list.iova_rbtree_lock,
1752                 &reserved_rbtree_key);
1753
1754         /* IOAPIC ranges shouldn't be accessed by DMA */
1755         iova = reserve_iova(&reserved_iova_list, IOVA_PFN(IOAPIC_RANGE_START),
1756                 IOVA_PFN(IOAPIC_RANGE_END));
1757         if (!iova) {
1758                 pr_err("Reserve IOAPIC range failed\n");
1759                 return -ENODEV;
1760         }
1761
1762         /* Reserve all PCI MMIO to avoid peer-to-peer access */
1763         for_each_pci_dev(pdev) {
1764                 struct resource *r;
1765
1766                 for (i = 0; i < PCI_NUM_RESOURCES; i++) {
1767                         r = &pdev->resource[i];
1768                         if (!r->flags || !(r->flags & IORESOURCE_MEM))
1769                                 continue;
1770                         iova = reserve_iova(&reserved_iova_list,
1771                                             IOVA_PFN(r->start),
1772                                             IOVA_PFN(r->end));
1773                         if (!iova) {
1774                                 pr_err("Reserve iova failed\n");
1775                                 return -ENODEV;
1776                         }
1777                 }
1778         }
1779         return 0;
1780 }
1781
1782 static void domain_reserve_special_ranges(struct dmar_domain *domain)
1783 {
1784         copy_reserved_iova(&reserved_iova_list, &domain->iovad);
1785 }
1786
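/*
 * Round a guest address width up so that (agaw - 12) is a multiple of
 * the 9-bit page-table stride, capped at 64 bits.  For example, a
 * guest width of 40 becomes 48, while 39 and 48 are already aligned
 * and stay unchanged.
 */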
1787 static inline int guestwidth_to_adjustwidth(int gaw)
1788 {
1789         int agaw;
1790         int r = (gaw - 12) % 9;
1791
1792         if (r == 0)
1793                 agaw = gaw;
1794         else
1795                 agaw = gaw + 9 - r;
1796         if (agaw > 64)
1797                 agaw = 64;
1798         return agaw;
1799 }
1800
1801 static int domain_init(struct dmar_domain *domain, struct intel_iommu *iommu,
1802                        int guest_width)
1803 {
1804         int adjust_width, agaw;
1805         unsigned long sagaw;
1806
1807         init_iova_domain(&domain->iovad, VTD_PAGE_SIZE, IOVA_START_PFN,
1808                         DMA_32BIT_PFN);
1809         domain_reserve_special_ranges(domain);
1810
1811         /* calculate AGAW */
1812         if (guest_width > cap_mgaw(iommu->cap))
1813                 guest_width = cap_mgaw(iommu->cap);
1814         domain->gaw = guest_width;
1815         adjust_width = guestwidth_to_adjustwidth(guest_width);
1816         agaw = width_to_agaw(adjust_width);
1817         sagaw = cap_sagaw(iommu->cap);
1818         if (!test_bit(agaw, &sagaw)) {
1819                 /* hardware doesn't support it, choose a bigger one */
1820                 pr_debug("Hardware doesn't support agaw %d\n", agaw);
1821                 agaw = find_next_bit(&sagaw, 5, agaw);
1822                 if (agaw >= 5)
1823                         return -ENODEV;
1824         }
1825         domain->agaw = agaw;
1826
1827         if (ecap_coherent(iommu->ecap))
1828                 domain->iommu_coherency = 1;
1829         else
1830                 domain->iommu_coherency = 0;
1831
1832         if (ecap_sc_support(iommu->ecap))
1833                 domain->iommu_snooping = 1;
1834         else
1835                 domain->iommu_snooping = 0;
1836
1837         if (intel_iommu_superpage)
1838                 domain->iommu_superpage = fls(cap_super_page_val(iommu->cap));
1839         else
1840                 domain->iommu_superpage = 0;
1841
1842         domain->nid = iommu->node;
1843
1844         /* always allocate the top pgd */
1845         domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
1846         if (!domain->pgd)
1847                 return -ENOMEM;
1848         __iommu_flush_cache(iommu, domain->pgd, PAGE_SIZE);
1849         return 0;
1850 }
1851
1852 static void domain_exit(struct dmar_domain *domain)
1853 {
1854         struct page *freelist = NULL;
1855
1856         /* Domain 0 is reserved, so don't process it */
1857         if (!domain)
1858                 return;
1859
1860         /* Flush any lazy unmaps that may reference this domain */
1861         if (!intel_iommu_strict)
1862                 flush_unmaps_timeout(0);
1863
1864         /* Remove associated devices and clear attached or cached domains */
1865         rcu_read_lock();
1866         domain_remove_dev_info(domain);
1867         rcu_read_unlock();
1868
1869         /* destroy iovas */
1870         put_iova_domain(&domain->iovad);
1871
1872         freelist = domain_unmap(domain, 0, DOMAIN_MAX_PFN(domain->gaw));
1873
1874         dma_free_pagelist(freelist);
1875
1876         free_domain_mem(domain);
1877 }
1878
1879 static int domain_context_mapping_one(struct dmar_domain *domain,
1880                                       struct intel_iommu *iommu,
1881                                       u8 bus, u8 devfn)
1882 {
1883         u16 did = domain->iommu_did[iommu->seq_id];
1884         int translation = CONTEXT_TT_MULTI_LEVEL;
1885         struct device_domain_info *info = NULL;
1886         struct context_entry *context;
1887         unsigned long flags;
1888         struct dma_pte *pgd;
1889         int ret, agaw;
1890
1891         WARN_ON(did == 0);
1892
1893         if (hw_pass_through && domain_type_is_si(domain))
1894                 translation = CONTEXT_TT_PASS_THROUGH;
1895
1896         pr_debug("Set context mapping for %02x:%02x.%d\n",
1897                 bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
1898
1899         BUG_ON(!domain->pgd);
1900
1901         spin_lock_irqsave(&device_domain_lock, flags);
1902         spin_lock(&iommu->lock);
1903
1904         ret = -ENOMEM;
1905         context = iommu_context_addr(iommu, bus, devfn, 1);
1906         if (!context)
1907                 goto out_unlock;
1908
1909         ret = 0;
1910         if (context_present(context))
1911                 goto out_unlock;
1912
1913         pgd = domain->pgd;
1914
1915         context_clear_entry(context);
1916         context_set_domain_id(context, did);
1917
1918         /*
1919          * Skip top levels of the page table for IOMMUs whose agaw is
1920          * smaller than the domain's default.  Unnecessary for PT mode.
1921          */
1922         if (translation != CONTEXT_TT_PASS_THROUGH) {
1923                 for (agaw = domain->agaw; agaw != iommu->agaw; agaw--) {
1924                         ret = -ENOMEM;
1925                         pgd = phys_to_virt(dma_pte_addr(pgd));
1926                         if (!dma_pte_present(pgd))
1927                                 goto out_unlock;
1928                 }
1929
1930                 info = iommu_support_dev_iotlb(domain, iommu, bus, devfn);
1931                 translation = info ? CONTEXT_TT_DEV_IOTLB :
1932                                      CONTEXT_TT_MULTI_LEVEL;
1933
1934                 context_set_address_root(context, virt_to_phys(pgd));
1935                 context_set_address_width(context, iommu->agaw);
1936         } else {
1937                 /*
1938                  * In pass through mode, AW must be programmed to
1939                  * indicate the largest AGAW value supported by
1940                  * hardware. And ASR is ignored by hardware.
1941                  */
1942                 context_set_address_width(context, iommu->msagaw);
1943         }
1944
1945         context_set_translation_type(context, translation);
1946         context_set_fault_enable(context);
1947         context_set_present(context);
1948         domain_flush_cache(domain, context, sizeof(*context));
1949
1950         /*
1951          * It's a non-present to present mapping. If the hardware doesn't
1952          * cache non-present entries, we only need to flush the write-buffer.
1953          * If it _does_ cache non-present entries, then it does so in the
1954          * special domain #0, which we have to flush:
1955          */
1956         if (cap_caching_mode(iommu->cap)) {
1957                 iommu->flush.flush_context(iommu, 0,
1958                                            (((u16)bus) << 8) | devfn,
1959                                            DMA_CCMD_MASK_NOBIT,
1960                                            DMA_CCMD_DEVICE_INVL);
1961                 iommu->flush.flush_iotlb(iommu, did, 0, 0, DMA_TLB_DSI_FLUSH);
1962         } else {
1963                 iommu_flush_write_buffer(iommu);
1964         }
1965         iommu_enable_dev_iotlb(info);
1966
1967         ret = 0;
1968
1969 out_unlock:
1970         spin_unlock(&iommu->lock);
1971         spin_unlock_irqrestore(&device_domain_lock, flags);
1972
1973         return ret;
1974 }
1975
1976 struct domain_context_mapping_data {
1977         struct dmar_domain *domain;
1978         struct intel_iommu *iommu;
1979 };
1980
1981 static int domain_context_mapping_cb(struct pci_dev *pdev,
1982                                      u16 alias, void *opaque)
1983 {
1984         struct domain_context_mapping_data *data = opaque;
1985
1986         return domain_context_mapping_one(data->domain, data->iommu,
1987                                           PCI_BUS_NUM(alias), alias & 0xff);
1988 }
1989
1990 static int
1991 domain_context_mapping(struct dmar_domain *domain, struct device *dev)
1992 {
1993         struct intel_iommu *iommu;
1994         u8 bus, devfn;
1995         struct domain_context_mapping_data data;
1996
1997         iommu = device_to_iommu(dev, &bus, &devfn);
1998         if (!iommu)
1999                 return -ENODEV;
2000
2001         if (!dev_is_pci(dev))
2002                 return domain_context_mapping_one(domain, iommu, bus, devfn);
2003
2004         data.domain = domain;
2005         data.iommu = iommu;
2006
2007         return pci_for_each_dma_alias(to_pci_dev(dev),
2008                                       &domain_context_mapping_cb, &data);
2009 }
2010
2011 static int domain_context_mapped_cb(struct pci_dev *pdev,
2012                                     u16 alias, void *opaque)
2013 {
2014         struct intel_iommu *iommu = opaque;
2015
2016         return !device_context_mapped(iommu, PCI_BUS_NUM(alias), alias & 0xff);
2017 }
2018
2019 static int domain_context_mapped(struct device *dev)
2020 {
2021         struct intel_iommu *iommu;
2022         u8 bus, devfn;
2023
2024         iommu = device_to_iommu(dev, &bus, &devfn);
2025         if (!iommu)
2026                 return -ENODEV;
2027
2028         if (!dev_is_pci(dev))
2029                 return device_context_mapped(iommu, bus, devfn);
2030
2031         return !pci_for_each_dma_alias(to_pci_dev(dev),
2032                                        domain_context_mapped_cb, iommu);
2033 }
2034
2035 /* Returns a number of VTD pages, but aligned to MM page size */
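/*
 * Example, assuming 4KiB MM and VT-d pages: an offset of 0x234 into a
 * page with size 0x2000 spans bytes 0x234..0x2233, which touches three
 * pages, so this returns 3.
 */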
2036 static inline unsigned long aligned_nrpages(unsigned long host_addr,
2037                                             size_t size)
2038 {
2039         host_addr &= ~PAGE_MASK;
2040         return PAGE_ALIGN(host_addr + size) >> VTD_PAGE_SHIFT;
2041 }
2042
2043 /* Return largest possible superpage level for a given mapping */
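/*
 * Level 1 means plain 4KiB pages, level 2 a 2MiB superpage, level 3 a
 * 1GiB superpage, and so on.  The returned level is limited by the
 * domain's iommu_superpage capability and by how many naturally
 * aligned pages the mapping actually covers.
 */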
2044 static inline int hardware_largepage_caps(struct dmar_domain *domain,
2045                                           unsigned long iov_pfn,
2046                                           unsigned long phy_pfn,
2047                                           unsigned long pages)
2048 {
2049         int support, level = 1;
2050         unsigned long pfnmerge;
2051
2052         support = domain->iommu_superpage;
2053
2054         /* To use a large page, the virtual *and* physical addresses
2055            must be aligned to 2MiB/1GiB/etc. Lower bits set in either
2056            of them will mean we have to use smaller pages. So just
2057            merge them and check both at once. */
2058         pfnmerge = iov_pfn | phy_pfn;
2059
2060         while (support && !(pfnmerge & ~VTD_STRIDE_MASK)) {
2061                 pages >>= VTD_STRIDE_SHIFT;
2062                 if (!pages)
2063                         break;
2064                 pfnmerge >>= VTD_STRIDE_SHIFT;
2065                 level++;
2066                 support--;
2067         }
2068         return level;
2069 }
2070
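/*
 * Core mapping loop: map @nr_pages VT-d pages starting at @iov_pfn,
 * taking the physical pages either from the scatterlist @sg (when
 * non-NULL) or from the contiguous range starting at @phys_pfn.
 * Superpages are used whenever alignment and remaining length allow.
 * PTEs are installed with a local cmpxchg, so a PTE that is already
 * set triggers a warning instead of being silently overwritten.
 */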
2071 static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
2072                             struct scatterlist *sg, unsigned long phys_pfn,
2073                             unsigned long nr_pages, int prot)
2074 {
2075         struct dma_pte *first_pte = NULL, *pte = NULL;
2076         phys_addr_t uninitialized_var(pteval);
2077         unsigned long sg_res = 0;
2078         unsigned int largepage_lvl = 0;
2079         unsigned long lvl_pages = 0;
2080
2081         BUG_ON(!domain_pfn_supported(domain, iov_pfn + nr_pages - 1));
2082
2083         if ((prot & (DMA_PTE_READ|DMA_PTE_WRITE)) == 0)
2084                 return -EINVAL;
2085
2086         prot &= DMA_PTE_READ | DMA_PTE_WRITE | DMA_PTE_SNP;
2087
2088         if (!sg) {
2089                 sg_res = nr_pages;
2090                 pteval = ((phys_addr_t)phys_pfn << VTD_PAGE_SHIFT) | prot;
2091         }
2092
2093         while (nr_pages > 0) {
2094                 uint64_t tmp;
2095
2096                 if (!sg_res) {
2097                         sg_res = aligned_nrpages(sg->offset, sg->length);
2098                         sg->dma_address = ((dma_addr_t)iov_pfn << VTD_PAGE_SHIFT) + sg->offset;
2099                         sg->dma_length = sg->length;
2100                         pteval = page_to_phys(sg_page(sg)) | prot;
2101                         phys_pfn = pteval >> VTD_PAGE_SHIFT;
2102                 }
2103
2104                 if (!pte) {
2105                         largepage_lvl = hardware_largepage_caps(domain, iov_pfn, phys_pfn, sg_res);
2106
2107                         first_pte = pte = pfn_to_dma_pte(domain, iov_pfn, &largepage_lvl);
2108                         if (!pte)
2109                                 return -ENOMEM;
2110                         /* It is a large page */
2111                         if (largepage_lvl > 1) {
2112                                 pteval |= DMA_PTE_LARGE_PAGE;
2113                                 lvl_pages = lvl_to_nr_pages(largepage_lvl);
2114                                 /*
2115                                  * Ensure that old small page tables are
2116                                  * removed to make room for superpage,
2117                                  * if they exist.
2118                                  */
2119                                 dma_pte_free_pagetable(domain, iov_pfn,
2120                                                        iov_pfn + lvl_pages - 1);
2121                         } else {
2122                                 pteval &= ~(uint64_t)DMA_PTE_LARGE_PAGE;
2123                         }
2124
2125                 }
2126                 /* We don't need a lock here; nobody else
2127                  * touches this iova range.
2128                  */
2129                 tmp = cmpxchg64_local(&pte->val, 0ULL, pteval);
2130                 if (tmp) {
2131                         static int dumps = 5;
2132                         pr_crit("ERROR: DMA PTE for vPFN 0x%lx already set (to %llx not %llx)\n",
2133                                 iov_pfn, tmp, (unsigned long long)pteval);
2134                         if (dumps) {
2135                                 dumps--;
2136                                 debug_dma_dump_mappings(NULL);
2137                         }
2138                         WARN_ON(1);
2139                 }
2140
2141                 lvl_pages = lvl_to_nr_pages(largepage_lvl);
2142
2143                 BUG_ON(nr_pages < lvl_pages);
2144                 BUG_ON(sg_res < lvl_pages);
2145
2146                 nr_pages -= lvl_pages;
2147                 iov_pfn += lvl_pages;
2148                 phys_pfn += lvl_pages;
2149                 pteval += lvl_pages * VTD_PAGE_SIZE;
2150                 sg_res -= lvl_pages;
2151
2152                 /* If the next PTE would be the first in a new page, then we
2153                    need to flush the cache on the entries we've just written.
2154                    And then we'll need to recalculate 'pte', so clear it and
2155                    let it get set again in the if (!pte) block above.
2156
2157                    If we're done (!nr_pages) we need to flush the cache too.
2158
2159                    Also if we've been setting superpages, we may need to
2160                    recalculate 'pte' and switch back to smaller pages for the
2161                    end of the mapping, if the trailing size is not enough to
2162                    use another superpage (i.e. sg_res < lvl_pages). */
2163                 pte++;
2164                 if (!nr_pages || first_pte_in_page(pte) ||
2165                     (largepage_lvl > 1 && sg_res < lvl_pages)) {
2166                         domain_flush_cache(domain, first_pte,
2167                                            (void *)pte - (void *)first_pte);
2168                         pte = NULL;
2169                 }
2170
2171                 if (!sg_res && nr_pages)
2172                         sg = sg_next(sg);
2173         }
2174         return 0;
2175 }
2176
2177 static inline int domain_sg_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
2178                                     struct scatterlist *sg, unsigned long nr_pages,
2179                                     int prot)
2180 {
2181         return __domain_mapping(domain, iov_pfn, sg, 0, nr_pages, prot);
2182 }
2183
2184 static inline int domain_pfn_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
2185                                      unsigned long phys_pfn, unsigned long nr_pages,
2186                                      int prot)
2187 {
2188         return __domain_mapping(domain, iov_pfn, NULL, phys_pfn, nr_pages, prot);
2189 }
2190
2191 static void domain_context_clear_one(struct intel_iommu *iommu, u8 bus, u8 devfn)
2192 {
2193         if (!iommu)
2194                 return;
2195
2196         clear_context_table(iommu, bus, devfn);
2197         iommu->flush.flush_context(iommu, 0, 0, 0,
2198                                            DMA_CCMD_GLOBAL_INVL);
2199         iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
2200 }
2201
2202 static inline void unlink_domain_info(struct device_domain_info *info)
2203 {
2204         assert_spin_locked(&device_domain_lock);
2205         list_del(&info->link);
2206         list_del(&info->global);
2207         if (info->dev)
2208                 info->dev->archdata.iommu = NULL;
2209 }
2210
2211 static void domain_remove_dev_info(struct dmar_domain *domain)
2212 {
2213         struct device_domain_info *info, *tmp;
2214         unsigned long flags;
2215
2216         spin_lock_irqsave(&device_domain_lock, flags);
2217         list_for_each_entry_safe(info, tmp, &domain->devices, link)
2218                 __dmar_remove_one_dev_info(info);
2219         spin_unlock_irqrestore(&device_domain_lock, flags);
2220 }
2221
2222 /*
2223  * find_domain
2224  * Note: we use struct device->archdata.iommu to store the info
2225  */
2226 static struct dmar_domain *find_domain(struct device *dev)
2227 {
2228         struct device_domain_info *info;
2229
2230         /* No lock here, assumes no domain exit in normal case */
2231         info = dev->archdata.iommu;
2232         if (info)
2233                 return info->domain;
2234         return NULL;
2235 }
2236
2237 static inline struct device_domain_info *
2238 dmar_search_domain_by_dev_info(int segment, int bus, int devfn)
2239 {
2240         struct device_domain_info *info;
2241
2242         list_for_each_entry(info, &device_domain_list, global)
2243                 if (info->iommu->segment == segment && info->bus == bus &&
2244                     info->devfn == devfn)
2245                         return info;
2246
2247         return NULL;
2248 }
2249
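/*
 * Bind a device (or a bare bus/devfn alias when @dev is NULL) to
 * @domain: allocate a device_domain_info, attach the domain to the
 * IOMMU, link the info into the domain and global lists and set up
 * the context mapping.  If the device is already bound, the existing
 * domain is returned and the caller must free the one it passed in.
 */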
2250 static struct dmar_domain *dmar_insert_one_dev_info(struct intel_iommu *iommu,
2251                                                     int bus, int devfn,
2252                                                     struct device *dev,
2253                                                     struct dmar_domain *domain)
2254 {
2255         struct dmar_domain *found = NULL;
2256         struct device_domain_info *info;
2257         unsigned long flags;
2258         int ret;
2259
2260         info = alloc_devinfo_mem();
2261         if (!info)
2262                 return NULL;
2263
2264         info->bus = bus;
2265         info->devfn = devfn;
2266         info->dev = dev;
2267         info->domain = domain;
2268         info->iommu = iommu;
2269
2270         spin_lock_irqsave(&device_domain_lock, flags);
2271         if (dev)
2272                 found = find_domain(dev);
2273         else {
2274                 struct device_domain_info *info2;
2275                 info2 = dmar_search_domain_by_dev_info(iommu->segment, bus, devfn);
2276                 if (info2)
2277                         found = info2->domain;
2278         }
2279         if (found) {
2280                 spin_unlock_irqrestore(&device_domain_lock, flags);
2281                 free_devinfo_mem(info);
2282                 /* Caller must free the original domain */
2283                 return found;
2284         }
2285
2286         spin_lock(&iommu->lock);
2287         ret = domain_attach_iommu(domain, iommu);
2288         spin_unlock(&iommu->lock);
2289
2290         if (ret) {
2291                 spin_unlock_irqrestore(&device_domain_lock, flags);
                     free_devinfo_mem(info);
2292                 return NULL;
2293         }
2294
2295         list_add(&info->link, &domain->devices);
2296         list_add(&info->global, &device_domain_list);
2297         if (dev)
2298                 dev->archdata.iommu = info;
2299         spin_unlock_irqrestore(&device_domain_lock, flags);
2300
2301         if (dev && domain_context_mapping(domain, dev)) {
2302                 pr_err("Domain context map for %s failed\n", dev_name(dev));
2303                 dmar_remove_one_dev_info(domain, dev);
2304                 return NULL;
2305         }
2306
2307         return domain;
2308 }
2309
2310 static int get_last_alias(struct pci_dev *pdev, u16 alias, void *opaque)
2311 {
2312         *(u16 *)opaque = alias;
2313         return 0;
2314 }
2315
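/*
 * Find or allocate the domain for @dev: reuse an existing binding if
 * there is one, otherwise check whether the device's PCI DMA alias
 * already has a domain, and only as a last resort allocate and
 * initialize a new domain and register it for the alias (if any) and
 * for the device itself.
 */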
2316 /* domain is initialized */
2317 static struct dmar_domain *get_domain_for_dev(struct device *dev, int gaw)
2318 {
2319         struct device_domain_info *info = NULL;
2320         struct dmar_domain *domain, *tmp;
2321         struct intel_iommu *iommu;
2322         u16 req_id, dma_alias;
2323         unsigned long flags;
2324         u8 bus, devfn;
2325
2326         domain = find_domain(dev);
2327         if (domain)
2328                 return domain;
2329
2330         iommu = device_to_iommu(dev, &bus, &devfn);
2331         if (!iommu)
2332                 return NULL;
2333
2334         req_id = ((u16)bus << 8) | devfn;
2335
2336         if (dev_is_pci(dev)) {
2337                 struct pci_dev *pdev = to_pci_dev(dev);
2338
2339                 pci_for_each_dma_alias(pdev, get_last_alias, &dma_alias);
2340
2341                 spin_lock_irqsave(&device_domain_lock, flags);
2342                 info = dmar_search_domain_by_dev_info(pci_domain_nr(pdev->bus),
2343                                                       PCI_BUS_NUM(dma_alias),
2344                                                       dma_alias & 0xff);
2345                 if (info) {
2346                         iommu = info->iommu;
2347                         domain = info->domain;
2348                 }
2349                 spin_unlock_irqrestore(&device_domain_lock, flags);
2350
2351                 /* DMA alias already has a domain, use it */
2352                 if (info)
2353                         goto found_domain;
2354         }
2355
2356         /* Allocate and initialize new domain for the device */
2357         domain = alloc_domain(0);
2358         if (!domain)
2359                 return NULL;
2360         if (domain_init(domain, iommu, gaw)) {
2361                 domain_exit(domain);
2362                 return NULL;
2363         }
2364
2365         /* register PCI DMA alias device if it differs from the request ID */
2366         if (dev_is_pci(dev) && req_id != dma_alias) {
2367                 tmp = dmar_insert_one_dev_info(iommu, PCI_BUS_NUM(dma_alias),
2368                                                dma_alias & 0xff, NULL, domain);
2369
2370                 if (!tmp || tmp != domain) {
2371                         domain_exit(domain);
2372                         domain = tmp;
2373                 }
2374
2375                 if (!domain)
2376                         return NULL;
2377         }
2378
2379 found_domain:
2380         tmp = dmar_insert_one_dev_info(iommu, bus, devfn, dev, domain);
2381
2382         if (!tmp || tmp != domain) {
2383                 domain_exit(domain);
2384                 domain = tmp;
2385         }
2386
2387         return domain;
2388 }
2389
2390 static int iommu_identity_mapping;
2391 #define IDENTMAP_ALL            1
2392 #define IDENTMAP_GFX            2
2393 #define IDENTMAP_AZALIA         4
2394
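/*
 * Reserve [start, end] in the domain's IOVA allocator and install a
 * 1:1 mapping for it (IOVA equals physical address), clearing any
 * page table entries that may already cover the range.
 */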
2395 static int iommu_domain_identity_map(struct dmar_domain *domain,
2396                                      unsigned long long start,
2397                                      unsigned long long end)
2398 {
2399         unsigned long first_vpfn = start >> VTD_PAGE_SHIFT;
2400         unsigned long last_vpfn = end >> VTD_PAGE_SHIFT;
2401
2402         if (!reserve_iova(&domain->iovad, dma_to_mm_pfn(first_vpfn),
2403                           dma_to_mm_pfn(last_vpfn))) {
2404                 pr_err("Reserving iova failed\n");
2405                 return -ENOMEM;
2406         }
2407
2408         pr_debug("Mapping reserved region %llx-%llx\n", start, end);
2409         /*
2410          * RMRR range might have overlap with physical memory range,
2411          * clear it first
2412          */
2413         dma_pte_clear_range(domain, first_vpfn, last_vpfn);
2414
2415         return domain_pfn_mapping(domain, first_vpfn, first_vpfn,
2416                                   last_vpfn - first_vpfn + 1,
2417                                   DMA_PTE_READ|DMA_PTE_WRITE);
2418 }
2419
2420 static int iommu_prepare_identity_map(struct device *dev,
2421                                       unsigned long long start,
2422                                       unsigned long long end)
2423 {
2424         struct dmar_domain *domain;
2425         int ret;
2426
2427         domain = get_domain_for_dev(dev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
2428         if (!domain)
2429                 return -ENOMEM;
2430
2431         /* For _hardware_ passthrough, don't bother. But for software
2432            passthrough, we do it anyway -- it may indicate a memory
2433            range which is reserved in E820 and so didn't get set
2434            up to start with in si_domain */
2435         if (domain == si_domain && hw_pass_through) {
2436                 pr_warn("Ignoring identity map for HW passthrough device %s [0x%Lx - 0x%Lx]\n",
2437                         dev_name(dev), start, end);
2438                 return 0;
2439         }
2440
2441         pr_info("Setting identity map for device %s [0x%Lx - 0x%Lx]\n",
2442                 dev_name(dev), start, end);
2443
2444         if (end < start) {
2445                 WARN(1, "Your BIOS is broken; RMRR ends before it starts!\n"
2446                         "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
2447                         dmi_get_system_info(DMI_BIOS_VENDOR),
2448                         dmi_get_system_info(DMI_BIOS_VERSION),
2449                         dmi_get_system_info(DMI_PRODUCT_VERSION));
2450                 ret = -EIO;
2451                 goto error;
2452         }
2453
2454         if (end >> agaw_to_width(domain->agaw)) {
2455                 WARN(1, "Your BIOS is broken; RMRR exceeds permitted address width (%d bits)\n"
2456                      "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
2457                      agaw_to_width(domain->agaw),
2458                      dmi_get_system_info(DMI_BIOS_VENDOR),
2459                      dmi_get_system_info(DMI_BIOS_VERSION),
2460                      dmi_get_system_info(DMI_PRODUCT_VERSION));
2461                 ret = -EIO;
2462                 goto error;
2463         }
2464
2465         ret = iommu_domain_identity_map(domain, start, end);
2466         if (ret)
2467                 goto error;
2468
2469         return 0;
2470
2471  error:
2472         domain_exit(domain);
2473         return ret;
2474 }
2475
2476 static inline int iommu_prepare_rmrr_dev(struct dmar_rmrr_unit *rmrr,
2477                                          struct device *dev)
2478 {
2479         if (dev->archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO)
2480                 return 0;
2481         return iommu_prepare_identity_map(dev, rmrr->base_address,
2482                                           rmrr->end_address);
2483 }
2484
2485 #ifdef CONFIG_INTEL_IOMMU_FLOPPY_WA
2486 static inline void iommu_prepare_isa(void)
2487 {
2488         struct pci_dev *pdev;
2489         int ret;
2490
2491         pdev = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, NULL);
2492         if (!pdev)
2493                 return;
2494
2495         pr_info("Prepare 0-16MiB unity mapping for LPC\n");
2496         ret = iommu_prepare_identity_map(&pdev->dev, 0, 16*1024*1024 - 1);
2497
2498         if (ret)
2499                 pr_err("Failed to create 0-16MiB identity map - floppy might not work\n");
2500
2501         pci_dev_put(pdev);
2502 }
2503 #else
2504 static inline void iommu_prepare_isa(void)
2505 {
2506         return;
2507 }
2508 #endif /* !CONFIG_INTEL_IOMMU_FLOPPY_WA */
2509
2510 static int md_domain_init(struct dmar_domain *domain, int guest_width);
2511
2512 static int __init si_domain_init(int hw)
2513 {
2514         int nid, ret = 0;
2515
2516         si_domain = alloc_domain(DOMAIN_FLAG_STATIC_IDENTITY);
2517         if (!si_domain)
2518                 return -EFAULT;
2519
2520         if (md_domain_init(si_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
2521                 domain_exit(si_domain);
2522                 return -EFAULT;
2523         }
2524
2525         pr_debug("Identity mapping domain allocated\n");
2526
2527         if (hw)
2528                 return 0;
2529
2530         for_each_online_node(nid) {
2531                 unsigned long start_pfn, end_pfn;
2532                 int i;
2533
2534                 for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
2535                         ret = iommu_domain_identity_map(si_domain,
2536                                         PFN_PHYS(start_pfn), PFN_PHYS(end_pfn));
2537                         if (ret)
2538                                 return ret;
2539                 }
2540         }
2541
2542         return 0;
2543 }
2544
2545 static int identity_mapping(struct device *dev)
2546 {
2547         struct device_domain_info *info;
2548
2549         if (likely(!iommu_identity_mapping))
2550                 return 0;
2551
2552         info = dev->archdata.iommu;
2553         if (info && info != DUMMY_DEVICE_DOMAIN_INFO)
2554                 return (info->domain == si_domain);
2555
2556         return 0;
2557 }
2558
2559 static int domain_add_dev_info(struct dmar_domain *domain, struct device *dev)
2560 {
2561         struct dmar_domain *ndomain;
2562         struct intel_iommu *iommu;
2563         u8 bus, devfn;
2564
2565         iommu = device_to_iommu(dev, &bus, &devfn);
2566         if (!iommu)
2567                 return -ENODEV;
2568
2569         ndomain = dmar_insert_one_dev_info(iommu, bus, devfn, dev, domain);
2570         if (ndomain != domain)
2571                 return -EBUSY;
2572
2573         return 0;
2574 }
2575
2576 static bool device_has_rmrr(struct device *dev)
2577 {
2578         struct dmar_rmrr_unit *rmrr;
2579         struct device *tmp;
2580         int i;
2581
2582         rcu_read_lock();
2583         for_each_rmrr_units(rmrr) {
2584                 /*
2585                  * Return TRUE if this RMRR contains the device that
2586                  * is passed in.
2587                  */
2588                 for_each_active_dev_scope(rmrr->devices,
2589                                           rmrr->devices_cnt, i, tmp)
2590                         if (tmp == dev) {
2591                                 rcu_read_unlock();
2592                                 return true;
2593                         }
2594         }
2595         rcu_read_unlock();
2596         return false;
2597 }
2598
2599 /*
2600  * There are a couple cases where we need to restrict the functionality of
2601  * devices associated with RMRRs.  The first is when evaluating a device for
2602  * identity mapping because problems exist when devices are moved in and out
2603  * of domains and their respective RMRR information is lost.  This means that
2604  * a device with associated RMRRs will never be in a "passthrough" domain.
2605  * The second is use of the device through the IOMMU API.  This interface
2606  * expects to have full control of the IOVA space for the device.  We cannot
2607  * satisfy both the requirement that RMRR access is maintained and have an
2608  * unencumbered IOVA space.  We also have no ability to quiesce the device's
2609  * use of the RMRR space or even inform the IOMMU API user of the restriction.
2610  * We therefore prevent devices associated with an RMRR from participating in
2611  * the IOMMU API, which eliminates them from device assignment.
2612  *
2613  * In both cases we assume that PCI USB devices with RMRRs have them largely
2614  * for historical reasons and that the RMRR space is not actively used post
2615  * boot.  This exclusion may change if vendors begin to abuse it.
2616  *
2617  * The same exception is made for graphics devices, with the requirement that
2618  * any use of the RMRR regions will be torn down before assigning the device
2619  * to a guest.
2620  */
2621 static bool device_is_rmrr_locked(struct device *dev)
2622 {
2623         if (!device_has_rmrr(dev))
2624                 return false;
2625
2626         if (dev_is_pci(dev)) {
2627                 struct pci_dev *pdev = to_pci_dev(dev);
2628
2629                 if (IS_USB_DEVICE(pdev) || IS_GFX_DEVICE(pdev))
2630                         return false;
2631         }
2632
2633         return true;
2634 }
2635
2636 static int iommu_should_identity_map(struct device *dev, int startup)
2637 {
2638
2639         if (dev_is_pci(dev)) {
2640                 struct pci_dev *pdev = to_pci_dev(dev);
2641
2642                 if (device_is_rmrr_locked(dev))
2643                         return 0;
2644
2645                 if ((iommu_identity_mapping & IDENTMAP_AZALIA) && IS_AZALIA(pdev))
2646                         return 1;
2647
2648                 if ((iommu_identity_mapping & IDENTMAP_GFX) && IS_GFX_DEVICE(pdev))
2649                         return 1;
2650
2651                 if (!(iommu_identity_mapping & IDENTMAP_ALL))
2652                         return 0;
2653
2654                 /*
2655                  * We want to start off with all devices in the 1:1 domain, and
2656                  * take them out later if we find they can't access all of memory.
2657                  *
2658                  * However, we can't do this for PCI devices behind bridges,
2659                  * because all PCI devices behind the same bridge will end up
2660                  * with the same source-id on their transactions.
2661                  *
2662                  * Practically speaking, we can't change things around for these
2663                  * devices at run-time, because we can't be sure there'll be no
2664                  * DMA transactions in flight for any of their siblings.
2665                  *
2666                  * So PCI devices (unless they're on the root bus) as well as
2667                  * their parent PCI-PCI or PCIe-PCI bridges must be left _out_ of
2668                  * the 1:1 domain, just in _case_ one of their siblings turns out
2669                  * not to be able to map all of memory.
2670                  */
2671                 if (!pci_is_pcie(pdev)) {
2672                         if (!pci_is_root_bus(pdev->bus))
2673                                 return 0;
2674                         if (pdev->class >> 8 == PCI_CLASS_BRIDGE_PCI)
2675                                 return 0;
2676                 } else if (pci_pcie_type(pdev) == PCI_EXP_TYPE_PCI_BRIDGE)
2677                         return 0;
2678         } else {
2679                 if (device_has_rmrr(dev))
2680                         return 0;
2681         }
2682
2683         /*
2684          * At boot time, we don't yet know if devices will be 64-bit capable.
2685          * Assume that they will — if they turn out not to be, then we can
2686          * take them out of the 1:1 domain later.
2687          */
2688         if (!startup) {
2689                 /*
2690                  * If the device's dma_mask is less than the system's memory
2691                  * size then this is not a candidate for identity mapping.
2692                  */
2693                 u64 dma_mask = *dev->dma_mask;
2694
2695                 if (dev->coherent_dma_mask &&
2696                     dev->coherent_dma_mask < dma_mask)
2697                         dma_mask = dev->coherent_dma_mask;
2698
2699                 return dma_mask >= dma_get_required_mask(dev);
2700         }
2701
2702         return 1;
2703 }
2704
2705 static int __init dev_prepare_static_identity_mapping(struct device *dev, int hw)
2706 {
2707         int ret;
2708
2709         if (!iommu_should_identity_map(dev, 1))
2710                 return 0;
2711
2712         ret = domain_add_dev_info(si_domain, dev);
2713         if (!ret)
2714                 pr_info("%s identity mapping for device %s\n",
2715                         hw ? "Hardware" : "Software", dev_name(dev));
2716         else if (ret == -ENODEV)
2717                 /* device not associated with an iommu */
2718                 ret = 0;
2719
2720         return ret;
2721 }
2722
2723
2724 static int __init iommu_prepare_static_identity_mapping(int hw)
2725 {
2726         struct pci_dev *pdev = NULL;
2727         struct dmar_drhd_unit *drhd;
2728         struct intel_iommu *iommu;
2729         struct device *dev;
2730         int i;
2731         int ret = 0;
2732
2733         for_each_pci_dev(pdev) {
2734                 ret = dev_prepare_static_identity_mapping(&pdev->dev, hw);
2735                 if (ret)
2736                         return ret;
2737         }
2738
2739         for_each_active_iommu(iommu, drhd)
2740                 for_each_active_dev_scope(drhd->devices, drhd->devices_cnt, i, dev) {
2741                         struct acpi_device_physical_node *pn;
2742                         struct acpi_device *adev;
2743
2744                         if (dev->bus != &acpi_bus_type)
2745                                 continue;
2746
2747                         adev = to_acpi_device(dev);
2748                         mutex_lock(&adev->physical_node_lock);
2749                         list_for_each_entry(pn, &adev->physical_node_list, node) {
2750                                 ret = dev_prepare_static_identity_mapping(pn->dev, hw);
2751                                 if (ret)
2752                                         break;
2753                         }
2754                         mutex_unlock(&adev->physical_node_lock);
2755                         if (ret)
2756                                 return ret;
2757                 }
2758
2759         return 0;
2760 }
2761
2762 static void intel_iommu_init_qi(struct intel_iommu *iommu)
2763 {
2764         /*
2765          * Start from a sane IOMMU hardware state.
2766          * If the queued invalidation is already initialized by us
2767          * (for example, while enabling interrupt-remapping) then
2768          * things are already rolling from a sane state.
2769          */
2770         if (!iommu->qi) {
2771                 /*
2772                  * Clear any previous faults.
2773                  */
2774                 dmar_fault(-1, iommu);
2775                 /*
2776                  * Disable queued invalidation if supported and already enabled
2777                  * before OS handover.
2778                  */
2779                 dmar_disable_qi(iommu);
2780         }
2781
2782         if (dmar_enable_qi(iommu)) {
2783                 /*
2784                  * Queued Invalidate not enabled, use Register Based Invalidate
2785                  */
2786                 iommu->flush.flush_context = __iommu_flush_context;
2787                 iommu->flush.flush_iotlb = __iommu_flush_iotlb;
2788                 pr_info("%s: Using Register based invalidation\n",
2789                         iommu->name);
2790         } else {
2791                 iommu->flush.flush_context = qi_flush_context;
2792                 iommu->flush.flush_iotlb = qi_flush_iotlb;
2793                 pr_info("%s: Using Queued invalidation\n", iommu->name);
2794         }
2795 }
2796
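/*
 * Copy one bus's context table(s) from the previous kernel: map the
 * old context table, allocate new pages, copy over every present
 * entry, reserve its domain id in iommu->domain_ids and mark the
 * entry as copied with PASIDs disabled.  In extended mode each bus
 * has two half-tables, hence the tbl_idx/pos arithmetic.
 */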
2797 static int copy_context_table(struct intel_iommu *iommu,
2798                               struct root_entry *old_re,
2799                               struct context_entry **tbl,
2800                               int bus, bool ext)
2801 {
2802         struct context_entry *old_ce = NULL, *new_ce = NULL, ce;
2803         int tbl_idx, pos = 0, idx, devfn, ret = 0, did;
2804         phys_addr_t old_ce_phys;
2805
2806         tbl_idx = ext ? bus * 2 : bus;
2807
2808         for (devfn = 0; devfn < 256; devfn++) {
2809                 /* First calculate the correct index */
2810                 idx = (ext ? devfn * 2 : devfn) % 256;
2811
2812                 if (idx == 0) {
2813                         /* First save what we may have and clean up */
2814                         if (new_ce) {
2815                                 tbl[tbl_idx] = new_ce;
2816                                 __iommu_flush_cache(iommu, new_ce,
2817                                                     VTD_PAGE_SIZE);
2818                                 pos = 1;
2819                         }
2820
2821                         if (old_ce)
2822                                 iounmap(old_ce);
2823
2824                         ret = 0;
2825                         if (devfn < 0x80)
2826                                 old_ce_phys = root_entry_lctp(old_re);
2827                         else
2828                                 old_ce_phys = root_entry_uctp(old_re);
2829
2830                         if (!old_ce_phys) {
2831                                 if (ext && devfn == 0) {
2832                                         /* No LCTP, try UCTP */
2833                                         devfn = 0x7f;
2834                                         continue;
2835                                 } else {
2836                                         goto out;
2837                                 }
2838                         }
2839
2840                         ret = -ENOMEM;
2841                         old_ce = ioremap_cache(old_ce_phys, PAGE_SIZE);
2842                         if (!old_ce)
2843                                 goto out;
2844
2845                         new_ce = alloc_pgtable_page(iommu->node);
2846                         if (!new_ce)
2847                                 goto out_unmap;
2848
2849                         ret = 0;
2850                 }
2851
2852                 /* Now copy the context entry */
2853                 ce = old_ce[idx];
2854
2855                 if (!__context_present(&ce))
2856                         continue;
2857
2858                 did = context_domain_id(&ce);
2859                 if (did >= 0 && did < cap_ndoms(iommu->cap))
2860                         set_bit(did, iommu->domain_ids);
2861
2862                 /*
2863                  * We need a marker for copied context entries. This
2864                  * marker needs to work for the old format as well as
2865                  * for extended context entries.
2866                  *
2867                  * Bit 67 of the context entry is used. In the old
2868                  * format this bit is available to software, in the
2869                  * extended format it is the PGE bit, but PGE is ignored
2870                  * by HW if PASIDs are disabled (and thus still
2871                  * available).
2872                  *
2873                  * So disable PASIDs first and then mark the entry
2874                  * copied. This means that we don't copy PASID
2875                  * translations from the old kernel, but this is fine as
2876                  * faults there are not fatal.
2877                  */
2878                 context_clear_pasid_enable(&ce);
2879                 context_set_copied(&ce);
2880
2881                 new_ce[idx] = ce;
2882         }
2883
2884         tbl[tbl_idx + pos] = new_ce;
2885
2886         __iommu_flush_cache(iommu, new_ce, VTD_PAGE_SIZE);
2887
2888 out_unmap:
2889         iounmap(old_ce);
2890
2891 out:
2892         return ret;
2893 }
2894
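/*
 * Copy the translation structures left behind by the previous kernel
 * (the kdump case): read the old root table address from
 * DMAR_RTADDR_REG, copy each bus's context tables and hook them into
 * this kernel's root_entry table.  Bail out if the extended-context
 * (RTT) setting would have to change, since that can only be done
 * with translation disabled.
 */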
2895 static int copy_translation_tables(struct intel_iommu *iommu)
2896 {
2897         struct context_entry **ctxt_tbls;
2898         struct root_entry *old_rt;
2899         phys_addr_t old_rt_phys;
2900         int ctxt_table_entries;
2901         unsigned long flags;
2902         u64 rtaddr_reg;
2903         int bus, ret;
2904         bool new_ext, ext;
2905
2906         rtaddr_reg = dmar_readq(iommu->reg + DMAR_RTADDR_REG);
2907         ext        = !!(rtaddr_reg & DMA_RTADDR_RTT);
2908         new_ext    = !!ecap_ecs(iommu->ecap);
2909
2910         /*
2911          * The RTT bit can only be changed when translation is disabled,
2912          * but disabling translation means to open a window for data
2913          * corruption. So bail out and don't copy anything if we would
2914          * have to change the bit.
2915          */
2916         if (new_ext != ext)
2917                 return -EINVAL;
2918
2919         old_rt_phys = rtaddr_reg & VTD_PAGE_MASK;
2920         if (!old_rt_phys)
2921                 return -EINVAL;
2922
2923         old_rt = ioremap_cache(old_rt_phys, PAGE_SIZE);
2924         if (!old_rt)
2925                 return -ENOMEM;
2926
2927         /* This is too big for the stack - allocate it from slab */
2928         ctxt_table_entries = ext ? 512 : 256;
2929         ret = -ENOMEM;
2930         ctxt_tbls = kzalloc(ctxt_table_entries * sizeof(void *), GFP_KERNEL);
2931         if (!ctxt_tbls)
2932                 goto out_unmap;
2933
2934         for (bus = 0; bus < 256; bus++) {
2935                 ret = copy_context_table(iommu, &old_rt[bus],
2936                                          ctxt_tbls, bus, ext);
2937                 if (ret) {
2938                         pr_err("%s: Failed to copy context table for bus %d\n",
2939                                 iommu->name, bus);
2940                         continue;
2941                 }
2942         }
2943
2944         spin_lock_irqsave(&iommu->lock, flags);
2945
2946         /* Context tables are copied, now write them to the root_entry table */
2947         for (bus = 0; bus < 256; bus++) {
2948                 int idx = ext ? bus * 2 : bus;
2949                 u64 val;
2950
2951                 if (ctxt_tbls[idx]) {
2952                         val = virt_to_phys(ctxt_tbls[idx]) | 1;
2953                         iommu->root_entry[bus].lo = val;
2954                 }
2955
2956                 if (!ext || !ctxt_tbls[idx + 1])
2957                         continue;
2958
2959                 val = virt_to_phys(ctxt_tbls[idx + 1]) | 1;
2960                 iommu->root_entry[bus].hi = val;
2961         }
2962
2963         spin_unlock_irqrestore(&iommu->lock, flags);
2964
2965         kfree(ctxt_tbls);
2966
2967         __iommu_flush_cache(iommu, iommu->root_entry, PAGE_SIZE);
2968
2969         ret = 0;
2970
2971 out_unmap:
2972         iounmap(old_rt);
2973
2974         return ret;
2975 }
2976
2977 static int __init init_dmars(void)
2978 {
2979         struct dmar_drhd_unit *drhd;
2980         struct dmar_rmrr_unit *rmrr;
2981         bool copied_tables = false;
2982         struct device *dev;
2983         struct intel_iommu *iommu;
2984         int i, ret;
2985
2986         /*
2987          * for each drhd
2988          *    allocate root
2989          *    initialize and program root entry to not present
2990          * endfor
2991          */
2992         for_each_drhd_unit(drhd) {
2993                 /*
2994                  * No lock needed, as this is only incremented in the
2995                  * single-threaded kernel __init code path; all other
2996                  * accesses are read-only.
2997                  */
2998                 if (g_num_of_iommus < DMAR_UNITS_SUPPORTED) {
2999                         g_num_of_iommus++;
3000                         continue;
3001                 }
3002                 pr_err_once("Exceeded %d IOMMUs\n", DMAR_UNITS_SUPPORTED);
3003         }
3004
3005         /* Preallocate enough resources for IOMMU hot-addition */
3006         if (g_num_of_iommus < DMAR_UNITS_SUPPORTED)
3007                 g_num_of_iommus = DMAR_UNITS_SUPPORTED;
3008
3009         g_iommus = kcalloc(g_num_of_iommus, sizeof(struct intel_iommu *),
3010                         GFP_KERNEL);
3011         if (!g_iommus) {
3012                 pr_err("Allocating global iommu array failed\n");
3013                 ret = -ENOMEM;
3014                 goto error;
3015         }
3016
3017         deferred_flush = kzalloc(g_num_of_iommus *
3018                 sizeof(struct deferred_flush_tables), GFP_KERNEL);
3019         if (!deferred_flush) {
3020                 ret = -ENOMEM;
3021                 goto free_g_iommus;
3022         }
3023
3024         for_each_active_iommu(iommu, drhd) {
3025                 g_iommus[iommu->seq_id] = iommu;
3026
3027                 intel_iommu_init_qi(iommu);
3028
3029                 ret = iommu_init_domains(iommu);
3030                 if (ret)
3031                         goto free_iommu;
3032
3033                 init_translation_status(iommu);
3034
3035                 if (translation_pre_enabled(iommu) && !is_kdump_kernel()) {
3036                         iommu_disable_translation(iommu);
3037                         clear_translation_pre_enabled(iommu);
3038                         pr_warn("Translation was enabled for %s but we are not in kdump mode\n",
3039                                 iommu->name);
3040                 }
3041
3042                 /*
3043                  * TBD:
3044                  * we could share the same root & context tables
3045                  * among all IOMMUs. Need to split it later.
3046                  */
3047                 ret = iommu_alloc_root_entry(iommu);
3048                 if (ret)
3049                         goto free_iommu;
3050
3051                 if (translation_pre_enabled(iommu)) {
3052                         pr_info("Translation already enabled - trying to copy translation structures\n");
3053
3054                         ret = copy_translation_tables(iommu);
3055                         if (ret) {
3056                                 /*
3057                                  * We found the IOMMU with translation
3058                                  * enabled - but failed to copy over the
3059                                  * old root-entry table. Try to proceed
3060                                  * by disabling translation now and
3061                                  * allocating a clean root-entry table.
3062                                  * This might cause DMAR faults, but
3063                                  * probably the dump will still succeed.
3064                                  */
3065                                 pr_err("Failed to copy translation tables from previous kernel for %s\n",
3066                                        iommu->name);
3067                                 iommu_disable_translation(iommu);
3068                                 clear_translation_pre_enabled(iommu);
3069                         } else {
3070                                 pr_info("Copied translation tables from previous kernel for %s\n",
3071                                         iommu->name);
3072                                 copied_tables = true;
3073                         }
3074                 }
3075
3076                 iommu_flush_write_buffer(iommu);
3077                 iommu_set_root_entry(iommu);
3078                 iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
3079                 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
3080
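                /*
                 * Hardware pass-through can only be relied upon if every
                 * IOMMU advertises it; a single unit without the capability
                 * clears the global hw_pass_through flag.
                 */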
3081                 if (!ecap_pass_through(iommu->ecap))
3082                         hw_pass_through = 0;
3083         }
3084
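        /*
         * If pass-through was requested on the kernel command line, ask for
         * identity mapping of every device instead of a private
         * DMA-remapping domain per device.
         */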
3085         if (iommu_pass_through)
3086                 iommu_identity_mapping |= IDENTMAP_ALL;
3087
3088 #ifdef CONFIG_INTEL_IOMMU_BROKEN_GFX_WA
3089         iommu_identity_mapping |= IDENTMAP_GFX;
3090 #endif
3091
3092         if (iommu_identity_mapping) {
3093                 ret = si_domain_init(hw_pass_through);
3094                 if (ret)
3095                         goto free_iommu;
3096         }
3097
3098         check_tylersburg_isoch();
3099
3100         /*
3101          * If we copied translations from a previous kernel in the kdump
3102          * case, we cannot assign the devices to domains now, as that
3103          * would eliminate the old mappings. So skip this part and defer
3104          * the assignment to device driver initialization time.
3105          */
3106         if (copied_tables)
3107                 goto domains_done;
3108
3109         /*
3110          * If pass-through is not set or not enabled, set up context entries
3111          * for identity mappings for RMRR, GFX and ISA devices, and fall back
3112          * to the static identity mapping if iommu_identity_mapping is set.
3113          */
3114         if (iommu_identity_mapping) {
3115                 ret = iommu_prepare_static_identity_mapping(hw_pass_through);
3116                 if (ret) {
3117                         pr_crit("Failed to setup IOMMU pass-through\n");
3118                         goto free_iommu;
3119                 }
3120         }
3121         /*
3122          * For each rmrr
3123          *   for each dev attached to rmrr
3124          *   do
3125          *     locate drhd for dev, alloc domain for dev
3126          *     allocate free domain
3127          *     allocate page table entries for rmrr
3128          *     if context not allocated for bus
3129          *           allocate and init context
3130          *           set present in root table for this bus
3131          *     init context with domain, translation etc
3132          *    endfor
3133          * endfor
3134          */
3135         pr_info("Setting RMRR:\n");
3136         for_each_rmrr_units(rmrr) {
3137                 /* some BIOSes list non-existent devices in the DMAR table. */
3138                 for_each_active_dev_scope(rmrr->devices, rmrr->devices_cnt,
3139                                           i, dev) {
3140                         ret = iommu_prepare_rmrr_dev(rmrr, dev);
3141                         if (ret)
3142                                 pr_err("Mapping reserved region failed\n");
3143                 }
3144         }
3145
3146         iommu_prepare_isa();
3147
3148 domains_done:
3149
3150         /*
3151          * for each drhd
3152          *   enable fault log
3153          *   global invalidate context cache
3154          *   global invalidate iotlb
3155          *   enable translation
3156          */
3157         for_each_iommu(iommu, drhd) {
3158                 if (drhd->ignored) {
3159                         /*
3160                          * we always have to disable PMRs or DMA may fail on
3161                          * this device
3162                          */
3163                         if (force_on)
3164                                 iommu_disable_protect_mem_regions(iommu);
3165                         continue;
3166                 }
3167
3168                 iommu_flush_write_buffer(iommu);
3169
3170                 ret = dmar_set_interrupt(iommu);
3171                 if (ret)
3172                         goto free_iommu;
3173
3174                 if (!translation_pre_enabled(iommu))
3175                         iommu_enable_translation(iommu);
3176
3177                 iommu_disable_protect_mem_regions(iommu);
3178         }
3179
3180         return 0;
3181
3182 free_iommu:
3183         for_each_active_iommu(iommu, drhd) {
3184                 disable_dmar_iommu(iommu);
3185                 free_dmar_iommu(iommu);
3186         }
3187         kfree(deferred_flush);
3188 free_g_iommus:
3189         kfree(g_iommus);
3190 error:
3191         return ret;
3192 }
3193
3194 /* This takes a number of _MM_ pages, not VTD pages */
3195 static struct iova *intel_alloc_iova(struct device *dev,
3196                                      struct dmar_domain *domain,
3197                                      unsigned long nrpages, uint64_t dma_mask)
3198 {
3199         struct iova *iova = NULL;
3200
3201         /* Restrict dma_mask to the width that the iommu can handle */
3202         dma_mask = min_t(uint64_t, DOMAIN_MAX_ADDR(domain->gaw), dma_mask);
3203
3204         if (!dmar_forcedac && dma_mask > DMA_BIT_MASK(32)) {
3205                 /*
3206                  * First try to allocate an io virtual address in
3207                  * DMA_BIT_MASK(32) and if that fails then try allocating
3208                  * from higher range
3209                  */
3210                 iova = alloc_iova(&domain->iovad, nrpages,
3211                                   IOVA_PFN(DMA_BIT_MASK(32)), 1);
3212                 if (iova)
3213                         return iova;
3214         }
3215         iova = alloc_iova(&domain->iovad, nrpages, IOVA_PFN(dma_mask), 1);
3216         if (unlikely(!iova)) {
3217                 pr_err("Allocating %ld-page iova for %s failed\n",
3218                        nrpages, dev_name(dev));
3219                 return NULL;
3220         }
3221
3222         return iova;
3223 }
3224
3225 static struct dmar_domain *__get_valid_domain_for_dev(struct device *dev)
3226 {
3227         struct dmar_domain *domain;
3228
3229         domain = get_domain_for_dev(dev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
3230         if (!domain) {
3231                 pr_err("Allocating domain for %s failed\n",
3232                        dev_name(dev));
3233                 return NULL;
3234         }
3235
3236         return domain;
3237 }
3238
3239 static inline struct dmar_domain *get_valid_domain_for_dev(struct device *dev)
3240 {
3241         struct device_domain_info *info;
3242
3243         /* No lock here, assumes no domain exit in normal case */
3244         info = dev->archdata.iommu;
3245         if (likely(info))
3246                 return info->domain;
3247
3248         return __get_valid_domain_for_dev(dev);
3249 }
3250
3251 /* Check if the dev needs to go through the non-identity map and unmap process. */
3252 static int iommu_no_mapping(struct device *dev)
3253 {
3254         int found;
3255
3256         if (iommu_dummy(dev))
3257                 return 1;
3258
3259         if (!iommu_identity_mapping)
3260                 return 0;
3261
3262         found = identity_mapping(dev);
3263         if (found) {
3264                 if (iommu_should_identity_map(dev, 0))
3265                         return 1;
3266                 else {
3267                         /*
3268                          * The 32-bit DMA device is removed from si_domain and
3269                          * falls back to non-identity mapping.
3270                          */
3271                         dmar_remove_one_dev_info(si_domain, dev);
3272                         pr_info("32bit %s uses non-identity mapping\n",
3273                                 dev_name(dev));
3274                         return 0;
3275                 }
3276         } else {
3277                 /*
3278                  * When a 64-bit DMA device is detached from a VM, the device
3279                  * is put into si_domain for identity mapping.
3280                  */
3281                 if (iommu_should_identity_map(dev, 0)) {
3282                         int ret;
3283                         ret = domain_add_dev_info(si_domain, dev);
3284                         if (!ret) {
3285                                 pr_info("64bit %s uses identity mapping\n",
3286                                         dev_name(dev));
3287                                 return 1;
3288                         }
3289                 }
3290         }
3291
3292         return 0;
3293 }
3294
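/*
 * Map a single physically contiguous buffer for DMA: find the device's
 * domain, allocate an IOVA range below its DMA mask, fill in the page
 * tables with permissions derived from the DMA direction, and flush the
 * IOTLB only when the IOMMU is in caching mode (otherwise a write-buffer
 * flush is enough for a not-present to present transition).
 */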
3295 static dma_addr_t __intel_map_single(struct device *dev, phys_addr_t paddr,
3296                                      size_t size, int dir, u64 dma_mask)
3297 {
3298         struct dmar_domain *domain;
3299         phys_addr_t start_paddr;
3300         struct iova *iova;
3301         int prot = 0;
3302         int ret;
3303         struct intel_iommu *iommu;
3304         unsigned long paddr_pfn = paddr >> PAGE_SHIFT;
3305
3306         BUG_ON(dir == DMA_NONE);
3307
3308         if (iommu_no_mapping(dev))
3309                 return paddr;
3310
3311         domain = get_valid_domain_for_dev(dev);
3312         if (!domain)
3313                 return 0;
3314
3315         iommu = domain_get_iommu(domain);
3316         size = aligned_nrpages(paddr, size);
3317
3318         iova = intel_alloc_iova(dev, domain, dma_to_mm_pfn(size), dma_mask);
3319         if (!iova)
3320                 goto error;
3321
3322         /*
3323          * Check if DMAR supports zero-length reads on write-only
3324          * mappings.
3325          */
3326         if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL || \
3327                         !cap_zlr(iommu->cap))
3328                 prot |= DMA_PTE_READ;
3329         if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
3330                 prot |= DMA_PTE_WRITE;
3331         /*
3332          * paddr .. (paddr + size) might cover a partial page, so we map the
3333          * whole page.  Note: if two parts of one page are mapped separately,
3334          * we might end up with two guest addresses mapping to the same host
3335          * paddr, but this is not a big problem.
3336          */
3337         ret = domain_pfn_mapping(domain, mm_to_dma_pfn(iova->pfn_lo),
3338                                  mm_to_dma_pfn(paddr_pfn), size, prot);
3339         if (ret)
3340                 goto error;
3341
3342         /* It's a non-present to present mapping. Only flush if in caching mode. */
3343         if (cap_caching_mode(iommu->cap))
3344                 iommu_flush_iotlb_psi(iommu, domain,
3345                                       mm_to_dma_pfn(iova->pfn_lo),
3346                                       size, 0, 1);
3347         else
3348                 iommu_flush_write_buffer(iommu);
3349
3350         start_paddr = (phys_addr_t)iova->pfn_lo << PAGE_SHIFT;
3351         start_paddr += paddr & ~PAGE_MASK;
3352         return start_paddr;
3353
3354 error:
3355         if (iova)
3356                 __free_iova(&domain->iovad, iova);
3357         pr_err("Device %s request: %zx@%llx dir %d --- failed\n",
3358                 dev_name(dev), size, (unsigned long long)paddr, dir);
3359         return 0;
3360 }
3361
3362 static dma_addr_t intel_map_page(struct device *dev, struct page *page,
3363                                  unsigned long offset, size_t size,
3364                                  enum dma_data_direction dir,
3365                                  struct dma_attrs *attrs)
3366 {
3367         return __intel_map_single(dev, page_to_phys(page) + offset, size,
3368                                   dir, *dev->dma_mask);
3369 }
3370
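/*
 * Deferred unmapping: instead of invalidating the IOTLB on every unmap,
 * completed unmaps are queued per IOMMU in deferred_flush[] and released
 * in batches, either once HIGH_WATER_MARK entries have accumulated or
 * when the 10ms unmap_timer fires.  flush_unmaps() performs the actual
 * invalidation and frees the queued IOVAs and page lists.
 */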
3371 static void flush_unmaps(void)
3372 {
3373         int i, j;
3374
3375         timer_on = 0;
3376
3377         /* just flush them all */
3378         for (i = 0; i < g_num_of_iommus; i++) {
3379                 struct intel_iommu *iommu = g_iommus[i];
3380                 if (!iommu)
3381                         continue;
3382
3383                 if (!deferred_flush[i].next)
3384                         continue;
3385
3386                 /* In caching mode, global flushes make emulation expensive */
3387                 if (!cap_caching_mode(iommu->cap))
3388                         iommu->flush.flush_iotlb(iommu, 0, 0, 0,
3389                                          DMA_TLB_GLOBAL_FLUSH);
3390                 for (j = 0; j < deferred_flush[i].next; j++) {
3391                         unsigned long mask;
3392                         struct iova *iova = deferred_flush[i].iova[j];
3393                         struct dmar_domain *domain = deferred_flush[i].domain[j];
3394
3395                         /* On real hardware multiple invalidations are expensive */
3396                         if (cap_caching_mode(iommu->cap))
3397                                 iommu_flush_iotlb_psi(iommu, domain,
3398                                         iova->pfn_lo, iova_size(iova),
3399                                         !deferred_flush[i].freelist[j], 0);
3400                         else {
3401                                 mask = ilog2(mm_to_dma_pfn(iova_size(iova)));
3402                                 iommu_flush_dev_iotlb(deferred_flush[i].domain[j],
3403                                                 (uint64_t)iova->pfn_lo << PAGE_SHIFT, mask);
3404                         }
3405                         __free_iova(&deferred_flush[i].domain[j]->iovad, iova);
3406                         if (deferred_flush[i].freelist[j])
3407                                 dma_free_pagelist(deferred_flush[i].freelist[j]);
3408                 }
3409                 deferred_flush[i].next = 0;
3410         }
3411
3412         list_size = 0;
3413 }
3414
3415 static void flush_unmaps_timeout(unsigned long data)
3416 {
3417         unsigned long flags;
3418
3419         spin_lock_irqsave(&async_umap_flush_lock, flags);
3420         flush_unmaps();
3421         spin_unlock_irqrestore(&async_umap_flush_lock, flags);
3422 }
3423
3424 static void add_unmap(struct dmar_domain *dom, struct iova *iova, struct page *freelist)
3425 {
3426         unsigned long flags;
3427         int next, iommu_id;
3428         struct intel_iommu *iommu;
3429
3430         spin_lock_irqsave(&async_umap_flush_lock, flags);
3431         if (list_size == HIGH_WATER_MARK)
3432                 flush_unmaps();
3433
3434         iommu = domain_get_iommu(dom);
3435         iommu_id = iommu->seq_id;
3436
3437         next = deferred_flush[iommu_id].next;
3438         deferred_flush[iommu_id].domain[next] = dom;
3439         deferred_flush[iommu_id].iova[next] = iova;
3440         deferred_flush[iommu_id].freelist[next] = freelist;
3441         deferred_flush[iommu_id].next++;
3442
3443         if (!timer_on) {
3444                 mod_timer(&unmap_timer, jiffies + msecs_to_jiffies(10));
3445                 timer_on = 1;
3446         }
3447         list_size++;
3448         spin_unlock_irqrestore(&async_umap_flush_lock, flags);
3449 }
3450
3451 static void intel_unmap(struct device *dev, dma_addr_t dev_addr)
3452 {
3453         struct dmar_domain *domain;
3454         unsigned long start_pfn, last_pfn;
3455         struct iova *iova;
3456         struct intel_iommu *iommu;
3457         struct page *freelist;
3458
3459         if (iommu_no_mapping(dev))
3460                 return;
3461
3462         domain = find_domain(dev);
3463         BUG_ON(!domain);
3464
3465         iommu = domain_get_iommu(domain);
3466
3467         iova = find_iova(&domain->iovad, IOVA_PFN(dev_addr));
3468         if (WARN_ONCE(!iova, "Driver unmaps unmatched page at PFN %llx\n",
3469                       (unsigned long long)dev_addr))
3470                 return;
3471
3472         start_pfn = mm_to_dma_pfn(iova->pfn_lo);
3473         last_pfn = mm_to_dma_pfn(iova->pfn_hi + 1) - 1;
3474
3475         pr_debug("Device %s unmapping: pfn %lx-%lx\n",
3476                  dev_name(dev), start_pfn, last_pfn);
3477
3478         freelist = domain_unmap(domain, start_pfn, last_pfn);
3479
3480         if (intel_iommu_strict) {
3481                 iommu_flush_iotlb_psi(iommu, domain, start_pfn,
3482                                       last_pfn - start_pfn + 1, !freelist, 0);
3483                 /* free iova */
3484                 __free_iova(&domain->iovad, iova);
3485                 dma_free_pagelist(freelist);
3486         } else {
3487                 add_unmap(domain, iova, freelist);
3488                 /*
3489                  * queue up the release of the unmap to save the roughly 1/6th
3490                  * of the CPU time otherwise used up by the iotlb flush operation...
3491                  */
3492         }
3493 }
3494
3495 static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
3496                              size_t size, enum dma_data_direction dir,
3497                              struct dma_attrs *attrs)
3498 {
3499         intel_unmap(dev, dev_addr);
3500 }
3501
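/*
 * Coherent allocations: try the contiguous memory allocator first when
 * the caller may sleep, fall back to alloc_pages(), zero the buffer and
 * map it DMA_BIDIRECTIONAL through __intel_map_single().
 */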
3502 static void *intel_alloc_coherent(struct device *dev, size_t size,
3503                                   dma_addr_t *dma_handle, gfp_t flags,
3504                                   struct dma_attrs *attrs)
3505 {
3506         struct page *page = NULL;
3507         int order;
3508
3509         size = PAGE_ALIGN(size);
3510         order = get_order(size);
3511
3512         if (!iommu_no_mapping(dev))
3513                 flags &= ~(GFP_DMA | GFP_DMA32);
3514         else if (dev->coherent_dma_mask < dma_get_required_mask(dev)) {
3515                 if (dev->coherent_dma_mask < DMA_BIT_MASK(32))
3516                         flags |= GFP_DMA;
3517                 else
3518                         flags |= GFP_DMA32;
3519         }
3520
3521         if (flags & __GFP_WAIT) {
3522                 unsigned int count = size >> PAGE_SHIFT;
3523
3524                 page = dma_alloc_from_contiguous(dev, count, order);
3525                 if (page && iommu_no_mapping(dev) &&
3526                     page_to_phys(page) + size > dev->coherent_dma_mask) {
3527                         dma_release_from_contiguous(dev, page, count);
3528                         page = NULL;
3529                 }
3530         }
3531
3532         if (!page)
3533                 page = alloc_pages(flags, order);
3534         if (!page)
3535                 return NULL;
3536         memset(page_address(page), 0, size);
3537
3538         *dma_handle = __intel_map_single(dev, page_to_phys(page), size,
3539                                          DMA_BIDIRECTIONAL,
3540                                          dev->coherent_dma_mask);
3541         if (*dma_handle)
3542                 return page_address(page);
3543         if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT))
3544                 __free_pages(page, order);
3545
3546         return NULL;
3547 }
3548
3549 static void intel_free_coherent(struct device *dev, size_t size, void *vaddr,
3550                                 dma_addr_t dma_handle, struct dma_attrs *attrs)
3551 {
3552         int order;
3553         struct page *page = virt_to_page(vaddr);
3554
3555         size = PAGE_ALIGN(size);
3556         order = get_order(size);
3557
3558         intel_unmap(dev, dma_handle);
3559         if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT))
3560                 __free_pages(page, order);
3561 }
3562
3563 static void intel_unmap_sg(struct device *dev, struct scatterlist *sglist,
3564                            int nelems, enum dma_data_direction dir,
3565                            struct dma_attrs *attrs)
3566 {
3567         intel_unmap(dev, sglist[0].dma_address);
3568 }
3569
3570 static int intel_nontranslate_map_sg(struct device *hddev,
3571         struct scatterlist *sglist, int nelems, int dir)
3572 {
3573         int i;
3574         struct scatterlist *sg;
3575
3576         for_each_sg(sglist, sg, nelems, i) {
3577                 BUG_ON(!sg_page(sg));
3578                 sg->dma_address = page_to_phys(sg_page(sg)) + sg->offset;
3579                 sg->dma_length = sg->length;
3580         }
3581         return nelems;
3582 }
3583
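/*
 * Scatter-gather mapping: sum up the page count of all segments so that
 * a single contiguous IOVA range can be allocated and the whole
 * scatterlist mapped into it with one domain_sg_mapping() call.
 */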
3584 static int intel_map_sg(struct device *dev, struct scatterlist *sglist, int nelems,
3585                         enum dma_data_direction dir, struct dma_attrs *attrs)
3586 {
3587         int i;
3588         struct dmar_domain *domain;
3589         size_t size = 0;
3590         int prot = 0;
3591         struct iova *iova = NULL;
3592         int ret;
3593         struct scatterlist *sg;
3594         unsigned long start_vpfn;
3595         struct intel_iommu *iommu;
3596
3597         BUG_ON(dir == DMA_NONE);
3598         if (iommu_no_mapping(dev))
3599                 return intel_nontranslate_map_sg(dev, sglist, nelems, dir);
3600
3601         domain = get_valid_domain_for_dev(dev);
3602         if (!domain)
3603                 return 0;
3604
3605         iommu = domain_get_iommu(domain);
3606
3607         for_each_sg(sglist, sg, nelems, i)
3608                 size += aligned_nrpages(sg->offset, sg->length);
3609
3610         iova = intel_alloc_iova(dev, domain, dma_to_mm_pfn(size),
3611                                 *dev->dma_mask);
3612         if (!iova) {
3613                 sglist->dma_length = 0;
3614                 return 0;
3615         }
3616
3617         /*
3618          * Check if DMAR supports zero-length reads on write-only
3619          * mappings.
3620          */
3621         if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL || \
3622                         !cap_zlr(iommu->cap))
3623                 prot |= DMA_PTE_READ;
3624         if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
3625                 prot |= DMA_PTE_WRITE;
3626
3627         start_vpfn = mm_to_dma_pfn(iova->pfn_lo);
3628
3629         ret = domain_sg_mapping(domain, start_vpfn, sglist, size, prot);
3630         if (unlikely(ret)) {
3631                 dma_pte_free_pagetable(domain, start_vpfn,
3632                                        start_vpfn + size - 1);
3633                 __free_iova(&domain->iovad, iova);
3634                 return 0;
3635         }
3636
3637         /* It's a non-present to present mapping. Only flush if in caching mode. */
3638         if (cap_caching_mode(iommu->cap))
3639                 iommu_flush_iotlb_psi(iommu, domain, start_vpfn, size, 0, 1);
3640         else
3641                 iommu_flush_write_buffer(iommu);
3642
3643         return nelems;
3644 }
3645
3646 static int intel_mapping_error(struct device *dev, dma_addr_t dma_addr)
3647 {
3648         return !dma_addr;
3649 }
3650
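/*
 * DMA API callbacks installed as the global dma_ops once the IOMMU has
 * been initialized (see intel_iommu_init()).  Drivers do not call these
 * directly; they reach them through the generic DMA API, e.g.:
 *
 *	dma_addr_t handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(dev, handle))
 *		...bail out...
 *	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
 */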
3651 struct dma_map_ops intel_dma_ops = {
3652         .alloc = intel_alloc_coherent,
3653         .free = intel_free_coherent,
3654         .map_sg = intel_map_sg,
3655         .unmap_sg = intel_unmap_sg,
3656         .map_page = intel_map_page,
3657         .unmap_page = intel_unmap_page,
3658         .mapping_error = intel_mapping_error,
3659 };
3660
3661 static inline int iommu_domain_cache_init(void)
3662 {
3663         int ret = 0;
3664
3665         iommu_domain_cache = kmem_cache_create("iommu_domain",
3666                                          sizeof(struct dmar_domain),
3667                                          0,
3668                                          SLAB_HWCACHE_ALIGN,
3670                                          NULL);
3671         if (!iommu_domain_cache) {
3672                 pr_err("Couldn't create iommu_domain cache\n");
3673                 ret = -ENOMEM;
3674         }
3675
3676         return ret;
3677 }
3678
3679 static inline int iommu_devinfo_cache_init(void)
3680 {
3681         int ret = 0;
3682
3683         iommu_devinfo_cache = kmem_cache_create("iommu_devinfo",
3684                                          sizeof(struct device_domain_info),
3685                                          0,
3686                                          SLAB_HWCACHE_ALIGN,
3687                                          NULL);
3688         if (!iommu_devinfo_cache) {
3689                 pr_err("Couldn't create devinfo cache\n");
3690                 ret = -ENOMEM;
3691         }
3692
3693         return ret;
3694 }
3695
3696 static int __init iommu_init_mempool(void)
3697 {
3698         int ret;
3699         ret = iommu_iova_cache_init();
3700         if (ret)
3701                 return ret;
3702
3703         ret = iommu_domain_cache_init();
3704         if (ret)
3705                 goto domain_error;
3706
3707         ret = iommu_devinfo_cache_init();
3708         if (!ret)
3709                 return ret;
3710
3711         kmem_cache_destroy(iommu_domain_cache);
3712 domain_error:
3713         iommu_iova_cache_destroy();
3714
3715         return -ENOMEM;
3716 }
3717
3718 static void __init iommu_exit_mempool(void)
3719 {
3720         kmem_cache_destroy(iommu_devinfo_cache);
3721         kmem_cache_destroy(iommu_domain_cache);
3722         iommu_iova_cache_destroy();
3723 }
3724
3725 static void quirk_ioat_snb_local_iommu(struct pci_dev *pdev)
3726 {
3727         struct dmar_drhd_unit *drhd;
3728         u32 vtbar;
3729         int rc;
3730
3731         /* We know that this device on this chipset has its own IOMMU.
3732          * If we find it under a different IOMMU, then the BIOS is lying
3733          * to us. Hope that the IOMMU for this device is actually
3734          * disabled, and it needs no translation...
3735          */
3736         rc = pci_bus_read_config_dword(pdev->bus, PCI_DEVFN(0, 0), 0xb0, &vtbar);
3737         if (rc) {
3738                 /* "can't" happen */
3739                 dev_info(&pdev->dev, "failed to run vt-d quirk\n");
3740                 return;
3741         }
3742         vtbar &= 0xffff0000;
3743
3744         /* we know that this iommu should be at offset 0xa000 from vtbar */
3745         drhd = dmar_find_matched_drhd_unit(pdev);
3746         if (WARN_TAINT_ONCE(!drhd || drhd->reg_base_addr - vtbar != 0xa000,
3747                             TAINT_FIRMWARE_WORKAROUND,
3748                             "BIOS assigned incorrect VT-d unit for Intel(R) QuickData Technology device\n"))
3749                 pdev->dev.archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
3750 }
3751 DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB, quirk_ioat_snb_local_iommu);
3752
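/*
 * Decide which DRHD units can be ignored: units whose device scope is
 * empty, and units that cover only graphics devices when dmar_map_gfx is
 * clear.  Devices under such a unit get the dummy domain info so that
 * the DMA API bypasses translation for them.
 */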
3753 static void __init init_no_remapping_devices(void)
3754 {
3755         struct dmar_drhd_unit *drhd;
3756         struct device *dev;
3757         int i;
3758
3759         for_each_drhd_unit(drhd) {
3760                 if (!drhd->include_all) {
3761                         for_each_active_dev_scope(drhd->devices,
3762                                                   drhd->devices_cnt, i, dev)
3763                                 break;
3764                         /* ignore DMAR unit if no devices exist */
3765                         if (i == drhd->devices_cnt)
3766                                 drhd->ignored = 1;
3767                 }
3768         }
3769
3770         for_each_active_drhd_unit(drhd) {
3771                 if (drhd->include_all)
3772                         continue;
3773
3774                 for_each_active_dev_scope(drhd->devices,
3775                                           drhd->devices_cnt, i, dev)
3776                         if (!dev_is_pci(dev) || !IS_GFX_DEVICE(to_pci_dev(dev)))
3777                                 break;
3778                 if (i < drhd->devices_cnt)
3779                         continue;
3780
3781                 /* This IOMMU has *only* gfx devices. Either bypass it or
3782                    set the gfx_mapped flag, as appropriate */
3783                 if (dmar_map_gfx) {
3784                         intel_iommu_gfx_mapped = 1;
3785                 } else {
3786                         drhd->ignored = 1;
3787                         for_each_active_dev_scope(drhd->devices,
3788                                                   drhd->devices_cnt, i, dev)
3789                                 dev->archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
3790                 }
3791         }
3792 }
3793
3794 #ifdef CONFIG_SUSPEND
3795 static int init_iommu_hw(void)
3796 {
3797         struct dmar_drhd_unit *drhd;
3798         struct intel_iommu *iommu = NULL;
3799
3800         for_each_active_iommu(iommu, drhd)
3801                 if (iommu->qi)
3802                         dmar_reenable_qi(iommu);
3803
3804         for_each_iommu(iommu, drhd) {
3805                 if (drhd->ignored) {
3806                         /*
3807                          * we always have to disable PMRs or DMA may fail on
3808                          * this device
3809                          */
3810                         if (force_on)
3811                                 iommu_disable_protect_mem_regions(iommu);
3812                         continue;
3813                 }
3814
3815                 iommu_flush_write_buffer(iommu);
3816
3817                 iommu_set_root_entry(iommu);
3818
3819                 iommu->flush.flush_context(iommu, 0, 0, 0,
3820                                            DMA_CCMD_GLOBAL_INVL);
3821                 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
3822                 iommu_enable_translation(iommu);
3823                 iommu_disable_protect_mem_regions(iommu);
3824         }
3825
3826         return 0;
3827 }
3828
3829 static void iommu_flush_all(void)
3830 {
3831         struct dmar_drhd_unit *drhd;
3832         struct intel_iommu *iommu;
3833
3834         for_each_active_iommu(iommu, drhd) {
3835                 iommu->flush.flush_context(iommu, 0, 0, 0,
3836                                            DMA_CCMD_GLOBAL_INVL);
3837                 iommu->flush.flush_iotlb(iommu, 0, 0, 0,
3838                                          DMA_TLB_GLOBAL_FLUSH);
3839         }
3840 }
3841
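/*
 * Suspend path: flush all caches, disable translation and save the
 * fault-event control/data/address registers of every active IOMMU so
 * that iommu_resume() can restore them after re-enabling the hardware.
 */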
3842 static int iommu_suspend(void)
3843 {
3844         struct dmar_drhd_unit *drhd;
3845         struct intel_iommu *iommu = NULL;
3846         unsigned long flag;
3847
3848         for_each_active_iommu(iommu, drhd) {
3849                 iommu->iommu_state = kzalloc(sizeof(u32) * MAX_SR_DMAR_REGS,
3850                                                  GFP_ATOMIC);
3851                 if (!iommu->iommu_state)
3852                         goto nomem;
3853         }
3854
3855         iommu_flush_all();
3856
3857         for_each_active_iommu(iommu, drhd) {
3858                 iommu_disable_translation(iommu);
3859
3860                 raw_spin_lock_irqsave(&iommu->register_lock, flag);
3861
3862                 iommu->iommu_state[SR_DMAR_FECTL_REG] =
3863                         readl(iommu->reg + DMAR_FECTL_REG);
3864                 iommu->iommu_state[SR_DMAR_FEDATA_REG] =
3865                         readl(iommu->reg + DMAR_FEDATA_REG);
3866                 iommu->iommu_state[SR_DMAR_FEADDR_REG] =
3867                         readl(iommu->reg + DMAR_FEADDR_REG);
3868                 iommu->iommu_state[SR_DMAR_FEUADDR_REG] =
3869                         readl(iommu->reg + DMAR_FEUADDR_REG);
3870
3871                 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
3872         }
3873         return 0;
3874
3875 nomem:
3876         for_each_active_iommu(iommu, drhd)
3877                 kfree(iommu->iommu_state);
3878
3879         return -ENOMEM;
3880 }
3881
3882 static void iommu_resume(void)
3883 {
3884         struct dmar_drhd_unit *drhd;
3885         struct intel_iommu *iommu = NULL;
3886         unsigned long flag;
3887
3888         if (init_iommu_hw()) {
3889                 if (force_on)
3890                         panic("tboot: IOMMU setup failed, DMAR can not resume!\n");
3891                 else
3892                         WARN(1, "IOMMU setup failed, DMAR can not resume!\n");
3893                 return;
3894         }
3895
3896         for_each_active_iommu(iommu, drhd) {
3897
3898                 raw_spin_lock_irqsave(&iommu->register_lock, flag);
3899
3900                 writel(iommu->iommu_state[SR_DMAR_FECTL_REG],
3901                         iommu->reg + DMAR_FECTL_REG);
3902                 writel(iommu->iommu_state[SR_DMAR_FEDATA_REG],
3903                         iommu->reg + DMAR_FEDATA_REG);
3904                 writel(iommu->iommu_state[SR_DMAR_FEADDR_REG],
3905                         iommu->reg + DMAR_FEADDR_REG);
3906                 writel(iommu->iommu_state[SR_DMAR_FEUADDR_REG],
3907                         iommu->reg + DMAR_FEUADDR_REG);
3908
3909                 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
3910         }
3911
3912         for_each_active_iommu(iommu, drhd)
3913                 kfree(iommu->iommu_state);
3914 }
3915
3916 static struct syscore_ops iommu_syscore_ops = {
3917         .resume         = iommu_resume,
3918         .suspend        = iommu_suspend,
3919 };
3920
3921 static void __init init_iommu_pm_ops(void)
3922 {
3923         register_syscore_ops(&iommu_syscore_ops);
3924 }
3925
3926 #else
3927 static inline void init_iommu_pm_ops(void) {}
3928 #endif  /* CONFIG_SUSPEND */
3929
3930
3931 int __init dmar_parse_one_rmrr(struct acpi_dmar_header *header, void *arg)
3932 {
3933         struct acpi_dmar_reserved_memory *rmrr;
3934         struct dmar_rmrr_unit *rmrru;
3935
3936         rmrru = kzalloc(sizeof(*rmrru), GFP_KERNEL);
3937         if (!rmrru)
3938                 return -ENOMEM;
3939
3940         rmrru->hdr = header;
3941         rmrr = (struct acpi_dmar_reserved_memory *)header;
3942         rmrru->base_address = rmrr->base_address;
3943         rmrru->end_address = rmrr->end_address;
3944         rmrru->devices = dmar_alloc_dev_scope((void *)(rmrr + 1),
3945                                 ((void *)rmrr) + rmrr->header.length,
3946                                 &rmrru->devices_cnt);
3947         if (rmrru->devices_cnt && rmrru->devices == NULL) {
3948                 kfree(rmrru);
3949                 return -ENOMEM;
3950         }
3951
3952         list_add(&rmrru->list, &dmar_rmrr_units);
3953
3954         return 0;
3955 }
3956
3957 static struct dmar_atsr_unit *dmar_find_atsr(struct acpi_dmar_atsr *atsr)
3958 {
3959         struct dmar_atsr_unit *atsru;
3960         struct acpi_dmar_atsr *tmp;
3961
3962         list_for_each_entry_rcu(atsru, &dmar_atsr_units, list) {
3963                 tmp = (struct acpi_dmar_atsr *)atsru->hdr;
3964                 if (atsr->segment != tmp->segment)
3965                         continue;
3966                 if (atsr->header.length != tmp->header.length)
3967                         continue;
3968                 if (memcmp(atsr, tmp, atsr->header.length) == 0)
3969                         return atsru;
3970         }
3971
3972         return NULL;
3973 }
3974
3975 int dmar_parse_one_atsr(struct acpi_dmar_header *hdr, void *arg)
3976 {
3977         struct acpi_dmar_atsr *atsr;
3978         struct dmar_atsr_unit *atsru;
3979
3980         if (system_state != SYSTEM_BOOTING && !intel_iommu_enabled)
3981                 return 0;
3982
3983         atsr = container_of(hdr, struct acpi_dmar_atsr, header);
3984         atsru = dmar_find_atsr(atsr);
3985         if (atsru)
3986                 return 0;
3987
3988         atsru = kzalloc(sizeof(*atsru) + hdr->length, GFP_KERNEL);
3989         if (!atsru)
3990                 return -ENOMEM;
3991
3992         /*
3993          * If memory is allocated from slab by ACPI _DSM method, we need to
3994          * copy the memory content because the memory buffer will be freed
3995          * on return.
3996          */
3997         atsru->hdr = (void *)(atsru + 1);
3998         memcpy(atsru->hdr, hdr, hdr->length);
3999         atsru->include_all = atsr->flags & 0x1;
4000         if (!atsru->include_all) {
4001                 atsru->devices = dmar_alloc_dev_scope((void *)(atsr + 1),
4002                                 (void *)atsr + atsr->header.length,
4003                                 &atsru->devices_cnt);
4004                 if (atsru->devices_cnt && atsru->devices == NULL) {
4005                         kfree(atsru);
4006                         return -ENOMEM;
4007                 }
4008         }
4009
4010         list_add_rcu(&atsru->list, &dmar_atsr_units);
4011
4012         return 0;
4013 }
4014
4015 static void intel_iommu_free_atsr(struct dmar_atsr_unit *atsru)
4016 {
4017         dmar_free_dev_scope(&atsru->devices, &atsru->devices_cnt);
4018         kfree(atsru);
4019 }
4020
4021 int dmar_release_one_atsr(struct acpi_dmar_header *hdr, void *arg)
4022 {
4023         struct acpi_dmar_atsr *atsr;
4024         struct dmar_atsr_unit *atsru;
4025
4026         atsr = container_of(hdr, struct acpi_dmar_atsr, header);
4027         atsru = dmar_find_atsr(atsr);
4028         if (atsru) {
4029                 list_del_rcu(&atsru->list);
4030                 synchronize_rcu();
4031                 intel_iommu_free_atsr(atsru);
4032         }
4033
4034         return 0;
4035 }
4036
4037 int dmar_check_one_atsr(struct acpi_dmar_header *hdr, void *arg)
4038 {
4039         int i;
4040         struct device *dev;
4041         struct acpi_dmar_atsr *atsr;
4042         struct dmar_atsr_unit *atsru;
4043
4044         atsr = container_of(hdr, struct acpi_dmar_atsr, header);
4045         atsru = dmar_find_atsr(atsr);
4046         if (!atsru)
4047                 return 0;
4048
4049         if (!atsru->include_all && atsru->devices && atsru->devices_cnt)
4050                 for_each_active_dev_scope(atsru->devices, atsru->devices_cnt,
4051                                           i, dev)
4052                         return -EBUSY;
4053
4054         return 0;
4055 }
4056
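/*
 * Bring up a hot-added DMAR unit: reject it if it lacks capabilities the
 * running configuration already relies on (pass-through, snooping,
 * superpages), then allocate its domain bitmap and root entry, program
 * the hardware and enable translation.
 */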
4057 static int intel_iommu_add(struct dmar_drhd_unit *dmaru)
4058 {
4059         int sp, ret = 0;
4060         struct intel_iommu *iommu = dmaru->iommu;
4061
4062         if (g_iommus[iommu->seq_id])
4063                 return 0;
4064
4065         if (hw_pass_through && !ecap_pass_through(iommu->ecap)) {
4066                 pr_warn("%s: Doesn't support hardware pass through.\n",
4067                         iommu->name);
4068                 return -ENXIO;
4069         }
4070         if (!ecap_sc_support(iommu->ecap) &&
4071             domain_update_iommu_snooping(iommu)) {
4072                 pr_warn("%s: Doesn't support snooping.\n",
4073                         iommu->name);
4074                 return -ENXIO;
4075         }
4076         sp = domain_update_iommu_superpage(iommu) - 1;
4077         if (sp >= 0 && !(cap_super_page_val(iommu->cap) & (1 << sp))) {
4078                 pr_warn("%s: Doesn't support large page.\n",
4079                         iommu->name);
4080                 return -ENXIO;
4081         }
4082
4083         /*
4084          * Disable translation if already enabled prior to OS handover.
4085          */
4086         if (iommu->gcmd & DMA_GCMD_TE)
4087                 iommu_disable_translation(iommu);
4088
4089         g_iommus[iommu->seq_id] = iommu;
4090         ret = iommu_init_domains(iommu);
4091         if (ret == 0)
4092                 ret = iommu_alloc_root_entry(iommu);
4093         if (ret)
4094                 goto out;
4095
4096         if (dmaru->ignored) {
4097                 /*
4098                  * we always have to disable PMRs or DMA may fail on this device
4099                  */
4100                 if (force_on)
4101                         iommu_disable_protect_mem_regions(iommu);
4102                 return 0;
4103         }
4104
4105         intel_iommu_init_qi(iommu);
4106         iommu_flush_write_buffer(iommu);
4107         ret = dmar_set_interrupt(iommu);
4108         if (ret)
4109                 goto disable_iommu;
4110
4111         iommu_set_root_entry(iommu);
4112         iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
4113         iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
4114         iommu_enable_translation(iommu);
4115
4116         iommu_disable_protect_mem_regions(iommu);
4117         return 0;
4118
4119 disable_iommu:
4120         disable_dmar_iommu(iommu);
4121 out:
4122         free_dmar_iommu(iommu);
4123         return ret;
4124 }
4125
4126 int dmar_iommu_hotplug(struct dmar_drhd_unit *dmaru, bool insert)
4127 {
4128         int ret = 0;
4129         struct intel_iommu *iommu = dmaru->iommu;
4130
4131         if (!intel_iommu_enabled)
4132                 return 0;
4133         if (iommu == NULL)
4134                 return -EINVAL;
4135
4136         if (insert) {
4137                 ret = intel_iommu_add(dmaru);
4138         } else {
4139                 disable_dmar_iommu(iommu);
4140                 free_dmar_iommu(iommu);
4141         }
4142
4143         return ret;
4144 }
4145
4146 static void intel_iommu_free_dmars(void)
4147 {
4148         struct dmar_rmrr_unit *rmrru, *rmrr_n;
4149         struct dmar_atsr_unit *atsru, *atsr_n;
4150
4151         list_for_each_entry_safe(rmrru, rmrr_n, &dmar_rmrr_units, list) {
4152                 list_del(&rmrru->list);
4153                 dmar_free_dev_scope(&rmrru->devices, &rmrru->devices_cnt);
4154                 kfree(rmrru);
4155         }
4156
4157         list_for_each_entry_safe(atsru, atsr_n, &dmar_atsr_units, list) {
4158                 list_del(&atsru->list);
4159                 intel_iommu_free_atsr(atsru);
4160         }
4161 }
4162
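/*
 * Walk up from the device to its PCIe root port and report whether any
 * ATSR unit (or an include_all ATSR) covers that root port, i.e. whether
 * Address Translation Services may be used for the device.
 */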
4163 int dmar_find_matched_atsr_unit(struct pci_dev *dev)
4164 {
4165         int i, ret = 1;
4166         struct pci_bus *bus;
4167         struct pci_dev *bridge = NULL;
4168         struct device *tmp;
4169         struct acpi_dmar_atsr *atsr;
4170         struct dmar_atsr_unit *atsru;
4171
4172         dev = pci_physfn(dev);
4173         for (bus = dev->bus; bus; bus = bus->parent) {
4174                 bridge = bus->self;
4175                 if (!bridge || !pci_is_pcie(bridge) ||
4176                     pci_pcie_type(bridge) == PCI_EXP_TYPE_PCI_BRIDGE)
4177                         return 0;
4178                 if (pci_pcie_type(bridge) == PCI_EXP_TYPE_ROOT_PORT)
4179                         break;
4180         }
4181         if (!bridge)
4182                 return 0;
4183
4184         rcu_read_lock();
4185         list_for_each_entry_rcu(atsru, &dmar_atsr_units, list) {
4186                 atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
4187                 if (atsr->segment != pci_domain_nr(dev->bus))
4188                         continue;
4189
4190                 for_each_dev_scope(atsru->devices, atsru->devices_cnt, i, tmp)
4191                         if (tmp == &bridge->dev)
4192                                 goto out;
4193
4194                 if (atsru->include_all)
4195                         goto out;
4196         }
4197         ret = 0;
4198 out:
4199         rcu_read_unlock();
4200
4201         return ret;
4202 }
4203
4204 int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info *info)
4205 {
4206         int ret = 0;
4207         struct dmar_rmrr_unit *rmrru;
4208         struct dmar_atsr_unit *atsru;
4209         struct acpi_dmar_atsr *atsr;
4210         struct acpi_dmar_reserved_memory *rmrr;
4211
4212         if (!intel_iommu_enabled && system_state != SYSTEM_BOOTING)
4213                 return 0;
4214
4215         list_for_each_entry(rmrru, &dmar_rmrr_units, list) {
4216                 rmrr = container_of(rmrru->hdr,
4217                                     struct acpi_dmar_reserved_memory, header);
4218                 if (info->event == BUS_NOTIFY_ADD_DEVICE) {
4219                         ret = dmar_insert_dev_scope(info, (void *)(rmrr + 1),
4220                                 ((void *)rmrr) + rmrr->header.length,
4221                                 rmrr->segment, rmrru->devices,
4222                                 rmrru->devices_cnt);
4223                         if (ret < 0)
4224                                 return ret;
4225                 } else if (info->event == BUS_NOTIFY_DEL_DEVICE) {
4226                         dmar_remove_dev_scope(info, rmrr->segment,
4227                                 rmrru->devices, rmrru->devices_cnt);
4228                 }
4229         }
4230
4231         list_for_each_entry(atsru, &dmar_atsr_units, list) {
4232                 if (atsru->include_all)
4233                         continue;
4234
4235                 atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
4236                 if (info->event == BUS_NOTIFY_ADD_DEVICE) {
4237                         ret = dmar_insert_dev_scope(info, (void *)(atsr + 1),
4238                                         (void *)atsr + atsr->header.length,
4239                                         atsr->segment, atsru->devices,
4240                                         atsru->devices_cnt);
4241                         if (ret > 0)
4242                                 break;
4243                         else if (ret < 0)
4244                                 return ret;
4245                 } else if (info->event == BUS_NOTIFY_DEL_DEVICE) {
4246                         if (dmar_remove_dev_scope(info, atsr->segment,
4247                                         atsru->devices, atsru->devices_cnt))
4248                                 break;
4249                 }
4250         }
4251
4252         return 0;
4253 }
4254
4255 /*
4256  * Here we only respond to a device being unbound from its driver.
4257  *
4258  * A newly added device is not attached to its DMAR domain here yet. That will
4259  * happen when the device is first mapped to an iova.
4260  */
4261 static int device_notifier(struct notifier_block *nb,
4262                                   unsigned long action, void *data)
4263 {
4264         struct device *dev = data;
4265         struct dmar_domain *domain;
4266
4267         if (iommu_dummy(dev))
4268                 return 0;
4269
4270         if (action != BUS_NOTIFY_REMOVED_DEVICE)
4271                 return 0;
4272
4273         domain = find_domain(dev);
4274         if (!domain)
4275                 return 0;
4276
4277         dmar_remove_one_dev_info(domain, dev);
4278         if (!domain_type_is_vm_or_si(domain) && list_empty(&domain->devices))
4279                 domain_exit(domain);
4280
4281         return 0;
4282 }
4283
4284 static struct notifier_block device_nb = {
4285         .notifier_call = device_notifier,
4286 };
4287
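/*
 * Keep the static identity domain in sync with memory hotplug: newly
 * onlined ranges are added to the identity map, and offlined ranges are
 * unmapped again with the IOTLB of every active IOMMU invalidated.
 */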
4288 static int intel_iommu_memory_notifier(struct notifier_block *nb,
4289                                        unsigned long val, void *v)
4290 {
4291         struct memory_notify *mhp = v;
4292         unsigned long long start, end;
4293         unsigned long start_vpfn, last_vpfn;
4294
4295         switch (val) {
4296         case MEM_GOING_ONLINE:
4297                 start = mhp->start_pfn << PAGE_SHIFT;
4298                 end = ((mhp->start_pfn + mhp->nr_pages) << PAGE_SHIFT) - 1;
4299                 if (iommu_domain_identity_map(si_domain, start, end)) {
4300                         pr_warn("Failed to build identity map for [%llx-%llx]\n",
4301                                 start, end);
4302                         return NOTIFY_BAD;
4303                 }
4304                 break;
4305
4306         case MEM_OFFLINE:
4307         case MEM_CANCEL_ONLINE:
4308                 start_vpfn = mm_to_dma_pfn(mhp->start_pfn);
4309                 last_vpfn = mm_to_dma_pfn(mhp->start_pfn + mhp->nr_pages - 1);
4310                 while (start_vpfn <= last_vpfn) {
4311                         struct iova *iova;
4312                         struct dmar_drhd_unit *drhd;
4313                         struct intel_iommu *iommu;
4314                         struct page *freelist;
4315
4316                         iova = find_iova(&si_domain->iovad, start_vpfn);
4317                         if (iova == NULL) {
4318                                 pr_debug("Failed get IOVA for PFN %lx\n",
4319                                          start_vpfn);
4320                                 break;
4321                         }
4322
4323                         iova = split_and_remove_iova(&si_domain->iovad, iova,
4324                                                      start_vpfn, last_vpfn);
4325                         if (iova == NULL) {
4326                                 pr_warn("Failed to split IOVA PFN [%lx-%lx]\n",
4327                                         start_vpfn, last_vpfn);
4328                                 return NOTIFY_BAD;
4329                         }
4330
4331                         freelist = domain_unmap(si_domain, iova->pfn_lo,
4332                                                iova->pfn_hi);
4333
4334                         rcu_read_lock();
4335                         for_each_active_iommu(iommu, drhd)
4336                                 iommu_flush_iotlb_psi(iommu, si_domain,
4337                                         iova->pfn_lo, iova_size(iova),
4338                                         !freelist, 0);
4339                         rcu_read_unlock();
4340                         dma_free_pagelist(freelist);
4341
4342                         start_vpfn = iova->pfn_hi + 1;
4343                         free_iova_mem(iova);
4344                 }
4345                 break;
4346         }
4347
4348         return NOTIFY_OK;
4349 }
4350
4351 static struct notifier_block intel_iommu_memory_nb = {
4352         .notifier_call = intel_iommu_memory_notifier,
4353         .priority = 0
4354 };
4355
4356
4357 static ssize_t intel_iommu_show_version(struct device *dev,
4358                                         struct device_attribute *attr,
4359                                         char *buf)
4360 {
4361         struct intel_iommu *iommu = dev_get_drvdata(dev);
4362         u32 ver = readl(iommu->reg + DMAR_VER_REG);
4363         return sprintf(buf, "%d:%d\n",
4364                        DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver));
4365 }
4366 static DEVICE_ATTR(version, S_IRUGO, intel_iommu_show_version, NULL);
4367
4368 static ssize_t intel_iommu_show_address(struct device *dev,
4369                                         struct device_attribute *attr,
4370                                         char *buf)
4371 {
4372         struct intel_iommu *iommu = dev_get_drvdata(dev);
4373         return sprintf(buf, "%llx\n", iommu->reg_phys);
4374 }
4375 static DEVICE_ATTR(address, S_IRUGO, intel_iommu_show_address, NULL);
4376
4377 static ssize_t intel_iommu_show_cap(struct device *dev,
4378                                     struct device_attribute *attr,
4379                                     char *buf)
4380 {
4381         struct intel_iommu *iommu = dev_get_drvdata(dev);
4382         return sprintf(buf, "%llx\n", iommu->cap);
4383 }
4384 static DEVICE_ATTR(cap, S_IRUGO, intel_iommu_show_cap, NULL);
4385
4386 static ssize_t intel_iommu_show_ecap(struct device *dev,
4387                                     struct device_attribute *attr,
4388                                     char *buf)
4389 {
4390         struct intel_iommu *iommu = dev_get_drvdata(dev);
4391         return sprintf(buf, "%llx\n", iommu->ecap);
4392 }
4393 static DEVICE_ATTR(ecap, S_IRUGO, intel_iommu_show_ecap, NULL);
4394
4395 static ssize_t intel_iommu_show_ndoms(struct device *dev,
4396                                       struct device_attribute *attr,
4397                                       char *buf)
4398 {
4399         struct intel_iommu *iommu = dev_get_drvdata(dev);
4400         return sprintf(buf, "%ld\n", cap_ndoms(iommu->cap));
4401 }
4402 static DEVICE_ATTR(domains_supported, S_IRUGO, intel_iommu_show_ndoms, NULL);
4403
4404 static ssize_t intel_iommu_show_ndoms_used(struct device *dev,
4405                                            struct device_attribute *attr,
4406                                            char *buf)
4407 {
4408         struct intel_iommu *iommu = dev_get_drvdata(dev);
4409         return sprintf(buf, "%d\n", bitmap_weight(iommu->domain_ids,
4410                                                   cap_ndoms(iommu->cap)));
4411 }
4412 static DEVICE_ATTR(domains_used, S_IRUGO, intel_iommu_show_ndoms_used, NULL);
4413
4414 static struct attribute *intel_iommu_attrs[] = {
4415         &dev_attr_version.attr,
4416         &dev_attr_address.attr,
4417         &dev_attr_cap.attr,
4418         &dev_attr_ecap.attr,
4419         &dev_attr_domains_supported.attr,
4420         &dev_attr_domains_used.attr,
4421         NULL,
4422 };
4423
4424 static struct attribute_group intel_iommu_group = {
4425         .name = "intel-iommu",
4426         .attrs = intel_iommu_attrs,
4427 };
4428
4429 const struct attribute_group *intel_iommu_groups[] = {
4430         &intel_iommu_group,
4431         NULL,
4432 };
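/*
 * The attributes above are registered per IOMMU via iommu_device_create()
 * in intel_iommu_init() and appear in sysfs, e.g. under
 * /sys/class/iommu/dmar0/intel-iommu/.
 */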
4433
4434 int __init intel_iommu_init(void)
4435 {
4436         int ret = -ENODEV;
4437         struct dmar_drhd_unit *drhd;
4438         struct intel_iommu *iommu;
4439
4440         /* VT-d is required for a TXT/tboot launch, so enforce that */
4441         force_on = tboot_force_iommu();
4442
4443         if (iommu_init_mempool()) {
4444                 if (force_on)
4445                         panic("tboot: Failed to initialize iommu memory\n");
4446                 return -ENOMEM;
4447         }
4448
4449         down_write(&dmar_global_lock);
4450         if (dmar_table_init()) {
4451                 if (force_on)
4452                         panic("tboot: Failed to initialize DMAR table\n");
4453                 goto out_free_dmar;
4454         }
4455
4456         if (dmar_dev_scope_init() < 0) {
4457                 if (force_on)
4458                         panic("tboot: Failed to initialize DMAR device scope\n");
4459                 goto out_free_dmar;
4460         }
4461
4462         if (no_iommu || dmar_disabled)
4463                 goto out_free_dmar;
4464
4465         if (list_empty(&dmar_rmrr_units))
4466                 pr_info("No RMRR found\n");
4467
4468         if (list_empty(&dmar_atsr_units))
4469                 pr_info("No ATSR found\n");
4470
4471         if (dmar_init_reserved_ranges()) {
4472                 if (force_on)
4473                         panic("tboot: Failed to reserve iommu ranges\n");
4474                 goto out_free_reserved_range;
4475         }
4476
4477         init_no_remapping_devices();
4478
4479         ret = init_dmars();
4480         if (ret) {
4481                 if (force_on)
4482                         panic("tboot: Failed to initialize DMARs\n");
4483                 pr_err("Initialization failed\n");
4484                 goto out_free_reserved_range;
4485         }
4486         up_write(&dmar_global_lock);
4487         pr_info("Intel(R) Virtualization Technology for Directed I/O\n");
4488
4489         init_timer(&unmap_timer);
4490 #ifdef CONFIG_SWIOTLB
4491         swiotlb = 0;
4492 #endif
4493         dma_ops = &intel_dma_ops;
4494
4495         init_iommu_pm_ops();
4496
4497         for_each_active_iommu(iommu, drhd)
4498                 iommu->iommu_dev = iommu_device_create(NULL, iommu,
4499                                                        intel_iommu_groups,
4500                                                        "%s", iommu->name);
4501
4502         bus_set_iommu(&pci_bus_type, &intel_iommu_ops);
4503         bus_register_notifier(&pci_bus_type, &device_nb);
4504         if (si_domain && !hw_pass_through)
4505                 register_memory_notifier(&intel_iommu_memory_nb);
4506
4507         intel_iommu_enabled = 1;
4508
4509         return 0;
4510
4511 out_free_reserved_range:
4512         put_iova_domain(&reserved_iova_list);
4513 out_free_dmar:
4514         intel_iommu_free_dmars();
4515         up_write(&dmar_global_lock);
4516         iommu_exit_mempool();
4517         return ret;
4518 }
4519
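/*
 * Callback for pci_for_each_dma_alias(): tear down the context entry for
 * one bus/devfn alias of the device being released.
 */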
4520 static int domain_context_clear_one_cb(struct pci_dev *pdev, u16 alias, void *opaque)
4521 {
4522         struct intel_iommu *iommu = opaque;
4523
4524         domain_context_clear_one(iommu, PCI_BUS_NUM(alias), alias & 0xff);
4525         return 0;
4526 }
4527
4528 /*
4529  * NB - intel-iommu lacks any sort of reference counting for the users of
4530  * dependent devices.  If multiple endpoints have intersecting dependent
4531  * devices, unbinding the driver from any one of them will possibly leave
4532  * the others unable to operate.
4533  */
4534 static void domain_context_clear(struct intel_iommu *iommu, struct device *dev)
4535 {
4536         if (!iommu || !dev || !dev_is_pci(dev))
4537                 return;
4538
4539         pci_for_each_dma_alias(to_pci_dev(dev), &domain_context_clear_one_cb, iommu);
4540 }
4541
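/*
 * Unbind a device from its domain.  Caller must hold device_domain_lock;
 * this clears the device's context entries and device IOTLB, unlinks the
 * device_domain_info and drops the domain's reference on the IOMMU.
 */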
4542 static void __dmar_remove_one_dev_info(struct device_domain_info *info)
4543 {
4544         struct intel_iommu *iommu;
4545         unsigned long flags;
4546
4547         assert_spin_locked(&device_domain_lock);
4548
4549         if (WARN_ON(!info))
4550                 return;
4551
4552         iommu = info->iommu;
4553
4554         if (info->dev) {
4555                 iommu_disable_dev_iotlb(info);
4556                 domain_context_clear(iommu, info->dev);
4557         }
4558
4559         unlink_domain_info(info);
4560
4561         spin_lock_irqsave(&iommu->lock, flags);
4562         domain_detach_iommu(info->domain, iommu);
4563         spin_unlock_irqrestore(&iommu->lock, flags);
4564
4565         free_devinfo_mem(info);
4566 }
4567
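/*
 * Take device_domain_lock and tear down the device's device_domain_info
 * via __dmar_remove_one_dev_info().
 */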
4568 static void dmar_remove_one_dev_info(struct dmar_domain *domain,
4569                                      struct device *dev)
4570 {
4571         struct device_domain_info *info;
4572         unsigned long flags;
4573
4574         spin_lock_irqsave(&device_domain_lock, flags);
4575         info = dev->archdata.iommu;
4576         __dmar_remove_one_dev_info(info);
4577         spin_unlock_irqrestore(&device_domain_lock, flags);
4578 }
4579
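/*
 * Set up a domain created through the IOMMU API (see
 * intel_iommu_domain_alloc()): initialize its IOVA allocator and reserved
 * ranges, derive the adjusted guest address width, and allocate the
 * top-level page directory.
 */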
4580 static int md_domain_init(struct dmar_domain *domain, int guest_width)
4581 {
4582         int adjust_width;
4583
4584         init_iova_domain(&domain->iovad, VTD_PAGE_SIZE, IOVA_START_PFN,
4585                         DMA_32BIT_PFN);
4586         domain_reserve_special_ranges(domain);
4587
4588         /* calculate AGAW */
4589         domain->gaw = guest_width;
4590         adjust_width = guestwidth_to_adjustwidth(guest_width);
4591         domain->agaw = width_to_agaw(adjust_width);
4592
4593         domain->iommu_coherency = 0;
4594         domain->iommu_snooping = 0;
4595         domain->iommu_superpage = 0;
4596         domain->max_addr = 0;
4597
4598         /* always allocate the top pgd */
4599         domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
4600         if (!domain->pgd)
4601                 return -ENOMEM;
4602         domain_flush_cache(domain, domain->pgd, PAGE_SIZE);
4603         return 0;
4604 }
4605
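/*
 * IOMMU API: only unmanaged domains are supported.  Allocate a
 * virtual-machine domain, initialize it with the default address width
 * and report its aperture geometry to the core.
 */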
4606 static struct iommu_domain *intel_iommu_domain_alloc(unsigned type)
4607 {
4608         struct dmar_domain *dmar_domain;
4609         struct iommu_domain *domain;
4610
4611         if (type != IOMMU_DOMAIN_UNMANAGED)
4612                 return NULL;
4613
4614         dmar_domain = alloc_domain(DOMAIN_FLAG_VIRTUAL_MACHINE);
4615         if (!dmar_domain) {
4616                 pr_err("Can't allocate dmar_domain\n");
4617                 return NULL;
4618         }
4619         if (md_domain_init(dmar_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
4620                 pr_err("Domain initialization failed\n");
4621                 domain_exit(dmar_domain);
4622                 return NULL;
4623         }
4624         domain_update_iommu_cap(dmar_domain);
4625
4626         domain = &dmar_domain->domain;
4627         domain->geometry.aperture_start = 0;
4628         domain->geometry.aperture_end   = __DOMAIN_MAX_ADDR(dmar_domain->gaw);
4629         domain->geometry.force_aperture = true;
4630
4631         return domain;
4632 }
4633
4634 static void intel_iommu_domain_free(struct iommu_domain *domain)
4635 {
4636         domain_exit(to_dmar_domain(domain));
4637 }
4638
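/*
 * IOMMU API attach: refuse RMRR-locked devices, detach the device from
 * any previous domain, then check that this IOMMU's address width covers
 * the domain's highest mapped address, dropping unused upper page-table
 * levels if the domain was built wider than the IOMMU supports, before
 * adding the device to the domain.
 */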
4639 static int intel_iommu_attach_device(struct iommu_domain *domain,
4640                                      struct device *dev)
4641 {
4642         struct dmar_domain *dmar_domain = to_dmar_domain(domain);
4643         struct intel_iommu *iommu;
4644         int addr_width;
4645         u8 bus, devfn;
4646
4647         if (device_is_rmrr_locked(dev)) {
4648                 dev_warn(dev, "Device is ineligible for IOMMU domain attach due to platform RMRR requirement.  Contact your platform vendor.\n");
4649                 return -EPERM;
4650         }
4651
4652         /* normally dev is not mapped */
4653         if (unlikely(domain_context_mapped(dev))) {
4654                 struct dmar_domain *old_domain;
4655
4656                 old_domain = find_domain(dev);
4657                 if (old_domain) {
4658                         rcu_read_lock();
4659                         dmar_remove_one_dev_info(old_domain, dev);
4660                         rcu_read_unlock();
4661
4662                         if (!domain_type_is_vm_or_si(old_domain) &&
4663                              list_empty(&old_domain->devices))
4664                                 domain_exit(old_domain);
4665                 }
4666         }
4667
4668         iommu = device_to_iommu(dev, &bus, &devfn);
4669         if (!iommu)
4670                 return -ENODEV;
4671
4672         /* check if this iommu agaw is sufficient for max mapped address */
4673         addr_width = agaw_to_width(iommu->agaw);
4674         if (addr_width > cap_mgaw(iommu->cap))
4675                 addr_width = cap_mgaw(iommu->cap);
4676
4677         if (dmar_domain->max_addr > (1LL << addr_width)) {
4678                 pr_err("%s: iommu width (%d) is not "
4679                        "sufficient for the mapped address (%llx)\n",
4680                        __func__, addr_width, dmar_domain->max_addr);
4681                 return -EFAULT;
4682         }
4683         dmar_domain->gaw = addr_width;
4684
4685         /*
4686          * Knock out extra levels of page tables if necessary
4687          */
4688         while (iommu->agaw < dmar_domain->agaw) {
4689                 struct dma_pte *pte;
4690
4691                 pte = dmar_domain->pgd;
4692                 if (dma_pte_present(pte)) {
4693                         dmar_domain->pgd = (struct dma_pte *)
4694                                 phys_to_virt(dma_pte_addr(pte));
4695                         free_pgtable_page(pte);
4696                 }
4697                 dmar_domain->agaw--;
4698         }
4699
4700         return domain_add_dev_info(dmar_domain, dev);
4701 }
4702
4703 static void intel_iommu_detach_device(struct iommu_domain *domain,
4704                                       struct device *dev)
4705 {
4706         dmar_remove_one_dev_info(to_dmar_domain(domain), dev);
4707 }
4708
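/*
 * IOMMU API map: translate IOMMU_* protection flags to DMA PTE bits
 * (DMA_PTE_SNP only when the hardware supports snooping), grow the
 * domain's max_addr within its address width, and install the mapping.
 */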
4709 static int intel_iommu_map(struct iommu_domain *domain,
4710                            unsigned long iova, phys_addr_t hpa,
4711                            size_t size, int iommu_prot)
4712 {
4713         struct dmar_domain *dmar_domain = to_dmar_domain(domain);
4714         u64 max_addr;
4715         int prot = 0;
4716         int ret;
4717
4718         if (iommu_prot & IOMMU_READ)
4719                 prot |= DMA_PTE_READ;
4720         if (iommu_prot & IOMMU_WRITE)
4721                 prot |= DMA_PTE_WRITE;
4722         if ((iommu_prot & IOMMU_CACHE) && dmar_domain->iommu_snooping)
4723                 prot |= DMA_PTE_SNP;
4724
4725         max_addr = iova + size;
4726         if (dmar_domain->max_addr < max_addr) {
4727                 u64 end;
4728
4729                 /* check if minimum agaw is sufficient for mapped address */
4730                 end = __DOMAIN_MAX_ADDR(dmar_domain->gaw) + 1;
4731                 if (end < max_addr) {
4732                         pr_err("%s: iommu width (%d) is not "
4733                                "sufficient for the mapped address (%llx)\n",
4734                                __func__, dmar_domain->gaw, max_addr);
4735                         return -EFAULT;
4736                 }
4737                 dmar_domain->max_addr = max_addr;
4738         }
4739         /* Round up size to next multiple of PAGE_SIZE, if it and
4740            the low bits of hpa would take us onto the next page */
4741         size = aligned_nrpages(hpa, size);
4742         ret = domain_pfn_mapping(dmar_domain, iova >> VTD_PAGE_SHIFT,
4743                                  hpa >> VTD_PAGE_SHIFT, size, prot);
4744         return ret;
4745 }
4746
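/*
 * IOMMU API unmap: tear down the covered page-table range, flush the
 * IOTLB on every IOMMU the domain is attached to, then free the page
 * table pages that were unlinked.
 */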
4747 static size_t intel_iommu_unmap(struct iommu_domain *domain,
4748                                 unsigned long iova, size_t size)
4749 {
4750         struct dmar_domain *dmar_domain = to_dmar_domain(domain);
4751         struct page *freelist = NULL;
4752         struct intel_iommu *iommu;
4753         unsigned long start_pfn, last_pfn;
4754         unsigned int npages;
4755         int iommu_id, level = 0;
4756
4757         /* Cope with horrid API which requires us to unmap more than the
4758            size argument if it happens to be a large-page mapping. */
4759         if (!pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, &level))
4760                 BUG();
4761
4762         if (size < VTD_PAGE_SIZE << level_to_offset_bits(level))
4763                 size = VTD_PAGE_SIZE << level_to_offset_bits(level);
4764
4765         start_pfn = iova >> VTD_PAGE_SHIFT;
4766         last_pfn = (iova + size - 1) >> VTD_PAGE_SHIFT;
4767
4768         freelist = domain_unmap(dmar_domain, start_pfn, last_pfn);
4769
4770         npages = last_pfn - start_pfn + 1;
4771
4772         for_each_domain_iommu(iommu_id, dmar_domain) {
4773                 iommu = g_iommus[iommu_id];
4774
4775                 iommu_flush_iotlb_psi(iommu, dmar_domain,
4776                                       start_pfn, npages, !freelist, 0);
4777         }
4778
4779         dma_free_pagelist(freelist);
4780
4781         if (dmar_domain->max_addr == iova + size)
4782                 dmar_domain->max_addr = iova;
4783
4784         return size;
4785 }
4786
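/*
 * IOMMU API: walk the domain's page tables and return the physical
 * address the IOVA's DMA PTE points to.
 */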
4787 static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
4788                                             dma_addr_t iova)
4789 {
4790         struct dmar_domain *dmar_domain = to_dmar_domain(domain);
4791         struct dma_pte *pte;
4792         int level = 0;
4793         u64 phys = 0;
4794
4795         pte = pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, &level);
4796         if (pte)
4797                 phys = dma_pte_addr(pte);
4798
4799         return phys;
4800 }
4801
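/*
 * Advertise cache coherency when all IOMMUs support snoop control, and
 * interrupt remapping when it is enabled.
 */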
4802 static bool intel_iommu_capable(enum iommu_cap cap)
4803 {
4804         if (cap == IOMMU_CAP_CACHE_COHERENCY)
4805                 return domain_update_iommu_snooping(NULL) == 1;
4806         if (cap == IOMMU_CAP_INTR_REMAP)
4807                 return irq_remapping_enabled == 1;
4808
4809         return false;
4810 }
4811
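/*
 * Link a newly added device to its IOMMU's sysfs entry and place it in
 * an IOMMU group.
 */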
4812 static int intel_iommu_add_device(struct device *dev)
4813 {
4814         struct intel_iommu *iommu;
4815         struct iommu_group *group;
4816         u8 bus, devfn;
4817
4818         iommu = device_to_iommu(dev, &bus, &devfn);
4819         if (!iommu)
4820                 return -ENODEV;
4821
4822         iommu_device_link(iommu->iommu_dev, dev);
4823
4824         group = iommu_group_get_for_dev(dev);
4825
4826         if (IS_ERR(group))
4827                 return PTR_ERR(group);
4828
4829         iommu_group_put(group);
4830         return 0;
4831 }
4832
4833 static void intel_iommu_remove_device(struct device *dev)
4834 {
4835         struct intel_iommu *iommu;
4836         u8 bus, devfn;
4837
4838         iommu = device_to_iommu(dev, &bus, &devfn);
4839         if (!iommu)
4840                 return;
4841
4842         iommu_group_remove_device(dev);
4843
4844         iommu_device_unlink(iommu->iommu_dev, dev);
4845 }
4846
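/* IOMMU operations handed to the generic IOMMU layer via bus_set_iommu(). */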
4847 static const struct iommu_ops intel_iommu_ops = {
4848         .capable        = intel_iommu_capable,
4849         .domain_alloc   = intel_iommu_domain_alloc,
4850         .domain_free    = intel_iommu_domain_free,
4851         .attach_dev     = intel_iommu_attach_device,
4852         .detach_dev     = intel_iommu_detach_device,
4853         .map            = intel_iommu_map,
4854         .unmap          = intel_iommu_unmap,
4855         .map_sg         = default_iommu_map_sg,
4856         .iova_to_phys   = intel_iommu_iova_to_phys,
4857         .add_device     = intel_iommu_add_device,
4858         .remove_device  = intel_iommu_remove_device,
4859         .pgsize_bitmap  = INTEL_IOMMU_PGSIZES,
4860 };
4861
4862 static void quirk_iommu_g4x_gfx(struct pci_dev *dev)
4863 {
4864         /* G4x/GM45 integrated gfx dmar support is totally busted. */
4865         pr_info("Disabling IOMMU for graphics on this chipset\n");
4866         dmar_map_gfx = 0;
4867 }
4868
4869 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_g4x_gfx);
4870 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e00, quirk_iommu_g4x_gfx);
4871 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e10, quirk_iommu_g4x_gfx);
4872 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e20, quirk_iommu_g4x_gfx);
4873 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e30, quirk_iommu_g4x_gfx);
4874 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e40, quirk_iommu_g4x_gfx);
4875 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e90, quirk_iommu_g4x_gfx);
4876
4877 static void quirk_iommu_rwbf(struct pci_dev *dev)
4878 {
4879         /*
4880          * Mobile 4 Series Chipset neglects to set RWBF capability,
4881          * but needs it. Same seems to hold for the desktop versions.
4882          */
4883         pr_info("Forcing write-buffer flush capability\n");
4884         rwbf_quirk = 1;
4885 }
4886
4887 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_rwbf);
4888 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e00, quirk_iommu_rwbf);
4889 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e10, quirk_iommu_rwbf);
4890 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e20, quirk_iommu_rwbf);
4891 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e30, quirk_iommu_rwbf);
4892 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e40, quirk_iommu_rwbf);
4893 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e90, quirk_iommu_rwbf);
4894
4895 #define GGC 0x52
4896 #define GGC_MEMORY_SIZE_MASK    (0xf << 8)
4897 #define GGC_MEMORY_SIZE_NONE    (0x0 << 8)
4898 #define GGC_MEMORY_SIZE_1M      (0x1 << 8)
4899 #define GGC_MEMORY_SIZE_2M      (0x3 << 8)
4900 #define GGC_MEMORY_VT_ENABLED   (0x8 << 8)
4901 #define GGC_MEMORY_SIZE_2M_VT   (0x9 << 8)
4902 #define GGC_MEMORY_SIZE_3M_VT   (0xa << 8)
4903 #define GGC_MEMORY_SIZE_4M_VT   (0xb << 8)
4904
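/*
 * Read the GGC register: if the BIOS allocated no VT-enabled (shadow)
 * GTT, translating graphics DMA cannot work, so leave the GPU out of the
 * IOMMU.  Otherwise force strict IOTLB flushing, since the GPU has to be
 * idle before a flush.
 */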
4905 static void quirk_calpella_no_shadow_gtt(struct pci_dev *dev)
4906 {
4907         unsigned short ggc;
4908
4909         if (pci_read_config_word(dev, GGC, &ggc))
4910                 return;
4911
4912         if (!(ggc & GGC_MEMORY_VT_ENABLED)) {
4913                 pr_info("BIOS has allocated no shadow GTT; disabling IOMMU for graphics\n");
4914                 dmar_map_gfx = 0;
4915         } else if (dmar_map_gfx) {
4916                 /* we have to ensure the gfx device is idle before we flush */
4917                 pr_info("Disabling batched IOTLB flush on Ironlake\n");
4918                 intel_iommu_strict = 1;
4919         }
4920 }
4921 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0040, quirk_calpella_no_shadow_gtt);
4922 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0044, quirk_calpella_no_shadow_gtt);
4923 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0062, quirk_calpella_no_shadow_gtt);
4924 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x006a, quirk_calpella_no_shadow_gtt);
4925
4926 /* On Tylersburg chipsets, some BIOSes have been known to enable the
4927    ISOCH DMAR unit for the Azalia sound device, but not give it any
4928    TLB entries, which causes it to deadlock. Check for that.  We do
4929    this in a function called from init_dmars(), instead of in a PCI
4930    quirk, because we don't want to print the obnoxious "BIOS broken"
4931    message if VT-d is actually disabled.
4932 */
4933 static void __init check_tylersburg_isoch(void)
4934 {
4935         struct pci_dev *pdev;
4936         uint32_t vtisochctrl;
4937
4938         /* If there's no Azalia in the system anyway, forget it. */
4939         pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x3a3e, NULL);
4940         if (!pdev)
4941                 return;
4942         pci_dev_put(pdev);
4943
4944         /* System Management Registers. Might be hidden, in which case
4945            we can't do the sanity check. But that's OK, because the
4946            known-broken BIOSes _don't_ actually hide it, so far. */
4947         pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x342e, NULL);
4948         if (!pdev)
4949                 return;
4950
4951         if (pci_read_config_dword(pdev, 0x188, &vtisochctrl)) {
4952                 pci_dev_put(pdev);
4953                 return;
4954         }
4955
4956         pci_dev_put(pdev);
4957
4958         /* If Azalia DMA is routed to the non-isoch DMAR unit, fine. */
4959         if (vtisochctrl & 1)
4960                 return;
4961
4962         /* Drop all bits other than the number of TLB entries */
4963         vtisochctrl &= 0x1c;
4964
4965         /* If we have the recommended number of TLB entries (16), fine. */
4966         if (vtisochctrl == 0x10)
4967                 return;
4968
4969         /* Zero TLB entries? You get to ride the short bus to school. */
4970         if (!vtisochctrl) {
4971                 WARN(1, "Your BIOS is broken; DMA routed to ISOCH DMAR unit but no TLB space.\n"
4972                      "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
4973                      dmi_get_system_info(DMI_BIOS_VENDOR),
4974                      dmi_get_system_info(DMI_BIOS_VERSION),
4975                      dmi_get_system_info(DMI_PRODUCT_VERSION));
4976                 iommu_identity_mapping |= IDENTMAP_AZALIA;
4977                 return;
4978         }
4979
4980         pr_warn("Recommended TLB entries for ISOCH unit is 16; your BIOS set %d\n",
4981                vtisochctrl);
4982 }