iommu/vt-d: Pass an iommu pointer to domain_init()
[firefly-linux-kernel-4.4.55.git] / drivers / iommu / intel-iommu.c
1 /*
2  * Copyright © 2006-2014 Intel Corporation.
3  *
4  * This program is free software; you can redistribute it and/or modify it
5  * under the terms and conditions of the GNU General Public License,
6  * version 2, as published by the Free Software Foundation.
7  *
8  * This program is distributed in the hope it will be useful, but WITHOUT
9  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
11  * more details.
12  *
13  * Authors: David Woodhouse <dwmw2@infradead.org>,
14  *          Ashok Raj <ashok.raj@intel.com>,
15  *          Shaohua Li <shaohua.li@intel.com>,
16  *          Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>,
17  *          Fenghua Yu <fenghua.yu@intel.com>
18  *          Joerg Roedel <jroedel@suse.de>
19  */
20
21 #define pr_fmt(fmt)     "DMAR: " fmt
22
23 #include <linux/init.h>
24 #include <linux/bitmap.h>
25 #include <linux/debugfs.h>
26 #include <linux/export.h>
27 #include <linux/slab.h>
28 #include <linux/irq.h>
29 #include <linux/interrupt.h>
30 #include <linux/spinlock.h>
31 #include <linux/pci.h>
32 #include <linux/dmar.h>
33 #include <linux/dma-mapping.h>
34 #include <linux/mempool.h>
35 #include <linux/memory.h>
36 #include <linux/timer.h>
37 #include <linux/iova.h>
38 #include <linux/iommu.h>
39 #include <linux/intel-iommu.h>
40 #include <linux/syscore_ops.h>
41 #include <linux/tboot.h>
42 #include <linux/dmi.h>
43 #include <linux/pci-ats.h>
44 #include <linux/memblock.h>
45 #include <linux/dma-contiguous.h>
46 #include <linux/crash_dump.h>
47 #include <asm/irq_remapping.h>
48 #include <asm/cacheflush.h>
49 #include <asm/iommu.h>
50
51 #include "irq_remapping.h"
52
53 #define ROOT_SIZE               VTD_PAGE_SIZE
54 #define CONTEXT_SIZE            VTD_PAGE_SIZE
55
56 #define IS_GFX_DEVICE(pdev) ((pdev->class >> 16) == PCI_BASE_CLASS_DISPLAY)
57 #define IS_USB_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_SERIAL_USB)
58 #define IS_ISA_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_BRIDGE_ISA)
59 #define IS_AZALIA(pdev) ((pdev)->vendor == 0x8086 && (pdev)->device == 0x3a3e)
60
61 #define IOAPIC_RANGE_START      (0xfee00000)
62 #define IOAPIC_RANGE_END        (0xfeefffff)
63 #define IOVA_START_ADDR         (0x1000)
64
65 #define DEFAULT_DOMAIN_ADDRESS_WIDTH 48
66
67 #define MAX_AGAW_WIDTH 64
68 #define MAX_AGAW_PFN_WIDTH      (MAX_AGAW_WIDTH - VTD_PAGE_SHIFT)
69
70 #define __DOMAIN_MAX_PFN(gaw)  ((((uint64_t)1) << (gaw-VTD_PAGE_SHIFT)) - 1)
71 #define __DOMAIN_MAX_ADDR(gaw) ((((uint64_t)1) << gaw) - 1)
72
73 /* We limit DOMAIN_MAX_PFN to fit in an unsigned long, and DOMAIN_MAX_ADDR
74    to match. That way, we can use 'unsigned long' for PFNs with impunity. */
75 #define DOMAIN_MAX_PFN(gaw)     ((unsigned long) min_t(uint64_t, \
76                                 __DOMAIN_MAX_PFN(gaw), (unsigned long)-1))
77 #define DOMAIN_MAX_ADDR(gaw)    (((uint64_t)__DOMAIN_MAX_PFN(gaw)) << VTD_PAGE_SHIFT)
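/*
 * Worked example for the macros above, assuming a 64-bit build and the
 * default 48-bit guest address width:
 *
 *      __DOMAIN_MAX_PFN(48) == (1ULL << 36) - 1 == 0xfffffffff
 *      DOMAIN_MAX_PFN(48)   == 0xfffffffff     (fits in unsigned long)
 *      DOMAIN_MAX_ADDR(48)  == 0xfffffffff000  (base of the last 4KiB page)
 *
 * On a 32-bit build DOMAIN_MAX_PFN() is clamped to ULONG_MAX instead.
 */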
78
79 /* IO virtual address start page frame number */
80 #define IOVA_START_PFN          (1)
81
82 #define IOVA_PFN(addr)          ((addr) >> PAGE_SHIFT)
83 #define DMA_32BIT_PFN           IOVA_PFN(DMA_BIT_MASK(32))
84 #define DMA_64BIT_PFN           IOVA_PFN(DMA_BIT_MASK(64))
85
86 /* page table handling */
87 #define LEVEL_STRIDE            (9)
88 #define LEVEL_MASK              (((u64)1 << LEVEL_STRIDE) - 1)
89
90 /*
91  * This bitmap is used to advertise the page sizes our hardware supports
92  * to the IOMMU core, which will then use this information to split
93  * physically contiguous memory regions it is mapping into page sizes
94  * that we support.
95  *
96  * Traditionally the IOMMU core just handed us the mappings directly,
97  * after making sure the size is an order of a 4KiB page and that the
98  * mapping has natural alignment.
99  *
100  * To retain this behavior, we currently advertise that we support
101  * all page sizes that are an order of 4KiB.
102  *
103  * If at some point we'd like to utilize the IOMMU core's new behavior,
104  * we could change this to advertise the real page sizes we support.
105  */
106 #define INTEL_IOMMU_PGSIZES     (~0xFFFUL)
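/*
 * Illustration: ~0xFFFUL has every bit from 12 upwards set, so this
 * advertises 4KiB (bit 12), 8KiB, ..., 2MiB (bit 21), 1GiB (bit 30) and
 * every other power-of-two multiple of 4KiB. A naturally aligned 16KiB
 * region can therefore be handed down by the IOMMU core as a single map
 * request rather than four 4KiB ones.
 */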
107
108 static inline int agaw_to_level(int agaw)
109 {
110         return agaw + 2;
111 }
112
113 static inline int agaw_to_width(int agaw)
114 {
115         return min_t(int, 30 + agaw * LEVEL_STRIDE, MAX_AGAW_WIDTH);
116 }
117
118 static inline int width_to_agaw(int width)
119 {
120         return DIV_ROUND_UP(width - 30, LEVEL_STRIDE);
121 }
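/*
 * Worked example of the agaw helpers, using the default 48-bit domain
 * address width:
 *
 *      width_to_agaw(48) == DIV_ROUND_UP(18, 9) == 2
 *      agaw_to_width(2)  == 30 + 2 * 9          == 48
 *      agaw_to_level(2)  == 4      (a four-level page table)
 *
 * Similarly, agaw 1 describes a 39-bit, three-level table and agaw 3 a
 * 57-bit, five-level table (capped at MAX_AGAW_WIDTH bits).
 */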
122
123 static inline unsigned int level_to_offset_bits(int level)
124 {
125         return (level - 1) * LEVEL_STRIDE;
126 }
127
128 static inline int pfn_level_offset(unsigned long pfn, int level)
129 {
130         return (pfn >> level_to_offset_bits(level)) & LEVEL_MASK;
131 }
132
133 static inline unsigned long level_mask(int level)
134 {
135         return -1UL << level_to_offset_bits(level);
136 }
137
138 static inline unsigned long level_size(int level)
139 {
140         return 1UL << level_to_offset_bits(level);
141 }
142
143 static inline unsigned long align_to_level(unsigned long pfn, int level)
144 {
145         return (pfn + level_size(level) - 1) & level_mask(level);
146 }
147
148 static inline unsigned long lvl_to_nr_pages(unsigned int lvl)
149 {
150         return  1 << min_t(int, (lvl - 1) * LEVEL_STRIDE, MAX_AGAW_PFN_WIDTH);
151 }
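/*
 * Example values for the level helpers above, taking level 2 (the 2MiB
 * level of a 4KiB-page table):
 *
 *      level_to_offset_bits(2)  == 9
 *      level_size(2)            == 512 VT-d pages (2MiB of IOVA)
 *      level_mask(2)            == ~0x1ffUL
 *      pfn_level_offset(pfn, 2) == (pfn >> 9) & 0x1ff
 *      lvl_to_nr_pages(2)       == 512
 *      align_to_level(1000, 2)  == 1024
 */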
152
153 /* VT-d pages must always be _smaller_ than MM pages. Otherwise things
154    are never going to work. */
155 static inline unsigned long dma_to_mm_pfn(unsigned long dma_pfn)
156 {
157         return dma_pfn >> (PAGE_SHIFT - VTD_PAGE_SHIFT);
158 }
159
160 static inline unsigned long mm_to_dma_pfn(unsigned long mm_pfn)
161 {
162         return mm_pfn << (PAGE_SHIFT - VTD_PAGE_SHIFT);
163 }
164 static inline unsigned long page_to_dma_pfn(struct page *pg)
165 {
166         return mm_to_dma_pfn(page_to_pfn(pg));
167 }
168 static inline unsigned long virt_to_dma_pfn(void *p)
169 {
170         return page_to_dma_pfn(virt_to_page(p));
171 }
172
173 /* global iommu list, set NULL for ignored DMAR units */
174 static struct intel_iommu **g_iommus;
175
176 static void __init check_tylersburg_isoch(void);
177 static int rwbf_quirk;
178
179 /*
180  * set to 1 to panic kernel if can't successfully enable VT-d
181  * (used when kernel is launched w/ TXT)
182  */
183 static int force_on = 0;
184
185 /*
186  * 0: Present
187  * 1-11: Reserved
188  * 12-63: Context Ptr (12 - (haw-1))
189  * 64-127: Reserved
190  */
191 struct root_entry {
192         u64     lo;
193         u64     hi;
194 };
195 #define ROOT_ENTRY_NR (VTD_PAGE_SIZE/sizeof(struct root_entry))
196
197 /*
198  * Take a root_entry and return the Lower Context Table Pointer (LCTP)
199  * if marked present.
200  */
201 static phys_addr_t root_entry_lctp(struct root_entry *re)
202 {
203         if (!(re->lo & 1))
204                 return 0;
205
206         return re->lo & VTD_PAGE_MASK;
207 }
208
209 /*
210  * Take a root_entry and return the Upper Context Table Pointer (UCTP)
211  * if marked present.
212  */
213 static phys_addr_t root_entry_uctp(struct root_entry *re)
214 {
215         if (!(re->hi & 1))
216                 return 0;
217
218         return re->hi & VTD_PAGE_MASK;
219 }
220 /*
221  * low 64 bits:
222  * 0: present
223  * 1: fault processing disable
224  * 2-3: translation type
225  * 12-63: address space root
226  * high 64 bits:
227  * 0-2: address width
228  * 3-6: avail
229  * 8-23: domain id
230  */
231 struct context_entry {
232         u64 lo;
233         u64 hi;
234 };
235
236 static inline void context_clear_pasid_enable(struct context_entry *context)
237 {
238         context->lo &= ~(1ULL << 11);
239 }
240
241 static inline bool context_pasid_enabled(struct context_entry *context)
242 {
243         return !!(context->lo & (1ULL << 11));
244 }
245
246 static inline void context_set_copied(struct context_entry *context)
247 {
248         context->hi |= (1ull << 3);
249 }
250
251 static inline bool context_copied(struct context_entry *context)
252 {
253         return !!(context->hi & (1ULL << 3));
254 }
255
256 static inline bool __context_present(struct context_entry *context)
257 {
258         return (context->lo & 1);
259 }
260
261 static inline bool context_present(struct context_entry *context)
262 {
263         return context_pasid_enabled(context) ?
264              __context_present(context) :
265              __context_present(context) && !context_copied(context);
266 }
267
268 static inline void context_set_present(struct context_entry *context)
269 {
270         context->lo |= 1;
271 }
272
273 static inline void context_set_fault_enable(struct context_entry *context)
274 {
275         context->lo &= (((u64)-1) << 2) | 1;
276 }
277
278 static inline void context_set_translation_type(struct context_entry *context,
279                                                 unsigned long value)
280 {
281         context->lo &= (((u64)-1) << 4) | 3;
282         context->lo |= (value & 3) << 2;
283 }
284
285 static inline void context_set_address_root(struct context_entry *context,
286                                             unsigned long value)
287 {
288         context->lo &= ~VTD_PAGE_MASK;
289         context->lo |= value & VTD_PAGE_MASK;
290 }
291
292 static inline void context_set_address_width(struct context_entry *context,
293                                              unsigned long value)
294 {
295         context->hi |= value & 7;
296 }
297
298 static inline void context_set_domain_id(struct context_entry *context,
299                                          unsigned long value)
300 {
301         context->hi |= (value & ((1 << 16) - 1)) << 8;
302 }
303
304 static inline int context_domain_id(struct context_entry *c)
305 {
306         return((c->hi >> 8) & 0xffff);
307 }
308
309 static inline void context_clear_entry(struct context_entry *context)
310 {
311         context->lo = 0;
312         context->hi = 0;
313 }
314
315 /*
316  * 0: readable
317  * 1: writable
318  * 2-6: reserved
319  * 7: super page
320  * 8-10: available
321  * 11: snoop behavior
322  * 12-63: Host physical address
323  */
324 struct dma_pte {
325         u64 val;
326 };
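/*
 * Example encodings matching the bit layout above (illustrative values):
 * a leaf entry mapping host physical page 0x12345000 read/write has
 * val == 0x12345000 | DMA_PTE_READ | DMA_PTE_WRITE == 0x12345003; a 2MiB
 * superpage entry at level 2 additionally carries DMA_PTE_LARGE_PAGE
 * (bit 7). dma_pte_present() below only tests the read/write bits.
 */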
327
328 static inline void dma_clear_pte(struct dma_pte *pte)
329 {
330         pte->val = 0;
331 }
332
333 static inline u64 dma_pte_addr(struct dma_pte *pte)
334 {
335 #ifdef CONFIG_64BIT
336         return pte->val & VTD_PAGE_MASK;
337 #else
338         /* Must have a full atomic 64-bit read */
339         return  __cmpxchg64(&pte->val, 0ULL, 0ULL) & VTD_PAGE_MASK;
340 #endif
341 }
342
343 static inline bool dma_pte_present(struct dma_pte *pte)
344 {
345         return (pte->val & 3) != 0;
346 }
347
348 static inline bool dma_pte_superpage(struct dma_pte *pte)
349 {
350         return (pte->val & DMA_PTE_LARGE_PAGE);
351 }
352
353 static inline int first_pte_in_page(struct dma_pte *pte)
354 {
355         return !((unsigned long)pte & ~VTD_PAGE_MASK);
356 }
357
358 /*
359  * This domain is a static identity mapping domain.
360  *      1. This domain creates a static 1:1 mapping to all usable memory.
361  *      2. It maps to each iommu if successful.
362  *      3. Each iommu maps to this domain if successful.
363  */
364 static struct dmar_domain *si_domain;
365 static int hw_pass_through = 1;
366
367 /*
368  * Domain represents a virtual machine; more than one device
369  * across iommus may be owned by one domain, e.g. a kvm guest.
370  */
371 #define DOMAIN_FLAG_VIRTUAL_MACHINE     (1 << 0)
372
373 /* si_domain contains multiple devices */
374 #define DOMAIN_FLAG_STATIC_IDENTITY     (1 << 1)
375
376 #define for_each_domain_iommu(idx, domain)                      \
377         for (idx = 0; idx < g_num_of_iommus; idx++)             \
378                 if (domain->iommu_refcnt[idx])
379
380 struct dmar_domain {
381         int     nid;                    /* node id */
382
383         unsigned        iommu_refcnt[DMAR_UNITS_SUPPORTED];
384                                         /* Refcount of devices per iommu */
385
386
387         u16             iommu_did[DMAR_UNITS_SUPPORTED];
388                                         /* Domain ids per IOMMU. Use u16 since
389                                          * domain ids are 16 bit wide according
390                                          * to VT-d spec, section 9.3 */
391
392         struct list_head devices;       /* all devices' list */
393         struct iova_domain iovad;       /* iova's that belong to this domain */
394
395         struct dma_pte  *pgd;           /* virtual address */
396         int             gaw;            /* max guest address width */
397
398         /* adjusted guest address width, 0 is level 2 30-bit */
399         int             agaw;
400
401         int             flags;          /* flags to find out type of domain */
402
403         int             iommu_coherency;/* indicate coherency of iommu access */
404         int             iommu_snooping; /* indicate snooping control feature*/
405         int             iommu_count;    /* reference count of iommu */
406         int             iommu_superpage;/* Level of superpages supported:
407                                            0 == 4KiB (no superpages), 1 == 2MiB,
408                                            2 == 1GiB, 3 == 512GiB, 4 == 1TiB */
409         spinlock_t      iommu_lock;     /* protect iommu set in domain */
410         u64             max_addr;       /* maximum mapped address */
411
412         struct iommu_domain domain;     /* generic domain data structure for
413                                            iommu core */
414 };
415
416 /* PCI domain-device relationship */
417 struct device_domain_info {
418         struct list_head link;  /* link to domain siblings */
419         struct list_head global; /* link to global list */
420         u8 bus;                 /* PCI bus number */
421         u8 devfn;               /* PCI devfn number */
422         struct device *dev; /* it's NULL for PCIe-to-PCI bridge */
423         struct intel_iommu *iommu; /* IOMMU used by this device */
424         struct dmar_domain *domain; /* pointer to domain */
425 };
426
427 struct dmar_rmrr_unit {
428         struct list_head list;          /* list of rmrr units   */
429         struct acpi_dmar_header *hdr;   /* ACPI header          */
430         u64     base_address;           /* reserved base address*/
431         u64     end_address;            /* reserved end address */
432         struct dmar_dev_scope *devices; /* target devices */
433         int     devices_cnt;            /* target device count */
434 };
435
436 struct dmar_atsr_unit {
437         struct list_head list;          /* list of ATSR units */
438         struct acpi_dmar_header *hdr;   /* ACPI header */
439         struct dmar_dev_scope *devices; /* target devices */
440         int devices_cnt;                /* target device count */
441         u8 include_all:1;               /* include all ports */
442 };
443
444 static LIST_HEAD(dmar_atsr_units);
445 static LIST_HEAD(dmar_rmrr_units);
446
447 #define for_each_rmrr_units(rmrr) \
448         list_for_each_entry(rmrr, &dmar_rmrr_units, list)
449
450 static void flush_unmaps_timeout(unsigned long data);
451
452 static DEFINE_TIMER(unmap_timer,  flush_unmaps_timeout, 0, 0);
453
454 #define HIGH_WATER_MARK 250
455 struct deferred_flush_tables {
456         int next;
457         struct iova *iova[HIGH_WATER_MARK];
458         struct dmar_domain *domain[HIGH_WATER_MARK];
459         struct page *freelist[HIGH_WATER_MARK];
460 };
461
462 static struct deferred_flush_tables *deferred_flush;
463
464 /* number of IOMMUs in the system, used to size and index g_iommus */
465 static int g_num_of_iommus;
466
467 static DEFINE_SPINLOCK(async_umap_flush_lock);
468 static LIST_HEAD(unmaps_to_do);
469
470 static int timer_on;
471 static long list_size;
472
473 static void domain_exit(struct dmar_domain *domain);
474 static void domain_remove_dev_info(struct dmar_domain *domain);
475 static void dmar_remove_one_dev_info(struct dmar_domain *domain,
476                                      struct device *dev);
477 static void domain_context_clear(struct intel_iommu *iommu,
478                                  struct device *dev);
479 static int domain_detach_iommu(struct dmar_domain *domain,
480                                struct intel_iommu *iommu);
481
482 #ifdef CONFIG_INTEL_IOMMU_DEFAULT_ON
483 int dmar_disabled = 0;
484 #else
485 int dmar_disabled = 1;
486 #endif /*CONFIG_INTEL_IOMMU_DEFAULT_ON*/
487
488 int intel_iommu_enabled = 0;
489 EXPORT_SYMBOL_GPL(intel_iommu_enabled);
490
491 static int dmar_map_gfx = 1;
492 static int dmar_forcedac;
493 static int intel_iommu_strict;
494 static int intel_iommu_superpage = 1;
495 static int intel_iommu_ecs = 1;
496
497 /* We only actually use ECS when PASID support (on the new bit 40)
498  * is also advertised. Some early implementations — the ones with
499  * PASID support on bit 28 — have issues even when we *only* use
500  * extended root/context tables. */
501 #define ecs_enabled(iommu) (intel_iommu_ecs && ecap_ecs(iommu->ecap) && \
502                             ecap_pasid(iommu->ecap))
503
504 int intel_iommu_gfx_mapped;
505 EXPORT_SYMBOL_GPL(intel_iommu_gfx_mapped);
506
507 #define DUMMY_DEVICE_DOMAIN_INFO ((struct device_domain_info *)(-1))
508 static DEFINE_SPINLOCK(device_domain_lock);
509 static LIST_HEAD(device_domain_list);
510
511 static const struct iommu_ops intel_iommu_ops;
512
513 static bool translation_pre_enabled(struct intel_iommu *iommu)
514 {
515         return (iommu->flags & VTD_FLAG_TRANS_PRE_ENABLED);
516 }
517
518 static void clear_translation_pre_enabled(struct intel_iommu *iommu)
519 {
520         iommu->flags &= ~VTD_FLAG_TRANS_PRE_ENABLED;
521 }
522
523 static void init_translation_status(struct intel_iommu *iommu)
524 {
525         u32 gsts;
526
527         gsts = readl(iommu->reg + DMAR_GSTS_REG);
528         if (gsts & DMA_GSTS_TES)
529                 iommu->flags |= VTD_FLAG_TRANS_PRE_ENABLED;
530 }
531
532 /* Convert generic 'struct iommu_domain' to private 'struct dmar_domain' */
533 static struct dmar_domain *to_dmar_domain(struct iommu_domain *dom)
534 {
535         return container_of(dom, struct dmar_domain, domain);
536 }
537
538 static int __init intel_iommu_setup(char *str)
539 {
540         if (!str)
541                 return -EINVAL;
542         while (*str) {
543                 if (!strncmp(str, "on", 2)) {
544                         dmar_disabled = 0;
545                         pr_info("IOMMU enabled\n");
546                 } else if (!strncmp(str, "off", 3)) {
547                         dmar_disabled = 1;
548                         pr_info("IOMMU disabled\n");
549                 } else if (!strncmp(str, "igfx_off", 8)) {
550                         dmar_map_gfx = 0;
551                         pr_info("Disable GFX device mapping\n");
552                 } else if (!strncmp(str, "forcedac", 8)) {
553                         pr_info("Forcing DAC for PCI devices\n");
554                         dmar_forcedac = 1;
555                 } else if (!strncmp(str, "strict", 6)) {
556                         pr_info("Disable batched IOTLB flush\n");
557                         intel_iommu_strict = 1;
558                 } else if (!strncmp(str, "sp_off", 6)) {
559                         pr_info("Disable supported super page\n");
560                         intel_iommu_superpage = 0;
561                 } else if (!strncmp(str, "ecs_off", 7)) {
562                         printk(KERN_INFO
563                                 "Intel-IOMMU: disable extended context table support\n");
564                         intel_iommu_ecs = 0;
565                 }
566
567                 str += strcspn(str, ",");
568                 while (*str == ',')
569                         str++;
570         }
571         return 0;
572 }
573 __setup("intel_iommu=", intel_iommu_setup);
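/*
 * Example kernel command line use of the option parsed above:
 *
 *      intel_iommu=on,strict,sp_off
 *
 * enables DMA remapping, disables batched (deferred) IOTLB flushing and
 * disables superpage mappings; the sub-options may be combined in any
 * order, separated by commas.
 */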
574
575 static struct kmem_cache *iommu_domain_cache;
576 static struct kmem_cache *iommu_devinfo_cache;
577
578 static struct dmar_domain* get_iommu_domain(struct intel_iommu *iommu, u16 did)
579 {
580         struct dmar_domain **domains;
581         int idx = did >> 8;
582
583         domains = iommu->domains[idx];
584         if (!domains)
585                 return NULL;
586
587         return domains[did & 0xff];
588 }
589
590 static void set_iommu_domain(struct intel_iommu *iommu, u16 did,
591                              struct dmar_domain *domain)
592 {
593         struct dmar_domain **domains;
594         int idx = did >> 8;
595
596         if (!iommu->domains[idx]) {
597                 size_t size = 256 * sizeof(struct dmar_domain *);
598                 iommu->domains[idx] = kzalloc(size, GFP_ATOMIC);
599         }
600
601         domains = iommu->domains[idx];
602         if (WARN_ON(!domains))
603                 return;
604         else
605                 domains[did & 0xff] = domain;
606 }
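/*
 * Example of the two-level domain lookup above: for domain id 0x1234,
 * idx == 0x12 and the low byte 0x34 indexes the 256-entry second-level
 * array, so the domain pointer lives in iommu->domains[0x12][0x34];
 * set_iommu_domain() allocates that second-level array on first use.
 */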
607
608 static inline void *alloc_pgtable_page(int node)
609 {
610         struct page *page;
611         void *vaddr = NULL;
612
613         page = alloc_pages_node(node, GFP_ATOMIC | __GFP_ZERO, 0);
614         if (page)
615                 vaddr = page_address(page);
616         return vaddr;
617 }
618
619 static inline void free_pgtable_page(void *vaddr)
620 {
621         free_page((unsigned long)vaddr);
622 }
623
624 static inline void *alloc_domain_mem(void)
625 {
626         return kmem_cache_alloc(iommu_domain_cache, GFP_ATOMIC);
627 }
628
629 static void free_domain_mem(void *vaddr)
630 {
631         kmem_cache_free(iommu_domain_cache, vaddr);
632 }
633
634 static inline void * alloc_devinfo_mem(void)
635 {
636         return kmem_cache_alloc(iommu_devinfo_cache, GFP_ATOMIC);
637 }
638
639 static inline void free_devinfo_mem(void *vaddr)
640 {
641         kmem_cache_free(iommu_devinfo_cache, vaddr);
642 }
643
644 static inline int domain_type_is_vm(struct dmar_domain *domain)
645 {
646         return domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE;
647 }
648
649 static inline int domain_type_is_si(struct dmar_domain *domain)
650 {
651         return domain->flags & DOMAIN_FLAG_STATIC_IDENTITY;
652 }
653
654 static inline int domain_type_is_vm_or_si(struct dmar_domain *domain)
655 {
656         return domain->flags & (DOMAIN_FLAG_VIRTUAL_MACHINE |
657                                 DOMAIN_FLAG_STATIC_IDENTITY);
658 }
659
660 static inline int domain_pfn_supported(struct dmar_domain *domain,
661                                        unsigned long pfn)
662 {
663         int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
664
665         return !(addr_width < BITS_PER_LONG && pfn >> addr_width);
666 }
667
668 static int __iommu_calculate_agaw(struct intel_iommu *iommu, int max_gaw)
669 {
670         unsigned long sagaw;
671         int agaw = -1;
672
673         sagaw = cap_sagaw(iommu->cap);
674         for (agaw = width_to_agaw(max_gaw);
675              agaw >= 0; agaw--) {
676                 if (test_bit(agaw, &sagaw))
677                         break;
678         }
679
680         return agaw;
681 }
682
683 /*
684  * Calculate max SAGAW for each iommu.
685  */
686 int iommu_calculate_max_sagaw(struct intel_iommu *iommu)
687 {
688         return __iommu_calculate_agaw(iommu, MAX_AGAW_WIDTH);
689 }
690
691 /*
692  * Calculate agaw for each iommu.
693  * "SAGAW" may differ across iommus; start from a default agaw and fall
694  * back to a smaller supported agaw for iommus that don't support it.
695  */
696 int iommu_calculate_agaw(struct intel_iommu *iommu)
697 {
698         return __iommu_calculate_agaw(iommu, DEFAULT_DOMAIN_ADDRESS_WIDTH);
699 }
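/*
 * Example: if cap_sagaw() reports only bit 2 set, the unit supports just
 * four-level (48-bit) tables, so iommu_calculate_agaw() returns 2. If
 * bit 2 were clear but bit 1 set, the loop would fall back to agaw 1
 * (39-bit, three-level), a smaller but supported width.
 */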
700
701 /* This function only returns a single iommu in a domain */
702 static struct intel_iommu *domain_get_iommu(struct dmar_domain *domain)
703 {
704         int iommu_id;
705
706         /* si_domain and vm domain should not get here. */
707         BUG_ON(domain_type_is_vm_or_si(domain));
708         for_each_domain_iommu(iommu_id, domain)
709                 break;
710
711         if (iommu_id < 0 || iommu_id >= g_num_of_iommus)
712                 return NULL;
713
714         return g_iommus[iommu_id];
715 }
716
717 static void domain_update_iommu_coherency(struct dmar_domain *domain)
718 {
719         struct dmar_drhd_unit *drhd;
720         struct intel_iommu *iommu;
721         bool found = false;
722         int i;
723
724         domain->iommu_coherency = 1;
725
726         for_each_domain_iommu(i, domain) {
727                 found = true;
728                 if (!ecap_coherent(g_iommus[i]->ecap)) {
729                         domain->iommu_coherency = 0;
730                         break;
731                 }
732         }
733         if (found)
734                 return;
735
736         /* No hardware attached; use lowest common denominator */
737         rcu_read_lock();
738         for_each_active_iommu(iommu, drhd) {
739                 if (!ecap_coherent(iommu->ecap)) {
740                         domain->iommu_coherency = 0;
741                         break;
742                 }
743         }
744         rcu_read_unlock();
745 }
746
747 static int domain_update_iommu_snooping(struct intel_iommu *skip)
748 {
749         struct dmar_drhd_unit *drhd;
750         struct intel_iommu *iommu;
751         int ret = 1;
752
753         rcu_read_lock();
754         for_each_active_iommu(iommu, drhd) {
755                 if (iommu != skip) {
756                         if (!ecap_sc_support(iommu->ecap)) {
757                                 ret = 0;
758                                 break;
759                         }
760                 }
761         }
762         rcu_read_unlock();
763
764         return ret;
765 }
766
767 static int domain_update_iommu_superpage(struct intel_iommu *skip)
768 {
769         struct dmar_drhd_unit *drhd;
770         struct intel_iommu *iommu;
771         int mask = 0xf;
772
773         if (!intel_iommu_superpage) {
774                 return 0;
775         }
776
777         /* set iommu_superpage to the smallest common denominator */
778         rcu_read_lock();
779         for_each_active_iommu(iommu, drhd) {
780                 if (iommu != skip) {
781                         mask &= cap_super_page_val(iommu->cap);
782                         if (!mask)
783                                 break;
784                 }
785         }
786         rcu_read_unlock();
787
788         return fls(mask);
789 }
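/*
 * Example: cap_super_page_val() is a bitmask where, per the VT-d spec,
 * bit 0 means 2MiB and bit 1 means 1GiB superpage support. If every
 * active unit reports at least 0x1, mask stays non-zero and fls(mask)
 * >= 1, i.e. iommu_superpage ends up as 1 (2MiB) or higher; a single
 * unit with no superpage support zeroes the mask and fls(0) == 0 drops
 * the whole domain back to 4KiB-only mappings.
 */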
790
791 /* Some capabilities may be different across iommus */
792 static void domain_update_iommu_cap(struct dmar_domain *domain)
793 {
794         domain_update_iommu_coherency(domain);
795         domain->iommu_snooping = domain_update_iommu_snooping(NULL);
796         domain->iommu_superpage = domain_update_iommu_superpage(NULL);
797 }
798
799 static inline struct context_entry *iommu_context_addr(struct intel_iommu *iommu,
800                                                        u8 bus, u8 devfn, int alloc)
801 {
802         struct root_entry *root = &iommu->root_entry[bus];
803         struct context_entry *context;
804         u64 *entry;
805
806         entry = &root->lo;
807         if (ecs_enabled(iommu)) {
808                 if (devfn >= 0x80) {
809                         devfn -= 0x80;
810                         entry = &root->hi;
811                 }
812                 devfn *= 2;
813         }
814         if (*entry & 1)
815                 context = phys_to_virt(*entry & VTD_PAGE_MASK);
816         else {
817                 unsigned long phy_addr;
818                 if (!alloc)
819                         return NULL;
820
821                 context = alloc_pgtable_page(iommu->node);
822                 if (!context)
823                         return NULL;
824
825                 __iommu_flush_cache(iommu, (void *)context, CONTEXT_SIZE);
826                 phy_addr = virt_to_phys((void *)context);
827                 *entry = phy_addr | 1;
828                 __iommu_flush_cache(iommu, entry, sizeof(*entry));
829         }
830         return &context[devfn];
831 }
832
833 static int iommu_dummy(struct device *dev)
834 {
835         return dev->archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO;
836 }
837
838 static struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devfn)
839 {
840         struct dmar_drhd_unit *drhd = NULL;
841         struct intel_iommu *iommu;
842         struct device *tmp;
843         struct pci_dev *ptmp, *pdev = NULL;
844         u16 segment = 0;
845         int i;
846
847         if (iommu_dummy(dev))
848                 return NULL;
849
850         if (dev_is_pci(dev)) {
851                 pdev = to_pci_dev(dev);
852                 segment = pci_domain_nr(pdev->bus);
853         } else if (has_acpi_companion(dev))
854                 dev = &ACPI_COMPANION(dev)->dev;
855
856         rcu_read_lock();
857         for_each_active_iommu(iommu, drhd) {
858                 if (pdev && segment != drhd->segment)
859                         continue;
860
861                 for_each_active_dev_scope(drhd->devices,
862                                           drhd->devices_cnt, i, tmp) {
863                         if (tmp == dev) {
864                                 *bus = drhd->devices[i].bus;
865                                 *devfn = drhd->devices[i].devfn;
866                                 goto out;
867                         }
868
869                         if (!pdev || !dev_is_pci(tmp))
870                                 continue;
871
872                         ptmp = to_pci_dev(tmp);
873                         if (ptmp->subordinate &&
874                             ptmp->subordinate->number <= pdev->bus->number &&
875                             ptmp->subordinate->busn_res.end >= pdev->bus->number)
876                                 goto got_pdev;
877                 }
878
879                 if (pdev && drhd->include_all) {
880                 got_pdev:
881                         *bus = pdev->bus->number;
882                         *devfn = pdev->devfn;
883                         goto out;
884                 }
885         }
886         iommu = NULL;
887  out:
888         rcu_read_unlock();
889
890         return iommu;
891 }
892
893 static void domain_flush_cache(struct dmar_domain *domain,
894                                void *addr, int size)
895 {
896         if (!domain->iommu_coherency)
897                 clflush_cache_range(addr, size);
898 }
899
900 static int device_context_mapped(struct intel_iommu *iommu, u8 bus, u8 devfn)
901 {
902         struct context_entry *context;
903         int ret = 0;
904         unsigned long flags;
905
906         spin_lock_irqsave(&iommu->lock, flags);
907         context = iommu_context_addr(iommu, bus, devfn, 0);
908         if (context)
909                 ret = context_present(context);
910         spin_unlock_irqrestore(&iommu->lock, flags);
911         return ret;
912 }
913
914 static void clear_context_table(struct intel_iommu *iommu, u8 bus, u8 devfn)
915 {
916         struct context_entry *context;
917         unsigned long flags;
918
919         spin_lock_irqsave(&iommu->lock, flags);
920         context = iommu_context_addr(iommu, bus, devfn, 0);
921         if (context) {
922                 context_clear_entry(context);
923                 __iommu_flush_cache(iommu, context, sizeof(*context));
924         }
925         spin_unlock_irqrestore(&iommu->lock, flags);
926 }
927
928 static void free_context_table(struct intel_iommu *iommu)
929 {
930         int i;
931         unsigned long flags;
932         struct context_entry *context;
933
934         spin_lock_irqsave(&iommu->lock, flags);
935         if (!iommu->root_entry) {
936                 goto out;
937         }
938         for (i = 0; i < ROOT_ENTRY_NR; i++) {
939                 context = iommu_context_addr(iommu, i, 0, 0);
940                 if (context)
941                         free_pgtable_page(context);
942
943                 if (!ecs_enabled(iommu))
944                         continue;
945
946                 context = iommu_context_addr(iommu, i, 0x80, 0);
947                 if (context)
948                         free_pgtable_page(context);
949
950         }
951         free_pgtable_page(iommu->root_entry);
952         iommu->root_entry = NULL;
953 out:
954         spin_unlock_irqrestore(&iommu->lock, flags);
955 }
956
957 static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
958                                       unsigned long pfn, int *target_level)
959 {
960         struct dma_pte *parent, *pte = NULL;
961         int level = agaw_to_level(domain->agaw);
962         int offset;
963
964         BUG_ON(!domain->pgd);
965
966         if (!domain_pfn_supported(domain, pfn))
967                 /* Address beyond IOMMU's addressing capabilities. */
968                 return NULL;
969
970         parent = domain->pgd;
971
972         while (1) {
973                 void *tmp_page;
974
975                 offset = pfn_level_offset(pfn, level);
976                 pte = &parent[offset];
977                 if (!*target_level && (dma_pte_superpage(pte) || !dma_pte_present(pte)))
978                         break;
979                 if (level == *target_level)
980                         break;
981
982                 if (!dma_pte_present(pte)) {
983                         uint64_t pteval;
984
985                         tmp_page = alloc_pgtable_page(domain->nid);
986
987                         if (!tmp_page)
988                                 return NULL;
989
990                         domain_flush_cache(domain, tmp_page, VTD_PAGE_SIZE);
991                         pteval = ((uint64_t)virt_to_dma_pfn(tmp_page) << VTD_PAGE_SHIFT) | DMA_PTE_READ | DMA_PTE_WRITE;
992                         if (cmpxchg64(&pte->val, 0ULL, pteval))
993                                 /* Someone else set it while we were thinking; use theirs. */
994                                 free_pgtable_page(tmp_page);
995                         else
996                                 domain_flush_cache(domain, pte, sizeof(*pte));
997                 }
998                 if (level == 1)
999                         break;
1000
1001                 parent = phys_to_virt(dma_pte_addr(pte));
1002                 level--;
1003         }
1004
1005         if (!*target_level)
1006                 *target_level = level;
1007
1008         return pte;
1009 }
1010
1011
1012 /* return address's pte at specific level */
1013 static struct dma_pte *dma_pfn_level_pte(struct dmar_domain *domain,
1014                                          unsigned long pfn,
1015                                          int level, int *large_page)
1016 {
1017         struct dma_pte *parent, *pte = NULL;
1018         int total = agaw_to_level(domain->agaw);
1019         int offset;
1020
1021         parent = domain->pgd;
1022         while (level <= total) {
1023                 offset = pfn_level_offset(pfn, total);
1024                 pte = &parent[offset];
1025                 if (level == total)
1026                         return pte;
1027
1028                 if (!dma_pte_present(pte)) {
1029                         *large_page = total;
1030                         break;
1031                 }
1032
1033                 if (dma_pte_superpage(pte)) {
1034                         *large_page = total;
1035                         return pte;
1036                 }
1037
1038                 parent = phys_to_virt(dma_pte_addr(pte));
1039                 total--;
1040         }
1041         return NULL;
1042 }
1043
1044 /* clear last level pte; a tlb flush should follow */
1045 static void dma_pte_clear_range(struct dmar_domain *domain,
1046                                 unsigned long start_pfn,
1047                                 unsigned long last_pfn)
1048 {
1049         unsigned int large_page = 1;
1050         struct dma_pte *first_pte, *pte;
1051
1052         BUG_ON(!domain_pfn_supported(domain, start_pfn));
1053         BUG_ON(!domain_pfn_supported(domain, last_pfn));
1054         BUG_ON(start_pfn > last_pfn);
1055
1056         /* we don't need lock here; nobody else touches the iova range */
1057         do {
1058                 large_page = 1;
1059                 first_pte = pte = dma_pfn_level_pte(domain, start_pfn, 1, &large_page);
1060                 if (!pte) {
1061                         start_pfn = align_to_level(start_pfn + 1, large_page + 1);
1062                         continue;
1063                 }
1064                 do {
1065                         dma_clear_pte(pte);
1066                         start_pfn += lvl_to_nr_pages(large_page);
1067                         pte++;
1068                 } while (start_pfn <= last_pfn && !first_pte_in_page(pte));
1069
1070                 domain_flush_cache(domain, first_pte,
1071                                    (void *)pte - (void *)first_pte);
1072
1073         } while (start_pfn && start_pfn <= last_pfn);
1074 }
1075
1076 static void dma_pte_free_level(struct dmar_domain *domain, int level,
1077                                struct dma_pte *pte, unsigned long pfn,
1078                                unsigned long start_pfn, unsigned long last_pfn)
1079 {
1080         pfn = max(start_pfn, pfn);
1081         pte = &pte[pfn_level_offset(pfn, level)];
1082
1083         do {
1084                 unsigned long level_pfn;
1085                 struct dma_pte *level_pte;
1086
1087                 if (!dma_pte_present(pte) || dma_pte_superpage(pte))
1088                         goto next;
1089
1090                 level_pfn = pfn & level_mask(level);
1091                 level_pte = phys_to_virt(dma_pte_addr(pte));
1092
1093                 if (level > 2)
1094                         dma_pte_free_level(domain, level - 1, level_pte,
1095                                            level_pfn, start_pfn, last_pfn);
1096
1097                 /* If range covers entire pagetable, free it */
1098                 if (!(start_pfn > level_pfn ||
1099                       last_pfn < level_pfn + level_size(level) - 1)) {
1100                         dma_clear_pte(pte);
1101                         domain_flush_cache(domain, pte, sizeof(*pte));
1102                         free_pgtable_page(level_pte);
1103                 }
1104 next:
1105                 pfn += level_size(level);
1106         } while (!first_pte_in_page(++pte) && pfn <= last_pfn);
1107 }
1108
1109 /* free page table pages. last level pte should already be cleared */
1110 static void dma_pte_free_pagetable(struct dmar_domain *domain,
1111                                    unsigned long start_pfn,
1112                                    unsigned long last_pfn)
1113 {
1114         BUG_ON(!domain_pfn_supported(domain, start_pfn));
1115         BUG_ON(!domain_pfn_supported(domain, last_pfn));
1116         BUG_ON(start_pfn > last_pfn);
1117
1118         dma_pte_clear_range(domain, start_pfn, last_pfn);
1119
1120         /* We don't need lock here; nobody else touches the iova range */
1121         dma_pte_free_level(domain, agaw_to_level(domain->agaw),
1122                            domain->pgd, 0, start_pfn, last_pfn);
1123
1124         /* free pgd */
1125         if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) {
1126                 free_pgtable_page(domain->pgd);
1127                 domain->pgd = NULL;
1128         }
1129 }
1130
1131 /* When a page at a given level is being unlinked from its parent, we don't
1132    need to *modify* it at all. All we need to do is make a list of all the
1133    pages which can be freed just as soon as we've flushed the IOTLB and we
1134    know the hardware page-walk will no longer touch them.
1135    The 'pte' argument is the *parent* PTE, pointing to the page that is to
1136    be freed. */
1137 static struct page *dma_pte_list_pagetables(struct dmar_domain *domain,
1138                                             int level, struct dma_pte *pte,
1139                                             struct page *freelist)
1140 {
1141         struct page *pg;
1142
1143         pg = pfn_to_page(dma_pte_addr(pte) >> PAGE_SHIFT);
1144         pg->freelist = freelist;
1145         freelist = pg;
1146
1147         if (level == 1)
1148                 return freelist;
1149
1150         pte = page_address(pg);
1151         do {
1152                 if (dma_pte_present(pte) && !dma_pte_superpage(pte))
1153                         freelist = dma_pte_list_pagetables(domain, level - 1,
1154                                                            pte, freelist);
1155                 pte++;
1156         } while (!first_pte_in_page(pte));
1157
1158         return freelist;
1159 }
1160
1161 static struct page *dma_pte_clear_level(struct dmar_domain *domain, int level,
1162                                         struct dma_pte *pte, unsigned long pfn,
1163                                         unsigned long start_pfn,
1164                                         unsigned long last_pfn,
1165                                         struct page *freelist)
1166 {
1167         struct dma_pte *first_pte = NULL, *last_pte = NULL;
1168
1169         pfn = max(start_pfn, pfn);
1170         pte = &pte[pfn_level_offset(pfn, level)];
1171
1172         do {
1173                 unsigned long level_pfn;
1174
1175                 if (!dma_pte_present(pte))
1176                         goto next;
1177
1178                 level_pfn = pfn & level_mask(level);
1179
1180                 /* If range covers entire pagetable, free it */
1181                 if (start_pfn <= level_pfn &&
1182                     last_pfn >= level_pfn + level_size(level) - 1) {
1183                         /* These subordinate page tables are going away entirely. Don't
1184                            bother to clear them; we're just going to *free* them. */
1185                         if (level > 1 && !dma_pte_superpage(pte))
1186                                 freelist = dma_pte_list_pagetables(domain, level - 1, pte, freelist);
1187
1188                         dma_clear_pte(pte);
1189                         if (!first_pte)
1190                                 first_pte = pte;
1191                         last_pte = pte;
1192                 } else if (level > 1) {
1193                         /* Recurse down into a level that isn't *entirely* obsolete */
1194                         freelist = dma_pte_clear_level(domain, level - 1,
1195                                                        phys_to_virt(dma_pte_addr(pte)),
1196                                                        level_pfn, start_pfn, last_pfn,
1197                                                        freelist);
1198                 }
1199 next:
1200                 pfn += level_size(level);
1201         } while (!first_pte_in_page(++pte) && pfn <= last_pfn);
1202
1203         if (first_pte)
1204                 domain_flush_cache(domain, first_pte,
1205                                    (void *)++last_pte - (void *)first_pte);
1206
1207         return freelist;
1208 }
1209
1210 /* We can't just free the pages because the IOMMU may still be walking
1211    the page tables, and may have cached the intermediate levels. The
1212    pages can only be freed after the IOTLB flush has been done. */
1213 struct page *domain_unmap(struct dmar_domain *domain,
1214                           unsigned long start_pfn,
1215                           unsigned long last_pfn)
1216 {
1217         struct page *freelist = NULL;
1218
1219         BUG_ON(!domain_pfn_supported(domain, start_pfn));
1220         BUG_ON(!domain_pfn_supported(domain, last_pfn));
1221         BUG_ON(start_pfn > last_pfn);
1222
1223         /* we don't need lock here; nobody else touches the iova range */
1224         freelist = dma_pte_clear_level(domain, agaw_to_level(domain->agaw),
1225                                        domain->pgd, 0, start_pfn, last_pfn, NULL);
1226
1227         /* free pgd */
1228         if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) {
1229                 struct page *pgd_page = virt_to_page(domain->pgd);
1230                 pgd_page->freelist = freelist;
1231                 freelist = pgd_page;
1232
1233                 domain->pgd = NULL;
1234         }
1235
1236         return freelist;
1237 }
1238
1239 void dma_free_pagelist(struct page *freelist)
1240 {
1241         struct page *pg;
1242
1243         while ((pg = freelist)) {
1244                 freelist = pg->freelist;
1245                 free_pgtable_page(page_address(pg));
1246         }
1247 }
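/*
 * A minimal sketch of the expected calling sequence (assuming a populated
 * domain and its attached iommu): collect the page-table pages with
 * domain_unmap(), flush the IOTLB, and only then free the list:
 *
 *      struct page *freelist;
 *
 *      freelist = domain_unmap(domain, start_pfn, last_pfn);
 *      iommu_flush_iotlb_psi(iommu, domain, start_pfn,
 *                            last_pfn - start_pfn + 1, 0, 0);
 *      dma_free_pagelist(freelist);
 */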
1248
1249 /* iommu handling */
1250 static int iommu_alloc_root_entry(struct intel_iommu *iommu)
1251 {
1252         struct root_entry *root;
1253         unsigned long flags;
1254
1255         root = (struct root_entry *)alloc_pgtable_page(iommu->node);
1256         if (!root) {
1257                 pr_err("Allocating root entry for %s failed\n",
1258                         iommu->name);
1259                 return -ENOMEM;
1260         }
1261
1262         __iommu_flush_cache(iommu, root, ROOT_SIZE);
1263
1264         spin_lock_irqsave(&iommu->lock, flags);
1265         iommu->root_entry = root;
1266         spin_unlock_irqrestore(&iommu->lock, flags);
1267
1268         return 0;
1269 }
1270
1271 static void iommu_set_root_entry(struct intel_iommu *iommu)
1272 {
1273         u64 addr;
1274         u32 sts;
1275         unsigned long flag;
1276
1277         addr = virt_to_phys(iommu->root_entry);
1278         if (ecs_enabled(iommu))
1279                 addr |= DMA_RTADDR_RTT;
1280
1281         raw_spin_lock_irqsave(&iommu->register_lock, flag);
1282         dmar_writeq(iommu->reg + DMAR_RTADDR_REG, addr);
1283
1284         writel(iommu->gcmd | DMA_GCMD_SRTP, iommu->reg + DMAR_GCMD_REG);
1285
1286         /* Make sure hardware completes it */
1287         IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
1288                       readl, (sts & DMA_GSTS_RTPS), sts);
1289
1290         raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1291 }
1292
1293 static void iommu_flush_write_buffer(struct intel_iommu *iommu)
1294 {
1295         u32 val;
1296         unsigned long flag;
1297
1298         if (!rwbf_quirk && !cap_rwbf(iommu->cap))
1299                 return;
1300
1301         raw_spin_lock_irqsave(&iommu->register_lock, flag);
1302         writel(iommu->gcmd | DMA_GCMD_WBF, iommu->reg + DMAR_GCMD_REG);
1303
1304         /* Make sure hardware completes it */
1305         IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
1306                       readl, (!(val & DMA_GSTS_WBFS)), val);
1307
1308         raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1309 }
1310
1311 /* return value determines if we need a write buffer flush */
1312 static void __iommu_flush_context(struct intel_iommu *iommu,
1313                                   u16 did, u16 source_id, u8 function_mask,
1314                                   u64 type)
1315 {
1316         u64 val = 0;
1317         unsigned long flag;
1318
1319         switch (type) {
1320         case DMA_CCMD_GLOBAL_INVL:
1321                 val = DMA_CCMD_GLOBAL_INVL;
1322                 break;
1323         case DMA_CCMD_DOMAIN_INVL:
1324                 val = DMA_CCMD_DOMAIN_INVL|DMA_CCMD_DID(did);
1325                 break;
1326         case DMA_CCMD_DEVICE_INVL:
1327                 val = DMA_CCMD_DEVICE_INVL|DMA_CCMD_DID(did)
1328                         | DMA_CCMD_SID(source_id) | DMA_CCMD_FM(function_mask);
1329                 break;
1330         default:
1331                 BUG();
1332         }
1333         val |= DMA_CCMD_ICC;
1334
1335         raw_spin_lock_irqsave(&iommu->register_lock, flag);
1336         dmar_writeq(iommu->reg + DMAR_CCMD_REG, val);
1337
1338         /* Make sure hardware completes it */
1339         IOMMU_WAIT_OP(iommu, DMAR_CCMD_REG,
1340                 dmar_readq, (!(val & DMA_CCMD_ICC)), val);
1341
1342         raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1343 }
1344
1345 /* return value determines if we need a write buffer flush */
1346 static void __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did,
1347                                 u64 addr, unsigned int size_order, u64 type)
1348 {
1349         int tlb_offset = ecap_iotlb_offset(iommu->ecap);
1350         u64 val = 0, val_iva = 0;
1351         unsigned long flag;
1352
1353         switch (type) {
1354         case DMA_TLB_GLOBAL_FLUSH:
1355                 /* global flush doesn't need to set IVA_REG */
1356                 val = DMA_TLB_GLOBAL_FLUSH|DMA_TLB_IVT;
1357                 break;
1358         case DMA_TLB_DSI_FLUSH:
1359                 val = DMA_TLB_DSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
1360                 break;
1361         case DMA_TLB_PSI_FLUSH:
1362                 val = DMA_TLB_PSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
1363                 /* IH bit is passed in as part of address */
1364                 val_iva = size_order | addr;
1365                 break;
1366         default:
1367                 BUG();
1368         }
1369         /* Note: set drain read/write */
1370 #if 0
1371         /*
1372          * This is probably only there to be extra safe; it looks like we
1373          * can ignore it without any impact.
1374          */
1375         if (cap_read_drain(iommu->cap))
1376                 val |= DMA_TLB_READ_DRAIN;
1377 #endif
1378         if (cap_write_drain(iommu->cap))
1379                 val |= DMA_TLB_WRITE_DRAIN;
1380
1381         raw_spin_lock_irqsave(&iommu->register_lock, flag);
1382         /* Note: Only uses first TLB reg currently */
1383         if (val_iva)
1384                 dmar_writeq(iommu->reg + tlb_offset, val_iva);
1385         dmar_writeq(iommu->reg + tlb_offset + 8, val);
1386
1387         /* Make sure hardware completes it */
1388         IOMMU_WAIT_OP(iommu, tlb_offset + 8,
1389                 dmar_readq, (!(val & DMA_TLB_IVT)), val);
1390
1391         raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1392
1393         /* check IOTLB invalidation granularity */
1394         if (DMA_TLB_IAIG(val) == 0)
1395                 pr_err("Flush IOTLB failed\n");
1396         if (DMA_TLB_IAIG(val) != DMA_TLB_IIRG(type))
1397                 pr_debug("TLB flush request %Lx, actual %Lx\n",
1398                         (unsigned long long)DMA_TLB_IIRG(type),
1399                         (unsigned long long)DMA_TLB_IAIG(val));
1400 }
1401
1402 static struct device_domain_info *
1403 iommu_support_dev_iotlb (struct dmar_domain *domain, struct intel_iommu *iommu,
1404                          u8 bus, u8 devfn)
1405 {
1406         bool found = false;
1407         unsigned long flags;
1408         struct device_domain_info *info;
1409         struct pci_dev *pdev;
1410
1411         if (!ecap_dev_iotlb_support(iommu->ecap))
1412                 return NULL;
1413
1414         if (!iommu->qi)
1415                 return NULL;
1416
1417         spin_lock_irqsave(&device_domain_lock, flags);
1418         list_for_each_entry(info, &domain->devices, link)
1419                 if (info->iommu == iommu && info->bus == bus &&
1420                     info->devfn == devfn) {
1421                         found = true;
1422                         break;
1423                 }
1424         spin_unlock_irqrestore(&device_domain_lock, flags);
1425
1426         if (!found || !info->dev || !dev_is_pci(info->dev))
1427                 return NULL;
1428
1429         pdev = to_pci_dev(info->dev);
1430
1431         if (!pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ATS))
1432                 return NULL;
1433
1434         if (!dmar_find_matched_atsr_unit(pdev))
1435                 return NULL;
1436
1437         return info;
1438 }
1439
1440 static void iommu_enable_dev_iotlb(struct device_domain_info *info)
1441 {
1442         if (!info || !dev_is_pci(info->dev))
1443                 return;
1444
1445         pci_enable_ats(to_pci_dev(info->dev), VTD_PAGE_SHIFT);
1446 }
1447
1448 static void iommu_disable_dev_iotlb(struct device_domain_info *info)
1449 {
1450         if (!info->dev || !dev_is_pci(info->dev) ||
1451             !pci_ats_enabled(to_pci_dev(info->dev)))
1452                 return;
1453
1454         pci_disable_ats(to_pci_dev(info->dev));
1455 }
1456
1457 static void iommu_flush_dev_iotlb(struct dmar_domain *domain,
1458                                   u64 addr, unsigned mask)
1459 {
1460         u16 sid, qdep;
1461         unsigned long flags;
1462         struct device_domain_info *info;
1463
1464         spin_lock_irqsave(&device_domain_lock, flags);
1465         list_for_each_entry(info, &domain->devices, link) {
1466                 struct pci_dev *pdev;
1467                 if (!info->dev || !dev_is_pci(info->dev))
1468                         continue;
1469
1470                 pdev = to_pci_dev(info->dev);
1471                 if (!pci_ats_enabled(pdev))
1472                         continue;
1473
1474                 sid = info->bus << 8 | info->devfn;
1475                 qdep = pci_ats_queue_depth(pdev);
1476                 qi_flush_dev_iotlb(info->iommu, sid, qdep, addr, mask);
1477         }
1478         spin_unlock_irqrestore(&device_domain_lock, flags);
1479 }
1480
1481 static void iommu_flush_iotlb_psi(struct intel_iommu *iommu,
1482                                   struct dmar_domain *domain,
1483                                   unsigned long pfn, unsigned int pages,
1484                                   int ih, int map)
1485 {
1486         unsigned int mask = ilog2(__roundup_pow_of_two(pages));
1487         uint64_t addr = (uint64_t)pfn << VTD_PAGE_SHIFT;
1488         u16 did = domain->iommu_did[iommu->seq_id];
1489
1490         BUG_ON(pages == 0);
1491
1492         if (ih)
1493                 ih = 1 << 6;
1494         /*
1495          * Fall back to a domain-selective flush if there is no PSI support
1496          * or the size is too big.
1497          * PSI requires the page size to be 2 ^ x, and the base address to be
1498          * naturally aligned to that size.
1499          */
1500         if (!cap_pgsel_inv(iommu->cap) || mask > cap_max_amask_val(iommu->cap))
1501                 iommu->flush.flush_iotlb(iommu, did, 0, 0,
1502                                                 DMA_TLB_DSI_FLUSH);
1503         else
1504                 iommu->flush.flush_iotlb(iommu, did, addr | ih, mask,
1505                                                 DMA_TLB_PSI_FLUSH);
1506
1507         /*
1508          * In caching mode, changes of pages from non-present to present require
1509          * flush. However, device IOTLB doesn't need to be flushed in this case.
1510          */
1511         if (!cap_caching_mode(iommu->cap) || !map)
1512                 iommu_flush_dev_iotlb(get_iommu_domain(iommu, did),
1513                                       addr, mask);
1514 }
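/*
 * Worked example of the PSI mask computation above: invalidating 300
 * pages starting at a suitably aligned pfn gives
 * mask == ilog2(__roundup_pow_of_two(300)) == ilog2(512) == 9, i.e. a
 * 512-page (2MiB) invalidation; if PSI is unsupported or mask exceeds
 * cap_max_amask_val(), the code above falls back to a domain-selective
 * flush instead.
 */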
1515
1516 static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu)
1517 {
1518         u32 pmen;
1519         unsigned long flags;
1520
1521         raw_spin_lock_irqsave(&iommu->register_lock, flags);
1522         pmen = readl(iommu->reg + DMAR_PMEN_REG);
1523         pmen &= ~DMA_PMEN_EPM;
1524         writel(pmen, iommu->reg + DMAR_PMEN_REG);
1525
1526         /* wait for the protected region status bit to clear */
1527         IOMMU_WAIT_OP(iommu, DMAR_PMEN_REG,
1528                 readl, !(pmen & DMA_PMEN_PRS), pmen);
1529
1530         raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
1531 }
1532
1533 static void iommu_enable_translation(struct intel_iommu *iommu)
1534 {
1535         u32 sts;
1536         unsigned long flags;
1537
1538         raw_spin_lock_irqsave(&iommu->register_lock, flags);
1539         iommu->gcmd |= DMA_GCMD_TE;
1540         writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
1541
1542         /* Make sure hardware completes it */
1543         IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
1544                       readl, (sts & DMA_GSTS_TES), sts);
1545
1546         raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
1547 }
1548
1549 static void iommu_disable_translation(struct intel_iommu *iommu)
1550 {
1551         u32 sts;
1552         unsigned long flag;
1553
1554         raw_spin_lock_irqsave(&iommu->register_lock, flag);
1555         iommu->gcmd &= ~DMA_GCMD_TE;
1556         writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
1557
1558         /* Make sure hardware completes it */
1559         IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
1560                       readl, (!(sts & DMA_GSTS_TES)), sts);
1561
1562         raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1563 }
1564
1565
1566 static int iommu_init_domains(struct intel_iommu *iommu)
1567 {
1568         u32 ndomains, nlongs;
1569         size_t size;
1570
1571         ndomains = cap_ndoms(iommu->cap);
1572         pr_debug("%s: Number of Domains supported <%d>\n",
1573                  iommu->name, ndomains);
1574         nlongs = BITS_TO_LONGS(ndomains);
1575
1576         spin_lock_init(&iommu->lock);
1577
1578         iommu->domain_ids = kcalloc(nlongs, sizeof(unsigned long), GFP_KERNEL);
1579         if (!iommu->domain_ids) {
1580                 pr_err("%s: Allocating domain id array failed\n",
1581                        iommu->name);
1582                 return -ENOMEM;
1583         }
1584
1585         size = ((ndomains >> 8) + 1) * sizeof(struct dmar_domain **);
1586         iommu->domains = kzalloc(size, GFP_KERNEL);
1587
1588         if (iommu->domains) {
1589                 size = 256 * sizeof(struct dmar_domain *);
1590                 iommu->domains[0] = kzalloc(size, GFP_KERNEL);
1591         }
1592
1593         if (!iommu->domains || !iommu->domains[0]) {
1594                 pr_err("%s: Allocating domain array failed\n",
1595                        iommu->name);
1596                 kfree(iommu->domain_ids);
1597                 kfree(iommu->domains);
1598                 iommu->domain_ids = NULL;
1599                 iommu->domains    = NULL;
1600                 return -ENOMEM;
1601         }
1602
1605         /*
1606          * If Caching mode is set, then invalid translations are tagged
1607          * with domain-id 0, hence we need to pre-allocate it. We also
1608          * use domain-id 0 as a marker for non-allocated domain-id, so
1609          * make sure it is not used for a real domain.
1610          */
1611         set_bit(0, iommu->domain_ids);
1612
1613         return 0;
1614 }
1615
1616 static void disable_dmar_iommu(struct intel_iommu *iommu)
1617 {
1618         struct device_domain_info *info, *tmp;
1619
1620         if (!iommu->domains || !iommu->domain_ids)
1621                 return;
1622
1623         list_for_each_entry_safe(info, tmp, &device_domain_list, global) {
1624                 struct dmar_domain *domain;
1625
1626                 if (info->iommu != iommu)
1627                         continue;
1628
1629                 if (!info->dev || !info->domain)
1630                         continue;
1631
1632                 domain = info->domain;
1633
1634                 dmar_remove_one_dev_info(domain, info->dev);
1635
1636                 if (!domain_type_is_vm_or_si(domain))
1637                         domain_exit(domain);
1638         }
1639
1640         if (iommu->gcmd & DMA_GCMD_TE)
1641                 iommu_disable_translation(iommu);
1642 }
1643
1644 static void free_dmar_iommu(struct intel_iommu *iommu)
1645 {
1646         if ((iommu->domains) && (iommu->domain_ids)) {
1647                 int elems = (cap_ndoms(iommu->cap) >> 8) + 1;
1648                 int i;
1649
1650                 for (i = 0; i < elems; i++)
1651                         kfree(iommu->domains[i]);
1652                 kfree(iommu->domains);
1653                 kfree(iommu->domain_ids);
1654                 iommu->domains = NULL;
1655                 iommu->domain_ids = NULL;
1656         }
1657
1658         g_iommus[iommu->seq_id] = NULL;
1659
1660         /* free context mapping */
1661         free_context_table(iommu);
1662 }
1663
1664 static struct dmar_domain *alloc_domain(int flags)
1665 {
1666         struct dmar_domain *domain;
1667
1668         domain = alloc_domain_mem();
1669         if (!domain)
1670                 return NULL;
1671
1672         memset(domain, 0, sizeof(*domain));
1673         domain->nid = -1;
1674         domain->flags = flags;
1675         spin_lock_init(&domain->iommu_lock);
1676         INIT_LIST_HEAD(&domain->devices);
1677
1678         return domain;
1679 }
1680
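/*
 * Assign a domain id for @domain on @iommu, or return the id that is
 * already in use.  Returns -ENOSPC when the id space is exhausted.
 * Called with iommu->lock held.
 */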
1681 static int __iommu_attach_domain(struct dmar_domain *domain,
1682                                  struct intel_iommu *iommu)
1683 {
1684         int num;
1685         unsigned long ndomains;
1686
1687         num = domain->iommu_did[iommu->seq_id];
1688         if (num)
1689                 return num;
1690
1691         ndomains = cap_ndoms(iommu->cap);
1692         num      = find_first_zero_bit(iommu->domain_ids, ndomains);
1693
1694         if (num < ndomains) {
1695                 set_bit(num, iommu->domain_ids);
1696                 set_iommu_domain(iommu, num, domain);
1697                 domain->iommu_did[iommu->seq_id] = num;
1698         } else {
1699                 num = -ENOSPC;
1700         }
1701
1702         if (num < 0)
1703                 pr_err("%s: No free domain ids\n", iommu->name);
1704
1705         return num;
1706 }
1707
1708 static int iommu_attach_domain(struct dmar_domain *domain,
1709                                struct intel_iommu *iommu)
1710 {
1711         int num;
1712         unsigned long flags;
1713
1714         spin_lock_irqsave(&iommu->lock, flags);
1715         num = __iommu_attach_domain(domain, iommu);
1716         spin_unlock_irqrestore(&iommu->lock, flags);
1717
1718         return num;
1719 }
1720
1721 static void iommu_detach_domain(struct dmar_domain *domain,
1722                                 struct intel_iommu *iommu)
1723 {
1724         unsigned long flags;
1725         int num;
1726
1727         spin_lock_irqsave(&iommu->lock, flags);
1728
1729         num = domain->iommu_did[iommu->seq_id];
1730
1731         if (num != 0) {
1732                 clear_bit(num, iommu->domain_ids);
1733                 set_iommu_domain(iommu, num, NULL);
1734         }
1736
1737         spin_unlock_irqrestore(&iommu->lock, flags);
1738 }
1739
1740 static void domain_attach_iommu(struct dmar_domain *domain,
1741                                struct intel_iommu *iommu)
1742 {
1743         unsigned long flags;
1744
1745         spin_lock_irqsave(&domain->iommu_lock, flags);
1746         domain->iommu_refcnt[iommu->seq_id] += 1;
1747         domain->iommu_count += 1;
1748         if (domain->iommu_refcnt[iommu->seq_id] == 1) {
1749                 domain->nid = iommu->node;
1750                 domain_update_iommu_cap(domain);
1751         }
1752         spin_unlock_irqrestore(&domain->iommu_lock, flags);
1753 }
1754
1755 static int domain_detach_iommu(struct dmar_domain *domain,
1756                                struct intel_iommu *iommu)
1757 {
1758         unsigned long flags;
1759         int count = INT_MAX;
1760
1761         spin_lock_irqsave(&domain->iommu_lock, flags);
1762         domain->iommu_refcnt[iommu->seq_id] -= 1;
1763         count = --domain->iommu_count;
1764         if (domain->iommu_refcnt[iommu->seq_id] == 0) {
1765                 domain_update_iommu_cap(domain);
1766                 domain->iommu_did[iommu->seq_id] = 0;
1767         }
1768         spin_unlock_irqrestore(&domain->iommu_lock, flags);
1769
1770         return count;
1771 }
1772
1773 static struct iova_domain reserved_iova_list;
1774 static struct lock_class_key reserved_rbtree_key;
1775
1776 static int dmar_init_reserved_ranges(void)
1777 {
1778         struct pci_dev *pdev = NULL;
1779         struct iova *iova;
1780         int i;
1781
1782         init_iova_domain(&reserved_iova_list, VTD_PAGE_SIZE, IOVA_START_PFN,
1783                         DMA_32BIT_PFN);
1784
1785         lockdep_set_class(&reserved_iova_list.iova_rbtree_lock,
1786                 &reserved_rbtree_key);
1787
1788         /* IOAPIC ranges shouldn't be accessed by DMA */
1789         iova = reserve_iova(&reserved_iova_list, IOVA_PFN(IOAPIC_RANGE_START),
1790                 IOVA_PFN(IOAPIC_RANGE_END));
1791         if (!iova) {
1792                 pr_err("Reserve IOAPIC range failed\n");
1793                 return -ENODEV;
1794         }
1795
1796         /* Reserve all PCI MMIO to avoid peer-to-peer access */
1797         for_each_pci_dev(pdev) {
1798                 struct resource *r;
1799
1800                 for (i = 0; i < PCI_NUM_RESOURCES; i++) {
1801                         r = &pdev->resource[i];
1802                         if (!r->flags || !(r->flags & IORESOURCE_MEM))
1803                                 continue;
1804                         iova = reserve_iova(&reserved_iova_list,
1805                                             IOVA_PFN(r->start),
1806                                             IOVA_PFN(r->end));
1807                         if (!iova) {
1808                                 pr_err("Reserve iova failed\n");
1809                                 return -ENODEV;
1810                         }
1811                 }
1812         }
1813         return 0;
1814 }
1815
1816 static void domain_reserve_special_ranges(struct dmar_domain *domain)
1817 {
1818         copy_reserved_iova(&reserved_iova_list, &domain->iovad);
1819 }
1820
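/*
 * Round a guest address width up to the next width the page tables can
 * express (12 bits of page offset plus a multiple of 9 bits per level),
 * capped at 64.  For example, gaw = 48 is already aligned and is returned
 * unchanged, while gaw = 40 is rounded up to 48.
 */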
1821 static inline int guestwidth_to_adjustwidth(int gaw)
1822 {
1823         int agaw;
1824         int r = (gaw - 12) % 9;
1825
1826         if (r == 0)
1827                 agaw = gaw;
1828         else
1829                 agaw = gaw + 9 - r;
1830         if (agaw > 64)
1831                 agaw = 64;
1832         return agaw;
1833 }
1834
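/*
 * Initialise @domain for use behind @iommu: set up its IOVA allocator and
 * reserve the special ranges, clamp the requested guest width to what the
 * hardware supports (cap_mgaw) and pick a supported AGAW, record the
 * coherency/snooping/superpage capabilities, and allocate the top-level
 * page directory.
 */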
1835 static int domain_init(struct dmar_domain *domain, struct intel_iommu *iommu,
1836                        int guest_width)
1837 {
1838         int adjust_width, agaw;
1839         unsigned long sagaw;
1840
1841         init_iova_domain(&domain->iovad, VTD_PAGE_SIZE, IOVA_START_PFN,
1842                         DMA_32BIT_PFN);
1843         domain_reserve_special_ranges(domain);
1844
1845         /* calculate AGAW */
1846         if (guest_width > cap_mgaw(iommu->cap))
1847                 guest_width = cap_mgaw(iommu->cap);
1848         domain->gaw = guest_width;
1849         adjust_width = guestwidth_to_adjustwidth(guest_width);
1850         agaw = width_to_agaw(adjust_width);
1851         sagaw = cap_sagaw(iommu->cap);
1852         if (!test_bit(agaw, &sagaw)) {
1853                 /* hardware doesn't support it, choose a bigger one */
1854                 pr_debug("Hardware doesn't support agaw %d\n", agaw);
1855                 agaw = find_next_bit(&sagaw, 5, agaw);
1856                 if (agaw >= 5)
1857                         return -ENODEV;
1858         }
1859         domain->agaw = agaw;
1860
1861         if (ecap_coherent(iommu->ecap))
1862                 domain->iommu_coherency = 1;
1863         else
1864                 domain->iommu_coherency = 0;
1865
1866         if (ecap_sc_support(iommu->ecap))
1867                 domain->iommu_snooping = 1;
1868         else
1869                 domain->iommu_snooping = 0;
1870
1871         if (intel_iommu_superpage)
1872                 domain->iommu_superpage = fls(cap_super_page_val(iommu->cap));
1873         else
1874                 domain->iommu_superpage = 0;
1875
1876         domain->nid = iommu->node;
1877
1878         /* always allocate the top pgd */
1879         domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
1880         if (!domain->pgd)
1881                 return -ENOMEM;
1882         __iommu_flush_cache(iommu, domain->pgd, PAGE_SIZE);
1883         return 0;
1884 }
1885
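/*
 * Tear down @domain: remove any attached devices, release its IOVA
 * allocator, unmap and free its page tables, and detach it from every
 * IOMMU it was attached to.
 */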
1886 static void domain_exit(struct dmar_domain *domain)
1887 {
1888         struct page *freelist = NULL;
1889         int i;
1890
1891         /* Domain 0 is reserved, so don't process it */
1892         if (!domain)
1893                 return;
1894
1895         /* Flush any lazy unmaps that may reference this domain */
1896         if (!intel_iommu_strict)
1897                 flush_unmaps_timeout(0);
1898
1899         /* remove associated devices */
1900         domain_remove_dev_info(domain);
1901
1902         /* destroy iovas */
1903         put_iova_domain(&domain->iovad);
1904
1905         freelist = domain_unmap(domain, 0, DOMAIN_MAX_PFN(domain->gaw));
1906
1907         /* clear attached or cached domains */
1908         rcu_read_lock();
1909         for_each_domain_iommu(i, domain)
1910                 iommu_detach_domain(domain, g_iommus[i]);
1911         rcu_read_unlock();
1912
1913         dma_free_pagelist(freelist);
1914
1915         free_domain_mem(domain);
1916 }
1917
1918 static int domain_context_mapping_one(struct dmar_domain *domain,
1919                                       struct intel_iommu *iommu,
1920                                       u8 bus, u8 devfn)
1921 {
1922         int translation = CONTEXT_TT_MULTI_LEVEL;
1923         struct device_domain_info *info = NULL;
1924         struct context_entry *context;
1925         unsigned long flags;
1926         struct dma_pte *pgd;
1927         int id;
1928         int agaw;
1929
1930         if (hw_pass_through && domain_type_is_si(domain))
1931                 translation = CONTEXT_TT_PASS_THROUGH;
1932
1933         pr_debug("Set context mapping for %02x:%02x.%d\n",
1934                 bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
1935
1936         BUG_ON(!domain->pgd);
1937
1938         spin_lock_irqsave(&iommu->lock, flags);
1939         context = iommu_context_addr(iommu, bus, devfn, 1);
1940         spin_unlock_irqrestore(&iommu->lock, flags);
1941         if (!context)
1942                 return -ENOMEM;
1943         spin_lock_irqsave(&iommu->lock, flags);
1944         if (context_present(context)) {
1945                 spin_unlock_irqrestore(&iommu->lock, flags);
1946                 return 0;
1947         }
1948
1949         pgd = domain->pgd;
1950
1951         id = __iommu_attach_domain(domain, iommu);
1952         if (id < 0) {
1953                 spin_unlock_irqrestore(&iommu->lock, flags);
1954                 pr_err("%s: No free domain ids\n", iommu->name);
1955                 return -EFAULT;
1956         }
1957
1958         context_clear_entry(context);
1959         context_set_domain_id(context, id);
1960
1961         /*
1962          * Skip top levels of page tables for IOMMUs which have less agaw
1963          * than the default.  Unnecessary for PT mode.
1964          */
1965         if (translation != CONTEXT_TT_PASS_THROUGH) {
1966                 for (agaw = domain->agaw; agaw != iommu->agaw; agaw--) {
1967                         pgd = phys_to_virt(dma_pte_addr(pgd));
1968                         if (!dma_pte_present(pgd)) {
1969                                 spin_unlock_irqrestore(&iommu->lock, flags);
1970                                 return -ENOMEM;
1971                         }
1972                 }
1973
1974                 info = iommu_support_dev_iotlb(domain, iommu, bus, devfn);
1975                 translation = info ? CONTEXT_TT_DEV_IOTLB :
1976                                      CONTEXT_TT_MULTI_LEVEL;
1977
1978                 context_set_address_root(context, virt_to_phys(pgd));
1979                 context_set_address_width(context, iommu->agaw);
1980         } else {
1981                 /*
1982                  * In pass through mode, AW must be programmed to
1983                  * indicate the largest AGAW value supported by
1984                  * hardware. And ASR is ignored by hardware.
1985                  */
1986                 context_set_address_width(context, iommu->msagaw);
1987         }
1988
1989         context_set_translation_type(context, translation);
1990         context_set_fault_enable(context);
1991         context_set_present(context);
1992         domain_flush_cache(domain, context, sizeof(*context));
1993
1994         /*
1995          * It's a non-present to present mapping. If hardware doesn't cache
1996          * non-present entries we only need to flush the write-buffer. If it
1997          * _does_ cache non-present entries, then it does so in the special
1998          * domain #0, which we have to flush:
1999          */
2000         if (cap_caching_mode(iommu->cap)) {
2001                 iommu->flush.flush_context(iommu, 0,
2002                                            (((u16)bus) << 8) | devfn,
2003                                            DMA_CCMD_MASK_NOBIT,
2004                                            DMA_CCMD_DEVICE_INVL);
2005                 iommu->flush.flush_iotlb(iommu, id, 0, 0, DMA_TLB_DSI_FLUSH);
2006         } else {
2007                 iommu_flush_write_buffer(iommu);
2008         }
2009         iommu_enable_dev_iotlb(info);
2010         spin_unlock_irqrestore(&iommu->lock, flags);
2011
2012         domain_attach_iommu(domain, iommu);
2013
2014         return 0;
2015 }
2016
2017 struct domain_context_mapping_data {
2018         struct dmar_domain *domain;
2019         struct intel_iommu *iommu;
2020 };
2021
2022 static int domain_context_mapping_cb(struct pci_dev *pdev,
2023                                      u16 alias, void *opaque)
2024 {
2025         struct domain_context_mapping_data *data = opaque;
2026
2027         return domain_context_mapping_one(data->domain, data->iommu,
2028                                           PCI_BUS_NUM(alias), alias & 0xff);
2029 }
2030
2031 static int
2032 domain_context_mapping(struct dmar_domain *domain, struct device *dev)
2033 {
2034         struct intel_iommu *iommu;
2035         u8 bus, devfn;
2036         struct domain_context_mapping_data data;
2037
2038         iommu = device_to_iommu(dev, &bus, &devfn);
2039         if (!iommu)
2040                 return -ENODEV;
2041
2042         if (!dev_is_pci(dev))
2043                 return domain_context_mapping_one(domain, iommu, bus, devfn);
2044
2045         data.domain = domain;
2046         data.iommu = iommu;
2047
2048         return pci_for_each_dma_alias(to_pci_dev(dev),
2049                                       &domain_context_mapping_cb, &data);
2050 }
2051
2052 static int domain_context_mapped_cb(struct pci_dev *pdev,
2053                                     u16 alias, void *opaque)
2054 {
2055         struct intel_iommu *iommu = opaque;
2056
2057         return !device_context_mapped(iommu, PCI_BUS_NUM(alias), alias & 0xff);
2058 }
2059
2060 static int domain_context_mapped(struct device *dev)
2061 {
2062         struct intel_iommu *iommu;
2063         u8 bus, devfn;
2064
2065         iommu = device_to_iommu(dev, &bus, &devfn);
2066         if (!iommu)
2067                 return -ENODEV;
2068
2069         if (!dev_is_pci(dev))
2070                 return device_context_mapped(iommu, bus, devfn);
2071
2072         return !pci_for_each_dma_alias(to_pci_dev(dev),
2073                                        domain_context_mapped_cb, iommu);
2074 }
2075
2076 /* Returns a number of VTD pages, but aligned to MM page size */
2077 static inline unsigned long aligned_nrpages(unsigned long host_addr,
2078                                             size_t size)
2079 {
2080         host_addr &= ~PAGE_MASK;
2081         return PAGE_ALIGN(host_addr + size) >> VTD_PAGE_SHIFT;
2082 }
2083
2084 /* Return largest possible superpage level for a given mapping */
2085 static inline int hardware_largepage_caps(struct dmar_domain *domain,
2086                                           unsigned long iov_pfn,
2087                                           unsigned long phy_pfn,
2088                                           unsigned long pages)
2089 {
2090         int support, level = 1;
2091         unsigned long pfnmerge;
2092
2093         support = domain->iommu_superpage;
2094
2095         /* To use a large page, the virtual *and* physical addresses
2096            must be aligned to 2MiB/1GiB/etc. Lower bits set in either
2097            of them will mean we have to use smaller pages. So just
2098            merge them and check both at once. */
2099         pfnmerge = iov_pfn | phy_pfn;
2100
2101         while (support && !(pfnmerge & ~VTD_STRIDE_MASK)) {
2102                 pages >>= VTD_STRIDE_SHIFT;
2103                 if (!pages)
2104                         break;
2105                 pfnmerge >>= VTD_STRIDE_SHIFT;
2106                 level++;
2107                 support--;
2108         }
2109         return level;
2110 }
2111
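/*
 * Install page-table entries mapping @nr_pages VTD pages at @iov_pfn,
 * taking the physical pages either from the scatterlist @sg or, when @sg
 * is NULL, from the contiguous range starting at @phys_pfn.  Superpage
 * entries are used where hardware_largepage_caps() allows, and the newly
 * written PTEs are flushed from the CPU cache in batches.
 */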
2112 static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
2113                             struct scatterlist *sg, unsigned long phys_pfn,
2114                             unsigned long nr_pages, int prot)
2115 {
2116         struct dma_pte *first_pte = NULL, *pte = NULL;
2117         phys_addr_t uninitialized_var(pteval);
2118         unsigned long sg_res = 0;
2119         unsigned int largepage_lvl = 0;
2120         unsigned long lvl_pages = 0;
2121
2122         BUG_ON(!domain_pfn_supported(domain, iov_pfn + nr_pages - 1));
2123
2124         if ((prot & (DMA_PTE_READ|DMA_PTE_WRITE)) == 0)
2125                 return -EINVAL;
2126
2127         prot &= DMA_PTE_READ | DMA_PTE_WRITE | DMA_PTE_SNP;
2128
2129         if (!sg) {
2130                 sg_res = nr_pages;
2131                 pteval = ((phys_addr_t)phys_pfn << VTD_PAGE_SHIFT) | prot;
2132         }
2133
2134         while (nr_pages > 0) {
2135                 uint64_t tmp;
2136
2137                 if (!sg_res) {
2138                         sg_res = aligned_nrpages(sg->offset, sg->length);
2139                         sg->dma_address = ((dma_addr_t)iov_pfn << VTD_PAGE_SHIFT) + sg->offset;
2140                         sg->dma_length = sg->length;
2141                         pteval = page_to_phys(sg_page(sg)) | prot;
2142                         phys_pfn = pteval >> VTD_PAGE_SHIFT;
2143                 }
2144
2145                 if (!pte) {
2146                         largepage_lvl = hardware_largepage_caps(domain, iov_pfn, phys_pfn, sg_res);
2147
2148                         first_pte = pte = pfn_to_dma_pte(domain, iov_pfn, &largepage_lvl);
2149                         if (!pte)
2150                                 return -ENOMEM;
2151                         /* It is a large page */
2152                         if (largepage_lvl > 1) {
2153                                 pteval |= DMA_PTE_LARGE_PAGE;
2154                                 lvl_pages = lvl_to_nr_pages(largepage_lvl);
2155                                 /*
2156                                  * Ensure that old small page tables are
2157                                  * removed to make room for superpage,
2158                                  * if they exist.
2159                                  */
2160                                 dma_pte_free_pagetable(domain, iov_pfn,
2161                                                        iov_pfn + lvl_pages - 1);
2162                         } else {
2163                                 pteval &= ~(uint64_t)DMA_PTE_LARGE_PAGE;
2164                         }
2165
2166                 }
2167                 /* We don't need a lock here; nobody else
2168                  * touches the iova range.
2169                  */
2170                 tmp = cmpxchg64_local(&pte->val, 0ULL, pteval);
2171                 if (tmp) {
2172                         static int dumps = 5;
2173                         pr_crit("ERROR: DMA PTE for vPFN 0x%lx already set (to %llx not %llx)\n",
2174                                 iov_pfn, tmp, (unsigned long long)pteval);
2175                         if (dumps) {
2176                                 dumps--;
2177                                 debug_dma_dump_mappings(NULL);
2178                         }
2179                         WARN_ON(1);
2180                 }
2181
2182                 lvl_pages = lvl_to_nr_pages(largepage_lvl);
2183
2184                 BUG_ON(nr_pages < lvl_pages);
2185                 BUG_ON(sg_res < lvl_pages);
2186
2187                 nr_pages -= lvl_pages;
2188                 iov_pfn += lvl_pages;
2189                 phys_pfn += lvl_pages;
2190                 pteval += lvl_pages * VTD_PAGE_SIZE;
2191                 sg_res -= lvl_pages;
2192
2193                 /* If the next PTE would be the first in a new page, then we
2194                    need to flush the cache on the entries we've just written.
2195                    And then we'll need to recalculate 'pte', so clear it and
2196                    let it get set again in the if (!pte) block above.
2197
2198                    If we're done (!nr_pages) we need to flush the cache too.
2199
2200                    Also if we've been setting superpages, we may need to
2201                    recalculate 'pte' and switch back to smaller pages for the
2202                    end of the mapping, if the trailing size is not enough to
2203                    use another superpage (i.e. sg_res < lvl_pages). */
2204                 pte++;
2205                 if (!nr_pages || first_pte_in_page(pte) ||
2206                     (largepage_lvl > 1 && sg_res < lvl_pages)) {
2207                         domain_flush_cache(domain, first_pte,
2208                                            (void *)pte - (void *)first_pte);
2209                         pte = NULL;
2210                 }
2211
2212                 if (!sg_res && nr_pages)
2213                         sg = sg_next(sg);
2214         }
2215         return 0;
2216 }
2217
2218 static inline int domain_sg_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
2219                                     struct scatterlist *sg, unsigned long nr_pages,
2220                                     int prot)
2221 {
2222         return __domain_mapping(domain, iov_pfn, sg, 0, nr_pages, prot);
2223 }
2224
2225 static inline int domain_pfn_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
2226                                      unsigned long phys_pfn, unsigned long nr_pages,
2227                                      int prot)
2228 {
2229         return __domain_mapping(domain, iov_pfn, NULL, phys_pfn, nr_pages, prot);
2230 }
2231
2232 static void domain_context_clear_one(struct intel_iommu *iommu, u8 bus, u8 devfn)
2233 {
2234         if (!iommu)
2235                 return;
2236
2237         clear_context_table(iommu, bus, devfn);
2238         iommu->flush.flush_context(iommu, 0, 0, 0,
2239                                            DMA_CCMD_GLOBAL_INVL);
2240         iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
2241 }
2242
2243 static inline void unlink_domain_info(struct device_domain_info *info)
2244 {
2245         assert_spin_locked(&device_domain_lock);
2246         list_del(&info->link);
2247         list_del(&info->global);
2248         if (info->dev)
2249                 info->dev->archdata.iommu = NULL;
2250 }
2251
2252 static void domain_remove_dev_info(struct dmar_domain *domain)
2253 {
2254         struct device_domain_info *info, *tmp;
2255
2256         list_for_each_entry_safe(info, tmp, &domain->devices, link)
2257                 dmar_remove_one_dev_info(domain, info->dev);
2258 }
2259
2260 /*
2261  * find_domain
2262  * Note: we use struct device->archdata.iommu to store the info
2263  */
2264 static struct dmar_domain *find_domain(struct device *dev)
2265 {
2266         struct device_domain_info *info;
2267
2268         /* No lock here, assumes no domain exit in normal case */
2269         info = dev->archdata.iommu;
2270         if (info)
2271                 return info->domain;
2272         return NULL;
2273 }
2274
2275 static inline struct device_domain_info *
2276 dmar_search_domain_by_dev_info(int segment, int bus, int devfn)
2277 {
2278         struct device_domain_info *info;
2279
2280         list_for_each_entry(info, &device_domain_list, global)
2281                 if (info->iommu->segment == segment && info->bus == bus &&
2282                     info->devfn == devfn)
2283                         return info;
2284
2285         return NULL;
2286 }
2287
2288 static struct dmar_domain *dmar_insert_one_dev_info(struct intel_iommu *iommu,
2289                                                     int bus, int devfn,
2290                                                     struct device *dev,
2291                                                     struct dmar_domain *domain)
2292 {
2293         struct dmar_domain *found = NULL;
2294         struct device_domain_info *info;
2295         unsigned long flags;
2296
2297         info = alloc_devinfo_mem();
2298         if (!info)
2299                 return NULL;
2300
2301         info->bus = bus;
2302         info->devfn = devfn;
2303         info->dev = dev;
2304         info->domain = domain;
2305         info->iommu = iommu;
2306
2307         spin_lock_irqsave(&device_domain_lock, flags);
2308         if (dev)
2309                 found = find_domain(dev);
2310         else {
2311                 struct device_domain_info *info2;
2312                 info2 = dmar_search_domain_by_dev_info(iommu->segment, bus, devfn);
2313                 if (info2)
2314                         found = info2->domain;
2315         }
2316         if (found) {
2317                 spin_unlock_irqrestore(&device_domain_lock, flags);
2318                 free_devinfo_mem(info);
2319                 /* Caller must free the original domain */
2320                 return found;
2321         }
2322
2323         list_add(&info->link, &domain->devices);
2324         list_add(&info->global, &device_domain_list);
2325         if (dev)
2326                 dev->archdata.iommu = info;
2327         spin_unlock_irqrestore(&device_domain_lock, flags);
2328
2329         if (dev && domain_context_mapping(domain, dev)) {
2330                 pr_err("Domain context map for %s failed\n", dev_name(dev));
2331                 dmar_remove_one_dev_info(domain, dev);
2332                 return NULL;
2333         }
2334
2335         return domain;
2336 }
2337
2338 static int get_last_alias(struct pci_dev *pdev, u16 alias, void *opaque)
2339 {
2340         *(u16 *)opaque = alias;
2341         return 0;
2342 }
2343
2344 /* Find or allocate a domain for a device; the returned domain is initialized */
2345 static struct dmar_domain *get_domain_for_dev(struct device *dev, int gaw)
2346 {
2347         struct device_domain_info *info = NULL;
2348         struct dmar_domain *domain, *tmp;
2349         struct intel_iommu *iommu;
2350         unsigned long flags;
2351         u16 dma_alias;
2352         u8 bus, devfn;
2353
2354         domain = find_domain(dev);
2355         if (domain)
2356                 return domain;
2357
2358         iommu = device_to_iommu(dev, &bus, &devfn);
2359         if (!iommu)
2360                 return NULL;
2361
2362         if (dev_is_pci(dev)) {
2363                 struct pci_dev *pdev = to_pci_dev(dev);
2364
2365                 pci_for_each_dma_alias(pdev, get_last_alias, &dma_alias);
2366
2367                 spin_lock_irqsave(&device_domain_lock, flags);
2368                 info = dmar_search_domain_by_dev_info(pci_domain_nr(pdev->bus),
2369                                                       PCI_BUS_NUM(dma_alias),
2370                                                       dma_alias & 0xff);
2371                 if (info) {
2372                         iommu = info->iommu;
2373                         domain = info->domain;
2374                 }
2375                 spin_unlock_irqrestore(&device_domain_lock, flags);
2376
2377                 /* DMA alias already has a domain, use it */
2378                 if (info)
2379                         goto found_domain;
2380         }
2381
2382         /* Allocate and initialize new domain for the device */
2383         domain = alloc_domain(0);
2384         if (!domain)
2385                 return NULL;
2386         if (iommu_attach_domain(domain, iommu) < 0) {
2387                 free_domain_mem(domain);
2388                 return NULL;
2389         }
2390         domain_attach_iommu(domain, iommu);
2391         if (domain_init(domain, iommu, gaw)) {
2392                 domain_exit(domain);
2393                 return NULL;
2394         }
2395
2396         /* register PCI DMA alias device */
2397         if (dev_is_pci(dev)) {
2398                 tmp = dmar_insert_one_dev_info(iommu, PCI_BUS_NUM(dma_alias),
2399                                                dma_alias & 0xff, NULL, domain);
2400
2401                 if (!tmp || tmp != domain) {
2402                         domain_exit(domain);
2403                         domain = tmp;
2404                 }
2405
2406                 if (!domain)
2407                         return NULL;
2408         }
2409
2410 found_domain:
2411         tmp = dmar_insert_one_dev_info(iommu, bus, devfn, dev, domain);
2412
2413         if (!tmp || tmp != domain) {
2414                 domain_exit(domain);
2415                 domain = tmp;
2416         }
2417
2418         return domain;
2419 }
2420
2421 static int iommu_identity_mapping;
2422 #define IDENTMAP_ALL            1
2423 #define IDENTMAP_GFX            2
2424 #define IDENTMAP_AZALIA         4
2425
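/*
 * Identity-map the physical range [start, end] into @domain: reserve the
 * matching IOVA range and create a 1:1 mapping where the I/O virtual
 * address equals the physical address.
 */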
2426 static int iommu_domain_identity_map(struct dmar_domain *domain,
2427                                      unsigned long long start,
2428                                      unsigned long long end)
2429 {
2430         unsigned long first_vpfn = start >> VTD_PAGE_SHIFT;
2431         unsigned long last_vpfn = end >> VTD_PAGE_SHIFT;
2432
2433         if (!reserve_iova(&domain->iovad, dma_to_mm_pfn(first_vpfn),
2434                           dma_to_mm_pfn(last_vpfn))) {
2435                 pr_err("Reserving iova failed\n");
2436                 return -ENOMEM;
2437         }
2438
2439         pr_debug("Mapping reserved region %llx-%llx\n", start, end);
2440         /*
2441          * The RMRR range might overlap with the physical memory range,
2442          * so clear it first
2443          */
2444         dma_pte_clear_range(domain, first_vpfn, last_vpfn);
2445
2446         return domain_pfn_mapping(domain, first_vpfn, first_vpfn,
2447                                   last_vpfn - first_vpfn + 1,
2448                                   DMA_PTE_READ|DMA_PTE_WRITE);
2449 }
2450
2451 static int iommu_prepare_identity_map(struct device *dev,
2452                                       unsigned long long start,
2453                                       unsigned long long end)
2454 {
2455         struct dmar_domain *domain;
2456         int ret;
2457
2458         domain = get_domain_for_dev(dev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
2459         if (!domain)
2460                 return -ENOMEM;
2461
2462         /* For _hardware_ passthrough, don't bother. But for software
2463            passthrough, we do it anyway -- it may indicate a memory
2464            range which is reserved in E820, and so didn't get set
2465            up to start with in si_domain */
2466         if (domain == si_domain && hw_pass_through) {
2467                 pr_warn("Ignoring identity map for HW passthrough device %s [0x%Lx - 0x%Lx]\n",
2468                         dev_name(dev), start, end);
2469                 return 0;
2470         }
2471
2472         pr_info("Setting identity map for device %s [0x%Lx - 0x%Lx]\n",
2473                 dev_name(dev), start, end);
2474
2475         if (end < start) {
2476                 WARN(1, "Your BIOS is broken; RMRR ends before it starts!\n"
2477                         "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
2478                         dmi_get_system_info(DMI_BIOS_VENDOR),
2479                         dmi_get_system_info(DMI_BIOS_VERSION),
2480                      dmi_get_system_info(DMI_PRODUCT_VERSION));
2481                 ret = -EIO;
2482                 goto error;
2483         }
2484
2485         if (end >> agaw_to_width(domain->agaw)) {
2486                 WARN(1, "Your BIOS is broken; RMRR exceeds permitted address width (%d bits)\n"
2487                      "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
2488                      agaw_to_width(domain->agaw),
2489                      dmi_get_system_info(DMI_BIOS_VENDOR),
2490                      dmi_get_system_info(DMI_BIOS_VERSION),
2491                      dmi_get_system_info(DMI_PRODUCT_VERSION));
2492                 ret = -EIO;
2493                 goto error;
2494         }
2495
2496         ret = iommu_domain_identity_map(domain, start, end);
2497         if (ret)
2498                 goto error;
2499
2500         return 0;
2501
2502  error:
2503         domain_exit(domain);
2504         return ret;
2505 }
2506
2507 static inline int iommu_prepare_rmrr_dev(struct dmar_rmrr_unit *rmrr,
2508                                          struct device *dev)
2509 {
2510         if (dev->archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO)
2511                 return 0;
2512         return iommu_prepare_identity_map(dev, rmrr->base_address,
2513                                           rmrr->end_address);
2514 }
2515
2516 #ifdef CONFIG_INTEL_IOMMU_FLOPPY_WA
2517 static inline void iommu_prepare_isa(void)
2518 {
2519         struct pci_dev *pdev;
2520         int ret;
2521
2522         pdev = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, NULL);
2523         if (!pdev)
2524                 return;
2525
2526         pr_info("Prepare 0-16MiB unity mapping for LPC\n");
2527         ret = iommu_prepare_identity_map(&pdev->dev, 0, 16*1024*1024 - 1);
2528
2529         if (ret)
2530                 pr_err("Failed to create 0-16MiB identity map - floppy might not work\n");
2531
2532         pci_dev_put(pdev);
2533 }
2534 #else
2535 static inline void iommu_prepare_isa(void)
2536 {
2537         return;
2538 }
2539 #endif /* !CONFIG_INTEL_IOMMU_FLOPPY_WA */
2540
2541 static int md_domain_init(struct dmar_domain *domain, int guest_width);
2542
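/*
 * Create the static identity (si) domain.  With hardware pass-through (hw)
 * nothing is mapped; otherwise every usable memory range of every online
 * node is identity-mapped into the domain.
 */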
2543 static int __init si_domain_init(int hw)
2544 {
2545         int nid, ret = 0;
2546
2547         si_domain = alloc_domain(DOMAIN_FLAG_STATIC_IDENTITY);
2548         if (!si_domain)
2549                 return -EFAULT;
2550
2551         if (md_domain_init(si_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
2552                 domain_exit(si_domain);
2553                 return -EFAULT;
2554         }
2555
2556         pr_debug("Identity mapping domain allocated\n");
2557
2558         if (hw)
2559                 return 0;
2560
2561         for_each_online_node(nid) {
2562                 unsigned long start_pfn, end_pfn;
2563                 int i;
2564
2565                 for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
2566                         ret = iommu_domain_identity_map(si_domain,
2567                                         PFN_PHYS(start_pfn), PFN_PHYS(end_pfn));
2568                         if (ret)
2569                                 return ret;
2570                 }
2571         }
2572
2573         return 0;
2574 }
2575
2576 static int identity_mapping(struct device *dev)
2577 {
2578         struct device_domain_info *info;
2579
2580         if (likely(!iommu_identity_mapping))
2581                 return 0;
2582
2583         info = dev->archdata.iommu;
2584         if (info && info != DUMMY_DEVICE_DOMAIN_INFO)
2585                 return (info->domain == si_domain);
2586
2587         return 0;
2588 }
2589
2590 static int domain_add_dev_info(struct dmar_domain *domain, struct device *dev)
2591 {
2592         struct dmar_domain *ndomain;
2593         struct intel_iommu *iommu;
2594         u8 bus, devfn;
2595
2596         iommu = device_to_iommu(dev, &bus, &devfn);
2597         if (!iommu)
2598                 return -ENODEV;
2599
2600         ndomain = dmar_insert_one_dev_info(iommu, bus, devfn, dev, domain);
2601         if (ndomain != domain)
2602                 return -EBUSY;
2603
2604         return 0;
2605 }
2606
2607 static bool device_has_rmrr(struct device *dev)
2608 {
2609         struct dmar_rmrr_unit *rmrr;
2610         struct device *tmp;
2611         int i;
2612
2613         rcu_read_lock();
2614         for_each_rmrr_units(rmrr) {
2615                 /*
2616                  * Return TRUE if this RMRR contains the device that
2617                  * is passed in.
2618                  */
2619                 for_each_active_dev_scope(rmrr->devices,
2620                                           rmrr->devices_cnt, i, tmp)
2621                         if (tmp == dev) {
2622                                 rcu_read_unlock();
2623                                 return true;
2624                         }
2625         }
2626         rcu_read_unlock();
2627         return false;
2628 }
2629
2630 /*
2631  * There are a couple cases where we need to restrict the functionality of
2632  * devices associated with RMRRs.  The first is when evaluating a device for
2633  * identity mapping because problems exist when devices are moved in and out
2634  * of domains and their respective RMRR information is lost.  This means that
2635  * a device with associated RMRRs will never be in a "passthrough" domain.
2636  * The second is use of the device through the IOMMU API.  This interface
2637  * expects to have full control of the IOVA space for the device.  We cannot
2638  * satisfy both the requirement that RMRR access is maintained and have an
2639  * unencumbered IOVA space.  We also have no ability to quiesce the device's
2640  * use of the RMRR space or even inform the IOMMU API user of the restriction.
2641  * We therefore prevent devices associated with an RMRR from participating in
2642  * the IOMMU API, which eliminates them from device assignment.
2643  *
2644  * In both cases we assume that PCI USB devices with RMRRs have them largely
2645  * for historical reasons and that the RMRR space is not actively used post
2646  * boot.  This exclusion may change if vendors begin to abuse it.
2647  *
2648  * The same exception is made for graphics devices, with the requirement that
2649  * any use of the RMRR regions will be torn down before assigning the device
2650  * to a guest.
2651  */
2652 static bool device_is_rmrr_locked(struct device *dev)
2653 {
2654         if (!device_has_rmrr(dev))
2655                 return false;
2656
2657         if (dev_is_pci(dev)) {
2658                 struct pci_dev *pdev = to_pci_dev(dev);
2659
2660                 if (IS_USB_DEVICE(pdev) || IS_GFX_DEVICE(pdev))
2661                         return false;
2662         }
2663
2664         return true;
2665 }
2666
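/*
 * Decide whether @dev should be put into the static 1:1 domain.  Devices
 * locked to their RMRRs never are; Azalia and graphics devices are when the
 * corresponding IDENTMAP flags are set; everything else requires
 * IDENTMAP_ALL.  Conventional PCI devices behind bridges are excluded, and
 * after boot (@startup == 0) the device's DMA mask must also cover all the
 * memory it may be asked to reach.
 */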
2667 static int iommu_should_identity_map(struct device *dev, int startup)
2668 {
2669
2670         if (dev_is_pci(dev)) {
2671                 struct pci_dev *pdev = to_pci_dev(dev);
2672
2673                 if (device_is_rmrr_locked(dev))
2674                         return 0;
2675
2676                 if ((iommu_identity_mapping & IDENTMAP_AZALIA) && IS_AZALIA(pdev))
2677                         return 1;
2678
2679                 if ((iommu_identity_mapping & IDENTMAP_GFX) && IS_GFX_DEVICE(pdev))
2680                         return 1;
2681
2682                 if (!(iommu_identity_mapping & IDENTMAP_ALL))
2683                         return 0;
2684
2685                 /*
2686                  * We want to start off with all devices in the 1:1 domain, and
2687                  * take them out later if we find they can't access all of memory.
2688                  *
2689                  * However, we can't do this for PCI devices behind bridges,
2690                  * because all PCI devices behind the same bridge will end up
2691                  * with the same source-id on their transactions.
2692                  *
2693                  * Practically speaking, we can't change things around for these
2694                  * devices at run-time, because we can't be sure there'll be no
2695                  * DMA transactions in flight for any of their siblings.
2696                  *
2697                  * So PCI devices (unless they're on the root bus) as well as
2698                  * their parent PCI-PCI or PCIe-PCI bridges must be left _out_ of
2699                  * the 1:1 domain, just in _case_ one of their siblings turns out
2700                  * not to be able to map all of memory.
2701                  */
2702                 if (!pci_is_pcie(pdev)) {
2703                         if (!pci_is_root_bus(pdev->bus))
2704                                 return 0;
2705                         if (pdev->class >> 8 == PCI_CLASS_BRIDGE_PCI)
2706                                 return 0;
2707                 } else if (pci_pcie_type(pdev) == PCI_EXP_TYPE_PCI_BRIDGE)
2708                         return 0;
2709         } else {
2710                 if (device_has_rmrr(dev))
2711                         return 0;
2712         }
2713
2714         /*
2715          * At boot time, we don't yet know if devices will be 64-bit capable.
2716          * Assume that they will — if they turn out not to be, then we can
2717          * take them out of the 1:1 domain later.
2718          */
2719         if (!startup) {
2720                 /*
2721                  * If the device's dma_mask is less than the system's memory
2722                  * size then this is not a candidate for identity mapping.
2723                  */
2724                 u64 dma_mask = *dev->dma_mask;
2725
2726                 if (dev->coherent_dma_mask &&
2727                     dev->coherent_dma_mask < dma_mask)
2728                         dma_mask = dev->coherent_dma_mask;
2729
2730                 return dma_mask >= dma_get_required_mask(dev);
2731         }
2732
2733         return 1;
2734 }
2735
2736 static int __init dev_prepare_static_identity_mapping(struct device *dev, int hw)
2737 {
2738         int ret;
2739
2740         if (!iommu_should_identity_map(dev, 1))
2741                 return 0;
2742
2743         ret = domain_add_dev_info(si_domain, dev);
2744         if (!ret)
2745                 pr_info("%s identity mapping for device %s\n",
2746                         hw ? "Hardware" : "Software", dev_name(dev));
2747         else if (ret == -ENODEV)
2748                 /* device not associated with an iommu */
2749                 ret = 0;
2750
2751         return ret;
2752 }
2753
2754
2755 static int __init iommu_prepare_static_identity_mapping(int hw)
2756 {
2757         struct pci_dev *pdev = NULL;
2758         struct dmar_drhd_unit *drhd;
2759         struct intel_iommu *iommu;
2760         struct device *dev;
2761         int i;
2762         int ret = 0;
2763
2764         for_each_pci_dev(pdev) {
2765                 ret = dev_prepare_static_identity_mapping(&pdev->dev, hw);
2766                 if (ret)
2767                         return ret;
2768         }
2769
2770         for_each_active_iommu(iommu, drhd)
2771                 for_each_active_dev_scope(drhd->devices, drhd->devices_cnt, i, dev) {
2772                         struct acpi_device_physical_node *pn;
2773                         struct acpi_device *adev;
2774
2775                         if (dev->bus != &acpi_bus_type)
2776                                 continue;
2777
2778                         adev = to_acpi_device(dev);
2779                         mutex_lock(&adev->physical_node_lock);
2780                         list_for_each_entry(pn, &adev->physical_node_list, node) {
2781                                 ret = dev_prepare_static_identity_mapping(pn->dev, hw);
2782                                 if (ret)
2783                                         break;
2784                         }
2785                         mutex_unlock(&adev->physical_node_lock);
2786                         if (ret)
2787                                 return ret;
2788                 }
2789
2790         return 0;
2791 }
2792
2793 static void intel_iommu_init_qi(struct intel_iommu *iommu)
2794 {
2795         /*
2796          * Start from a sane iommu hardware state.
2797          * If queued invalidation was already initialized by us
2798          * (for example, while enabling interrupt-remapping) then
2799          * things are already rolling from a sane state.
2800          */
2801         if (!iommu->qi) {
2802                 /*
2803                  * Clear any previous faults.
2804                  */
2805                 dmar_fault(-1, iommu);
2806                 /*
2807                  * Disable queued invalidation if supported and already enabled
2808                  * before OS handover.
2809                  */
2810                 dmar_disable_qi(iommu);
2811         }
2812
2813         if (dmar_enable_qi(iommu)) {
2814                 /*
2815                  * Queued invalidation not enabled, use register-based invalidation
2816                  */
2817                 iommu->flush.flush_context = __iommu_flush_context;
2818                 iommu->flush.flush_iotlb = __iommu_flush_iotlb;
2819                 pr_info("%s: Using Register based invalidation\n",
2820                         iommu->name);
2821         } else {
2822                 iommu->flush.flush_context = qi_flush_context;
2823                 iommu->flush.flush_iotlb = qi_flush_iotlb;
2824                 pr_info("%s: Using Queued invalidation\n", iommu->name);
2825         }
2826 }
2827
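/*
 * Copy the context table(s) for @bus from the old kernel's root entry
 * @old_re into freshly allocated pages and store them in @tbl.  With
 * extended root entries (@ext) each bus has two context tables, one for
 * devfn 0x00-0x7f and one for devfn 0x80-0xff.  Every copied entry has its
 * PASID support disabled and is marked as copied, and its domain id is
 * reserved in the iommu's domain_ids bitmap.
 */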
2828 static int copy_context_table(struct intel_iommu *iommu,
2829                               struct root_entry *old_re,
2830                               struct context_entry **tbl,
2831                               int bus, bool ext)
2832 {
2833         struct context_entry *old_ce = NULL, *new_ce = NULL, ce;
2834         int tbl_idx, pos = 0, idx, devfn, ret = 0, did;
2835         phys_addr_t old_ce_phys;
2836
2837         tbl_idx = ext ? bus * 2 : bus;
2838
2839         for (devfn = 0; devfn < 256; devfn++) {
2840                 /* First calculate the correct index */
2841                 idx = (ext ? devfn * 2 : devfn) % 256;
2842
2843                 if (idx == 0) {
2844                         /* First save what we may have and clean up */
2845                         if (new_ce) {
2846                                 tbl[tbl_idx] = new_ce;
2847                                 __iommu_flush_cache(iommu, new_ce,
2848                                                     VTD_PAGE_SIZE);
2849                                 pos = 1;
2850                         }
2851
2852                         if (old_ce)
2853                                 iounmap(old_ce);
2854
2855                         ret = 0;
2856                         if (devfn < 0x80)
2857                                 old_ce_phys = root_entry_lctp(old_re);
2858                         else
2859                                 old_ce_phys = root_entry_uctp(old_re);
2860
2861                         if (!old_ce_phys) {
2862                                 if (ext && devfn == 0) {
2863                                         /* No LCTP, try UCTP */
2864                                         devfn = 0x7f;
2865                                         continue;
2866                                 } else {
2867                                         goto out;
2868                                 }
2869                         }
2870
2871                         ret = -ENOMEM;
2872                         old_ce = ioremap_cache(old_ce_phys, PAGE_SIZE);
2873                         if (!old_ce)
2874                                 goto out;
2875
2876                         new_ce = alloc_pgtable_page(iommu->node);
2877                         if (!new_ce)
2878                                 goto out_unmap;
2879
2880                         ret = 0;
2881                 }
2882
2883                 /* Now copy the context entry */
2884                 ce = old_ce[idx];
2885
2886                 if (!__context_present(&ce))
2887                         continue;
2888
2889                 did = context_domain_id(&ce);
2890                 if (did >= 0 && did < cap_ndoms(iommu->cap))
2891                         set_bit(did, iommu->domain_ids);
2892
2893                 /*
2894                  * We need a marker for copied context entries. This
2895                  * marker needs to work for the old format as well as
2896                  * for extended context entries.
2897                  *
2898                  * Bit 67 of the context entry is used. In the old
2899                  * format this bit is available to software, in the
2900                  * extended format it is the PGE bit, but PGE is ignored
2901                  * by HW if PASIDs are disabled (and thus still
2902                  * available).
2903                  *
2904                  * So disable PASIDs first and then mark the entry
2905                  * copied. This means that we don't copy PASID
2906                  * translations from the old kernel, but this is fine as
2907                  * faults there are not fatal.
2908                  */
2909                 context_clear_pasid_enable(&ce);
2910                 context_set_copied(&ce);
2911
2912                 new_ce[idx] = ce;
2913         }
2914
2915         tbl[tbl_idx + pos] = new_ce;
2916
2917         __iommu_flush_cache(iommu, new_ce, VTD_PAGE_SIZE);
2918
2919 out_unmap:
2920         iounmap(old_ce);
2921
2922 out:
2923         return ret;
2924 }
2925
2926 static int copy_translation_tables(struct intel_iommu *iommu)
2927 {
2928         struct context_entry **ctxt_tbls;
2929         struct root_entry *old_rt;
2930         phys_addr_t old_rt_phys;
2931         int ctxt_table_entries;
2932         unsigned long flags;
2933         u64 rtaddr_reg;
2934         int bus, ret;
2935         bool new_ext, ext;
2936
2937         rtaddr_reg = dmar_readq(iommu->reg + DMAR_RTADDR_REG);
2938         ext        = !!(rtaddr_reg & DMA_RTADDR_RTT);
2939         new_ext    = !!ecap_ecs(iommu->ecap);
2940
2941         /*
2942          * The RTT bit can only be changed when translation is disabled,
2943          * but disabling translation means to open a window for data
2944          * corruption. So bail out and don't copy anything if we would
2945          * have to change the bit.
2946          */
2947         if (new_ext != ext)
2948                 return -EINVAL;
2949
2950         old_rt_phys = rtaddr_reg & VTD_PAGE_MASK;
2951         if (!old_rt_phys)
2952                 return -EINVAL;
2953
2954         old_rt = ioremap_cache(old_rt_phys, PAGE_SIZE);
2955         if (!old_rt)
2956                 return -ENOMEM;
2957
2958         /* This is too big for the stack - allocate it from slab */
2959         ctxt_table_entries = ext ? 512 : 256;
2960         ret = -ENOMEM;
2961         ctxt_tbls = kzalloc(ctxt_table_entries * sizeof(void *), GFP_KERNEL);
2962         if (!ctxt_tbls)
2963                 goto out_unmap;
2964
2965         for (bus = 0; bus < 256; bus++) {
2966                 ret = copy_context_table(iommu, &old_rt[bus],
2967                                          ctxt_tbls, bus, ext);
2968                 if (ret) {
2969                         pr_err("%s: Failed to copy context table for bus %d\n",
2970                                 iommu->name, bus);
2971                         continue;
2972                 }
2973         }
2974
2975         spin_lock_irqsave(&iommu->lock, flags);
2976
2977         /* Context tables are copied, now write them to the root_entry table */
2978         for (bus = 0; bus < 256; bus++) {
2979                 int idx = ext ? bus * 2 : bus;
2980                 u64 val;
2981
2982                 if (ctxt_tbls[idx]) {
2983                         val = virt_to_phys(ctxt_tbls[idx]) | 1;
2984                         iommu->root_entry[bus].lo = val;
2985                 }
2986
2987                 if (!ext || !ctxt_tbls[idx + 1])
2988                         continue;
2989
2990                 val = virt_to_phys(ctxt_tbls[idx + 1]) | 1;
2991                 iommu->root_entry[bus].hi = val;
2992         }
2993
2994         spin_unlock_irqrestore(&iommu->lock, flags);
2995
2996         kfree(ctxt_tbls);
2997
2998         __iommu_flush_cache(iommu, iommu->root_entry, PAGE_SIZE);
2999
3000         ret = 0;
3001
3002 out_unmap:
3003         iounmap(old_rt);
3004
3005         return ret;
3006 }
3007
3008 static int __init init_dmars(void)
3009 {
3010         struct dmar_drhd_unit *drhd;
3011         struct dmar_rmrr_unit *rmrr;
3012         bool copied_tables = false;
3013         struct device *dev;
3014         struct intel_iommu *iommu;
3015         int i, ret;
3016
3017         /*
3018          * for each drhd
3019          *    allocate root
3020          *    initialize and program root entry to not present
3021          * endfor
3022          */
3023         for_each_drhd_unit(drhd) {
3024                 /*
3025                  * lock not needed as this is only incremented in the single
3026                  * threaded kernel __init code path; all other accesses are
3027                  * read only
3028                  */
3029                 if (g_num_of_iommus < DMAR_UNITS_SUPPORTED) {
3030                         g_num_of_iommus++;
3031                         continue;
3032                 }
3033                 pr_err_once("Exceeded %d IOMMUs\n", DMAR_UNITS_SUPPORTED);
3034         }
3035
3036         /* Preallocate enough resources for IOMMU hot-addition */
3037         if (g_num_of_iommus < DMAR_UNITS_SUPPORTED)
3038                 g_num_of_iommus = DMAR_UNITS_SUPPORTED;
3039
3040         g_iommus = kcalloc(g_num_of_iommus, sizeof(struct intel_iommu *),
3041                         GFP_KERNEL);
3042         if (!g_iommus) {
3043                 pr_err("Allocating global iommu array failed\n");
3044                 ret = -ENOMEM;
3045                 goto error;
3046         }
3047
3048         deferred_flush = kzalloc(g_num_of_iommus *
3049                 sizeof(struct deferred_flush_tables), GFP_KERNEL);
3050         if (!deferred_flush) {
3051                 ret = -ENOMEM;
3052                 goto free_g_iommus;
3053         }
3054
3055         for_each_active_iommu(iommu, drhd) {
3056                 g_iommus[iommu->seq_id] = iommu;
3057
3058                 intel_iommu_init_qi(iommu);
3059
3060                 ret = iommu_init_domains(iommu);
3061                 if (ret)
3062                         goto free_iommu;
3063
3064                 init_translation_status(iommu);
3065
3066                 if (translation_pre_enabled(iommu) && !is_kdump_kernel()) {
3067                         iommu_disable_translation(iommu);
3068                         clear_translation_pre_enabled(iommu);
3069                         pr_warn("Translation was enabled for %s but we are not in kdump mode\n",
3070                                 iommu->name);
3071                 }
3072
3073                 /*
3074                  * TBD: we could share the same root and context tables
3075                  * among all IOMMUs. Split this out later.
3076                  */
3078                 ret = iommu_alloc_root_entry(iommu);
3079                 if (ret)
3080                         goto free_iommu;
3081
3082                 if (translation_pre_enabled(iommu)) {
3083                         pr_info("Translation already enabled - trying to copy translation structures\n");
3084
3085                         ret = copy_translation_tables(iommu);
3086                         if (ret) {
3087                                 /*
3088                                  * We found the IOMMU with translation
3089                                  * enabled - but failed to copy over the
3090                                  * old root-entry table. Try to proceed
3091                                  * by disabling translation now and
3092                                  * allocating a clean root-entry table.
3093                                  * This might cause DMAR faults, but
3094                                  * probably the dump will still succeed.
3095                                  */
3096                                 pr_err("Failed to copy translation tables from previous kernel for %s\n",
3097                                        iommu->name);
3098                                 iommu_disable_translation(iommu);
3099                                 clear_translation_pre_enabled(iommu);
3100                         } else {
3101                                 pr_info("Copied translation tables from previous kernel for %s\n",
3102                                         iommu->name);
3103                                 copied_tables = true;
3104                         }
3105                 }
3106
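                     /*
                      * Install the (possibly just copied) root table in hardware
                      * and invalidate the context and IOTLB caches globally so
                      * no stale entries survive from before the handover.
                      */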
3107                 iommu_flush_write_buffer(iommu);
3108                 iommu_set_root_entry(iommu);
3109                 iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
3110                 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
3111
3112                 if (!ecap_pass_through(iommu->ecap))
3113                         hw_pass_through = 0;
3114         }
3115
3116         if (iommu_pass_through)
3117                 iommu_identity_mapping |= IDENTMAP_ALL;
3118
3119 #ifdef CONFIG_INTEL_IOMMU_BROKEN_GFX_WA
3120         iommu_identity_mapping |= IDENTMAP_GFX;
3121 #endif
3122
3123         if (iommu_identity_mapping) {
3124                 ret = si_domain_init(hw_pass_through);
3125                 if (ret)
3126                         goto free_iommu;
3127         }
3128
3129         check_tylersburg_isoch();
3130
3131         /*
3132          * If we copied translations from a previous kernel in the kdump
3133          * case, we cannot assign devices to domains now, as that
3134          * would eliminate the old mappings. So skip this part and defer
3135          * the assignment to device driver initialization time.
3136          */
3137         if (copied_tables)
3138                 goto domains_done;
3139
3140         /*
3141          * If pass-through is not set or not enabled, set up context entries for
3142          * identity mappings of RMRR, graphics and ISA devices, possibly falling
3143          * back to the static identity mapping if iommu_identity_mapping is set.
3144          */
3145         if (iommu_identity_mapping) {
3146                 ret = iommu_prepare_static_identity_mapping(hw_pass_through);
3147                 if (ret) {
3148                         pr_crit("Failed to setup IOMMU pass-through\n");
3149                         goto free_iommu;
3150                 }
3151         }
3152         /*
3153          * For each rmrr
3154          *   for each dev attached to rmrr
3155          *   do
3156          *     locate drhd for dev, alloc domain for dev
3157          *     allocate free domain
3158          *     allocate page table entries for rmrr
3159          *     if context not allocated for bus
3160          *           allocate and init context
3161          *           set present in root table for this bus
3162          *     init context with domain, translation etc
3163          *    endfor
3164          * endfor
3165          */
3166         pr_info("Setting RMRR:\n");
3167         for_each_rmrr_units(rmrr) {
3168                 /* Some BIOSes list non-existent devices in the DMAR table. */
3169                 for_each_active_dev_scope(rmrr->devices, rmrr->devices_cnt,
3170                                           i, dev) {
3171                         ret = iommu_prepare_rmrr_dev(rmrr, dev);
3172                         if (ret)
3173                                 pr_err("Mapping reserved region failed\n");
3174                 }
3175         }
3176
3177         iommu_prepare_isa();
3178
3179 domains_done:
3180
3181         /*
3182          * for each drhd
3183          *   enable fault log
3184          *   global invalidate context cache
3185          *   global invalidate iotlb
3186          *   enable translation
3187          */
3188         for_each_iommu(iommu, drhd) {
3189                 if (drhd->ignored) {
3190                         /*
3191                          * we always have to disable PMRs or DMA may fail on
3192                          * this device
3193                          */
3194                         if (force_on)
3195                                 iommu_disable_protect_mem_regions(iommu);
3196                         continue;
3197                 }
3198
3199                 iommu_flush_write_buffer(iommu);
3200
3201                 ret = dmar_set_interrupt(iommu);
3202                 if (ret)
3203                         goto free_iommu;
3204
3205                 if (!translation_pre_enabled(iommu))
3206                         iommu_enable_translation(iommu);
3207
3208                 iommu_disable_protect_mem_regions(iommu);
3209         }
3210
3211         return 0;
3212
3213 free_iommu:
3214         for_each_active_iommu(iommu, drhd) {
3215                 disable_dmar_iommu(iommu);
3216                 free_dmar_iommu(iommu);
3217         }
3218         kfree(deferred_flush);
3219 free_g_iommus:
3220         kfree(g_iommus);
3221 error:
3222         return ret;
3223 }
3224
3225 /* This takes a number of _MM_ pages, not VTD pages */
3226 static struct iova *intel_alloc_iova(struct device *dev,
3227                                      struct dmar_domain *domain,
3228                                      unsigned long nrpages, uint64_t dma_mask)
3229 {
3230         struct iova *iova = NULL;
3231
3232         /* Restrict dma_mask to the width that the iommu can handle */
3233         dma_mask = min_t(uint64_t, DOMAIN_MAX_ADDR(domain->gaw), dma_mask);
3234
3235         if (!dmar_forcedac && dma_mask > DMA_BIT_MASK(32)) {
3236                 /*
3237                  * First try to allocate an IO virtual address below
3238                  * DMA_BIT_MASK(32); if that fails, fall back to allocating
3239                  * from the higher range.
3240                  */
3241                 iova = alloc_iova(&domain->iovad, nrpages,
3242                                   IOVA_PFN(DMA_BIT_MASK(32)), 1);
3243                 if (iova)
3244                         return iova;
3245         }
3246         iova = alloc_iova(&domain->iovad, nrpages, IOVA_PFN(dma_mask), 1);
3247         if (unlikely(!iova)) {
3248                 pr_err("Allocating %lu-page iova for %s failed\n",
3249                        nrpages, dev_name(dev));
3250                 return NULL;
3251         }
3252
3253         return iova;
3254 }
3255
3256 static struct dmar_domain *__get_valid_domain_for_dev(struct device *dev)
3257 {
3258         struct dmar_domain *domain;
3259
3260         domain = get_domain_for_dev(dev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
3261         if (!domain) {
3262                 pr_err("Allocating domain for %s failed\n",
3263                        dev_name(dev));
3264                 return NULL;
3265         }
3266
3267         return domain;
3268 }
3269
3270 static inline struct dmar_domain *get_valid_domain_for_dev(struct device *dev)
3271 {
3272         struct device_domain_info *info;
3273
3274         /* No lock here, assumes no domain exit in normal case */
3275         info = dev->archdata.iommu;
3276         if (likely(info))
3277                 return info->domain;
3278
3279         return __get_valid_domain_for_dev(dev);
3280 }
3281
3282 /* Check whether the dev needs to go through the non-identity map/unmap path. */
3283 static int iommu_no_mapping(struct device *dev)
3284 {
3285         int found;
3286
3287         if (iommu_dummy(dev))
3288                 return 1;
3289
3290         if (!iommu_identity_mapping)
3291                 return 0;
3292
3293         found = identity_mapping(dev);
3294         if (found) {
3295                 if (iommu_should_identity_map(dev, 0))
3296                         return 1;
3297                 else {
3298                         /*
3299                          * The device is only 32-bit DMA capable: remove it from
3300                          * si_domain and fall back to non-identity mapping.
3301                          */
3302                         dmar_remove_one_dev_info(si_domain, dev);
3303                         pr_info("32bit %s uses non-identity mapping\n",
3304                                 dev_name(dev));
3305                         return 0;
3306                 }
3307         } else {
3308                 /*
3309                  * When a 64-bit DMA capable device is detached from a VM, the
3310                  * device is put back into si_domain for identity mapping.
3311                  */
3312                 if (iommu_should_identity_map(dev, 0)) {
3313                         int ret;
3314                         ret = domain_add_dev_info(si_domain, dev);
3315                         if (!ret) {
3316                                 pr_info("64bit %s uses identity mapping\n",
3317                                         dev_name(dev));
3318                                 return 1;
3319                         }
3320                 }
3321         }
3322
3323         return 0;
3324 }
3325
3326 static dma_addr_t __intel_map_single(struct device *dev, phys_addr_t paddr,
3327                                      size_t size, int dir, u64 dma_mask)
3328 {
3329         struct dmar_domain *domain;
3330         phys_addr_t start_paddr;
3331         struct iova *iova;
3332         int prot = 0;
3333         int ret;
3334         struct intel_iommu *iommu;
3335         unsigned long paddr_pfn = paddr >> PAGE_SHIFT;
3336
3337         BUG_ON(dir == DMA_NONE);
3338
3339         if (iommu_no_mapping(dev))
3340                 return paddr;
3341
3342         domain = get_valid_domain_for_dev(dev);
3343         if (!domain)
3344                 return 0;
3345
3346         iommu = domain_get_iommu(domain);
3347         size = aligned_nrpages(paddr, size);
3348
3349         iova = intel_alloc_iova(dev, domain, dma_to_mm_pfn(size), dma_mask);
3350         if (!iova)
3351                 goto error;
3352
3353         /*
3354          * Check if DMAR supports zero-length reads on write-only
3355          * mappings.
3356          */
3357         if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL ||
3358                         !cap_zlr(iommu->cap))
3359                 prot |= DMA_PTE_READ;
3360         if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
3361                 prot |= DMA_PTE_WRITE;
3362         /*
3363          * The range paddr .. paddr + size may cover only part of a page, so
3364          * map the whole page.  Note: if two parts of one page are mapped
3365          * separately, two IOVAs may end up mapping to the same host paddr,
3366          * but that is not a problem.
3367          */
3368         ret = domain_pfn_mapping(domain, mm_to_dma_pfn(iova->pfn_lo),
3369                                  mm_to_dma_pfn(paddr_pfn), size, prot);
3370         if (ret)
3371                 goto error;
3372
3373         /* it's a non-present to present mapping. Only flush if caching mode */
3374         if (cap_caching_mode(iommu->cap))
3375                 iommu_flush_iotlb_psi(iommu, domain,
3376                                       mm_to_dma_pfn(iova->pfn_lo),
3377                                       size, 0, 1);
3378         else
3379                 iommu_flush_write_buffer(iommu);
3380
3381         start_paddr = (phys_addr_t)iova->pfn_lo << PAGE_SHIFT;
3382         start_paddr += paddr & ~PAGE_MASK;
3383         return start_paddr;
3384
3385 error:
3386         if (iova)
3387                 __free_iova(&domain->iovad, iova);
3388         pr_err("Device %s request: %zx@%llx dir %d --- failed\n",
3389                 dev_name(dev), size, (unsigned long long)paddr, dir);
3390         return 0;
3391 }
3392
3393 static dma_addr_t intel_map_page(struct device *dev, struct page *page,
3394                                  unsigned long offset, size_t size,
3395                                  enum dma_data_direction dir,
3396                                  struct dma_attrs *attrs)
3397 {
3398         return __intel_map_single(dev, page_to_phys(page) + offset, size,
3399                                   dir, *dev->dma_mask);
3400 }
3401
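     /*
      * Drain the per-IOMMU deferred-unmap queues: flush the IOTLB (globally on
      * real hardware, per-range when running under a caching-mode emulator),
      * then free the queued IOVAs and page-table freelists.  Called with
      * async_umap_flush_lock held.
      */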
3402 static void flush_unmaps(void)
3403 {
3404         int i, j;
3405
3406         timer_on = 0;
3407
3408         /* just flush them all */
3409         for (i = 0; i < g_num_of_iommus; i++) {
3410                 struct intel_iommu *iommu = g_iommus[i];
3411                 if (!iommu)
3412                         continue;
3413
3414                 if (!deferred_flush[i].next)
3415                         continue;
3416
3417                 /* In caching mode, global flushes make emulation expensive */
3418                 if (!cap_caching_mode(iommu->cap))
3419                         iommu->flush.flush_iotlb(iommu, 0, 0, 0,
3420                                          DMA_TLB_GLOBAL_FLUSH);
3421                 for (j = 0; j < deferred_flush[i].next; j++) {
3422                         unsigned long mask;
3423                         struct iova *iova = deferred_flush[i].iova[j];
3424                         struct dmar_domain *domain = deferred_flush[i].domain[j];
3425
3426                         /* On real hardware multiple invalidations are expensive */
3427                         if (cap_caching_mode(iommu->cap))
3428                                 iommu_flush_iotlb_psi(iommu, domain,
3429                                         iova->pfn_lo, iova_size(iova),
3430                                         !deferred_flush[i].freelist[j], 0);
3431                         else {
3432                                 mask = ilog2(mm_to_dma_pfn(iova_size(iova)));
3433                                 iommu_flush_dev_iotlb(deferred_flush[i].domain[j],
3434                                                 (uint64_t)iova->pfn_lo << PAGE_SHIFT, mask);
3435                         }
3436                         __free_iova(&deferred_flush[i].domain[j]->iovad, iova);
3437                         if (deferred_flush[i].freelist[j])
3438                                 dma_free_pagelist(deferred_flush[i].freelist[j]);
3439                 }
3440                 deferred_flush[i].next = 0;
3441         }
3442
3443         list_size = 0;
3444 }
3445
3446 static void flush_unmaps_timeout(unsigned long data)
3447 {
3448         unsigned long flags;
3449
3450         spin_lock_irqsave(&async_umap_flush_lock, flags);
3451         flush_unmaps();
3452         spin_unlock_irqrestore(&async_umap_flush_lock, flags);
3453 }
3454
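     /*
      * Queue a freed IOVA range (and its page-table freelist) for deferred
      * invalidation.  The queue is drained either when HIGH_WATER_MARK entries
      * have accumulated or when the 10ms unmap_timer fires.
      */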
3455 static void add_unmap(struct dmar_domain *dom, struct iova *iova, struct page *freelist)
3456 {
3457         unsigned long flags;
3458         int next, iommu_id;
3459         struct intel_iommu *iommu;
3460
3461         spin_lock_irqsave(&async_umap_flush_lock, flags);
3462         if (list_size == HIGH_WATER_MARK)
3463                 flush_unmaps();
3464
3465         iommu = domain_get_iommu(dom);
3466         iommu_id = iommu->seq_id;
3467
3468         next = deferred_flush[iommu_id].next;
3469         deferred_flush[iommu_id].domain[next] = dom;
3470         deferred_flush[iommu_id].iova[next] = iova;
3471         deferred_flush[iommu_id].freelist[next] = freelist;
3472         deferred_flush[iommu_id].next++;
3473
3474         if (!timer_on) {
3475                 mod_timer(&unmap_timer, jiffies + msecs_to_jiffies(10));
3476                 timer_on = 1;
3477         }
3478         list_size++;
3479         spin_unlock_irqrestore(&async_umap_flush_lock, flags);
3480 }
3481
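     /*
      * Tear down the translation for a single DMA handle: look up its IOVA,
      * unmap the page-table range, and either flush the IOTLB synchronously
      * (strict mode) or defer the flush and the IOVA release via add_unmap().
      */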
3482 static void intel_unmap(struct device *dev, dma_addr_t dev_addr)
3483 {
3484         struct dmar_domain *domain;
3485         unsigned long start_pfn, last_pfn;
3486         struct iova *iova;
3487         struct intel_iommu *iommu;
3488         struct page *freelist;
3489
3490         if (iommu_no_mapping(dev))
3491                 return;
3492
3493         domain = find_domain(dev);
3494         BUG_ON(!domain);
3495
3496         iommu = domain_get_iommu(domain);
3497
3498         iova = find_iova(&domain->iovad, IOVA_PFN(dev_addr));
3499         if (WARN_ONCE(!iova, "Driver unmaps unmatched page at PFN %llx\n",
3500                       (unsigned long long)dev_addr))
3501                 return;
3502
3503         start_pfn = mm_to_dma_pfn(iova->pfn_lo);
3504         last_pfn = mm_to_dma_pfn(iova->pfn_hi + 1) - 1;
3505
3506         pr_debug("Device %s unmapping: pfn %lx-%lx\n",
3507                  dev_name(dev), start_pfn, last_pfn);
3508
3509         freelist = domain_unmap(domain, start_pfn, last_pfn);
3510
3511         if (intel_iommu_strict) {
3512                 iommu_flush_iotlb_psi(iommu, domain, start_pfn,
3513                                       last_pfn - start_pfn + 1, !freelist, 0);
3514                 /* free iova */
3515                 __free_iova(&domain->iovad, iova);
3516                 dma_free_pagelist(freelist);
3517         } else {
3518                 add_unmap(domain, iova, freelist);
3519                 /*
3520                  * Queue the release of the unmap to save roughly the 1/6th of
3521                  * CPU time otherwise spent in the IOTLB flush operation.
3522                  */
3523         }
3524 }
3525
3526 static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
3527                              size_t size, enum dma_data_direction dir,
3528                              struct dma_attrs *attrs)
3529 {
3530         intel_unmap(dev, dev_addr);
3531 }
3532
3533 static void *intel_alloc_coherent(struct device *dev, size_t size,
3534                                   dma_addr_t *dma_handle, gfp_t flags,
3535                                   struct dma_attrs *attrs)
3536 {
3537         struct page *page = NULL;
3538         int order;
3539
3540         size = PAGE_ALIGN(size);
3541         order = get_order(size);
3542
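             /*
              * Devices translated by the IOMMU need no zone restriction; for
              * passthrough/untranslated devices honour the coherent DMA mask
              * by selecting GFP_DMA or GFP_DMA32.
              */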
3543         if (!iommu_no_mapping(dev))
3544                 flags &= ~(GFP_DMA | GFP_DMA32);
3545         else if (dev->coherent_dma_mask < dma_get_required_mask(dev)) {
3546                 if (dev->coherent_dma_mask < DMA_BIT_MASK(32))
3547                         flags |= GFP_DMA;
3548                 else
3549                         flags |= GFP_DMA32;
3550         }
3551
3552         if (flags & __GFP_WAIT) {
3553                 unsigned int count = size >> PAGE_SHIFT;
3554
3555                 page = dma_alloc_from_contiguous(dev, count, order);
3556                 if (page && iommu_no_mapping(dev) &&
3557                     page_to_phys(page) + size > dev->coherent_dma_mask) {
3558                         dma_release_from_contiguous(dev, page, count);
3559                         page = NULL;
3560                 }
3561         }
3562
3563         if (!page)
3564                 page = alloc_pages(flags, order);
3565         if (!page)
3566                 return NULL;
3567         memset(page_address(page), 0, size);
3568
3569         *dma_handle = __intel_map_single(dev, page_to_phys(page), size,
3570                                          DMA_BIDIRECTIONAL,
3571                                          dev->coherent_dma_mask);
3572         if (*dma_handle)
3573                 return page_address(page);
3574         if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT))
3575                 __free_pages(page, order);
3576
3577         return NULL;
3578 }
3579
3580 static void intel_free_coherent(struct device *dev, size_t size, void *vaddr,
3581                                 dma_addr_t dma_handle, struct dma_attrs *attrs)
3582 {
3583         int order;
3584         struct page *page = virt_to_page(vaddr);
3585
3586         size = PAGE_ALIGN(size);
3587         order = get_order(size);
3588
3589         intel_unmap(dev, dma_handle);
3590         if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT))
3591                 __free_pages(page, order);
3592 }
3593
3594 static void intel_unmap_sg(struct device *dev, struct scatterlist *sglist,
3595                            int nelems, enum dma_data_direction dir,
3596                            struct dma_attrs *attrs)
3597 {
3598         intel_unmap(dev, sglist[0].dma_address);
3599 }
3600
3601 static int intel_nontranslate_map_sg(struct device *hddev,
3602         struct scatterlist *sglist, int nelems, int dir)
3603 {
3604         int i;
3605         struct scatterlist *sg;
3606
3607         for_each_sg(sglist, sg, nelems, i) {
3608                 BUG_ON(!sg_page(sg));
3609                 sg->dma_address = page_to_phys(sg_page(sg)) + sg->offset;
3610                 sg->dma_length = sg->length;
3611         }
3612         return nelems;
3613 }
3614
3615 static int intel_map_sg(struct device *dev, struct scatterlist *sglist, int nelems,
3616                         enum dma_data_direction dir, struct dma_attrs *attrs)
3617 {
3618         int i;
3619         struct dmar_domain *domain;
3620         size_t size = 0;
3621         int prot = 0;
3622         struct iova *iova = NULL;
3623         int ret;
3624         struct scatterlist *sg;
3625         unsigned long start_vpfn;
3626         struct intel_iommu *iommu;
3627
3628         BUG_ON(dir == DMA_NONE);
3629         if (iommu_no_mapping(dev))
3630                 return intel_nontranslate_map_sg(dev, sglist, nelems, dir);
3631
3632         domain = get_valid_domain_for_dev(dev);
3633         if (!domain)
3634                 return 0;
3635
3636         iommu = domain_get_iommu(domain);
3637
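             /*
              * Add up how many VT-d pages the whole scatterlist needs so that
              * one contiguous IOVA range can be allocated to cover every
              * element.
              */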
3638         for_each_sg(sglist, sg, nelems, i)
3639                 size += aligned_nrpages(sg->offset, sg->length);
3640
3641         iova = intel_alloc_iova(dev, domain, dma_to_mm_pfn(size),
3642                                 *dev->dma_mask);
3643         if (!iova) {
3644                 sglist->dma_length = 0;
3645                 return 0;
3646         }
3647
3648         /*
3649          * Check if DMAR supports zero-length reads on write-only
3650          * mappings.
3651          */
3652         if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL ||
3653                         !cap_zlr(iommu->cap))
3654                 prot |= DMA_PTE_READ;
3655         if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
3656                 prot |= DMA_PTE_WRITE;
3657
3658         start_vpfn = mm_to_dma_pfn(iova->pfn_lo);
3659
3660         ret = domain_sg_mapping(domain, start_vpfn, sglist, size, prot);
3661         if (unlikely(ret)) {
3662                 dma_pte_free_pagetable(domain, start_vpfn,
3663                                        start_vpfn + size - 1);
3664                 __free_iova(&domain->iovad, iova);
3665                 return 0;
3666         }
3667
3668         /* it's a non-present to present mapping. Only flush if caching mode */
3669         if (cap_caching_mode(iommu->cap))
3670                 iommu_flush_iotlb_psi(iommu, domain, start_vpfn, size, 0, 1);
3671         else
3672                 iommu_flush_write_buffer(iommu);
3673
3674         return nelems;
3675 }
3676
3677 static int intel_mapping_error(struct device *dev, dma_addr_t dma_addr)
3678 {
3679         return !dma_addr;
3680 }
3681
3682 struct dma_map_ops intel_dma_ops = {
3683         .alloc = intel_alloc_coherent,
3684         .free = intel_free_coherent,
3685         .map_sg = intel_map_sg,
3686         .unmap_sg = intel_unmap_sg,
3687         .map_page = intel_map_page,
3688         .unmap_page = intel_unmap_page,
3689         .mapping_error = intel_mapping_error,
3690 };
3691
3692 static inline int iommu_domain_cache_init(void)
3693 {
3694         int ret = 0;
3695
3696         iommu_domain_cache = kmem_cache_create("iommu_domain",
3697                                          sizeof(struct dmar_domain),
3698                                          0,
3699                                          SLAB_HWCACHE_ALIGN,
3701                                          NULL);
3702         if (!iommu_domain_cache) {
3703                 pr_err("Couldn't create iommu_domain cache\n");
3704                 ret = -ENOMEM;
3705         }
3706
3707         return ret;
3708 }
3709
3710 static inline int iommu_devinfo_cache_init(void)
3711 {
3712         int ret = 0;
3713
3714         iommu_devinfo_cache = kmem_cache_create("iommu_devinfo",
3715                                          sizeof(struct device_domain_info),
3716                                          0,
3717                                          SLAB_HWCACHE_ALIGN,
3718                                          NULL);
3719         if (!iommu_devinfo_cache) {
3720                 pr_err("Couldn't create devinfo cache\n");
3721                 ret = -ENOMEM;
3722         }
3723
3724         return ret;
3725 }
3726
3727 static int __init iommu_init_mempool(void)
3728 {
3729         int ret;
3730         ret = iommu_iova_cache_init();
3731         if (ret)
3732                 return ret;
3733
3734         ret = iommu_domain_cache_init();
3735         if (ret)
3736                 goto domain_error;
3737
3738         ret = iommu_devinfo_cache_init();
3739         if (!ret)
3740                 return ret;
3741
3742         kmem_cache_destroy(iommu_domain_cache);
3743 domain_error:
3744         iommu_iova_cache_destroy();
3745
3746         return -ENOMEM;
3747 }
3748
3749 static void __init iommu_exit_mempool(void)
3750 {
3751         kmem_cache_destroy(iommu_devinfo_cache);
3752         kmem_cache_destroy(iommu_domain_cache);
3753         iommu_iova_cache_destroy();
3754 }
3755
3756 static void quirk_ioat_snb_local_iommu(struct pci_dev *pdev)
3757 {
3758         struct dmar_drhd_unit *drhd;
3759         u32 vtbar;
3760         int rc;
3761
3762         /* We know that this device on this chipset has its own IOMMU.
3763          * If we find it under a different IOMMU, then the BIOS is lying
3764          * to us. Hope that the IOMMU for this device is actually
3765          * disabled, and it needs no translation...
3766          */
3767         rc = pci_bus_read_config_dword(pdev->bus, PCI_DEVFN(0, 0), 0xb0, &vtbar);
3768         if (rc) {
3769                 /* "can't" happen */
3770                 dev_info(&pdev->dev, "failed to run vt-d quirk\n");
3771                 return;
3772         }
3773         vtbar &= 0xffff0000;
3774
3775         /* We know that this IOMMU's registers should be at offset 0xa000 from vtbar */
3776         drhd = dmar_find_matched_drhd_unit(pdev);
3777         if (WARN_TAINT_ONCE(!drhd || drhd->reg_base_addr - vtbar != 0xa000,
3778                             TAINT_FIRMWARE_WORKAROUND,
3779                             "BIOS assigned incorrect VT-d unit for Intel(R) QuickData Technology device\n"))
3780                 pdev->dev.archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
3781 }
3782 DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB, quirk_ioat_snb_local_iommu);
3783
3784 static void __init init_no_remapping_devices(void)
3785 {
3786         struct dmar_drhd_unit *drhd;
3787         struct device *dev;
3788         int i;
3789
3790         for_each_drhd_unit(drhd) {
3791                 if (!drhd->include_all) {
3792                         for_each_active_dev_scope(drhd->devices,
3793                                                   drhd->devices_cnt, i, dev)
3794                                 break;
3795                         /* ignore DMAR unit if no devices exist */
3796                         if (i == drhd->devices_cnt)
3797                                 drhd->ignored = 1;
3798                 }
3799         }
3800
3801         for_each_active_drhd_unit(drhd) {
3802                 if (drhd->include_all)
3803                         continue;
3804
3805                 for_each_active_dev_scope(drhd->devices,
3806                                           drhd->devices_cnt, i, dev)
3807                         if (!dev_is_pci(dev) || !IS_GFX_DEVICE(to_pci_dev(dev)))
3808                                 break;
3809                 if (i < drhd->devices_cnt)
3810                         continue;
3811
3812                 /* This IOMMU has *only* gfx devices. Either bypass it or
3813                    set the gfx_mapped flag, as appropriate */
3814                 if (dmar_map_gfx) {
3815                         intel_iommu_gfx_mapped = 1;
3816                 } else {
3817                         drhd->ignored = 1;
3818                         for_each_active_dev_scope(drhd->devices,
3819                                                   drhd->devices_cnt, i, dev)
3820                                 dev->archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
3821                 }
3822         }
3823 }
3824
3825 #ifdef CONFIG_SUSPEND
3826 static int init_iommu_hw(void)
3827 {
3828         struct dmar_drhd_unit *drhd;
3829         struct intel_iommu *iommu = NULL;
3830
3831         for_each_active_iommu(iommu, drhd)
3832                 if (iommu->qi)
3833                         dmar_reenable_qi(iommu);
3834
3835         for_each_iommu(iommu, drhd) {
3836                 if (drhd->ignored) {
3837                         /*
3838                          * we always have to disable PMRs or DMA may fail on
3839                          * this device
3840                          */
3841                         if (force_on)
3842                                 iommu_disable_protect_mem_regions(iommu);
3843                         continue;
3844                 }
3845
3846                 iommu_flush_write_buffer(iommu);
3847
3848                 iommu_set_root_entry(iommu);
3849
3850                 iommu->flush.flush_context(iommu, 0, 0, 0,
3851                                            DMA_CCMD_GLOBAL_INVL);
3852                 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
3853                 iommu_enable_translation(iommu);
3854                 iommu_disable_protect_mem_regions(iommu);
3855         }
3856
3857         return 0;
3858 }
3859
3860 static void iommu_flush_all(void)
3861 {
3862         struct dmar_drhd_unit *drhd;
3863         struct intel_iommu *iommu;
3864
3865         for_each_active_iommu(iommu, drhd) {
3866                 iommu->flush.flush_context(iommu, 0, 0, 0,
3867                                            DMA_CCMD_GLOBAL_INVL);
3868                 iommu->flush.flush_iotlb(iommu, 0, 0, 0,
3869                                          DMA_TLB_GLOBAL_FLUSH);
3870         }
3871 }
3872
3873 static int iommu_suspend(void)
3874 {
3875         struct dmar_drhd_unit *drhd;
3876         struct intel_iommu *iommu = NULL;
3877         unsigned long flag;
3878
3879         for_each_active_iommu(iommu, drhd) {
3880                 iommu->iommu_state = kzalloc(sizeof(u32) * MAX_SR_DMAR_REGS,
3881                                                  GFP_ATOMIC);
3882                 if (!iommu->iommu_state)
3883                         goto nomem;
3884         }
3885
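             /*
              * Flush all caches, then disable translation and save the
              * fault-event control/data/address registers of each IOMMU so
              * iommu_resume() can restore them.
              */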
3886         iommu_flush_all();
3887
3888         for_each_active_iommu(iommu, drhd) {
3889                 iommu_disable_translation(iommu);
3890
3891                 raw_spin_lock_irqsave(&iommu->register_lock, flag);
3892
3893                 iommu->iommu_state[SR_DMAR_FECTL_REG] =
3894                         readl(iommu->reg + DMAR_FECTL_REG);
3895                 iommu->iommu_state[SR_DMAR_FEDATA_REG] =
3896                         readl(iommu->reg + DMAR_FEDATA_REG);
3897                 iommu->iommu_state[SR_DMAR_FEADDR_REG] =
3898                         readl(iommu->reg + DMAR_FEADDR_REG);
3899                 iommu->iommu_state[SR_DMAR_FEUADDR_REG] =
3900                         readl(iommu->reg + DMAR_FEUADDR_REG);
3901
3902                 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
3903         }
3904         return 0;
3905
3906 nomem:
3907         for_each_active_iommu(iommu, drhd)
3908                 kfree(iommu->iommu_state);
3909
3910         return -ENOMEM;
3911 }
3912
3913 static void iommu_resume(void)
3914 {
3915         struct dmar_drhd_unit *drhd;
3916         struct intel_iommu *iommu = NULL;
3917         unsigned long flag;
3918
3919         if (init_iommu_hw()) {
3920                 if (force_on)
3921                         panic("tboot: IOMMU setup failed, DMAR can not resume!\n");
3922                 else
3923                         WARN(1, "IOMMU setup failed, DMAR can not resume!\n");
3924                 return;
3925         }
3926
3927         for_each_active_iommu(iommu, drhd) {
3928
3929                 raw_spin_lock_irqsave(&iommu->register_lock, flag);
3930
3931                 writel(iommu->iommu_state[SR_DMAR_FECTL_REG],
3932                         iommu->reg + DMAR_FECTL_REG);
3933                 writel(iommu->iommu_state[SR_DMAR_FEDATA_REG],
3934                         iommu->reg + DMAR_FEDATA_REG);
3935                 writel(iommu->iommu_state[SR_DMAR_FEADDR_REG],
3936                         iommu->reg + DMAR_FEADDR_REG);
3937                 writel(iommu->iommu_state[SR_DMAR_FEUADDR_REG],
3938                         iommu->reg + DMAR_FEUADDR_REG);
3939
3940                 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
3941         }
3942
3943         for_each_active_iommu(iommu, drhd)
3944                 kfree(iommu->iommu_state);
3945 }
3946
3947 static struct syscore_ops iommu_syscore_ops = {
3948         .resume         = iommu_resume,
3949         .suspend        = iommu_suspend,
3950 };
3951
3952 static void __init init_iommu_pm_ops(void)
3953 {
3954         register_syscore_ops(&iommu_syscore_ops);
3955 }
3956
3957 #else
3958 static inline void init_iommu_pm_ops(void) {}
3959 #endif  /* CONFIG_SUSPEND */
3960
3961
3962 int __init dmar_parse_one_rmrr(struct acpi_dmar_header *header, void *arg)
3963 {
3964         struct acpi_dmar_reserved_memory *rmrr;
3965         struct dmar_rmrr_unit *rmrru;
3966
3967         rmrru = kzalloc(sizeof(*rmrru), GFP_KERNEL);
3968         if (!rmrru)
3969                 return -ENOMEM;
3970
3971         rmrru->hdr = header;
3972         rmrr = (struct acpi_dmar_reserved_memory *)header;
3973         rmrru->base_address = rmrr->base_address;
3974         rmrru->end_address = rmrr->end_address;
3975         rmrru->devices = dmar_alloc_dev_scope((void *)(rmrr + 1),
3976                                 ((void *)rmrr) + rmrr->header.length,
3977                                 &rmrru->devices_cnt);
3978         if (rmrru->devices_cnt && rmrru->devices == NULL) {
3979                 kfree(rmrru);
3980                 return -ENOMEM;
3981         }
3982
3983         list_add(&rmrru->list, &dmar_rmrr_units);
3984
3985         return 0;
3986 }
3987
3988 static struct dmar_atsr_unit *dmar_find_atsr(struct acpi_dmar_atsr *atsr)
3989 {
3990         struct dmar_atsr_unit *atsru;
3991         struct acpi_dmar_atsr *tmp;
3992
3993         list_for_each_entry_rcu(atsru, &dmar_atsr_units, list) {
3994                 tmp = (struct acpi_dmar_atsr *)atsru->hdr;
3995                 if (atsr->segment != tmp->segment)
3996                         continue;
3997                 if (atsr->header.length != tmp->header.length)
3998                         continue;
3999                 if (memcmp(atsr, tmp, atsr->header.length) == 0)
4000                         return atsru;
4001         }
4002
4003         return NULL;
4004 }
4005
4006 int dmar_parse_one_atsr(struct acpi_dmar_header *hdr, void *arg)
4007 {
4008         struct acpi_dmar_atsr *atsr;
4009         struct dmar_atsr_unit *atsru;
4010
4011         if (system_state != SYSTEM_BOOTING && !intel_iommu_enabled)
4012                 return 0;
4013
4014         atsr = container_of(hdr, struct acpi_dmar_atsr, header);
4015         atsru = dmar_find_atsr(atsr);
4016         if (atsru)
4017                 return 0;
4018
4019         atsru = kzalloc(sizeof(*atsru) + hdr->length, GFP_KERNEL);
4020         if (!atsru)
4021                 return -ENOMEM;
4022
4023         /*
4024          * If memory is allocated from slab by ACPI _DSM method, we need to
4025          * copy the memory content because the memory buffer will be freed
4026          * on return.
4027          */
4028         atsru->hdr = (void *)(atsru + 1);
4029         memcpy(atsru->hdr, hdr, hdr->length);
4030         atsru->include_all = atsr->flags & 0x1;
4031         if (!atsru->include_all) {
4032                 atsru->devices = dmar_alloc_dev_scope((void *)(atsr + 1),
4033                                 (void *)atsr + atsr->header.length,
4034                                 &atsru->devices_cnt);
4035                 if (atsru->devices_cnt && atsru->devices == NULL) {
4036                         kfree(atsru);
4037                         return -ENOMEM;
4038                 }
4039         }
4040
4041         list_add_rcu(&atsru->list, &dmar_atsr_units);
4042
4043         return 0;
4044 }
4045
4046 static void intel_iommu_free_atsr(struct dmar_atsr_unit *atsru)
4047 {
4048         dmar_free_dev_scope(&atsru->devices, &atsru->devices_cnt);
4049         kfree(atsru);
4050 }
4051
4052 int dmar_release_one_atsr(struct acpi_dmar_header *hdr, void *arg)
4053 {
4054         struct acpi_dmar_atsr *atsr;
4055         struct dmar_atsr_unit *atsru;
4056
4057         atsr = container_of(hdr, struct acpi_dmar_atsr, header);
4058         atsru = dmar_find_atsr(atsr);
4059         if (atsru) {
4060                 list_del_rcu(&atsru->list);
4061                 synchronize_rcu();
4062                 intel_iommu_free_atsr(atsru);
4063         }
4064
4065         return 0;
4066 }
4067
4068 int dmar_check_one_atsr(struct acpi_dmar_header *hdr, void *arg)
4069 {
4070         int i;
4071         struct device *dev;
4072         struct acpi_dmar_atsr *atsr;
4073         struct dmar_atsr_unit *atsru;
4074
4075         atsr = container_of(hdr, struct acpi_dmar_atsr, header);
4076         atsru = dmar_find_atsr(atsr);
4077         if (!atsru)
4078                 return 0;
4079
4080         if (!atsru->include_all && atsru->devices && atsru->devices_cnt)
4081                 for_each_active_dev_scope(atsru->devices, atsru->devices_cnt,
4082                                           i, dev)
4083                         return -EBUSY;
4084
4085         return 0;
4086 }
4087
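     /*
      * Bring a hot-added DMAR unit up to the state init_dmars() establishes at
      * boot: check that its capabilities match the already-computed global
      * settings, allocate domain IDs and a root entry, set up queued
      * invalidation and the fault interrupt, then enable translation.
      */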
4088 static int intel_iommu_add(struct dmar_drhd_unit *dmaru)
4089 {
4090         int sp, ret = 0;
4091         struct intel_iommu *iommu = dmaru->iommu;
4092
4093         if (g_iommus[iommu->seq_id])
4094                 return 0;
4095
4096         if (hw_pass_through && !ecap_pass_through(iommu->ecap)) {
4097                 pr_warn("%s: Doesn't support hardware pass through.\n",
4098                         iommu->name);
4099                 return -ENXIO;
4100         }
4101         if (!ecap_sc_support(iommu->ecap) &&
4102             domain_update_iommu_snooping(iommu)) {
4103                 pr_warn("%s: Doesn't support snooping.\n",
4104                         iommu->name);
4105                 return -ENXIO;
4106         }
4107         sp = domain_update_iommu_superpage(iommu) - 1;
4108         if (sp >= 0 && !(cap_super_page_val(iommu->cap) & (1 << sp))) {
4109                 pr_warn("%s: Doesn't support large page.\n",
4110                         iommu->name);
4111                 return -ENXIO;
4112         }
4113
4114         /*
4115          * Disable translation if already enabled prior to OS handover.
4116          */
4117         if (iommu->gcmd & DMA_GCMD_TE)
4118                 iommu_disable_translation(iommu);
4119
4120         g_iommus[iommu->seq_id] = iommu;
4121         ret = iommu_init_domains(iommu);
4122         if (ret == 0)
4123                 ret = iommu_alloc_root_entry(iommu);
4124         if (ret)
4125                 goto out;
4126
4127         if (dmaru->ignored) {
4128                 /*
4129                  * we always have to disable PMRs or DMA may fail on this device
4130                  */
4131                 if (force_on)
4132                         iommu_disable_protect_mem_regions(iommu);
4133                 return 0;
4134         }
4135
4136         intel_iommu_init_qi(iommu);
4137         iommu_flush_write_buffer(iommu);
4138         ret = dmar_set_interrupt(iommu);
4139         if (ret)
4140                 goto disable_iommu;
4141
4142         iommu_set_root_entry(iommu);
4143         iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
4144         iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
4145         iommu_enable_translation(iommu);
4146
4147         iommu_disable_protect_mem_regions(iommu);
4148         return 0;
4149
4150 disable_iommu:
4151         disable_dmar_iommu(iommu);
4152 out:
4153         free_dmar_iommu(iommu);
4154         return ret;
4155 }
4156
4157 int dmar_iommu_hotplug(struct dmar_drhd_unit *dmaru, bool insert)
4158 {
4159         int ret = 0;
4160         struct intel_iommu *iommu = dmaru->iommu;
4161
4162         if (!intel_iommu_enabled)
4163                 return 0;
4164         if (iommu == NULL)
4165                 return -EINVAL;
4166
4167         if (insert) {
4168                 ret = intel_iommu_add(dmaru);
4169         } else {
4170                 disable_dmar_iommu(iommu);
4171                 free_dmar_iommu(iommu);
4172         }
4173
4174         return ret;
4175 }
4176
4177 static void intel_iommu_free_dmars(void)
4178 {
4179         struct dmar_rmrr_unit *rmrru, *rmrr_n;
4180         struct dmar_atsr_unit *atsru, *atsr_n;
4181
4182         list_for_each_entry_safe(rmrru, rmrr_n, &dmar_rmrr_units, list) {
4183                 list_del(&rmrru->list);
4184                 dmar_free_dev_scope(&rmrru->devices, &rmrru->devices_cnt);
4185                 kfree(rmrru);
4186         }
4187
4188         list_for_each_entry_safe(atsru, atsr_n, &dmar_atsr_units, list) {
4189                 list_del(&atsru->list);
4190                 intel_iommu_free_atsr(atsru);
4191         }
4192 }
4193
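     /*
      * Return 1 if the PCIe root port above @dev is listed in (or covered by
      * an include-all) ATSR unit, i.e. ATS may be used for this device;
      * 0 otherwise.
      */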
4194 int dmar_find_matched_atsr_unit(struct pci_dev *dev)
4195 {
4196         int i, ret = 1;
4197         struct pci_bus *bus;
4198         struct pci_dev *bridge = NULL;
4199         struct device *tmp;
4200         struct acpi_dmar_atsr *atsr;
4201         struct dmar_atsr_unit *atsru;
4202
4203         dev = pci_physfn(dev);
4204         for (bus = dev->bus; bus; bus = bus->parent) {
4205                 bridge = bus->self;
4206                 if (!bridge || !pci_is_pcie(bridge) ||
4207                     pci_pcie_type(bridge) == PCI_EXP_TYPE_PCI_BRIDGE)
4208                         return 0;
4209                 if (pci_pcie_type(bridge) == PCI_EXP_TYPE_ROOT_PORT)
4210                         break;
4211         }
4212         if (!bridge)
4213                 return 0;
4214
4215         rcu_read_lock();
4216         list_for_each_entry_rcu(atsru, &dmar_atsr_units, list) {
4217                 atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
4218                 if (atsr->segment != pci_domain_nr(dev->bus))
4219                         continue;
4220
4221                 for_each_dev_scope(atsru->devices, atsru->devices_cnt, i, tmp)
4222                         if (tmp == &bridge->dev)
4223                                 goto out;
4224
4225                 if (atsru->include_all)
4226                         goto out;
4227         }
4228         ret = 0;
4229 out:
4230         rcu_read_unlock();
4231
4232         return ret;
4233 }
4234
4235 int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info *info)
4236 {
4237         int ret = 0;
4238         struct dmar_rmrr_unit *rmrru;
4239         struct dmar_atsr_unit *atsru;
4240         struct acpi_dmar_atsr *atsr;
4241         struct acpi_dmar_reserved_memory *rmrr;
4242
4243         if (!intel_iommu_enabled && system_state != SYSTEM_BOOTING)
4244                 return 0;
4245
4246         list_for_each_entry(rmrru, &dmar_rmrr_units, list) {
4247                 rmrr = container_of(rmrru->hdr,
4248                                     struct acpi_dmar_reserved_memory, header);
4249                 if (info->event == BUS_NOTIFY_ADD_DEVICE) {
4250                         ret = dmar_insert_dev_scope(info, (void *)(rmrr + 1),
4251                                 ((void *)rmrr) + rmrr->header.length,
4252                                 rmrr->segment, rmrru->devices,
4253                                 rmrru->devices_cnt);
4254                         if (ret < 0)
4255                                 return ret;
4256                 } else if (info->event == BUS_NOTIFY_DEL_DEVICE) {
4257                         dmar_remove_dev_scope(info, rmrr->segment,
4258                                 rmrru->devices, rmrru->devices_cnt);
4259                 }
4260         }
4261
4262         list_for_each_entry(atsru, &dmar_atsr_units, list) {
4263                 if (atsru->include_all)
4264                         continue;
4265
4266                 atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
4267                 if (info->event == BUS_NOTIFY_ADD_DEVICE) {
4268                         ret = dmar_insert_dev_scope(info, (void *)(atsr + 1),
4269                                         (void *)atsr + atsr->header.length,
4270                                         atsr->segment, atsru->devices,
4271                                         atsru->devices_cnt);
4272                         if (ret > 0)
4273                                 break;
4274                         else if (ret < 0)
4275                                 return ret;
4276                 } else if (info->event == BUS_NOTIFY_DEL_DEVICE) {
4277                         if (dmar_remove_dev_scope(info, atsr->segment,
4278                                         atsru->devices, atsru->devices_cnt))
4279                                 break;
4280                 }
4281         }
4282
4283         return 0;
4284 }
4285
4286 /*
4287  * Here we only respond to the action of a device being unbound from its driver.
4288  *
4289  * A newly added device is not attached to its DMAR domain here yet; that
4290  * happens when the device is first mapped to an IOVA.
4291  */
4292 static int device_notifier(struct notifier_block *nb,
4293                                   unsigned long action, void *data)
4294 {
4295         struct device *dev = data;
4296         struct dmar_domain *domain;
4297
4298         if (iommu_dummy(dev))
4299                 return 0;
4300
4301         if (action != BUS_NOTIFY_REMOVED_DEVICE)
4302                 return 0;
4303
4304         domain = find_domain(dev);
4305         if (!domain)
4306                 return 0;
4307
4308         down_read(&dmar_global_lock);
4309         dmar_remove_one_dev_info(domain, dev);
4310         if (!domain_type_is_vm_or_si(domain) && list_empty(&domain->devices))
4311                 domain_exit(domain);
4312         up_read(&dmar_global_lock);
4313
4314         return 0;
4315 }
4316
4317 static struct notifier_block device_nb = {
4318         .notifier_call = device_notifier,
4319 };
4320
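     /*
      * Keep the static identity map (si_domain) in sync with memory hotplug:
      * identity-map ranges that are about to come online, and unmap and flush
      * ranges that go offline or whose onlining is cancelled.
      */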
4321 static int intel_iommu_memory_notifier(struct notifier_block *nb,
4322                                        unsigned long val, void *v)
4323 {
4324         struct memory_notify *mhp = v;
4325         unsigned long long start, end;
4326         unsigned long start_vpfn, last_vpfn;
4327
4328         switch (val) {
4329         case MEM_GOING_ONLINE:
4330                 start = mhp->start_pfn << PAGE_SHIFT;
4331                 end = ((mhp->start_pfn + mhp->nr_pages) << PAGE_SHIFT) - 1;
4332                 if (iommu_domain_identity_map(si_domain, start, end)) {
4333                         pr_warn("Failed to build identity map for [%llx-%llx]\n",
4334                                 start, end);
4335                         return NOTIFY_BAD;
4336                 }
4337                 break;
4338
4339         case MEM_OFFLINE:
4340         case MEM_CANCEL_ONLINE:
4341                 start_vpfn = mm_to_dma_pfn(mhp->start_pfn);
4342                 last_vpfn = mm_to_dma_pfn(mhp->start_pfn + mhp->nr_pages - 1);
4343                 while (start_vpfn <= last_vpfn) {
4344                         struct iova *iova;
4345                         struct dmar_drhd_unit *drhd;
4346                         struct intel_iommu *iommu;
4347                         struct page *freelist;
4348
4349                         iova = find_iova(&si_domain->iovad, start_vpfn);
4350                         if (iova == NULL) {
4351                                 pr_debug("Failed to get IOVA for PFN %lx\n",
4352                                          start_vpfn);
4353                                 break;
4354                         }
4355
4356                         iova = split_and_remove_iova(&si_domain->iovad, iova,
4357                                                      start_vpfn, last_vpfn);
4358                         if (iova == NULL) {
4359                                 pr_warn("Failed to split IOVA PFN [%lx-%lx]\n",
4360                                         start_vpfn, last_vpfn);
4361                                 return NOTIFY_BAD;
4362                         }
4363
4364                         freelist = domain_unmap(si_domain, iova->pfn_lo,
4365                                                iova->pfn_hi);
4366
4367                         rcu_read_lock();
4368                         for_each_active_iommu(iommu, drhd)
4369                                 iommu_flush_iotlb_psi(iommu, si_domain,
4370                                         iova->pfn_lo, iova_size(iova),
4371                                         !freelist, 0);
4372                         rcu_read_unlock();
4373                         dma_free_pagelist(freelist);
4374
4375                         start_vpfn = iova->pfn_hi + 1;
4376                         free_iova_mem(iova);
4377                 }
4378                 break;
4379         }
4380
4381         return NOTIFY_OK;
4382 }
4383
4384 static struct notifier_block intel_iommu_memory_nb = {
4385         .notifier_call = intel_iommu_memory_notifier,
4386         .priority = 0
4387 };
4388
4389
4390 static ssize_t intel_iommu_show_version(struct device *dev,
4391                                         struct device_attribute *attr,
4392                                         char *buf)
4393 {
4394         struct intel_iommu *iommu = dev_get_drvdata(dev);
4395         u32 ver = readl(iommu->reg + DMAR_VER_REG);
4396         return sprintf(buf, "%d:%d\n",
4397                        DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver));
4398 }
4399 static DEVICE_ATTR(version, S_IRUGO, intel_iommu_show_version, NULL);
4400
4401 static ssize_t intel_iommu_show_address(struct device *dev,
4402                                         struct device_attribute *attr,
4403                                         char *buf)
4404 {
4405         struct intel_iommu *iommu = dev_get_drvdata(dev);
4406         return sprintf(buf, "%llx\n", iommu->reg_phys);
4407 }
4408 static DEVICE_ATTR(address, S_IRUGO, intel_iommu_show_address, NULL);
4409
4410 static ssize_t intel_iommu_show_cap(struct device *dev,
4411                                     struct device_attribute *attr,
4412                                     char *buf)
4413 {
4414         struct intel_iommu *iommu = dev_get_drvdata(dev);
4415         return sprintf(buf, "%llx\n", iommu->cap);
4416 }
4417 static DEVICE_ATTR(cap, S_IRUGO, intel_iommu_show_cap, NULL);
4418
4419 static ssize_t intel_iommu_show_ecap(struct device *dev,
4420                                     struct device_attribute *attr,
4421                                     char *buf)
4422 {
4423         struct intel_iommu *iommu = dev_get_drvdata(dev);
4424         return sprintf(buf, "%llx\n", iommu->ecap);
4425 }
4426 static DEVICE_ATTR(ecap, S_IRUGO, intel_iommu_show_ecap, NULL);
4427
4428 static ssize_t intel_iommu_show_ndoms(struct device *dev,
4429                                       struct device_attribute *attr,
4430                                       char *buf)
4431 {
4432         struct intel_iommu *iommu = dev_get_drvdata(dev);
4433         return sprintf(buf, "%ld\n", cap_ndoms(iommu->cap));
4434 }
4435 static DEVICE_ATTR(domains_supported, S_IRUGO, intel_iommu_show_ndoms, NULL);
4436
4437 static ssize_t intel_iommu_show_ndoms_used(struct device *dev,
4438                                            struct device_attribute *attr,
4439                                            char *buf)
4440 {
4441         struct intel_iommu *iommu = dev_get_drvdata(dev);
4442         return sprintf(buf, "%d\n", bitmap_weight(iommu->domain_ids,
4443                                                   cap_ndoms(iommu->cap)));
4444 }
4445 static DEVICE_ATTR(domains_used, S_IRUGO, intel_iommu_show_ndoms_used, NULL);
4446
4447 static struct attribute *intel_iommu_attrs[] = {
4448         &dev_attr_version.attr,
4449         &dev_attr_address.attr,
4450         &dev_attr_cap.attr,
4451         &dev_attr_ecap.attr,
4452         &dev_attr_domains_supported.attr,
4453         &dev_attr_domains_used.attr,
4454         NULL,
4455 };
4456
4457 static struct attribute_group intel_iommu_group = {
4458         .name = "intel-iommu",
4459         .attrs = intel_iommu_attrs,
4460 };
4461
4462 const struct attribute_group *intel_iommu_groups[] = {
4463         &intel_iommu_group,
4464         NULL,
4465 };
4466
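     /*
      * Main VT-d initialisation: parse the DMAR table, set up every IOMMU via
      * init_dmars(), install intel_dma_ops as the DMA API backend and register
      * the PM ops plus the bus and memory-hotplug notifiers.
      */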
4467 int __init intel_iommu_init(void)
4468 {
4469         int ret = -ENODEV;
4470         struct dmar_drhd_unit *drhd;
4471         struct intel_iommu *iommu;
4472
4473         /* VT-d is required for a TXT/tboot launch, so enforce that */
4474         force_on = tboot_force_iommu();
4475
4476         if (iommu_init_mempool()) {
4477                 if (force_on)
4478                         panic("tboot: Failed to initialize iommu memory\n");
4479                 return -ENOMEM;
4480         }
4481
4482         down_write(&dmar_global_lock);
4483         if (dmar_table_init()) {
4484                 if (force_on)
4485                         panic("tboot: Failed to initialize DMAR table\n");
4486                 goto out_free_dmar;
4487         }
4488
4489         if (dmar_dev_scope_init() < 0) {
4490                 if (force_on)
4491                         panic("tboot: Failed to initialize DMAR device scope\n");
4492                 goto out_free_dmar;
4493         }
4494
4495         if (no_iommu || dmar_disabled)
4496                 goto out_free_dmar;
4497
4498         if (list_empty(&dmar_rmrr_units))
4499                 pr_info("No RMRR found\n");
4500
4501         if (list_empty(&dmar_atsr_units))
4502                 pr_info("No ATSR found\n");
4503
4504         if (dmar_init_reserved_ranges()) {
4505                 if (force_on)
4506                         panic("tboot: Failed to reserve iommu ranges\n");
4507                 goto out_free_reserved_range;
4508         }
4509
4510         init_no_remapping_devices();
4511
4512         ret = init_dmars();
4513         if (ret) {
4514                 if (force_on)
4515                         panic("tboot: Failed to initialize DMARs\n");
4516                 pr_err("Initialization failed\n");
4517                 goto out_free_reserved_range;
4518         }
4519         up_write(&dmar_global_lock);
4520         pr_info("Intel(R) Virtualization Technology for Directed I/O\n");
4521
4522         init_timer(&unmap_timer);
4523 #ifdef CONFIG_SWIOTLB
4524         swiotlb = 0;
4525 #endif
4526         dma_ops = &intel_dma_ops;
4527
4528         init_iommu_pm_ops();
4529
4530         for_each_active_iommu(iommu, drhd)
4531                 iommu->iommu_dev = iommu_device_create(NULL, iommu,
4532                                                        intel_iommu_groups,
4533                                                        "%s", iommu->name);
4534
4535         bus_set_iommu(&pci_bus_type, &intel_iommu_ops);
4536         bus_register_notifier(&pci_bus_type, &device_nb);
4537         if (si_domain && !hw_pass_through)
4538                 register_memory_notifier(&intel_iommu_memory_nb);
4539
4540         intel_iommu_enabled = 1;
4541
4542         return 0;
4543
4544 out_free_reserved_range:
4545         put_iova_domain(&reserved_iova_list);
4546 out_free_dmar:
4547         intel_iommu_free_dmars();
4548         up_write(&dmar_global_lock);
4549         iommu_exit_mempool();
4550         return ret;
4551 }
4552
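/* pci_for_each_dma_alias() callback: clear the context entry for one alias. */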
4553 static int domain_context_clear_one_cb(struct pci_dev *pdev, u16 alias, void *opaque)
4554 {
4555         struct intel_iommu *iommu = opaque;
4556
4557         domain_context_clear_one(iommu, PCI_BUS_NUM(alias), alias & 0xff);
4558         return 0;
4559 }
4560
4561 /*
4562  * NB - intel-iommu lacks any sort of reference counting for the users of
4563  * dependent devices.  If multiple endpoints have intersecting dependent
4564  * devices, unbinding the driver from any one of them may leave the
4565  * others unable to operate.
4566  */
4567 static void domain_context_clear(struct intel_iommu *iommu, struct device *dev)
4568 {
4569         if (!iommu || !dev || !dev_is_pci(dev))
4570                 return;
4571
4572         pci_for_each_dma_alias(to_pci_dev(dev), &domain_context_clear_one_cb, iommu);
4573 }
4574
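/*
 * Detach one device from its domain: unlink its device_domain_info, disable
 * its device IOTLB, clear its context entries and drop the domain's
 * reference on the IOMMU, detaching the domain from that IOMMU once no
 * devices remain on it.
 */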
4575 static void dmar_remove_one_dev_info(struct dmar_domain *domain,
4576                                      struct device *dev)
4577 {
4578         struct device_domain_info *info;
4579         struct intel_iommu *iommu;
4580         unsigned long flags;
4581         u8 bus, devfn;
4582
4583         iommu = device_to_iommu(dev, &bus, &devfn);
4584         if (!iommu)
4585                 return;
4586
4587         info = dev->archdata.iommu;
4588
4589         if (WARN_ON(!info))
4590                 return;
4591
4592         spin_lock_irqsave(&device_domain_lock, flags);
4593         unlink_domain_info(info);
4594         spin_unlock_irqrestore(&device_domain_lock, flags);
4595
4596         iommu_disable_dev_iotlb(info);
4597         domain_context_clear(iommu, dev);
4598         free_devinfo_mem(info);
4599         domain_detach_iommu(domain, iommu);
4600
4601         spin_lock_irqsave(&domain->iommu_lock, flags);
4602         if (!domain->iommu_refcnt[iommu->seq_id])
4603                 iommu_detach_domain(domain, iommu);
4604         spin_unlock_irqrestore(&domain->iommu_lock, flags);
4605 }
4606
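/*
 * Prepare a domain allocated through the IOMMU API: set up its IOVA
 * allocator and reserved ranges, derive the AGAW from the requested guest
 * address width, and allocate the top-level page directory.
 */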
4607 static int md_domain_init(struct dmar_domain *domain, int guest_width)
4608 {
4609         int adjust_width;
4610
4611         init_iova_domain(&domain->iovad, VTD_PAGE_SIZE, IOVA_START_PFN,
4612                         DMA_32BIT_PFN);
4613         domain_reserve_special_ranges(domain);
4614
4615         /* calculate AGAW */
4616         domain->gaw = guest_width;
4617         adjust_width = guestwidth_to_adjustwidth(guest_width);
4618         domain->agaw = width_to_agaw(adjust_width);
4619
4620         domain->iommu_coherency = 0;
4621         domain->iommu_snooping = 0;
4622         domain->iommu_superpage = 0;
4623         domain->max_addr = 0;
4624
4625         /* always allocate the top pgd */
4626         domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
4627         if (!domain->pgd)
4628                 return -ENOMEM;
4629         domain_flush_cache(domain, domain->pgd, PAGE_SIZE);
4630         return 0;
4631 }
4632
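/*
 * iommu_ops->domain_alloc: only IOMMU_DOMAIN_UNMANAGED is supported.  The
 * domain is backed by a virtual-machine dmar_domain with an aperture of
 * DEFAULT_DOMAIN_ADDRESS_WIDTH bits.
 */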
4633 static struct iommu_domain *intel_iommu_domain_alloc(unsigned type)
4634 {
4635         struct dmar_domain *dmar_domain;
4636         struct iommu_domain *domain;
4637
4638         if (type != IOMMU_DOMAIN_UNMANAGED)
4639                 return NULL;
4640
4641         dmar_domain = alloc_domain(DOMAIN_FLAG_VIRTUAL_MACHINE);
4642         if (!dmar_domain) {
4643                 pr_err("Can't allocate dmar_domain\n");
4644                 return NULL;
4645         }
4646         if (md_domain_init(dmar_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
4647                 pr_err("Domain initialization failed\n");
4648                 domain_exit(dmar_domain);
4649                 return NULL;
4650         }
4651         domain_update_iommu_cap(dmar_domain);
4652
4653         domain = &dmar_domain->domain;
4654         domain->geometry.aperture_start = 0;
4655         domain->geometry.aperture_end   = __DOMAIN_MAX_ADDR(dmar_domain->gaw);
4656         domain->geometry.force_aperture = true;
4657
4658         return domain;
4659 }
4660
4661 static void intel_iommu_domain_free(struct iommu_domain *domain)
4662 {
4663         domain_exit(to_dmar_domain(domain));
4664 }
4665
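/*
 * iommu_ops->attach_dev: refuse devices locked down by an RMRR, detach the
 * device from any previous domain, shrink the domain's page table if this
 * IOMMU supports fewer levels, and finally add the device to the domain.
 */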
4666 static int intel_iommu_attach_device(struct iommu_domain *domain,
4667                                      struct device *dev)
4668 {
4669         struct dmar_domain *dmar_domain = to_dmar_domain(domain);
4670         struct intel_iommu *iommu;
4671         int addr_width;
4672         u8 bus, devfn;
4673
4674         if (device_is_rmrr_locked(dev)) {
4675                 dev_warn(dev, "Device is ineligible for IOMMU domain attach due to platform RMRR requirement.  Contact your platform vendor.\n");
4676                 return -EPERM;
4677         }
4678
4679         /* normally dev is not mapped */
4680         if (unlikely(domain_context_mapped(dev))) {
4681                 struct dmar_domain *old_domain;
4682
4683                 old_domain = find_domain(dev);
4684                 if (old_domain) {
4685                         if (domain_type_is_vm_or_si(dmar_domain))
4686                                 dmar_remove_one_dev_info(old_domain, dev);
4687                         else
4688                                 domain_remove_dev_info(old_domain);
4689
4690                         if (!domain_type_is_vm_or_si(old_domain) &&
4691                              list_empty(&old_domain->devices))
4692                                 domain_exit(old_domain);
4693                 }
4694         }
4695
4696         iommu = device_to_iommu(dev, &bus, &devfn);
4697         if (!iommu)
4698                 return -ENODEV;
4699
4700         /* check if this iommu agaw is sufficient for max mapped address */
4701         addr_width = agaw_to_width(iommu->agaw);
4702         if (addr_width > cap_mgaw(iommu->cap))
4703                 addr_width = cap_mgaw(iommu->cap);
4704
4705         if (dmar_domain->max_addr > (1LL << addr_width)) {
4706                 pr_err("%s: iommu width (%d) is not sufficient for the mapped address (%llx)\n",
4707                        __func__, addr_width,
4708                        dmar_domain->max_addr);
4709                 return -EFAULT;
4710         }
4711         dmar_domain->gaw = addr_width;
4712
4713         /*
4714          * Knock out extra levels of page tables if necessary
4715          */
4716         while (iommu->agaw < dmar_domain->agaw) {
4717                 struct dma_pte *pte;
4718
4719                 pte = dmar_domain->pgd;
4720                 if (dma_pte_present(pte)) {
4721                         dmar_domain->pgd = (struct dma_pte *)
4722                                 phys_to_virt(dma_pte_addr(pte));
4723                         free_pgtable_page(pte);
4724                 }
4725                 dmar_domain->agaw--;
4726         }
4727
4728         return domain_add_dev_info(dmar_domain, dev);
4729 }
4730
4731 static void intel_iommu_detach_device(struct iommu_domain *domain,
4732                                       struct device *dev)
4733 {
4734         dmar_remove_one_dev_info(to_dmar_domain(domain), dev);
4735 }
4736
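/*
 * iommu_ops->map: translate IOMMU_READ/WRITE/CACHE into DMA PTE bits (SNP
 * only when the hardware supports snooping), check the range against the
 * domain's address width, and install the mapping.
 */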
4737 static int intel_iommu_map(struct iommu_domain *domain,
4738                            unsigned long iova, phys_addr_t hpa,
4739                            size_t size, int iommu_prot)
4740 {
4741         struct dmar_domain *dmar_domain = to_dmar_domain(domain);
4742         u64 max_addr;
4743         int prot = 0;
4744         int ret;
4745
4746         if (iommu_prot & IOMMU_READ)
4747                 prot |= DMA_PTE_READ;
4748         if (iommu_prot & IOMMU_WRITE)
4749                 prot |= DMA_PTE_WRITE;
4750         if ((iommu_prot & IOMMU_CACHE) && dmar_domain->iommu_snooping)
4751                 prot |= DMA_PTE_SNP;
4752
4753         max_addr = iova + size;
4754         if (dmar_domain->max_addr < max_addr) {
4755                 u64 end;
4756
4757                 /* check if minimum agaw is sufficient for mapped address */
4758                 end = __DOMAIN_MAX_ADDR(dmar_domain->gaw) + 1;
4759                 if (end < max_addr) {
4760                         pr_err("%s: iommu width (%d) is not sufficient for the mapped address (%llx)\n",
4761                                __func__, dmar_domain->gaw,
4762                                max_addr);
4763                         return -EFAULT;
4764                 }
4765                 dmar_domain->max_addr = max_addr;
4766         }
4767         /* Round up size to the next multiple of VTD_PAGE_SIZE, if it and
4768            the low bits of hpa would take us onto the next page */
4769         size = aligned_nrpages(hpa, size);
4770         ret = domain_pfn_mapping(dmar_domain, iova >> VTD_PAGE_SHIFT,
4771                                  hpa >> VTD_PAGE_SHIFT, size, prot);
4772         return ret;
4773 }
4774
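/*
 * iommu_ops->unmap: clear the page tables for the range (rounded up to the
 * size of the page actually mapped there), flush the IOTLB on every IOMMU
 * the domain is attached to, then free the unlinked page-table pages.
 */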
4775 static size_t intel_iommu_unmap(struct iommu_domain *domain,
4776                                 unsigned long iova, size_t size)
4777 {
4778         struct dmar_domain *dmar_domain = to_dmar_domain(domain);
4779         struct page *freelist = NULL;
4780         struct intel_iommu *iommu;
4781         unsigned long start_pfn, last_pfn;
4782         unsigned int npages;
4783         int iommu_id, level = 0;
4784
4785         /* Cope with horrid API which requires us to unmap more than the
4786            size argument if it happens to be a large-page mapping. */
4787         if (!pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, &level))
4788                 BUG();
4789
4790         if (size < VTD_PAGE_SIZE << level_to_offset_bits(level))
4791                 size = VTD_PAGE_SIZE << level_to_offset_bits(level);
4792
4793         start_pfn = iova >> VTD_PAGE_SHIFT;
4794         last_pfn = (iova + size - 1) >> VTD_PAGE_SHIFT;
4795
4796         freelist = domain_unmap(dmar_domain, start_pfn, last_pfn);
4797
4798         npages = last_pfn - start_pfn + 1;
4799
4800         for_each_domain_iommu(iommu_id, dmar_domain) {
4801                 iommu = g_iommus[iommu_id];
4802
4803                 iommu_flush_iotlb_psi(iommu, dmar_domain,
4804                                       start_pfn, npages, !freelist, 0);
4805         }
4806
4807         dma_free_pagelist(freelist);
4808
4809         if (dmar_domain->max_addr == iova + size)
4810                 dmar_domain->max_addr = iova;
4811
4812         return size;
4813 }
4814
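/* iommu_ops->iova_to_phys: walk the page table; returns 0 if nothing is mapped. */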
4815 static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
4816                                             dma_addr_t iova)
4817 {
4818         struct dmar_domain *dmar_domain = to_dmar_domain(domain);
4819         struct dma_pte *pte;
4820         int level = 0;
4821         u64 phys = 0;
4822
4823         pte = pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, &level);
4824         if (pte)
4825                 phys = dma_pte_addr(pte);
4826
4827         return phys;
4828 }
4829
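/* iommu_ops->capable: report cache coherency (snooping) and interrupt remapping. */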
4830 static bool intel_iommu_capable(enum iommu_cap cap)
4831 {
4832         if (cap == IOMMU_CAP_CACHE_COHERENCY)
4833                 return domain_update_iommu_snooping(NULL) == 1;
4834         if (cap == IOMMU_CAP_INTR_REMAP)
4835                 return irq_remapping_enabled == 1;
4836
4837         return false;
4838 }
4839
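/*
 * iommu_ops->add_device: link the device to its IOMMU in sysfs and put it
 * into an IOMMU group.
 */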
4840 static int intel_iommu_add_device(struct device *dev)
4841 {
4842         struct intel_iommu *iommu;
4843         struct iommu_group *group;
4844         u8 bus, devfn;
4845
4846         iommu = device_to_iommu(dev, &bus, &devfn);
4847         if (!iommu)
4848                 return -ENODEV;
4849
4850         iommu_device_link(iommu->iommu_dev, dev);
4851
4852         group = iommu_group_get_for_dev(dev);
4853
4854         if (IS_ERR(group))
4855                 return PTR_ERR(group);
4856
4857         iommu_group_put(group);
4858         return 0;
4859 }
4860
4861 static void intel_iommu_remove_device(struct device *dev)
4862 {
4863         struct intel_iommu *iommu;
4864         u8 bus, devfn;
4865
4866         iommu = device_to_iommu(dev, &bus, &devfn);
4867         if (!iommu)
4868                 return;
4869
4870         iommu_group_remove_device(dev);
4871
4872         iommu_device_unlink(iommu->iommu_dev, dev);
4873 }
4874
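/* IOMMU API callbacks, registered on the PCI bus from intel_iommu_init(). */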
4875 static const struct iommu_ops intel_iommu_ops = {
4876         .capable        = intel_iommu_capable,
4877         .domain_alloc   = intel_iommu_domain_alloc,
4878         .domain_free    = intel_iommu_domain_free,
4879         .attach_dev     = intel_iommu_attach_device,
4880         .detach_dev     = intel_iommu_detach_device,
4881         .map            = intel_iommu_map,
4882         .unmap          = intel_iommu_unmap,
4883         .map_sg         = default_iommu_map_sg,
4884         .iova_to_phys   = intel_iommu_iova_to_phys,
4885         .add_device     = intel_iommu_add_device,
4886         .remove_device  = intel_iommu_remove_device,
4887         .pgsize_bitmap  = INTEL_IOMMU_PGSIZES,
4888 };
4889
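/*
 * PCI quirks for chipsets whose integrated graphics cannot be safely used
 * with DMA remapping: either disable the graphics IOMMU entirely or force
 * the workarounds the hardware needs.
 */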
4890 static void quirk_iommu_g4x_gfx(struct pci_dev *dev)
4891 {
4892         /* G4x/GM45 integrated gfx dmar support is totally busted. */
4893         pr_info("Disabling IOMMU for graphics on this chipset\n");
4894         dmar_map_gfx = 0;
4895 }
4896
4897 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_g4x_gfx);
4898 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e00, quirk_iommu_g4x_gfx);
4899 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e10, quirk_iommu_g4x_gfx);
4900 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e20, quirk_iommu_g4x_gfx);
4901 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e30, quirk_iommu_g4x_gfx);
4902 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e40, quirk_iommu_g4x_gfx);
4903 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e90, quirk_iommu_g4x_gfx);
4904
4905 static void quirk_iommu_rwbf(struct pci_dev *dev)
4906 {
4907         /*
4908          * Mobile 4 Series Chipset neglects to set RWBF capability,
4909          * but needs it. Same seems to hold for the desktop versions.
4910          */
4911         pr_info("Forcing write-buffer flush capability\n");
4912         rwbf_quirk = 1;
4913 }
4914
4915 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_rwbf);
4916 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e00, quirk_iommu_rwbf);
4917 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e10, quirk_iommu_rwbf);
4918 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e20, quirk_iommu_rwbf);
4919 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e30, quirk_iommu_rwbf);
4920 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e40, quirk_iommu_rwbf);
4921 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e90, quirk_iommu_rwbf);
4922
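/*
 * Fields of the GGC register checked by the quirk below: if the BIOS did
 * not enable the VT-d-visible GTT (GGC_MEMORY_VT_ENABLED), the IOMMU has no
 * shadow GTT to work with and graphics translation is disabled; otherwise
 * batched IOTLB flushing is disabled (strict mode).
 */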
4923 #define GGC 0x52
4924 #define GGC_MEMORY_SIZE_MASK    (0xf << 8)
4925 #define GGC_MEMORY_SIZE_NONE    (0x0 << 8)
4926 #define GGC_MEMORY_SIZE_1M      (0x1 << 8)
4927 #define GGC_MEMORY_SIZE_2M      (0x3 << 8)
4928 #define GGC_MEMORY_VT_ENABLED   (0x8 << 8)
4929 #define GGC_MEMORY_SIZE_2M_VT   (0x9 << 8)
4930 #define GGC_MEMORY_SIZE_3M_VT   (0xa << 8)
4931 #define GGC_MEMORY_SIZE_4M_VT   (0xb << 8)
4932
4933 static void quirk_calpella_no_shadow_gtt(struct pci_dev *dev)
4934 {
4935         unsigned short ggc;
4936
4937         if (pci_read_config_word(dev, GGC, &ggc))
4938                 return;
4939
4940         if (!(ggc & GGC_MEMORY_VT_ENABLED)) {
4941                 pr_info("BIOS has allocated no shadow GTT; disabling IOMMU for graphics\n");
4942                 dmar_map_gfx = 0;
4943         } else if (dmar_map_gfx) {
4944                 /* we have to ensure the gfx device is idle before we flush */
4945                 pr_info("Disabling batched IOTLB flush on Ironlake\n");
4946                 intel_iommu_strict = 1;
4947         }
4948 }
4949 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0040, quirk_calpella_no_shadow_gtt);
4950 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0044, quirk_calpella_no_shadow_gtt);
4951 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0062, quirk_calpella_no_shadow_gtt);
4952 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x006a, quirk_calpella_no_shadow_gtt);
4953
4954 /* On Tylersburg chipsets, some BIOSes have been known to enable the
4955    ISOCH DMAR unit for the Azalia sound device, but not give it any
4956    TLB entries, which causes it to deadlock. Check for that.  We do
4957    this in a function called from init_dmars(), instead of in a PCI
4958    quirk, because we don't want to print the obnoxious "BIOS broken"
4959    message if VT-d is actually disabled.
4960 */
4961 static void __init check_tylersburg_isoch(void)
4962 {
4963         struct pci_dev *pdev;
4964         uint32_t vtisochctrl;
4965
4966         /* If there's no Azalia in the system anyway, forget it. */
4967         pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x3a3e, NULL);
4968         if (!pdev)
4969                 return;
4970         pci_dev_put(pdev);
4971
4972         /* System Management Registers. Might be hidden, in which case
4973            we can't do the sanity check. But that's OK, because the
4974            known-broken BIOSes _don't_ actually hide it, so far. */
4975         pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x342e, NULL);
4976         if (!pdev)
4977                 return;
4978
4979         if (pci_read_config_dword(pdev, 0x188, &vtisochctrl)) {
4980                 pci_dev_put(pdev);
4981                 return;
4982         }
4983
4984         pci_dev_put(pdev);
4985
4986         /* If Azalia DMA is routed to the non-isoch DMAR unit, fine. */
4987         if (vtisochctrl & 1)
4988                 return;
4989
4990         /* Drop all bits other than the number of TLB entries */
4991         vtisochctrl &= 0x1c;
4992
4993         /* If we have the recommended number of TLB entries (16), fine. */
4994         if (vtisochctrl == 0x10)
4995                 return;
4996
4997         /* Zero TLB entries? You get to ride the short bus to school. */
4998         if (!vtisochctrl) {
4999                 WARN(1, "Your BIOS is broken; DMA routed to ISOCH DMAR unit but no TLB space.\n"
5000                      "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
5001                      dmi_get_system_info(DMI_BIOS_VENDOR),
5002                      dmi_get_system_info(DMI_BIOS_VERSION),
5003                      dmi_get_system_info(DMI_PRODUCT_VERSION));
5004                 iommu_identity_mapping |= IDENTMAP_AZALIA;
5005                 return;
5006         }
5007
5008         pr_warn("Recommended TLB entries for ISOCH unit is 16; your BIOS set %d\n",
5009                vtisochctrl);
5010 }