iommu/vt-d: Move context-mapping into dmar_insert_dev_info
[firefly-linux-kernel-4.4.55.git] / drivers / iommu / intel-iommu.c
1 /*
2  * Copyright © 2006-2014 Intel Corporation.
3  *
4  * This program is free software; you can redistribute it and/or modify it
5  * under the terms and conditions of the GNU General Public License,
6  * version 2, as published by the Free Software Foundation.
7  *
8  * This program is distributed in the hope it will be useful, but WITHOUT
9  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
11  * more details.
12  *
13  * Authors: David Woodhouse <dwmw2@infradead.org>,
14  *          Ashok Raj <ashok.raj@intel.com>,
15  *          Shaohua Li <shaohua.li@intel.com>,
16  *          Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>,
17  *          Fenghua Yu <fenghua.yu@intel.com>
18  *          Joerg Roedel <jroedel@suse.de>
19  */
20
21 #define pr_fmt(fmt)     "DMAR: " fmt
22
23 #include <linux/init.h>
24 #include <linux/bitmap.h>
25 #include <linux/debugfs.h>
26 #include <linux/export.h>
27 #include <linux/slab.h>
28 #include <linux/irq.h>
29 #include <linux/interrupt.h>
30 #include <linux/spinlock.h>
31 #include <linux/pci.h>
32 #include <linux/dmar.h>
33 #include <linux/dma-mapping.h>
34 #include <linux/mempool.h>
35 #include <linux/memory.h>
36 #include <linux/timer.h>
37 #include <linux/iova.h>
38 #include <linux/iommu.h>
39 #include <linux/intel-iommu.h>
40 #include <linux/syscore_ops.h>
41 #include <linux/tboot.h>
42 #include <linux/dmi.h>
43 #include <linux/pci-ats.h>
44 #include <linux/memblock.h>
45 #include <linux/dma-contiguous.h>
46 #include <linux/crash_dump.h>
47 #include <asm/irq_remapping.h>
48 #include <asm/cacheflush.h>
49 #include <asm/iommu.h>
50
51 #include "irq_remapping.h"
52
53 #define ROOT_SIZE               VTD_PAGE_SIZE
54 #define CONTEXT_SIZE            VTD_PAGE_SIZE
55
56 #define IS_GFX_DEVICE(pdev) ((pdev->class >> 16) == PCI_BASE_CLASS_DISPLAY)
57 #define IS_USB_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_SERIAL_USB)
58 #define IS_ISA_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_BRIDGE_ISA)
59 #define IS_AZALIA(pdev) ((pdev)->vendor == 0x8086 && (pdev)->device == 0x3a3e)
60
61 #define IOAPIC_RANGE_START      (0xfee00000)
62 #define IOAPIC_RANGE_END        (0xfeefffff)
63 #define IOVA_START_ADDR         (0x1000)
64
65 #define DEFAULT_DOMAIN_ADDRESS_WIDTH 48
66
67 #define MAX_AGAW_WIDTH 64
68 #define MAX_AGAW_PFN_WIDTH      (MAX_AGAW_WIDTH - VTD_PAGE_SHIFT)
69
70 #define __DOMAIN_MAX_PFN(gaw)  ((((uint64_t)1) << (gaw-VTD_PAGE_SHIFT)) - 1)
71 #define __DOMAIN_MAX_ADDR(gaw) ((((uint64_t)1) << gaw) - 1)
72
73 /* We limit DOMAIN_MAX_PFN to fit in an unsigned long, and DOMAIN_MAX_ADDR
74    to match. That way, we can use 'unsigned long' for PFNs with impunity. */
75 #define DOMAIN_MAX_PFN(gaw)     ((unsigned long) min_t(uint64_t, \
76                                 __DOMAIN_MAX_PFN(gaw), (unsigned long)-1))
77 #define DOMAIN_MAX_ADDR(gaw)    (((uint64_t)__DOMAIN_MAX_PFN(gaw)) << VTD_PAGE_SHIFT)
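/*
 * Worked example: with the default 48-bit address width,
 * __DOMAIN_MAX_PFN(48) is 2^36 - 1. On 64-bit that fits in an unsigned
 * long unchanged; on 32-bit the min_t() above clamps DOMAIN_MAX_PFN to
 * ULONG_MAX.
 */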
78
79 /* IO virtual address start page frame number */
80 #define IOVA_START_PFN          (1)
81
82 #define IOVA_PFN(addr)          ((addr) >> PAGE_SHIFT)
83 #define DMA_32BIT_PFN           IOVA_PFN(DMA_BIT_MASK(32))
84 #define DMA_64BIT_PFN           IOVA_PFN(DMA_BIT_MASK(64))
85
86 /* page table handling */
87 #define LEVEL_STRIDE            (9)
88 #define LEVEL_MASK              (((u64)1 << LEVEL_STRIDE) - 1)
89
90 /*
91  * This bitmap is used to advertise the page sizes our hardware supports
92  * to the IOMMU core, which will then use this information to split
93  * physically contiguous memory regions it is mapping into page sizes
94  * that we support.
95  *
96  * Traditionally the IOMMU core just handed us the mappings directly,
97  * after making sure the size is an order of a 4KiB page and that the
98  * mapping has natural alignment.
99  *
100  * To retain this behavior, we currently advertise that we support
101  * all page sizes that are an order of 4KiB.
102  *
103  * If at some point we'd like to utilize the IOMMU core's new behavior,
104  * we could change this to advertise the real page sizes we support.
105  */
106 #define INTEL_IOMMU_PGSIZES     (~0xFFFUL)
107
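/*
 * AGAW ("adjusted guest address width") helpers: agaw 0 corresponds to a
 * 2-level, 30-bit page table; each increment adds one level and
 * LEVEL_STRIDE (9) bits of address width, capped at MAX_AGAW_WIDTH.
 */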
108 static inline int agaw_to_level(int agaw)
109 {
110         return agaw + 2;
111 }
112
113 static inline int agaw_to_width(int agaw)
114 {
115         return min_t(int, 30 + agaw * LEVEL_STRIDE, MAX_AGAW_WIDTH);
116 }
117
118 static inline int width_to_agaw(int width)
119 {
120         return DIV_ROUND_UP(width - 30, LEVEL_STRIDE);
121 }
122
123 static inline unsigned int level_to_offset_bits(int level)
124 {
125         return (level - 1) * LEVEL_STRIDE;
126 }
127
128 static inline int pfn_level_offset(unsigned long pfn, int level)
129 {
130         return (pfn >> level_to_offset_bits(level)) & LEVEL_MASK;
131 }
132
133 static inline unsigned long level_mask(int level)
134 {
135         return -1UL << level_to_offset_bits(level);
136 }
137
138 static inline unsigned long level_size(int level)
139 {
140         return 1UL << level_to_offset_bits(level);
141 }
142
143 static inline unsigned long align_to_level(unsigned long pfn, int level)
144 {
145         return (pfn + level_size(level) - 1) & level_mask(level);
146 }
147
148 static inline unsigned long lvl_to_nr_pages(unsigned int lvl)
149 {
150         return  1 << min_t(int, (lvl - 1) * LEVEL_STRIDE, MAX_AGAW_PFN_WIDTH);
151 }
152
153 /* VT-d pages must always be _smaller_ than MM pages. Otherwise things
154    are never going to work. */
155 static inline unsigned long dma_to_mm_pfn(unsigned long dma_pfn)
156 {
157         return dma_pfn >> (PAGE_SHIFT - VTD_PAGE_SHIFT);
158 }
159
160 static inline unsigned long mm_to_dma_pfn(unsigned long mm_pfn)
161 {
162         return mm_pfn << (PAGE_SHIFT - VTD_PAGE_SHIFT);
163 }
164 static inline unsigned long page_to_dma_pfn(struct page *pg)
165 {
166         return mm_to_dma_pfn(page_to_pfn(pg));
167 }
168 static inline unsigned long virt_to_dma_pfn(void *p)
169 {
170         return page_to_dma_pfn(virt_to_page(p));
171 }
172
173 /* global iommu list, set NULL for ignored DMAR units */
174 static struct intel_iommu **g_iommus;
175
176 static void __init check_tylersburg_isoch(void);
177 static int rwbf_quirk;
178
179 /*
180  * set to 1 to panic kernel if can't successfully enable VT-d
181  * (used when kernel is launched w/ TXT)
182  */
183 static int force_on = 0;
184
185 /*
186  * 0: Present
187  * 1-11: Reserved
188  * 12-63: Context Ptr (12 - (haw-1))
189  * 64-127: Reserved
190  */
191 struct root_entry {
192         u64     lo;
193         u64     hi;
194 };
195 #define ROOT_ENTRY_NR (VTD_PAGE_SIZE/sizeof(struct root_entry))
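/*
 * The root table is a single 4KiB page of 16-byte root entries, i.e.
 * ROOT_ENTRY_NR == 256 entries, one per PCI bus number.
 */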
196
197 /*
198  * Take a root_entry and return the Lower Context Table Pointer (LCTP)
199  * if marked present.
200  */
201 static phys_addr_t root_entry_lctp(struct root_entry *re)
202 {
203         if (!(re->lo & 1))
204                 return 0;
205
206         return re->lo & VTD_PAGE_MASK;
207 }
208
209 /*
210  * Take a root_entry and return the Upper Context Table Pointer (UCTP)
211  * if marked present.
212  */
213 static phys_addr_t root_entry_uctp(struct root_entry *re)
214 {
215         if (!(re->hi & 1))
216                 return 0;
217
218         return re->hi & VTD_PAGE_MASK;
219 }
220 /*
221  * low 64 bits:
222  * 0: present
223  * 1: fault processing disable
224  * 2-3: translation type
225  * 12-63: address space root
226  * high 64 bits:
227  * 0-2: address width
228  * 3-6: avail (available to software)
229  * 8-23: domain id
230  */
231 struct context_entry {
232         u64 lo;
233         u64 hi;
234 };
235
236 static inline void context_clear_pasid_enable(struct context_entry *context)
237 {
238         context->lo &= ~(1ULL << 11);
239 }
240
241 static inline bool context_pasid_enabled(struct context_entry *context)
242 {
243         return !!(context->lo & (1ULL << 11));
244 }
245
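/*
 * Bit 3 of the high qword is one of the software-available bits; it is
 * used here to mark context entries that were copied from a previous
 * kernel's translation tables (e.g. when a kdump kernel boots with
 * translation already enabled).
 */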
246 static inline void context_set_copied(struct context_entry *context)
247 {
248         context->hi |= (1ull << 3);
249 }
250
251 static inline bool context_copied(struct context_entry *context)
252 {
253         return !!(context->hi & (1ULL << 3));
254 }
255
256 static inline bool __context_present(struct context_entry *context)
257 {
258         return (context->lo & 1);
259 }
260
261 static inline bool context_present(struct context_entry *context)
262 {
263         return context_pasid_enabled(context) ?
264              __context_present(context) :
265              __context_present(context) && !context_copied(context);
266 }
267
268 static inline void context_set_present(struct context_entry *context)
269 {
270         context->lo |= 1;
271 }
272
273 static inline void context_set_fault_enable(struct context_entry *context)
274 {
275         context->lo &= (((u64)-1) << 2) | 1;
276 }
277
278 static inline void context_set_translation_type(struct context_entry *context,
279                                                 unsigned long value)
280 {
281         context->lo &= (((u64)-1) << 4) | 3;
282         context->lo |= (value & 3) << 2;
283 }
284
285 static inline void context_set_address_root(struct context_entry *context,
286                                             unsigned long value)
287 {
288         context->lo &= ~VTD_PAGE_MASK;
289         context->lo |= value & VTD_PAGE_MASK;
290 }
291
292 static inline void context_set_address_width(struct context_entry *context,
293                                              unsigned long value)
294 {
295         context->hi |= value & 7;
296 }
297
298 static inline void context_set_domain_id(struct context_entry *context,
299                                          unsigned long value)
300 {
301         context->hi |= (value & ((1 << 16) - 1)) << 8;
302 }
303
304 static inline int context_domain_id(struct context_entry *c)
305 {
306         return((c->hi >> 8) & 0xffff);
307 }
308
309 static inline void context_clear_entry(struct context_entry *context)
310 {
311         context->lo = 0;
312         context->hi = 0;
313 }
314
315 /*
316  * 0: readable
317  * 1: writable
318  * 2-6: reserved
319  * 7: super page
320  * 8-10: available
321  * 11: snoop behavior
322  * 12-63: Host physical address
323  */
324 struct dma_pte {
325         u64 val;
326 };
327
328 static inline void dma_clear_pte(struct dma_pte *pte)
329 {
330         pte->val = 0;
331 }
332
333 static inline u64 dma_pte_addr(struct dma_pte *pte)
334 {
335 #ifdef CONFIG_64BIT
336         return pte->val & VTD_PAGE_MASK;
337 #else
338         /* Must have a full atomic 64-bit read */
339         return  __cmpxchg64(&pte->val, 0ULL, 0ULL) & VTD_PAGE_MASK;
340 #endif
341 }
342
343 static inline bool dma_pte_present(struct dma_pte *pte)
344 {
345         return (pte->val & 3) != 0;
346 }
347
348 static inline bool dma_pte_superpage(struct dma_pte *pte)
349 {
350         return (pte->val & DMA_PTE_LARGE_PAGE);
351 }
352
353 static inline int first_pte_in_page(struct dma_pte *pte)
354 {
355         return !((unsigned long)pte & ~VTD_PAGE_MASK);
356 }
357
358 /*
359  * This domain is a static identity-mapping domain.
360  *      1. It creates a static 1:1 mapping of all usable memory.
361  *      2. It maps to each iommu if successful.
362  *      3. Each iommu maps to this domain if successful.
363  */
364 static struct dmar_domain *si_domain;
365 static int hw_pass_through = 1;
366
367 /*
368  * Domain represents a virtual machine; more than one device
369  * across iommus may be owned by one domain, e.g. a kvm guest.
370  */
371 #define DOMAIN_FLAG_VIRTUAL_MACHINE     (1 << 0)
372
373 /* si_domain contains multiple devices */
374 #define DOMAIN_FLAG_STATIC_IDENTITY     (1 << 1)
375
376 #define for_each_domain_iommu(idx, domain)                      \
377         for (idx = 0; idx < g_num_of_iommus; idx++)             \
378                 if (domain->iommu_refcnt[idx])
379
380 struct dmar_domain {
381         int     nid;                    /* node id */
382
383         unsigned        iommu_refcnt[DMAR_UNITS_SUPPORTED];
384                                         /* Refcount of devices per iommu */
385
386
387         u16             iommu_did[DMAR_UNITS_SUPPORTED];
388                                         /* Domain ids per IOMMU. Use u16 since
389                                          * domain ids are 16 bit wide according
390                                          * to VT-d spec, section 9.3 */
391
392         struct list_head devices;       /* all devices' list */
393         struct iova_domain iovad;       /* iova's that belong to this domain */
394
395         struct dma_pte  *pgd;           /* virtual address */
396         int             gaw;            /* max guest address width */
397
398         /* adjusted guest address width, 0 is level 2 30-bit */
399         int             agaw;
400
401         int             flags;          /* flags to find out type of domain */
402
403         int             iommu_coherency;/* indicate coherency of iommu access */
404         int             iommu_snooping; /* indicate snooping control feature*/
405         int             iommu_count;    /* reference count of iommu */
406         int             iommu_superpage;/* Level of superpages supported:
407                                            0 == 4KiB (no superpages), 1 == 2MiB,
408                                            2 == 1GiB, 3 == 512GiB, 4 == 256TiB */
409         spinlock_t      iommu_lock;     /* protect iommu set in domain */
410         u64             max_addr;       /* maximum mapped address */
411
412         struct iommu_domain domain;     /* generic domain data structure for
413                                            iommu core */
414 };
415
416 /* PCI domain-device relationship */
417 struct device_domain_info {
418         struct list_head link;  /* link to domain siblings */
419         struct list_head global; /* link to global list */
420         u8 bus;                 /* PCI bus number */
421         u8 devfn;               /* PCI devfn number */
422         struct device *dev; /* it's NULL for PCIe-to-PCI bridge */
423         struct intel_iommu *iommu; /* IOMMU used by this device */
424         struct dmar_domain *domain; /* pointer to domain */
425 };
426
427 struct dmar_rmrr_unit {
428         struct list_head list;          /* list of rmrr units   */
429         struct acpi_dmar_header *hdr;   /* ACPI header          */
430         u64     base_address;           /* reserved base address*/
431         u64     end_address;            /* reserved end address */
432         struct dmar_dev_scope *devices; /* target devices */
433         int     devices_cnt;            /* target device count */
434 };
435
436 struct dmar_atsr_unit {
437         struct list_head list;          /* list of ATSR units */
438         struct acpi_dmar_header *hdr;   /* ACPI header */
439         struct dmar_dev_scope *devices; /* target devices */
440         int devices_cnt;                /* target device count */
441         u8 include_all:1;               /* include all ports */
442 };
443
444 static LIST_HEAD(dmar_atsr_units);
445 static LIST_HEAD(dmar_rmrr_units);
446
447 #define for_each_rmrr_units(rmrr) \
448         list_for_each_entry(rmrr, &dmar_rmrr_units, list)
449
450 static void flush_unmaps_timeout(unsigned long data);
451
452 static DEFINE_TIMER(unmap_timer,  flush_unmaps_timeout, 0, 0);
453
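/*
 * Deferred-unmap machinery: freed IOVAs are queued per IOMMU in
 * deferred_flush[] and released in batches by flush_unmaps_timeout(),
 * either when the timer fires or when HIGH_WATER_MARK entries pile up,
 * so that a single IOTLB flush can cover many unmaps.
 */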
454 #define HIGH_WATER_MARK 250
455 struct deferred_flush_tables {
456         int next;
457         struct iova *iova[HIGH_WATER_MARK];
458         struct dmar_domain *domain[HIGH_WATER_MARK];
459         struct page *freelist[HIGH_WATER_MARK];
460 };
461
462 static struct deferred_flush_tables *deferred_flush;
463
464 /* number of IOMMUs in the system; sizes the g_iommus array */
465 static int g_num_of_iommus;
466
467 static DEFINE_SPINLOCK(async_umap_flush_lock);
468 static LIST_HEAD(unmaps_to_do);
469
470 static int timer_on;
471 static long list_size;
472
473 static void domain_exit(struct dmar_domain *domain);
474 static void domain_remove_dev_info(struct dmar_domain *domain);
475 static void domain_remove_one_dev_info(struct dmar_domain *domain,
476                                        struct device *dev);
477 static void iommu_detach_dependent_devices(struct intel_iommu *iommu,
478                                            struct device *dev);
479 static int domain_detach_iommu(struct dmar_domain *domain,
480                                struct intel_iommu *iommu);
481
482 #ifdef CONFIG_INTEL_IOMMU_DEFAULT_ON
483 int dmar_disabled = 0;
484 #else
485 int dmar_disabled = 1;
486 #endif /*CONFIG_INTEL_IOMMU_DEFAULT_ON*/
487
488 int intel_iommu_enabled = 0;
489 EXPORT_SYMBOL_GPL(intel_iommu_enabled);
490
491 static int dmar_map_gfx = 1;
492 static int dmar_forcedac;
493 static int intel_iommu_strict;
494 static int intel_iommu_superpage = 1;
495 static int intel_iommu_ecs = 1;
496
497 /* We only actually use ECS when PASID support (on the new bit 40)
498  * is also advertised. Some early implementations — the ones with
499  * PASID support on bit 28 — have issues even when we *only* use
500  * extended root/context tables. */
501 #define ecs_enabled(iommu) (intel_iommu_ecs && ecap_ecs(iommu->ecap) && \
502                             ecap_pasid(iommu->ecap))
503
504 int intel_iommu_gfx_mapped;
505 EXPORT_SYMBOL_GPL(intel_iommu_gfx_mapped);
506
507 #define DUMMY_DEVICE_DOMAIN_INFO ((struct device_domain_info *)(-1))
508 static DEFINE_SPINLOCK(device_domain_lock);
509 static LIST_HEAD(device_domain_list);
510
511 static const struct iommu_ops intel_iommu_ops;
512
513 static bool translation_pre_enabled(struct intel_iommu *iommu)
514 {
515         return (iommu->flags & VTD_FLAG_TRANS_PRE_ENABLED);
516 }
517
518 static void clear_translation_pre_enabled(struct intel_iommu *iommu)
519 {
520         iommu->flags &= ~VTD_FLAG_TRANS_PRE_ENABLED;
521 }
522
523 static void init_translation_status(struct intel_iommu *iommu)
524 {
525         u32 gsts;
526
527         gsts = readl(iommu->reg + DMAR_GSTS_REG);
528         if (gsts & DMA_GSTS_TES)
529                 iommu->flags |= VTD_FLAG_TRANS_PRE_ENABLED;
530 }
531
532 /* Convert a generic 'struct iommu_domain' to the private 'struct dmar_domain' */
533 static struct dmar_domain *to_dmar_domain(struct iommu_domain *dom)
534 {
535         return container_of(dom, struct dmar_domain, domain);
536 }
537
538 static int __init intel_iommu_setup(char *str)
539 {
540         if (!str)
541                 return -EINVAL;
542         while (*str) {
543                 if (!strncmp(str, "on", 2)) {
544                         dmar_disabled = 0;
545                         pr_info("IOMMU enabled\n");
546                 } else if (!strncmp(str, "off", 3)) {
547                         dmar_disabled = 1;
548                         pr_info("IOMMU disabled\n");
549                 } else if (!strncmp(str, "igfx_off", 8)) {
550                         dmar_map_gfx = 0;
551                         pr_info("Disable GFX device mapping\n");
552                 } else if (!strncmp(str, "forcedac", 8)) {
553                         pr_info("Forcing DAC for PCI devices\n");
554                         dmar_forcedac = 1;
555                 } else if (!strncmp(str, "strict", 6)) {
556                         pr_info("Disable batched IOTLB flush\n");
557                         intel_iommu_strict = 1;
558                 } else if (!strncmp(str, "sp_off", 6)) {
559                         pr_info("Disable supported super page\n");
560                         intel_iommu_superpage = 0;
561                 } else if (!strncmp(str, "ecs_off", 7)) {
562                         printk(KERN_INFO
563                                 "Intel-IOMMU: disable extended context table support\n");
564                         intel_iommu_ecs = 0;
565                 }
566
567                 str += strcspn(str, ",");
568                 while (*str == ',')
569                         str++;
570         }
571         return 0;
572 }
573 __setup("intel_iommu=", intel_iommu_setup);
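/*
 * Example: booting with "intel_iommu=on,strict,sp_off" enables the IOMMU,
 * disables batched IOTLB flushing and disables superpage support; options
 * are comma separated and parsed in order by intel_iommu_setup() above.
 */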
574
575 static struct kmem_cache *iommu_domain_cache;
576 static struct kmem_cache *iommu_devinfo_cache;
577
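/*
 * Per-IOMMU domain lookup: iommu->domains is a two-level array indexed by
 * domain id, with the high byte selecting a lazily allocated chunk of 256
 * pointers and the low byte selecting the entry within that chunk.
 */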
578 static struct dmar_domain* get_iommu_domain(struct intel_iommu *iommu, u16 did)
579 {
580         struct dmar_domain **domains;
581         int idx = did >> 8;
582
583         domains = iommu->domains[idx];
584         if (!domains)
585                 return NULL;
586
587         return domains[did & 0xff];
588 }
589
590 static void set_iommu_domain(struct intel_iommu *iommu, u16 did,
591                              struct dmar_domain *domain)
592 {
593         struct dmar_domain **domains;
594         int idx = did >> 8;
595
596         if (!iommu->domains[idx]) {
597                 size_t size = 256 * sizeof(struct dmar_domain *);
598                 iommu->domains[idx] = kzalloc(size, GFP_ATOMIC);
599         }
600
601         domains = iommu->domains[idx];
602         if (WARN_ON(!domains))
603                 return;
604         else
605                 domains[did & 0xff] = domain;
606 }
607
608 static inline void *alloc_pgtable_page(int node)
609 {
610         struct page *page;
611         void *vaddr = NULL;
612
613         page = alloc_pages_node(node, GFP_ATOMIC | __GFP_ZERO, 0);
614         if (page)
615                 vaddr = page_address(page);
616         return vaddr;
617 }
618
619 static inline void free_pgtable_page(void *vaddr)
620 {
621         free_page((unsigned long)vaddr);
622 }
623
624 static inline void *alloc_domain_mem(void)
625 {
626         return kmem_cache_alloc(iommu_domain_cache, GFP_ATOMIC);
627 }
628
629 static void free_domain_mem(void *vaddr)
630 {
631         kmem_cache_free(iommu_domain_cache, vaddr);
632 }
633
634 static inline void * alloc_devinfo_mem(void)
635 {
636         return kmem_cache_alloc(iommu_devinfo_cache, GFP_ATOMIC);
637 }
638
639 static inline void free_devinfo_mem(void *vaddr)
640 {
641         kmem_cache_free(iommu_devinfo_cache, vaddr);
642 }
643
644 static inline int domain_type_is_vm(struct dmar_domain *domain)
645 {
646         return domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE;
647 }
648
649 static inline int domain_type_is_si(struct dmar_domain *domain)
650 {
651         return domain->flags & DOMAIN_FLAG_STATIC_IDENTITY;
652 }
653
654 static inline int domain_type_is_vm_or_si(struct dmar_domain *domain)
655 {
656         return domain->flags & (DOMAIN_FLAG_VIRTUAL_MACHINE |
657                                 DOMAIN_FLAG_STATIC_IDENTITY);
658 }
659
660 static inline int domain_pfn_supported(struct dmar_domain *domain,
661                                        unsigned long pfn)
662 {
663         int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
664
665         return !(addr_width < BITS_PER_LONG && pfn >> addr_width);
666 }
667
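/*
 * Find the largest AGAW supported by this IOMMU that does not exceed the
 * requested guest address width, by scanning the SAGAW capability bits
 * from the top down; returns -1 if none is supported.
 */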
668 static int __iommu_calculate_agaw(struct intel_iommu *iommu, int max_gaw)
669 {
670         unsigned long sagaw;
671         int agaw = -1;
672
673         sagaw = cap_sagaw(iommu->cap);
674         for (agaw = width_to_agaw(max_gaw);
675              agaw >= 0; agaw--) {
676                 if (test_bit(agaw, &sagaw))
677                         break;
678         }
679
680         return agaw;
681 }
682
683 /*
684  * Calculate max SAGAW for each iommu.
685  */
686 int iommu_calculate_max_sagaw(struct intel_iommu *iommu)
687 {
688         return __iommu_calculate_agaw(iommu, MAX_AGAW_WIDTH);
689 }
690
691 /*
692  * calculate agaw for each iommu.
693  * "SAGAW" may be different across iommus, use a default agaw, and
694  * get a supported less agaw for iommus that don't support the default agaw.
695  */
696 int iommu_calculate_agaw(struct intel_iommu *iommu)
697 {
698         return __iommu_calculate_agaw(iommu, DEFAULT_DOMAIN_ADDRESS_WIDTH);
699 }
700
701 /* This function only returns a single iommu for a domain */
702 static struct intel_iommu *domain_get_iommu(struct dmar_domain *domain)
703 {
704         int iommu_id;
705
706         /* si_domain and vm domain should not get here. */
707         BUG_ON(domain_type_is_vm_or_si(domain));
708         for_each_domain_iommu(iommu_id, domain)
709                 break;
710
711         if (iommu_id < 0 || iommu_id >= g_num_of_iommus)
712                 return NULL;
713
714         return g_iommus[iommu_id];
715 }
716
717 static void domain_update_iommu_coherency(struct dmar_domain *domain)
718 {
719         struct dmar_drhd_unit *drhd;
720         struct intel_iommu *iommu;
721         bool found = false;
722         int i;
723
724         domain->iommu_coherency = 1;
725
726         for_each_domain_iommu(i, domain) {
727                 found = true;
728                 if (!ecap_coherent(g_iommus[i]->ecap)) {
729                         domain->iommu_coherency = 0;
730                         break;
731                 }
732         }
733         if (found)
734                 return;
735
736         /* No hardware attached; use lowest common denominator */
737         rcu_read_lock();
738         for_each_active_iommu(iommu, drhd) {
739                 if (!ecap_coherent(iommu->ecap)) {
740                         domain->iommu_coherency = 0;
741                         break;
742                 }
743         }
744         rcu_read_unlock();
745 }
746
747 static int domain_update_iommu_snooping(struct intel_iommu *skip)
748 {
749         struct dmar_drhd_unit *drhd;
750         struct intel_iommu *iommu;
751         int ret = 1;
752
753         rcu_read_lock();
754         for_each_active_iommu(iommu, drhd) {
755                 if (iommu != skip) {
756                         if (!ecap_sc_support(iommu->ecap)) {
757                                 ret = 0;
758                                 break;
759                         }
760                 }
761         }
762         rcu_read_unlock();
763
764         return ret;
765 }
766
767 static int domain_update_iommu_superpage(struct intel_iommu *skip)
768 {
769         struct dmar_drhd_unit *drhd;
770         struct intel_iommu *iommu;
771         int mask = 0xf;
772
773         if (!intel_iommu_superpage) {
774                 return 0;
775         }
776
777         /* set iommu_superpage to the smallest common denominator */
778         rcu_read_lock();
779         for_each_active_iommu(iommu, drhd) {
780                 if (iommu != skip) {
781                         mask &= cap_super_page_val(iommu->cap);
782                         if (!mask)
783                                 break;
784                 }
785         }
786         rcu_read_unlock();
787
788         return fls(mask);
789 }
790
791 /* Some capabilities may be different across iommus */
792 static void domain_update_iommu_cap(struct dmar_domain *domain)
793 {
794         domain_update_iommu_coherency(domain);
795         domain->iommu_snooping = domain_update_iommu_snooping(NULL);
796         domain->iommu_superpage = domain_update_iommu_superpage(NULL);
797 }
798
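/*
 * Return (and optionally allocate) the context entry for bus/devfn. With
 * extended context support (ECS) each root entry provides two context
 * table pointers: the low qword covers devfn 0x00-0x7f and the high qword
 * covers devfn 0x80-0xff, with 256-bit extended context entries (hence
 * the devfn *= 2 below).
 */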
799 static inline struct context_entry *iommu_context_addr(struct intel_iommu *iommu,
800                                                        u8 bus, u8 devfn, int alloc)
801 {
802         struct root_entry *root = &iommu->root_entry[bus];
803         struct context_entry *context;
804         u64 *entry;
805
806         entry = &root->lo;
807         if (ecs_enabled(iommu)) {
808                 if (devfn >= 0x80) {
809                         devfn -= 0x80;
810                         entry = &root->hi;
811                 }
812                 devfn *= 2;
813         }
814         if (*entry & 1)
815                 context = phys_to_virt(*entry & VTD_PAGE_MASK);
816         else {
817                 unsigned long phy_addr;
818                 if (!alloc)
819                         return NULL;
820
821                 context = alloc_pgtable_page(iommu->node);
822                 if (!context)
823                         return NULL;
824
825                 __iommu_flush_cache(iommu, (void *)context, CONTEXT_SIZE);
826                 phy_addr = virt_to_phys((void *)context);
827                 *entry = phy_addr | 1;
828                 __iommu_flush_cache(iommu, entry, sizeof(*entry));
829         }
830         return &context[devfn];
831 }
832
833 static int iommu_dummy(struct device *dev)
834 {
835         return dev->archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO;
836 }
837
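/*
 * Map a device to the IOMMU (DRHD unit) responsible for it by walking each
 * active unit's device scope. For PCI devices the segment must match, and
 * a bridge whose secondary bus range contains the device, or an
 * INCLUDE_ALL unit, also counts; *bus and *devfn are filled in for the
 * caller.
 */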
838 static struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devfn)
839 {
840         struct dmar_drhd_unit *drhd = NULL;
841         struct intel_iommu *iommu;
842         struct device *tmp;
843         struct pci_dev *ptmp, *pdev = NULL;
844         u16 segment = 0;
845         int i;
846
847         if (iommu_dummy(dev))
848                 return NULL;
849
850         if (dev_is_pci(dev)) {
851                 pdev = to_pci_dev(dev);
852                 segment = pci_domain_nr(pdev->bus);
853         } else if (has_acpi_companion(dev))
854                 dev = &ACPI_COMPANION(dev)->dev;
855
856         rcu_read_lock();
857         for_each_active_iommu(iommu, drhd) {
858                 if (pdev && segment != drhd->segment)
859                         continue;
860
861                 for_each_active_dev_scope(drhd->devices,
862                                           drhd->devices_cnt, i, tmp) {
863                         if (tmp == dev) {
864                                 *bus = drhd->devices[i].bus;
865                                 *devfn = drhd->devices[i].devfn;
866                                 goto out;
867                         }
868
869                         if (!pdev || !dev_is_pci(tmp))
870                                 continue;
871
872                         ptmp = to_pci_dev(tmp);
873                         if (ptmp->subordinate &&
874                             ptmp->subordinate->number <= pdev->bus->number &&
875                             ptmp->subordinate->busn_res.end >= pdev->bus->number)
876                                 goto got_pdev;
877                 }
878
879                 if (pdev && drhd->include_all) {
880                 got_pdev:
881                         *bus = pdev->bus->number;
882                         *devfn = pdev->devfn;
883                         goto out;
884                 }
885         }
886         iommu = NULL;
887  out:
888         rcu_read_unlock();
889
890         return iommu;
891 }
892
893 static void domain_flush_cache(struct dmar_domain *domain,
894                                void *addr, int size)
895 {
896         if (!domain->iommu_coherency)
897                 clflush_cache_range(addr, size);
898 }
899
900 static int device_context_mapped(struct intel_iommu *iommu, u8 bus, u8 devfn)
901 {
902         struct context_entry *context;
903         int ret = 0;
904         unsigned long flags;
905
906         spin_lock_irqsave(&iommu->lock, flags);
907         context = iommu_context_addr(iommu, bus, devfn, 0);
908         if (context)
909                 ret = context_present(context);
910         spin_unlock_irqrestore(&iommu->lock, flags);
911         return ret;
912 }
913
914 static void clear_context_table(struct intel_iommu *iommu, u8 bus, u8 devfn)
915 {
916         struct context_entry *context;
917         unsigned long flags;
918
919         spin_lock_irqsave(&iommu->lock, flags);
920         context = iommu_context_addr(iommu, bus, devfn, 0);
921         if (context) {
922                 context_clear_entry(context);
923                 __iommu_flush_cache(iommu, context, sizeof(*context));
924         }
925         spin_unlock_irqrestore(&iommu->lock, flags);
926 }
927
928 static void free_context_table(struct intel_iommu *iommu)
929 {
930         int i;
931         unsigned long flags;
932         struct context_entry *context;
933
934         spin_lock_irqsave(&iommu->lock, flags);
935         if (!iommu->root_entry) {
936                 goto out;
937         }
938         for (i = 0; i < ROOT_ENTRY_NR; i++) {
939                 context = iommu_context_addr(iommu, i, 0, 0);
940                 if (context)
941                         free_pgtable_page(context);
942
943                 if (!ecs_enabled(iommu))
944                         continue;
945
946                 context = iommu_context_addr(iommu, i, 0x80, 0);
947                 if (context)
948                         free_pgtable_page(context);
949
950         }
951         free_pgtable_page(iommu->root_entry);
952         iommu->root_entry = NULL;
953 out:
954         spin_unlock_irqrestore(&iommu->lock, flags);
955 }
956
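/*
 * Walk (and, if needed, allocate) the page table down to the PTE covering
 * 'pfn' at *target_level. With *target_level == 0 the walk stops at the
 * first superpage or non-present entry without allocating, and reports the
 * level it stopped at back through *target_level.
 */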
957 static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
958                                       unsigned long pfn, int *target_level)
959 {
960         struct dma_pte *parent, *pte = NULL;
961         int level = agaw_to_level(domain->agaw);
962         int offset;
963
964         BUG_ON(!domain->pgd);
965
966         if (!domain_pfn_supported(domain, pfn))
967                 /* Address beyond IOMMU's addressing capabilities. */
968                 return NULL;
969
970         parent = domain->pgd;
971
972         while (1) {
973                 void *tmp_page;
974
975                 offset = pfn_level_offset(pfn, level);
976                 pte = &parent[offset];
977                 if (!*target_level && (dma_pte_superpage(pte) || !dma_pte_present(pte)))
978                         break;
979                 if (level == *target_level)
980                         break;
981
982                 if (!dma_pte_present(pte)) {
983                         uint64_t pteval;
984
985                         tmp_page = alloc_pgtable_page(domain->nid);
986
987                         if (!tmp_page)
988                                 return NULL;
989
990                         domain_flush_cache(domain, tmp_page, VTD_PAGE_SIZE);
991                         pteval = ((uint64_t)virt_to_dma_pfn(tmp_page) << VTD_PAGE_SHIFT) | DMA_PTE_READ | DMA_PTE_WRITE;
992                         if (cmpxchg64(&pte->val, 0ULL, pteval))
993                                 /* Someone else set it while we were thinking; use theirs. */
994                                 free_pgtable_page(tmp_page);
995                         else
996                                 domain_flush_cache(domain, pte, sizeof(*pte));
997                 }
998                 if (level == 1)
999                         break;
1000
1001                 parent = phys_to_virt(dma_pte_addr(pte));
1002                 level--;
1003         }
1004
1005         if (!*target_level)
1006                 *target_level = level;
1007
1008         return pte;
1009 }
1010
1011
1012 /* return address's pte at specific level */
1013 static struct dma_pte *dma_pfn_level_pte(struct dmar_domain *domain,
1014                                          unsigned long pfn,
1015                                          int level, int *large_page)
1016 {
1017         struct dma_pte *parent, *pte = NULL;
1018         int total = agaw_to_level(domain->agaw);
1019         int offset;
1020
1021         parent = domain->pgd;
1022         while (level <= total) {
1023                 offset = pfn_level_offset(pfn, total);
1024                 pte = &parent[offset];
1025                 if (level == total)
1026                         return pte;
1027
1028                 if (!dma_pte_present(pte)) {
1029                         *large_page = total;
1030                         break;
1031                 }
1032
1033                 if (dma_pte_superpage(pte)) {
1034                         *large_page = total;
1035                         return pte;
1036                 }
1037
1038                 parent = phys_to_virt(dma_pte_addr(pte));
1039                 total--;
1040         }
1041         return NULL;
1042 }
1043
1044 /* clear last level pte; should be followed by an IOTLB flush */
1045 static void dma_pte_clear_range(struct dmar_domain *domain,
1046                                 unsigned long start_pfn,
1047                                 unsigned long last_pfn)
1048 {
1049         unsigned int large_page = 1;
1050         struct dma_pte *first_pte, *pte;
1051
1052         BUG_ON(!domain_pfn_supported(domain, start_pfn));
1053         BUG_ON(!domain_pfn_supported(domain, last_pfn));
1054         BUG_ON(start_pfn > last_pfn);
1055
1056         /* we don't need lock here; nobody else touches the iova range */
1057         do {
1058                 large_page = 1;
1059                 first_pte = pte = dma_pfn_level_pte(domain, start_pfn, 1, &large_page);
1060                 if (!pte) {
1061                         start_pfn = align_to_level(start_pfn + 1, large_page + 1);
1062                         continue;
1063                 }
1064                 do {
1065                         dma_clear_pte(pte);
1066                         start_pfn += lvl_to_nr_pages(large_page);
1067                         pte++;
1068                 } while (start_pfn <= last_pfn && !first_pte_in_page(pte));
1069
1070                 domain_flush_cache(domain, first_pte,
1071                                    (void *)pte - (void *)first_pte);
1072
1073         } while (start_pfn && start_pfn <= last_pfn);
1074 }
1075
1076 static void dma_pte_free_level(struct dmar_domain *domain, int level,
1077                                struct dma_pte *pte, unsigned long pfn,
1078                                unsigned long start_pfn, unsigned long last_pfn)
1079 {
1080         pfn = max(start_pfn, pfn);
1081         pte = &pte[pfn_level_offset(pfn, level)];
1082
1083         do {
1084                 unsigned long level_pfn;
1085                 struct dma_pte *level_pte;
1086
1087                 if (!dma_pte_present(pte) || dma_pte_superpage(pte))
1088                         goto next;
1089
1090                 level_pfn = pfn & level_mask(level);
1091                 level_pte = phys_to_virt(dma_pte_addr(pte));
1092
1093                 if (level > 2)
1094                         dma_pte_free_level(domain, level - 1, level_pte,
1095                                            level_pfn, start_pfn, last_pfn);
1096
1097                 /* If range covers entire pagetable, free it */
1098                 if (!(start_pfn > level_pfn ||
1099                       last_pfn < level_pfn + level_size(level) - 1)) {
1100                         dma_clear_pte(pte);
1101                         domain_flush_cache(domain, pte, sizeof(*pte));
1102                         free_pgtable_page(level_pte);
1103                 }
1104 next:
1105                 pfn += level_size(level);
1106         } while (!first_pte_in_page(++pte) && pfn <= last_pfn);
1107 }
1108
1109 /* free page table pages. last level pte should already be cleared */
1110 static void dma_pte_free_pagetable(struct dmar_domain *domain,
1111                                    unsigned long start_pfn,
1112                                    unsigned long last_pfn)
1113 {
1114         BUG_ON(!domain_pfn_supported(domain, start_pfn));
1115         BUG_ON(!domain_pfn_supported(domain, last_pfn));
1116         BUG_ON(start_pfn > last_pfn);
1117
1118         dma_pte_clear_range(domain, start_pfn, last_pfn);
1119
1120         /* We don't need lock here; nobody else touches the iova range */
1121         dma_pte_free_level(domain, agaw_to_level(domain->agaw),
1122                            domain->pgd, 0, start_pfn, last_pfn);
1123
1124         /* free pgd */
1125         if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) {
1126                 free_pgtable_page(domain->pgd);
1127                 domain->pgd = NULL;
1128         }
1129 }
1130
1131 /* When a page at a given level is being unlinked from its parent, we don't
1132    need to *modify* it at all. All we need to do is make a list of all the
1133    pages which can be freed just as soon as we've flushed the IOTLB and we
1134    know the hardware page-walk will no longer touch them.
1135    The 'pte' argument is the *parent* PTE, pointing to the page that is to
1136    be freed. */
1137 static struct page *dma_pte_list_pagetables(struct dmar_domain *domain,
1138                                             int level, struct dma_pte *pte,
1139                                             struct page *freelist)
1140 {
1141         struct page *pg;
1142
1143         pg = pfn_to_page(dma_pte_addr(pte) >> PAGE_SHIFT);
1144         pg->freelist = freelist;
1145         freelist = pg;
1146
1147         if (level == 1)
1148                 return freelist;
1149
1150         pte = page_address(pg);
1151         do {
1152                 if (dma_pte_present(pte) && !dma_pte_superpage(pte))
1153                         freelist = dma_pte_list_pagetables(domain, level - 1,
1154                                                            pte, freelist);
1155                 pte++;
1156         } while (!first_pte_in_page(pte));
1157
1158         return freelist;
1159 }
1160
1161 static struct page *dma_pte_clear_level(struct dmar_domain *domain, int level,
1162                                         struct dma_pte *pte, unsigned long pfn,
1163                                         unsigned long start_pfn,
1164                                         unsigned long last_pfn,
1165                                         struct page *freelist)
1166 {
1167         struct dma_pte *first_pte = NULL, *last_pte = NULL;
1168
1169         pfn = max(start_pfn, pfn);
1170         pte = &pte[pfn_level_offset(pfn, level)];
1171
1172         do {
1173                 unsigned long level_pfn;
1174
1175                 if (!dma_pte_present(pte))
1176                         goto next;
1177
1178                 level_pfn = pfn & level_mask(level);
1179
1180                 /* If range covers entire pagetable, free it */
1181                 if (start_pfn <= level_pfn &&
1182                     last_pfn >= level_pfn + level_size(level) - 1) {
1183                         /* These subordinate page tables are going away entirely. Don't
1184                            bother to clear them; we're just going to *free* them. */
1185                         if (level > 1 && !dma_pte_superpage(pte))
1186                                 freelist = dma_pte_list_pagetables(domain, level - 1, pte, freelist);
1187
1188                         dma_clear_pte(pte);
1189                         if (!first_pte)
1190                                 first_pte = pte;
1191                         last_pte = pte;
1192                 } else if (level > 1) {
1193                         /* Recurse down into a level that isn't *entirely* obsolete */
1194                         freelist = dma_pte_clear_level(domain, level - 1,
1195                                                        phys_to_virt(dma_pte_addr(pte)),
1196                                                        level_pfn, start_pfn, last_pfn,
1197                                                        freelist);
1198                 }
1199 next:
1200                 pfn += level_size(level);
1201         } while (!first_pte_in_page(++pte) && pfn <= last_pfn);
1202
1203         if (first_pte)
1204                 domain_flush_cache(domain, first_pte,
1205                                    (void *)++last_pte - (void *)first_pte);
1206
1207         return freelist;
1208 }
1209
1210 /* We can't just free the pages because the IOMMU may still be walking
1211    the page tables, and may have cached the intermediate levels. The
1212    pages can only be freed after the IOTLB flush has been done. */
1213 struct page *domain_unmap(struct dmar_domain *domain,
1214                           unsigned long start_pfn,
1215                           unsigned long last_pfn)
1216 {
1217         struct page *freelist = NULL;
1218
1219         BUG_ON(!domain_pfn_supported(domain, start_pfn));
1220         BUG_ON(!domain_pfn_supported(domain, last_pfn));
1221         BUG_ON(start_pfn > last_pfn);
1222
1223         /* we don't need lock here; nobody else touches the iova range */
1224         freelist = dma_pte_clear_level(domain, agaw_to_level(domain->agaw),
1225                                        domain->pgd, 0, start_pfn, last_pfn, NULL);
1226
1227         /* free pgd */
1228         if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) {
1229                 struct page *pgd_page = virt_to_page(domain->pgd);
1230                 pgd_page->freelist = freelist;
1231                 freelist = pgd_page;
1232
1233                 domain->pgd = NULL;
1234         }
1235
1236         return freelist;
1237 }
1238
1239 void dma_free_pagelist(struct page *freelist)
1240 {
1241         struct page *pg;
1242
1243         while ((pg = freelist)) {
1244                 freelist = pg->freelist;
1245                 free_pgtable_page(page_address(pg));
1246         }
1247 }
1248
1249 /* iommu handling */
1250 static int iommu_alloc_root_entry(struct intel_iommu *iommu)
1251 {
1252         struct root_entry *root;
1253         unsigned long flags;
1254
1255         root = (struct root_entry *)alloc_pgtable_page(iommu->node);
1256         if (!root) {
1257                 pr_err("Allocating root entry for %s failed\n",
1258                         iommu->name);
1259                 return -ENOMEM;
1260         }
1261
1262         __iommu_flush_cache(iommu, root, ROOT_SIZE);
1263
1264         spin_lock_irqsave(&iommu->lock, flags);
1265         iommu->root_entry = root;
1266         spin_unlock_irqrestore(&iommu->lock, flags);
1267
1268         return 0;
1269 }
1270
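/*
 * Program the root table address into DMAR_RTADDR_REG (with the RTT bit
 * when extended context tables are in use), then issue a Set Root Table
 * Pointer command and wait for the RTPS status bit.
 */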
1271 static void iommu_set_root_entry(struct intel_iommu *iommu)
1272 {
1273         u64 addr;
1274         u32 sts;
1275         unsigned long flag;
1276
1277         addr = virt_to_phys(iommu->root_entry);
1278         if (ecs_enabled(iommu))
1279                 addr |= DMA_RTADDR_RTT;
1280
1281         raw_spin_lock_irqsave(&iommu->register_lock, flag);
1282         dmar_writeq(iommu->reg + DMAR_RTADDR_REG, addr);
1283
1284         writel(iommu->gcmd | DMA_GCMD_SRTP, iommu->reg + DMAR_GCMD_REG);
1285
1286         /* Make sure hardware complete it */
1287         IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
1288                       readl, (sts & DMA_GSTS_RTPS), sts);
1289
1290         raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1291 }
1292
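/*
 * Flush the IOMMU write buffer. Only needed on hardware that advertises
 * Required Write-Buffer Flushing (cap_rwbf) or is covered by rwbf_quirk;
 * issues DMA_GCMD_WBF and waits for the WBFS status bit to clear.
 */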
1293 static void iommu_flush_write_buffer(struct intel_iommu *iommu)
1294 {
1295         u32 val;
1296         unsigned long flag;
1297
1298         if (!rwbf_quirk && !cap_rwbf(iommu->cap))
1299                 return;
1300
1301         raw_spin_lock_irqsave(&iommu->register_lock, flag);
1302         writel(iommu->gcmd | DMA_GCMD_WBF, iommu->reg + DMAR_GCMD_REG);
1303
1304         /* Make sure hardware complete it */
1305         IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
1306                       readl, (!(val & DMA_GSTS_WBFS)), val);
1307
1308         raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1309 }
1310
1311 /* Invalidate the context cache and wait for the hardware to complete it */
1312 static void __iommu_flush_context(struct intel_iommu *iommu,
1313                                   u16 did, u16 source_id, u8 function_mask,
1314                                   u64 type)
1315 {
1316         u64 val = 0;
1317         unsigned long flag;
1318
1319         switch (type) {
1320         case DMA_CCMD_GLOBAL_INVL:
1321                 val = DMA_CCMD_GLOBAL_INVL;
1322                 break;
1323         case DMA_CCMD_DOMAIN_INVL:
1324                 val = DMA_CCMD_DOMAIN_INVL|DMA_CCMD_DID(did);
1325                 break;
1326         case DMA_CCMD_DEVICE_INVL:
1327                 val = DMA_CCMD_DEVICE_INVL|DMA_CCMD_DID(did)
1328                         | DMA_CCMD_SID(source_id) | DMA_CCMD_FM(function_mask);
1329                 break;
1330         default:
1331                 BUG();
1332         }
1333         val |= DMA_CCMD_ICC;
1334
1335         raw_spin_lock_irqsave(&iommu->register_lock, flag);
1336         dmar_writeq(iommu->reg + DMAR_CCMD_REG, val);
1337
1338         /* Make sure hardware complete it */
1339         IOMMU_WAIT_OP(iommu, DMAR_CCMD_REG,
1340                 dmar_readq, (!(val & DMA_CCMD_ICC)), val);
1341
1342         raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1343 }
1344
1345 /* Invalidate the IOTLB and wait for the hardware to complete it */
1346 static void __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did,
1347                                 u64 addr, unsigned int size_order, u64 type)
1348 {
1349         int tlb_offset = ecap_iotlb_offset(iommu->ecap);
1350         u64 val = 0, val_iva = 0;
1351         unsigned long flag;
1352
1353         switch (type) {
1354         case DMA_TLB_GLOBAL_FLUSH:
1355                 /* global flush doesn't need set IVA_REG */
1356                 val = DMA_TLB_GLOBAL_FLUSH|DMA_TLB_IVT;
1357                 break;
1358         case DMA_TLB_DSI_FLUSH:
1359                 val = DMA_TLB_DSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
1360                 break;
1361         case DMA_TLB_PSI_FLUSH:
1362                 val = DMA_TLB_PSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
1363                 /* IH bit is passed in as part of address */
1364                 val_iva = size_order | addr;
1365                 break;
1366         default:
1367                 BUG();
1368         }
1369         /* Note: set drain read/write */
1370 #if 0
1371         /*
1372          * This is probably only needed to be extra safe. It looks like
1373          * we can ignore it without any impact.
1374          */
1375         if (cap_read_drain(iommu->cap))
1376                 val |= DMA_TLB_READ_DRAIN;
1377 #endif
1378         if (cap_write_drain(iommu->cap))
1379                 val |= DMA_TLB_WRITE_DRAIN;
1380
1381         raw_spin_lock_irqsave(&iommu->register_lock, flag);
1382         /* Note: Only uses first TLB reg currently */
1383         if (val_iva)
1384                 dmar_writeq(iommu->reg + tlb_offset, val_iva);
1385         dmar_writeq(iommu->reg + tlb_offset + 8, val);
1386
1387         /* Make sure hardware complete it */
1388         IOMMU_WAIT_OP(iommu, tlb_offset + 8,
1389                 dmar_readq, (!(val & DMA_TLB_IVT)), val);
1390
1391         raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1392
1393         /* check IOTLB invalidation granularity */
1394         if (DMA_TLB_IAIG(val) == 0)
1395                 pr_err("Flush IOTLB failed\n");
1396         if (DMA_TLB_IAIG(val) != DMA_TLB_IIRG(type))
1397                 pr_debug("TLB flush request %Lx, actual %Lx\n",
1398                         (unsigned long long)DMA_TLB_IIRG(type),
1399                         (unsigned long long)DMA_TLB_IAIG(val));
1400 }
1401
1402 static struct device_domain_info *
1403 iommu_support_dev_iotlb (struct dmar_domain *domain, struct intel_iommu *iommu,
1404                          u8 bus, u8 devfn)
1405 {
1406         bool found = false;
1407         unsigned long flags;
1408         struct device_domain_info *info;
1409         struct pci_dev *pdev;
1410
1411         if (!ecap_dev_iotlb_support(iommu->ecap))
1412                 return NULL;
1413
1414         if (!iommu->qi)
1415                 return NULL;
1416
1417         spin_lock_irqsave(&device_domain_lock, flags);
1418         list_for_each_entry(info, &domain->devices, link)
1419                 if (info->iommu == iommu && info->bus == bus &&
1420                     info->devfn == devfn) {
1421                         found = true;
1422                         break;
1423                 }
1424         spin_unlock_irqrestore(&device_domain_lock, flags);
1425
1426         if (!found || !info->dev || !dev_is_pci(info->dev))
1427                 return NULL;
1428
1429         pdev = to_pci_dev(info->dev);
1430
1431         if (!pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ATS))
1432                 return NULL;
1433
1434         if (!dmar_find_matched_atsr_unit(pdev))
1435                 return NULL;
1436
1437         return info;
1438 }
1439
1440 static void iommu_enable_dev_iotlb(struct device_domain_info *info)
1441 {
1442         if (!info || !dev_is_pci(info->dev))
1443                 return;
1444
1445         pci_enable_ats(to_pci_dev(info->dev), VTD_PAGE_SHIFT);
1446 }
1447
1448 static void iommu_disable_dev_iotlb(struct device_domain_info *info)
1449 {
1450         if (!info->dev || !dev_is_pci(info->dev) ||
1451             !pci_ats_enabled(to_pci_dev(info->dev)))
1452                 return;
1453
1454         pci_disable_ats(to_pci_dev(info->dev));
1455 }
1456
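/*
 * Invalidate the device IOTLB (ATS translation cache) of every ATS-enabled
 * PCI device attached to this domain, using the device's bus/devfn as
 * source-id and its advertised ATS queue depth.
 */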
1457 static void iommu_flush_dev_iotlb(struct dmar_domain *domain,
1458                                   u64 addr, unsigned mask)
1459 {
1460         u16 sid, qdep;
1461         unsigned long flags;
1462         struct device_domain_info *info;
1463
1464         spin_lock_irqsave(&device_domain_lock, flags);
1465         list_for_each_entry(info, &domain->devices, link) {
1466                 struct pci_dev *pdev;
1467                 if (!info->dev || !dev_is_pci(info->dev))
1468                         continue;
1469
1470                 pdev = to_pci_dev(info->dev);
1471                 if (!pci_ats_enabled(pdev))
1472                         continue;
1473
1474                 sid = info->bus << 8 | info->devfn;
1475                 qdep = pci_ats_queue_depth(pdev);
1476                 qi_flush_dev_iotlb(info->iommu, sid, qdep, addr, mask);
1477         }
1478         spin_unlock_irqrestore(&device_domain_lock, flags);
1479 }
1480
1481 static void iommu_flush_iotlb_psi(struct intel_iommu *iommu,
1482                                   struct dmar_domain *domain,
1483                                   unsigned long pfn, unsigned int pages,
1484                                   int ih, int map)
1485 {
1486         unsigned int mask = ilog2(__roundup_pow_of_two(pages));
1487         uint64_t addr = (uint64_t)pfn << VTD_PAGE_SHIFT;
1488         u16 did = domain->iommu_did[iommu->seq_id];
1489
1490         BUG_ON(pages == 0);
1491
1492         if (ih)
1493                 ih = 1 << 6;
1494         /*
1495          * Fallback to domain selective flush if no PSI support or the size is
1496          * too big.
1497          * PSI requires page size to be 2 ^ x, and the base address is naturally
1498          * aligned to the size
1499          */
1500         if (!cap_pgsel_inv(iommu->cap) || mask > cap_max_amask_val(iommu->cap))
1501                 iommu->flush.flush_iotlb(iommu, did, 0, 0,
1502                                                 DMA_TLB_DSI_FLUSH);
1503         else
1504                 iommu->flush.flush_iotlb(iommu, did, addr | ih, mask,
1505                                                 DMA_TLB_PSI_FLUSH);
1506
1507         /*
1508          * In caching mode, changes of pages from non-present to present require
1509          * flush. However, device IOTLB doesn't need to be flushed in this case.
1510          */
1511         if (!cap_caching_mode(iommu->cap) || !map)
1512                 iommu_flush_dev_iotlb(get_iommu_domain(iommu, did),
1513                                       addr, mask);
1514 }
1515
1516 static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu)
1517 {
1518         u32 pmen;
1519         unsigned long flags;
1520
1521         raw_spin_lock_irqsave(&iommu->register_lock, flags);
1522         pmen = readl(iommu->reg + DMAR_PMEN_REG);
1523         pmen &= ~DMA_PMEN_EPM;
1524         writel(pmen, iommu->reg + DMAR_PMEN_REG);
1525
1526         /* wait for the protected region status bit to clear */
1527         IOMMU_WAIT_OP(iommu, DMAR_PMEN_REG,
1528                 readl, !(pmen & DMA_PMEN_PRS), pmen);
1529
1530         raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
1531 }
1532
1533 static void iommu_enable_translation(struct intel_iommu *iommu)
1534 {
1535         u32 sts;
1536         unsigned long flags;
1537
1538         raw_spin_lock_irqsave(&iommu->register_lock, flags);
1539         iommu->gcmd |= DMA_GCMD_TE;
1540         writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
1541
1542         /* Make sure hardware complete it */
1543         IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
1544                       readl, (sts & DMA_GSTS_TES), sts);
1545
1546         raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
1547 }
1548
1549 static void iommu_disable_translation(struct intel_iommu *iommu)
1550 {
1551         u32 sts;
1552         unsigned long flag;
1553
1554         raw_spin_lock_irqsave(&iommu->register_lock, flag);
1555         iommu->gcmd &= ~DMA_GCMD_TE;
1556         writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
1557
1558         /* Make sure hardware complete it */
1559         IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
1560                       readl, (!(sts & DMA_GSTS_TES)), sts);
1561
1562         raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1563 }
1564
1565
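/*
 * Allocate this IOMMU's domain-id bitmap (sized from cap_ndoms) and the
 * first chunk of the two-level domains array; domain-id 0 is reserved
 * below because caching-mode hardware tags invalid translations with it.
 */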
1566 static int iommu_init_domains(struct intel_iommu *iommu)
1567 {
1568         u32 ndomains, nlongs;
1569         size_t size;
1570
1571         ndomains = cap_ndoms(iommu->cap);
1572         pr_debug("%s: Number of Domains supported <%d>\n",
1573                  iommu->name, ndomains);
1574         nlongs = BITS_TO_LONGS(ndomains);
1575
1576         spin_lock_init(&iommu->lock);
1577
1578         iommu->domain_ids = kcalloc(nlongs, sizeof(unsigned long), GFP_KERNEL);
1579         if (!iommu->domain_ids) {
1580                 pr_err("%s: Allocating domain id array failed\n",
1581                        iommu->name);
1582                 return -ENOMEM;
1583         }
1584
1585         size = ((ndomains >> 8) + 1) * sizeof(struct dmar_domain **);
1586         iommu->domains = kzalloc(size, GFP_KERNEL);
1587
1588         if (iommu->domains) {
1589                 size = 256 * sizeof(struct dmar_domain *);
1590                 iommu->domains[0] = kzalloc(size, GFP_KERNEL);
1591         }
1592
1593         if (!iommu->domains || !iommu->domains[0]) {
1594                 pr_err("%s: Allocating domain array failed\n",
1595                        iommu->name);
1596                 kfree(iommu->domain_ids);
1597                 kfree(iommu->domains);
1598                 iommu->domain_ids = NULL;
1599                 iommu->domains    = NULL;
1600                 return -ENOMEM;
1601         }
1602
1603
1604
1605         /*
1606          * If Caching mode is set, then invalid translations are tagged
1607          * with domain-id 0, hence we need to pre-allocate it. We also
1608          * use domain-id 0 as a marker for non-allocated domain-id, so
1609          * make sure it is not used for a real domain.
1610          */
1611         set_bit(0, iommu->domain_ids);
1612
1613         return 0;
1614 }
1615
1616 static void disable_dmar_iommu(struct intel_iommu *iommu)
1617 {
1618         struct device_domain_info *info, *tmp;
1619
1620         if (!iommu->domains || !iommu->domain_ids)
1621                 return;
1622
1623         list_for_each_entry_safe(info, tmp, &device_domain_list, global) {
1624                 struct dmar_domain *domain;
1625
1626                 if (info->iommu != iommu)
1627                         continue;
1628
1629                 if (!info->dev || !info->domain)
1630                         continue;
1631
1632                 domain = info->domain;
1633
1634                 domain_remove_one_dev_info(domain, info->dev);
1635
1636                 if (!domain_type_is_vm_or_si(domain))
1637                         domain_exit(domain);
1638         }
1639
1640         if (iommu->gcmd & DMA_GCMD_TE)
1641                 iommu_disable_translation(iommu);
1642 }
1643
1644 static void free_dmar_iommu(struct intel_iommu *iommu)
1645 {
1646         if ((iommu->domains) && (iommu->domain_ids)) {
1647                 int elems = (cap_ndoms(iommu->cap) >> 8) + 1;
1648                 int i;
1649
1650                 for (i = 0; i < elems; i++)
1651                         kfree(iommu->domains[i]);
1652                 kfree(iommu->domains);
1653                 kfree(iommu->domain_ids);
1654                 iommu->domains = NULL;
1655                 iommu->domain_ids = NULL;
1656         }
1657
1658         g_iommus[iommu->seq_id] = NULL;
1659
1660         /* free context mapping */
1661         free_context_table(iommu);
1662 }
1663
1664 static struct dmar_domain *alloc_domain(int flags)
1665 {
1666         struct dmar_domain *domain;
1667
1668         domain = alloc_domain_mem();
1669         if (!domain)
1670                 return NULL;
1671
1672         memset(domain, 0, sizeof(*domain));
1673         domain->nid = -1;
1674         domain->flags = flags;
1675         spin_lock_init(&domain->iommu_lock);
1676         INIT_LIST_HEAD(&domain->devices);
1677
1678         return domain;
1679 }
1680
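/*
 * Allocate (or look up) a domain-id for @domain on @iommu. The caller must
 * hold iommu->lock; iommu_attach_domain() below is the locked wrapper, and
 * domain_context_mapping_one() calls this with the lock already held.
 */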
1681 static int __iommu_attach_domain(struct dmar_domain *domain,
1682                                  struct intel_iommu *iommu)
1683 {
1684         int num;
1685         unsigned long ndomains;
1686
1687         num = domain->iommu_did[iommu->seq_id];
1688         if (num)
1689                 return num;
1690
1691         ndomains = cap_ndoms(iommu->cap);
1692         num      = find_first_zero_bit(iommu->domain_ids, ndomains);
1693
1694         if (num < ndomains) {
1695                 set_bit(num, iommu->domain_ids);
1696                 set_iommu_domain(iommu, num, domain);
1697                 domain->iommu_did[iommu->seq_id] = num;
1698         } else {
1699                 num = -ENOSPC;
1700         }
1701
1702         if (num < 0)
1703                 pr_err("%s: No free domain ids\n", iommu->name);
1704
1705         return num;
1706 }
1707
1708 static int iommu_attach_domain(struct dmar_domain *domain,
1709                                struct intel_iommu *iommu)
1710 {
1711         int num;
1712         unsigned long flags;
1713
1714         spin_lock_irqsave(&iommu->lock, flags);
1715         num = __iommu_attach_domain(domain, iommu);
1716         spin_unlock_irqrestore(&iommu->lock, flags);
1717
1718         return num;
1719 }
1720
1721 static void iommu_detach_domain(struct dmar_domain *domain,
1722                                 struct intel_iommu *iommu)
1723 {
1724         unsigned long flags;
1725         int num;
1726
1727         spin_lock_irqsave(&iommu->lock, flags);
1728
1729         num = domain->iommu_did[iommu->seq_id];
1730
1731         /* Nothing to do if this iommu never held a domain-id for us */
1732         if (num != 0) {
1733                 clear_bit(num, iommu->domain_ids);
1734                 set_iommu_domain(iommu, num, NULL);
1735         }
1736
1737         spin_unlock_irqrestore(&iommu->lock, flags);
1738 }
1739
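/*
 * domain_attach_iommu()/domain_detach_iommu() track how many devices behind
 * each IOMMU use this domain. On the first attach for a given IOMMU the
 * domain's NUMA node and capabilities are recomputed; domain_detach_iommu()
 * returns the remaining total attachment count so callers can tell when the
 * last device went away.
 */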
1740 static void domain_attach_iommu(struct dmar_domain *domain,
1741                                struct intel_iommu *iommu)
1742 {
1743         unsigned long flags;
1744
1745         spin_lock_irqsave(&domain->iommu_lock, flags);
1746         domain->iommu_refcnt[iommu->seq_id] += 1;
1747         domain->iommu_count += 1;
1748         if (domain->iommu_refcnt[iommu->seq_id] == 1) {
1749                 domain->nid = iommu->node;
1750                 domain_update_iommu_cap(domain);
1751         }
1752         spin_unlock_irqrestore(&domain->iommu_lock, flags);
1753 }
1754
1755 static int domain_detach_iommu(struct dmar_domain *domain,
1756                                struct intel_iommu *iommu)
1757 {
1758         unsigned long flags;
1759         int count = INT_MAX;
1760
1761         spin_lock_irqsave(&domain->iommu_lock, flags);
1762         domain->iommu_refcnt[iommu->seq_id] -= 1;
1763         count = --domain->iommu_count;
1764         if (domain->iommu_refcnt[iommu->seq_id] == 0) {
1765                 domain_update_iommu_cap(domain);
1766                 domain->iommu_did[iommu->seq_id] = 0;
1767         }
1768         spin_unlock_irqrestore(&domain->iommu_lock, flags);
1769
1770         return count;
1771 }
1772
1773 static struct iova_domain reserved_iova_list;
1774 static struct lock_class_key reserved_rbtree_key;
1775
1776 static int dmar_init_reserved_ranges(void)
1777 {
1778         struct pci_dev *pdev = NULL;
1779         struct iova *iova;
1780         int i;
1781
1782         init_iova_domain(&reserved_iova_list, VTD_PAGE_SIZE, IOVA_START_PFN,
1783                         DMA_32BIT_PFN);
1784
1785         lockdep_set_class(&reserved_iova_list.iova_rbtree_lock,
1786                 &reserved_rbtree_key);
1787
1788         /* IOAPIC ranges shouldn't be accessed by DMA */
1789         iova = reserve_iova(&reserved_iova_list, IOVA_PFN(IOAPIC_RANGE_START),
1790                 IOVA_PFN(IOAPIC_RANGE_END));
1791         if (!iova) {
1792                 pr_err("Reserve IOAPIC range failed\n");
1793                 return -ENODEV;
1794         }
1795
1796         /* Reserve all PCI MMIO to avoid peer-to-peer access */
1797         for_each_pci_dev(pdev) {
1798                 struct resource *r;
1799
1800                 for (i = 0; i < PCI_NUM_RESOURCES; i++) {
1801                         r = &pdev->resource[i];
1802                         if (!r->flags || !(r->flags & IORESOURCE_MEM))
1803                                 continue;
1804                         iova = reserve_iova(&reserved_iova_list,
1805                                             IOVA_PFN(r->start),
1806                                             IOVA_PFN(r->end));
1807                         if (!iova) {
1808                                 pr_err("Reserve iova failed\n");
1809                                 return -ENODEV;
1810                         }
1811                 }
1812         }
1813         return 0;
1814 }
1815
1816 static void domain_reserve_special_ranges(struct dmar_domain *domain)
1817 {
1818         copy_reserved_iova(&reserved_iova_list, &domain->iovad);
1819 }
1820
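/*
 * Round a guest address width up to something the page-table layout can
 * express: 12 bits of page offset plus a multiple of 9 bits per level.
 * For example, gaw = 40 gives r = (40 - 12) % 9 = 1 and thus agaw = 48,
 * while gaw = 39 or gaw = 48 are already exact and stay unchanged.
 */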
1821 static inline int guestwidth_to_adjustwidth(int gaw)
1822 {
1823         int agaw;
1824         int r = (gaw - 12) % 9;
1825
1826         if (r == 0)
1827                 agaw = gaw;
1828         else
1829                 agaw = gaw + 9 - r;
1830         if (agaw > 64)
1831                 agaw = 64;
1832         return agaw;
1833 }
1834
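/*
 * Initialize the IOVA allocator and page-table geometry for a newly created
 * domain. The adjusted guest address width (agaw) is chosen from what the
 * hardware advertises in cap_sagaw(); an agaw of n roughly corresponds to an
 * address width of 30 + 9 * n bits (39-bit/3-level, 48-bit/4-level, ...).
 */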
1835 static int domain_init(struct dmar_domain *domain, int guest_width)
1836 {
1837         struct intel_iommu *iommu;
1838         int adjust_width, agaw;
1839         unsigned long sagaw;
1840
1841         init_iova_domain(&domain->iovad, VTD_PAGE_SIZE, IOVA_START_PFN,
1842                         DMA_32BIT_PFN);
1843         domain_reserve_special_ranges(domain);
1844
1845         /* calculate AGAW */
1846         iommu = domain_get_iommu(domain);
1847         if (guest_width > cap_mgaw(iommu->cap))
1848                 guest_width = cap_mgaw(iommu->cap);
1849         domain->gaw = guest_width;
1850         adjust_width = guestwidth_to_adjustwidth(guest_width);
1851         agaw = width_to_agaw(adjust_width);
1852         sagaw = cap_sagaw(iommu->cap);
1853         if (!test_bit(agaw, &sagaw)) {
1854                 /* hardware doesn't support it, choose a bigger one */
1855                 pr_debug("Hardware doesn't support agaw %d\n", agaw);
1856                 agaw = find_next_bit(&sagaw, 5, agaw);
1857                 if (agaw >= 5)
1858                         return -ENODEV;
1859         }
1860         domain->agaw = agaw;
1861
1862         if (ecap_coherent(iommu->ecap))
1863                 domain->iommu_coherency = 1;
1864         else
1865                 domain->iommu_coherency = 0;
1866
1867         if (ecap_sc_support(iommu->ecap))
1868                 domain->iommu_snooping = 1;
1869         else
1870                 domain->iommu_snooping = 0;
1871
1872         if (intel_iommu_superpage)
1873                 domain->iommu_superpage = fls(cap_super_page_val(iommu->cap));
1874         else
1875                 domain->iommu_superpage = 0;
1876
1877         domain->nid = iommu->node;
1878
1879         /* always allocate the top pgd */
1880         domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
1881         if (!domain->pgd)
1882                 return -ENOMEM;
1883         __iommu_flush_cache(iommu, domain->pgd, PAGE_SIZE);
1884         return 0;
1885 }
1886
1887 static void domain_exit(struct dmar_domain *domain)
1888 {
1889         struct page *freelist = NULL;
1890         int i;
1891
1892         /* Domain 0 is reserved, so don't process it */
1893         if (!domain)
1894                 return;
1895
1896         /* Flush any lazy unmaps that may reference this domain */
1897         if (!intel_iommu_strict)
1898                 flush_unmaps_timeout(0);
1899
1900         /* remove associated devices */
1901         domain_remove_dev_info(domain);
1902
1903         /* destroy iovas */
1904         put_iova_domain(&domain->iovad);
1905
1906         freelist = domain_unmap(domain, 0, DOMAIN_MAX_PFN(domain->gaw));
1907
1908         /* clear attached or cached domains */
1909         rcu_read_lock();
1910         for_each_domain_iommu(i, domain)
1911                 iommu_detach_domain(domain, g_iommus[i]);
1912         rcu_read_unlock();
1913
1914         dma_free_pagelist(freelist);
1915
1916         free_domain_mem(domain);
1917 }
1918
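/*
 * Install the context entry for (bus, devfn) on @iommu so that DMA from the
 * device is translated through @domain's page tables (or passed through for
 * the static identity domain when hardware pass-through is available). A
 * per-iommu domain-id is allocated here if the domain does not have one yet.
 */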
1919 static int domain_context_mapping_one(struct dmar_domain *domain,
1920                                       struct intel_iommu *iommu,
1921                                       u8 bus, u8 devfn)
1922 {
1923         int translation = CONTEXT_TT_MULTI_LEVEL;
1924         struct device_domain_info *info = NULL;
1925         struct context_entry *context;
1926         unsigned long flags;
1927         struct dma_pte *pgd;
1928         int id;
1929         int agaw;
1930
1931         if (hw_pass_through && domain_type_is_si(domain))
1932                 translation = CONTEXT_TT_PASS_THROUGH;
1933
1934         pr_debug("Set context mapping for %02x:%02x.%d\n",
1935                 bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
1936
1937         BUG_ON(!domain->pgd);
1938
1939         spin_lock_irqsave(&iommu->lock, flags);
1940         context = iommu_context_addr(iommu, bus, devfn, 1);
1941         spin_unlock_irqrestore(&iommu->lock, flags);
1942         if (!context)
1943                 return -ENOMEM;
1944         spin_lock_irqsave(&iommu->lock, flags);
1945         if (context_present(context)) {
1946                 spin_unlock_irqrestore(&iommu->lock, flags);
1947                 return 0;
1948         }
1949
1950         pgd = domain->pgd;
1951
1952         id = __iommu_attach_domain(domain, iommu);
1953         if (id < 0) {
1954                 spin_unlock_irqrestore(&iommu->lock, flags);
1955                 pr_err("%s: No free domain ids\n", iommu->name);
1956                 return -EFAULT;
1957         }
1958
1959         context_clear_entry(context);
1960         context_set_domain_id(context, id);
1961
1962         /*
1963          * Skip the top levels of the page tables for an iommu that has a
1964          * smaller agaw than the domain's. Unnecessary for PT mode.
1965          */
1966         if (translation != CONTEXT_TT_PASS_THROUGH) {
1967                 for (agaw = domain->agaw; agaw != iommu->agaw; agaw--) {
1968                         pgd = phys_to_virt(dma_pte_addr(pgd));
1969                         if (!dma_pte_present(pgd)) {
1970                                 spin_unlock_irqrestore(&iommu->lock, flags);
1971                                 return -ENOMEM;
1972                         }
1973                 }
1974
1975                 info = iommu_support_dev_iotlb(domain, iommu, bus, devfn);
1976                 translation = info ? CONTEXT_TT_DEV_IOTLB :
1977                                      CONTEXT_TT_MULTI_LEVEL;
1978
1979                 context_set_address_root(context, virt_to_phys(pgd));
1980                 context_set_address_width(context, iommu->agaw);
1981         } else {
1982                 /*
1983                  * In pass through mode, AW must be programmed to
1984                  * indicate the largest AGAW value supported by
1985                  * hardware. And ASR is ignored by hardware.
1986                  */
1987                 context_set_address_width(context, iommu->msagaw);
1988         }
1989
1990         context_set_translation_type(context, translation);
1991         context_set_fault_enable(context);
1992         context_set_present(context);
1993         domain_flush_cache(domain, context, sizeof(*context));
1994
1995         /*
1996          * It's a non-present to present mapping. If the hardware doesn't cache
1997          * non-present entries, we only need to flush the write-buffer. If it
1998          * _does_ cache non-present entries, then it does so in the special
1999          * domain #0, which we have to flush:
2000          */
2001         if (cap_caching_mode(iommu->cap)) {
2002                 iommu->flush.flush_context(iommu, 0,
2003                                            (((u16)bus) << 8) | devfn,
2004                                            DMA_CCMD_MASK_NOBIT,
2005                                            DMA_CCMD_DEVICE_INVL);
2006                 iommu->flush.flush_iotlb(iommu, id, 0, 0, DMA_TLB_DSI_FLUSH);
2007         } else {
2008                 iommu_flush_write_buffer(iommu);
2009         }
2010         iommu_enable_dev_iotlb(info);
2011         spin_unlock_irqrestore(&iommu->lock, flags);
2012
2013         domain_attach_iommu(domain, iommu);
2014
2015         return 0;
2016 }
2017
2018 struct domain_context_mapping_data {
2019         struct dmar_domain *domain;
2020         struct intel_iommu *iommu;
2021 };
2022
2023 static int domain_context_mapping_cb(struct pci_dev *pdev,
2024                                      u16 alias, void *opaque)
2025 {
2026         struct domain_context_mapping_data *data = opaque;
2027
2028         return domain_context_mapping_one(data->domain, data->iommu,
2029                                           PCI_BUS_NUM(alias), alias & 0xff);
2030 }
2031
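/*
 * Map the context for @dev and for every PCI requester-ID alias it may use
 * (for example, a conventional PCI device behind a PCIe-to-PCI bridge issues
 * DMA with the bridge's ID), so that all possible source-ids hit the same
 * domain.
 */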
2032 static int
2033 domain_context_mapping(struct dmar_domain *domain, struct device *dev)
2034 {
2035         struct intel_iommu *iommu;
2036         u8 bus, devfn;
2037         struct domain_context_mapping_data data;
2038
2039         iommu = device_to_iommu(dev, &bus, &devfn);
2040         if (!iommu)
2041                 return -ENODEV;
2042
2043         if (!dev_is_pci(dev))
2044                 return domain_context_mapping_one(domain, iommu, bus, devfn);
2045
2046         data.domain = domain;
2047         data.iommu = iommu;
2048
2049         return pci_for_each_dma_alias(to_pci_dev(dev),
2050                                       &domain_context_mapping_cb, &data);
2051 }
2052
2053 static int domain_context_mapped_cb(struct pci_dev *pdev,
2054                                     u16 alias, void *opaque)
2055 {
2056         struct intel_iommu *iommu = opaque;
2057
2058         return !device_context_mapped(iommu, PCI_BUS_NUM(alias), alias & 0xff);
2059 }
2060
2061 static int domain_context_mapped(struct device *dev)
2062 {
2063         struct intel_iommu *iommu;
2064         u8 bus, devfn;
2065
2066         iommu = device_to_iommu(dev, &bus, &devfn);
2067         if (!iommu)
2068                 return -ENODEV;
2069
2070         if (!dev_is_pci(dev))
2071                 return device_context_mapped(iommu, bus, devfn);
2072
2073         return !pci_for_each_dma_alias(to_pci_dev(dev),
2074                                        domain_context_mapped_cb, iommu);
2075 }
2076
2077 /* Returns the number of VT-d pages, with the range rounded up to MM page size */
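/*
 * For example (assuming 4KiB pages for both the MM and VT-d, i.e. PAGE_SHIFT
 * == VTD_PAGE_SHIFT): host_addr = 0x1234, size = 0x2000 leaves an offset of
 * 0x234 within the page, PAGE_ALIGN(0x2234) = 0x3000, i.e. 3 pages.
 */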
2078 static inline unsigned long aligned_nrpages(unsigned long host_addr,
2079                                             size_t size)
2080 {
2081         host_addr &= ~PAGE_MASK;
2082         return PAGE_ALIGN(host_addr + size) >> VTD_PAGE_SHIFT;
2083 }
2084
2085 /* Return largest possible superpage level for a given mapping */
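/*
 * For example, with 2MiB superpage support a mapping whose iov_pfn and
 * phy_pfn both have their low 9 bits clear and which covers at least 512
 * VT-d pages can use level 2 (one 2MiB page-table entry); anything less
 * aligned falls back to level 1 (4KiB).
 */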
2086 static inline int hardware_largepage_caps(struct dmar_domain *domain,
2087                                           unsigned long iov_pfn,
2088                                           unsigned long phy_pfn,
2089                                           unsigned long pages)
2090 {
2091         int support, level = 1;
2092         unsigned long pfnmerge;
2093
2094         support = domain->iommu_superpage;
2095
2096         /* To use a large page, the virtual *and* physical addresses
2097            must be aligned to 2MiB/1GiB/etc. Lower bits set in either
2098            of them will mean we have to use smaller pages. So just
2099            merge them and check both at once. */
2100         pfnmerge = iov_pfn | phy_pfn;
2101
2102         while (support && !(pfnmerge & ~VTD_STRIDE_MASK)) {
2103                 pages >>= VTD_STRIDE_SHIFT;
2104                 if (!pages)
2105                         break;
2106                 pfnmerge >>= VTD_STRIDE_SHIFT;
2107                 level++;
2108                 support--;
2109         }
2110         return level;
2111 }
2112
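/*
 * Core mapping routine: populate the domain's page tables for nr_pages
 * starting at iov_pfn, taking the physical pages either from a scatterlist
 * (sg != NULL) or from a contiguous range starting at phys_pfn. Superpages
 * are used whenever hardware_largepage_caps() says the alignment and length
 * allow it.
 */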
2113 static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
2114                             struct scatterlist *sg, unsigned long phys_pfn,
2115                             unsigned long nr_pages, int prot)
2116 {
2117         struct dma_pte *first_pte = NULL, *pte = NULL;
2118         phys_addr_t uninitialized_var(pteval);
2119         unsigned long sg_res = 0;
2120         unsigned int largepage_lvl = 0;
2121         unsigned long lvl_pages = 0;
2122
2123         BUG_ON(!domain_pfn_supported(domain, iov_pfn + nr_pages - 1));
2124
2125         if ((prot & (DMA_PTE_READ|DMA_PTE_WRITE)) == 0)
2126                 return -EINVAL;
2127
2128         prot &= DMA_PTE_READ | DMA_PTE_WRITE | DMA_PTE_SNP;
2129
2130         if (!sg) {
2131                 sg_res = nr_pages;
2132                 pteval = ((phys_addr_t)phys_pfn << VTD_PAGE_SHIFT) | prot;
2133         }
2134
2135         while (nr_pages > 0) {
2136                 uint64_t tmp;
2137
2138                 if (!sg_res) {
2139                         sg_res = aligned_nrpages(sg->offset, sg->length);
2140                         sg->dma_address = ((dma_addr_t)iov_pfn << VTD_PAGE_SHIFT) + sg->offset;
2141                         sg->dma_length = sg->length;
2142                         pteval = page_to_phys(sg_page(sg)) | prot;
2143                         phys_pfn = pteval >> VTD_PAGE_SHIFT;
2144                 }
2145
2146                 if (!pte) {
2147                         largepage_lvl = hardware_largepage_caps(domain, iov_pfn, phys_pfn, sg_res);
2148
2149                         first_pte = pte = pfn_to_dma_pte(domain, iov_pfn, &largepage_lvl);
2150                         if (!pte)
2151                                 return -ENOMEM;
2152                         /* It is a large page */
2153                         if (largepage_lvl > 1) {
2154                                 pteval |= DMA_PTE_LARGE_PAGE;
2155                                 lvl_pages = lvl_to_nr_pages(largepage_lvl);
2156                                 /*
2157                                  * Ensure that old small page tables are
2158                                  * removed to make room for superpage,
2159                                  * if they exist.
2160                                  */
2161                                 dma_pte_free_pagetable(domain, iov_pfn,
2162                                                        iov_pfn + lvl_pages - 1);
2163                         } else {
2164                                 pteval &= ~(uint64_t)DMA_PTE_LARGE_PAGE;
2165                         }
2166
2167                 }
2168                 /* We don't need a lock here; nobody else
2169                  * touches this iova range.
2170                  */
2171                 tmp = cmpxchg64_local(&pte->val, 0ULL, pteval);
2172                 if (tmp) {
2173                         static int dumps = 5;
2174                         pr_crit("ERROR: DMA PTE for vPFN 0x%lx already set (to %llx not %llx)\n",
2175                                 iov_pfn, tmp, (unsigned long long)pteval);
2176                         if (dumps) {
2177                                 dumps--;
2178                                 debug_dma_dump_mappings(NULL);
2179                         }
2180                         WARN_ON(1);
2181                 }
2182
2183                 lvl_pages = lvl_to_nr_pages(largepage_lvl);
2184
2185                 BUG_ON(nr_pages < lvl_pages);
2186                 BUG_ON(sg_res < lvl_pages);
2187
2188                 nr_pages -= lvl_pages;
2189                 iov_pfn += lvl_pages;
2190                 phys_pfn += lvl_pages;
2191                 pteval += lvl_pages * VTD_PAGE_SIZE;
2192                 sg_res -= lvl_pages;
2193
2194                 /* If the next PTE would be the first in a new page, then we
2195                    need to flush the cache on the entries we've just written.
2196                    And then we'll need to recalculate 'pte', so clear it and
2197                    let it get set again in the if (!pte) block above.
2198
2199                    If we're done (!nr_pages) we need to flush the cache too.
2200
2201                    Also if we've been setting superpages, we may need to
2202                    recalculate 'pte' and switch back to smaller pages for the
2203                    end of the mapping, if the trailing size is not enough to
2204                    use another superpage (i.e. sg_res < lvl_pages). */
2205                 pte++;
2206                 if (!nr_pages || first_pte_in_page(pte) ||
2207                     (largepage_lvl > 1 && sg_res < lvl_pages)) {
2208                         domain_flush_cache(domain, first_pte,
2209                                            (void *)pte - (void *)first_pte);
2210                         pte = NULL;
2211                 }
2212
2213                 if (!sg_res && nr_pages)
2214                         sg = sg_next(sg);
2215         }
2216         return 0;
2217 }
2218
2219 static inline int domain_sg_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
2220                                     struct scatterlist *sg, unsigned long nr_pages,
2221                                     int prot)
2222 {
2223         return __domain_mapping(domain, iov_pfn, sg, 0, nr_pages, prot);
2224 }
2225
2226 static inline int domain_pfn_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
2227                                      unsigned long phys_pfn, unsigned long nr_pages,
2228                                      int prot)
2229 {
2230         return __domain_mapping(domain, iov_pfn, NULL, phys_pfn, nr_pages, prot);
2231 }
2232
2233 static void iommu_detach_dev(struct intel_iommu *iommu, u8 bus, u8 devfn)
2234 {
2235         if (!iommu)
2236                 return;
2237
2238         clear_context_table(iommu, bus, devfn);
2239         iommu->flush.flush_context(iommu, 0, 0, 0,
2240                                            DMA_CCMD_GLOBAL_INVL);
2241         iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
2242 }
2243
2244 static inline void unlink_domain_info(struct device_domain_info *info)
2245 {
2246         assert_spin_locked(&device_domain_lock);
2247         list_del(&info->link);
2248         list_del(&info->global);
2249         if (info->dev)
2250                 info->dev->archdata.iommu = NULL;
2251 }
2252
2253 static void domain_remove_dev_info(struct dmar_domain *domain)
2254 {
2255         struct device_domain_info *info, *tmp;
2256
2257         list_for_each_entry_safe(info, tmp, &domain->devices, link)
2258                 domain_remove_one_dev_info(domain, info->dev);
2259 }
2260
2261 /*
2262  * find_domain
2263  * Note: we use struct device->archdata.iommu to store the per-device info
2264  */
2265 static struct dmar_domain *find_domain(struct device *dev)
2266 {
2267         struct device_domain_info *info;
2268
2269         /* No lock here, assumes no domain exit in normal case */
2270         info = dev->archdata.iommu;
2271         if (info)
2272                 return info->domain;
2273         return NULL;
2274 }
2275
2276 static inline struct device_domain_info *
2277 dmar_search_domain_by_dev_info(int segment, int bus, int devfn)
2278 {
2279         struct device_domain_info *info;
2280
2281         list_for_each_entry(info, &device_domain_list, global)
2282                 if (info->iommu->segment == segment && info->bus == bus &&
2283                     info->devfn == devfn)
2284                         return info;
2285
2286         return NULL;
2287 }
2288
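/*
 * Bind @dev (or a bare bus/devfn alias when @dev is NULL) to @domain:
 * allocate and link the device_domain_info, and set up the context mapping
 * for real devices. If somebody else attached the device first, the already
 * existing domain is returned and the caller must free the one it passed in.
 */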
2289 static struct dmar_domain *dmar_insert_dev_info(struct intel_iommu *iommu,
2290                                                 int bus, int devfn,
2291                                                 struct device *dev,
2292                                                 struct dmar_domain *domain)
2293 {
2294         struct dmar_domain *found = NULL;
2295         struct device_domain_info *info;
2296         unsigned long flags;
2297
2298         info = alloc_devinfo_mem();
2299         if (!info)
2300                 return NULL;
2301
2302         info->bus = bus;
2303         info->devfn = devfn;
2304         info->dev = dev;
2305         info->domain = domain;
2306         info->iommu = iommu;
2307
2308         spin_lock_irqsave(&device_domain_lock, flags);
2309         if (dev)
2310                 found = find_domain(dev);
2311         else {
2312                 struct device_domain_info *info2;
2313                 info2 = dmar_search_domain_by_dev_info(iommu->segment, bus, devfn);
2314                 if (info2)
2315                         found = info2->domain;
2316         }
2317         if (found) {
2318                 spin_unlock_irqrestore(&device_domain_lock, flags);
2319                 free_devinfo_mem(info);
2320                 /* Caller must free the original domain */
2321                 return found;
2322         }
2323
2324         list_add(&info->link, &domain->devices);
2325         list_add(&info->global, &device_domain_list);
2326         if (dev)
2327                 dev->archdata.iommu = info;
2328         spin_unlock_irqrestore(&device_domain_lock, flags);
2329
2330         if (dev && domain_context_mapping(domain, dev)) {
2331                 pr_err("Domain context map for %s failed\n", dev_name(dev));
2332                 domain_remove_one_dev_info(domain, dev);
2333                 return NULL;
2334         }
2335
2336         return domain;
2337 }
2338
2339 static int get_last_alias(struct pci_dev *pdev, u16 alias, void *opaque)
2340 {
2341         *(u16 *)opaque = alias;
2342         return 0;
2343 }
2344
2345 /* domain is initialized */
2346 static struct dmar_domain *get_domain_for_dev(struct device *dev, int gaw)
2347 {
2348         struct device_domain_info *info = NULL;
2349         struct dmar_domain *domain, *tmp;
2350         struct intel_iommu *iommu;
2351         unsigned long flags;
2352         u16 dma_alias;
2353         u8 bus, devfn;
2354
2355         domain = find_domain(dev);
2356         if (domain)
2357                 return domain;
2358
2359         iommu = device_to_iommu(dev, &bus, &devfn);
2360         if (!iommu)
2361                 return NULL;
2362
2363         if (dev_is_pci(dev)) {
2364                 struct pci_dev *pdev = to_pci_dev(dev);
2365
2366                 pci_for_each_dma_alias(pdev, get_last_alias, &dma_alias);
2367
2368                 spin_lock_irqsave(&device_domain_lock, flags);
2369                 info = dmar_search_domain_by_dev_info(pci_domain_nr(pdev->bus),
2370                                                       PCI_BUS_NUM(dma_alias),
2371                                                       dma_alias & 0xff);
2372                 if (info) {
2373                         iommu = info->iommu;
2374                         domain = info->domain;
2375                 }
2376                 spin_unlock_irqrestore(&device_domain_lock, flags);
2377
2378                 /* The DMA alias already has a domain, use it */
2379                 if (info)
2380                         goto found_domain;
2381         }
2382
2383         /* Allocate and initialize new domain for the device */
2384         domain = alloc_domain(0);
2385         if (!domain)
2386                 return NULL;
2387         if (iommu_attach_domain(domain, iommu) < 0) {
2388                 free_domain_mem(domain);
2389                 return NULL;
2390         }
2391         domain_attach_iommu(domain, iommu);
2392         if (domain_init(domain, gaw)) {
2393                 domain_exit(domain);
2394                 return NULL;
2395         }
2396
2397         /* register PCI DMA alias device */
2398         if (dev_is_pci(dev)) {
2399                 tmp = dmar_insert_dev_info(iommu, PCI_BUS_NUM(dma_alias),
2400                                            dma_alias & 0xff, NULL, domain);
2401
2402                 if (!tmp || tmp != domain) {
2403                         domain_exit(domain);
2404                         domain = tmp;
2405                 }
2406
2407                 if (!domain)
2408                         return NULL;
2409         }
2410
2411 found_domain:
2412         tmp = dmar_insert_dev_info(iommu, bus, devfn, dev, domain);
2413
2414         if (!tmp || tmp != domain) {
2415                 domain_exit(domain);
2416                 domain = tmp;
2417         }
2418
2419         return domain;
2420 }
2421
2422 static int iommu_identity_mapping;
2423 #define IDENTMAP_ALL            1
2424 #define IDENTMAP_GFX            2
2425 #define IDENTMAP_AZALIA         4
2426
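/*
 * Create a 1:1 (IOVA == physical address) mapping for [start, end] in
 * @domain, reserving the IOVA range first so the allocator will not hand it
 * out for anything else.
 */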
2427 static int iommu_domain_identity_map(struct dmar_domain *domain,
2428                                      unsigned long long start,
2429                                      unsigned long long end)
2430 {
2431         unsigned long first_vpfn = start >> VTD_PAGE_SHIFT;
2432         unsigned long last_vpfn = end >> VTD_PAGE_SHIFT;
2433
2434         if (!reserve_iova(&domain->iovad, dma_to_mm_pfn(first_vpfn),
2435                           dma_to_mm_pfn(last_vpfn))) {
2436                 pr_err("Reserving iova failed\n");
2437                 return -ENOMEM;
2438         }
2439
2440         pr_debug("Mapping reserved region %llx-%llx\n", start, end);
2441         /*
2442          * RMRR range might have overlap with physical memory range,
2443          * clear it first
2444          */
2445         dma_pte_clear_range(domain, first_vpfn, last_vpfn);
2446
2447         return domain_pfn_mapping(domain, first_vpfn, first_vpfn,
2448                                   last_vpfn - first_vpfn + 1,
2449                                   DMA_PTE_READ|DMA_PTE_WRITE);
2450 }
2451
2452 static int iommu_prepare_identity_map(struct device *dev,
2453                                       unsigned long long start,
2454                                       unsigned long long end)
2455 {
2456         struct dmar_domain *domain;
2457         int ret;
2458
2459         domain = get_domain_for_dev(dev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
2460         if (!domain)
2461                 return -ENOMEM;
2462
2463         /* For _hardware_ passthrough, don't bother. But for software
2464            passthrough, we do it anyway -- it may indicate a memory
2465            range which is reserved in E820 and so didn't get set
2466            up in si_domain to start with */
2467         if (domain == si_domain && hw_pass_through) {
2468                 pr_warn("Ignoring identity map for HW passthrough device %s [0x%Lx - 0x%Lx]\n",
2469                         dev_name(dev), start, end);
2470                 return 0;
2471         }
2472
2473         pr_info("Setting identity map for device %s [0x%Lx - 0x%Lx]\n",
2474                 dev_name(dev), start, end);
2475
2476         if (end < start) {
2477                 WARN(1, "Your BIOS is broken; RMRR ends before it starts!\n"
2478                         "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
2479                         dmi_get_system_info(DMI_BIOS_VENDOR),
2480                         dmi_get_system_info(DMI_BIOS_VERSION),
2481                         dmi_get_system_info(DMI_PRODUCT_VERSION));
2482                 ret = -EIO;
2483                 goto error;
2484         }
2485
2486         if (end >> agaw_to_width(domain->agaw)) {
2487                 WARN(1, "Your BIOS is broken; RMRR exceeds permitted address width (%d bits)\n"
2488                      "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
2489                      agaw_to_width(domain->agaw),
2490                      dmi_get_system_info(DMI_BIOS_VENDOR),
2491                      dmi_get_system_info(DMI_BIOS_VERSION),
2492                      dmi_get_system_info(DMI_PRODUCT_VERSION));
2493                 ret = -EIO;
2494                 goto error;
2495         }
2496
2497         ret = iommu_domain_identity_map(domain, start, end);
2498         if (ret)
2499                 goto error;
2500
2501         return 0;
2502
2503  error:
2504         domain_exit(domain);
2505         return ret;
2506 }
2507
2508 static inline int iommu_prepare_rmrr_dev(struct dmar_rmrr_unit *rmrr,
2509                                          struct device *dev)
2510 {
2511         if (dev->archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO)
2512                 return 0;
2513         return iommu_prepare_identity_map(dev, rmrr->base_address,
2514                                           rmrr->end_address);
2515 }
2516
2517 #ifdef CONFIG_INTEL_IOMMU_FLOPPY_WA
2518 static inline void iommu_prepare_isa(void)
2519 {
2520         struct pci_dev *pdev;
2521         int ret;
2522
2523         pdev = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, NULL);
2524         if (!pdev)
2525                 return;
2526
2527         pr_info("Prepare 0-16MiB unity mapping for LPC\n");
2528         ret = iommu_prepare_identity_map(&pdev->dev, 0, 16*1024*1024 - 1);
2529
2530         if (ret)
2531                 pr_err("Failed to create 0-16MiB identity map - floppy might not work\n");
2532
2533         pci_dev_put(pdev);
2534 }
2535 #else
2536 static inline void iommu_prepare_isa(void)
2537 {
2538         return;
2539 }
2540 #endif /* !CONFIG_INTEL_IOMMU_FLOPPY_WA */
2541
2542 static int md_domain_init(struct dmar_domain *domain, int guest_width);
2543
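/*
 * Build the static identity (si) domain used for pass-through devices: with
 * hardware pass-through the page tables are never walked, otherwise every
 * usable RAM range of every online node is identity-mapped into it.
 */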
2544 static int __init si_domain_init(int hw)
2545 {
2546         int nid, ret = 0;
2547
2548         si_domain = alloc_domain(DOMAIN_FLAG_STATIC_IDENTITY);
2549         if (!si_domain)
2550                 return -EFAULT;
2551
2552         if (md_domain_init(si_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
2553                 domain_exit(si_domain);
2554                 return -EFAULT;
2555         }
2556
2557         pr_debug("Identity mapping domain allocated\n");
2558
2559         if (hw)
2560                 return 0;
2561
2562         for_each_online_node(nid) {
2563                 unsigned long start_pfn, end_pfn;
2564                 int i;
2565
2566                 for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
2567                         ret = iommu_domain_identity_map(si_domain,
2568                                         PFN_PHYS(start_pfn), PFN_PHYS(end_pfn));
2569                         if (ret)
2570                                 return ret;
2571                 }
2572         }
2573
2574         return 0;
2575 }
2576
2577 static int identity_mapping(struct device *dev)
2578 {
2579         struct device_domain_info *info;
2580
2581         if (likely(!iommu_identity_mapping))
2582                 return 0;
2583
2584         info = dev->archdata.iommu;
2585         if (info && info != DUMMY_DEVICE_DOMAIN_INFO)
2586                 return (info->domain == si_domain);
2587
2588         return 0;
2589 }
2590
2591 static int domain_add_dev_info(struct dmar_domain *domain, struct device *dev)
2592 {
2593         struct dmar_domain *ndomain;
2594         struct intel_iommu *iommu;
2595         u8 bus, devfn;
2596
2597         iommu = device_to_iommu(dev, &bus, &devfn);
2598         if (!iommu)
2599                 return -ENODEV;
2600
2601         ndomain = dmar_insert_dev_info(iommu, bus, devfn, dev, domain);
2602         if (ndomain != domain)
2603                 return -EBUSY;
2604
2605         return 0;
2606 }
2607
2608 static bool device_has_rmrr(struct device *dev)
2609 {
2610         struct dmar_rmrr_unit *rmrr;
2611         struct device *tmp;
2612         int i;
2613
2614         rcu_read_lock();
2615         for_each_rmrr_units(rmrr) {
2616                 /*
2617                  * Return TRUE if this RMRR contains the device that
2618                  * is passed in.
2619                  */
2620                 for_each_active_dev_scope(rmrr->devices,
2621                                           rmrr->devices_cnt, i, tmp)
2622                         if (tmp == dev) {
2623                                 rcu_read_unlock();
2624                                 return true;
2625                         }
2626         }
2627         rcu_read_unlock();
2628         return false;
2629 }
2630
2631 /*
2632  * There are a couple cases where we need to restrict the functionality of
2633  * devices associated with RMRRs.  The first is when evaluating a device for
2634  * identity mapping because problems exist when devices are moved in and out
2635  * of domains and their respective RMRR information is lost.  This means that
2636  * a device with associated RMRRs will never be in a "passthrough" domain.
2637  * The second is use of the device through the IOMMU API.  This interface
2638  * expects to have full control of the IOVA space for the device.  We cannot
2639  * satisfy both the requirement that RMRR access is maintained and have an
2640  * unencumbered IOVA space.  We also have no ability to quiesce the device's
2641  * use of the RMRR space or even inform the IOMMU API user of the restriction.
2642  * We therefore prevent devices associated with an RMRR from participating in
2643  * the IOMMU API, which eliminates them from device assignment.
2644  *
2645  * In both cases we assume that PCI USB devices with RMRRs have them largely
2646  * for historical reasons and that the RMRR space is not actively used post
2647  * boot.  This exclusion may change if vendors begin to abuse it.
2648  *
2649  * The same exception is made for graphics devices, with the requirement that
2650  * any use of the RMRR regions will be torn down before assigning the device
2651  * to a guest.
2652  */
2653 static bool device_is_rmrr_locked(struct device *dev)
2654 {
2655         if (!device_has_rmrr(dev))
2656                 return false;
2657
2658         if (dev_is_pci(dev)) {
2659                 struct pci_dev *pdev = to_pci_dev(dev);
2660
2661                 if (IS_USB_DEVICE(pdev) || IS_GFX_DEVICE(pdev))
2662                         return false;
2663         }
2664
2665         return true;
2666 }
2667
2668 static int iommu_should_identity_map(struct device *dev, int startup)
2669 {
2670
2671         if (dev_is_pci(dev)) {
2672                 struct pci_dev *pdev = to_pci_dev(dev);
2673
2674                 if (device_is_rmrr_locked(dev))
2675                         return 0;
2676
2677                 if ((iommu_identity_mapping & IDENTMAP_AZALIA) && IS_AZALIA(pdev))
2678                         return 1;
2679
2680                 if ((iommu_identity_mapping & IDENTMAP_GFX) && IS_GFX_DEVICE(pdev))
2681                         return 1;
2682
2683                 if (!(iommu_identity_mapping & IDENTMAP_ALL))
2684                         return 0;
2685
2686                 /*
2687                  * We want to start off with all devices in the 1:1 domain, and
2688                  * take them out later if we find they can't access all of memory.
2689                  *
2690                  * However, we can't do this for PCI devices behind bridges,
2691                  * because all PCI devices behind the same bridge will end up
2692                  * with the same source-id on their transactions.
2693                  *
2694                  * Practically speaking, we can't change things around for these
2695                  * devices at run-time, because we can't be sure there'll be no
2696                  * DMA transactions in flight for any of their siblings.
2697                  *
2698                  * So PCI devices (unless they're on the root bus) as well as
2699                  * their parent PCI-PCI or PCIe-PCI bridges must be left _out_ of
2700                  * the 1:1 domain, just in _case_ one of their siblings turns out
2701                  * not to be able to map all of memory.
2702                  */
2703                 if (!pci_is_pcie(pdev)) {
2704                         if (!pci_is_root_bus(pdev->bus))
2705                                 return 0;
2706                         if (pdev->class >> 8 == PCI_CLASS_BRIDGE_PCI)
2707                                 return 0;
2708                 } else if (pci_pcie_type(pdev) == PCI_EXP_TYPE_PCI_BRIDGE)
2709                         return 0;
2710         } else {
2711                 if (device_has_rmrr(dev))
2712                         return 0;
2713         }
2714
2715         /*
2716          * At boot time, we don't yet know if devices will be 64-bit capable.
2717          * Assume that they will -- if they turn out not to be, then we can
2718          * take them out of the 1:1 domain later.
2719          */
2720         if (!startup) {
2721                 /*
2722                  * If the device's dma_mask is less than the system's memory
2723                  * size then this is not a candidate for identity mapping.
2724                  */
2725                 u64 dma_mask = *dev->dma_mask;
2726
2727                 if (dev->coherent_dma_mask &&
2728                     dev->coherent_dma_mask < dma_mask)
2729                         dma_mask = dev->coherent_dma_mask;
2730
2731                 return dma_mask >= dma_get_required_mask(dev);
2732         }
2733
2734         return 1;
2735 }
2736
2737 static int __init dev_prepare_static_identity_mapping(struct device *dev, int hw)
2738 {
2739         int ret;
2740
2741         if (!iommu_should_identity_map(dev, 1))
2742                 return 0;
2743
2744         ret = domain_add_dev_info(si_domain, dev);
2745         if (!ret)
2746                 pr_info("%s identity mapping for device %s\n",
2747                         hw ? "Hardware" : "Software", dev_name(dev));
2748         else if (ret == -ENODEV)
2749                 /* device not associated with an iommu */
2750                 ret = 0;
2751
2752         return ret;
2753 }
2754
2755
2756 static int __init iommu_prepare_static_identity_mapping(int hw)
2757 {
2758         struct pci_dev *pdev = NULL;
2759         struct dmar_drhd_unit *drhd;
2760         struct intel_iommu *iommu;
2761         struct device *dev;
2762         int i;
2763         int ret = 0;
2764
2765         for_each_pci_dev(pdev) {
2766                 ret = dev_prepare_static_identity_mapping(&pdev->dev, hw);
2767                 if (ret)
2768                         return ret;
2769         }
2770
2771         for_each_active_iommu(iommu, drhd)
2772                 for_each_active_dev_scope(drhd->devices, drhd->devices_cnt, i, dev) {
2773                         struct acpi_device_physical_node *pn;
2774                         struct acpi_device *adev;
2775
2776                         if (dev->bus != &acpi_bus_type)
2777                                 continue;
2778
2779                         adev = to_acpi_device(dev);
2780                         mutex_lock(&adev->physical_node_lock);
2781                         list_for_each_entry(pn, &adev->physical_node_list, node) {
2782                                 ret = dev_prepare_static_identity_mapping(pn->dev, hw);
2783                                 if (ret)
2784                                         break;
2785                         }
2786                         mutex_unlock(&adev->physical_node_lock);
2787                         if (ret)
2788                                 return ret;
2789                 }
2790
2791         return 0;
2792 }
2793
2794 static void intel_iommu_init_qi(struct intel_iommu *iommu)
2795 {
2796         /*
2797          * Start from a sane iommu hardware state.
2798          * If queued invalidation was already initialized by us
2799          * (for example, while enabling interrupt remapping), then
2800          * things are already rolling from a sane state.
2801          */
2802         if (!iommu->qi) {
2803                 /*
2804                  * Clear any previous faults.
2805                  */
2806                 dmar_fault(-1, iommu);
2807                 /*
2808                  * Disable queued invalidation if supported and already enabled
2809                  * before OS handover.
2810                  */
2811                 dmar_disable_qi(iommu);
2812         }
2813
2814         if (dmar_enable_qi(iommu)) {
2815                 /*
2816                  * Queued invalidation could not be enabled; use register-based invalidation
2817                  */
2818                 iommu->flush.flush_context = __iommu_flush_context;
2819                 iommu->flush.flush_iotlb = __iommu_flush_iotlb;
2820                 pr_info("%s: Using Register based invalidation\n",
2821                         iommu->name);
2822         } else {
2823                 iommu->flush.flush_context = qi_flush_context;
2824                 iommu->flush.flush_iotlb = qi_flush_iotlb;
2825                 pr_info("%s: Using Queued invalidation\n", iommu->name);
2826         }
2827 }
2828
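/*
 * Used when the previous (crashed) kernel left translation enabled: copy one
 * bus's context table from the old root entry so that ongoing DMA keeps
 * working while we take over. With the extended root entry format (ext),
 * each bus has two context tables, one for each half of the device-function
 * space.
 */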
2829 static int copy_context_table(struct intel_iommu *iommu,
2830                               struct root_entry *old_re,
2831                               struct context_entry **tbl,
2832                               int bus, bool ext)
2833 {
2834         struct context_entry *old_ce = NULL, *new_ce = NULL, ce;
2835         int tbl_idx, pos = 0, idx, devfn, ret = 0, did;
2836         phys_addr_t old_ce_phys;
2837
2838         tbl_idx = ext ? bus * 2 : bus;
2839
2840         for (devfn = 0; devfn < 256; devfn++) {
2841                 /* First calculate the correct index */
2842                 idx = (ext ? devfn * 2 : devfn) % 256;
2843
2844                 if (idx == 0) {
2845                         /* First save what we may have and clean up */
2846                         if (new_ce) {
2847                                 tbl[tbl_idx] = new_ce;
2848                                 __iommu_flush_cache(iommu, new_ce,
2849                                                     VTD_PAGE_SIZE);
2850                                 pos = 1;
2851                         }
2852
2853                         if (old_ce)
2854                                 iounmap(old_ce);
2855
2856                         ret = 0;
2857                         if (devfn < 0x80)
2858                                 old_ce_phys = root_entry_lctp(old_re);
2859                         else
2860                                 old_ce_phys = root_entry_uctp(old_re);
2861
2862                         if (!old_ce_phys) {
2863                                 if (ext && devfn == 0) {
2864                                         /* No LCTP, try UCTP */
2865                                         devfn = 0x7f;
2866                                         continue;
2867                                 } else {
2868                                         goto out;
2869                                 }
2870                         }
2871
2872                         ret = -ENOMEM;
2873                         old_ce = ioremap_cache(old_ce_phys, PAGE_SIZE);
2874                         if (!old_ce)
2875                                 goto out;
2876
2877                         new_ce = alloc_pgtable_page(iommu->node);
2878                         if (!new_ce)
2879                                 goto out_unmap;
2880
2881                         ret = 0;
2882                 }
2883
2884                 /* Now copy the context entry */
2885                 ce = old_ce[idx];
2886
2887                 if (!__context_present(&ce))
2888                         continue;
2889
2890                 did = context_domain_id(&ce);
2891                 if (did >= 0 && did < cap_ndoms(iommu->cap))
2892                         set_bit(did, iommu->domain_ids);
2893
2894                 /*
2895                  * We need a marker for copied context entries. This
2896                  * marker needs to work for the old format as well as
2897                  * for extended context entries.
2898                  *
2899                  * Bit 67 of the context entry is used. In the old
2900                  * format this bit is available to software, in the
2901                  * extended format it is the PGE bit, but PGE is ignored
2902                  * by HW if PASIDs are disabled (and thus still
2903                  * available).
2904                  *
2905                  * So disable PASIDs first and then mark the entry
2906                  * copied. This means that we don't copy PASID
2907                  * translations from the old kernel, but this is fine as
2908                  * faults there are not fatal.
2909                  */
2910                 context_clear_pasid_enable(&ce);
2911                 context_set_copied(&ce);
2912
2913                 new_ce[idx] = ce;
2914         }
2915
2916         tbl[tbl_idx + pos] = new_ce;
2917
2918         __iommu_flush_cache(iommu, new_ce, VTD_PAGE_SIZE);
2919
2920 out_unmap:
2921         iounmap(old_ce);
2922
2923 out:
2924         return ret;
2925 }
2926
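/*
 * Top-level table copy for the kdump case: read the old root table address
 * from DMAR_RTADDR_REG, copy each bus's context table into freshly allocated
 * pages, and only then hook them into our own root_entry table under
 * iommu->lock.
 */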
2927 static int copy_translation_tables(struct intel_iommu *iommu)
2928 {
2929         struct context_entry **ctxt_tbls;
2930         struct root_entry *old_rt;
2931         phys_addr_t old_rt_phys;
2932         int ctxt_table_entries;
2933         unsigned long flags;
2934         u64 rtaddr_reg;
2935         int bus, ret;
2936         bool new_ext, ext;
2937
2938         rtaddr_reg = dmar_readq(iommu->reg + DMAR_RTADDR_REG);
2939         ext        = !!(rtaddr_reg & DMA_RTADDR_RTT);
2940         new_ext    = !!ecap_ecs(iommu->ecap);
2941
2942         /*
2943          * The RTT bit can only be changed when translation is disabled,
2944          * but disabling translation means to open a window for data
2945          * corruption. So bail out and don't copy anything if we would
2946          * have to change the bit.
2947          */
2948         if (new_ext != ext)
2949                 return -EINVAL;
2950
2951         old_rt_phys = rtaddr_reg & VTD_PAGE_MASK;
2952         if (!old_rt_phys)
2953                 return -EINVAL;
2954
2955         old_rt = ioremap_cache(old_rt_phys, PAGE_SIZE);
2956         if (!old_rt)
2957                 return -ENOMEM;
2958
2959         /* This is too big for the stack - allocate it from slab */
2960         ctxt_table_entries = ext ? 512 : 256;
2961         ret = -ENOMEM;
2962         ctxt_tbls = kzalloc(ctxt_table_entries * sizeof(void *), GFP_KERNEL);
2963         if (!ctxt_tbls)
2964                 goto out_unmap;
2965
2966         for (bus = 0; bus < 256; bus++) {
2967                 ret = copy_context_table(iommu, &old_rt[bus],
2968                                          ctxt_tbls, bus, ext);
2969                 if (ret) {
2970                         pr_err("%s: Failed to copy context table for bus %d\n",
2971                                 iommu->name, bus);
2972                         continue;
2973                 }
2974         }
2975
2976         spin_lock_irqsave(&iommu->lock, flags);
2977
2978         /* Context tables are copied, now write them to the root_entry table */
2979         for (bus = 0; bus < 256; bus++) {
2980                 int idx = ext ? bus * 2 : bus;
2981                 u64 val;
2982
2983                 if (ctxt_tbls[idx]) {
2984                         val = virt_to_phys(ctxt_tbls[idx]) | 1;
2985                         iommu->root_entry[bus].lo = val;
2986                 }
2987
2988                 if (!ext || !ctxt_tbls[idx + 1])
2989                         continue;
2990
2991                 val = virt_to_phys(ctxt_tbls[idx + 1]) | 1;
2992                 iommu->root_entry[bus].hi = val;
2993         }
2994
2995         spin_unlock_irqrestore(&iommu->lock, flags);
2996
2997         kfree(ctxt_tbls);
2998
2999         __iommu_flush_cache(iommu, iommu->root_entry, PAGE_SIZE);
3000
3001         ret = 0;
3002
3003 out_unmap:
3004         iounmap(old_rt);
3005
3006         return ret;
3007 }
3008
3009 static int __init init_dmars(void)
3010 {
3011         struct dmar_drhd_unit *drhd;
3012         struct dmar_rmrr_unit *rmrr;
3013         bool copied_tables = false;
3014         struct device *dev;
3015         struct intel_iommu *iommu;
3016         int i, ret;
3017
3018         /*
3019          * for each drhd
3020          *    allocate root
3021          *    initialize and program root entry to not present
3022          * endfor
3023          */
3024         for_each_drhd_unit(drhd) {
3025                 /*
3026                  * No lock needed: this is only incremented in the
3027                  * single-threaded kernel __init code path; all other
3028                  * accesses are read-only.
3029                  */
3030                 if (g_num_of_iommus < DMAR_UNITS_SUPPORTED) {
3031                         g_num_of_iommus++;
3032                         continue;
3033                 }
3034                 pr_err_once("Exceeded %d IOMMUs\n", DMAR_UNITS_SUPPORTED);
3035         }
3036
3037         /* Preallocate enough resources for IOMMU hot-addition */
3038         if (g_num_of_iommus < DMAR_UNITS_SUPPORTED)
3039                 g_num_of_iommus = DMAR_UNITS_SUPPORTED;
3040
3041         g_iommus = kcalloc(g_num_of_iommus, sizeof(struct intel_iommu *),
3042                         GFP_KERNEL);
3043         if (!g_iommus) {
3044                 pr_err("Allocating global iommu array failed\n");
3045                 ret = -ENOMEM;
3046                 goto error;
3047         }
3048
3049         deferred_flush = kzalloc(g_num_of_iommus *
3050                 sizeof(struct deferred_flush_tables), GFP_KERNEL);
3051         if (!deferred_flush) {
3052                 ret = -ENOMEM;
3053                 goto free_g_iommus;
3054         }
3055
3056         for_each_active_iommu(iommu, drhd) {
3057                 g_iommus[iommu->seq_id] = iommu;
3058
3059                 intel_iommu_init_qi(iommu);
3060
3061                 ret = iommu_init_domains(iommu);
3062                 if (ret)
3063                         goto free_iommu;
3064
3065                 init_translation_status(iommu);
3066
3067                 if (translation_pre_enabled(iommu) && !is_kdump_kernel()) {
3068                         iommu_disable_translation(iommu);
3069                         clear_translation_pre_enabled(iommu);
3070                         pr_warn("Translation was enabled for %s but we are not in kdump mode\n",
3071                                 iommu->name);
3072                 }
3073
3074                 /*
3075                  * TBD:
3076                  * We could share the same root & context tables
3077                  * among all IOMMUs; need to split that out later.
3078                  */
3079                 ret = iommu_alloc_root_entry(iommu);
3080                 if (ret)
3081                         goto free_iommu;
3082
3083                 if (translation_pre_enabled(iommu)) {
3084                         pr_info("Translation already enabled - trying to copy translation structures\n");
3085
3086                         ret = copy_translation_tables(iommu);
3087                         if (ret) {
3088                                 /*
3089                                  * We found the IOMMU with translation
3090                                  * enabled - but failed to copy over the
3091                                  * old root-entry table. Try to proceed
3092                                  * by disabling translation now and
3093                                  * allocating a clean root-entry table.
3094                                  * This might cause DMAR faults, but
3095                                  * probably the dump will still succeed.
3096                                  */
3097                                 pr_err("Failed to copy translation tables from previous kernel for %s\n",
3098                                        iommu->name);
3099                                 iommu_disable_translation(iommu);
3100                                 clear_translation_pre_enabled(iommu);
3101                         } else {
3102                                 pr_info("Copied translation tables from previous kernel for %s\n",
3103                                         iommu->name);
3104                                 copied_tables = true;
3105                         }
3106                 }
3107
3108                 iommu_flush_write_buffer(iommu);
3109                 iommu_set_root_entry(iommu);
3110                 iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
3111                 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
3112
3113                 if (!ecap_pass_through(iommu->ecap))
3114                         hw_pass_through = 0;
3115         }
3116
3117         if (iommu_pass_through)
3118                 iommu_identity_mapping |= IDENTMAP_ALL;
3119
3120 #ifdef CONFIG_INTEL_IOMMU_BROKEN_GFX_WA
3121         iommu_identity_mapping |= IDENTMAP_GFX;
3122 #endif
3123
3124         if (iommu_identity_mapping) {
3125                 ret = si_domain_init(hw_pass_through);
3126                 if (ret)
3127                         goto free_iommu;
3128         }
3129
3130         check_tylersburg_isoch();
3131
3132         /*
3133          * If we copied translations from a previous kernel in the kdump
3134          * case, we cannot assign the devices to domains now, as that
3135          * would eliminate the old mappings. So skip this part and defer
3136          * the assignment to device driver initialization time.
3137          */
3138         if (copied_tables)
3139                 goto domains_done;
3140
3141         /*
3142          * If pass-through is not set or not enabled, set up context entries with
3143          * identity mappings for RMRR, GFX and ISA devices, possibly falling back
3144          * to the static identity mapping if iommu_identity_mapping is set.
3145          */
3146         if (iommu_identity_mapping) {
3147                 ret = iommu_prepare_static_identity_mapping(hw_pass_through);
3148                 if (ret) {
3149                         pr_crit("Failed to setup IOMMU pass-through\n");
3150                         goto free_iommu;
3151                 }
3152         }
3153         /*
3154          * For each rmrr
3155          *   for each dev attached to rmrr
3156          *   do
3157          *     locate drhd for dev, alloc domain for dev
3158          *     allocate free domain
3159          *     allocate page table entries for rmrr
3160          *     if context not allocated for bus
3161          *           allocate and init context
3162          *           set present in root table for this bus
3163          *     init context with domain, translation etc
3164          *    endfor
3165          * endfor
3166          */
3167         pr_info("Setting RMRR:\n");
3168         for_each_rmrr_units(rmrr) {
3169                 /* Some BIOSes list non-existent devices in the DMAR table. */
3170                 for_each_active_dev_scope(rmrr->devices, rmrr->devices_cnt,
3171                                           i, dev) {
3172                         ret = iommu_prepare_rmrr_dev(rmrr, dev);
3173                         if (ret)
3174                                 pr_err("Mapping reserved region failed\n");
3175                 }
3176         }
3177
3178         iommu_prepare_isa();
3179
3180 domains_done:
3181
3182         /*
3183          * for each drhd
3184          *   enable fault log
3185          *   global invalidate context cache
3186          *   global invalidate iotlb
3187          *   enable translation
3188          */
3189         for_each_iommu(iommu, drhd) {
3190                 if (drhd->ignored) {
3191                         /*
3192                          * we always have to disable PMRs or DMA may fail on
3193                          * this device
3194                          */
3195                         if (force_on)
3196                                 iommu_disable_protect_mem_regions(iommu);
3197                         continue;
3198                 }
3199
3200                 iommu_flush_write_buffer(iommu);
3201
3202                 ret = dmar_set_interrupt(iommu);
3203                 if (ret)
3204                         goto free_iommu;
3205
3206                 if (!translation_pre_enabled(iommu))
3207                         iommu_enable_translation(iommu);
3208
3209                 iommu_disable_protect_mem_regions(iommu);
3210         }
3211
3212         return 0;
3213
3214 free_iommu:
3215         for_each_active_iommu(iommu, drhd) {
3216                 disable_dmar_iommu(iommu);
3217                 free_dmar_iommu(iommu);
3218         }
3219         kfree(deferred_flush);
3220 free_g_iommus:
3221         kfree(g_iommus);
3222 error:
3223         return ret;
3224 }
3225
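/*
 * Allocate an IOVA range for @dev in @domain.  Unless dmar_forcedac is
 * set, addresses below 4GiB are tried first; on failure (or for masks
 * that do not exceed 32 bits) the allocation uses the full dma_mask,
 * clamped to the domain's address width.
 */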
3226 /* This takes a number of _MM_ pages, not VTD pages */
3227 static struct iova *intel_alloc_iova(struct device *dev,
3228                                      struct dmar_domain *domain,
3229                                      unsigned long nrpages, uint64_t dma_mask)
3230 {
3231         struct iova *iova = NULL;
3232
3233         /* Restrict dma_mask to the width that the iommu can handle */
3234         dma_mask = min_t(uint64_t, DOMAIN_MAX_ADDR(domain->gaw), dma_mask);
3235
3236         if (!dmar_forcedac && dma_mask > DMA_BIT_MASK(32)) {
3237                 /*
3238                  * First try to allocate an IO virtual address within
3239                  * DMA_BIT_MASK(32); if that fails, fall back to allocating
3240                  * from the higher range.
3241                  */
3242                 iova = alloc_iova(&domain->iovad, nrpages,
3243                                   IOVA_PFN(DMA_BIT_MASK(32)), 1);
3244                 if (iova)
3245                         return iova;
3246         }
3247         iova = alloc_iova(&domain->iovad, nrpages, IOVA_PFN(dma_mask), 1);
3248         if (unlikely(!iova)) {
3249                 pr_err("Allocating %lu-page iova for %s failed\n",
3250                        nrpages, dev_name(dev));
3251                 return NULL;
3252         }
3253
3254         return iova;
3255 }
3256
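/*
 * Resolve the dmar_domain to use for DMA-API operations on @dev,
 * allocating one with the default address width if the device has none
 * yet.  get_valid_domain_for_dev() below is the lock-free fast path that
 * simply follows dev->archdata.iommu.
 */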
3257 static struct dmar_domain *__get_valid_domain_for_dev(struct device *dev)
3258 {
3259         struct dmar_domain *domain;
3260
3261         domain = get_domain_for_dev(dev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
3262         if (!domain) {
3263                 pr_err("Allocating domain for %s failed\n",
3264                        dev_name(dev));
3265                 return NULL;
3266         }
3267
3268         return domain;
3269 }
3270
3271 static inline struct dmar_domain *get_valid_domain_for_dev(struct device *dev)
3272 {
3273         struct device_domain_info *info;
3274
3275         /* No lock here, assumes no domain exit in normal case */
3276         info = dev->archdata.iommu;
3277         if (likely(info))
3278                 return info->domain;
3279
3280         return __get_valid_domain_for_dev(dev);
3281 }
3282
3283 /* Check if the device needs to go through the non-identity map/unmap process. */
3284 static int iommu_no_mapping(struct device *dev)
3285 {
3286         int found;
3287
3288         if (iommu_dummy(dev))
3289                 return 1;
3290
3291         if (!iommu_identity_mapping)
3292                 return 0;
3293
3294         found = identity_mapping(dev);
3295         if (found) {
3296                 if (iommu_should_identity_map(dev, 0))
3297                         return 1;
3298                 else {
3299                         /*
3300                          * The 32-bit DMA device is removed from si_domain
3301                          * and falls back to non-identity mapping.
3302                          */
3303                         domain_remove_one_dev_info(si_domain, dev);
3304                         pr_info("32bit %s uses non-identity mapping\n",
3305                                 dev_name(dev));
3306                         return 0;
3307                 }
3308         } else {
3309                 /*
3310                  * In case of a 64-bit DMA device detached from a VM, the
3311                  * device is put into si_domain for identity mapping.
3312                  */
3313                 if (iommu_should_identity_map(dev, 0)) {
3314                         int ret;
3315                         ret = domain_add_dev_info(si_domain, dev);
3316                         if (!ret) {
3317                                 pr_info("64bit %s uses identity mapping\n",
3318                                         dev_name(dev));
3319                                 return 1;
3320                         }
3321                 }
3322         }
3323
3324         return 0;
3325 }
3326
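/*
 * Core single-range mapping helper: allocate an IOVA sized for the
 * page-aligned region, derive read/write permissions from the DMA
 * direction and the zero-length-read capability, install the PTEs and
 * flush (IOTLB in caching mode, write buffer otherwise).  Identity-mapped
 * devices just get the physical address back.  Returns 0 on failure.
 */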
3327 static dma_addr_t __intel_map_single(struct device *dev, phys_addr_t paddr,
3328                                      size_t size, int dir, u64 dma_mask)
3329 {
3330         struct dmar_domain *domain;
3331         phys_addr_t start_paddr;
3332         struct iova *iova;
3333         int prot = 0;
3334         int ret;
3335         struct intel_iommu *iommu;
3336         unsigned long paddr_pfn = paddr >> PAGE_SHIFT;
3337
3338         BUG_ON(dir == DMA_NONE);
3339
3340         if (iommu_no_mapping(dev))
3341                 return paddr;
3342
3343         domain = get_valid_domain_for_dev(dev);
3344         if (!domain)
3345                 return 0;
3346
3347         iommu = domain_get_iommu(domain);
3348         size = aligned_nrpages(paddr, size);
3349
3350         iova = intel_alloc_iova(dev, domain, dma_to_mm_pfn(size), dma_mask);
3351         if (!iova)
3352                 goto error;
3353
3354         /*
3355          * Check if DMAR supports zero-length reads on write-only
3356          * mappings.
3357          */
3358         if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL ||
3359                         !cap_zlr(iommu->cap))
3360                 prot |= DMA_PTE_READ;
3361         if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
3362                 prot |= DMA_PTE_WRITE;
3363         /*
3364          * paddr - (paddr + size) might span a partial page, so we map the
3365          * whole page.  Note: if two parts of one page are separately mapped,
3366          * we might have two guest addresses mapping to the same host paddr,
3367          * but this is not a big problem.
3368          */
3369         ret = domain_pfn_mapping(domain, mm_to_dma_pfn(iova->pfn_lo),
3370                                  mm_to_dma_pfn(paddr_pfn), size, prot);
3371         if (ret)
3372                 goto error;
3373
3374         /* it's a non-present to present mapping. Only flush if caching mode */
3375         if (cap_caching_mode(iommu->cap))
3376                 iommu_flush_iotlb_psi(iommu, domain,
3377                                       mm_to_dma_pfn(iova->pfn_lo),
3378                                       size, 0, 1);
3379         else
3380                 iommu_flush_write_buffer(iommu);
3381
3382         start_paddr = (phys_addr_t)iova->pfn_lo << PAGE_SHIFT;
3383         start_paddr += paddr & ~PAGE_MASK;
3384         return start_paddr;
3385
3386 error:
3387         if (iova)
3388                 __free_iova(&domain->iovad, iova);
3389         pr_err("Device %s request: %zx@%llx dir %d --- failed\n",
3390                 dev_name(dev), size, (unsigned long long)paddr, dir);
3391         return 0;
3392 }
3393
3394 static dma_addr_t intel_map_page(struct device *dev, struct page *page,
3395                                  unsigned long offset, size_t size,
3396                                  enum dma_data_direction dir,
3397                                  struct dma_attrs *attrs)
3398 {
3399         return __intel_map_single(dev, page_to_phys(page) + offset, size,
3400                                   dir, *dev->dma_mask);
3401 }
3402
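/*
 * Drain the per-iommu deferred_flush tables: on real hardware one global
 * IOTLB flush per iommu plus device-IOTLB invalidations is used, while in
 * caching mode each entry gets a page-selective flush.  The queued IOVAs
 * and page freelists are then released.  Callers hold async_umap_flush_lock.
 */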
3403 static void flush_unmaps(void)
3404 {
3405         int i, j;
3406
3407         timer_on = 0;
3408
3409         /* just flush them all */
3410         for (i = 0; i < g_num_of_iommus; i++) {
3411                 struct intel_iommu *iommu = g_iommus[i];
3412                 if (!iommu)
3413                         continue;
3414
3415                 if (!deferred_flush[i].next)
3416                         continue;
3417
3418                 /* In caching mode, global flushes make emulation expensive */
3419                 if (!cap_caching_mode(iommu->cap))
3420                         iommu->flush.flush_iotlb(iommu, 0, 0, 0,
3421                                          DMA_TLB_GLOBAL_FLUSH);
3422                 for (j = 0; j < deferred_flush[i].next; j++) {
3423                         unsigned long mask;
3424                         struct iova *iova = deferred_flush[i].iova[j];
3425                         struct dmar_domain *domain = deferred_flush[i].domain[j];
3426
3427                         /* On real hardware multiple invalidations are expensive */
3428                         if (cap_caching_mode(iommu->cap))
3429                                 iommu_flush_iotlb_psi(iommu, domain,
3430                                         iova->pfn_lo, iova_size(iova),
3431                                         !deferred_flush[i].freelist[j], 0);
3432                         else {
3433                                 mask = ilog2(mm_to_dma_pfn(iova_size(iova)));
3434                                 iommu_flush_dev_iotlb(deferred_flush[i].domain[j],
3435                                                 (uint64_t)iova->pfn_lo << PAGE_SHIFT, mask);
3436                         }
3437                         __free_iova(&deferred_flush[i].domain[j]->iovad, iova);
3438                         if (deferred_flush[i].freelist[j])
3439                                 dma_free_pagelist(deferred_flush[i].freelist[j]);
3440                 }
3441                 deferred_flush[i].next = 0;
3442         }
3443
3444         list_size = 0;
3445 }
3446
3447 static void flush_unmaps_timeout(unsigned long data)
3448 {
3449         unsigned long flags;
3450
3451         spin_lock_irqsave(&async_umap_flush_lock, flags);
3452         flush_unmaps();
3453         spin_unlock_irqrestore(&async_umap_flush_lock, flags);
3454 }
3455
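/*
 * Queue an IOVA range (and the page tables freed for it) for a later
 * batched flush.  The batch is drained once HIGH_WATER_MARK entries have
 * accumulated or when the 10ms unmap_timer fires.
 */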
3456 static void add_unmap(struct dmar_domain *dom, struct iova *iova, struct page *freelist)
3457 {
3458         unsigned long flags;
3459         int next, iommu_id;
3460         struct intel_iommu *iommu;
3461
3462         spin_lock_irqsave(&async_umap_flush_lock, flags);
3463         if (list_size == HIGH_WATER_MARK)
3464                 flush_unmaps();
3465
3466         iommu = domain_get_iommu(dom);
3467         iommu_id = iommu->seq_id;
3468
3469         next = deferred_flush[iommu_id].next;
3470         deferred_flush[iommu_id].domain[next] = dom;
3471         deferred_flush[iommu_id].iova[next] = iova;
3472         deferred_flush[iommu_id].freelist[next] = freelist;
3473         deferred_flush[iommu_id].next++;
3474
3475         if (!timer_on) {
3476                 mod_timer(&unmap_timer, jiffies + msecs_to_jiffies(10));
3477                 timer_on = 1;
3478         }
3479         list_size++;
3480         spin_unlock_irqrestore(&async_umap_flush_lock, flags);
3481 }
3482
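/*
 * Tear down the translation for a previously mapped DMA handle.  In
 * strict mode the IOTLB is flushed and the IOVA freed immediately;
 * otherwise the release is deferred to the batched flush above.
 */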
3483 static void intel_unmap(struct device *dev, dma_addr_t dev_addr)
3484 {
3485         struct dmar_domain *domain;
3486         unsigned long start_pfn, last_pfn;
3487         struct iova *iova;
3488         struct intel_iommu *iommu;
3489         struct page *freelist;
3490
3491         if (iommu_no_mapping(dev))
3492                 return;
3493
3494         domain = find_domain(dev);
3495         BUG_ON(!domain);
3496
3497         iommu = domain_get_iommu(domain);
3498
3499         iova = find_iova(&domain->iovad, IOVA_PFN(dev_addr));
3500         if (WARN_ONCE(!iova, "Driver unmaps unmatched page at PFN %llx\n",
3501                       (unsigned long long)dev_addr))
3502                 return;
3503
3504         start_pfn = mm_to_dma_pfn(iova->pfn_lo);
3505         last_pfn = mm_to_dma_pfn(iova->pfn_hi + 1) - 1;
3506
3507         pr_debug("Device %s unmapping: pfn %lx-%lx\n",
3508                  dev_name(dev), start_pfn, last_pfn);
3509
3510         freelist = domain_unmap(domain, start_pfn, last_pfn);
3511
3512         if (intel_iommu_strict) {
3513                 iommu_flush_iotlb_psi(iommu, domain, start_pfn,
3514                                       last_pfn - start_pfn + 1, !freelist, 0);
3515                 /* free iova */
3516                 __free_iova(&domain->iovad, iova);
3517                 dma_free_pagelist(freelist);
3518         } else {
3519                 add_unmap(domain, iova, freelist);
3520                 /*
3521                  * Queue up the release of the unmap to save roughly the 1/6th of
3522                  * the CPU time otherwise spent on the IOTLB flush operation.
3523                  */
3524         }
3525 }
3526
3527 static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
3528                              size_t size, enum dma_data_direction dir,
3529                              struct dma_attrs *attrs)
3530 {
3531         intel_unmap(dev, dev_addr);
3532 }
3533
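/*
 * Coherent allocation: translated devices have the GFP zone restrictions
 * dropped and the buffer mapped DMA_BIDIRECTIONAL against the device's
 * coherent_dma_mask; identity-mapped devices get their zone picked from
 * that mask instead.  Contiguous (CMA) pages are preferred when the
 * allocation is allowed to block.
 */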
3534 static void *intel_alloc_coherent(struct device *dev, size_t size,
3535                                   dma_addr_t *dma_handle, gfp_t flags,
3536                                   struct dma_attrs *attrs)
3537 {
3538         struct page *page = NULL;
3539         int order;
3540
3541         size = PAGE_ALIGN(size);
3542         order = get_order(size);
3543
3544         if (!iommu_no_mapping(dev))
3545                 flags &= ~(GFP_DMA | GFP_DMA32);
3546         else if (dev->coherent_dma_mask < dma_get_required_mask(dev)) {
3547                 if (dev->coherent_dma_mask < DMA_BIT_MASK(32))
3548                         flags |= GFP_DMA;
3549                 else
3550                         flags |= GFP_DMA32;
3551         }
3552
3553         if (flags & __GFP_WAIT) {
3554                 unsigned int count = size >> PAGE_SHIFT;
3555
3556                 page = dma_alloc_from_contiguous(dev, count, order);
3557                 if (page && iommu_no_mapping(dev) &&
3558                     page_to_phys(page) + size > dev->coherent_dma_mask) {
3559                         dma_release_from_contiguous(dev, page, count);
3560                         page = NULL;
3561                 }
3562         }
3563
3564         if (!page)
3565                 page = alloc_pages(flags, order);
3566         if (!page)
3567                 return NULL;
3568         memset(page_address(page), 0, size);
3569
3570         *dma_handle = __intel_map_single(dev, page_to_phys(page), size,
3571                                          DMA_BIDIRECTIONAL,
3572                                          dev->coherent_dma_mask);
3573         if (*dma_handle)
3574                 return page_address(page);
3575         if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT))
3576                 __free_pages(page, order);
3577
3578         return NULL;
3579 }
3580
3581 static void intel_free_coherent(struct device *dev, size_t size, void *vaddr,
3582                                 dma_addr_t dma_handle, struct dma_attrs *attrs)
3583 {
3584         int order;
3585         struct page *page = virt_to_page(vaddr);
3586
3587         size = PAGE_ALIGN(size);
3588         order = get_order(size);
3589
3590         intel_unmap(dev, dma_handle);
3591         if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT))
3592                 __free_pages(page, order);
3593 }
3594
3595 static void intel_unmap_sg(struct device *dev, struct scatterlist *sglist,
3596                            int nelems, enum dma_data_direction dir,
3597                            struct dma_attrs *attrs)
3598 {
3599         intel_unmap(dev, sglist[0].dma_address);
3600 }
3601
3602 static int intel_nontranslate_map_sg(struct device *hddev,
3603         struct scatterlist *sglist, int nelems, int dir)
3604 {
3605         int i;
3606         struct scatterlist *sg;
3607
3608         for_each_sg(sglist, sg, nelems, i) {
3609                 BUG_ON(!sg_page(sg));
3610                 sg->dma_address = page_to_phys(sg_page(sg)) + sg->offset;
3611                 sg->dma_length = sg->length;
3612         }
3613         return nelems;
3614 }
3615
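/*
 * Map a scatterlist: one IOVA range large enough for all segments is
 * allocated and the whole list is installed in a single call to
 * domain_sg_mapping(); identity-mapped devices bypass translation via
 * intel_nontranslate_map_sg().
 */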
3616 static int intel_map_sg(struct device *dev, struct scatterlist *sglist, int nelems,
3617                         enum dma_data_direction dir, struct dma_attrs *attrs)
3618 {
3619         int i;
3620         struct dmar_domain *domain;
3621         size_t size = 0;
3622         int prot = 0;
3623         struct iova *iova = NULL;
3624         int ret;
3625         struct scatterlist *sg;
3626         unsigned long start_vpfn;
3627         struct intel_iommu *iommu;
3628
3629         BUG_ON(dir == DMA_NONE);
3630         if (iommu_no_mapping(dev))
3631                 return intel_nontranslate_map_sg(dev, sglist, nelems, dir);
3632
3633         domain = get_valid_domain_for_dev(dev);
3634         if (!domain)
3635                 return 0;
3636
3637         iommu = domain_get_iommu(domain);
3638
3639         for_each_sg(sglist, sg, nelems, i)
3640                 size += aligned_nrpages(sg->offset, sg->length);
3641
3642         iova = intel_alloc_iova(dev, domain, dma_to_mm_pfn(size),
3643                                 *dev->dma_mask);
3644         if (!iova) {
3645                 sglist->dma_length = 0;
3646                 return 0;
3647         }
3648
3649         /*
3650          * Check if DMAR supports zero-length reads on write-only
3651          * mappings.
3652          */
3653         if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL ||
3654                         !cap_zlr(iommu->cap))
3655                 prot |= DMA_PTE_READ;
3656         if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
3657                 prot |= DMA_PTE_WRITE;
3658
3659         start_vpfn = mm_to_dma_pfn(iova->pfn_lo);
3660
3661         ret = domain_sg_mapping(domain, start_vpfn, sglist, size, prot);
3662         if (unlikely(ret)) {
3663                 dma_pte_free_pagetable(domain, start_vpfn,
3664                                        start_vpfn + size - 1);
3665                 __free_iova(&domain->iovad, iova);
3666                 return 0;
3667         }
3668
3669         /* it's a non-present to present mapping. Only flush if caching mode */
3670         if (cap_caching_mode(iommu->cap))
3671                 iommu_flush_iotlb_psi(iommu, domain, start_vpfn, size, 0, 1);
3672         else
3673                 iommu_flush_write_buffer(iommu);
3674
3675         return nelems;
3676 }
3677
3678 static int intel_mapping_error(struct device *dev, dma_addr_t dma_addr)
3679 {
3680         return !dma_addr;
3681 }
3682
3683 struct dma_map_ops intel_dma_ops = {
3684         .alloc = intel_alloc_coherent,
3685         .free = intel_free_coherent,
3686         .map_sg = intel_map_sg,
3687         .unmap_sg = intel_unmap_sg,
3688         .map_page = intel_map_page,
3689         .unmap_page = intel_unmap_page,
3690         .mapping_error = intel_mapping_error,
3691 };
3692
3693 static inline int iommu_domain_cache_init(void)
3694 {
3695         int ret = 0;
3696
3697         iommu_domain_cache = kmem_cache_create("iommu_domain",
3698                                          sizeof(struct dmar_domain),
3699                                          0,
3700                                          SLAB_HWCACHE_ALIGN,
3701                                          NULL);
3703         if (!iommu_domain_cache) {
3704                 pr_err("Couldn't create iommu_domain cache\n");
3705                 ret = -ENOMEM;
3706         }
3707
3708         return ret;
3709 }
3710
3711 static inline int iommu_devinfo_cache_init(void)
3712 {
3713         int ret = 0;
3714
3715         iommu_devinfo_cache = kmem_cache_create("iommu_devinfo",
3716                                          sizeof(struct device_domain_info),
3717                                          0,
3718                                          SLAB_HWCACHE_ALIGN,
3719                                          NULL);
3720         if (!iommu_devinfo_cache) {
3721                 pr_err("Couldn't create devinfo cache\n");
3722                 ret = -ENOMEM;
3723         }
3724
3725         return ret;
3726 }
3727
3728 static int __init iommu_init_mempool(void)
3729 {
3730         int ret;
3731         ret = iommu_iova_cache_init();
3732         if (ret)
3733                 return ret;
3734
3735         ret = iommu_domain_cache_init();
3736         if (ret)
3737                 goto domain_error;
3738
3739         ret = iommu_devinfo_cache_init();
3740         if (!ret)
3741                 return ret;
3742
3743         kmem_cache_destroy(iommu_domain_cache);
3744 domain_error:
3745         iommu_iova_cache_destroy();
3746
3747         return -ENOMEM;
3748 }
3749
3750 static void __init iommu_exit_mempool(void)
3751 {
3752         kmem_cache_destroy(iommu_devinfo_cache);
3753         kmem_cache_destroy(iommu_domain_cache);
3754         iommu_iova_cache_destroy();
3755 }
3756
3757 static void quirk_ioat_snb_local_iommu(struct pci_dev *pdev)
3758 {
3759         struct dmar_drhd_unit *drhd;
3760         u32 vtbar;
3761         int rc;
3762
3763         /* We know that this device on this chipset has its own IOMMU.
3764          * If we find it under a different IOMMU, then the BIOS is lying
3765          * to us. Hope that the IOMMU for this device is actually
3766          * disabled, and it needs no translation...
3767          */
3768         rc = pci_bus_read_config_dword(pdev->bus, PCI_DEVFN(0, 0), 0xb0, &vtbar);
3769         if (rc) {
3770                 /* "can't" happen */
3771                 dev_info(&pdev->dev, "failed to run vt-d quirk\n");
3772                 return;
3773         }
3774         vtbar &= 0xffff0000;
3775
3776         /* we know that this IOMMU should be at offset 0xa000 from vtbar */
3777         drhd = dmar_find_matched_drhd_unit(pdev);
3778         if (WARN_TAINT_ONCE(!drhd || drhd->reg_base_addr - vtbar != 0xa000,
3779                             TAINT_FIRMWARE_WORKAROUND,
3780                             "BIOS assigned incorrect VT-d unit for Intel(R) QuickData Technology device\n"))
3781                 pdev->dev.archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
3782 }
3783 DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB, quirk_ioat_snb_local_iommu);
3784
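/*
 * Mark DMAR units that can be ignored: units whose device scope contains
 * no active devices, and graphics-only units when dmar_map_gfx is clear
 * (their devices then get the dummy domain info so DMA-API calls bypass
 * the IOMMU).
 */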
3785 static void __init init_no_remapping_devices(void)
3786 {
3787         struct dmar_drhd_unit *drhd;
3788         struct device *dev;
3789         int i;
3790
3791         for_each_drhd_unit(drhd) {
3792                 if (!drhd->include_all) {
3793                         for_each_active_dev_scope(drhd->devices,
3794                                                   drhd->devices_cnt, i, dev)
3795                                 break;
3796                         /* ignore DMAR unit if no devices exist */
3797                         if (i == drhd->devices_cnt)
3798                                 drhd->ignored = 1;
3799                 }
3800         }
3801
3802         for_each_active_drhd_unit(drhd) {
3803                 if (drhd->include_all)
3804                         continue;
3805
3806                 for_each_active_dev_scope(drhd->devices,
3807                                           drhd->devices_cnt, i, dev)
3808                         if (!dev_is_pci(dev) || !IS_GFX_DEVICE(to_pci_dev(dev)))
3809                                 break;
3810                 if (i < drhd->devices_cnt)
3811                         continue;
3812
3813                 /* This IOMMU has *only* gfx devices. Either bypass it or
3814                    set the gfx_mapped flag, as appropriate */
3815                 if (dmar_map_gfx) {
3816                         intel_iommu_gfx_mapped = 1;
3817                 } else {
3818                         drhd->ignored = 1;
3819                         for_each_active_dev_scope(drhd->devices,
3820                                                   drhd->devices_cnt, i, dev)
3821                                 dev->archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
3822                 }
3823         }
3824 }
3825
3826 #ifdef CONFIG_SUSPEND
3827 static int init_iommu_hw(void)
3828 {
3829         struct dmar_drhd_unit *drhd;
3830         struct intel_iommu *iommu = NULL;
3831
3832         for_each_active_iommu(iommu, drhd)
3833                 if (iommu->qi)
3834                         dmar_reenable_qi(iommu);
3835
3836         for_each_iommu(iommu, drhd) {
3837                 if (drhd->ignored) {
3838                         /*
3839                          * we always have to disable PMRs or DMA may fail on
3840                          * this device
3841                          */
3842                         if (force_on)
3843                                 iommu_disable_protect_mem_regions(iommu);
3844                         continue;
3845                 }
3846
3847                 iommu_flush_write_buffer(iommu);
3848
3849                 iommu_set_root_entry(iommu);
3850
3851                 iommu->flush.flush_context(iommu, 0, 0, 0,
3852                                            DMA_CCMD_GLOBAL_INVL);
3853                 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
3854                 iommu_enable_translation(iommu);
3855                 iommu_disable_protect_mem_regions(iommu);
3856         }
3857
3858         return 0;
3859 }
3860
3861 static void iommu_flush_all(void)
3862 {
3863         struct dmar_drhd_unit *drhd;
3864         struct intel_iommu *iommu;
3865
3866         for_each_active_iommu(iommu, drhd) {
3867                 iommu->flush.flush_context(iommu, 0, 0, 0,
3868                                            DMA_CCMD_GLOBAL_INVL);
3869                 iommu->flush.flush_iotlb(iommu, 0, 0, 0,
3870                                          DMA_TLB_GLOBAL_FLUSH);
3871         }
3872 }
3873
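/*
 * Suspend: flush all caches, disable translation and save the fault-event
 * registers so that iommu_resume() can restore them once init_iommu_hw()
 * has re-enabled the units.
 */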
3874 static int iommu_suspend(void)
3875 {
3876         struct dmar_drhd_unit *drhd;
3877         struct intel_iommu *iommu = NULL;
3878         unsigned long flag;
3879
3880         for_each_active_iommu(iommu, drhd) {
3881                 iommu->iommu_state = kzalloc(sizeof(u32) * MAX_SR_DMAR_REGS,
3882                                                  GFP_ATOMIC);
3883                 if (!iommu->iommu_state)
3884                         goto nomem;
3885         }
3886
3887         iommu_flush_all();
3888
3889         for_each_active_iommu(iommu, drhd) {
3890                 iommu_disable_translation(iommu);
3891
3892                 raw_spin_lock_irqsave(&iommu->register_lock, flag);
3893
3894                 iommu->iommu_state[SR_DMAR_FECTL_REG] =
3895                         readl(iommu->reg + DMAR_FECTL_REG);
3896                 iommu->iommu_state[SR_DMAR_FEDATA_REG] =
3897                         readl(iommu->reg + DMAR_FEDATA_REG);
3898                 iommu->iommu_state[SR_DMAR_FEADDR_REG] =
3899                         readl(iommu->reg + DMAR_FEADDR_REG);
3900                 iommu->iommu_state[SR_DMAR_FEUADDR_REG] =
3901                         readl(iommu->reg + DMAR_FEUADDR_REG);
3902
3903                 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
3904         }
3905         return 0;
3906
3907 nomem:
3908         for_each_active_iommu(iommu, drhd)
3909                 kfree(iommu->iommu_state);
3910
3911         return -ENOMEM;
3912 }
3913
3914 static void iommu_resume(void)
3915 {
3916         struct dmar_drhd_unit *drhd;
3917         struct intel_iommu *iommu = NULL;
3918         unsigned long flag;
3919
3920         if (init_iommu_hw()) {
3921                 if (force_on)
3922                         panic("tboot: IOMMU setup failed, DMAR can not resume!\n");
3923                 else
3924                         WARN(1, "IOMMU setup failed, DMAR can not resume!\n");
3925                 return;
3926         }
3927
3928         for_each_active_iommu(iommu, drhd) {
3929
3930                 raw_spin_lock_irqsave(&iommu->register_lock, flag);
3931
3932                 writel(iommu->iommu_state[SR_DMAR_FECTL_REG],
3933                         iommu->reg + DMAR_FECTL_REG);
3934                 writel(iommu->iommu_state[SR_DMAR_FEDATA_REG],
3935                         iommu->reg + DMAR_FEDATA_REG);
3936                 writel(iommu->iommu_state[SR_DMAR_FEADDR_REG],
3937                         iommu->reg + DMAR_FEADDR_REG);
3938                 writel(iommu->iommu_state[SR_DMAR_FEUADDR_REG],
3939                         iommu->reg + DMAR_FEUADDR_REG);
3940
3941                 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
3942         }
3943
3944         for_each_active_iommu(iommu, drhd)
3945                 kfree(iommu->iommu_state);
3946 }
3947
3948 static struct syscore_ops iommu_syscore_ops = {
3949         .resume         = iommu_resume,
3950         .suspend        = iommu_suspend,
3951 };
3952
3953 static void __init init_iommu_pm_ops(void)
3954 {
3955         register_syscore_ops(&iommu_syscore_ops);
3956 }
3957
3958 #else
3959 static inline void init_iommu_pm_ops(void) {}
3960 #endif  /* CONFIG_SUSPEND */
3961
3962
3963 int __init dmar_parse_one_rmrr(struct acpi_dmar_header *header, void *arg)
3964 {
3965         struct acpi_dmar_reserved_memory *rmrr;
3966         struct dmar_rmrr_unit *rmrru;
3967
3968         rmrru = kzalloc(sizeof(*rmrru), GFP_KERNEL);
3969         if (!rmrru)
3970                 return -ENOMEM;
3971
3972         rmrru->hdr = header;
3973         rmrr = (struct acpi_dmar_reserved_memory *)header;
3974         rmrru->base_address = rmrr->base_address;
3975         rmrru->end_address = rmrr->end_address;
3976         rmrru->devices = dmar_alloc_dev_scope((void *)(rmrr + 1),
3977                                 ((void *)rmrr) + rmrr->header.length,
3978                                 &rmrru->devices_cnt);
3979         if (rmrru->devices_cnt && rmrru->devices == NULL) {
3980                 kfree(rmrru);
3981                 return -ENOMEM;
3982         }
3983
3984         list_add(&rmrru->list, &dmar_rmrr_units);
3985
3986         return 0;
3987 }
3988
3989 static struct dmar_atsr_unit *dmar_find_atsr(struct acpi_dmar_atsr *atsr)
3990 {
3991         struct dmar_atsr_unit *atsru;
3992         struct acpi_dmar_atsr *tmp;
3993
3994         list_for_each_entry_rcu(atsru, &dmar_atsr_units, list) {
3995                 tmp = (struct acpi_dmar_atsr *)atsru->hdr;
3996                 if (atsr->segment != tmp->segment)
3997                         continue;
3998                 if (atsr->header.length != tmp->header.length)
3999                         continue;
4000                 if (memcmp(atsr, tmp, atsr->header.length) == 0)
4001                         return atsru;
4002         }
4003
4004         return NULL;
4005 }
4006
4007 int dmar_parse_one_atsr(struct acpi_dmar_header *hdr, void *arg)
4008 {
4009         struct acpi_dmar_atsr *atsr;
4010         struct dmar_atsr_unit *atsru;
4011
4012         if (system_state != SYSTEM_BOOTING && !intel_iommu_enabled)
4013                 return 0;
4014
4015         atsr = container_of(hdr, struct acpi_dmar_atsr, header);
4016         atsru = dmar_find_atsr(atsr);
4017         if (atsru)
4018                 return 0;
4019
4020         atsru = kzalloc(sizeof(*atsru) + hdr->length, GFP_KERNEL);
4021         if (!atsru)
4022                 return -ENOMEM;
4023
4024         /*
4025          * If memory is allocated from slab by ACPI _DSM method, we need to
4026          * copy the memory content because the memory buffer will be freed
4027          * on return.
4028          */
4029         atsru->hdr = (void *)(atsru + 1);
4030         memcpy(atsru->hdr, hdr, hdr->length);
4031         atsru->include_all = atsr->flags & 0x1;
4032         if (!atsru->include_all) {
4033                 atsru->devices = dmar_alloc_dev_scope((void *)(atsr + 1),
4034                                 (void *)atsr + atsr->header.length,
4035                                 &atsru->devices_cnt);
4036                 if (atsru->devices_cnt && atsru->devices == NULL) {
4037                         kfree(atsru);
4038                         return -ENOMEM;
4039                 }
4040         }
4041
4042         list_add_rcu(&atsru->list, &dmar_atsr_units);
4043
4044         return 0;
4045 }
4046
4047 static void intel_iommu_free_atsr(struct dmar_atsr_unit *atsru)
4048 {
4049         dmar_free_dev_scope(&atsru->devices, &atsru->devices_cnt);
4050         kfree(atsru);
4051 }
4052
4053 int dmar_release_one_atsr(struct acpi_dmar_header *hdr, void *arg)
4054 {
4055         struct acpi_dmar_atsr *atsr;
4056         struct dmar_atsr_unit *atsru;
4057
4058         atsr = container_of(hdr, struct acpi_dmar_atsr, header);
4059         atsru = dmar_find_atsr(atsr);
4060         if (atsru) {
4061                 list_del_rcu(&atsru->list);
4062                 synchronize_rcu();
4063                 intel_iommu_free_atsr(atsru);
4064         }
4065
4066         return 0;
4067 }
4068
4069 int dmar_check_one_atsr(struct acpi_dmar_header *hdr, void *arg)
4070 {
4071         int i;
4072         struct device *dev;
4073         struct acpi_dmar_atsr *atsr;
4074         struct dmar_atsr_unit *atsru;
4075
4076         atsr = container_of(hdr, struct acpi_dmar_atsr, header);
4077         atsru = dmar_find_atsr(atsr);
4078         if (!atsru)
4079                 return 0;
4080
4081         if (!atsru->include_all && atsru->devices && atsru->devices_cnt)
4082                 for_each_active_dev_scope(atsru->devices, atsru->devices_cnt,
4083                                           i, dev)
4084                         return -EBUSY;
4085
4086         return 0;
4087 }
4088
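/*
 * Bring up a hot-added DMAR unit: refuse units that would weaken the
 * already negotiated pass-through, snooping or superpage capabilities,
 * then initialize domains, the root entry, QI and the fault interrupt
 * before enabling translation.
 */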
4089 static int intel_iommu_add(struct dmar_drhd_unit *dmaru)
4090 {
4091         int sp, ret = 0;
4092         struct intel_iommu *iommu = dmaru->iommu;
4093
4094         if (g_iommus[iommu->seq_id])
4095                 return 0;
4096
4097         if (hw_pass_through && !ecap_pass_through(iommu->ecap)) {
4098                 pr_warn("%s: Doesn't support hardware pass through.\n",
4099                         iommu->name);
4100                 return -ENXIO;
4101         }
4102         if (!ecap_sc_support(iommu->ecap) &&
4103             domain_update_iommu_snooping(iommu)) {
4104                 pr_warn("%s: Doesn't support snooping.\n",
4105                         iommu->name);
4106                 return -ENXIO;
4107         }
4108         sp = domain_update_iommu_superpage(iommu) - 1;
4109         if (sp >= 0 && !(cap_super_page_val(iommu->cap) & (1 << sp))) {
4110                 pr_warn("%s: Doesn't support large page.\n",
4111                         iommu->name);
4112                 return -ENXIO;
4113         }
4114
4115         /*
4116          * Disable translation if already enabled prior to OS handover.
4117          */
4118         if (iommu->gcmd & DMA_GCMD_TE)
4119                 iommu_disable_translation(iommu);
4120
4121         g_iommus[iommu->seq_id] = iommu;
4122         ret = iommu_init_domains(iommu);
4123         if (ret == 0)
4124                 ret = iommu_alloc_root_entry(iommu);
4125         if (ret)
4126                 goto out;
4127
4128         if (dmaru->ignored) {
4129                 /*
4130                  * we always have to disable PMRs or DMA may fail on this device
4131                  */
4132                 if (force_on)
4133                         iommu_disable_protect_mem_regions(iommu);
4134                 return 0;
4135         }
4136
4137         intel_iommu_init_qi(iommu);
4138         iommu_flush_write_buffer(iommu);
4139         ret = dmar_set_interrupt(iommu);
4140         if (ret)
4141                 goto disable_iommu;
4142
4143         iommu_set_root_entry(iommu);
4144         iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
4145         iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
4146         iommu_enable_translation(iommu);
4147
4148         iommu_disable_protect_mem_regions(iommu);
4149         return 0;
4150
4151 disable_iommu:
4152         disable_dmar_iommu(iommu);
4153 out:
4154         free_dmar_iommu(iommu);
4155         return ret;
4156 }
4157
4158 int dmar_iommu_hotplug(struct dmar_drhd_unit *dmaru, bool insert)
4159 {
4160         int ret = 0;
4161         struct intel_iommu *iommu = dmaru->iommu;
4162
4163         if (!intel_iommu_enabled)
4164                 return 0;
4165         if (iommu == NULL)
4166                 return -EINVAL;
4167
4168         if (insert) {
4169                 ret = intel_iommu_add(dmaru);
4170         } else {
4171                 disable_dmar_iommu(iommu);
4172                 free_dmar_iommu(iommu);
4173         }
4174
4175         return ret;
4176 }
4177
4178 static void intel_iommu_free_dmars(void)
4179 {
4180         struct dmar_rmrr_unit *rmrru, *rmrr_n;
4181         struct dmar_atsr_unit *atsru, *atsr_n;
4182
4183         list_for_each_entry_safe(rmrru, rmrr_n, &dmar_rmrr_units, list) {
4184                 list_del(&rmrru->list);
4185                 dmar_free_dev_scope(&rmrru->devices, &rmrru->devices_cnt);
4186                 kfree(rmrru);
4187         }
4188
4189         list_for_each_entry_safe(atsru, atsr_n, &dmar_atsr_units, list) {
4190                 list_del(&atsru->list);
4191                 intel_iommu_free_atsr(atsru);
4192         }
4193 }
4194
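/*
 * Return 1 if @dev sits below a PCIe root port that is listed in an ATSR
 * unit for its segment (or if an include_all ATSR covers that segment),
 * 0 otherwise.
 */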
4195 int dmar_find_matched_atsr_unit(struct pci_dev *dev)
4196 {
4197         int i, ret = 1;
4198         struct pci_bus *bus;
4199         struct pci_dev *bridge = NULL;
4200         struct device *tmp;
4201         struct acpi_dmar_atsr *atsr;
4202         struct dmar_atsr_unit *atsru;
4203
4204         dev = pci_physfn(dev);
4205         for (bus = dev->bus; bus; bus = bus->parent) {
4206                 bridge = bus->self;
4207                 if (!bridge || !pci_is_pcie(bridge) ||
4208                     pci_pcie_type(bridge) == PCI_EXP_TYPE_PCI_BRIDGE)
4209                         return 0;
4210                 if (pci_pcie_type(bridge) == PCI_EXP_TYPE_ROOT_PORT)
4211                         break;
4212         }
4213         if (!bridge)
4214                 return 0;
4215
4216         rcu_read_lock();
4217         list_for_each_entry_rcu(atsru, &dmar_atsr_units, list) {
4218                 atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
4219                 if (atsr->segment != pci_domain_nr(dev->bus))
4220                         continue;
4221
4222                 for_each_dev_scope(atsru->devices, atsru->devices_cnt, i, tmp)
4223                         if (tmp == &bridge->dev)
4224                                 goto out;
4225
4226                 if (atsru->include_all)
4227                         goto out;
4228         }
4229         ret = 0;
4230 out:
4231         rcu_read_unlock();
4232
4233         return ret;
4234 }
4235
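/*
 * PCI hotplug callback: keep the cached RMRR and ATSR device-scope lists
 * in sync as devices are added to or removed from the bus.
 */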
4236 int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info *info)
4237 {
4238         int ret = 0;
4239         struct dmar_rmrr_unit *rmrru;
4240         struct dmar_atsr_unit *atsru;
4241         struct acpi_dmar_atsr *atsr;
4242         struct acpi_dmar_reserved_memory *rmrr;
4243
4244         if (!intel_iommu_enabled && system_state != SYSTEM_BOOTING)
4245                 return 0;
4246
4247         list_for_each_entry(rmrru, &dmar_rmrr_units, list) {
4248                 rmrr = container_of(rmrru->hdr,
4249                                     struct acpi_dmar_reserved_memory, header);
4250                 if (info->event == BUS_NOTIFY_ADD_DEVICE) {
4251                         ret = dmar_insert_dev_scope(info, (void *)(rmrr + 1),
4252                                 ((void *)rmrr) + rmrr->header.length,
4253                                 rmrr->segment, rmrru->devices,
4254                                 rmrru->devices_cnt);
4255                         if (ret < 0)
4256                                 return ret;
4257                 } else if (info->event == BUS_NOTIFY_DEL_DEVICE) {
4258                         dmar_remove_dev_scope(info, rmrr->segment,
4259                                 rmrru->devices, rmrru->devices_cnt);
4260                 }
4261         }
4262
4263         list_for_each_entry(atsru, &dmar_atsr_units, list) {
4264                 if (atsru->include_all)
4265                         continue;
4266
4267                 atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
4268                 if (info->event == BUS_NOTIFY_ADD_DEVICE) {
4269                         ret = dmar_insert_dev_scope(info, (void *)(atsr + 1),
4270                                         (void *)atsr + atsr->header.length,
4271                                         atsr->segment, atsru->devices,
4272                                         atsru->devices_cnt);
4273                         if (ret > 0)
4274                                 break;
4275                         else if (ret < 0)
4276                                 return ret;
4277                 } else if (info->event == BUS_NOTIFY_DEL_DEVICE) {
4278                         if (dmar_remove_dev_scope(info, atsr->segment,
4279                                         atsru->devices, atsru->devices_cnt))
4280                                 break;
4281                 }
4282         }
4283
4284         return 0;
4285 }
4286
4287 /*
4288  * Here we only respond to a device being unbound from its driver.
4289  *
4290  * A newly added device is not attached to its DMAR domain here yet; that
4291  * happens when the device is mapped to an IOVA.
4292  */
4293 static int device_notifier(struct notifier_block *nb,
4294                                   unsigned long action, void *data)
4295 {
4296         struct device *dev = data;
4297         struct dmar_domain *domain;
4298
4299         if (iommu_dummy(dev))
4300                 return 0;
4301
4302         if (action != BUS_NOTIFY_REMOVED_DEVICE)
4303                 return 0;
4304
4305         domain = find_domain(dev);
4306         if (!domain)
4307                 return 0;
4308
4309         down_read(&dmar_global_lock);
4310         domain_remove_one_dev_info(domain, dev);
4311         if (!domain_type_is_vm_or_si(domain) && list_empty(&domain->devices))
4312                 domain_exit(domain);
4313         up_read(&dmar_global_lock);
4314
4315         return 0;
4316 }
4317
4318 static struct notifier_block device_nb = {
4319         .notifier_call = device_notifier,
4320 };
4321
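/*
 * Memory hotplug notifier for the static identity domain: extend the 1:1
 * map when memory goes online, and unmap/flush the corresponding IOVA
 * ranges again when it goes offline.
 */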
4322 static int intel_iommu_memory_notifier(struct notifier_block *nb,
4323                                        unsigned long val, void *v)
4324 {
4325         struct memory_notify *mhp = v;
4326         unsigned long long start, end;
4327         unsigned long start_vpfn, last_vpfn;
4328
4329         switch (val) {
4330         case MEM_GOING_ONLINE:
4331                 start = mhp->start_pfn << PAGE_SHIFT;
4332                 end = ((mhp->start_pfn + mhp->nr_pages) << PAGE_SHIFT) - 1;
4333                 if (iommu_domain_identity_map(si_domain, start, end)) {
4334                         pr_warn("Failed to build identity map for [%llx-%llx]\n",
4335                                 start, end);
4336                         return NOTIFY_BAD;
4337                 }
4338                 break;
4339
4340         case MEM_OFFLINE:
4341         case MEM_CANCEL_ONLINE:
4342                 start_vpfn = mm_to_dma_pfn(mhp->start_pfn);
4343                 last_vpfn = mm_to_dma_pfn(mhp->start_pfn + mhp->nr_pages - 1);
4344                 while (start_vpfn <= last_vpfn) {
4345                         struct iova *iova;
4346                         struct dmar_drhd_unit *drhd;
4347                         struct intel_iommu *iommu;
4348                         struct page *freelist;
4349
4350                         iova = find_iova(&si_domain->iovad, start_vpfn);
4351                         if (iova == NULL) {
4352                                 pr_debug("Failed get IOVA for PFN %lx\n",
4353                                          start_vpfn);
4354                                 break;
4355                         }
4356
4357                         iova = split_and_remove_iova(&si_domain->iovad, iova,
4358                                                      start_vpfn, last_vpfn);
4359                         if (iova == NULL) {
4360                                 pr_warn("Failed to split IOVA PFN [%lx-%lx]\n",
4361                                         start_vpfn, last_vpfn);
4362                                 return NOTIFY_BAD;
4363                         }
4364
4365                         freelist = domain_unmap(si_domain, iova->pfn_lo,
4366                                                iova->pfn_hi);
4367
4368                         rcu_read_lock();
4369                         for_each_active_iommu(iommu, drhd)
4370                                 iommu_flush_iotlb_psi(iommu, si_domain,
4371                                         iova->pfn_lo, iova_size(iova),
4372                                         !freelist, 0);
4373                         rcu_read_unlock();
4374                         dma_free_pagelist(freelist);
4375
4376                         start_vpfn = iova->pfn_hi + 1;
4377                         free_iova_mem(iova);
4378                 }
4379                 break;
4380         }
4381
4382         return NOTIFY_OK;
4383 }
4384
4385 static struct notifier_block intel_iommu_memory_nb = {
4386         .notifier_call = intel_iommu_memory_notifier,
4387         .priority = 0
4388 };
4389
4390
4391 static ssize_t intel_iommu_show_version(struct device *dev,
4392                                         struct device_attribute *attr,
4393                                         char *buf)
4394 {
4395         struct intel_iommu *iommu = dev_get_drvdata(dev);
4396         u32 ver = readl(iommu->reg + DMAR_VER_REG);
4397         return sprintf(buf, "%d:%d\n",
4398                        DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver));
4399 }
4400 static DEVICE_ATTR(version, S_IRUGO, intel_iommu_show_version, NULL);
4401
4402 static ssize_t intel_iommu_show_address(struct device *dev,
4403                                         struct device_attribute *attr,
4404                                         char *buf)
4405 {
4406         struct intel_iommu *iommu = dev_get_drvdata(dev);
4407         return sprintf(buf, "%llx\n", iommu->reg_phys);
4408 }
4409 static DEVICE_ATTR(address, S_IRUGO, intel_iommu_show_address, NULL);
4410
4411 static ssize_t intel_iommu_show_cap(struct device *dev,
4412                                     struct device_attribute *attr,
4413                                     char *buf)
4414 {
4415         struct intel_iommu *iommu = dev_get_drvdata(dev);
4416         return sprintf(buf, "%llx\n", iommu->cap);
4417 }
4418 static DEVICE_ATTR(cap, S_IRUGO, intel_iommu_show_cap, NULL);
4419
4420 static ssize_t intel_iommu_show_ecap(struct device *dev,
4421                                     struct device_attribute *attr,
4422                                     char *buf)
4423 {
4424         struct intel_iommu *iommu = dev_get_drvdata(dev);
4425         return sprintf(buf, "%llx\n", iommu->ecap);
4426 }
4427 static DEVICE_ATTR(ecap, S_IRUGO, intel_iommu_show_ecap, NULL);
4428
4429 static ssize_t intel_iommu_show_ndoms(struct device *dev,
4430                                       struct device_attribute *attr,
4431                                       char *buf)
4432 {
4433         struct intel_iommu *iommu = dev_get_drvdata(dev);
4434         return sprintf(buf, "%ld\n", cap_ndoms(iommu->cap));
4435 }
4436 static DEVICE_ATTR(domains_supported, S_IRUGO, intel_iommu_show_ndoms, NULL);
4437
4438 static ssize_t intel_iommu_show_ndoms_used(struct device *dev,
4439                                            struct device_attribute *attr,
4440                                            char *buf)
4441 {
4442         struct intel_iommu *iommu = dev_get_drvdata(dev);
4443         return sprintf(buf, "%d\n", bitmap_weight(iommu->domain_ids,
4444                                                   cap_ndoms(iommu->cap)));
4445 }
4446 static DEVICE_ATTR(domains_used, S_IRUGO, intel_iommu_show_ndoms_used, NULL);
4447
4448 static struct attribute *intel_iommu_attrs[] = {
4449         &dev_attr_version.attr,
4450         &dev_attr_address.attr,
4451         &dev_attr_cap.attr,
4452         &dev_attr_ecap.attr,
4453         &dev_attr_domains_supported.attr,
4454         &dev_attr_domains_used.attr,
4455         NULL,
4456 };
4457
4458 static struct attribute_group intel_iommu_group = {
4459         .name = "intel-iommu",
4460         .attrs = intel_iommu_attrs,
4461 };
4462
4463 const struct attribute_group *intel_iommu_groups[] = {
4464         &intel_iommu_group,
4465         NULL,
4466 };
4467
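/*
 * Main entry point: parse the DMAR table and device scopes, initialize
 * the DMAR units via init_dmars(), install intel_dma_ops as the DMA-API
 * backend and register the sysfs groups plus the bus and memory
 * notifiers.
 */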
4468 int __init intel_iommu_init(void)
4469 {
4470         int ret = -ENODEV;
4471         struct dmar_drhd_unit *drhd;
4472         struct intel_iommu *iommu;
4473
4474         /* VT-d is required for a TXT/tboot launch, so enforce that */
4475         force_on = tboot_force_iommu();
4476
4477         if (iommu_init_mempool()) {
4478                 if (force_on)
4479                         panic("tboot: Failed to initialize iommu memory\n");
4480                 return -ENOMEM;
4481         }
4482
4483         down_write(&dmar_global_lock);
4484         if (dmar_table_init()) {
4485                 if (force_on)
4486                         panic("tboot: Failed to initialize DMAR table\n");
4487                 goto out_free_dmar;
4488         }
4489
4490         if (dmar_dev_scope_init() < 0) {
4491                 if (force_on)
4492                         panic("tboot: Failed to initialize DMAR device scope\n");
4493                 goto out_free_dmar;
4494         }
4495
4496         if (no_iommu || dmar_disabled)
4497                 goto out_free_dmar;
4498
4499         if (list_empty(&dmar_rmrr_units))
4500                 pr_info("No RMRR found\n");
4501
4502         if (list_empty(&dmar_atsr_units))
4503                 pr_info("No ATSR found\n");
4504
4505         if (dmar_init_reserved_ranges()) {
4506                 if (force_on)
4507                         panic("tboot: Failed to reserve iommu ranges\n");
4508                 goto out_free_reserved_range;
4509         }
4510
4511         init_no_remapping_devices();
4512
4513         ret = init_dmars();
4514         if (ret) {
4515                 if (force_on)
4516                         panic("tboot: Failed to initialize DMARs\n");
4517                 pr_err("Initialization failed\n");
4518                 goto out_free_reserved_range;
4519         }
4520         up_write(&dmar_global_lock);
4521         pr_info("Intel(R) Virtualization Technology for Directed I/O\n");
4522
4523         init_timer(&unmap_timer);
4524 #ifdef CONFIG_SWIOTLB
4525         swiotlb = 0;
4526 #endif
4527         dma_ops = &intel_dma_ops;
4528
4529         init_iommu_pm_ops();
4530
4531         for_each_active_iommu(iommu, drhd)
4532                 iommu->iommu_dev = iommu_device_create(NULL, iommu,
4533                                                        intel_iommu_groups,
4534                                                        "%s", iommu->name);
4535
4536         bus_set_iommu(&pci_bus_type, &intel_iommu_ops);
4537         bus_register_notifier(&pci_bus_type, &device_nb);
4538         if (si_domain && !hw_pass_through)
4539                 register_memory_notifier(&intel_iommu_memory_nb);
4540
4541         intel_iommu_enabled = 1;
4542
4543         return 0;
4544
4545 out_free_reserved_range:
4546         put_iova_domain(&reserved_iova_list);
4547 out_free_dmar:
4548         intel_iommu_free_dmars();
4549         up_write(&dmar_global_lock);
4550         iommu_exit_mempool();
4551         return ret;
4552 }
4553
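/*
 * pci_for_each_dma_alias() callback: clear the context entry for one
 * bus/devfn alias of the device being detached.
 */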
4554 static int iommu_detach_dev_cb(struct pci_dev *pdev, u16 alias, void *opaque)
4555 {
4556         struct intel_iommu *iommu = opaque;
4557
4558         iommu_detach_dev(iommu, PCI_BUS_NUM(alias), alias & 0xff);
4559         return 0;
4560 }
4561
4562 /*
4563  * NB - intel-iommu lacks any sort of reference counting for the users of
4564  * dependent devices.  If multiple endpoints have intersecting dependent
4565  * devices, unbinding the driver from any one of them will possibly leave
4566  * the others unable to operate.
4567  */
4568 static void iommu_detach_dependent_devices(struct intel_iommu *iommu,
4569                                            struct device *dev)
4570 {
4571         if (!iommu || !dev || !dev_is_pci(dev))
4572                 return;
4573
4574         pci_for_each_dma_alias(to_pci_dev(dev), &iommu_detach_dev_cb, iommu);
4575 }
4576
4577 static void domain_remove_one_dev_info(struct dmar_domain *domain,
4578                                        struct device *dev)
4579 {
4580         struct device_domain_info *info;
4581         struct intel_iommu *iommu;
4582         unsigned long flags;
4583         u8 bus, devfn;
4584
4585         iommu = device_to_iommu(dev, &bus, &devfn);
4586         if (!iommu)
4587                 return;
4588
4589         info = dev->archdata.iommu;
4590
4591         if (WARN_ON(!info))
4592                 return;
4593
4594         spin_lock_irqsave(&device_domain_lock, flags);
4595         unlink_domain_info(info);
4596         spin_unlock_irqrestore(&device_domain_lock, flags);
4597
4598         iommu_disable_dev_iotlb(info);
4599         iommu_detach_dev(iommu, info->bus, info->devfn);
4600         iommu_detach_dependent_devices(iommu, dev);
4601         free_devinfo_mem(info);
4602         domain_detach_iommu(domain, iommu);
4603
4604         spin_lock_irqsave(&domain->iommu_lock, flags);
4605         if (!domain->iommu_refcnt[iommu->seq_id])
4606                 iommu_detach_domain(domain, iommu);
4607         spin_unlock_irqrestore(&domain->iommu_lock, flags);
4608 }
4609
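/*
 * Initialize a domain created through the IOMMU API: set up its IOVA
 * allocator, reserve the special ranges, derive the adjusted address
 * width (AGAW) from the requested guest width and allocate the top-level
 * page directory.
 */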
4610 static int md_domain_init(struct dmar_domain *domain, int guest_width)
4611 {
4612         int adjust_width;
4613
4614         init_iova_domain(&domain->iovad, VTD_PAGE_SIZE, IOVA_START_PFN,
4615                         DMA_32BIT_PFN);
4616         domain_reserve_special_ranges(domain);
4617
4618         /* calculate AGAW */
4619         domain->gaw = guest_width;
4620         adjust_width = guestwidth_to_adjustwidth(guest_width);
4621         domain->agaw = width_to_agaw(adjust_width);
4622
4623         domain->iommu_coherency = 0;
4624         domain->iommu_snooping = 0;
4625         domain->iommu_superpage = 0;
4626         domain->max_addr = 0;
4627
4628         /* always allocate the top pgd */
4629         domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
4630         if (!domain->pgd)
4631                 return -ENOMEM;
4632         domain_flush_cache(domain, domain->pgd, PAGE_SIZE);
4633         return 0;
4634 }
4635
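/*
 * IOMMU API domain allocation.  Only IOMMU_DOMAIN_UNMANAGED is supported;
 * the new domain is created as a "virtual machine" domain with the default
 * 48-bit address width, and its aperture is reported accordingly.
 */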
4636 static struct iommu_domain *intel_iommu_domain_alloc(unsigned type)
4637 {
4638         struct dmar_domain *dmar_domain;
4639         struct iommu_domain *domain;
4640
4641         if (type != IOMMU_DOMAIN_UNMANAGED)
4642                 return NULL;
4643
4644         dmar_domain = alloc_domain(DOMAIN_FLAG_VIRTUAL_MACHINE);
4645         if (!dmar_domain) {
4646                 pr_err("Can't allocate dmar_domain\n");
4647                 return NULL;
4648         }
4649         if (md_domain_init(dmar_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
4650                 pr_err("Domain initialization failed\n");
4651                 domain_exit(dmar_domain);
4652                 return NULL;
4653         }
4654         domain_update_iommu_cap(dmar_domain);
4655
4656         domain = &dmar_domain->domain;
4657         domain->geometry.aperture_start = 0;
4658         domain->geometry.aperture_end   = __DOMAIN_MAX_ADDR(dmar_domain->gaw);
4659         domain->geometry.force_aperture = true;
4660
4661         return domain;
4662 }
4663
4664 static void intel_iommu_domain_free(struct iommu_domain *domain)
4665 {
4666         domain_exit(to_dmar_domain(domain));
4667 }
4668
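/*
 * Attach a device to an IOMMU API domain.  Devices locked down by an RMRR
 * are refused, and any existing mapping for the device is torn down first.
 * The domain's address width is then clamped to what this IOMMU can walk,
 * dropping surplus page-table levels, before the device is added via
 * domain_add_dev_info().
 */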
4669 static int intel_iommu_attach_device(struct iommu_domain *domain,
4670                                      struct device *dev)
4671 {
4672         struct dmar_domain *dmar_domain = to_dmar_domain(domain);
4673         struct intel_iommu *iommu;
4674         int addr_width;
4675         u8 bus, devfn;
4676
4677         if (device_is_rmrr_locked(dev)) {
4678                 dev_warn(dev, "Device is ineligible for IOMMU domain attach due to platform RMRR requirement.  Contact your platform vendor.\n");
4679                 return -EPERM;
4680         }
4681
4682         /* normally dev is not mapped */
4683         if (unlikely(domain_context_mapped(dev))) {
4684                 struct dmar_domain *old_domain;
4685
4686                 old_domain = find_domain(dev);
4687                 if (old_domain) {
4688                         if (domain_type_is_vm_or_si(dmar_domain))
4689                                 domain_remove_one_dev_info(old_domain, dev);
4690                         else
4691                                 domain_remove_dev_info(old_domain);
4692
4693                         if (!domain_type_is_vm_or_si(old_domain) &&
4694                              list_empty(&old_domain->devices))
4695                                 domain_exit(old_domain);
4696                 }
4697         }
4698
4699         iommu = device_to_iommu(dev, &bus, &devfn);
4700         if (!iommu)
4701                 return -ENODEV;
4702
4703         /* check if this iommu agaw is sufficient for max mapped address */
4704         addr_width = agaw_to_width(iommu->agaw);
4705         if (addr_width > cap_mgaw(iommu->cap))
4706                 addr_width = cap_mgaw(iommu->cap);
4707
4708         if (dmar_domain->max_addr > (1LL << addr_width)) {
4709                 pr_err("%s: iommu width (%d) is not sufficient for the mapped address (%llx)\n",
4710                        __func__, addr_width, dmar_domain->max_addr);
4712                 return -EFAULT;
4713         }
4714         dmar_domain->gaw = addr_width;
4715
4716         /*
4717          * Knock out extra levels of page tables if necessary
4718          */
4719         while (iommu->agaw < dmar_domain->agaw) {
4720                 struct dma_pte *pte;
4721
4722                 pte = dmar_domain->pgd;
4723                 if (dma_pte_present(pte)) {
4724                         dmar_domain->pgd = (struct dma_pte *)
4725                                 phys_to_virt(dma_pte_addr(pte));
4726                         free_pgtable_page(pte);
4727                 }
4728                 dmar_domain->agaw--;
4729         }
4730
4731         return domain_add_dev_info(dmar_domain, dev);
4732 }
4733
4734 static void intel_iommu_detach_device(struct iommu_domain *domain,
4735                                       struct device *dev)
4736 {
4737         domain_remove_one_dev_info(to_dmar_domain(domain), dev);
4738 }
4739
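/*
 * Map a physically contiguous range into an IOMMU API domain.  IOMMU_READ
 * and IOMMU_WRITE translate to DMA_PTE_READ/DMA_PTE_WRITE; IOMMU_CACHE
 * only becomes DMA_PTE_SNP when the domain's IOMMUs support snoop control.
 * A caller would normally reach this through the generic wrapper, roughly
 * (illustrative only, not code from this file):
 *
 *        ret = iommu_map(domain, iova, paddr, size,
 *                        IOMMU_READ | IOMMU_WRITE);
 */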
4740 static int intel_iommu_map(struct iommu_domain *domain,
4741                            unsigned long iova, phys_addr_t hpa,
4742                            size_t size, int iommu_prot)
4743 {
4744         struct dmar_domain *dmar_domain = to_dmar_domain(domain);
4745         u64 max_addr;
4746         int prot = 0;
4747         int ret;
4748
4749         if (iommu_prot & IOMMU_READ)
4750                 prot |= DMA_PTE_READ;
4751         if (iommu_prot & IOMMU_WRITE)
4752                 prot |= DMA_PTE_WRITE;
4753         if ((iommu_prot & IOMMU_CACHE) && dmar_domain->iommu_snooping)
4754                 prot |= DMA_PTE_SNP;
4755
4756         max_addr = iova + size;
4757         if (dmar_domain->max_addr < max_addr) {
4758                 u64 end;
4759
4760                 /* check if minimum agaw is sufficient for mapped address */
4761                 end = __DOMAIN_MAX_ADDR(dmar_domain->gaw) + 1;
4762                 if (end < max_addr) {
4763                         pr_err("%s: iommu width (%d) is not sufficient for the mapped address (%llx)\n",
4764                                __func__, dmar_domain->gaw, max_addr);
4766                         return -EFAULT;
4767                 }
4768                 dmar_domain->max_addr = max_addr;
4769         }
4770         /* Round up size to next multiple of PAGE_SIZE, if it and
4771            the low bits of hpa would take us onto the next page */
4772         size = aligned_nrpages(hpa, size);
4773         ret = domain_pfn_mapping(dmar_domain, iova >> VTD_PAGE_SHIFT,
4774                                  hpa >> VTD_PAGE_SHIFT, size, prot);
4775         return ret;
4776 }
4777
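/*
 * Unmap a range from an IOMMU API domain.  If the IOVA is covered by a
 * large page, the size is rounded up to that page size and the value
 * returned tells the core how much was really unmapped.  The collected
 * page-table pages are freed only after the IOTLB of every IOMMU in the
 * domain has been flushed.
 */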
4778 static size_t intel_iommu_unmap(struct iommu_domain *domain,
4779                                 unsigned long iova, size_t size)
4780 {
4781         struct dmar_domain *dmar_domain = to_dmar_domain(domain);
4782         struct page *freelist = NULL;
4783         struct intel_iommu *iommu;
4784         unsigned long start_pfn, last_pfn;
4785         unsigned int npages;
4786         int iommu_id, level = 0;
4787
4788         /* Cope with horrid API which requires us to unmap more than the
4789            size argument if it happens to be a large-page mapping. */
4790         if (!pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, &level))
4791                 BUG();
4792
4793         if (size < VTD_PAGE_SIZE << level_to_offset_bits(level))
4794                 size = VTD_PAGE_SIZE << level_to_offset_bits(level);
4795
4796         start_pfn = iova >> VTD_PAGE_SHIFT;
4797         last_pfn = (iova + size - 1) >> VTD_PAGE_SHIFT;
4798
4799         freelist = domain_unmap(dmar_domain, start_pfn, last_pfn);
4800
4801         npages = last_pfn - start_pfn + 1;
4802
4803         for_each_domain_iommu(iommu_id, dmar_domain) {
4804                 iommu = g_iommus[iommu_id];
4805
4806                 iommu_flush_iotlb_psi(iommu, dmar_domain,
4807                                       start_pfn, npages, !freelist, 0);
4808         }
4809
4810         dma_free_pagelist(freelist);
4811
4812         if (dmar_domain->max_addr == iova + size)
4813                 dmar_domain->max_addr = iova;
4814
4815         return size;
4816 }
4817
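/*
 * Translate an IOVA to a physical address by walking the domain's page
 * table.  This returns the (large-)page-aligned address stored in the
 * PTE; a missing translation yields 0.
 */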
4818 static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
4819                                             dma_addr_t iova)
4820 {
4821         struct dmar_domain *dmar_domain = to_dmar_domain(domain);
4822         struct dma_pte *pte;
4823         int level = 0;
4824         u64 phys = 0;
4825
4826         pte = pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, &level);
4827         if (pte)
4828                 phys = dma_pte_addr(pte);
4829
4830         return phys;
4831 }
4832
4833 static bool intel_iommu_capable(enum iommu_cap cap)
4834 {
4835         if (cap == IOMMU_CAP_CACHE_COHERENCY)
4836                 return domain_update_iommu_snooping(NULL) == 1;
4837         if (cap == IOMMU_CAP_INTR_REMAP)
4838                 return irq_remapping_enabled == 1;
4839
4840         return false;
4841 }
4842
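/*
 * iommu_ops add_device()/remove_device() callbacks: link the device to its
 * IOMMU's sysfs node and place it into an iommu_group via
 * iommu_group_get_for_dev(); removal undoes both steps.
 */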
4843 static int intel_iommu_add_device(struct device *dev)
4844 {
4845         struct intel_iommu *iommu;
4846         struct iommu_group *group;
4847         u8 bus, devfn;
4848
4849         iommu = device_to_iommu(dev, &bus, &devfn);
4850         if (!iommu)
4851                 return -ENODEV;
4852
4853         iommu_device_link(iommu->iommu_dev, dev);
4854
4855         group = iommu_group_get_for_dev(dev);
4856
4857         if (IS_ERR(group))
4858                 return PTR_ERR(group);
4859
4860         iommu_group_put(group);
4861         return 0;
4862 }
4863
4864 static void intel_iommu_remove_device(struct device *dev)
4865 {
4866         struct intel_iommu *iommu;
4867         u8 bus, devfn;
4868
4869         iommu = device_to_iommu(dev, &bus, &devfn);
4870         if (!iommu)
4871                 return;
4872
4873         iommu_group_remove_device(dev);
4874
4875         iommu_device_unlink(iommu->iommu_dev, dev);
4876 }
4877
4878 static const struct iommu_ops intel_iommu_ops = {
4879         .capable        = intel_iommu_capable,
4880         .domain_alloc   = intel_iommu_domain_alloc,
4881         .domain_free    = intel_iommu_domain_free,
4882         .attach_dev     = intel_iommu_attach_device,
4883         .detach_dev     = intel_iommu_detach_device,
4884         .map            = intel_iommu_map,
4885         .unmap          = intel_iommu_unmap,
4886         .map_sg         = default_iommu_map_sg,
4887         .iova_to_phys   = intel_iommu_iova_to_phys,
4888         .add_device     = intel_iommu_add_device,
4889         .remove_device  = intel_iommu_remove_device,
4890         .pgsize_bitmap  = INTEL_IOMMU_PGSIZES,
4891 };
4892
4893 static void quirk_iommu_g4x_gfx(struct pci_dev *dev)
4894 {
4895         /* G4x/GM45 integrated gfx dmar support is totally busted. */
4896         pr_info("Disabling IOMMU for graphics on this chipset\n");
4897         dmar_map_gfx = 0;
4898 }
4899
4900 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_g4x_gfx);
4901 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e00, quirk_iommu_g4x_gfx);
4902 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e10, quirk_iommu_g4x_gfx);
4903 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e20, quirk_iommu_g4x_gfx);
4904 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e30, quirk_iommu_g4x_gfx);
4905 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e40, quirk_iommu_g4x_gfx);
4906 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e90, quirk_iommu_g4x_gfx);
4907
4908 static void quirk_iommu_rwbf(struct pci_dev *dev)
4909 {
4910         /*
4911          * Mobile 4 Series Chipset neglects to set RWBF capability,
4912          * but needs it. Same seems to hold for the desktop versions.
4913          */
4914         pr_info("Forcing write-buffer flush capability\n");
4915         rwbf_quirk = 1;
4916 }
4917
4918 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_rwbf);
4919 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e00, quirk_iommu_rwbf);
4920 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e10, quirk_iommu_rwbf);
4921 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e20, quirk_iommu_rwbf);
4922 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e30, quirk_iommu_rwbf);
4923 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e40, quirk_iommu_rwbf);
4924 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e90, quirk_iommu_rwbf);
4925
4926 #define GGC 0x52
4927 #define GGC_MEMORY_SIZE_MASK    (0xf << 8)
4928 #define GGC_MEMORY_SIZE_NONE    (0x0 << 8)
4929 #define GGC_MEMORY_SIZE_1M      (0x1 << 8)
4930 #define GGC_MEMORY_SIZE_2M      (0x3 << 8)
4931 #define GGC_MEMORY_VT_ENABLED   (0x8 << 8)
4932 #define GGC_MEMORY_SIZE_2M_VT   (0x9 << 8)
4933 #define GGC_MEMORY_SIZE_3M_VT   (0xa << 8)
4934 #define GGC_MEMORY_SIZE_4M_VT   (0xb << 8)
4935
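/*
 * Ironlake/Calpella graphics quirk: the GGC register above describes the
 * stolen memory the BIOS set aside for the graphics translation table.
 * If the VT-d-aware layout was not enabled there is no shadow GTT for the
 * IOMMU to use, so translation for graphics is disabled; otherwise batched
 * IOTLB flushing is turned off, since the GPU must be idle before its
 * mappings are flushed.
 */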
4936 static void quirk_calpella_no_shadow_gtt(struct pci_dev *dev)
4937 {
4938         unsigned short ggc;
4939
4940         if (pci_read_config_word(dev, GGC, &ggc))
4941                 return;
4942
4943         if (!(ggc & GGC_MEMORY_VT_ENABLED)) {
4944                 pr_info("BIOS has allocated no shadow GTT; disabling IOMMU for graphics\n");
4945                 dmar_map_gfx = 0;
4946         } else if (dmar_map_gfx) {
4947                 /* we have to ensure the gfx device is idle before we flush */
4948                 pr_info("Disabling batched IOTLB flush on Ironlake\n");
4949                 intel_iommu_strict = 1;
4950         }
4951 }
4952 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0040, quirk_calpella_no_shadow_gtt);
4953 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0044, quirk_calpella_no_shadow_gtt);
4954 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0062, quirk_calpella_no_shadow_gtt);
4955 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x006a, quirk_calpella_no_shadow_gtt);
4956
4957 /* On Tylersburg chipsets, some BIOSes have been known to enable the
4958    ISOCH DMAR unit for the Azalia sound device, but not give it any
4959    TLB entries, which causes it to deadlock. Check for that.  We do
4960    this in a function called from init_dmars(), instead of in a PCI
4961    quirk, because we don't want to print the obnoxious "BIOS broken"
4962    message if VT-d is actually disabled.
4963 */
4964 static void __init check_tylersburg_isoch(void)
4965 {
4966         struct pci_dev *pdev;
4967         uint32_t vtisochctrl;
4968
4969         /* If there's no Azalia in the system anyway, forget it. */
4970         pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x3a3e, NULL);
4971         if (!pdev)
4972                 return;
4973         pci_dev_put(pdev);
4974
4975         /* System Management Registers. Might be hidden, in which case
4976            we can't do the sanity check. But that's OK, because the
4977            known-broken BIOSes _don't_ actually hide it, so far. */
4978         pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x342e, NULL);
4979         if (!pdev)
4980                 return;
4981
4982         if (pci_read_config_dword(pdev, 0x188, &vtisochctrl)) {
4983                 pci_dev_put(pdev);
4984                 return;
4985         }
4986
4987         pci_dev_put(pdev);
4988
4989         /* If Azalia DMA is routed to the non-isoch DMAR unit, fine. */
4990         if (vtisochctrl & 1)
4991                 return;
4992
4993         /* Drop all bits other than the number of TLB entries */
4994         vtisochctrl &= 0x1c;
4995
4996         /* If we have the recommended number of TLB entries (16), fine. */
4997         if (vtisochctrl == 0x10)
4998                 return;
4999
5000         /* Zero TLB entries? That is hopelessly broken; warn and work around it. */
5001         if (!vtisochctrl) {
5002                 WARN(1, "Your BIOS is broken; DMA routed to ISOCH DMAR unit but no TLB space.\n"
5003                      "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
5004                      dmi_get_system_info(DMI_BIOS_VENDOR),
5005                      dmi_get_system_info(DMI_BIOS_VERSION),
5006                      dmi_get_system_info(DMI_PRODUCT_VERSION));
5007                 iommu_identity_mapping |= IDENTMAP_AZALIA;
5008                 return;
5009         }
5010
5011         pr_warn("Recommended TLB entries for ISOCH unit is 16; your BIOS set %d\n",
5012                vtisochctrl);
5013 }