iommu/vt-d: Fix reference count in iommu_prepare_isa
[firefly-linux-kernel-4.4.55.git] drivers/iommu/intel-iommu.c
1 /*
2  * Copyright © 2006-2014 Intel Corporation.
3  *
4  * This program is free software; you can redistribute it and/or modify it
5  * under the terms and conditions of the GNU General Public License,
6  * version 2, as published by the Free Software Foundation.
7  *
8  * This program is distributed in the hope it will be useful, but WITHOUT
9  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
11  * more details.
12  *
13  * Authors: David Woodhouse <dwmw2@infradead.org>,
14  *          Ashok Raj <ashok.raj@intel.com>,
15  *          Shaohua Li <shaohua.li@intel.com>,
16  *          Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>,
17  *          Fenghua Yu <fenghua.yu@intel.com>
18  */
19
20 #include <linux/init.h>
21 #include <linux/bitmap.h>
22 #include <linux/debugfs.h>
23 #include <linux/export.h>
24 #include <linux/slab.h>
25 #include <linux/irq.h>
26 #include <linux/interrupt.h>
27 #include <linux/spinlock.h>
28 #include <linux/pci.h>
29 #include <linux/dmar.h>
30 #include <linux/dma-mapping.h>
31 #include <linux/mempool.h>
32 #include <linux/memory.h>
33 #include <linux/timer.h>
34 #include <linux/iova.h>
35 #include <linux/iommu.h>
36 #include <linux/intel-iommu.h>
37 #include <linux/syscore_ops.h>
38 #include <linux/tboot.h>
39 #include <linux/dmi.h>
40 #include <linux/pci-ats.h>
41 #include <linux/memblock.h>
42 #include <linux/dma-contiguous.h>
43 #include <asm/irq_remapping.h>
44 #include <asm/cacheflush.h>
45 #include <asm/iommu.h>
46
47 #include "irq_remapping.h"
48 #include "pci.h"
49
50 #define ROOT_SIZE               VTD_PAGE_SIZE
51 #define CONTEXT_SIZE            VTD_PAGE_SIZE
52
53 #define IS_GFX_DEVICE(pdev) ((pdev->class >> 16) == PCI_BASE_CLASS_DISPLAY)
54 #define IS_ISA_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_BRIDGE_ISA)
55 #define IS_AZALIA(pdev) ((pdev)->vendor == 0x8086 && (pdev)->device == 0x3a3e)
56
57 #define IOAPIC_RANGE_START      (0xfee00000)
58 #define IOAPIC_RANGE_END        (0xfeefffff)
59 #define IOVA_START_ADDR         (0x1000)
60
61 #define DEFAULT_DOMAIN_ADDRESS_WIDTH 48
62
63 #define MAX_AGAW_WIDTH 64
64 #define MAX_AGAW_PFN_WIDTH      (MAX_AGAW_WIDTH - VTD_PAGE_SHIFT)
65
66 #define __DOMAIN_MAX_PFN(gaw)  ((((uint64_t)1) << (gaw-VTD_PAGE_SHIFT)) - 1)
67 #define __DOMAIN_MAX_ADDR(gaw) ((((uint64_t)1) << gaw) - 1)
68
69 /* We limit DOMAIN_MAX_PFN to fit in an unsigned long, and DOMAIN_MAX_ADDR
70    to match. That way, we can use 'unsigned long' for PFNs with impunity. */
71 #define DOMAIN_MAX_PFN(gaw)     ((unsigned long) min_t(uint64_t, \
72                                 __DOMAIN_MAX_PFN(gaw), (unsigned long)-1))
73 #define DOMAIN_MAX_ADDR(gaw)    (((uint64_t)__DOMAIN_MAX_PFN(gaw)) << VTD_PAGE_SHIFT)
74
75 #define IOVA_PFN(addr)          ((addr) >> PAGE_SHIFT)
76 #define DMA_32BIT_PFN           IOVA_PFN(DMA_BIT_MASK(32))
77 #define DMA_64BIT_PFN           IOVA_PFN(DMA_BIT_MASK(64))
78
79 /* page table handling */
80 #define LEVEL_STRIDE            (9)
81 #define LEVEL_MASK              (((u64)1 << LEVEL_STRIDE) - 1)
82
83 /*
84  * This bitmap is used to advertise the page sizes our hardware supports
85  * to the IOMMU core, which will then use this information to split
86  * physically contiguous memory regions it is mapping into page sizes
87  * that we support.
88  *
89  * Traditionally the IOMMU core just handed us the mappings directly,
90  * after making sure the size is a power-of-two number of 4KiB pages and that the
91  * mapping has natural alignment.
92  *
93  * To retain this behavior, we currently advertise that we support
94  * all page sizes that are a power-of-two multiple of 4KiB.
95  *
96  * If at some point we'd like to utilize the IOMMU core's new behavior,
97  * we could change this to advertise the real page sizes we support.
98  */
99 #define INTEL_IOMMU_PGSIZES     (~0xFFFUL)
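/*
 * Example: ~0xFFFUL has bits 12, 13, 14, ... set, so the IOMMU core is
 * told that 4KiB, 8KiB, 16KiB, ... (every power-of-two size >= 4KiB)
 * can be mapped, matching the traditional behaviour described above.
 */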
100
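/*
 * AGAW/level/width conversion helpers.  Each page-table level decodes
 * LEVEL_STRIDE (9) bits of the address, so an adjusted guest address
 * width (agaw) of 0 means a 2-level table covering 30 bits, agaw 1 a
 * 3-level table covering 39 bits, and so on, capped at MAX_AGAW_WIDTH.
 */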
101 static inline int agaw_to_level(int agaw)
102 {
103         return agaw + 2;
104 }
105
106 static inline int agaw_to_width(int agaw)
107 {
108         return min_t(int, 30 + agaw * LEVEL_STRIDE, MAX_AGAW_WIDTH);
109 }
110
111 static inline int width_to_agaw(int width)
112 {
113         return DIV_ROUND_UP(width - 30, LEVEL_STRIDE);
114 }
115
116 static inline unsigned int level_to_offset_bits(int level)
117 {
118         return (level - 1) * LEVEL_STRIDE;
119 }
120
121 static inline int pfn_level_offset(unsigned long pfn, int level)
122 {
123         return (pfn >> level_to_offset_bits(level)) & LEVEL_MASK;
124 }
125
126 static inline unsigned long level_mask(int level)
127 {
128         return -1UL << level_to_offset_bits(level);
129 }
130
131 static inline unsigned long level_size(int level)
132 {
133         return 1UL << level_to_offset_bits(level);
134 }
135
136 static inline unsigned long align_to_level(unsigned long pfn, int level)
137 {
138         return (pfn + level_size(level) - 1) & level_mask(level);
139 }
140
141 static inline unsigned long lvl_to_nr_pages(unsigned int lvl)
142 {
143         return  1 << min_t(int, (lvl - 1) * LEVEL_STRIDE, MAX_AGAW_PFN_WIDTH);
144 }
145
146 /* VT-d pages must never be _larger_ than MM pages. Otherwise things
147    are never going to work. */
148 static inline unsigned long dma_to_mm_pfn(unsigned long dma_pfn)
149 {
150         return dma_pfn >> (PAGE_SHIFT - VTD_PAGE_SHIFT);
151 }
152
153 static inline unsigned long mm_to_dma_pfn(unsigned long mm_pfn)
154 {
155         return mm_pfn << (PAGE_SHIFT - VTD_PAGE_SHIFT);
156 }
157 static inline unsigned long page_to_dma_pfn(struct page *pg)
158 {
159         return mm_to_dma_pfn(page_to_pfn(pg));
160 }
161 static inline unsigned long virt_to_dma_pfn(void *p)
162 {
163         return page_to_dma_pfn(virt_to_page(p));
164 }
165
166 /* global iommu list, set NULL for ignored DMAR units */
167 static struct intel_iommu **g_iommus;
168
169 static void __init check_tylersburg_isoch(void);
170 static int rwbf_quirk;
171
172 /*
173  * set to 1 to panic the kernel if VT-d can't be successfully enabled
174  * (used when kernel is launched w/ TXT)
175  */
176 static int force_on = 0;
177
178 /*
179  * 0: Present
180  * 1-11: Reserved
181  * 12-63: Context Ptr (12 - (haw-1))
182  * 64-127: Reserved
183  */
184 struct root_entry {
185         u64     val;
186         u64     rsvd1;
187 };
188 #define ROOT_ENTRY_NR (VTD_PAGE_SIZE/sizeof(struct root_entry))
189 static inline bool root_present(struct root_entry *root)
190 {
191         return (root->val & 1);
192 }
193 static inline void set_root_present(struct root_entry *root)
194 {
195         root->val |= 1;
196 }
197 static inline void set_root_value(struct root_entry *root, unsigned long value)
198 {
199         root->val |= value & VTD_PAGE_MASK;
200 }
201
202 static inline struct context_entry *
203 get_context_addr_from_root(struct root_entry *root)
204 {
205         return (struct context_entry *)
206                 (root_present(root)?phys_to_virt(
207                 root->val & VTD_PAGE_MASK) :
208                 NULL);
209 }
210
211 /*
212  * low 64 bits:
213  * 0: present
214  * 1: fault processing disable
215  * 2-3: translation type
216  * 12-63: address space root
217  * high 64 bits:
218  * 0-2: address width
219  * 3-6: avail
220  * 8-23: domain id
221  */
222 struct context_entry {
223         u64 lo;
224         u64 hi;
225 };
226
227 static inline bool context_present(struct context_entry *context)
228 {
229         return (context->lo & 1);
230 }
231 static inline void context_set_present(struct context_entry *context)
232 {
233         context->lo |= 1;
234 }
235
236 static inline void context_set_fault_enable(struct context_entry *context)
237 {
238         context->lo &= (((u64)-1) << 2) | 1;
239 }
240
241 static inline void context_set_translation_type(struct context_entry *context,
242                                                 unsigned long value)
243 {
244         context->lo &= (((u64)-1) << 4) | 3;
245         context->lo |= (value & 3) << 2;
246 }
247
248 static inline void context_set_address_root(struct context_entry *context,
249                                             unsigned long value)
250 {
251         context->lo |= value & VTD_PAGE_MASK;
252 }
253
254 static inline void context_set_address_width(struct context_entry *context,
255                                              unsigned long value)
256 {
257         context->hi |= value & 7;
258 }
259
260 static inline void context_set_domain_id(struct context_entry *context,
261                                          unsigned long value)
262 {
263         context->hi |= (value & ((1 << 16) - 1)) << 8;
264 }
265
266 static inline void context_clear_entry(struct context_entry *context)
267 {
268         context->lo = 0;
269         context->hi = 0;
270 }
271
272 /*
273  * 0: readable
274  * 1: writable
275  * 2-6: reserved
276  * 7: super page
277  * 8-10: available
278  * 11: snoop behavior
279  * 12-63: Host physical address
280  */
281 struct dma_pte {
282         u64 val;
283 };
284
285 static inline void dma_clear_pte(struct dma_pte *pte)
286 {
287         pte->val = 0;
288 }
289
290 static inline u64 dma_pte_addr(struct dma_pte *pte)
291 {
292 #ifdef CONFIG_64BIT
293         return pte->val & VTD_PAGE_MASK;
294 #else
295         /* Must have a full atomic 64-bit read */
296         return  __cmpxchg64(&pte->val, 0ULL, 0ULL) & VTD_PAGE_MASK;
297 #endif
298 }
299
300 static inline bool dma_pte_present(struct dma_pte *pte)
301 {
302         return (pte->val & 3) != 0;
303 }
304
305 static inline bool dma_pte_superpage(struct dma_pte *pte)
306 {
307         return (pte->val & (1 << 7));
308 }
309
310 static inline int first_pte_in_page(struct dma_pte *pte)
311 {
312         return !((unsigned long)pte & ~VTD_PAGE_MASK);
313 }
314
315 /*
316  * This domain is a static identity mapping domain.
317  *      1. This domain creates a static 1:1 mapping to all usable memory.
318  *      2. It maps to each iommu if successful.
319  *      3. Each iommu maps to this domain if successful.
320  */
321 static struct dmar_domain *si_domain;
322 static int hw_pass_through = 1;
323
324 /* devices under the same p2p bridge are owned in one domain */
325 #define DOMAIN_FLAG_P2P_MULTIPLE_DEVICES (1 << 0)
326
327 /* domain represents a virtual machine; more than one device
328  * across iommus may be owned by one domain, e.g. a kvm guest.
329  */
330 #define DOMAIN_FLAG_VIRTUAL_MACHINE     (1 << 1)
331
332 /* si_domain contains multiple devices */
333 #define DOMAIN_FLAG_STATIC_IDENTITY     (1 << 2)
334
335 /* define the limit of IOMMUs supported in each domain */
336 #ifdef  CONFIG_X86
337 # define        IOMMU_UNITS_SUPPORTED   MAX_IO_APICS
338 #else
339 # define        IOMMU_UNITS_SUPPORTED   64
340 #endif
341
342 struct dmar_domain {
343         int     id;                     /* domain id */
344         int     nid;                    /* node id */
345         DECLARE_BITMAP(iommu_bmp, IOMMU_UNITS_SUPPORTED);
346                                         /* bitmap of iommus this domain uses*/
347
348         struct list_head devices;       /* all devices' list */
349         struct iova_domain iovad;       /* iova's that belong to this domain */
350
351         struct dma_pte  *pgd;           /* virtual address */
352         int             gaw;            /* max guest address width */
353
354         /* adjusted guest address width, 0 is level 2 30-bit */
355         int             agaw;
356
357         int             flags;          /* flags to find out type of domain */
358
359         int             iommu_coherency;/* indicate coherency of iommu access */
360         int             iommu_snooping; /* indicate snooping control feature*/
361         int             iommu_count;    /* reference count of iommu */
362         int             iommu_superpage;/* Level of superpages supported:
363                                            0 == 4KiB (no superpages), 1 == 2MiB,
364                                            2 == 1GiB, 3 == 512GiB, 4 == 1TiB */
365         spinlock_t      iommu_lock;     /* protect iommu set in domain */
366         u64             max_addr;       /* maximum mapped address */
367 };
368
369 /* PCI domain-device relationship */
370 struct device_domain_info {
371         struct list_head link;  /* link to domain siblings */
372         struct list_head global; /* link to global list */
373         u8 bus;                 /* PCI bus number */
374         u8 devfn;               /* PCI devfn number */
375         struct device *dev; /* it's NULL for PCIe-to-PCI bridge */
376         struct intel_iommu *iommu; /* IOMMU used by this device */
377         struct dmar_domain *domain; /* pointer to domain */
378 };
379
380 struct dmar_rmrr_unit {
381         struct list_head list;          /* list of rmrr units   */
382         struct acpi_dmar_header *hdr;   /* ACPI header          */
383         u64     base_address;           /* reserved base address*/
384         u64     end_address;            /* reserved end address */
385         struct dmar_dev_scope *devices; /* target devices */
386         int     devices_cnt;            /* target device count */
387 };
388
389 struct dmar_atsr_unit {
390         struct list_head list;          /* list of ATSR units */
391         struct acpi_dmar_header *hdr;   /* ACPI header */
392         struct dmar_dev_scope *devices; /* target devices */
393         int devices_cnt;                /* target device count */
394         u8 include_all:1;               /* include all ports */
395 };
396
397 static LIST_HEAD(dmar_atsr_units);
398 static LIST_HEAD(dmar_rmrr_units);
399
400 #define for_each_rmrr_units(rmrr) \
401         list_for_each_entry(rmrr, &dmar_rmrr_units, list)
402
403 static void flush_unmaps_timeout(unsigned long data);
404
405 static DEFINE_TIMER(unmap_timer,  flush_unmaps_timeout, 0, 0);
406
407 #define HIGH_WATER_MARK 250
408 struct deferred_flush_tables {
409         int next;
410         struct iova *iova[HIGH_WATER_MARK];
411         struct dmar_domain *domain[HIGH_WATER_MARK];
412         struct page *freelist[HIGH_WATER_MARK];
413 };
414
415 static struct deferred_flush_tables *deferred_flush;
416
417 /* number of IOMMUs in the system; bounds the g_iommus array and iommu bitmaps */
418 static int g_num_of_iommus;
419
420 static DEFINE_SPINLOCK(async_umap_flush_lock);
421 static LIST_HEAD(unmaps_to_do);
422
423 static int timer_on;
424 static long list_size;
425
426 static void domain_exit(struct dmar_domain *domain);
427 static void domain_remove_dev_info(struct dmar_domain *domain);
428 static void domain_remove_one_dev_info(struct dmar_domain *domain,
429                                        struct device *dev);
430 static void iommu_detach_dependent_devices(struct intel_iommu *iommu,
431                                            struct device *dev);
432
433 #ifdef CONFIG_INTEL_IOMMU_DEFAULT_ON
434 int dmar_disabled = 0;
435 #else
436 int dmar_disabled = 1;
437 #endif /*CONFIG_INTEL_IOMMU_DEFAULT_ON*/
438
439 int intel_iommu_enabled = 0;
440 EXPORT_SYMBOL_GPL(intel_iommu_enabled);
441
442 static int dmar_map_gfx = 1;
443 static int dmar_forcedac;
444 static int intel_iommu_strict;
445 static int intel_iommu_superpage = 1;
446
447 int intel_iommu_gfx_mapped;
448 EXPORT_SYMBOL_GPL(intel_iommu_gfx_mapped);
449
450 #define DUMMY_DEVICE_DOMAIN_INFO ((struct device_domain_info *)(-1))
451 static DEFINE_SPINLOCK(device_domain_lock);
452 static LIST_HEAD(device_domain_list);
453
454 static struct iommu_ops intel_iommu_ops;
455
456 static int __init intel_iommu_setup(char *str)
457 {
458         if (!str)
459                 return -EINVAL;
460         while (*str) {
461                 if (!strncmp(str, "on", 2)) {
462                         dmar_disabled = 0;
463                         printk(KERN_INFO "Intel-IOMMU: enabled\n");
464                 } else if (!strncmp(str, "off", 3)) {
465                         dmar_disabled = 1;
466                         printk(KERN_INFO "Intel-IOMMU: disabled\n");
467                 } else if (!strncmp(str, "igfx_off", 8)) {
468                         dmar_map_gfx = 0;
469                         printk(KERN_INFO
470                                 "Intel-IOMMU: disable GFX device mapping\n");
471                 } else if (!strncmp(str, "forcedac", 8)) {
472                         printk(KERN_INFO
473                                 "Intel-IOMMU: Forcing DAC for PCI devices\n");
474                         dmar_forcedac = 1;
475                 } else if (!strncmp(str, "strict", 6)) {
476                         printk(KERN_INFO
477                                 "Intel-IOMMU: disable batched IOTLB flush\n");
478                         intel_iommu_strict = 1;
479                 } else if (!strncmp(str, "sp_off", 6)) {
480                         printk(KERN_INFO
481                                 "Intel-IOMMU: disable supported super page\n");
482                         intel_iommu_superpage = 0;
483                 }
484
485                 str += strcspn(str, ",");
486                 while (*str == ',')
487                         str++;
488         }
489         return 0;
490 }
491 __setup("intel_iommu=", intel_iommu_setup);
492
493 static struct kmem_cache *iommu_domain_cache;
494 static struct kmem_cache *iommu_devinfo_cache;
495 static struct kmem_cache *iommu_iova_cache;
496
497 static inline void *alloc_pgtable_page(int node)
498 {
499         struct page *page;
500         void *vaddr = NULL;
501
502         page = alloc_pages_node(node, GFP_ATOMIC | __GFP_ZERO, 0);
503         if (page)
504                 vaddr = page_address(page);
505         return vaddr;
506 }
507
508 static inline void free_pgtable_page(void *vaddr)
509 {
510         free_page((unsigned long)vaddr);
511 }
512
513 static inline void *alloc_domain_mem(void)
514 {
515         return kmem_cache_alloc(iommu_domain_cache, GFP_ATOMIC);
516 }
517
518 static void free_domain_mem(void *vaddr)
519 {
520         kmem_cache_free(iommu_domain_cache, vaddr);
521 }
522
523 static inline void * alloc_devinfo_mem(void)
524 {
525         return kmem_cache_alloc(iommu_devinfo_cache, GFP_ATOMIC);
526 }
527
528 static inline void free_devinfo_mem(void *vaddr)
529 {
530         kmem_cache_free(iommu_devinfo_cache, vaddr);
531 }
532
533 struct iova *alloc_iova_mem(void)
534 {
535         return kmem_cache_alloc(iommu_iova_cache, GFP_ATOMIC);
536 }
537
538 void free_iova_mem(struct iova *iova)
539 {
540         kmem_cache_free(iommu_iova_cache, iova);
541 }
542
543
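/*
 * Pick the largest adjusted guest address width (agaw) not exceeding
 * max_gaw that is advertised in this IOMMU's SAGAW capability field,
 * or return -1 if none of the candidate widths is supported.
 */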
544 static int __iommu_calculate_agaw(struct intel_iommu *iommu, int max_gaw)
545 {
546         unsigned long sagaw;
547         int agaw = -1;
548
549         sagaw = cap_sagaw(iommu->cap);
550         for (agaw = width_to_agaw(max_gaw);
551              agaw >= 0; agaw--) {
552                 if (test_bit(agaw, &sagaw))
553                         break;
554         }
555
556         return agaw;
557 }
558
559 /*
560  * Calculate max SAGAW for each iommu.
561  */
562 int iommu_calculate_max_sagaw(struct intel_iommu *iommu)
563 {
564         return __iommu_calculate_agaw(iommu, MAX_AGAW_WIDTH);
565 }
566
567 /*
568  * calculate agaw for each iommu.
569  * "SAGAW" may be different across iommus; use a default agaw, and
570  * fall back to a smaller supported agaw for iommus that don't support the default.
571  */
572 int iommu_calculate_agaw(struct intel_iommu *iommu)
573 {
574         return __iommu_calculate_agaw(iommu, DEFAULT_DOMAIN_ADDRESS_WIDTH);
575 }
576
577 /* This function only returns a single iommu in a domain */
578 static struct intel_iommu *domain_get_iommu(struct dmar_domain *domain)
579 {
580         int iommu_id;
581
582         /* si_domain and vm domain should not get here. */
583         BUG_ON(domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE);
584         BUG_ON(domain->flags & DOMAIN_FLAG_STATIC_IDENTITY);
585
586         iommu_id = find_first_bit(domain->iommu_bmp, g_num_of_iommus);
587         if (iommu_id < 0 || iommu_id >= g_num_of_iommus)
588                 return NULL;
589
590         return g_iommus[iommu_id];
591 }
592
593 static void domain_update_iommu_coherency(struct dmar_domain *domain)
594 {
595         struct dmar_drhd_unit *drhd;
596         struct intel_iommu *iommu;
597         int i, found = 0;
598
599         domain->iommu_coherency = 1;
600
601         for_each_set_bit(i, domain->iommu_bmp, g_num_of_iommus) {
602                 found = 1;
603                 if (!ecap_coherent(g_iommus[i]->ecap)) {
604                         domain->iommu_coherency = 0;
605                         break;
606                 }
607         }
608         if (found)
609                 return;
610
611         /* No hardware attached; use lowest common denominator */
612         rcu_read_lock();
613         for_each_active_iommu(iommu, drhd) {
614                 if (!ecap_coherent(iommu->ecap)) {
615                         domain->iommu_coherency = 0;
616                         break;
617                 }
618         }
619         rcu_read_unlock();
620 }
621
622 static void domain_update_iommu_snooping(struct dmar_domain *domain)
623 {
624         int i;
625
626         domain->iommu_snooping = 1;
627
628         for_each_set_bit(i, domain->iommu_bmp, g_num_of_iommus) {
629                 if (!ecap_sc_support(g_iommus[i]->ecap)) {
630                         domain->iommu_snooping = 0;
631                         break;
632                 }
633         }
634 }
635
636 static void domain_update_iommu_superpage(struct dmar_domain *domain)
637 {
638         struct dmar_drhd_unit *drhd;
639         struct intel_iommu *iommu = NULL;
640         int mask = 0xf;
641
642         if (!intel_iommu_superpage) {
643                 domain->iommu_superpage = 0;
644                 return;
645         }
646
647         /* set iommu_superpage to the smallest common denominator */
648         rcu_read_lock();
649         for_each_active_iommu(iommu, drhd) {
650                 mask &= cap_super_page_val(iommu->cap);
651                 if (!mask) {
652                         break;
653                 }
654         }
655         rcu_read_unlock();
656
657         domain->iommu_superpage = fls(mask);
658 }
659
660 /* Some capabilities may be different across iommus */
661 static void domain_update_iommu_cap(struct dmar_domain *domain)
662 {
663         domain_update_iommu_coherency(domain);
664         domain_update_iommu_snooping(domain);
665         domain_update_iommu_superpage(domain);
666 }
667
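/*
 * Find the IOMMU (DRHD unit) whose device scope covers @dev and report
 * the bus/devfn the IOMMU will see for it.  A device matches if it is
 * listed explicitly in a DRHD scope, sits behind a listed PCI bridge
 * (matched via the bridge's subordinate bus range), or is a PCI device
 * on a segment whose DRHD has the INCLUDE_ALL flag.  Returns NULL if
 * no active IOMMU claims the device.
 */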
668 static struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devfn)
669 {
670         struct dmar_drhd_unit *drhd = NULL;
671         struct intel_iommu *iommu;
672         struct device *tmp;
673         struct pci_dev *ptmp, *pdev = NULL;
674         u16 segment;
675         int i;
676
677         if (dev_is_pci(dev)) {
678                 pdev = to_pci_dev(dev);
679                 segment = pci_domain_nr(pdev->bus);
680         } else if (ACPI_COMPANION(dev))
681                 dev = &ACPI_COMPANION(dev)->dev;
682
683         rcu_read_lock();
684         for_each_active_iommu(iommu, drhd) {
685                 if (pdev && segment != drhd->segment)
686                         continue;
687
688                 for_each_active_dev_scope(drhd->devices,
689                                           drhd->devices_cnt, i, tmp) {
690                         if (tmp == dev) {
691                                 *bus = drhd->devices[i].bus;
692                                 *devfn = drhd->devices[i].devfn;
693                                 goto out;
694                         }
695
696                         if (!pdev || !dev_is_pci(tmp))
697                                 continue;
698
699                         ptmp = to_pci_dev(tmp);
700                         if (ptmp->subordinate &&
701                             ptmp->subordinate->number <= pdev->bus->number &&
702                             ptmp->subordinate->busn_res.end >= pdev->bus->number)
703                                 goto got_pdev;
704                 }
705
706                 if (pdev && drhd->include_all) {
707                 got_pdev:
708                         *bus = pdev->bus->number;
709                         *devfn = pdev->devfn;
710                         goto out;
711                 }
712         }
713         iommu = NULL;
714  out:
715         rcu_read_unlock();
716
717         return iommu;
718 }
719
720 static void domain_flush_cache(struct dmar_domain *domain,
721                                void *addr, int size)
722 {
723         if (!domain->iommu_coherency)
724                 clflush_cache_range(addr, size);
725 }
726
727 /* Gets context entry for a given bus and devfn */
728 static struct context_entry * device_to_context_entry(struct intel_iommu *iommu,
729                 u8 bus, u8 devfn)
730 {
731         struct root_entry *root;
732         struct context_entry *context;
733         unsigned long phy_addr;
734         unsigned long flags;
735
736         spin_lock_irqsave(&iommu->lock, flags);
737         root = &iommu->root_entry[bus];
738         context = get_context_addr_from_root(root);
739         if (!context) {
740                 context = (struct context_entry *)
741                                 alloc_pgtable_page(iommu->node);
742                 if (!context) {
743                         spin_unlock_irqrestore(&iommu->lock, flags);
744                         return NULL;
745                 }
746                 __iommu_flush_cache(iommu, (void *)context, CONTEXT_SIZE);
747                 phy_addr = virt_to_phys((void *)context);
748                 set_root_value(root, phy_addr);
749                 set_root_present(root);
750                 __iommu_flush_cache(iommu, root, sizeof(*root));
751         }
752         spin_unlock_irqrestore(&iommu->lock, flags);
753         return &context[devfn];
754 }
755
756 static int device_context_mapped(struct intel_iommu *iommu, u8 bus, u8 devfn)
757 {
758         struct root_entry *root;
759         struct context_entry *context;
760         int ret;
761         unsigned long flags;
762
763         spin_lock_irqsave(&iommu->lock, flags);
764         root = &iommu->root_entry[bus];
765         context = get_context_addr_from_root(root);
766         if (!context) {
767                 ret = 0;
768                 goto out;
769         }
770         ret = context_present(&context[devfn]);
771 out:
772         spin_unlock_irqrestore(&iommu->lock, flags);
773         return ret;
774 }
775
776 static void clear_context_table(struct intel_iommu *iommu, u8 bus, u8 devfn)
777 {
778         struct root_entry *root;
779         struct context_entry *context;
780         unsigned long flags;
781
782         spin_lock_irqsave(&iommu->lock, flags);
783         root = &iommu->root_entry[bus];
784         context = get_context_addr_from_root(root);
785         if (context) {
786                 context_clear_entry(&context[devfn]);
787                 __iommu_flush_cache(iommu, &context[devfn], \
788                         sizeof(*context));
789         }
790         spin_unlock_irqrestore(&iommu->lock, flags);
791 }
792
793 static void free_context_table(struct intel_iommu *iommu)
794 {
795         struct root_entry *root;
796         int i;
797         unsigned long flags;
798         struct context_entry *context;
799
800         spin_lock_irqsave(&iommu->lock, flags);
801         if (!iommu->root_entry) {
802                 goto out;
803         }
804         for (i = 0; i < ROOT_ENTRY_NR; i++) {
805                 root = &iommu->root_entry[i];
806                 context = get_context_addr_from_root(root);
807                 if (context)
808                         free_pgtable_page(context);
809         }
810         free_pgtable_page(iommu->root_entry);
811         iommu->root_entry = NULL;
812 out:
813         spin_unlock_irqrestore(&iommu->lock, flags);
814 }
815
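/*
 * Walk the domain's page table to the PTE that maps @pfn, allocating
 * intermediate page-table pages on the way down as needed.  The walk
 * stops at *target_level, or, when *target_level is 0, at the first
 * superpage or non-present entry; *target_level is updated to the level
 * actually reached.  Returns NULL if @pfn is beyond the domain's
 * address width or a page-table page could not be allocated.
 */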
816 static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
817                                       unsigned long pfn, int *target_level)
818 {
819         int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
820         struct dma_pte *parent, *pte = NULL;
821         int level = agaw_to_level(domain->agaw);
822         int offset;
823
824         BUG_ON(!domain->pgd);
825
826         if (addr_width < BITS_PER_LONG && pfn >> addr_width)
827                 /* Address beyond IOMMU's addressing capabilities. */
828                 return NULL;
829
830         parent = domain->pgd;
831
832         while (1) {
833                 void *tmp_page;
834
835                 offset = pfn_level_offset(pfn, level);
836                 pte = &parent[offset];
837                 if (!*target_level && (dma_pte_superpage(pte) || !dma_pte_present(pte)))
838                         break;
839                 if (level == *target_level)
840                         break;
841
842                 if (!dma_pte_present(pte)) {
843                         uint64_t pteval;
844
845                         tmp_page = alloc_pgtable_page(domain->nid);
846
847                         if (!tmp_page)
848                                 return NULL;
849
850                         domain_flush_cache(domain, tmp_page, VTD_PAGE_SIZE);
851                         pteval = ((uint64_t)virt_to_dma_pfn(tmp_page) << VTD_PAGE_SHIFT) | DMA_PTE_READ | DMA_PTE_WRITE;
852                         if (cmpxchg64(&pte->val, 0ULL, pteval)) {
853                                 /* Someone else set it while we were thinking; use theirs. */
854                                 free_pgtable_page(tmp_page);
855                         } else {
856                                 dma_pte_addr(pte);
857                                 domain_flush_cache(domain, pte, sizeof(*pte));
858                         }
859                 }
860                 if (level == 1)
861                         break;
862
863                 parent = phys_to_virt(dma_pte_addr(pte));
864                 level--;
865         }
866
867         if (!*target_level)
868                 *target_level = level;
869
870         return pte;
871 }
872
873
874 /* return address's pte at specific level */
875 static struct dma_pte *dma_pfn_level_pte(struct dmar_domain *domain,
876                                          unsigned long pfn,
877                                          int level, int *large_page)
878 {
879         struct dma_pte *parent, *pte = NULL;
880         int total = agaw_to_level(domain->agaw);
881         int offset;
882
883         parent = domain->pgd;
884         while (level <= total) {
885                 offset = pfn_level_offset(pfn, total);
886                 pte = &parent[offset];
887                 if (level == total)
888                         return pte;
889
890                 if (!dma_pte_present(pte)) {
891                         *large_page = total;
892                         break;
893                 }
894
895                 if (dma_pte_superpage(pte)) {
896                         *large_page = total;
897                         return pte;
898                 }
899
900                 parent = phys_to_virt(dma_pte_addr(pte));
901                 total--;
902         }
903         return NULL;
904 }
905
906 /* clear last level ptes; a tlb flush should follow */
907 static void dma_pte_clear_range(struct dmar_domain *domain,
908                                 unsigned long start_pfn,
909                                 unsigned long last_pfn)
910 {
911         int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
912         unsigned int large_page = 1;
913         struct dma_pte *first_pte, *pte;
914
915         BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width);
916         BUG_ON(addr_width < BITS_PER_LONG && last_pfn >> addr_width);
917         BUG_ON(start_pfn > last_pfn);
918
919         /* we don't need lock here; nobody else touches the iova range */
920         do {
921                 large_page = 1;
922                 first_pte = pte = dma_pfn_level_pte(domain, start_pfn, 1, &large_page);
923                 if (!pte) {
924                         start_pfn = align_to_level(start_pfn + 1, large_page + 1);
925                         continue;
926                 }
927                 do {
928                         dma_clear_pte(pte);
929                         start_pfn += lvl_to_nr_pages(large_page);
930                         pte++;
931                 } while (start_pfn <= last_pfn && !first_pte_in_page(pte));
932
933                 domain_flush_cache(domain, first_pte,
934                                    (void *)pte - (void *)first_pte);
935
936         } while (start_pfn && start_pfn <= last_pfn);
937 }
938
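/*
 * Recursively free page-table pages backing [start_pfn, last_pfn].  A
 * page-table page is freed (and its parent PTE cleared) only when the
 * range covers its entire address span; partially covered levels are
 * descended into but left in place.
 */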
939 static void dma_pte_free_level(struct dmar_domain *domain, int level,
940                                struct dma_pte *pte, unsigned long pfn,
941                                unsigned long start_pfn, unsigned long last_pfn)
942 {
943         pfn = max(start_pfn, pfn);
944         pte = &pte[pfn_level_offset(pfn, level)];
945
946         do {
947                 unsigned long level_pfn;
948                 struct dma_pte *level_pte;
949
950                 if (!dma_pte_present(pte) || dma_pte_superpage(pte))
951                         goto next;
952
953                 level_pfn = pfn & level_mask(level - 1);
954                 level_pte = phys_to_virt(dma_pte_addr(pte));
955
956                 if (level > 2)
957                         dma_pte_free_level(domain, level - 1, level_pte,
958                                            level_pfn, start_pfn, last_pfn);
959
960                 /* If range covers entire pagetable, free it */
961                 if (!(start_pfn > level_pfn ||
962                       last_pfn < level_pfn + level_size(level) - 1)) {
963                         dma_clear_pte(pte);
964                         domain_flush_cache(domain, pte, sizeof(*pte));
965                         free_pgtable_page(level_pte);
966                 }
967 next:
968                 pfn += level_size(level);
969         } while (!first_pte_in_page(++pte) && pfn <= last_pfn);
970 }
971
972 /* free page table pages. last level pte should already be cleared */
973 static void dma_pte_free_pagetable(struct dmar_domain *domain,
974                                    unsigned long start_pfn,
975                                    unsigned long last_pfn)
976 {
977         int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
978
979         BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width);
980         BUG_ON(addr_width < BITS_PER_LONG && last_pfn >> addr_width);
981         BUG_ON(start_pfn > last_pfn);
982
983         /* We don't need lock here; nobody else touches the iova range */
984         dma_pte_free_level(domain, agaw_to_level(domain->agaw),
985                            domain->pgd, 0, start_pfn, last_pfn);
986
987         /* free pgd */
988         if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) {
989                 free_pgtable_page(domain->pgd);
990                 domain->pgd = NULL;
991         }
992 }
993
994 /* When a page at a given level is being unlinked from its parent, we don't
995    need to *modify* it at all. All we need to do is make a list of all the
996    pages which can be freed just as soon as we've flushed the IOTLB and we
997    know the hardware page-walk will no longer touch them.
998    The 'pte' argument is the *parent* PTE, pointing to the page that is to
999    be freed. */
1000 static struct page *dma_pte_list_pagetables(struct dmar_domain *domain,
1001                                             int level, struct dma_pte *pte,
1002                                             struct page *freelist)
1003 {
1004         struct page *pg;
1005
1006         pg = pfn_to_page(dma_pte_addr(pte) >> PAGE_SHIFT);
1007         pg->freelist = freelist;
1008         freelist = pg;
1009
1010         if (level == 1)
1011                 return freelist;
1012
1013         pte = page_address(pg);
1014         do {
1015                 if (dma_pte_present(pte) && !dma_pte_superpage(pte))
1016                         freelist = dma_pte_list_pagetables(domain, level - 1,
1017                                                            pte, freelist);
1018                 pte++;
1019         } while (!first_pte_in_page(pte));
1020
1021         return freelist;
1022 }
1023
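/*
 * Clear all PTEs covering [start_pfn, last_pfn] at this level.  Entries
 * whose whole span lies inside the range are cleared and the page-table
 * pages below them are chained onto @freelist (see
 * dma_pte_list_pagetables()) rather than freed, so they can be released
 * only after the IOTLB flush; partially covered entries are recursed
 * into.  Returns the updated freelist.
 */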
1024 static struct page *dma_pte_clear_level(struct dmar_domain *domain, int level,
1025                                         struct dma_pte *pte, unsigned long pfn,
1026                                         unsigned long start_pfn,
1027                                         unsigned long last_pfn,
1028                                         struct page *freelist)
1029 {
1030         struct dma_pte *first_pte = NULL, *last_pte = NULL;
1031
1032         pfn = max(start_pfn, pfn);
1033         pte = &pte[pfn_level_offset(pfn, level)];
1034
1035         do {
1036                 unsigned long level_pfn;
1037
1038                 if (!dma_pte_present(pte))
1039                         goto next;
1040
1041                 level_pfn = pfn & level_mask(level);
1042
1043                 /* If range covers entire pagetable, free it */
1044                 if (start_pfn <= level_pfn &&
1045                     last_pfn >= level_pfn + level_size(level) - 1) {
1046                         /* These subordinate page tables are going away entirely. Don't
1047                            bother to clear them; we're just going to *free* them. */
1048                         if (level > 1 && !dma_pte_superpage(pte))
1049                                 freelist = dma_pte_list_pagetables(domain, level - 1, pte, freelist);
1050
1051                         dma_clear_pte(pte);
1052                         if (!first_pte)
1053                                 first_pte = pte;
1054                         last_pte = pte;
1055                 } else if (level > 1) {
1056                         /* Recurse down into a level that isn't *entirely* obsolete */
1057                         freelist = dma_pte_clear_level(domain, level - 1,
1058                                                        phys_to_virt(dma_pte_addr(pte)),
1059                                                        level_pfn, start_pfn, last_pfn,
1060                                                        freelist);
1061                 }
1062 next:
1063                 pfn += level_size(level);
1064         } while (!first_pte_in_page(++pte) && pfn <= last_pfn);
1065
1066         if (first_pte)
1067                 domain_flush_cache(domain, first_pte,
1068                                    (void *)++last_pte - (void *)first_pte);
1069
1070         return freelist;
1071 }
1072
1073 /* We can't just free the pages because the IOMMU may still be walking
1074    the page tables, and may have cached the intermediate levels. The
1075    pages can only be freed after the IOTLB flush has been done. */
1076 struct page *domain_unmap(struct dmar_domain *domain,
1077                           unsigned long start_pfn,
1078                           unsigned long last_pfn)
1079 {
1080         int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
1081         struct page *freelist = NULL;
1082
1083         BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width);
1084         BUG_ON(addr_width < BITS_PER_LONG && last_pfn >> addr_width);
1085         BUG_ON(start_pfn > last_pfn);
1086
1087         /* we don't need lock here; nobody else touches the iova range */
1088         freelist = dma_pte_clear_level(domain, agaw_to_level(domain->agaw),
1089                                        domain->pgd, 0, start_pfn, last_pfn, NULL);
1090
1091         /* free pgd */
1092         if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) {
1093                 struct page *pgd_page = virt_to_page(domain->pgd);
1094                 pgd_page->freelist = freelist;
1095                 freelist = pgd_page;
1096
1097                 domain->pgd = NULL;
1098         }
1099
1100         return freelist;
1101 }
1102
1103 void dma_free_pagelist(struct page *freelist)
1104 {
1105         struct page *pg;
1106
1107         while ((pg = freelist)) {
1108                 freelist = pg->freelist;
1109                 free_pgtable_page(page_address(pg));
1110         }
1111 }
1112
1113 /* iommu handling */
1114 static int iommu_alloc_root_entry(struct intel_iommu *iommu)
1115 {
1116         struct root_entry *root;
1117         unsigned long flags;
1118
1119         root = (struct root_entry *)alloc_pgtable_page(iommu->node);
1120         if (!root)
1121                 return -ENOMEM;
1122
1123         __iommu_flush_cache(iommu, root, ROOT_SIZE);
1124
1125         spin_lock_irqsave(&iommu->lock, flags);
1126         iommu->root_entry = root;
1127         spin_unlock_irqrestore(&iommu->lock, flags);
1128
1129         return 0;
1130 }
1131
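/*
 * Point the hardware at the root entry table: program DMAR_RTADDR_REG
 * with its physical address, issue the Set Root Table Pointer command
 * and wait for the RTPS status bit to confirm completion.
 */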
1132 static void iommu_set_root_entry(struct intel_iommu *iommu)
1133 {
1134         void *addr;
1135         u32 sts;
1136         unsigned long flag;
1137
1138         addr = iommu->root_entry;
1139
1140         raw_spin_lock_irqsave(&iommu->register_lock, flag);
1141         dmar_writeq(iommu->reg + DMAR_RTADDR_REG, virt_to_phys(addr));
1142
1143         writel(iommu->gcmd | DMA_GCMD_SRTP, iommu->reg + DMAR_GCMD_REG);
1144
1145         /* Make sure hardware completes it */
1146         IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
1147                       readl, (sts & DMA_GSTS_RTPS), sts);
1148
1149         raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1150 }
1151
1152 static void iommu_flush_write_buffer(struct intel_iommu *iommu)
1153 {
1154         u32 val;
1155         unsigned long flag;
1156
1157         if (!rwbf_quirk && !cap_rwbf(iommu->cap))
1158                 return;
1159
1160         raw_spin_lock_irqsave(&iommu->register_lock, flag);
1161         writel(iommu->gcmd | DMA_GCMD_WBF, iommu->reg + DMAR_GCMD_REG);
1162
1163         /* Make sure hardware completes it */
1164         IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
1165                       readl, (!(val & DMA_GSTS_WBFS)), val);
1166
1167         raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1168 }
1169
1170 /* return value determines if we need a write buffer flush */
1171 static void __iommu_flush_context(struct intel_iommu *iommu,
1172                                   u16 did, u16 source_id, u8 function_mask,
1173                                   u64 type)
1174 {
1175         u64 val = 0;
1176         unsigned long flag;
1177
1178         switch (type) {
1179         case DMA_CCMD_GLOBAL_INVL:
1180                 val = DMA_CCMD_GLOBAL_INVL;
1181                 break;
1182         case DMA_CCMD_DOMAIN_INVL:
1183                 val = DMA_CCMD_DOMAIN_INVL|DMA_CCMD_DID(did);
1184                 break;
1185         case DMA_CCMD_DEVICE_INVL:
1186                 val = DMA_CCMD_DEVICE_INVL|DMA_CCMD_DID(did)
1187                         | DMA_CCMD_SID(source_id) | DMA_CCMD_FM(function_mask);
1188                 break;
1189         default:
1190                 BUG();
1191         }
1192         val |= DMA_CCMD_ICC;
1193
1194         raw_spin_lock_irqsave(&iommu->register_lock, flag);
1195         dmar_writeq(iommu->reg + DMAR_CCMD_REG, val);
1196
1197         /* Make sure hardware completes it */
1198         IOMMU_WAIT_OP(iommu, DMAR_CCMD_REG,
1199                 dmar_readq, (!(val & DMA_CCMD_ICC)), val);
1200
1201         raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1202 }
1203
1204 /* return value determines if we need a write buffer flush */
1205 static void __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did,
1206                                 u64 addr, unsigned int size_order, u64 type)
1207 {
1208         int tlb_offset = ecap_iotlb_offset(iommu->ecap);
1209         u64 val = 0, val_iva = 0;
1210         unsigned long flag;
1211
1212         switch (type) {
1213         case DMA_TLB_GLOBAL_FLUSH:
1214                 /* global flush doesn't need to set IVA_REG */
1215                 val = DMA_TLB_GLOBAL_FLUSH|DMA_TLB_IVT;
1216                 break;
1217         case DMA_TLB_DSI_FLUSH:
1218                 val = DMA_TLB_DSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
1219                 break;
1220         case DMA_TLB_PSI_FLUSH:
1221                 val = DMA_TLB_PSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
1222                 /* IH bit is passed in as part of address */
1223                 val_iva = size_order | addr;
1224                 break;
1225         default:
1226                 BUG();
1227         }
1228         /* Note: set drain read/write */
1229 #if 0
1230         /*
1231          * This is probably to be super secure.. Looks like we can
1232          * ignore it without any impact.
1233          */
1234         if (cap_read_drain(iommu->cap))
1235                 val |= DMA_TLB_READ_DRAIN;
1236 #endif
1237         if (cap_write_drain(iommu->cap))
1238                 val |= DMA_TLB_WRITE_DRAIN;
1239
1240         raw_spin_lock_irqsave(&iommu->register_lock, flag);
1241         /* Note: Only uses first TLB reg currently */
1242         if (val_iva)
1243                 dmar_writeq(iommu->reg + tlb_offset, val_iva);
1244         dmar_writeq(iommu->reg + tlb_offset + 8, val);
1245
1246         /* Make sure hardware completes it */
1247         IOMMU_WAIT_OP(iommu, tlb_offset + 8,
1248                 dmar_readq, (!(val & DMA_TLB_IVT)), val);
1249
1250         raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1251
1252         /* check IOTLB invalidation granularity */
1253         if (DMA_TLB_IAIG(val) == 0)
1254                 printk(KERN_ERR"IOMMU: flush IOTLB failed\n");
1255         if (DMA_TLB_IAIG(val) != DMA_TLB_IIRG(type))
1256                 pr_debug("IOMMU: tlb flush request %Lx, actual %Lx\n",
1257                         (unsigned long long)DMA_TLB_IIRG(type),
1258                         (unsigned long long)DMA_TLB_IAIG(val));
1259 }
1260
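/*
 * Return the device_domain_info for (bus, devfn) if device-IOTLB
 * invalidation can be used for it: the IOMMU must support device IOTLBs
 * and have queued invalidation enabled, and the device must be a PCI
 * device with an ATS capability covered by a matching ATSR unit.
 * Returns NULL otherwise.
 */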
1261 static struct device_domain_info *
1262 iommu_support_dev_iotlb (struct dmar_domain *domain, struct intel_iommu *iommu,
1263                          u8 bus, u8 devfn)
1264 {
1265         int found = 0;
1266         unsigned long flags;
1267         struct device_domain_info *info;
1268         struct pci_dev *pdev;
1269
1270         if (!ecap_dev_iotlb_support(iommu->ecap))
1271                 return NULL;
1272
1273         if (!iommu->qi)
1274                 return NULL;
1275
1276         spin_lock_irqsave(&device_domain_lock, flags);
1277         list_for_each_entry(info, &domain->devices, link)
1278                 if (info->bus == bus && info->devfn == devfn) {
1279                         found = 1;
1280                         break;
1281                 }
1282         spin_unlock_irqrestore(&device_domain_lock, flags);
1283
1284         if (!found || !info->dev || !dev_is_pci(info->dev))
1285                 return NULL;
1286
1287         pdev = to_pci_dev(info->dev);
1288
1289         if (!pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ATS))
1290                 return NULL;
1291
1292         if (!dmar_find_matched_atsr_unit(pdev))
1293                 return NULL;
1294
1295         return info;
1296 }
1297
1298 static void iommu_enable_dev_iotlb(struct device_domain_info *info)
1299 {
1300         if (!info || !dev_is_pci(info->dev))
1301                 return;
1302
1303         pci_enable_ats(to_pci_dev(info->dev), VTD_PAGE_SHIFT);
1304 }
1305
1306 static void iommu_disable_dev_iotlb(struct device_domain_info *info)
1307 {
1308         if (!info->dev || !dev_is_pci(info->dev) ||
1309             !pci_ats_enabled(to_pci_dev(info->dev)))
1310                 return;
1311
1312         pci_disable_ats(to_pci_dev(info->dev));
1313 }
1314
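/*
 * Issue a queued device-IOTLB invalidation of the given address range
 * to every ATS-enabled PCI device currently attached to the domain.
 */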
1315 static void iommu_flush_dev_iotlb(struct dmar_domain *domain,
1316                                   u64 addr, unsigned mask)
1317 {
1318         u16 sid, qdep;
1319         unsigned long flags;
1320         struct device_domain_info *info;
1321
1322         spin_lock_irqsave(&device_domain_lock, flags);
1323         list_for_each_entry(info, &domain->devices, link) {
1324                 struct pci_dev *pdev;
1325                 if (!info->dev || !dev_is_pci(info->dev))
1326                         continue;
1327
1328                 pdev = to_pci_dev(info->dev);
1329                 if (!pci_ats_enabled(pdev))
1330                         continue;
1331
1332                 sid = info->bus << 8 | info->devfn;
1333                 qdep = pci_ats_queue_depth(pdev);
1334                 qi_flush_dev_iotlb(info->iommu, sid, qdep, addr, mask);
1335         }
1336         spin_unlock_irqrestore(&device_domain_lock, flags);
1337 }
1338
1339 static void iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did,
1340                                   unsigned long pfn, unsigned int pages, int ih, int map)
1341 {
1342         unsigned int mask = ilog2(__roundup_pow_of_two(pages));
1343         uint64_t addr = (uint64_t)pfn << VTD_PAGE_SHIFT;
1344
1345         BUG_ON(pages == 0);
1346
1347         if (ih)
1348                 ih = 1 << 6;
1349         /*
1350          * Fall back to domain-selective flush if there is no PSI support or the
1351          * size is too big.
1352          * PSI requires the page size to be a power of two, and the base address
1353          * to be naturally aligned to the size.
1354          */
1355         if (!cap_pgsel_inv(iommu->cap) || mask > cap_max_amask_val(iommu->cap))
1356                 iommu->flush.flush_iotlb(iommu, did, 0, 0,
1357                                                 DMA_TLB_DSI_FLUSH);
1358         else
1359                 iommu->flush.flush_iotlb(iommu, did, addr | ih, mask,
1360                                                 DMA_TLB_PSI_FLUSH);
1361
1362         /*
1363          * In caching mode, changes of pages from non-present to present require
1364          * flush. However, device IOTLB doesn't need to be flushed in this case.
1365          */
1366         if (!cap_caching_mode(iommu->cap) || !map)
1367                 iommu_flush_dev_iotlb(iommu->domains[did], addr, mask);
1368 }
1369
1370 static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu)
1371 {
1372         u32 pmen;
1373         unsigned long flags;
1374
1375         raw_spin_lock_irqsave(&iommu->register_lock, flags);
1376         pmen = readl(iommu->reg + DMAR_PMEN_REG);
1377         pmen &= ~DMA_PMEN_EPM;
1378         writel(pmen, iommu->reg + DMAR_PMEN_REG);
1379
1380         /* wait for the protected region status bit to clear */
1381         IOMMU_WAIT_OP(iommu, DMAR_PMEN_REG,
1382                 readl, !(pmen & DMA_PMEN_PRS), pmen);
1383
1384         raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
1385 }
1386
1387 static int iommu_enable_translation(struct intel_iommu *iommu)
1388 {
1389         u32 sts;
1390         unsigned long flags;
1391
1392         raw_spin_lock_irqsave(&iommu->register_lock, flags);
1393         iommu->gcmd |= DMA_GCMD_TE;
1394         writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
1395
1396         /* Make sure hardware completes it */
1397         IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
1398                       readl, (sts & DMA_GSTS_TES), sts);
1399
1400         raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
1401         return 0;
1402 }
1403
1404 static int iommu_disable_translation(struct intel_iommu *iommu)
1405 {
1406         u32 sts;
1407         unsigned long flag;
1408
1409         raw_spin_lock_irqsave(&iommu->register_lock, flag);
1410         iommu->gcmd &= ~DMA_GCMD_TE;
1411         writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
1412
1413         /* Make sure hardware completes it */
1414         IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
1415                       readl, (!(sts & DMA_GSTS_TES)), sts);
1416
1417         raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1418         return 0;
1419 }
1420
1421
1422 static int iommu_init_domains(struct intel_iommu *iommu)
1423 {
1424         unsigned long ndomains;
1425         unsigned long nlongs;
1426
1427         ndomains = cap_ndoms(iommu->cap);
1428         pr_debug("IOMMU%d: Number of Domains supported <%ld>\n",
1429                  iommu->seq_id, ndomains);
1430         nlongs = BITS_TO_LONGS(ndomains);
1431
1432         spin_lock_init(&iommu->lock);
1433
1434         /* TBD: there might be 64K domains,
1435          * consider other allocation for future chip
1436          */
1437         iommu->domain_ids = kcalloc(nlongs, sizeof(unsigned long), GFP_KERNEL);
1438         if (!iommu->domain_ids) {
1439                 pr_err("IOMMU%d: allocating domain id array failed\n",
1440                        iommu->seq_id);
1441                 return -ENOMEM;
1442         }
1443         iommu->domains = kcalloc(ndomains, sizeof(struct dmar_domain *),
1444                         GFP_KERNEL);
1445         if (!iommu->domains) {
1446                 pr_err("IOMMU%d: allocating domain array failed\n",
1447                        iommu->seq_id);
1448                 kfree(iommu->domain_ids);
1449                 iommu->domain_ids = NULL;
1450                 return -ENOMEM;
1451         }
1452
1453         /*
1454          * if Caching mode is set, then invalid translations are tagged
1455          * with domainid 0. Hence we need to pre-allocate it.
1456          */
1457         if (cap_caching_mode(iommu->cap))
1458                 set_bit(0, iommu->domain_ids);
1459         return 0;
1460 }
1461
1462 static void free_dmar_iommu(struct intel_iommu *iommu)
1463 {
1464         struct dmar_domain *domain;
1465         int i, count;
1466         unsigned long flags;
1467
1468         if ((iommu->domains) && (iommu->domain_ids)) {
1469                 for_each_set_bit(i, iommu->domain_ids, cap_ndoms(iommu->cap)) {
1470                         /*
1471                          * Domain id 0 is reserved for invalid translation
1472                          * if hardware supports caching mode.
1473                          */
1474                         if (cap_caching_mode(iommu->cap) && i == 0)
1475                                 continue;
1476
1477                         domain = iommu->domains[i];
1478                         clear_bit(i, iommu->domain_ids);
1479
1480                         spin_lock_irqsave(&domain->iommu_lock, flags);
1481                         count = --domain->iommu_count;
1482                         spin_unlock_irqrestore(&domain->iommu_lock, flags);
1483                         if (count == 0)
1484                                 domain_exit(domain);
1485                 }
1486         }
1487
1488         if (iommu->gcmd & DMA_GCMD_TE)
1489                 iommu_disable_translation(iommu);
1490
1491         kfree(iommu->domains);
1492         kfree(iommu->domain_ids);
1493         iommu->domains = NULL;
1494         iommu->domain_ids = NULL;
1495
1496         g_iommus[iommu->seq_id] = NULL;
1497
1498         /* free context mapping */
1499         free_context_table(iommu);
1500 }
1501
1502 static struct dmar_domain *alloc_domain(bool vm)
1503 {
1504         /* domain id for virtual machine, it won't be set in context */
1505         static atomic_t vm_domid = ATOMIC_INIT(0);
1506         struct dmar_domain *domain;
1507
1508         domain = alloc_domain_mem();
1509         if (!domain)
1510                 return NULL;
1511
1512         domain->nid = -1;
1513         domain->iommu_count = 0;
1514         memset(domain->iommu_bmp, 0, sizeof(domain->iommu_bmp));
1515         domain->flags = 0;
1516         spin_lock_init(&domain->iommu_lock);
1517         INIT_LIST_HEAD(&domain->devices);
1518         if (vm) {
1519                 domain->id = atomic_inc_return(&vm_domid);
1520                 domain->flags = DOMAIN_FLAG_VIRTUAL_MACHINE;
1521         }
1522
1523         return domain;
1524 }
1525
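/*
 * Bind @domain to @iommu: allocate the first free domain id on this
 * IOMMU, record the domain in iommu->domains[] and mark the IOMMU in
 * the domain's iommu bitmap.  Fails with -ENOMEM when the IOMMU has no
 * free domain ids left.
 */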
1526 static int iommu_attach_domain(struct dmar_domain *domain,
1527                                struct intel_iommu *iommu)
1528 {
1529         int num;
1530         unsigned long ndomains;
1531         unsigned long flags;
1532
1533         ndomains = cap_ndoms(iommu->cap);
1534
1535         spin_lock_irqsave(&iommu->lock, flags);
1536
1537         num = find_first_zero_bit(iommu->domain_ids, ndomains);
1538         if (num >= ndomains) {
1539                 spin_unlock_irqrestore(&iommu->lock, flags);
1540                 printk(KERN_ERR "IOMMU: no free domain ids\n");
1541                 return -ENOMEM;
1542         }
1543
1544         domain->id = num;
1545         domain->iommu_count++;
1546         set_bit(num, iommu->domain_ids);
1547         set_bit(iommu->seq_id, domain->iommu_bmp);
1548         iommu->domains[num] = domain;
1549         spin_unlock_irqrestore(&iommu->lock, flags);
1550
1551         return 0;
1552 }
1553
1554 static void iommu_detach_domain(struct dmar_domain *domain,
1555                                 struct intel_iommu *iommu)
1556 {
1557         unsigned long flags;
1558         int num, ndomains;
1559
1560         spin_lock_irqsave(&iommu->lock, flags);
1561         ndomains = cap_ndoms(iommu->cap);
1562         for_each_set_bit(num, iommu->domain_ids, ndomains) {
1563                 if (iommu->domains[num] == domain) {
1564                         clear_bit(num, iommu->domain_ids);
1565                         iommu->domains[num] = NULL;
1566                         break;
1567                 }
1568         }
1569         spin_unlock_irqrestore(&iommu->lock, flags);
1570 }
1571
1572 static struct iova_domain reserved_iova_list;
1573 static struct lock_class_key reserved_rbtree_key;
1574
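/*
 * Build the global list of IOVA ranges that must never be handed out to a
 * device: the IOAPIC MMIO window and every PCI MMIO resource, so that DMA
 * neither hits the interrupt-delivery range nor gets routed peer-to-peer.
 */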
1575 static int dmar_init_reserved_ranges(void)
1576 {
1577         struct pci_dev *pdev = NULL;
1578         struct iova *iova;
1579         int i;
1580
1581         init_iova_domain(&reserved_iova_list, DMA_32BIT_PFN);
1582
1583         lockdep_set_class(&reserved_iova_list.iova_rbtree_lock,
1584                 &reserved_rbtree_key);
1585
1586         /* IOAPIC ranges shouldn't be accessed by DMA */
1587         iova = reserve_iova(&reserved_iova_list, IOVA_PFN(IOAPIC_RANGE_START),
1588                 IOVA_PFN(IOAPIC_RANGE_END));
1589         if (!iova) {
1590                 printk(KERN_ERR "Reserve IOAPIC range failed\n");
1591                 return -ENODEV;
1592         }
1593
1594         /* Reserve all PCI MMIO to avoid peer-to-peer access */
1595         for_each_pci_dev(pdev) {
1596                 struct resource *r;
1597
1598                 for (i = 0; i < PCI_NUM_RESOURCES; i++) {
1599                         r = &pdev->resource[i];
1600                         if (!r->flags || !(r->flags & IORESOURCE_MEM))
1601                                 continue;
1602                         iova = reserve_iova(&reserved_iova_list,
1603                                             IOVA_PFN(r->start),
1604                                             IOVA_PFN(r->end));
1605                         if (!iova) {
1606                                 printk(KERN_ERR "Reserve iova failed\n");
1607                                 return -ENODEV;
1608                         }
1609                 }
1610         }
1611         return 0;
1612 }
1613
1614 static void domain_reserve_special_ranges(struct dmar_domain *domain)
1615 {
1616         copy_reserved_iova(&reserved_iova_list, &domain->iovad);
1617 }
1618
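/*
 * Round a guest address width up to the nearest width the page-table walk
 * can express: 12 offset bits plus a whole number of 9-bit levels, capped
 * at 64.  For example, gaw = 48 is returned unchanged (48 = 12 + 4*9),
 * while gaw = 50 is rounded up to 57.
 */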
1619 static inline int guestwidth_to_adjustwidth(int gaw)
1620 {
1621         int agaw;
1622         int r = (gaw - 12) % 9;
1623
1624         if (r == 0)
1625                 agaw = gaw;
1626         else
1627                 agaw = gaw + 9 - r;
1628         if (agaw > 64)
1629                 agaw = 64;
1630         return agaw;
1631 }
1632
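/*
 * Finish initializing a freshly attached domain: set up its IOVA allocator
 * and reserved ranges, clamp the requested guest width to what the hardware
 * supports, pick a matching (or the next larger supported) AGAW, cache the
 * coherency/snooping/superpage capabilities and allocate the top-level page
 * directory.
 */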
1633 static int domain_init(struct dmar_domain *domain, int guest_width)
1634 {
1635         struct intel_iommu *iommu;
1636         int adjust_width, agaw;
1637         unsigned long sagaw;
1638
1639         init_iova_domain(&domain->iovad, DMA_32BIT_PFN);
1640         domain_reserve_special_ranges(domain);
1641
1642         /* calculate AGAW */
1643         iommu = domain_get_iommu(domain);
1644         if (guest_width > cap_mgaw(iommu->cap))
1645                 guest_width = cap_mgaw(iommu->cap);
1646         domain->gaw = guest_width;
1647         adjust_width = guestwidth_to_adjustwidth(guest_width);
1648         agaw = width_to_agaw(adjust_width);
1649         sagaw = cap_sagaw(iommu->cap);
1650         if (!test_bit(agaw, &sagaw)) {
1651                 /* hardware doesn't support it, choose a bigger one */
1652                 pr_debug("IOMMU: hardware doesn't support agaw %d\n", agaw);
1653                 agaw = find_next_bit(&sagaw, 5, agaw);
1654                 if (agaw >= 5)
1655                         return -ENODEV;
1656         }
1657         domain->agaw = agaw;
1658
1659         if (ecap_coherent(iommu->ecap))
1660                 domain->iommu_coherency = 1;
1661         else
1662                 domain->iommu_coherency = 0;
1663
1664         if (ecap_sc_support(iommu->ecap))
1665                 domain->iommu_snooping = 1;
1666         else
1667                 domain->iommu_snooping = 0;
1668
1669         if (intel_iommu_superpage)
1670                 domain->iommu_superpage = fls(cap_super_page_val(iommu->cap));
1671         else
1672                 domain->iommu_superpage = 0;
1673
1674         domain->nid = iommu->node;
1675
1676         /* always allocate the top pgd */
1677         domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
1678         if (!domain->pgd)
1679                 return -ENOMEM;
1680         __iommu_flush_cache(iommu, domain->pgd, PAGE_SIZE);
1681         return 0;
1682 }
1683
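/*
 * Destroy a domain: detach its devices, release its IOVA space, unmap and
 * free its page tables, detach it from every IOMMU it is still bound to and
 * finally free the domain structure itself.
 */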
1684 static void domain_exit(struct dmar_domain *domain)
1685 {
1686         struct dmar_drhd_unit *drhd;
1687         struct intel_iommu *iommu;
1688         struct page *freelist = NULL;
1689
1690         /* Domain 0 is reserved, so don't process it */
1691         if (!domain)
1692                 return;
1693
1694         /* Flush any lazy unmaps that may reference this domain */
1695         if (!intel_iommu_strict)
1696                 flush_unmaps_timeout(0);
1697
1698         /* remove associated devices */
1699         domain_remove_dev_info(domain);
1700
1701         /* destroy iovas */
1702         put_iova_domain(&domain->iovad);
1703
1704         freelist = domain_unmap(domain, 0, DOMAIN_MAX_PFN(domain->gaw));
1705
1706         /* clear attached or cached domains */
1707         rcu_read_lock();
1708         for_each_active_iommu(iommu, drhd)
1709                 if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE ||
1710                     test_bit(iommu->seq_id, domain->iommu_bmp))
1711                         iommu_detach_domain(domain, iommu);
1712         rcu_read_unlock();
1713
1714         dma_free_pagelist(freelist);
1715
1716         free_domain_mem(domain);
1717 }
1718
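/*
 * Program the context entry for (bus, devfn) on @iommu so that it points at
 * @domain's page tables (or pass-through), then flush the context/IOTLB
 * caches as caching mode requires.  VM and static-identity domains may be
 * assigned a different, per-IOMMU domain id here.
 */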
1719 static int domain_context_mapping_one(struct dmar_domain *domain,
1720                                       struct intel_iommu *iommu,
1721                                       u8 bus, u8 devfn, int translation)
1722 {
1723         struct context_entry *context;
1724         unsigned long flags;
1725         struct dma_pte *pgd;
1726         unsigned long num;
1727         unsigned long ndomains;
1728         int id;
1729         int agaw;
1730         struct device_domain_info *info = NULL;
1731
1732         pr_debug("Set context mapping for %02x:%02x.%d\n",
1733                 bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
1734
1735         BUG_ON(!domain->pgd);
1736         BUG_ON(translation != CONTEXT_TT_PASS_THROUGH &&
1737                translation != CONTEXT_TT_MULTI_LEVEL);
1738
1739         context = device_to_context_entry(iommu, bus, devfn);
1740         if (!context)
1741                 return -ENOMEM;
1742         spin_lock_irqsave(&iommu->lock, flags);
1743         if (context_present(context)) {
1744                 spin_unlock_irqrestore(&iommu->lock, flags);
1745                 return 0;
1746         }
1747
1748         id = domain->id;
1749         pgd = domain->pgd;
1750
1751         if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE ||
1752             domain->flags & DOMAIN_FLAG_STATIC_IDENTITY) {
1753                 int found = 0;
1754
1755                 /* find an available domain id for this device in iommu */
1756                 ndomains = cap_ndoms(iommu->cap);
1757                 for_each_set_bit(num, iommu->domain_ids, ndomains) {
1758                         if (iommu->domains[num] == domain) {
1759                                 id = num;
1760                                 found = 1;
1761                                 break;
1762                         }
1763                 }
1764
1765                 if (found == 0) {
1766                         num = find_first_zero_bit(iommu->domain_ids, ndomains);
1767                         if (num >= ndomains) {
1768                                 spin_unlock_irqrestore(&iommu->lock, flags);
1769                                 printk(KERN_ERR "IOMMU: no free domain ids\n");
1770                                 return -EFAULT;
1771                         }
1772
1773                         set_bit(num, iommu->domain_ids);
1774                         iommu->domains[num] = domain;
1775                         id = num;
1776                 }
1777
1778                 /* Skip top levels of page tables for
1779                  * iommu which has less agaw than default.
1780                  * Unnecessary for PT mode.
1781                  */
1782                 if (translation != CONTEXT_TT_PASS_THROUGH) {
1783                         for (agaw = domain->agaw; agaw != iommu->agaw; agaw--) {
1784                                 pgd = phys_to_virt(dma_pte_addr(pgd));
1785                                 if (!dma_pte_present(pgd)) {
1786                                         spin_unlock_irqrestore(&iommu->lock, flags);
1787                                         return -ENOMEM;
1788                                 }
1789                         }
1790                 }
1791         }
1792
1793         context_set_domain_id(context, id);
1794
1795         if (translation != CONTEXT_TT_PASS_THROUGH) {
1796                 info = iommu_support_dev_iotlb(domain, iommu, bus, devfn);
1797                 translation = info ? CONTEXT_TT_DEV_IOTLB :
1798                                      CONTEXT_TT_MULTI_LEVEL;
1799         }
1800         /*
1801          * In pass through mode, AW must be programmed to indicate the largest
1802          * AGAW value supported by hardware. And ASR is ignored by hardware.
1803          */
1804         if (unlikely(translation == CONTEXT_TT_PASS_THROUGH))
1805                 context_set_address_width(context, iommu->msagaw);
1806         else {
1807                 context_set_address_root(context, virt_to_phys(pgd));
1808                 context_set_address_width(context, iommu->agaw);
1809         }
1810
1811         context_set_translation_type(context, translation);
1812         context_set_fault_enable(context);
1813         context_set_present(context);
1814         domain_flush_cache(domain, context, sizeof(*context));
1815
1816         /*
1817          * It's a non-present to present mapping. If hardware doesn't cache
1818          * non-present entries we only need to flush the write-buffer. If it
1819          * _does_ cache non-present entries, then it does so in the special
1820          * domain #0, which we have to flush:
1821          */
1822         if (cap_caching_mode(iommu->cap)) {
1823                 iommu->flush.flush_context(iommu, 0,
1824                                            (((u16)bus) << 8) | devfn,
1825                                            DMA_CCMD_MASK_NOBIT,
1826                                            DMA_CCMD_DEVICE_INVL);
1827                 iommu->flush.flush_iotlb(iommu, domain->id, 0, 0, DMA_TLB_DSI_FLUSH);
1828         } else {
1829                 iommu_flush_write_buffer(iommu);
1830         }
1831         iommu_enable_dev_iotlb(info);
1832         spin_unlock_irqrestore(&iommu->lock, flags);
1833
1834         spin_lock_irqsave(&domain->iommu_lock, flags);
1835         if (!test_and_set_bit(iommu->seq_id, domain->iommu_bmp)) {
1836                 domain->iommu_count++;
1837                 if (domain->iommu_count == 1)
1838                         domain->nid = iommu->node;
1839                 domain_update_iommu_cap(domain);
1840         }
1841         spin_unlock_irqrestore(&domain->iommu_lock, flags);
1842         return 0;
1843 }
1844
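/*
 * Set up context entries for @dev and, when the device sits behind a
 * PCIe-to-PCI bridge, for every bridge on the path as well, because
 * transactions from devices behind such a bridge may appear with the
 * bridge's source-id.
 */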
1845 static int
1846 domain_context_mapping(struct dmar_domain *domain, struct device *dev,
1847                        int translation)
1848 {
1849         int ret;
1850         struct pci_dev *pdev, *tmp, *parent;
1851         struct intel_iommu *iommu;
1852         u8 bus, devfn;
1853
1854         iommu = device_to_iommu(dev, &bus, &devfn);
1855         if (!iommu)
1856                 return -ENODEV;
1857
1858         ret = domain_context_mapping_one(domain, iommu, bus, devfn,
1859                                          translation);
1860         if (ret || !dev_is_pci(dev))
1861                 return ret;
1862
1863         /* dependent device mapping */
1864         pdev = to_pci_dev(dev);
1865         tmp = pci_find_upstream_pcie_bridge(pdev);
1866         if (!tmp)
1867                 return 0;
1868         /* Secondary interface's bus number and devfn 0 */
1869         parent = pdev->bus->self;
1870         while (parent != tmp) {
1871                 ret = domain_context_mapping_one(domain, iommu,
1872                                                  parent->bus->number,
1873                                                  parent->devfn, translation);
1874                 if (ret)
1875                         return ret;
1876                 parent = parent->bus->self;
1877         }
1878         if (pci_is_pcie(tmp)) /* this is a PCIe-to-PCI bridge */
1879                 return domain_context_mapping_one(domain, iommu,
1880                                         tmp->subordinate->number, 0,
1881                                         translation);
1882         else /* this is a legacy PCI bridge */
1883                 return domain_context_mapping_one(domain, iommu,
1884                                                   tmp->bus->number,
1885                                                   tmp->devfn,
1886                                                   translation);
1887 }
1888
1889 static int domain_context_mapped(struct device *dev)
1890 {
1891         int ret;
1892         struct pci_dev *pdev, *tmp, *parent;
1893         struct intel_iommu *iommu;
1894         u8 bus, devfn;
1895
1896         iommu = device_to_iommu(dev, &bus, &devfn);
1897         if (!iommu)
1898                 return -ENODEV;
1899
1900         ret = device_context_mapped(iommu, bus, devfn);
1901         if (!ret || !dev_is_pci(dev))
1902                 return ret;
1903
1904         /* dependent device mapping */
1905         pdev = to_pci_dev(dev);
1906         tmp = pci_find_upstream_pcie_bridge(pdev);
1907         if (!tmp)
1908                 return ret;
1909         /* Secondary interface's bus number and devfn 0 */
1910         parent = pdev->bus->self;
1911         while (parent != tmp) {
1912                 ret = device_context_mapped(iommu, parent->bus->number,
1913                                             parent->devfn);
1914                 if (!ret)
1915                         return ret;
1916                 parent = parent->bus->self;
1917         }
1918         if (pci_is_pcie(tmp))
1919                 return device_context_mapped(iommu, tmp->subordinate->number,
1920                                              0);
1921         else
1922                 return device_context_mapped(iommu, tmp->bus->number,
1923                                              tmp->devfn);
1924 }
1925
1926 /* Returns a number of VTD pages, but aligned to MM page size */
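/* e.g. with 4KiB pages, offset 0x800 and size 0x1000 span two pages. */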
1927 static inline unsigned long aligned_nrpages(unsigned long host_addr,
1928                                             size_t size)
1929 {
1930         host_addr &= ~PAGE_MASK;
1931         return PAGE_ALIGN(host_addr + size) >> VTD_PAGE_SHIFT;
1932 }
1933
1934 /* Return largest possible superpage level for a given mapping */
1935 static inline int hardware_largepage_caps(struct dmar_domain *domain,
1936                                           unsigned long iov_pfn,
1937                                           unsigned long phy_pfn,
1938                                           unsigned long pages)
1939 {
1940         int support, level = 1;
1941         unsigned long pfnmerge;
1942
1943         support = domain->iommu_superpage;
1944
1945         /* To use a large page, the virtual *and* physical addresses
1946            must be aligned to 2MiB/1GiB/etc. Lower bits set in either
1947            of them will mean we have to use smaller pages. So just
1948            merge them and check both at once. */
1949         pfnmerge = iov_pfn | phy_pfn;
1950
1951         while (support && !(pfnmerge & ~VTD_STRIDE_MASK)) {
1952                 pages >>= VTD_STRIDE_SHIFT;
1953                 if (!pages)
1954                         break;
1955                 pfnmerge >>= VTD_STRIDE_SHIFT;
1956                 level++;
1957                 support--;
1958         }
1959         return level;
1960 }
1961
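/*
 * Core mapping loop shared by domain_sg_mapping() and domain_pfn_mapping():
 * walk @nr_pages of IOVA space, picking the largest superpage size that both
 * the hardware and the address alignment allow, fill in the PTEs, and flush
 * the CPU cache for each page of PTEs as it is completed.
 */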
1962 static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
1963                             struct scatterlist *sg, unsigned long phys_pfn,
1964                             unsigned long nr_pages, int prot)
1965 {
1966         struct dma_pte *first_pte = NULL, *pte = NULL;
1967         phys_addr_t uninitialized_var(pteval);
1968         int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
1969         unsigned long sg_res;
1970         unsigned int largepage_lvl = 0;
1971         unsigned long lvl_pages = 0;
1972
1973         BUG_ON(addr_width < BITS_PER_LONG && (iov_pfn + nr_pages - 1) >> addr_width);
1974
1975         if ((prot & (DMA_PTE_READ|DMA_PTE_WRITE)) == 0)
1976                 return -EINVAL;
1977
1978         prot &= DMA_PTE_READ | DMA_PTE_WRITE | DMA_PTE_SNP;
1979
1980         if (sg)
1981                 sg_res = 0;
1982         else {
1983                 sg_res = nr_pages + 1;
1984                 pteval = ((phys_addr_t)phys_pfn << VTD_PAGE_SHIFT) | prot;
1985         }
1986
1987         while (nr_pages > 0) {
1988                 uint64_t tmp;
1989
1990                 if (!sg_res) {
1991                         sg_res = aligned_nrpages(sg->offset, sg->length);
1992                         sg->dma_address = ((dma_addr_t)iov_pfn << VTD_PAGE_SHIFT) + sg->offset;
1993                         sg->dma_length = sg->length;
1994                         pteval = page_to_phys(sg_page(sg)) | prot;
1995                         phys_pfn = pteval >> VTD_PAGE_SHIFT;
1996                 }
1997
1998                 if (!pte) {
1999                         largepage_lvl = hardware_largepage_caps(domain, iov_pfn, phys_pfn, sg_res);
2000
2001                         first_pte = pte = pfn_to_dma_pte(domain, iov_pfn, &largepage_lvl);
2002                         if (!pte)
2003                                 return -ENOMEM;
2004                         /* It is a large page */
2005                         if (largepage_lvl > 1) {
2006                                 pteval |= DMA_PTE_LARGE_PAGE;
2007                                 /* Ensure that old small page tables are removed to make room
2008                                    for superpage, if they exist. */
2009                                 dma_pte_clear_range(domain, iov_pfn,
2010                                                     iov_pfn + lvl_to_nr_pages(largepage_lvl) - 1);
2011                                 dma_pte_free_pagetable(domain, iov_pfn,
2012                                                        iov_pfn + lvl_to_nr_pages(largepage_lvl) - 1);
2013                         } else {
2014                                 pteval &= ~(uint64_t)DMA_PTE_LARGE_PAGE;
2015                         }
2016
2017                 }
2018                 /* We don't need a lock here; nobody else
2019                  * touches the iova range.
2020                  */
2021                 tmp = cmpxchg64_local(&pte->val, 0ULL, pteval);
2022                 if (tmp) {
2023                         static int dumps = 5;
2024                         printk(KERN_CRIT "ERROR: DMA PTE for vPFN 0x%lx already set (to %llx not %llx)\n",
2025                                iov_pfn, tmp, (unsigned long long)pteval);
2026                         if (dumps) {
2027                                 dumps--;
2028                                 debug_dma_dump_mappings(NULL);
2029                         }
2030                         WARN_ON(1);
2031                 }
2032
2033                 lvl_pages = lvl_to_nr_pages(largepage_lvl);
2034
2035                 BUG_ON(nr_pages < lvl_pages);
2036                 BUG_ON(sg_res < lvl_pages);
2037
2038                 nr_pages -= lvl_pages;
2039                 iov_pfn += lvl_pages;
2040                 phys_pfn += lvl_pages;
2041                 pteval += lvl_pages * VTD_PAGE_SIZE;
2042                 sg_res -= lvl_pages;
2043
2044                 /* If the next PTE would be the first in a new page, then we
2045                    need to flush the cache on the entries we've just written.
2046                    And then we'll need to recalculate 'pte', so clear it and
2047                    let it get set again in the if (!pte) block above.
2048
2049                    If we're done (!nr_pages) we need to flush the cache too.
2050
2051                    Also if we've been setting superpages, we may need to
2052                    recalculate 'pte' and switch back to smaller pages for the
2053                    end of the mapping, if the trailing size is not enough to
2054                    use another superpage (i.e. sg_res < lvl_pages). */
2055                 pte++;
2056                 if (!nr_pages || first_pte_in_page(pte) ||
2057                     (largepage_lvl > 1 && sg_res < lvl_pages)) {
2058                         domain_flush_cache(domain, first_pte,
2059                                            (void *)pte - (void *)first_pte);
2060                         pte = NULL;
2061                 }
2062
2063                 if (!sg_res && nr_pages)
2064                         sg = sg_next(sg);
2065         }
2066         return 0;
2067 }
2068
2069 static inline int domain_sg_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
2070                                     struct scatterlist *sg, unsigned long nr_pages,
2071                                     int prot)
2072 {
2073         return __domain_mapping(domain, iov_pfn, sg, 0, nr_pages, prot);
2074 }
2075
2076 static inline int domain_pfn_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
2077                                      unsigned long phys_pfn, unsigned long nr_pages,
2078                                      int prot)
2079 {
2080         return __domain_mapping(domain, iov_pfn, NULL, phys_pfn, nr_pages, prot);
2081 }
2082
2083 static void iommu_detach_dev(struct intel_iommu *iommu, u8 bus, u8 devfn)
2084 {
2085         if (!iommu)
2086                 return;
2087
2088         clear_context_table(iommu, bus, devfn);
2089         iommu->flush.flush_context(iommu, 0, 0, 0,
2090                                            DMA_CCMD_GLOBAL_INVL);
2091         iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
2092 }
2093
2094 static inline void unlink_domain_info(struct device_domain_info *info)
2095 {
2096         assert_spin_locked(&device_domain_lock);
2097         list_del(&info->link);
2098         list_del(&info->global);
2099         if (info->dev)
2100                 info->dev->archdata.iommu = NULL;
2101 }
2102
2103 static void domain_remove_dev_info(struct dmar_domain *domain)
2104 {
2105         struct device_domain_info *info, *tmp;
2106         unsigned long flags, flags2;
2107
2108         spin_lock_irqsave(&device_domain_lock, flags);
2109         list_for_each_entry_safe(info, tmp, &domain->devices, link) {
2110                 unlink_domain_info(info);
2111                 spin_unlock_irqrestore(&device_domain_lock, flags);
2112
2113                 iommu_disable_dev_iotlb(info);
2114                 iommu_detach_dev(info->iommu, info->bus, info->devfn);
2115
2116                 if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE) {
2117                         iommu_detach_dependent_devices(info->iommu, info->dev);
2118                         /* clear this iommu in iommu_bmp, update iommu count
2119                          * and capabilities
2120                          */
2121                         spin_lock_irqsave(&domain->iommu_lock, flags2);
2122                         if (test_and_clear_bit(info->iommu->seq_id,
2123                                                domain->iommu_bmp)) {
2124                                 domain->iommu_count--;
2125                                 domain_update_iommu_cap(domain);
2126                         }
2127                         spin_unlock_irqrestore(&domain->iommu_lock, flags2);
2128                 }
2129
2130                 free_devinfo_mem(info);
2131                 spin_lock_irqsave(&device_domain_lock, flags);
2132         }
2133         spin_unlock_irqrestore(&device_domain_lock, flags);
2134 }
2135
2136 /*
2137  * find_domain
2138  * Note: we use struct device->archdata.iommu to store the info
2139  */
2140 static struct dmar_domain *find_domain(struct device *dev)
2141 {
2142         struct device_domain_info *info;
2143
2144         /* No lock here, assumes no domain exit in normal case */
2145         info = dev->archdata.iommu;
2146         if (info)
2147                 return info->domain;
2148         return NULL;
2149 }
2150
2151 static inline struct device_domain_info *
2152 dmar_search_domain_by_dev_info(int segment, int bus, int devfn)
2153 {
2154         struct device_domain_info *info;
2155
2156         list_for_each_entry(info, &device_domain_list, global)
2157                 if (info->iommu->segment == segment && info->bus == bus &&
2158                     info->devfn == devfn)
2159                         return info;
2160
2161         return NULL;
2162 }
2163
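/*
 * Record that (bus, devfn) / @dev is handled by @domain.  If somebody else
 * registered the device first, the already-installed domain is returned and
 * the caller must free the one it passed in.
 */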
2164 static struct dmar_domain *dmar_insert_dev_info(struct intel_iommu *iommu,
2165                                                 int bus, int devfn,
2166                                                 struct device *dev,
2167                                                 struct dmar_domain *domain)
2168 {
2169         struct dmar_domain *found = NULL;
2170         struct device_domain_info *info;
2171         unsigned long flags;
2172
2173         info = alloc_devinfo_mem();
2174         if (!info)
2175                 return NULL;
2176
2177         info->bus = bus;
2178         info->devfn = devfn;
2179         info->dev = dev;
2180         info->domain = domain;
2181         info->iommu = iommu;
2182         if (!dev)
2183                 domain->flags |= DOMAIN_FLAG_P2P_MULTIPLE_DEVICES;
2184
2185         spin_lock_irqsave(&device_domain_lock, flags);
2186         if (dev)
2187                 found = find_domain(dev);
2188         else {
2189                 struct device_domain_info *info2;
2190                 info2 = dmar_search_domain_by_dev_info(iommu->segment, bus, devfn);
2191                 if (info2)
2192                         found = info2->domain;
2193         }
2194         if (found) {
2195                 spin_unlock_irqrestore(&device_domain_lock, flags);
2196                 free_devinfo_mem(info);
2197                 /* Caller must free the original domain */
2198                 return found;
2199         }
2200
2201         list_add(&info->link, &domain->devices);
2202         list_add(&info->global, &device_domain_list);
2203         if (dev)
2204                 dev->archdata.iommu = info;
2205         spin_unlock_irqrestore(&device_domain_lock, flags);
2206
2207         return domain;
2208 }
2209
2210 /* The domain returned here is fully initialized */
2211 static struct dmar_domain *get_domain_for_dev(struct device *dev, int gaw)
2212 {
2213         struct dmar_domain *domain, *free = NULL;
2214         struct intel_iommu *iommu = NULL;
2215         struct device_domain_info *info;
2216         struct pci_dev *dev_tmp = NULL;
2217         unsigned long flags;
2218         u8 bus, devfn, bridge_bus, bridge_devfn;
2219
2220         domain = find_domain(dev);
2221         if (domain)
2222                 return domain;
2223
2224         if (dev_is_pci(dev)) {
2225                 struct pci_dev *pdev = to_pci_dev(dev);
2226                 u16 segment;
2227
2228                 segment = pci_domain_nr(pdev->bus);
2229                 dev_tmp = pci_find_upstream_pcie_bridge(pdev);
2230                 if (dev_tmp) {
2231                         if (pci_is_pcie(dev_tmp)) {
2232                                 bridge_bus = dev_tmp->subordinate->number;
2233                                 bridge_devfn = 0;
2234                         } else {
2235                                 bridge_bus = dev_tmp->bus->number;
2236                                 bridge_devfn = dev_tmp->devfn;
2237                         }
2238                         spin_lock_irqsave(&device_domain_lock, flags);
2239                         info = dmar_search_domain_by_dev_info(segment,
2240                                                               bridge_bus,
2241                                                               bridge_devfn);
2242                         if (info) {
2243                                 iommu = info->iommu;
2244                                 domain = info->domain;
2245                         }
2246                         spin_unlock_irqrestore(&device_domain_lock, flags);
2247                         /* pcie-pci bridge already has a domain, use it */
2248                         if (info)
2249                                 goto found_domain;
2250                 }
2251         }
2252
2253         iommu = device_to_iommu(dev, &bus, &devfn);
2254         if (!iommu)
2255                 goto error;
2256
2257         /* Allocate and initialize new domain for the device */
2258         domain = alloc_domain(false);
2259         if (!domain)
2260                 goto error;
2261         if (iommu_attach_domain(domain, iommu)) {
2262                 free_domain_mem(domain);
2263                 domain = NULL;
2264                 goto error;
2265         }
2266         free = domain;
2267         if (domain_init(domain, gaw))
2268                 goto error;
2269
2270         /* register pcie-to-pci device */
2271         if (dev_tmp) {
2272                 domain = dmar_insert_dev_info(iommu, bridge_bus, bridge_devfn,
2273                                               NULL, domain);
2274                 if (!domain)
2275                         goto error;
2276         }
2277
2278 found_domain:
2279         domain = dmar_insert_dev_info(iommu, bus, devfn, dev, domain);
2280 error:
2281         if (free != domain)
2282                 domain_exit(free);
2283
2284         return domain;
2285 }
2286
2287 static int iommu_identity_mapping;
2288 #define IDENTMAP_ALL            1
2289 #define IDENTMAP_GFX            2
2290 #define IDENTMAP_AZALIA         4
2291
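/*
 * Install a 1:1 mapping for [start, end] in @domain: reserve the matching
 * IOVA range, clear any existing PTEs (an RMRR may overlap ordinary memory)
 * and map the range with read/write permission.
 */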
2292 static int iommu_domain_identity_map(struct dmar_domain *domain,
2293                                      unsigned long long start,
2294                                      unsigned long long end)
2295 {
2296         unsigned long first_vpfn = start >> VTD_PAGE_SHIFT;
2297         unsigned long last_vpfn = end >> VTD_PAGE_SHIFT;
2298
2299         if (!reserve_iova(&domain->iovad, dma_to_mm_pfn(first_vpfn),
2300                           dma_to_mm_pfn(last_vpfn))) {
2301                 printk(KERN_ERR "IOMMU: reserve iova failed\n");
2302                 return -ENOMEM;
2303         }
2304
2305         pr_debug("Mapping reserved region %llx-%llx for domain %d\n",
2306                  start, end, domain->id);
2307         /*
2308          * RMRR range might have overlap with physical memory range,
2309          * clear it first
2310          */
2311         dma_pte_clear_range(domain, first_vpfn, last_vpfn);
2312
2313         return domain_pfn_mapping(domain, first_vpfn, first_vpfn,
2314                                   last_vpfn - first_vpfn + 1,
2315                                   DMA_PTE_READ|DMA_PTE_WRITE);
2316 }
2317
2318 static int iommu_prepare_identity_map(struct device *dev,
2319                                       unsigned long long start,
2320                                       unsigned long long end)
2321 {
2322         struct dmar_domain *domain;
2323         int ret;
2324
2325         domain = get_domain_for_dev(dev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
2326         if (!domain)
2327                 return -ENOMEM;
2328
2329         /* For _hardware_ passthrough, don't bother. But for software
2330            passthrough, we do it anyway -- it may indicate a memory
2331            range which is reserved in E820 and therefore didn't get
2332            set up to start with in si_domain */
2333         if (domain == si_domain && hw_pass_through) {
2334                 printk("Ignoring identity map for HW passthrough device %s [0x%Lx - 0x%Lx]\n",
2335                        dev_name(dev), start, end);
2336                 return 0;
2337         }
2338
2339         printk(KERN_INFO
2340                "IOMMU: Setting identity map for device %s [0x%Lx - 0x%Lx]\n",
2341                dev_name(dev), start, end);
2342
2343         if (end < start) {
2344                 WARN(1, "Your BIOS is broken; RMRR ends before it starts!\n"
2345                         "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
2346                         dmi_get_system_info(DMI_BIOS_VENDOR),
2347                         dmi_get_system_info(DMI_BIOS_VERSION),
2348                         dmi_get_system_info(DMI_PRODUCT_VERSION));
2349                 ret = -EIO;
2350                 goto error;
2351         }
2352
2353         if (end >> agaw_to_width(domain->agaw)) {
2354                 WARN(1, "Your BIOS is broken; RMRR exceeds permitted address width (%d bits)\n"
2355                      "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
2356                      agaw_to_width(domain->agaw),
2357                      dmi_get_system_info(DMI_BIOS_VENDOR),
2358                      dmi_get_system_info(DMI_BIOS_VERSION),
2359                      dmi_get_system_info(DMI_PRODUCT_VERSION));
2360                 ret = -EIO;
2361                 goto error;
2362         }
2363
2364         ret = iommu_domain_identity_map(domain, start, end);
2365         if (ret)
2366                 goto error;
2367
2368         /* context entry init */
2369         ret = domain_context_mapping(domain, dev, CONTEXT_TT_MULTI_LEVEL);
2370         if (ret)
2371                 goto error;
2372
2373         return 0;
2374
2375  error:
2376         domain_exit(domain);
2377         return ret;
2378 }
2379
2380 static inline int iommu_prepare_rmrr_dev(struct dmar_rmrr_unit *rmrr,
2381                                          struct device *dev)
2382 {
2383         if (dev->archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO)
2384                 return 0;
2385         return iommu_prepare_identity_map(dev, rmrr->base_address,
2386                                           rmrr->end_address);
2387 }
2388
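/*
 * Legacy floppy workaround: identity-map the first 16MiB for the ISA/LPC
 * bridge so legacy ISA DMA keeps working once translation is enabled.  Note
 * that pci_get_class() returns a referenced device, which must be dropped
 * again with pci_dev_put() once the mapping has been set up.
 */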
2389 #ifdef CONFIG_INTEL_IOMMU_FLOPPY_WA
2390 static inline void iommu_prepare_isa(void)
2391 {
2392         struct pci_dev *pdev;
2393         int ret;
2394
2395         pdev = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, NULL);
2396         if (!pdev)
2397                 return;
2398
2399         printk(KERN_INFO "IOMMU: Prepare 0-16MiB unity mapping for LPC\n");
2400         ret = iommu_prepare_identity_map(&pdev->dev, 0, 16*1024*1024 - 1);
2401
2402         if (ret)
2403                 printk(KERN_ERR "IOMMU: Failed to create 0-16MiB identity map; "
2404                        "floppy might not work\n");
2405
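        /* Drop the reference taken by pci_get_class() above. */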
2406         pci_dev_put(pdev);
2407 }
2408 #else
2409 static inline void iommu_prepare_isa(void)
2410 {
2411         return;
2412 }
2413 #endif /* !CONFIG_INTEL_IOMMU_FLOPPY_WA */
2414
2415 static int md_domain_init(struct dmar_domain *domain, int guest_width);
2416
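/*
 * Create the single static-identity (si) domain used for 1:1 mappings,
 * attach it to every active IOMMU and, unless hardware pass-through is in
 * use, pre-map all usable physical memory into it.
 */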
2417 static int __init si_domain_init(int hw)
2418 {
2419         struct dmar_drhd_unit *drhd;
2420         struct intel_iommu *iommu;
2421         int nid, ret = 0;
2422
2423         si_domain = alloc_domain(false);
2424         if (!si_domain)
2425                 return -EFAULT;
2426
2427         si_domain->flags = DOMAIN_FLAG_STATIC_IDENTITY;
2428
2429         for_each_active_iommu(iommu, drhd) {
2430                 ret = iommu_attach_domain(si_domain, iommu);
2431                 if (ret) {
2432                         domain_exit(si_domain);
2433                         return -EFAULT;
2434                 }
2435         }
2436
2437         if (md_domain_init(si_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
2438                 domain_exit(si_domain);
2439                 return -EFAULT;
2440         }
2441
2442         pr_debug("IOMMU: identity mapping domain is domain %d\n",
2443                  si_domain->id);
2444
2445         if (hw)
2446                 return 0;
2447
2448         for_each_online_node(nid) {
2449                 unsigned long start_pfn, end_pfn;
2450                 int i;
2451
2452                 for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
2453                         ret = iommu_domain_identity_map(si_domain,
2454                                         PFN_PHYS(start_pfn), PFN_PHYS(end_pfn));
2455                         if (ret)
2456                                 return ret;
2457                 }
2458         }
2459
2460         return 0;
2461 }
2462
2463 static int identity_mapping(struct device *dev)
2464 {
2465         struct device_domain_info *info;
2466
2467         if (likely(!iommu_identity_mapping))
2468                 return 0;
2469
2470         info = dev->archdata.iommu;
2471         if (info && info != DUMMY_DEVICE_DOMAIN_INFO)
2472                 return (info->domain == si_domain);
2473
2474         return 0;
2475 }
2476
2477 static int domain_add_dev_info(struct dmar_domain *domain,
2478                                struct device *dev, int translation)
2479 {
2480         struct dmar_domain *ndomain;
2481         struct intel_iommu *iommu;
2482         u8 bus, devfn;
2483         int ret;
2484
2485         iommu = device_to_iommu(dev, &bus, &devfn);
2486         if (!iommu)
2487                 return -ENODEV;
2488
2489         ndomain = dmar_insert_dev_info(iommu, bus, devfn, dev, domain);
2490         if (ndomain != domain)
2491                 return -EBUSY;
2492
2493         ret = domain_context_mapping(domain, dev, translation);
2494         if (ret) {
2495                 domain_remove_one_dev_info(domain, dev);
2496                 return ret;
2497         }
2498
2499         return 0;
2500 }
2501
2502 static bool device_has_rmrr(struct device *dev)
2503 {
2504         struct dmar_rmrr_unit *rmrr;
2505         struct device *tmp;
2506         int i;
2507
2508         rcu_read_lock();
2509         for_each_rmrr_units(rmrr) {
2510                 /*
2511                  * Return TRUE if this RMRR contains the device that
2512                  * is passed in.
2513                  */
2514                 for_each_active_dev_scope(rmrr->devices,
2515                                           rmrr->devices_cnt, i, tmp)
2516                         if (tmp == dev) {
2517                                 rcu_read_unlock();
2518                                 return true;
2519                         }
2520         }
2521         rcu_read_unlock();
2522         return false;
2523 }
2524
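/*
 * Decide whether @dev should live in the static-identity domain.  Devices
 * with RMRRs (other than USB), conventional PCI devices behind bridges and,
 * at run time, devices whose DMA mask cannot cover all of memory are kept
 * out of it.
 */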
2525 static int iommu_should_identity_map(struct device *dev, int startup)
2526 {
2527
2528         if (dev_is_pci(dev)) {
2529                 struct pci_dev *pdev = to_pci_dev(dev);
2530
2531                 /*
2532                  * We want to prevent any device associated with an RMRR from
2533                  * getting placed into the SI Domain. This is done because
2534                  * problems exist when devices are moved in and out of domains
2535                  * and their respective RMRR info is lost. We exempt USB devices
2536                  * from this process due to their usage of RMRRs that are known
2537                  * to not be needed after BIOS hand-off to OS.
2538                  */
2539                 if (device_has_rmrr(dev) &&
2540                     (pdev->class >> 8) != PCI_CLASS_SERIAL_USB)
2541                         return 0;
2542
2543                 if ((iommu_identity_mapping & IDENTMAP_AZALIA) && IS_AZALIA(pdev))
2544                         return 1;
2545
2546                 if ((iommu_identity_mapping & IDENTMAP_GFX) && IS_GFX_DEVICE(pdev))
2547                         return 1;
2548
2549                 if (!(iommu_identity_mapping & IDENTMAP_ALL))
2550                         return 0;
2551
2552                 /*
2553                  * We want to start off with all devices in the 1:1 domain, and
2554                  * take them out later if we find they can't access all of memory.
2555                  *
2556                  * However, we can't do this for PCI devices behind bridges,
2557                  * because all PCI devices behind the same bridge will end up
2558                  * with the same source-id on their transactions.
2559                  *
2560                  * Practically speaking, we can't change things around for these
2561                  * devices at run-time, because we can't be sure there'll be no
2562                  * DMA transactions in flight for any of their siblings.
2563                  *
2564                  * So PCI devices (unless they're on the root bus) as well as
2565                  * their parent PCI-PCI or PCIe-PCI bridges must be left _out_ of
2566                  * the 1:1 domain, just in _case_ one of their siblings turns out
2567                  * not to be able to map all of memory.
2568                  */
2569                 if (!pci_is_pcie(pdev)) {
2570                         if (!pci_is_root_bus(pdev->bus))
2571                                 return 0;
2572                         if (pdev->class >> 8 == PCI_CLASS_BRIDGE_PCI)
2573                                 return 0;
2574                 } else if (pci_pcie_type(pdev) == PCI_EXP_TYPE_PCI_BRIDGE)
2575                         return 0;
2576         } else {
2577                 if (device_has_rmrr(dev))
2578                         return 0;
2579         }
2580
2581         /*
2582          * At boot time, we don't yet know if devices will be 64-bit capable.
2583          * Assume that they will — if they turn out not to be, then we can
2584          * take them out of the 1:1 domain later.
2585          */
2586         if (!startup) {
2587                 /*
2588                  * If the device's dma_mask is less than the system's memory
2589                  * size then this is not a candidate for identity mapping.
2590                  */
2591                 u64 dma_mask = *dev->dma_mask;
2592
2593                 if (dev->coherent_dma_mask &&
2594                     dev->coherent_dma_mask < dma_mask)
2595                         dma_mask = dev->coherent_dma_mask;
2596
2597                 return dma_mask >= dma_get_required_mask(dev);
2598         }
2599
2600         return 1;
2601 }
2602
2603 static int __init dev_prepare_static_identity_mapping(struct device *dev, int hw)
2604 {
2605         int ret;
2606
2607         if (!iommu_should_identity_map(dev, 1))
2608                 return 0;
2609
2610         ret = domain_add_dev_info(si_domain, dev,
2611                                   hw ? CONTEXT_TT_PASS_THROUGH :
2612                                        CONTEXT_TT_MULTI_LEVEL);
2613         if (!ret)
2614                 pr_info("IOMMU: %s identity mapping for device %s\n",
2615                         hw ? "hardware" : "software", dev_name(dev));
2616         else if (ret == -ENODEV)
2617                 /* device not associated with an iommu */
2618                 ret = 0;
2619
2620         return ret;
2621 }
2622
2623
2624 static int __init iommu_prepare_static_identity_mapping(int hw)
2625 {
2626         struct pci_dev *pdev = NULL;
2627         struct dmar_drhd_unit *drhd;
2628         struct intel_iommu *iommu;
2629         struct device *dev;
2630         int i;
2631         int ret = 0;
2632
2633         ret = si_domain_init(hw);
2634         if (ret)
2635                 return -EFAULT;
2636
2637         for_each_pci_dev(pdev) {
2638                 ret = dev_prepare_static_identity_mapping(&pdev->dev, hw);
2639                 if (ret)
2640                         return ret;
2641         }
2642
2643         for_each_active_iommu(iommu, drhd)
2644                 for_each_active_dev_scope(drhd->devices, drhd->devices_cnt, i, dev) {
2645                         struct acpi_device_physical_node *pn;
2646                         struct acpi_device *adev;
2647
2648                         if (dev->bus != &acpi_bus_type)
2649                                 continue;
2650                                 
2651
2652                         adev = to_acpi_device(dev);
2653                         list_for_each_entry(pn, &adev->physical_node_list, node) {
2654                                 ret = dev_prepare_static_identity_mapping(pn->dev, hw);
2655                                 if (ret)
2656                                         break;
2657                         }
2658                         mutex_unlock(&adev->physical_node_lock);
2659                         if (ret)
2660                                 return ret;
2661                 }
2662
2663         return 0;
2664 }
2665
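/*
 * One-time DMAR initialization: count the IOMMUs, allocate their root and
 * context tables, choose queued vs. register-based invalidation, create the
 * identity/RMRR/ISA mappings and finally enable translation on each unit.
 */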
2666 static int __init init_dmars(void)
2667 {
2668         struct dmar_drhd_unit *drhd;
2669         struct dmar_rmrr_unit *rmrr;
2670         struct device *dev;
2671         struct intel_iommu *iommu;
2672         int i, ret;
2673
2674         /*
2675          * for each drhd
2676          *    allocate root
2677          *    initialize and program root entry to not present
2678          * endfor
2679          */
2680         for_each_drhd_unit(drhd) {
2681                 /*
2682                  * lock not needed as this is only incremented in the
2683                  * single-threaded kernel __init code path; all other
2684                  * accesses are read only
2685                  */
2686                 if (g_num_of_iommus < IOMMU_UNITS_SUPPORTED) {
2687                         g_num_of_iommus++;
2688                         continue;
2689                 }
2690                 printk_once(KERN_ERR "intel-iommu: exceeded %d IOMMUs\n",
2691                           IOMMU_UNITS_SUPPORTED);
2692         }
2693
2694         g_iommus = kcalloc(g_num_of_iommus, sizeof(struct intel_iommu *),
2695                         GFP_KERNEL);
2696         if (!g_iommus) {
2697                 printk(KERN_ERR "Allocating global iommu array failed\n");
2698                 ret = -ENOMEM;
2699                 goto error;
2700         }
2701
2702         deferred_flush = kzalloc(g_num_of_iommus *
2703                 sizeof(struct deferred_flush_tables), GFP_KERNEL);
2704         if (!deferred_flush) {
2705                 ret = -ENOMEM;
2706                 goto free_g_iommus;
2707         }
2708
2709         for_each_active_iommu(iommu, drhd) {
2710                 g_iommus[iommu->seq_id] = iommu;
2711
2712                 ret = iommu_init_domains(iommu);
2713                 if (ret)
2714                         goto free_iommu;
2715
2716                 /*
2717                  * TBD:
2718                  * we could share the same root & context tables
2719                  * among all IOMMUs. Need to split it later.
2720                  */
2721                 ret = iommu_alloc_root_entry(iommu);
2722                 if (ret) {
2723                         printk(KERN_ERR "IOMMU: allocate root entry failed\n");
2724                         goto free_iommu;
2725                 }
2726                 if (!ecap_pass_through(iommu->ecap))
2727                         hw_pass_through = 0;
2728         }
2729
2730         /*
2731          * Start from the sane iommu hardware state.
2732          * Start from a sane iommu hardware state.
2733         for_each_active_iommu(iommu, drhd) {
2734                 /*
2735                  * If the queued invalidation is already initialized by us
2736                  * (for example, while enabling interrupt-remapping) then
2737                  * we got the things already rolling from a sane state.
2738                  * we already have things rolling from a sane state.
2739                 if (iommu->qi)
2740                         continue;
2741
2742                 /*
2743                  * Clear any previous faults.
2744                  */
2745                 dmar_fault(-1, iommu);
2746                 /*
2747                  * Disable queued invalidation if supported and already enabled
2748                  * before OS handover.
2749                  */
2750                 dmar_disable_qi(iommu);
2751         }
2752
2753         for_each_active_iommu(iommu, drhd) {
2754                 if (dmar_enable_qi(iommu)) {
2755                         /*
2756                          * Queued Invalidate not enabled, use Register Based
2757                          * Invalidate
2758                          */
2759                         iommu->flush.flush_context = __iommu_flush_context;
2760                         iommu->flush.flush_iotlb = __iommu_flush_iotlb;
2761                         printk(KERN_INFO "IOMMU %d 0x%Lx: using Register based "
2762                                "invalidation\n",
2763                                 iommu->seq_id,
2764                                (unsigned long long)drhd->reg_base_addr);
2765                 } else {
2766                         iommu->flush.flush_context = qi_flush_context;
2767                         iommu->flush.flush_iotlb = qi_flush_iotlb;
2768                         printk(KERN_INFO "IOMMU %d 0x%Lx: using Queued "
2769                                "invalidation\n",
2770                                 iommu->seq_id,
2771                                (unsigned long long)drhd->reg_base_addr);
2772                 }
2773         }
2774
2775         if (iommu_pass_through)
2776                 iommu_identity_mapping |= IDENTMAP_ALL;
2777
2778 #ifdef CONFIG_INTEL_IOMMU_BROKEN_GFX_WA
2779         iommu_identity_mapping |= IDENTMAP_GFX;
2780 #endif
2781
2782         check_tylersburg_isoch();
2783
2784         /*
2785          * If pass through is not set or not enabled, set up context entries
2786          * for identity mappings for rmrr, gfx and isa, and possibly fall back
2787          * to static identity mapping if iommu_identity_mapping is set.
2788          */
2789         if (iommu_identity_mapping) {
2790                 ret = iommu_prepare_static_identity_mapping(hw_pass_through);
2791                 if (ret) {
2792                         printk(KERN_CRIT "Failed to setup IOMMU pass-through\n");
2793                         goto free_iommu;
2794                 }
2795         }
2796         /*
2797          * For each rmrr
2798          *   for each dev attached to rmrr
2799          *   do
2800          *     locate drhd for dev, alloc domain for dev
2801          *     allocate free domain
2802          *     allocate page table entries for rmrr
2803          *     if context not allocated for bus
2804          *           allocate and init context
2805          *           set present in root table for this bus
2806          *     init context with domain, translation etc
2807          *    endfor
2808          * endfor
2809          */
2810         printk(KERN_INFO "IOMMU: Setting RMRR:\n");
2811         for_each_rmrr_units(rmrr) {
2812                 /* some BIOSes list non-existent devices in the DMAR table. */
2813                 for_each_active_dev_scope(rmrr->devices, rmrr->devices_cnt,
2814                                           i, dev) {
2815                         ret = iommu_prepare_rmrr_dev(rmrr, dev);
2816                         if (ret)
2817                                 printk(KERN_ERR
2818                                        "IOMMU: mapping reserved region failed\n");
2819                 }
2820         }
2821
2822         iommu_prepare_isa();
2823
2824         /*
2825          * for each drhd
2826          *   enable fault log
2827          *   global invalidate context cache
2828          *   global invalidate iotlb
2829          *   enable translation
2830          */
2831         for_each_iommu(iommu, drhd) {
2832                 if (drhd->ignored) {
2833                         /*
2834                          * we always have to disable PMRs or DMA may fail on
2835                          * this device
2836                          */
2837                         if (force_on)
2838                                 iommu_disable_protect_mem_regions(iommu);
2839                         continue;
2840                 }
2841
2842                 iommu_flush_write_buffer(iommu);
2843
2844                 ret = dmar_set_interrupt(iommu);
2845                 if (ret)
2846                         goto free_iommu;
2847
2848                 iommu_set_root_entry(iommu);
2849
2850                 iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
2851                 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
2852
2853                 ret = iommu_enable_translation(iommu);
2854                 if (ret)
2855                         goto free_iommu;
2856
2857                 iommu_disable_protect_mem_regions(iommu);
2858         }
2859
2860         return 0;
2861
2862 free_iommu:
2863         for_each_active_iommu(iommu, drhd)
2864                 free_dmar_iommu(iommu);
2865         kfree(deferred_flush);
2866 free_g_iommus:
2867         kfree(g_iommus);
2868 error:
2869         return ret;
2870 }
2871
2872 /* This takes a number of _MM_ pages, not VTD pages */
2873 static struct iova *intel_alloc_iova(struct device *dev,
2874                                      struct dmar_domain *domain,
2875                                      unsigned long nrpages, uint64_t dma_mask)
2876 {
2877         struct iova *iova = NULL;
2878
2879         /* Restrict dma_mask to the width that the iommu can handle */
2880         dma_mask = min_t(uint64_t, DOMAIN_MAX_ADDR(domain->gaw), dma_mask);
2881
2882         if (!dmar_forcedac && dma_mask > DMA_BIT_MASK(32)) {
2883                 /*
2884                  * First try to allocate an io virtual address in
2885                  * DMA_BIT_MASK(32) and if that fails then try allocating
2886                  * from higher range
2887                  */
2888                 iova = alloc_iova(&domain->iovad, nrpages,
2889                                   IOVA_PFN(DMA_BIT_MASK(32)), 1);
2890                 if (iova)
2891                         return iova;
2892         }
2893         iova = alloc_iova(&domain->iovad, nrpages, IOVA_PFN(dma_mask), 1);
2894         if (unlikely(!iova)) {
2895                 printk(KERN_ERR "Allocating %lu-page iova for %s failed\n",
2896                        nrpages, dev_name(dev));
2897                 return NULL;
2898         }
2899
2900         return iova;
2901 }
2902
2903 static struct dmar_domain *__get_valid_domain_for_dev(struct device *dev)
2904 {
2905         struct dmar_domain *domain;
2906         int ret;
2907
2908         domain = get_domain_for_dev(dev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
2909         if (!domain) {
2910                 printk(KERN_ERR "Allocating domain for %s failed\n",
2911                        dev_name(dev));
2912                 return NULL;
2913         }
2914
2915         /* make sure context mapping is ok */
2916         if (unlikely(!domain_context_mapped(dev))) {
2917                 ret = domain_context_mapping(domain, dev, CONTEXT_TT_MULTI_LEVEL);
2918                 if (ret) {
2919                         printk(KERN_ERR "Domain context map for %s failed\n",
2920                                dev_name(dev));
2921                         return NULL;
2922                 }
2923         }
2924
2925         return domain;
2926 }
2927
2928 static inline struct dmar_domain *get_valid_domain_for_dev(struct device *dev)
2929 {
2930         struct device_domain_info *info;
2931
2932         /* No lock here, assumes no domain exit in normal case */
2933         info = dev->archdata.iommu;
2934         if (likely(info))
2935                 return info->domain;
2936
2937         return __get_valid_domain_for_dev(dev);
2938 }
2939
2940 static int iommu_dummy(struct device *dev)
2941 {
2942         return dev->archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO;
2943 }
2944
2945 /* Check if the dev needs to go through the non-identity map and unmap process. */
2946 static int iommu_no_mapping(struct device *dev)
2947 {
2948         int found;
2949
2950         if (iommu_dummy(dev))
2951                 return 1;
2952
2953         if (!iommu_identity_mapping)
2954                 return 0;
2955
2956         found = identity_mapping(dev);
2957         if (found) {
2958                 if (iommu_should_identity_map(dev, 0))
2959                         return 1;
2960                 else {
2961                         /*
2962                          * The device is 32 bit DMA only: remove it from
2963                          * si_domain and fall back to non-identity mapping.
2964                          */
2965                         domain_remove_one_dev_info(si_domain, dev);
2966                         printk(KERN_INFO "32bit %s uses non-identity mapping\n",
2967                                dev_name(dev));
2968                         return 0;
2969                 }
2970         } else {
2971                 /*
2972                  * A 64 bit DMA capable device detached from a VM is put
2973                  * back into si_domain for identity mapping.
2974                  */
2975                 if (iommu_should_identity_map(dev, 0)) {
2976                         int ret;
2977                         ret = domain_add_dev_info(si_domain, dev,
2978                                                   hw_pass_through ?
2979                                                   CONTEXT_TT_PASS_THROUGH :
2980                                                   CONTEXT_TT_MULTI_LEVEL);
2981                         if (!ret) {
2982                                 printk(KERN_INFO "64bit %s uses identity mapping\n",
2983                                        dev_name(dev));
2984                                 return 1;
2985                         }
2986                 }
2987         }
2988
2989         return 0;
2990 }
2991
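/*
 * Core map routine for the DMA API: allocate an iova for the (page
 * aligned) region, install the page-table entries with the read/write
 * permissions implied by 'dir', flush the IOTLB (or write buffer) as
 * required, and return the resulting bus address.  Devices with no
 * IOMMU mapping just get the physical address back; 0 means failure.
 */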
2992 static dma_addr_t __intel_map_single(struct device *dev, phys_addr_t paddr,
2993                                      size_t size, int dir, u64 dma_mask)
2994 {
2995         struct dmar_domain *domain;
2996         phys_addr_t start_paddr;
2997         struct iova *iova;
2998         int prot = 0;
2999         int ret;
3000         struct intel_iommu *iommu;
3001         unsigned long paddr_pfn = paddr >> PAGE_SHIFT;
3002
3003         BUG_ON(dir == DMA_NONE);
3004
3005         if (iommu_no_mapping(dev))
3006                 return paddr;
3007
3008         domain = get_valid_domain_for_dev(dev);
3009         if (!domain)
3010                 return 0;
3011
3012         iommu = domain_get_iommu(domain);
3013         size = aligned_nrpages(paddr, size);
3014
3015         iova = intel_alloc_iova(dev, domain, dma_to_mm_pfn(size), dma_mask);
3016         if (!iova)
3017                 goto error;
3018
3019         /*
3020          * Check if DMAR supports zero-length reads on write only
3021          * mappings.
3022          */
3023         if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL ||
3024                         !cap_zlr(iommu->cap))
3025                 prot |= DMA_PTE_READ;
3026         if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
3027                 prot |= DMA_PTE_WRITE;
3028         /*
3029          * The range paddr .. paddr + size may cover partial pages, so map the
3030          * whole pages.  Note: if two parts of one page are mapped separately,
3031          * we may end up with two guest addresses mapping to the same host
3032          * paddr, but this is not a big problem.
3033          */
3034         ret = domain_pfn_mapping(domain, mm_to_dma_pfn(iova->pfn_lo),
3035                                  mm_to_dma_pfn(paddr_pfn), size, prot);
3036         if (ret)
3037                 goto error;
3038
3039         /* It's a non-present to present mapping. Only flush the IOTLB if in caching mode */
3040         if (cap_caching_mode(iommu->cap))
3041                 iommu_flush_iotlb_psi(iommu, domain->id, mm_to_dma_pfn(iova->pfn_lo), size, 0, 1);
3042         else
3043                 iommu_flush_write_buffer(iommu);
3044
3045         start_paddr = (phys_addr_t)iova->pfn_lo << PAGE_SHIFT;
3046         start_paddr += paddr & ~PAGE_MASK;
3047         return start_paddr;
3048
3049 error:
3050         if (iova)
3051                 __free_iova(&domain->iovad, iova);
3052         printk(KERN_ERR "Device %s request: %zx@%llx dir %d --- failed\n",
3053                 dev_name(dev), size, (unsigned long long)paddr, dir);
3054         return 0;
3055 }
3056
3057 static dma_addr_t intel_map_page(struct device *dev, struct page *page,
3058                                  unsigned long offset, size_t size,
3059                                  enum dma_data_direction dir,
3060                                  struct dma_attrs *attrs)
3061 {
3062         return __intel_map_single(dev, page_to_phys(page) + offset, size,
3063                                   dir, *dev->dma_mask);
3064 }
3065
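/*
 * Drain the per-IOMMU deferred_flush queues: invalidate the IOTLB for
 * each queued range (page-selective in caching mode; one global flush
 * plus per-device IOTLB flushes otherwise), then release the iovas and
 * any freed page-table pages.  Callers must hold async_umap_flush_lock.
 */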
3066 static void flush_unmaps(void)
3067 {
3068         int i, j;
3069
3070         timer_on = 0;
3071
3072         /* just flush them all */
3073         for (i = 0; i < g_num_of_iommus; i++) {
3074                 struct intel_iommu *iommu = g_iommus[i];
3075                 if (!iommu)
3076                         continue;
3077
3078                 if (!deferred_flush[i].next)
3079                         continue;
3080
3081                 /* In caching mode, global flushes make emulation expensive */
3082                 if (!cap_caching_mode(iommu->cap))
3083                         iommu->flush.flush_iotlb(iommu, 0, 0, 0,
3084                                          DMA_TLB_GLOBAL_FLUSH);
3085                 for (j = 0; j < deferred_flush[i].next; j++) {
3086                         unsigned long mask;
3087                         struct iova *iova = deferred_flush[i].iova[j];
3088                         struct dmar_domain *domain = deferred_flush[i].domain[j];
3089
3090                         /* On real hardware multiple invalidations are expensive */
3091                         if (cap_caching_mode(iommu->cap))
3092                                 iommu_flush_iotlb_psi(iommu, domain->id,
3093                                         iova->pfn_lo, iova->pfn_hi - iova->pfn_lo + 1,
3094                                         !deferred_flush[i].freelist[j], 0);
3095                         else {
3096                                 mask = ilog2(mm_to_dma_pfn(iova->pfn_hi - iova->pfn_lo + 1));
3097                                 iommu_flush_dev_iotlb(deferred_flush[i].domain[j],
3098                                                 (uint64_t)iova->pfn_lo << PAGE_SHIFT, mask);
3099                         }
3100                         __free_iova(&deferred_flush[i].domain[j]->iovad, iova);
3101                         if (deferred_flush[i].freelist[j])
3102                                 dma_free_pagelist(deferred_flush[i].freelist[j]);
3103                 }
3104                 deferred_flush[i].next = 0;
3105         }
3106
3107         list_size = 0;
3108 }
3109
3110 static void flush_unmaps_timeout(unsigned long data)
3111 {
3112         unsigned long flags;
3113
3114         spin_lock_irqsave(&async_umap_flush_lock, flags);
3115         flush_unmaps();
3116         spin_unlock_irqrestore(&async_umap_flush_lock, flags);
3117 }
3118
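/*
 * Queue an iova (plus the page-table pages freed for it) for deferred
 * invalidation.  The queue is drained either when it reaches
 * HIGH_WATER_MARK entries or when the 10ms unmap_timer fires.
 */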
3119 static void add_unmap(struct dmar_domain *dom, struct iova *iova, struct page *freelist)
3120 {
3121         unsigned long flags;
3122         int next, iommu_id;
3123         struct intel_iommu *iommu;
3124
3125         spin_lock_irqsave(&async_umap_flush_lock, flags);
3126         if (list_size == HIGH_WATER_MARK)
3127                 flush_unmaps();
3128
3129         iommu = domain_get_iommu(dom);
3130         iommu_id = iommu->seq_id;
3131
3132         next = deferred_flush[iommu_id].next;
3133         deferred_flush[iommu_id].domain[next] = dom;
3134         deferred_flush[iommu_id].iova[next] = iova;
3135         deferred_flush[iommu_id].freelist[next] = freelist;
3136         deferred_flush[iommu_id].next++;
3137
3138         if (!timer_on) {
3139                 mod_timer(&unmap_timer, jiffies + msecs_to_jiffies(10));
3140                 timer_on = 1;
3141         }
3142         list_size++;
3143         spin_unlock_irqrestore(&async_umap_flush_lock, flags);
3144 }
3145
3146 static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
3147                              size_t size, enum dma_data_direction dir,
3148                              struct dma_attrs *attrs)
3149 {
3150         struct dmar_domain *domain;
3151         unsigned long start_pfn, last_pfn;
3152         struct iova *iova;
3153         struct intel_iommu *iommu;
3154         struct page *freelist;
3155
3156         if (iommu_no_mapping(dev))
3157                 return;
3158
3159         domain = find_domain(dev);
3160         BUG_ON(!domain);
3161
3162         iommu = domain_get_iommu(domain);
3163
3164         iova = find_iova(&domain->iovad, IOVA_PFN(dev_addr));
3165         if (WARN_ONCE(!iova, "Driver unmaps unmatched page at PFN %llx\n",
3166                       (unsigned long long)dev_addr))
3167                 return;
3168
3169         start_pfn = mm_to_dma_pfn(iova->pfn_lo);
3170         last_pfn = mm_to_dma_pfn(iova->pfn_hi + 1) - 1;
3171
3172         pr_debug("Device %s unmapping: pfn %lx-%lx\n",
3173                  dev_name(dev), start_pfn, last_pfn);
3174
3175         freelist = domain_unmap(domain, start_pfn, last_pfn);
3176
3177         if (intel_iommu_strict) {
3178                 iommu_flush_iotlb_psi(iommu, domain->id, start_pfn,
3179                                       last_pfn - start_pfn + 1, !freelist, 0);
3180                 /* free iova */
3181                 __free_iova(&domain->iovad, iova);
3182                 dma_free_pagelist(freelist);
3183         } else {
3184                 add_unmap(domain, iova, freelist);
3185                 /*
3186                  * Queue up the release of the unmap to save the ~1/6 of the
3187                  * CPU time otherwise used up by the iotlb flush operation...
3188                  */
3189         }
3190 }
3191
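/*
 * dma_map_ops->alloc: try the contiguous (CMA) allocator first for
 * sleeping allocations, fall back to alloc_pages(), zero the buffer
 * and map it bidirectionally through __intel_map_single().
 */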
3192 static void *intel_alloc_coherent(struct device *dev, size_t size,
3193                                   dma_addr_t *dma_handle, gfp_t flags,
3194                                   struct dma_attrs *attrs)
3195 {
3196         struct page *page = NULL;
3197         int order;
3198
3199         size = PAGE_ALIGN(size);
3200         order = get_order(size);
3201
3202         if (!iommu_no_mapping(dev))
3203                 flags &= ~(GFP_DMA | GFP_DMA32);
3204         else if (dev->coherent_dma_mask < dma_get_required_mask(dev)) {
3205                 if (dev->coherent_dma_mask < DMA_BIT_MASK(32))
3206                         flags |= GFP_DMA;
3207                 else
3208                         flags |= GFP_DMA32;
3209         }
3210
3211         if (flags & __GFP_WAIT) {
3212                 unsigned int count = size >> PAGE_SHIFT;
3213
3214                 page = dma_alloc_from_contiguous(dev, count, order);
3215                 if (page && iommu_no_mapping(dev) &&
3216                     page_to_phys(page) + size > dev->coherent_dma_mask) {
3217                         dma_release_from_contiguous(dev, page, count);
3218                         page = NULL;
3219                 }
3220         }
3221
3222         if (!page)
3223                 page = alloc_pages(flags, order);
3224         if (!page)
3225                 return NULL;
3226         memset(page_address(page), 0, size);
3227
3228         *dma_handle = __intel_map_single(dev, page_to_phys(page), size,
3229                                          DMA_BIDIRECTIONAL,
3230                                          dev->coherent_dma_mask);
3231         if (*dma_handle)
3232                 return page_address(page);
3233         if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT))
3234                 __free_pages(page, order);
3235
3236         return NULL;
3237 }
3238
3239 static void intel_free_coherent(struct device *dev, size_t size, void *vaddr,
3240                                 dma_addr_t dma_handle, struct dma_attrs *attrs)
3241 {
3242         int order;
3243         struct page *page = virt_to_page(vaddr);
3244
3245         size = PAGE_ALIGN(size);
3246         order = get_order(size);
3247
3248         intel_unmap_page(dev, dma_handle, size, DMA_BIDIRECTIONAL, NULL);
3249         if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT))
3250                 __free_pages(page, order);
3251 }
3252
3253 static void intel_unmap_sg(struct device *dev, struct scatterlist *sglist,
3254                            int nelems, enum dma_data_direction dir,
3255                            struct dma_attrs *attrs)
3256 {
3257         struct dmar_domain *domain;
3258         unsigned long start_pfn, last_pfn;
3259         struct iova *iova;
3260         struct intel_iommu *iommu;
3261         struct page *freelist;
3262
3263         if (iommu_no_mapping(dev))
3264                 return;
3265
3266         domain = find_domain(dev);
3267         BUG_ON(!domain);
3268
3269         iommu = domain_get_iommu(domain);
3270
3271         iova = find_iova(&domain->iovad, IOVA_PFN(sglist[0].dma_address));
3272         if (WARN_ONCE(!iova, "Driver unmaps unmatched sglist at PFN %llx\n",
3273                       (unsigned long long)sglist[0].dma_address))
3274                 return;
3275
3276         start_pfn = mm_to_dma_pfn(iova->pfn_lo);
3277         last_pfn = mm_to_dma_pfn(iova->pfn_hi + 1) - 1;
3278
3279         freelist = domain_unmap(domain, start_pfn, last_pfn);
3280
3281         if (intel_iommu_strict) {
3282                 iommu_flush_iotlb_psi(iommu, domain->id, start_pfn,
3283                                       last_pfn - start_pfn + 1, !freelist, 0);
3284                 /* free iova */
3285                 __free_iova(&domain->iovad, iova);
3286                 dma_free_pagelist(freelist);
3287         } else {
3288                 add_unmap(domain, iova, freelist);
3289                 /*
3290                  * Queue up the release of the unmap to save the ~1/6 of the
3291                  * CPU time otherwise used up by the iotlb flush operation...
3292                  */
3293         }
3294 }
3295
3296 static int intel_nontranslate_map_sg(struct device *hddev,
3297         struct scatterlist *sglist, int nelems, int dir)
3298 {
3299         int i;
3300         struct scatterlist *sg;
3301
3302         for_each_sg(sglist, sg, nelems, i) {
3303                 BUG_ON(!sg_page(sg));
3304                 sg->dma_address = page_to_phys(sg_page(sg)) + sg->offset;
3305                 sg->dma_length = sg->length;
3306         }
3307         return nelems;
3308 }
3309
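/*
 * dma_map_ops->map_sg: allocate one iova covering the whole scatterlist
 * and map every segment into it.  Returns the number of mapped entries,
 * or 0 on failure; passthrough devices go via intel_nontranslate_map_sg().
 */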
3310 static int intel_map_sg(struct device *dev, struct scatterlist *sglist, int nelems,
3311                         enum dma_data_direction dir, struct dma_attrs *attrs)
3312 {
3313         int i;
3314         struct dmar_domain *domain;
3315         size_t size = 0;
3316         int prot = 0;
3317         struct iova *iova = NULL;
3318         int ret;
3319         struct scatterlist *sg;
3320         unsigned long start_vpfn;
3321         struct intel_iommu *iommu;
3322
3323         BUG_ON(dir == DMA_NONE);
3324         if (iommu_no_mapping(dev))
3325                 return intel_nontranslate_map_sg(dev, sglist, nelems, dir);
3326
3327         domain = get_valid_domain_for_dev(dev);
3328         if (!domain)
3329                 return 0;
3330
3331         iommu = domain_get_iommu(domain);
3332
3333         for_each_sg(sglist, sg, nelems, i)
3334                 size += aligned_nrpages(sg->offset, sg->length);
3335
3336         iova = intel_alloc_iova(dev, domain, dma_to_mm_pfn(size),
3337                                 *dev->dma_mask);
3338         if (!iova) {
3339                 sglist->dma_length = 0;
3340                 return 0;
3341         }
3342
3343         /*
3344          * Check if DMAR supports zero-length reads on write only
3345          * mappings.
3346          */
3347         if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL ||
3348                         !cap_zlr(iommu->cap))
3349                 prot |= DMA_PTE_READ;
3350         if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
3351                 prot |= DMA_PTE_WRITE;
3352
3353         start_vpfn = mm_to_dma_pfn(iova->pfn_lo);
3354
3355         ret = domain_sg_mapping(domain, start_vpfn, sglist, size, prot);
3356         if (unlikely(ret)) {
3357                 /*  clear the page */
3358                 dma_pte_clear_range(domain, start_vpfn,
3359                                     start_vpfn + size - 1);
3360                 /* free page tables */
3361                 dma_pte_free_pagetable(domain, start_vpfn,
3362                                        start_vpfn + size - 1);
3363                 /* free iova */
3364                 __free_iova(&domain->iovad, iova);
3365                 return 0;
3366         }
3367
3368         /* It's a non-present to present mapping. Only flush the IOTLB if in caching mode */
3369         if (cap_caching_mode(iommu->cap))
3370                 iommu_flush_iotlb_psi(iommu, domain->id, start_vpfn, size, 0, 1);
3371         else
3372                 iommu_flush_write_buffer(iommu);
3373
3374         return nelems;
3375 }
3376
3377 static int intel_mapping_error(struct device *dev, dma_addr_t dma_addr)
3378 {
3379         return !dma_addr;
3380 }
3381
3382 struct dma_map_ops intel_dma_ops = {
3383         .alloc = intel_alloc_coherent,
3384         .free = intel_free_coherent,
3385         .map_sg = intel_map_sg,
3386         .unmap_sg = intel_unmap_sg,
3387         .map_page = intel_map_page,
3388         .unmap_page = intel_unmap_page,
3389         .mapping_error = intel_mapping_error,
3390 };
3391
3392 static inline int iommu_domain_cache_init(void)
3393 {
3394         int ret = 0;
3395
3396         iommu_domain_cache = kmem_cache_create("iommu_domain",
3397                                          sizeof(struct dmar_domain),
3398                                          0,
3399                                          SLAB_HWCACHE_ALIGN,
3401                                          NULL);
3402         if (!iommu_domain_cache) {
3403                 printk(KERN_ERR "Couldn't create iommu_domain cache\n");
3404                 ret = -ENOMEM;
3405         }
3406
3407         return ret;
3408 }
3409
3410 static inline int iommu_devinfo_cache_init(void)
3411 {
3412         int ret = 0;
3413
3414         iommu_devinfo_cache = kmem_cache_create("iommu_devinfo",
3415                                          sizeof(struct device_domain_info),
3416                                          0,
3417                                          SLAB_HWCACHE_ALIGN,
3418                                          NULL);
3419         if (!iommu_devinfo_cache) {
3420                 printk(KERN_ERR "Couldn't create devinfo cache\n");
3421                 ret = -ENOMEM;
3422         }
3423
3424         return ret;
3425 }
3426
3427 static inline int iommu_iova_cache_init(void)
3428 {
3429         int ret = 0;
3430
3431         iommu_iova_cache = kmem_cache_create("iommu_iova",
3432                                          sizeof(struct iova),
3433                                          0,
3434                                          SLAB_HWCACHE_ALIGN,
3435                                          NULL);
3436         if (!iommu_iova_cache) {
3437                 printk(KERN_ERR "Couldn't create iova cache\n");
3438                 ret = -ENOMEM;
3439         }
3440
3441         return ret;
3442 }
3443
3444 static int __init iommu_init_mempool(void)
3445 {
3446         int ret;
3447         ret = iommu_iova_cache_init();
3448         if (ret)
3449                 return ret;
3450
3451         ret = iommu_domain_cache_init();
3452         if (ret)
3453                 goto domain_error;
3454
3455         ret = iommu_devinfo_cache_init();
3456         if (!ret)
3457                 return ret;
3458
3459         kmem_cache_destroy(iommu_domain_cache);
3460 domain_error:
3461         kmem_cache_destroy(iommu_iova_cache);
3462
3463         return -ENOMEM;
3464 }
3465
3466 static void __init iommu_exit_mempool(void)
3467 {
3468         kmem_cache_destroy(iommu_devinfo_cache);
3469         kmem_cache_destroy(iommu_domain_cache);
3470         kmem_cache_destroy(iommu_iova_cache);
3472 }
3473
3474 static void quirk_ioat_snb_local_iommu(struct pci_dev *pdev)
3475 {
3476         struct dmar_drhd_unit *drhd;
3477         u32 vtbar;
3478         int rc;
3479
3480         /* We know that this device on this chipset has its own IOMMU.
3481          * If we find it under a different IOMMU, then the BIOS is lying
3482          * to us. Hope that the IOMMU for this device is actually
3483          * disabled, and it needs no translation...
3484          */
3485         rc = pci_bus_read_config_dword(pdev->bus, PCI_DEVFN(0, 0), 0xb0, &vtbar);
3486         if (rc) {
3487                 /* "can't" happen */
3488                 dev_info(&pdev->dev, "failed to run vt-d quirk\n");
3489                 return;
3490         }
3491         vtbar &= 0xffff0000;
3492
3493         /* we know that this iommu should be at offset 0xa000 from vtbar */
3494         drhd = dmar_find_matched_drhd_unit(pdev);
3495         if (WARN_TAINT_ONCE(!drhd || drhd->reg_base_addr - vtbar != 0xa000,
3496                             TAINT_FIRMWARE_WORKAROUND,
3497                             "BIOS assigned incorrect VT-d unit for Intel(R) QuickData Technology device\n"))
3498                 pdev->dev.archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
3499 }
3500 DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB, quirk_ioat_snb_local_iommu);
3501
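/*
 * Mark DRHD units that can be bypassed: units with an empty device
 * scope are simply ignored, and when dmar_map_gfx is disabled, units
 * covering only graphics devices are ignored as well, with their
 * devices assigned DUMMY_DEVICE_DOMAIN_INFO.
 */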
3502 static void __init init_no_remapping_devices(void)
3503 {
3504         struct dmar_drhd_unit *drhd;
3505         struct device *dev;
3506         int i;
3507
3508         for_each_drhd_unit(drhd) {
3509                 if (!drhd->include_all) {
3510                         for_each_active_dev_scope(drhd->devices,
3511                                                   drhd->devices_cnt, i, dev)
3512                                 break;
3513                         /* ignore DMAR unit if no devices exist */
3514                         if (i == drhd->devices_cnt)
3515                                 drhd->ignored = 1;
3516                 }
3517         }
3518
3519         for_each_active_drhd_unit(drhd) {
3520                 if (drhd->include_all)
3521                         continue;
3522
3523                 for_each_active_dev_scope(drhd->devices,
3524                                           drhd->devices_cnt, i, dev)
3525                         if (!dev_is_pci(dev) || !IS_GFX_DEVICE(to_pci_dev(dev)))
3526                                 break;
3527                 if (i < drhd->devices_cnt)
3528                         continue;
3529
3530                 /* This IOMMU has *only* gfx devices. Either bypass it or
3531                    set the gfx_mapped flag, as appropriate */
3532                 if (dmar_map_gfx) {
3533                         intel_iommu_gfx_mapped = 1;
3534                 } else {
3535                         drhd->ignored = 1;
3536                         for_each_active_dev_scope(drhd->devices,
3537                                                   drhd->devices_cnt, i, dev)
3538                                 dev->archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
3539                 }
3540         }
3541 }
3542
3543 #ifdef CONFIG_SUSPEND
3544 static int init_iommu_hw(void)
3545 {
3546         struct dmar_drhd_unit *drhd;
3547         struct intel_iommu *iommu = NULL;
3548
3549         for_each_active_iommu(iommu, drhd)
3550                 if (iommu->qi)
3551                         dmar_reenable_qi(iommu);
3552
3553         for_each_iommu(iommu, drhd) {
3554                 if (drhd->ignored) {
3555                         /*
3556                          * we always have to disable PMRs or DMA may fail on
3557                          * this device
3558                          */
3559                         if (force_on)
3560                                 iommu_disable_protect_mem_regions(iommu);
3561                         continue;
3562                 }
3563
3564                 iommu_flush_write_buffer(iommu);
3565
3566                 iommu_set_root_entry(iommu);
3567
3568                 iommu->flush.flush_context(iommu, 0, 0, 0,
3569                                            DMA_CCMD_GLOBAL_INVL);
3570                 iommu->flush.flush_iotlb(iommu, 0, 0, 0,
3571                                          DMA_TLB_GLOBAL_FLUSH);
3572                 if (iommu_enable_translation(iommu))
3573                         return 1;
3574                 iommu_disable_protect_mem_regions(iommu);
3575         }
3576
3577         return 0;
3578 }
3579
3580 static void iommu_flush_all(void)
3581 {
3582         struct dmar_drhd_unit *drhd;
3583         struct intel_iommu *iommu;
3584
3585         for_each_active_iommu(iommu, drhd) {
3586                 iommu->flush.flush_context(iommu, 0, 0, 0,
3587                                            DMA_CCMD_GLOBAL_INVL);
3588                 iommu->flush.flush_iotlb(iommu, 0, 0, 0,
3589                                          DMA_TLB_GLOBAL_FLUSH);
3590         }
3591 }
3592
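/*
 * Suspend path: flush everything, disable translation and save the
 * fault-event registers of every active IOMMU so that iommu_resume()
 * can restore them after re-enabling the hardware via init_iommu_hw().
 */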
3593 static int iommu_suspend(void)
3594 {
3595         struct dmar_drhd_unit *drhd;
3596         struct intel_iommu *iommu = NULL;
3597         unsigned long flag;
3598
3599         for_each_active_iommu(iommu, drhd) {
3600                 iommu->iommu_state = kzalloc(sizeof(u32) * MAX_SR_DMAR_REGS,
3601                                                  GFP_ATOMIC);
3602                 if (!iommu->iommu_state)
3603                         goto nomem;
3604         }
3605
3606         iommu_flush_all();
3607
3608         for_each_active_iommu(iommu, drhd) {
3609                 iommu_disable_translation(iommu);
3610
3611                 raw_spin_lock_irqsave(&iommu->register_lock, flag);
3612
3613                 iommu->iommu_state[SR_DMAR_FECTL_REG] =
3614                         readl(iommu->reg + DMAR_FECTL_REG);
3615                 iommu->iommu_state[SR_DMAR_FEDATA_REG] =
3616                         readl(iommu->reg + DMAR_FEDATA_REG);
3617                 iommu->iommu_state[SR_DMAR_FEADDR_REG] =
3618                         readl(iommu->reg + DMAR_FEADDR_REG);
3619                 iommu->iommu_state[SR_DMAR_FEUADDR_REG] =
3620                         readl(iommu->reg + DMAR_FEUADDR_REG);
3621
3622                 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
3623         }
3624         return 0;
3625
3626 nomem:
3627         for_each_active_iommu(iommu, drhd)
3628                 kfree(iommu->iommu_state);
3629
3630         return -ENOMEM;
3631 }
3632
3633 static void iommu_resume(void)
3634 {
3635         struct dmar_drhd_unit *drhd;
3636         struct intel_iommu *iommu = NULL;
3637         unsigned long flag;
3638
3639         if (init_iommu_hw()) {
3640                 if (force_on)
3641                         panic("tboot: IOMMU setup failed, DMAR can not resume!\n");
3642                 else
3643                         WARN(1, "IOMMU setup failed, DMAR can not resume!\n");
3644                 return;
3645         }
3646
3647         for_each_active_iommu(iommu, drhd) {
3648
3649                 raw_spin_lock_irqsave(&iommu->register_lock, flag);
3650
3651                 writel(iommu->iommu_state[SR_DMAR_FECTL_REG],
3652                         iommu->reg + DMAR_FECTL_REG);
3653                 writel(iommu->iommu_state[SR_DMAR_FEDATA_REG],
3654                         iommu->reg + DMAR_FEDATA_REG);
3655                 writel(iommu->iommu_state[SR_DMAR_FEADDR_REG],
3656                         iommu->reg + DMAR_FEADDR_REG);
3657                 writel(iommu->iommu_state[SR_DMAR_FEUADDR_REG],
3658                         iommu->reg + DMAR_FEUADDR_REG);
3659
3660                 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
3661         }
3662
3663         for_each_active_iommu(iommu, drhd)
3664                 kfree(iommu->iommu_state);
3665 }
3666
3667 static struct syscore_ops iommu_syscore_ops = {
3668         .resume         = iommu_resume,
3669         .suspend        = iommu_suspend,
3670 };
3671
3672 static void __init init_iommu_pm_ops(void)
3673 {
3674         register_syscore_ops(&iommu_syscore_ops);
3675 }
3676
3677 #else
3678 static inline void init_iommu_pm_ops(void) {}
3679 #endif  /* CONFIG_SUSPEND */
3680
3681
3682 int __init dmar_parse_one_rmrr(struct acpi_dmar_header *header)
3683 {
3684         struct acpi_dmar_reserved_memory *rmrr;
3685         struct dmar_rmrr_unit *rmrru;
3686
3687         rmrru = kzalloc(sizeof(*rmrru), GFP_KERNEL);
3688         if (!rmrru)
3689                 return -ENOMEM;
3690
3691         rmrru->hdr = header;
3692         rmrr = (struct acpi_dmar_reserved_memory *)header;
3693         rmrru->base_address = rmrr->base_address;
3694         rmrru->end_address = rmrr->end_address;
3695         rmrru->devices = dmar_alloc_dev_scope((void *)(rmrr + 1),
3696                                 ((void *)rmrr) + rmrr->header.length,
3697                                 &rmrru->devices_cnt);
3698         if (rmrru->devices_cnt && rmrru->devices == NULL) {
3699                 kfree(rmrru);
3700                 return -ENOMEM;
3701         }
3702
3703         list_add(&rmrru->list, &dmar_rmrr_units);
3704
3705         return 0;
3706 }
3707
3708 int __init dmar_parse_one_atsr(struct acpi_dmar_header *hdr)
3709 {
3710         struct acpi_dmar_atsr *atsr;
3711         struct dmar_atsr_unit *atsru;
3712
3713         atsr = container_of(hdr, struct acpi_dmar_atsr, header);
3714         atsru = kzalloc(sizeof(*atsru), GFP_KERNEL);
3715         if (!atsru)
3716                 return -ENOMEM;
3717
3718         atsru->hdr = hdr;
3719         atsru->include_all = atsr->flags & 0x1;
3720         if (!atsru->include_all) {
3721                 atsru->devices = dmar_alloc_dev_scope((void *)(atsr + 1),
3722                                 (void *)atsr + atsr->header.length,
3723                                 &atsru->devices_cnt);
3724                 if (atsru->devices_cnt && atsru->devices == NULL) {
3725                         kfree(atsru);
3726                         return -ENOMEM;
3727                 }
3728         }
3729
3730         list_add_rcu(&atsru->list, &dmar_atsr_units);
3731
3732         return 0;
3733 }
3734
3735 static void intel_iommu_free_atsr(struct dmar_atsr_unit *atsru)
3736 {
3737         dmar_free_dev_scope(&atsru->devices, &atsru->devices_cnt);
3738         kfree(atsru);
3739 }
3740
3741 static void intel_iommu_free_dmars(void)
3742 {
3743         struct dmar_rmrr_unit *rmrru, *rmrr_n;
3744         struct dmar_atsr_unit *atsru, *atsr_n;
3745
3746         list_for_each_entry_safe(rmrru, rmrr_n, &dmar_rmrr_units, list) {
3747                 list_del(&rmrru->list);
3748                 dmar_free_dev_scope(&rmrru->devices, &rmrru->devices_cnt);
3749                 kfree(rmrru);
3750         }
3751
3752         list_for_each_entry_safe(atsru, atsr_n, &dmar_atsr_units, list) {
3753                 list_del(&atsru->list);
3754                 intel_iommu_free_atsr(atsru);
3755         }
3756 }
3757
3758 int dmar_find_matched_atsr_unit(struct pci_dev *dev)
3759 {
3760         int i, ret = 1;
3761         struct pci_bus *bus;
3762         struct pci_dev *bridge = NULL;
3763         struct device *tmp;
3764         struct acpi_dmar_atsr *atsr;
3765         struct dmar_atsr_unit *atsru;
3766
3767         dev = pci_physfn(dev);
3768         for (bus = dev->bus; bus; bus = bus->parent) {
3769                 bridge = bus->self;
3770                 if (!bridge || !pci_is_pcie(bridge) ||
3771                     pci_pcie_type(bridge) == PCI_EXP_TYPE_PCI_BRIDGE)
3772                         return 0;
3773                 if (pci_pcie_type(bridge) == PCI_EXP_TYPE_ROOT_PORT)
3774                         break;
3775         }
3776         if (!bridge)
3777                 return 0;
3778
3779         rcu_read_lock();
3780         list_for_each_entry_rcu(atsru, &dmar_atsr_units, list) {
3781                 atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
3782                 if (atsr->segment != pci_domain_nr(dev->bus))
3783                         continue;
3784
3785                 for_each_dev_scope(atsru->devices, atsru->devices_cnt, i, tmp)
3786                         if (tmp == &bridge->dev)
3787                                 goto out;
3788
3789                 if (atsru->include_all)
3790                         goto out;
3791         }
3792         ret = 0;
3793 out:
3794         rcu_read_unlock();
3795
3796         return ret;
3797 }
3798
3799 int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info *info)
3800 {
3801         int ret = 0;
3802         struct dmar_rmrr_unit *rmrru;
3803         struct dmar_atsr_unit *atsru;
3804         struct acpi_dmar_atsr *atsr;
3805         struct acpi_dmar_reserved_memory *rmrr;
3806
3807         if (!intel_iommu_enabled && system_state != SYSTEM_BOOTING)
3808                 return 0;
3809
3810         list_for_each_entry(rmrru, &dmar_rmrr_units, list) {
3811                 rmrr = container_of(rmrru->hdr,
3812                                     struct acpi_dmar_reserved_memory, header);
3813                 if (info->event == BUS_NOTIFY_ADD_DEVICE) {
3814                         ret = dmar_insert_dev_scope(info, (void *)(rmrr + 1),
3815                                 ((void *)rmrr) + rmrr->header.length,
3816                                 rmrr->segment, rmrru->devices,
3817                                 rmrru->devices_cnt);
3818                         if (ret < 0)
3819                                 return ret;
3820                 } else if (info->event == BUS_NOTIFY_DEL_DEVICE) {
3821                         dmar_remove_dev_scope(info, rmrr->segment,
3822                                 rmrru->devices, rmrru->devices_cnt);
3823                 }
3824         }
3825
3826         list_for_each_entry(atsru, &dmar_atsr_units, list) {
3827                 if (atsru->include_all)
3828                         continue;
3829
3830                 atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
3831                 if (info->event == BUS_NOTIFY_ADD_DEVICE) {
3832                         ret = dmar_insert_dev_scope(info, (void *)(atsr + 1),
3833                                         (void *)atsr + atsr->header.length,
3834                                         atsr->segment, atsru->devices,
3835                                         atsru->devices_cnt);
3836                         if (ret > 0)
3837                                 break;
3838                         else if (ret < 0)
3839                                 return ret;
3840                 } else if (info->event == BUS_NOTIFY_DEL_DEVICE) {
3841                         if (dmar_remove_dev_scope(info, atsr->segment,
3842                                         atsru->devices, atsru->devices_cnt))
3843                                 break;
3844                 }
3845         }
3846
3847         return 0;
3848 }
3849
3850 /*
3851  * Here we only respond to a device being unbound from its driver.
3852  *
3853  * A newly added device is not attached to its DMAR domain here yet. That
3854  * happens when the device is first mapped to an iova.
3855  */
3856 static int device_notifier(struct notifier_block *nb,
3857                                   unsigned long action, void *data)
3858 {
3859         struct device *dev = data;
3860         struct dmar_domain *domain;
3861
3862         if (iommu_dummy(dev))
3863                 return 0;
3864
3865         if (action != BUS_NOTIFY_UNBOUND_DRIVER &&
3866             action != BUS_NOTIFY_DEL_DEVICE)
3867                 return 0;
3868
3869         domain = find_domain(dev);
3870         if (!domain)
3871                 return 0;
3872
3873         down_read(&dmar_global_lock);
3874         domain_remove_one_dev_info(domain, dev);
3875         if (!(domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE) &&
3876             !(domain->flags & DOMAIN_FLAG_STATIC_IDENTITY) &&
3877             list_empty(&domain->devices))
3878                 domain_exit(domain);
3879         up_read(&dmar_global_lock);
3880
3881         return 0;
3882 }
3883
3884 static struct notifier_block device_nb = {
3885         .notifier_call = device_notifier,
3886 };
3887
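/*
 * Memory hotplug notifier for the static identity domain: extend the
 * identity map when a region goes online, and unmap it again (flushing
 * the IOTLB on every active IOMMU) when the region goes offline.
 */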
3888 static int intel_iommu_memory_notifier(struct notifier_block *nb,
3889                                        unsigned long val, void *v)
3890 {
3891         struct memory_notify *mhp = v;
3892         unsigned long long start, end;
3893         unsigned long start_vpfn, last_vpfn;
3894
3895         switch (val) {
3896         case MEM_GOING_ONLINE:
3897                 start = mhp->start_pfn << PAGE_SHIFT;
3898                 end = ((mhp->start_pfn + mhp->nr_pages) << PAGE_SHIFT) - 1;
3899                 if (iommu_domain_identity_map(si_domain, start, end)) {
3900                         pr_warn("dmar: failed to build identity map for [%llx-%llx]\n",
3901                                 start, end);
3902                         return NOTIFY_BAD;
3903                 }
3904                 break;
3905
3906         case MEM_OFFLINE:
3907         case MEM_CANCEL_ONLINE:
3908                 start_vpfn = mm_to_dma_pfn(mhp->start_pfn);
3909                 last_vpfn = mm_to_dma_pfn(mhp->start_pfn + mhp->nr_pages - 1);
3910                 while (start_vpfn <= last_vpfn) {
3911                         struct iova *iova;
3912                         struct dmar_drhd_unit *drhd;
3913                         struct intel_iommu *iommu;
3914                         struct page *freelist;
3915
3916                         iova = find_iova(&si_domain->iovad, start_vpfn);
3917                         if (iova == NULL) {
3918                                 pr_debug("dmar: failed to get IOVA for PFN %lx\n",
3919                                          start_vpfn);
3920                                 break;
3921                         }
3922
3923                         iova = split_and_remove_iova(&si_domain->iovad, iova,
3924                                                      start_vpfn, last_vpfn);
3925                         if (iova == NULL) {
3926                                 pr_warn("dmar: failed to split IOVA PFN [%lx-%lx]\n",
3927                                         start_vpfn, last_vpfn);
3928                                 return NOTIFY_BAD;
3929                         }
3930
3931                         freelist = domain_unmap(si_domain, iova->pfn_lo,
3932                                                iova->pfn_hi);
3933
3934                         rcu_read_lock();
3935                         for_each_active_iommu(iommu, drhd)
3936                                 iommu_flush_iotlb_psi(iommu, si_domain->id,
3937                                         iova->pfn_lo,
3938                                         iova->pfn_hi - iova->pfn_lo + 1,
3939                                         !freelist, 0);
3940                         rcu_read_unlock();
3941                         dma_free_pagelist(freelist);
3942
3943                         start_vpfn = iova->pfn_hi + 1;
3944                         free_iova_mem(iova);
3945                 }
3946                 break;
3947         }
3948
3949         return NOTIFY_OK;
3950 }
3951
3952 static struct notifier_block intel_iommu_memory_nb = {
3953         .notifier_call = intel_iommu_memory_notifier,
3954         .priority = 0
3955 };
3956
3957 int __init intel_iommu_init(void)
3958 {
3959         int ret = -ENODEV;
3960         struct dmar_drhd_unit *drhd;
3961         struct intel_iommu *iommu;
3962
3963         /* VT-d is required for a TXT/tboot launch, so enforce that */
3964         force_on = tboot_force_iommu();
3965
3966         if (iommu_init_mempool()) {
3967                 if (force_on)
3968                         panic("tboot: Failed to initialize iommu memory\n");
3969                 return -ENOMEM;
3970         }
3971
3972         down_write(&dmar_global_lock);
3973         if (dmar_table_init()) {
3974                 if (force_on)
3975                         panic("tboot: Failed to initialize DMAR table\n");
3976                 goto out_free_dmar;
3977         }
3978
3979         /*
3980          * Disable translation if already enabled prior to OS handover.
3981          */
3982         for_each_active_iommu(iommu, drhd)
3983                 if (iommu->gcmd & DMA_GCMD_TE)
3984                         iommu_disable_translation(iommu);
3985
3986         if (dmar_dev_scope_init() < 0) {
3987                 if (force_on)
3988                         panic("tboot: Failed to initialize DMAR device scope\n");
3989                 goto out_free_dmar;
3990         }
3991
3992         if (no_iommu || dmar_disabled)
3993                 goto out_free_dmar;
3994
3995         if (list_empty(&dmar_rmrr_units))
3996                 printk(KERN_INFO "DMAR: No RMRR found\n");
3997
3998         if (list_empty(&dmar_atsr_units))
3999                 printk(KERN_INFO "DMAR: No ATSR found\n");
4000
4001         if (dmar_init_reserved_ranges()) {
4002                 if (force_on)
4003                         panic("tboot: Failed to reserve iommu ranges\n");
4004                 goto out_free_reserved_range;
4005         }
4006
4007         init_no_remapping_devices();
4008
4009         ret = init_dmars();
4010         if (ret) {
4011                 if (force_on)
4012                         panic("tboot: Failed to initialize DMARs\n");
4013                 printk(KERN_ERR "IOMMU: dmar init failed\n");
4014                 goto out_free_reserved_range;
4015         }
4016         up_write(&dmar_global_lock);
4017         printk(KERN_INFO
4018         "PCI-DMA: Intel(R) Virtualization Technology for Directed I/O\n");
4019
4020         init_timer(&unmap_timer);
4021 #ifdef CONFIG_SWIOTLB
4022         swiotlb = 0;
4023 #endif
4024         dma_ops = &intel_dma_ops;
4025
4026         init_iommu_pm_ops();
4027
4028         bus_set_iommu(&pci_bus_type, &intel_iommu_ops);
4029         bus_register_notifier(&pci_bus_type, &device_nb);
4030         if (si_domain && !hw_pass_through)
4031                 register_memory_notifier(&intel_iommu_memory_nb);
4032
4033         intel_iommu_enabled = 1;
4034
4035         return 0;
4036
4037 out_free_reserved_range:
4038         put_iova_domain(&reserved_iova_list);
4039 out_free_dmar:
4040         intel_iommu_free_dmars();
4041         up_write(&dmar_global_lock);
4042         iommu_exit_mempool();
4043         return ret;
4044 }
4045
4046 static void iommu_detach_dependent_devices(struct intel_iommu *iommu,
4047                                            struct device *dev)
4048 {
4049         struct pci_dev *tmp, *parent, *pdev;
4050
4051         if (!iommu || !dev || !dev_is_pci(dev))
4052                 return;
4053
4054         pdev = to_pci_dev(dev);
4055
4056         /* dependent device detach */
4057         tmp = pci_find_upstream_pcie_bridge(pdev);
4058         /* Secondary interface's bus number and devfn 0 */
4059         if (tmp) {
4060                 parent = pdev->bus->self;
4061                 while (parent != tmp) {
4062                         iommu_detach_dev(iommu, parent->bus->number,
4063                                          parent->devfn);
4064                         parent = parent->bus->self;
4065                 }
4066                 if (pci_is_pcie(tmp)) /* this is a PCIe-to-PCI bridge */
4067                         iommu_detach_dev(iommu,
4068                                 tmp->subordinate->number, 0);
4069                 else /* this is a legacy PCI bridge */
4070                         iommu_detach_dev(iommu, tmp->bus->number,
4071                                          tmp->devfn);
4072         }
4073 }
4074
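/*
 * Detach one device (and any bridges it depends on) from a domain.  If
 * it was the last device of this domain on its IOMMU, that IOMMU is
 * cleared from the domain's iommu_bmp and, for non-VM/non-identity
 * domains, the domain id is released on that IOMMU as well.
 */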
4075 static void domain_remove_one_dev_info(struct dmar_domain *domain,
4076                                        struct device *dev)
4077 {
4078         struct device_domain_info *info, *tmp;
4079         struct intel_iommu *iommu;
4080         unsigned long flags;
4081         int found = 0;
4082         u8 bus, devfn;
4083
4084         iommu = device_to_iommu(dev, &bus, &devfn);
4085         if (!iommu)
4086                 return;
4087
4088         spin_lock_irqsave(&device_domain_lock, flags);
4089         list_for_each_entry_safe(info, tmp, &domain->devices, link) {
4090                 if (info->iommu == iommu && info->bus == bus &&
4091                     info->devfn == devfn) {
4092                         unlink_domain_info(info);
4093                         spin_unlock_irqrestore(&device_domain_lock, flags);
4094
4095                         iommu_disable_dev_iotlb(info);
4096                         iommu_detach_dev(iommu, info->bus, info->devfn);
4097                         iommu_detach_dependent_devices(iommu, dev);
4098                         free_devinfo_mem(info);
4099
4100                         spin_lock_irqsave(&device_domain_lock, flags);
4101
4102                         if (found)
4103                                 break;
4104                         else
4105                                 continue;
4106                 }
4107
4108                 /* If there are no other devices under the same iommu
4109                  * owned by this domain, clear this iommu in iommu_bmp and
4110                  * update the iommu count and coherency.
4111                  */
4112                 if (info->iommu == iommu)
4113                         found = 1;
4114         }
4115
4116         spin_unlock_irqrestore(&device_domain_lock, flags);
4117
4118         if (found == 0) {
4119                 unsigned long tmp_flags;
4120                 spin_lock_irqsave(&domain->iommu_lock, tmp_flags);
4121                 clear_bit(iommu->seq_id, domain->iommu_bmp);
4122                 domain->iommu_count--;
4123                 domain_update_iommu_cap(domain);
4124                 spin_unlock_irqrestore(&domain->iommu_lock, tmp_flags);
4125
4126                 if (!(domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE) &&
4127                     !(domain->flags & DOMAIN_FLAG_STATIC_IDENTITY)) {
4128                         spin_lock_irqsave(&iommu->lock, tmp_flags);
4129                         clear_bit(domain->id, iommu->domain_ids);
4130                         iommu->domains[domain->id] = NULL;
4131                         spin_unlock_irqrestore(&iommu->lock, tmp_flags);
4132                 }
4133         }
4134 }
4135
4136 static int md_domain_init(struct dmar_domain *domain, int guest_width)
4137 {
4138         int adjust_width;
4139
4140         init_iova_domain(&domain->iovad, DMA_32BIT_PFN);
4141         domain_reserve_special_ranges(domain);
4142
4143         /* calculate AGAW */
4144         domain->gaw = guest_width;
4145         adjust_width = guestwidth_to_adjustwidth(guest_width);
4146         domain->agaw = width_to_agaw(adjust_width);
4147
4148         domain->iommu_coherency = 0;
4149         domain->iommu_snooping = 0;
4150         domain->iommu_superpage = 0;
4151         domain->max_addr = 0;
4152
4153         /* always allocate the top pgd */
4154         domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
4155         if (!domain->pgd)
4156                 return -ENOMEM;
4157         domain_flush_cache(domain, domain->pgd, PAGE_SIZE);
4158         return 0;
4159 }
4160
4161 static int intel_iommu_domain_init(struct iommu_domain *domain)
4162 {
4163         struct dmar_domain *dmar_domain;
4164
4165         dmar_domain = alloc_domain(true);
4166         if (!dmar_domain) {
4167                 printk(KERN_ERR
4168                         "intel_iommu_domain_init: dmar_domain == NULL\n");
4169                 return -ENOMEM;
4170         }
4171         if (md_domain_init(dmar_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
4172                 printk(KERN_ERR
4173                         "intel_iommu_domain_init() failed\n");
4174                 domain_exit(dmar_domain);
4175                 return -ENOMEM;
4176         }
4177         domain_update_iommu_cap(dmar_domain);
4178         domain->priv = dmar_domain;
4179
4180         domain->geometry.aperture_start = 0;
4181         domain->geometry.aperture_end   = __DOMAIN_MAX_ADDR(dmar_domain->gaw);
4182         domain->geometry.force_aperture = true;
4183
4184         return 0;
4185 }
4186
4187 static void intel_iommu_domain_destroy(struct iommu_domain *domain)
4188 {
4189         struct dmar_domain *dmar_domain = domain->priv;
4190
4191         domain->priv = NULL;
4192         domain_exit(dmar_domain);
4193 }
4194
4195 static int intel_iommu_attach_device(struct iommu_domain *domain,
4196                                      struct device *dev)
4197 {
4198         struct dmar_domain *dmar_domain = domain->priv;
4199         struct intel_iommu *iommu;
4200         int addr_width;
4201         u8 bus, devfn;
4202
4203         /* normally dev is not mapped */
4204         if (unlikely(domain_context_mapped(dev))) {
4205                 struct dmar_domain *old_domain;
4206
4207                 old_domain = find_domain(dev);
4208                 if (old_domain) {
4209                         if (dmar_domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE ||
4210                             dmar_domain->flags & DOMAIN_FLAG_STATIC_IDENTITY)
4211                                 domain_remove_one_dev_info(old_domain, dev);
4212                         else
4213                                 domain_remove_dev_info(old_domain);
4214                 }
4215         }
4216
4217         iommu = device_to_iommu(dev, &bus, &devfn);
4218         if (!iommu)
4219                 return -ENODEV;
4220
4221         /* check if this iommu agaw is sufficient for max mapped address */
4222         addr_width = agaw_to_width(iommu->agaw);
4223         if (addr_width > cap_mgaw(iommu->cap))
4224                 addr_width = cap_mgaw(iommu->cap);
4225
4226         if (dmar_domain->max_addr > (1LL << addr_width)) {
4227                 printk(KERN_ERR "%s: iommu width (%d) is not "
4228                        "sufficient for the mapped address (%llx)\n",
4229                        __func__, addr_width, dmar_domain->max_addr);
4230                 return -EFAULT;
4231         }
4232         dmar_domain->gaw = addr_width;
4233
4234         /*
4235          * Knock out extra levels of page tables if necessary
4236          */
4237         while (iommu->agaw < dmar_domain->agaw) {
4238                 struct dma_pte *pte;
4239
4240                 pte = dmar_domain->pgd;
4241                 if (dma_pte_present(pte)) {
4242                         dmar_domain->pgd = (struct dma_pte *)
4243                                 phys_to_virt(dma_pte_addr(pte));
4244                         free_pgtable_page(pte);
4245                 }
4246                 dmar_domain->agaw--;
4247         }
4248
4249         return domain_add_dev_info(dmar_domain, dev, CONTEXT_TT_MULTI_LEVEL);
4250 }
4251
4252 static void intel_iommu_detach_device(struct iommu_domain *domain,
4253                                       struct device *dev)
4254 {
4255         struct dmar_domain *dmar_domain = domain->priv;
4256
4257         domain_remove_one_dev_info(dmar_domain, dev);
4258 }
4259
4260 static int intel_iommu_map(struct iommu_domain *domain,
4261                            unsigned long iova, phys_addr_t hpa,
4262                            size_t size, int iommu_prot)
4263 {
4264         struct dmar_domain *dmar_domain = domain->priv;
4265         u64 max_addr;
4266         int prot = 0;
4267         int ret;
4268
4269         if (iommu_prot & IOMMU_READ)
4270                 prot |= DMA_PTE_READ;
4271         if (iommu_prot & IOMMU_WRITE)
4272                 prot |= DMA_PTE_WRITE;
4273         if ((iommu_prot & IOMMU_CACHE) && dmar_domain->iommu_snooping)
4274                 prot |= DMA_PTE_SNP;
4275
4276         max_addr = iova + size;
4277         if (dmar_domain->max_addr < max_addr) {
4278                 u64 end;
4279
4280                 /* check if minimum agaw is sufficient for mapped address */
4281                 end = __DOMAIN_MAX_ADDR(dmar_domain->gaw) + 1;
4282                 if (end < max_addr) {
4283                         printk(KERN_ERR "%s: iommu width (%d) is not "
4284                                "sufficient for the mapped address (%llx)\n",
4285                                __func__, dmar_domain->gaw, max_addr);
4286                         return -EFAULT;
4287                 }
4288                 dmar_domain->max_addr = max_addr;
4289         }
4290         /* Round up size to next multiple of PAGE_SIZE, if it and
4291            the low bits of hpa would take us onto the next page */
4292         size = aligned_nrpages(hpa, size);
4293         ret = domain_pfn_mapping(dmar_domain, iova >> VTD_PAGE_SHIFT,
4294                                  hpa >> VTD_PAGE_SHIFT, size, prot);
4295         return ret;
4296 }
4297
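/*
 * iommu_ops->unmap: the unmapped range is rounded up to the size of the
 * large page actually backing 'iova', the page tables are torn down,
 * and a page-selective flush is issued on every IOMMU that currently
 * holds this domain.
 */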
4298 static size_t intel_iommu_unmap(struct iommu_domain *domain,
4299                                 unsigned long iova, size_t size)
4300 {
4301         struct dmar_domain *dmar_domain = domain->priv;
4302         struct page *freelist = NULL;
4303         struct intel_iommu *iommu;
4304         unsigned long start_pfn, last_pfn;
4305         unsigned int npages;
4306         int iommu_id, num, ndomains, level = 0;
4307
4308         /* Cope with horrid API which requires us to unmap more than the
4309            size argument if it happens to be a large-page mapping. */
4310         if (!pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, &level))
4311                 BUG();
4312
4313         if (size < VTD_PAGE_SIZE << level_to_offset_bits(level))
4314                 size = VTD_PAGE_SIZE << level_to_offset_bits(level);
4315
4316         start_pfn = iova >> VTD_PAGE_SHIFT;
4317         last_pfn = (iova + size - 1) >> VTD_PAGE_SHIFT;
4318
4319         freelist = domain_unmap(dmar_domain, start_pfn, last_pfn);
4320
4321         npages = last_pfn - start_pfn + 1;
4322
4323         for_each_set_bit(iommu_id, dmar_domain->iommu_bmp, g_num_of_iommus) {
4324                iommu = g_iommus[iommu_id];
4325
4326                /*
4327                 * find bit position of dmar_domain
4328                 */
4329                ndomains = cap_ndoms(iommu->cap);
4330                for_each_set_bit(num, iommu->domain_ids, ndomains) {
4331                        if (iommu->domains[num] == dmar_domain)
4332                                iommu_flush_iotlb_psi(iommu, num, start_pfn,
4333                                                      npages, !freelist, 0);
4334                }
4335
4336         }
4337
4338         dma_free_pagelist(freelist);
4339
4340         if (dmar_domain->max_addr == iova + size)
4341                 dmar_domain->max_addr = iova;
4342
4343         return size;
4344 }
4345
4346 static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
4347                                             dma_addr_t iova)
4348 {
4349         struct dmar_domain *dmar_domain = domain->priv;
4350         struct dma_pte *pte;
4351         int level = 0;
4352         u64 phys = 0;
4353
4354         pte = pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, &level);
4355         if (pte)
4356                 phys = dma_pte_addr(pte);
4357
4358         return phys;
4359 }
4360
4361 static int intel_iommu_domain_has_cap(struct iommu_domain *domain,
4362                                       unsigned long cap)
4363 {
4364         struct dmar_domain *dmar_domain = domain->priv;
4365
4366         if (cap == IOMMU_CAP_CACHE_COHERENCY)
4367                 return dmar_domain->iommu_snooping;
4368         if (cap == IOMMU_CAP_INTR_REMAP)
4369                 return irq_remapping_enabled;
4370
4371         return 0;
4372 }
4373
4374 #define REQ_ACS_FLAGS   (PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF)
4375
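/*
 * iommu_ops->add_device: walk up from the device to find the "DMA
 * source" the IOMMU actually sees (upstream PCIe-to-PCI bridges,
 * quirked DMA aliases, multifunction peers and non-ACS-isolated
 * upstream ports), and place the device in that device's iommu_group.
 */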
4376 static int intel_iommu_add_device(struct device *dev)
4377 {
4378         struct pci_dev *pdev = to_pci_dev(dev);
4379         struct pci_dev *bridge, *dma_pdev = NULL;
4380         struct iommu_group *group;
4381         int ret;
4382         u8 bus, devfn;
4383
4384         if (!device_to_iommu(dev, &bus, &devfn))
4385                 return -ENODEV;
4386
4387         bridge = pci_find_upstream_pcie_bridge(pdev);
4388         if (bridge) {
4389                 if (pci_is_pcie(bridge))
4390                         dma_pdev = pci_get_domain_bus_and_slot(
4391                                                 pci_domain_nr(pdev->bus),
4392                                                 bridge->subordinate->number, 0);
4393                 if (!dma_pdev)
4394                         dma_pdev = pci_dev_get(bridge);
4395         } else
4396                 dma_pdev = pci_dev_get(pdev);
4397
4398         /* Account for quirked devices */
4399         swap_pci_ref(&dma_pdev, pci_get_dma_source(dma_pdev));
4400
4401         /*
4402          * If it's a multifunction device that does not support our
4403          * required ACS flags, add it to the same group as the lowest
4404          * numbered function that also does not support the required ACS flags.
4405          */
4406         if (dma_pdev->multifunction &&
4407             !pci_acs_enabled(dma_pdev, REQ_ACS_FLAGS)) {
4408                 u8 i, slot = PCI_SLOT(dma_pdev->devfn);
4409
4410                 for (i = 0; i < 8; i++) {
4411                         struct pci_dev *tmp;
4412
4413                         tmp = pci_get_slot(dma_pdev->bus, PCI_DEVFN(slot, i));
4414                         if (!tmp)
4415                                 continue;
4416
4417                         if (!pci_acs_enabled(tmp, REQ_ACS_FLAGS)) {
4418                                 swap_pci_ref(&dma_pdev, tmp);
4419                                 break;
4420                         }
4421                         pci_dev_put(tmp);
4422                 }
4423         }
4424
4425         /*
4426          * Devices on the root bus go through the iommu.  If that's not us,
4427          * find the next upstream device and test ACS up to the root bus.
4428          * Finding the next device may require skipping virtual buses.
4429          */
4430         while (!pci_is_root_bus(dma_pdev->bus)) {
4431                 struct pci_bus *bus = dma_pdev->bus;
4432
4433                 while (!bus->self) {
4434                         if (!pci_is_root_bus(bus))
4435                                 bus = bus->parent;
4436                         else
4437                                 goto root_bus;
4438                 }
4439
4440                 if (pci_acs_path_enabled(bus->self, NULL, REQ_ACS_FLAGS))
4441                         break;
4442
4443                 swap_pci_ref(&dma_pdev, pci_dev_get(bus->self));
4444         }
4445
4446 root_bus:
4447         group = iommu_group_get(&dma_pdev->dev);
4448         pci_dev_put(dma_pdev);
4449         if (!group) {
4450                 group = iommu_group_alloc();
4451                 if (IS_ERR(group))
4452                         return PTR_ERR(group);
4453         }
4454
4455         ret = iommu_group_add_device(group, dev);
4456
4457         iommu_group_put(group);
4458         return ret;
4459 }
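
/*
 * Worked example on a hypothetical topology: for a two-function device
 * at 00:1b.0/00:1b.1 that advertises no ACS, the slot scan above lands
 * both functions in the iommu_group of function 0, so they can only be
 * assigned (e.g. to a guest via VFIO) as a unit.
 */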
4460
4461 static void intel_iommu_remove_device(struct device *dev)
4462 {
4463         iommu_group_remove_device(dev);
4464 }
4465
4466 static struct iommu_ops intel_iommu_ops = {
4467         .domain_init    = intel_iommu_domain_init,
4468         .domain_destroy = intel_iommu_domain_destroy,
4469         .attach_dev     = intel_iommu_attach_device,
4470         .detach_dev     = intel_iommu_detach_device,
4471         .map            = intel_iommu_map,
4472         .unmap          = intel_iommu_unmap,
4473         .iova_to_phys   = intel_iommu_iova_to_phys,
4474         .domain_has_cap = intel_iommu_domain_has_cap,
4475         .add_device     = intel_iommu_add_device,
4476         .remove_device  = intel_iommu_remove_device,
4477         .pgsize_bitmap  = INTEL_IOMMU_PGSIZES,
4478 };
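
/*
 * This ops table is what plugs VT-d into the generic IOMMU layer; it is
 * registered against the PCI bus from intel_iommu_init(), roughly:
 *
 *	bus_set_iommu(&pci_bus_type, &intel_iommu_ops);
 *
 * after which iommu_domain_alloc(&pci_bus_type), iommu_map() and
 * iommu_unmap() resolve to the callbacks above.
 */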
4479
4480 static void quirk_iommu_g4x_gfx(struct pci_dev *dev)
4481 {
4482         /* G4x/GM45 integrated gfx dmar support is totally busted. */
4483         printk(KERN_INFO "DMAR: Disabling IOMMU for graphics on this chipset\n");
4484         dmar_map_gfx = 0;
4485 }
4486
4487 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_g4x_gfx);
4488 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e00, quirk_iommu_g4x_gfx);
4489 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e10, quirk_iommu_g4x_gfx);
4490 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e20, quirk_iommu_g4x_gfx);
4491 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e30, quirk_iommu_g4x_gfx);
4492 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e40, quirk_iommu_g4x_gfx);
4493 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e90, quirk_iommu_g4x_gfx);
4494
4495 static void quirk_iommu_rwbf(struct pci_dev *dev)
4496 {
4497         /*
4498          * Mobile 4 Series Chipset neglects to set RWBF capability,
4499          * but needs it. Same seems to hold for the desktop versions.
4500          */
4501         printk(KERN_INFO "DMAR: Forcing write-buffer flush capability\n");
4502         rwbf_quirk = 1;
4503 }
4504
4505 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_rwbf);
4506 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e00, quirk_iommu_rwbf);
4507 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e10, quirk_iommu_rwbf);
4508 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e20, quirk_iommu_rwbf);
4509 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e30, quirk_iommu_rwbf);
4510 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e40, quirk_iommu_rwbf);
4511 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e90, quirk_iommu_rwbf);
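
/*
 * The two quirk lists above overlap: a (hypothetical) system whose host
 * bridge is 8086:2a40, for instance, both loses graphics translation via
 * quirk_iommu_g4x_gfx and gets rwbf_quirk set here, so both flags are
 * already in place before the DMAR code starts consulting them.
 */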
4512
4513 #define GGC 0x52
4514 #define GGC_MEMORY_SIZE_MASK    (0xf << 8)
4515 #define GGC_MEMORY_SIZE_NONE    (0x0 << 8)
4516 #define GGC_MEMORY_SIZE_1M      (0x1 << 8)
4517 #define GGC_MEMORY_SIZE_2M      (0x3 << 8)
4518 #define GGC_MEMORY_VT_ENABLED   (0x8 << 8)
4519 #define GGC_MEMORY_SIZE_2M_VT   (0x9 << 8)
4520 #define GGC_MEMORY_SIZE_3M_VT   (0xa << 8)
4521 #define GGC_MEMORY_SIZE_4M_VT   (0xb << 8)
4522
4523 static void quirk_calpella_no_shadow_gtt(struct pci_dev *dev)
4524 {
4525         unsigned short ggc;
4526
4527         if (pci_read_config_word(dev, GGC, &ggc))
4528                 return;
4529
4530         if (!(ggc & GGC_MEMORY_VT_ENABLED)) {
4531                 printk(KERN_INFO "DMAR: BIOS has allocated no shadow GTT; disabling IOMMU for graphics\n");
4532                 dmar_map_gfx = 0;
4533         } else if (dmar_map_gfx) {
4534                 /* we have to ensure the gfx device is idle before we flush */
4535                 printk(KERN_INFO "DMAR: Disabling batched IOTLB flush on Ironlake\n");
4536                 intel_iommu_strict = 1;
4537         }
4538 }
4539 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0040, quirk_calpella_no_shadow_gtt);
4540 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0044, quirk_calpella_no_shadow_gtt);
4541 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0062, quirk_calpella_no_shadow_gtt);
4542 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x006a, quirk_calpella_no_shadow_gtt);
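
/*
 * Example decode of the GGC values above: a ggc reading of
 * GGC_MEMORY_SIZE_2M (0x3 << 8) has GGC_MEMORY_VT_ENABLED clear, so the
 * BIOS allocated no shadow GTT and graphics translation is disabled;
 * GGC_MEMORY_SIZE_4M_VT (0xb << 8) keeps translation enabled but forces
 * strict, unbatched IOTLB flushing on these Ironlake/Calpella parts.
 */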
4543
4544 /* On Tylersburg chipsets, some BIOSes have been known to enable the
4545    ISOCH DMAR unit for the Azalia sound device, but not give it any
4546    TLB entries, which causes it to deadlock. Check for that.  We do
4547    this in a function called from init_dmars(), instead of in a PCI
4548    quirk, because we don't want to print the obnoxious "BIOS broken"
4549    message if VT-d is actually disabled.
4550 */
4551 static void __init check_tylersburg_isoch(void)
4552 {
4553         struct pci_dev *pdev;
4554         uint32_t vtisochctrl;
4555
4556         /* If there's no Azalia in the system anyway, forget it. */
4557         pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x3a3e, NULL);
4558         if (!pdev)
4559                 return;
4560         pci_dev_put(pdev);
4561
4562         /* System Management Registers. Might be hidden, in which case
4563            we can't do the sanity check. But that's OK, because the
4564            known-broken BIOSes _don't_ actually hide it, so far. */
4565         pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x342e, NULL);
4566         if (!pdev)
4567                 return;
4568
4569         if (pci_read_config_dword(pdev, 0x188, &vtisochctrl)) {
4570                 pci_dev_put(pdev);
4571                 return;
4572         }
4573
4574         pci_dev_put(pdev);
4575
4576         /* If Azalia DMA is routed to the non-isoch DMAR unit, fine. */
4577         if (vtisochctrl & 1)
4578                 return;
4579
4580         /* Drop all bits other than the number of TLB entries */
4581         vtisochctrl &= 0x1c;
4582
4583         /* If we have the recommended number of TLB entries (16), fine. */
4584         if (vtisochctrl == 0x10)
4585                 return;
4586
4587         /* Zero TLB entries? Warn and fall back to identity mapping for Azalia. */
4588         if (!vtisochctrl) {
4589                 WARN(1, "Your BIOS is broken; DMA routed to ISOCH DMAR unit but no TLB space.\n"
4590                      "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
4591                      dmi_get_system_info(DMI_BIOS_VENDOR),
4592                      dmi_get_system_info(DMI_BIOS_VERSION),
4593                      dmi_get_system_info(DMI_PRODUCT_VERSION));
4594                 iommu_identity_mapping |= IDENTMAP_AZALIA;
4595                 return;
4596         }
4597
4598         printk(KERN_WARNING "DMAR: Recommended number of TLB entries for ISOCH unit is 16; your BIOS set %d\n",
4599                vtisochctrl);
4600 }
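
/*
 * Worked example of the VTISOCHCTRL checks above (illustrative register
 * values): 0x01 returns early because Azalia DMA is routed to the
 * non-isoch DMAR unit; 0x10 is the recommended 16-entry TLB allocation
 * and is also fine; 0x00 triggers the WARN and falls back to identity
 * mapping for Azalia (IDENTMAP_AZALIA); any other allocation only earns
 * the KERN_WARNING about a non-recommended TLB size.
 */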