intel-iommu: Detect DMAR in hyperspace at probe time.
drivers/pci/dmar.c
1 /*
2  * Copyright (c) 2006, Intel Corporation.
3  *
4  * This program is free software; you can redistribute it and/or modify it
5  * under the terms and conditions of the GNU General Public License,
6  * version 2, as published by the Free Software Foundation.
7  *
8  * This program is distributed in the hope it will be useful, but WITHOUT
9  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
11  * more details.
12  *
13  * You should have received a copy of the GNU General Public License along with
14  * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
15  * Place - Suite 330, Boston, MA 02111-1307 USA.
16  *
17  * Copyright (C) 2006-2008 Intel Corporation
18  * Author: Ashok Raj <ashok.raj@intel.com>
19  * Author: Shaohua Li <shaohua.li@intel.com>
20  * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
21  *
22  * This file implements early detection/parsing of Remapping Devices
23  * reported to OS through BIOS via DMA remapping reporting (DMAR) ACPI
24  * tables.
25  *
26  * These routines are used by both DMA-remapping and Interrupt-remapping
27  */
28
29 #include <linux/pci.h>
30 #include <linux/dmar.h>
31 #include <linux/iova.h>
32 #include <linux/intel-iommu.h>
33 #include <linux/timer.h>
34 #include <linux/irq.h>
35 #include <linux/interrupt.h>
36 #include <linux/tboot.h>
37 #include <linux/dmi.h>
38
39 #define PREFIX "DMAR: "
40
41 /* No locks are needed as the DMA remapping hardware unit
42  * list is constructed at boot time, and hotplug of
43  * these units is not supported by the architecture.
44  */
45 LIST_HEAD(dmar_drhd_units);
46
47 static struct acpi_table_header * __initdata dmar_tbl;
48 static acpi_size dmar_tbl_size;
49
50 static void __init dmar_register_drhd_unit(struct dmar_drhd_unit *drhd)
51 {
52         /*
53          * add the INCLUDE_ALL unit at the tail, so a scan of the list
54          * will find it at the very end.
55          */
56         if (drhd->include_all)
57                 list_add_tail(&drhd->list, &dmar_drhd_units);
58         else
59                 list_add(&drhd->list, &dmar_drhd_units);
60 }
61
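/*
 * Resolve one ACPI device scope entry to a pci_dev by walking its
 * bus/device/function path within the given PCI segment. On success,
 * *dev holds a referenced pci_dev, or NULL if the BIOS listed a device
 * that does not exist.
 */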
62 static int __init dmar_parse_one_dev_scope(struct acpi_dmar_device_scope *scope,
63                                            struct pci_dev **dev, u16 segment)
64 {
65         struct pci_bus *bus;
66         struct pci_dev *pdev = NULL;
67         struct acpi_dmar_pci_path *path;
68         int count;
69
70         bus = pci_find_bus(segment, scope->bus);
71         path = (struct acpi_dmar_pci_path *)(scope + 1);
72         count = (scope->length - sizeof(struct acpi_dmar_device_scope))
73                 / sizeof(struct acpi_dmar_pci_path);
74
75         while (count) {
76                 if (pdev)
77                         pci_dev_put(pdev);
78                 /*
79                  * Some BIOSes list non-existent devices in the DMAR
80                  * table; just ignore them.
81                  */
82                 if (!bus) {
83                         printk(KERN_WARNING
84                         PREFIX "Device scope bus [%d] not found\n",
85                         scope->bus);
86                         break;
87                 }
88                 pdev = pci_get_slot(bus, PCI_DEVFN(path->dev, path->fn));
89                 if (!pdev) {
90                         printk(KERN_WARNING PREFIX
91                         "Device scope device [%04x:%02x:%02x.%02x] not found\n",
92                                 segment, bus->number, path->dev, path->fn);
93                         break;
94                 }
95                 path++;
96                 count--;
97                 bus = pdev->subordinate;
98         }
99         if (!pdev) {
100                 printk(KERN_WARNING PREFIX
101                 "Device scope device [%04x:%02x:%02x.%02x] not found\n",
102                 segment, scope->bus, path->dev, path->fn);
103                 *dev = NULL;
104                 return 0;
105         }
106         if ((scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT &&
107                         pdev->subordinate) || (scope->entry_type ==
108                         ACPI_DMAR_SCOPE_TYPE_BRIDGE && !pdev->subordinate)) {
109                 printk(KERN_WARNING PREFIX
110                         "Device scope type does not match for %s\n",
111                         pci_name(pdev));
112                 pci_dev_put(pdev);
113                 return -EINVAL;
114         }
115         *dev = pdev;
116         return 0;
117 }
118
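/*
 * Parse all device scope entries in [start, end): count the endpoint and
 * bridge entries, allocate the devices array, then resolve each entry
 * with dmar_parse_one_dev_scope().
 */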
119 static int __init dmar_parse_dev_scope(void *start, void *end, int *cnt,
120                                        struct pci_dev ***devices, u16 segment)
121 {
122         struct acpi_dmar_device_scope *scope;
123         void * tmp = start;
124         int index;
125         int ret;
126
127         *cnt = 0;
128         while (start < end) {
129                 scope = start;
130                 if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT ||
131                     scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE)
132                         (*cnt)++;
133                 else
134                         printk(KERN_WARNING PREFIX
135                                 "Unsupported device scope\n");
136                 start += scope->length;
137         }
138         if (*cnt == 0)
139                 return 0;
140
141         *devices = kcalloc(*cnt, sizeof(struct pci_dev *), GFP_KERNEL);
142         if (!*devices)
143                 return -ENOMEM;
144
145         start = tmp;
146         index = 0;
147         while (start < end) {
148                 scope = start;
149                 if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT ||
150                     scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE) {
151                         ret = dmar_parse_one_dev_scope(scope,
152                                 &(*devices)[index], segment);
153                         if (ret) {
154                                 kfree(*devices);
155                                 return ret;
156                         }
157                         index++;
158                 }
159                 start += scope->length;
160         }
161
162         return 0;
163 }
164
165 /**
166  * dmar_parse_one_drhd - parses exactly one DMA remapping hardware definition
167  * structure which uniquely represents one DMA remapping hardware unit
168  * present in the platform
169  */
170 static int __init
171 dmar_parse_one_drhd(struct acpi_dmar_header *header)
172 {
173         struct acpi_dmar_hardware_unit *drhd;
174         struct dmar_drhd_unit *dmaru;
175         int ret = 0;
176
177         drhd = (struct acpi_dmar_hardware_unit *)header;
178         dmaru = kzalloc(sizeof(*dmaru), GFP_KERNEL);
179         if (!dmaru)
180                 return -ENOMEM;
181
182         dmaru->hdr = header;
183         dmaru->reg_base_addr = drhd->address;
184         dmaru->segment = drhd->segment;
185         dmaru->include_all = drhd->flags & 0x1; /* BIT0: INCLUDE_ALL */
186
187         ret = alloc_iommu(dmaru);
188         if (ret) {
189                 kfree(dmaru);
190                 return ret;
191         }
192         dmar_register_drhd_unit(dmaru);
193         return 0;
194 }
195
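/*
 * Resolve the device scope of one DRHD unit to pci_dev pointers.
 * INCLUDE_ALL units carry no explicit scope, so there is nothing to do
 * for them. On failure the unit is removed from the list and freed.
 */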
196 static int __init dmar_parse_dev(struct dmar_drhd_unit *dmaru)
197 {
198         struct acpi_dmar_hardware_unit *drhd;
199         int ret = 0;
200
201         drhd = (struct acpi_dmar_hardware_unit *) dmaru->hdr;
202
203         if (dmaru->include_all)
204                 return 0;
205
206         ret = dmar_parse_dev_scope((void *)(drhd + 1),
207                                 ((void *)drhd) + drhd->header.length,
208                                 &dmaru->devices_cnt, &dmaru->devices,
209                                 drhd->segment);
210         if (ret) {
211                 list_del(&dmaru->list);
212                 kfree(dmaru);
213         }
214         return ret;
215 }
216
217 #ifdef CONFIG_DMAR
218 LIST_HEAD(dmar_rmrr_units);
219
220 static void __init dmar_register_rmrr_unit(struct dmar_rmrr_unit *rmrr)
221 {
222         list_add(&rmrr->list, &dmar_rmrr_units);
223 }
224
225
226 static int __init
227 dmar_parse_one_rmrr(struct acpi_dmar_header *header)
228 {
229         struct acpi_dmar_reserved_memory *rmrr;
230         struct dmar_rmrr_unit *rmrru;
231
232         rmrru = kzalloc(sizeof(*rmrru), GFP_KERNEL);
233         if (!rmrru)
234                 return -ENOMEM;
235
236         rmrru->hdr = header;
237         rmrr = (struct acpi_dmar_reserved_memory *)header;
238         rmrru->base_address = rmrr->base_address;
239         rmrru->end_address = rmrr->end_address;
240
241         dmar_register_rmrr_unit(rmrru);
242         return 0;
243 }
244
245 static int __init
246 rmrr_parse_dev(struct dmar_rmrr_unit *rmrru)
247 {
248         struct acpi_dmar_reserved_memory *rmrr;
249         int ret;
250
251         rmrr = (struct acpi_dmar_reserved_memory *) rmrru->hdr;
252         ret = dmar_parse_dev_scope((void *)(rmrr + 1),
253                 ((void *)rmrr) + rmrr->header.length,
254                 &rmrru->devices_cnt, &rmrru->devices, rmrr->segment);
255
256         if (ret || (rmrru->devices_cnt == 0)) {
257                 list_del(&rmrru->list);
258                 kfree(rmrru);
259         }
260         return ret;
261 }
262
263 static LIST_HEAD(dmar_atsr_units);
264
265 static int __init dmar_parse_one_atsr(struct acpi_dmar_header *hdr)
266 {
267         struct acpi_dmar_atsr *atsr;
268         struct dmar_atsr_unit *atsru;
269
270         atsr = container_of(hdr, struct acpi_dmar_atsr, header);
271         atsru = kzalloc(sizeof(*atsru), GFP_KERNEL);
272         if (!atsru)
273                 return -ENOMEM;
274
275         atsru->hdr = hdr;
276         atsru->include_all = atsr->flags & 0x1;
277
278         list_add(&atsru->list, &dmar_atsr_units);
279
280         return 0;
281 }
282
283 static int __init atsr_parse_dev(struct dmar_atsr_unit *atsru)
284 {
285         int rc;
286         struct acpi_dmar_atsr *atsr;
287
288         if (atsru->include_all)
289                 return 0;
290
291         atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
292         rc = dmar_parse_dev_scope((void *)(atsr + 1),
293                                 (void *)atsr + atsr->header.length,
294                                 &atsru->devices_cnt, &atsru->devices,
295                                 atsr->segment);
296         if (rc || !atsru->devices_cnt) {
297                 list_del(&atsru->list);
298                 kfree(atsru);
299         }
300
301         return rc;
302 }
303
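/*
 * Return 1 if @dev sits behind a root port listed in an ATSR unit on its
 * PCI segment, or if that unit has the include_all flag set; 0 otherwise.
 */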
304 int dmar_find_matched_atsr_unit(struct pci_dev *dev)
305 {
306         int i;
307         struct pci_bus *bus;
308         struct acpi_dmar_atsr *atsr;
309         struct dmar_atsr_unit *atsru;
310
311         list_for_each_entry(atsru, &dmar_atsr_units, list) {
312                 atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
313                 if (atsr->segment == pci_domain_nr(dev->bus))
314                         goto found;
315         }
316
317         return 0;
318
319 found:
320         for (bus = dev->bus; bus; bus = bus->parent) {
321                 struct pci_dev *bridge = bus->self;
322
323                 if (!bridge || !bridge->is_pcie ||
324                     bridge->pcie_type == PCI_EXP_TYPE_PCI_BRIDGE)
325                         return 0;
326
327                 if (bridge->pcie_type == PCI_EXP_TYPE_ROOT_PORT) {
328                         for (i = 0; i < atsru->devices_cnt; i++)
329                                 if (atsru->devices[i] == bridge)
330                                         return 1;
331                         break;
332                 }
333         }
334
335         if (atsru->include_all)
336                 return 1;
337
338         return 0;
339 }
340 #endif
341
342 static void __init
343 dmar_table_print_dmar_entry(struct acpi_dmar_header *header)
344 {
345         struct acpi_dmar_hardware_unit *drhd;
346         struct acpi_dmar_reserved_memory *rmrr;
347         struct acpi_dmar_atsr *atsr;
348         struct acpi_dmar_rhsa *rhsa;
349
350         switch (header->type) {
351         case ACPI_DMAR_TYPE_HARDWARE_UNIT:
352                 drhd = container_of(header, struct acpi_dmar_hardware_unit,
353                                     header);
354                 printk (KERN_INFO PREFIX
355                         "DRHD base: %#016Lx flags: %#x\n",
356                         (unsigned long long)drhd->address, drhd->flags);
357                 break;
358         case ACPI_DMAR_TYPE_RESERVED_MEMORY:
359                 rmrr = container_of(header, struct acpi_dmar_reserved_memory,
360                                     header);
361                 printk (KERN_INFO PREFIX
362                         "RMRR base: %#016Lx end: %#016Lx\n",
363                         (unsigned long long)rmrr->base_address,
364                         (unsigned long long)rmrr->end_address);
365                 break;
366         case ACPI_DMAR_TYPE_ATSR:
367                 atsr = container_of(header, struct acpi_dmar_atsr, header);
368                 printk(KERN_INFO PREFIX "ATSR flags: %#x\n", atsr->flags);
369                 break;
370         case ACPI_DMAR_HARDWARE_AFFINITY:
371                 rhsa = container_of(header, struct acpi_dmar_rhsa, header);
372                 printk(KERN_INFO PREFIX "RHSA base: %#016Lx proximity domain: %#x\n",
373                        (unsigned long long)rhsa->base_address,
374                        rhsa->proximity_domain);
375                 break;
376         }
377 }
378
379 /**
380  * dmar_table_detect - checks to see if the platform supports DMAR devices
381  */
382 static int __init dmar_table_detect(void)
383 {
384         acpi_status status = AE_OK;
385
386         /* if we can find the DMAR table, then there are DMAR devices */
387         status = acpi_get_table_with_size(ACPI_SIG_DMAR, 0,
388                                 (struct acpi_table_header **)&dmar_tbl,
389                                 &dmar_tbl_size);
390
391         if (ACPI_SUCCESS(status) && !dmar_tbl) {
392                 printk (KERN_WARNING PREFIX "Unable to map DMAR\n");
393                 status = AE_NOT_FOUND;
394         }
395
396         return (ACPI_SUCCESS(status) ? 1 : 0);
397 }
398
399 /**
400  * parse_dmar_table - parses the DMA reporting table
401  */
402 static int __init
403 parse_dmar_table(void)
404 {
405         struct acpi_table_dmar *dmar;
406         struct acpi_dmar_header *entry_header;
407         int ret = 0;
408
409         /*
410          * Do it again; the earlier dmar_tbl mapping could have been
411          * created with the fixed map.
412          */
413         dmar_table_detect();
414
415         /*
416          * ACPI tables may not be DMA protected by tboot, so use the DMAR
417          * copy saved by SINIT in SinitMleData in the TXT heap (DMA protected)
418          */
419         dmar_tbl = tboot_get_dmar_table(dmar_tbl);
420
421         dmar = (struct acpi_table_dmar *)dmar_tbl;
422         if (!dmar)
423                 return -ENODEV;
424
425         if (dmar->width < PAGE_SHIFT - 1) {
426                 printk(KERN_WARNING PREFIX "Invalid DMAR haw\n");
427                 return -EINVAL;
428         }
429
430         printk (KERN_INFO PREFIX "Host address width %d\n",
431                 dmar->width + 1);
432
433         entry_header = (struct acpi_dmar_header *)(dmar + 1);
434         while (((unsigned long)entry_header) <
435                         (((unsigned long)dmar) + dmar_tbl->length)) {
436                 /* Avoid looping forever on bad ACPI tables */
437                 if (entry_header->length == 0) {
438                         printk(KERN_WARNING PREFIX
439                                 "Invalid 0-length structure\n");
440                         ret = -EINVAL;
441                         break;
442                 }
443
444                 dmar_table_print_dmar_entry(entry_header);
445
446                 switch (entry_header->type) {
447                 case ACPI_DMAR_TYPE_HARDWARE_UNIT:
448                         ret = dmar_parse_one_drhd(entry_header);
449                         break;
450                 case ACPI_DMAR_TYPE_RESERVED_MEMORY:
451 #ifdef CONFIG_DMAR
452                         ret = dmar_parse_one_rmrr(entry_header);
453 #endif
454                         break;
455                 case ACPI_DMAR_TYPE_ATSR:
456 #ifdef CONFIG_DMAR
457                         ret = dmar_parse_one_atsr(entry_header);
458 #endif
459                         break;
460                 case ACPI_DMAR_HARDWARE_AFFINITY:
461                         /* We don't do anything with RHSA (yet?) */
462                         break;
463                 default:
464                         printk(KERN_WARNING PREFIX
465                                 "Unknown DMAR structure type %d\n",
466                                 entry_header->type);
467                         ret = 0; /* for forward compatibility */
468                         break;
469                 }
470                 if (ret)
471                         break;
472
473                 entry_header = ((void *)entry_header + entry_header->length);
474         }
475         return ret;
476 }
477
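/*
 * Return 1 if @dev or any of its parent bridges appears in @devices[],
 * 0 otherwise.
 */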
478 int dmar_pci_device_match(struct pci_dev *devices[], int cnt,
479                           struct pci_dev *dev)
480 {
481         int index;
482
483         while (dev) {
484                 for (index = 0; index < cnt; index++)
485                         if (dev == devices[index])
486                                 return 1;
487
488                 /* Check our parent */
489                 dev = dev->bus->self;
490         }
491
492         return 0;
493 }
494
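/*
 * Find the DRHD unit covering @dev: either a unit whose device scope
 * lists the device (or one of its parent bridges), or the INCLUDE_ALL
 * unit for the device's PCI segment.
 */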
495 struct dmar_drhd_unit *
496 dmar_find_matched_drhd_unit(struct pci_dev *dev)
497 {
498         struct dmar_drhd_unit *dmaru = NULL;
499         struct acpi_dmar_hardware_unit *drhd;
500
501         list_for_each_entry(dmaru, &dmar_drhd_units, list) {
502                 drhd = container_of(dmaru->hdr,
503                                     struct acpi_dmar_hardware_unit,
504                                     header);
505
506                 if (dmaru->include_all &&
507                     drhd->segment == pci_domain_nr(dev->bus))
508                         return dmaru;
509
510                 if (dmar_pci_device_match(dmaru->devices,
511                                           dmaru->devices_cnt, dev))
512                         return dmaru;
513         }
514
515         return NULL;
516 }
517
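/*
 * Second-stage initialization, run once PCI devices can be looked up:
 * resolve the device scopes of all DRHD, RMRR and ATSR units to pci_dev
 * pointers.
 */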
518 int __init dmar_dev_scope_init(void)
519 {
520         struct dmar_drhd_unit *drhd, *drhd_n;
521         int ret = -ENODEV;
522
523         list_for_each_entry_safe(drhd, drhd_n, &dmar_drhd_units, list) {
524                 ret = dmar_parse_dev(drhd);
525                 if (ret)
526                         return ret;
527         }
528
529 #ifdef CONFIG_DMAR
530         {
531                 struct dmar_rmrr_unit *rmrr, *rmrr_n;
532                 struct dmar_atsr_unit *atsr, *atsr_n;
533
534                 list_for_each_entry_safe(rmrr, rmrr_n, &dmar_rmrr_units, list) {
535                         ret = rmrr_parse_dev(rmrr);
536                         if (ret)
537                                 return ret;
538                 }
539
540                 list_for_each_entry_safe(atsr, atsr_n, &dmar_atsr_units, list) {
541                         ret = atsr_parse_dev(atsr);
542                         if (ret)
543                                 return ret;
544                 }
545         }
546 #endif
547
548         return ret;
549 }
550
551
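/*
 * Parse the DMAR table and build the DRHD/RMRR/ATSR lists. Safe to call
 * more than once; only the first call does any work.
 */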
552 int __init dmar_table_init(void)
553 {
554         static int dmar_table_initialized;
555         int ret;
556
557         if (dmar_table_initialized)
558                 return 0;
559
560         dmar_table_initialized = 1;
561
562         ret = parse_dmar_table();
563         if (ret) {
564                 if (ret != -ENODEV)
565                         printk(KERN_INFO PREFIX "parse DMAR table failure.\n");
566                 return ret;
567         }
568
569         if (list_empty(&dmar_drhd_units)) {
570                 printk(KERN_INFO PREFIX "No DMAR devices found\n");
571                 return -ENODEV;
572         }
573
574 #ifdef CONFIG_DMAR
575         if (list_empty(&dmar_rmrr_units))
576                 printk(KERN_INFO PREFIX "No RMRR found\n");
577
578         if (list_empty(&dmar_atsr_units))
579                 printk(KERN_INFO PREFIX "No ATSR found\n");
580 #endif
581
582         return 0;
583 }
584
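/*
 * Sanity-check the DRHD entries in the DMAR table: a register base of
 * zero, or one whose CAP/ECAP registers read back as all ones, marks a
 * broken BIOS and causes DMA remapping to be disabled. Returns 1 if all
 * entries look usable, 0 otherwise.
 */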
585 int __init check_zero_address(void)
586 {
587         struct acpi_table_dmar *dmar;
588         struct acpi_dmar_header *entry_header;
589         struct acpi_dmar_hardware_unit *drhd;
590
591         dmar = (struct acpi_table_dmar *)dmar_tbl;
592         entry_header = (struct acpi_dmar_header *)(dmar + 1);
593
594         while (((unsigned long)entry_header) <
595                         (((unsigned long)dmar) + dmar_tbl->length)) {
596                 /* Avoid looping forever on bad ACPI tables */
597                 if (entry_header->length == 0) {
598                         printk(KERN_WARNING PREFIX
599                                 "Invalid 0-length structure\n");
600                         return 0;
601                 }
602
603                 if (entry_header->type == ACPI_DMAR_TYPE_HARDWARE_UNIT) {
604                         void __iomem *addr;
605                         u64 cap, ecap;
606
607                         drhd = (void *)entry_header;
608                         if (!drhd->address) {
609                                 /* Promote an attitude of violence to a BIOS engineer today */
610                                 WARN(1, "Your BIOS is broken; DMAR reported at address zero!\n"
611                                      "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
612                                      dmi_get_system_info(DMI_BIOS_VENDOR),
613                                      dmi_get_system_info(DMI_BIOS_VERSION),
614                                      dmi_get_system_info(DMI_PRODUCT_VERSION));
615                                 goto failed;
616                         }
617
618                         addr = early_ioremap(drhd->address, VTD_PAGE_SIZE);
619                         if (!addr) {
620                                 printk(KERN_ERR "IOMMU: can't validate: %llx\n", drhd->address);
621                                 goto failed;
622                         }
623                         cap = dmar_readq(addr + DMAR_CAP_REG);
624                         ecap = dmar_readq(addr + DMAR_ECAP_REG);
625                         early_iounmap(addr, VTD_PAGE_SIZE);
626                         if (cap == (uint64_t)-1 && ecap == (uint64_t)-1) {
627                                 /* Promote an attitude of violence to a BIOS engineer today */
628                                 WARN(1, "Your BIOS is broken; DMAR reported at address %llx returns all ones!\n"
629                                      "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
630                                       drhd->address,
631                                       dmi_get_system_info(DMI_BIOS_VENDOR),
632                                       dmi_get_system_info(DMI_BIOS_VERSION),
633                                       dmi_get_system_info(DMI_PRODUCT_VERSION));
634                                 goto failed;
635                         }
636                 }
637
638                 entry_header = ((void *)entry_header + entry_header->length);
639         }
640         return 1;
641
642 failed:
643 #ifdef CONFIG_DMAR
644         dmar_disabled = 1;
645 #endif
646         return 0;
647 }
648
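/*
 * Early boot probe: map the DMAR table, validate the DRHD register
 * addresses and, if the table looks usable, set iommu_detected. The
 * table mapping is dropped again here and re-established later by
 * parse_dmar_table().
 */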
649 void __init detect_intel_iommu(void)
650 {
651         int ret;
652
653         ret = dmar_table_detect();
654         if (ret)
655                 ret = check_zero_address();
656         {
657 #ifdef CONFIG_INTR_REMAP
658                 struct acpi_table_dmar *dmar;
659                 /*
660                  * For now we disable DMA remapping when interrupt
661                  * remapping is enabled. Once support for queued
662                  * invalidation of the IOTLB is added, this will no
663                  * longer be needed.
664                  */
665                 dmar = (struct acpi_table_dmar *) dmar_tbl;
666                 if (ret && cpu_has_x2apic && dmar->flags & 0x1)
667                         printk(KERN_INFO
668                                "Queued invalidation will be enabled to support "
669                                "x2apic and Intr-remapping.\n");
670 #endif
671 #ifdef CONFIG_DMAR
672                 if (ret && !no_iommu && !iommu_detected && !swiotlb &&
673                     !dmar_disabled)
674                         iommu_detected = 1;
675 #endif
676         }
677         early_acpi_os_unmap_memory(dmar_tbl, dmar_tbl_size);
678         dmar_tbl = NULL;
679 }
680
681
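/*
 * Map a DRHD unit's register space and initialize its struct intel_iommu:
 * read CAP/ECAP, compute the supported AGAW values (with CONFIG_DMAR),
 * and remap the full register range if it spans more than one page.
 */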
682 int alloc_iommu(struct dmar_drhd_unit *drhd)
683 {
684         struct intel_iommu *iommu;
685         int map_size;
686         u32 ver;
687         static int iommu_allocated = 0;
688         int agaw = 0;
689         int msagaw = 0;
690
691         iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);
692         if (!iommu)
693                 return -ENOMEM;
694
695         iommu->seq_id = iommu_allocated++;
696         sprintf (iommu->name, "dmar%d", iommu->seq_id);
697
698         iommu->reg = ioremap(drhd->reg_base_addr, VTD_PAGE_SIZE);
699         if (!iommu->reg) {
700                 printk(KERN_ERR "IOMMU: can't map the region\n");
701                 goto error;
702         }
703         iommu->cap = dmar_readq(iommu->reg + DMAR_CAP_REG);
704         iommu->ecap = dmar_readq(iommu->reg + DMAR_ECAP_REG);
705
706         if (iommu->cap == (uint64_t)-1 && iommu->ecap == (uint64_t)-1) {
707                 /* Promote an attitude of violence to a BIOS engineer today */
708                 WARN(1, "Your BIOS is broken; DMAR reported at address %llx returns all ones!\n"
709                      "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
710                      drhd->reg_base_addr,
711                      dmi_get_system_info(DMI_BIOS_VENDOR),
712                      dmi_get_system_info(DMI_BIOS_VERSION),
713                      dmi_get_system_info(DMI_PRODUCT_VERSION));
714                 goto err_unmap;
715         }
716
717 #ifdef CONFIG_DMAR
718         agaw = iommu_calculate_agaw(iommu);
719         if (agaw < 0) {
720                 printk(KERN_ERR
721                        "Cannot get a valid agaw for iommu (seq_id = %d)\n",
722                        iommu->seq_id);
723                 goto err_unmap;
724         }
725         msagaw = iommu_calculate_max_sagaw(iommu);
726         if (msagaw < 0) {
727                 printk(KERN_ERR
728                         "Cannot get a valid max agaw for iommu (seq_id = %d)\n",
729                         iommu->seq_id);
730                 goto err_unmap;
731         }
732 #endif
733         iommu->agaw = agaw;
734         iommu->msagaw = msagaw;
735
736         /* the registers might be more than one page */
737         map_size = max_t(int, ecap_max_iotlb_offset(iommu->ecap),
738                 cap_max_fault_reg_offset(iommu->cap));
739         map_size = VTD_PAGE_ALIGN(map_size);
740         if (map_size > VTD_PAGE_SIZE) {
741                 iounmap(iommu->reg);
742                 iommu->reg = ioremap(drhd->reg_base_addr, map_size);
743                 if (!iommu->reg) {
744                         printk(KERN_ERR "IOMMU: can't map the region\n");
745                         goto error;
746                 }
747         }
748
749         ver = readl(iommu->reg + DMAR_VER_REG);
750         pr_info("IOMMU %llx: ver %d:%d cap %llx ecap %llx\n",
751                 (unsigned long long)drhd->reg_base_addr,
752                 DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver),
753                 (unsigned long long)iommu->cap,
754                 (unsigned long long)iommu->ecap);
755
756         spin_lock_init(&iommu->register_lock);
757
758         drhd->iommu = iommu;
759         return 0;
760
761  err_unmap:
762         iounmap(iommu->reg);
763  error:
764         kfree(iommu);
765         return -1;
766 }
767
768 void free_iommu(struct intel_iommu *iommu)
769 {
770         if (!iommu)
771                 return;
772
773 #ifdef CONFIG_DMAR
774         free_dmar_iommu(iommu);
775 #endif
776
777         if (iommu->reg)
778                 iounmap(iommu->reg);
779         kfree(iommu);
780 }
781
782 /*
783  * Reclaim all the submitted descriptors which have completed their work.
784  */
785 static inline void reclaim_free_desc(struct q_inval *qi)
786 {
787         while (qi->desc_status[qi->free_tail] == QI_DONE ||
788                qi->desc_status[qi->free_tail] == QI_ABORT) {
789                 qi->desc_status[qi->free_tail] = QI_FREE;
790                 qi->free_tail = (qi->free_tail + 1) % QI_LENGTH;
791                 qi->free_cnt++;
792         }
793 }
794
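/*
 * Check the fault status register for queued invalidation errors (IQE,
 * ITE, ICE) affecting the descriptor at @index. Returns -EAGAIN if the
 * caller should resubmit, -EINVAL for an invalid descriptor, 0 otherwise.
 */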
795 static int qi_check_fault(struct intel_iommu *iommu, int index)
796 {
797         u32 fault;
798         int head, tail;
799         struct q_inval *qi = iommu->qi;
800         int wait_index = (index + 1) % QI_LENGTH;
801
802         if (qi->desc_status[wait_index] == QI_ABORT)
803                 return -EAGAIN;
804
805         fault = readl(iommu->reg + DMAR_FSTS_REG);
806
807         /*
808          * If IQE happens, the head points to the descriptor associated
809          * with the error. No new descriptors are fetched until the IQE
810          * is cleared.
811          */
812         if (fault & DMA_FSTS_IQE) {
813                 head = readl(iommu->reg + DMAR_IQH_REG);
814                 if ((head >> DMAR_IQ_SHIFT) == index) {
815                         printk(KERN_ERR "VT-d detected invalid descriptor: "
816                                 "low=%llx, high=%llx\n",
817                                 (unsigned long long)qi->desc[index].low,
818                                 (unsigned long long)qi->desc[index].high);
819                         memcpy(&qi->desc[index], &qi->desc[wait_index],
820                                         sizeof(struct qi_desc));
821                         __iommu_flush_cache(iommu, &qi->desc[index],
822                                         sizeof(struct qi_desc));
823                         writel(DMA_FSTS_IQE, iommu->reg + DMAR_FSTS_REG);
824                         return -EINVAL;
825                 }
826         }
827
828         /*
829          * If ITE happens, all pending wait_desc commands are aborted.
830          * No new descriptors are fetched until the ITE is cleared.
831          */
832         if (fault & DMA_FSTS_ITE) {
833                 head = readl(iommu->reg + DMAR_IQH_REG);
834                 head = ((head >> DMAR_IQ_SHIFT) - 1 + QI_LENGTH) % QI_LENGTH;
835                 head |= 1;
836                 tail = readl(iommu->reg + DMAR_IQT_REG);
837                 tail = ((tail >> DMAR_IQ_SHIFT) - 1 + QI_LENGTH) % QI_LENGTH;
838
839                 writel(DMA_FSTS_ITE, iommu->reg + DMAR_FSTS_REG);
840
841                 do {
842                         if (qi->desc_status[head] == QI_IN_USE)
843                                 qi->desc_status[head] = QI_ABORT;
844                         head = (head - 2 + QI_LENGTH) % QI_LENGTH;
845                 } while (head != tail);
846
847                 if (qi->desc_status[wait_index] == QI_ABORT)
848                         return -EAGAIN;
849         }
850
851         if (fault & DMA_FSTS_ICE)
852                 writel(DMA_FSTS_ICE, iommu->reg + DMAR_FSTS_REG);
853
854         return 0;
855 }
856
857 /*
858  * Submit the queued invalidation descriptor to the remapping
859  * hardware unit and wait for its completion.
860  */
861 int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu)
862 {
863         int rc;
864         struct q_inval *qi = iommu->qi;
865         struct qi_desc *hw, wait_desc;
866         int wait_index, index;
867         unsigned long flags;
868
869         if (!qi)
870                 return 0;
871
872         hw = qi->desc;
873
874 restart:
875         rc = 0;
876
877         spin_lock_irqsave(&qi->q_lock, flags);
878         while (qi->free_cnt < 3) {
879                 spin_unlock_irqrestore(&qi->q_lock, flags);
880                 cpu_relax();
881                 spin_lock_irqsave(&qi->q_lock, flags);
882         }
883
884         index = qi->free_head;
885         wait_index = (index + 1) % QI_LENGTH;
886
887         qi->desc_status[index] = qi->desc_status[wait_index] = QI_IN_USE;
888
889         hw[index] = *desc;
890
891         wait_desc.low = QI_IWD_STATUS_DATA(QI_DONE) |
892                         QI_IWD_STATUS_WRITE | QI_IWD_TYPE;
893         wait_desc.high = virt_to_phys(&qi->desc_status[wait_index]);
894
895         hw[wait_index] = wait_desc;
896
897         __iommu_flush_cache(iommu, &hw[index], sizeof(struct qi_desc));
898         __iommu_flush_cache(iommu, &hw[wait_index], sizeof(struct qi_desc));
899
900         qi->free_head = (qi->free_head + 2) % QI_LENGTH;
901         qi->free_cnt -= 2;
902
903         /*
904          * update the HW tail register indicating the presence of
905          * new descriptors.
906          */
907         writel(qi->free_head << DMAR_IQ_SHIFT, iommu->reg + DMAR_IQT_REG);
908
909         while (qi->desc_status[wait_index] != QI_DONE) {
910                 /*
911                  * We leave interrupts disabled to prevent the interrupt
912                  * context from queueing another cmd while a cmd is already
913                  * submitted and waiting for completion on this CPU. This
914                  * avoids a deadlock where the interrupt context could wait
915                  * indefinitely for free slots in the queue.
916                  */
917                 rc = qi_check_fault(iommu, index);
918                 if (rc)
919                         break;
920
921                 spin_unlock(&qi->q_lock);
922                 cpu_relax();
923                 spin_lock(&qi->q_lock);
924         }
925
926         qi->desc_status[index] = QI_DONE;
927
928         reclaim_free_desc(qi);
929         spin_unlock_irqrestore(&qi->q_lock, flags);
930
931         if (rc == -EAGAIN)
932                 goto restart;
933
934         return rc;
935 }
936
937 /*
938  * Flush the global interrupt entry cache.
939  */
940 void qi_global_iec(struct intel_iommu *iommu)
941 {
942         struct qi_desc desc;
943
944         desc.low = QI_IEC_TYPE;
945         desc.high = 0;
946
947         /* should never fail */
948         qi_submit_sync(&desc, iommu);
949 }
950
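/* Queue a context-cache invalidation for the given domain/source-id. */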
951 void qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid, u8 fm,
952                       u64 type)
953 {
954         struct qi_desc desc;
955
956         desc.low = QI_CC_FM(fm) | QI_CC_SID(sid) | QI_CC_DID(did)
957                         | QI_CC_GRAN(type) | QI_CC_TYPE;
958         desc.high = 0;
959
960         qi_submit_sync(&desc, iommu);
961 }
962
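/* Queue an IOTLB invalidation for @did covering 2^size_order pages at @addr. */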
963 void qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
964                     unsigned int size_order, u64 type)
965 {
966         u8 dw = 0, dr = 0;
967
968         struct qi_desc desc;
969         int ih = 0;
970
971         if (cap_write_drain(iommu->cap))
972                 dw = 1;
973
974         if (cap_read_drain(iommu->cap))
975                 dr = 1;
976
977         desc.low = QI_IOTLB_DID(did) | QI_IOTLB_DR(dr) | QI_IOTLB_DW(dw)
978                 | QI_IOTLB_GRAN(type) | QI_IOTLB_TYPE;
979         desc.high = QI_IOTLB_ADDR(addr) | QI_IOTLB_IH(ih)
980                 | QI_IOTLB_AM(size_order);
981
982         qi_submit_sync(&desc, iommu);
983 }
984
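/* Queue a device-IOTLB (ATS) invalidation for the endpoint @sid. */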
985 void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 qdep,
986                         u64 addr, unsigned mask)
987 {
988         struct qi_desc desc;
989
990         if (mask) {
991                 BUG_ON(addr & ((1 << (VTD_PAGE_SHIFT + mask)) - 1));
992                 addr |= (1 << (VTD_PAGE_SHIFT + mask - 1)) - 1;
993                 desc.high = QI_DEV_IOTLB_ADDR(addr) | QI_DEV_IOTLB_SIZE;
994         } else
995                 desc.high = QI_DEV_IOTLB_ADDR(addr);
996
997         if (qdep >= QI_DEV_IOTLB_MAX_INVS)
998                 qdep = 0;
999
1000         desc.low = QI_DEV_IOTLB_SID(sid) | QI_DEV_IOTLB_QDEP(qdep) |
1001                    QI_DIOTLB_TYPE;
1002
1003         qi_submit_sync(&desc, iommu);
1004 }
1005
1006 /*
1007  * Disable Queued Invalidation interface.
1008  */
1009 void dmar_disable_qi(struct intel_iommu *iommu)
1010 {
1011         unsigned long flags;
1012         u32 sts;
1013         cycles_t start_time = get_cycles();
1014
1015         if (!ecap_qis(iommu->ecap))
1016                 return;
1017
1018         spin_lock_irqsave(&iommu->register_lock, flags);
1019
1020         sts =  dmar_readq(iommu->reg + DMAR_GSTS_REG);
1021         if (!(sts & DMA_GSTS_QIES))
1022                 goto end;
1023
1024         /*
1025          * Give the HW a chance to complete the pending invalidation requests.
1026          */
1027         while ((readl(iommu->reg + DMAR_IQT_REG) !=
1028                 readl(iommu->reg + DMAR_IQH_REG)) &&
1029                 (DMAR_OPERATION_TIMEOUT > (get_cycles() - start_time)))
1030                 cpu_relax();
1031
1032         iommu->gcmd &= ~DMA_GCMD_QIE;
1033         writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
1034
1035         IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl,
1036                       !(sts & DMA_GSTS_QIES), sts);
1037 end:
1038         spin_unlock_irqrestore(&iommu->register_lock, flags);
1039 }
1040
1041 /*
1042  * Enable queued invalidation.
1043  */
1044 static void __dmar_enable_qi(struct intel_iommu *iommu)
1045 {
1046         u32 sts;
1047         unsigned long flags;
1048         struct q_inval *qi = iommu->qi;
1049
1050         qi->free_head = qi->free_tail = 0;
1051         qi->free_cnt = QI_LENGTH;
1052
1053         spin_lock_irqsave(&iommu->register_lock, flags);
1054
1055         /* write zero to the tail reg */
1056         writel(0, iommu->reg + DMAR_IQT_REG);
1057
1058         dmar_writeq(iommu->reg + DMAR_IQA_REG, virt_to_phys(qi->desc));
1059
1060         iommu->gcmd |= DMA_GCMD_QIE;
1061         writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
1062
1063         /* Make sure hardware completes it */
1064         IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl, (sts & DMA_GSTS_QIES), sts);
1065
1066         spin_unlock_irqrestore(&iommu->register_lock, flags);
1067 }
1068
1069 /*
1070  * Enable Queued Invalidation interface. This is required to support
1071  * interrupt remapping. It is also used by DMA remapping, where it
1072  * replaces register-based IOTLB invalidation.
1073  */
1074 int dmar_enable_qi(struct intel_iommu *iommu)
1075 {
1076         struct q_inval *qi;
1077
1078         if (!ecap_qis(iommu->ecap))
1079                 return -ENOENT;
1080
1081         /*
1082          * Queued invalidation is already set up and enabled.
1083          */
1084         if (iommu->qi)
1085                 return 0;
1086
1087         iommu->qi = kmalloc(sizeof(*qi), GFP_ATOMIC);
1088         if (!iommu->qi)
1089                 return -ENOMEM;
1090
1091         qi = iommu->qi;
1092
1093         qi->desc = (void *)(get_zeroed_page(GFP_ATOMIC));
1094         if (!qi->desc) {
1095                 kfree(qi);
1096                 iommu->qi = NULL;
1097                 return -ENOMEM;
1098         }
1099
1100         qi->desc_status = kzalloc(QI_LENGTH * sizeof(int), GFP_ATOMIC);
1101         if (!qi->desc_status) {
1102                 free_page((unsigned long) qi->desc);
1103                 kfree(qi);
1104                 iommu->qi = NULL;
1105                 return -ENOMEM;
1106         }
1107
1108         qi->free_head = qi->free_tail = 0;
1109         qi->free_cnt = QI_LENGTH;
1110
1111         spin_lock_init(&qi->q_lock);
1112
1113         __dmar_enable_qi(iommu);
1114
1115         return 0;
1116 }
1117
1118 /* iommu interrupt handling. Most of it is MSI-like. */
1119
1120 enum faulttype {
1121         DMA_REMAP,
1122         INTR_REMAP,
1123         UNKNOWN,
1124 };
1125
1126 static const char *dma_remap_fault_reasons[] =
1127 {
1128         "Software",
1129         "Present bit in root entry is clear",
1130         "Present bit in context entry is clear",
1131         "Invalid context entry",
1132         "Access beyond MGAW",
1133         "PTE Write access is not set",
1134         "PTE Read access is not set",
1135         "Next page table ptr is invalid",
1136         "Root table address invalid",
1137         "Context table ptr is invalid",
1138         "non-zero reserved fields in RTP",
1139         "non-zero reserved fields in CTP",
1140         "non-zero reserved fields in PTE",
1141 };
1142
1143 static const char *intr_remap_fault_reasons[] =
1144 {
1145         "Detected reserved fields in the decoded interrupt-remapped request",
1146         "Interrupt index exceeded the interrupt-remapping table size",
1147         "Present field in the IRTE entry is clear",
1148         "Error accessing interrupt-remapping table pointed by IRTA_REG",
1149         "Detected reserved fields in the IRTE entry",
1150         "Blocked a compatibility format interrupt request",
1151         "Blocked an interrupt request due to source-id verification failure",
1152 };
1153
1154 #define MAX_FAULT_REASON_IDX    (ARRAY_SIZE(fault_reason_strings) - 1)
1155
1156 const char *dmar_get_fault_reason(u8 fault_reason, int *fault_type)
1157 {
1158         if (fault_reason >= 0x20 && (fault_reason < 0x20 +
1159                                      ARRAY_SIZE(intr_remap_fault_reasons))) {
1160                 *fault_type = INTR_REMAP;
1161                 return intr_remap_fault_reasons[fault_reason - 0x20];
1162         } else if (fault_reason < ARRAY_SIZE(dma_remap_fault_reasons)) {
1163                 *fault_type = DMA_REMAP;
1164                 return dma_remap_fault_reasons[fault_reason];
1165         } else {
1166                 *fault_type = UNKNOWN;
1167                 return "Unknown";
1168         }
1169 }
1170
1171 void dmar_msi_unmask(unsigned int irq)
1172 {
1173         struct intel_iommu *iommu = get_irq_data(irq);
1174         unsigned long flag;
1175
1176         /* unmask it */
1177         spin_lock_irqsave(&iommu->register_lock, flag);
1178         writel(0, iommu->reg + DMAR_FECTL_REG);
1179         /* Read a reg to force flush the posted write */
1180         readl(iommu->reg + DMAR_FECTL_REG);
1181         spin_unlock_irqrestore(&iommu->register_lock, flag);
1182 }
1183
1184 void dmar_msi_mask(unsigned int irq)
1185 {
1186         unsigned long flag;
1187         struct intel_iommu *iommu = get_irq_data(irq);
1188
1189         /* mask it */
1190         spin_lock_irqsave(&iommu->register_lock, flag);
1191         writel(DMA_FECTL_IM, iommu->reg + DMAR_FECTL_REG);
1192         /* Read a reg to force flush the posted write */
1193         readl(iommu->reg + DMAR_FECTL_REG);
1194         spin_unlock_irqrestore(&iommu->register_lock, flag);
1195 }
1196
1197 void dmar_msi_write(int irq, struct msi_msg *msg)
1198 {
1199         struct intel_iommu *iommu = get_irq_data(irq);
1200         unsigned long flag;
1201
1202         spin_lock_irqsave(&iommu->register_lock, flag);
1203         writel(msg->data, iommu->reg + DMAR_FEDATA_REG);
1204         writel(msg->address_lo, iommu->reg + DMAR_FEADDR_REG);
1205         writel(msg->address_hi, iommu->reg + DMAR_FEUADDR_REG);
1206         spin_unlock_irqrestore(&iommu->register_lock, flag);
1207 }
1208
1209 void dmar_msi_read(int irq, struct msi_msg *msg)
1210 {
1211         struct intel_iommu *iommu = get_irq_data(irq);
1212         unsigned long flag;
1213
1214         spin_lock_irqsave(&iommu->register_lock, flag);
1215         msg->data = readl(iommu->reg + DMAR_FEDATA_REG);
1216         msg->address_lo = readl(iommu->reg + DMAR_FEADDR_REG);
1217         msg->address_hi = readl(iommu->reg + DMAR_FEUADDR_REG);
1218         spin_unlock_irqrestore(&iommu->register_lock, flag);
1219 }
1220
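/* Decode and log a single primary fault record. */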
1221 static int dmar_fault_do_one(struct intel_iommu *iommu, int type,
1222                 u8 fault_reason, u16 source_id, unsigned long long addr)
1223 {
1224         const char *reason;
1225         int fault_type;
1226
1227         reason = dmar_get_fault_reason(fault_reason, &fault_type);
1228
1229         if (fault_type == INTR_REMAP)
1230                 printk(KERN_ERR "INTR-REMAP: Request device [%02x:%02x.%d] "
1231                        "fault index %llx\n"
1232                         "INTR-REMAP:[fault reason %02d] %s\n",
1233                         (source_id >> 8), PCI_SLOT(source_id & 0xFF),
1234                         PCI_FUNC(source_id & 0xFF), addr >> 48,
1235                         fault_reason, reason);
1236         else
1237                 printk(KERN_ERR
1238                        "DMAR:[%s] Request device [%02x:%02x.%d] "
1239                        "fault addr %llx\n"
1240                        "DMAR:[fault reason %02d] %s\n",
1241                        (type ? "DMA Read" : "DMA Write"),
1242                        (source_id >> 8), PCI_SLOT(source_id & 0xFF),
1243                        PCI_FUNC(source_id & 0xFF), addr, fault_reason, reason);
1244         return 0;
1245 }
1246
1247 #define PRIMARY_FAULT_REG_LEN (16)
1248 irqreturn_t dmar_fault(int irq, void *dev_id)
1249 {
1250         struct intel_iommu *iommu = dev_id;
1251         int reg, fault_index;
1252         u32 fault_status;
1253         unsigned long flag;
1254
1255         spin_lock_irqsave(&iommu->register_lock, flag);
1256         fault_status = readl(iommu->reg + DMAR_FSTS_REG);
1257         if (fault_status)
1258                 printk(KERN_ERR "DRHD: handling fault status reg %x\n",
1259                        fault_status);
1260
1261         /* TBD: ignore advanced fault log currently */
1262         if (!(fault_status & DMA_FSTS_PPF))
1263                 goto clear_rest;
1264
1265         fault_index = dma_fsts_fault_record_index(fault_status);
1266         reg = cap_fault_reg_offset(iommu->cap);
1267         while (1) {
1268                 u8 fault_reason;
1269                 u16 source_id;
1270                 u64 guest_addr;
1271                 int type;
1272                 u32 data;
1273
1274                 /* highest 32 bits */
1275                 data = readl(iommu->reg + reg +
1276                                 fault_index * PRIMARY_FAULT_REG_LEN + 12);
1277                 if (!(data & DMA_FRCD_F))
1278                         break;
1279
1280                 fault_reason = dma_frcd_fault_reason(data);
1281                 type = dma_frcd_type(data);
1282
1283                 data = readl(iommu->reg + reg +
1284                                 fault_index * PRIMARY_FAULT_REG_LEN + 8);
1285                 source_id = dma_frcd_source_id(data);
1286
1287                 guest_addr = dmar_readq(iommu->reg + reg +
1288                                 fault_index * PRIMARY_FAULT_REG_LEN);
1289                 guest_addr = dma_frcd_page_addr(guest_addr);
1290                 /* clear the fault */
1291                 writel(DMA_FRCD_F, iommu->reg + reg +
1292                         fault_index * PRIMARY_FAULT_REG_LEN + 12);
1293
1294                 spin_unlock_irqrestore(&iommu->register_lock, flag);
1295
1296                 dmar_fault_do_one(iommu, type, fault_reason,
1297                                 source_id, guest_addr);
1298
1299                 fault_index++;
1300                 if (fault_index >= cap_num_fault_regs(iommu->cap))
1301                         fault_index = 0;
1302                 spin_lock_irqsave(&iommu->register_lock, flag);
1303         }
1304 clear_rest:
1305         /* clear all the other faults */
1306         fault_status = readl(iommu->reg + DMAR_FSTS_REG);
1307         writel(fault_status, iommu->reg + DMAR_FSTS_REG);
1308
1309         spin_unlock_irqrestore(&iommu->register_lock, flag);
1310         return IRQ_HANDLED;
1311 }
1312
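/*
 * Allocate an IRQ for the IOMMU's fault-reporting interrupt and register
 * dmar_fault() as its handler.
 */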
1313 int dmar_set_interrupt(struct intel_iommu *iommu)
1314 {
1315         int irq, ret;
1316
1317         /*
1318          * Check if the fault interrupt is already initialized.
1319          */
1320         if (iommu->irq)
1321                 return 0;
1322
1323         irq = create_irq();
1324         if (!irq) {
1325                 printk(KERN_ERR "IOMMU: no free vectors\n");
1326                 return -EINVAL;
1327         }
1328
1329         set_irq_data(irq, iommu);
1330         iommu->irq = irq;
1331
1332         ret = arch_setup_dmar_msi(irq);
1333         if (ret) {
1334                 set_irq_data(irq, NULL);
1335                 iommu->irq = 0;
1336                 destroy_irq(irq);
1337                 return ret;
1338         }
1339
1340         ret = request_irq(irq, dmar_fault, 0, iommu->name, iommu);
1341         if (ret)
1342                 printk(KERN_ERR "IOMMU: can't request irq\n");
1343         return ret;
1344 }
1345
1346 int __init enable_drhd_fault_handling(void)
1347 {
1348         struct dmar_drhd_unit *drhd;
1349
1350         /*
1351          * Enable fault control interrupt.
1352          */
1353         for_each_drhd_unit(drhd) {
1354                 int ret;
1355                 struct intel_iommu *iommu = drhd->iommu;
1356                 ret = dmar_set_interrupt(iommu);
1357
1358                 if (ret) {
1359                         printk(KERN_ERR "DRHD %Lx: failed to enable fault "
1360                                "interrupt, ret %d\n",
1361                                (unsigned long long)drhd->reg_base_addr, ret);
1362                         return -1;
1363                 }
1364         }
1365
1366         return 0;
1367 }
1368
1369 /*
1370  * Re-enable Queued Invalidation interface.
1371  */
1372 int dmar_reenable_qi(struct intel_iommu *iommu)
1373 {
1374         if (!ecap_qis(iommu->ecap))
1375                 return -ENOENT;
1376
1377         if (!iommu->qi)
1378                 return -ENOENT;
1379
1380         /*
1381          * First disable queued invalidation.
1382          */
1383         dmar_disable_qi(iommu);
1384         /*
1385          * Then enable queued invalidation again. Since there is no pending
1386          * invalidation requests now, it's safe to re-enable queued
1387          * invalidation.
1388          */
1389         __dmar_enable_qi(iommu);
1390
1391         return 0;
1392 }
1393
1394 /*
1395  * Check interrupt remapping support in DMAR table description.
1396  */
1397 int dmar_ir_support(void)
1398 {
1399         struct acpi_table_dmar *dmar = (struct acpi_table_dmar *)dmar_tbl;
1400
1401         return dmar ? (dmar->flags & 0x1) : 0;
1402 }