From: Joerg Roedel
Date: Thu, 28 May 2009 16:23:56 +0000 (+0200)
Subject: Merge branches 'amd-iommu/fixes', 'amd-iommu/debug', 'amd-iommu/suspend-resume' and...
X-Git-Tag: firefly_0821_release~13675^2~13
X-Git-Url: http://demsky.eecs.uci.edu/git/?a=commitdiff_plain;h=83cce2b69eaa4bc7535f98f75b79397baf277470;p=firefly-linux-kernel-4.4.55.git

Merge branches 'amd-iommu/fixes', 'amd-iommu/debug', 'amd-iommu/suspend-resume' and 'amd-iommu/extended-allocator' into amd-iommu/2.6.31

Conflicts:
	arch/x86/kernel/amd_iommu.c
	arch/x86/kernel/amd_iommu_init.c
---

83cce2b69eaa4bc7535f98f75b79397baf277470
diff --cc arch/x86/include/asm/amd_iommu_types.h
index 95c8cd9d22b5,89dfb3793edd,cf5ef172cfca,4ff4cf1f0809..0c878caaa0a2
--- a/arch/x86/include/asm/amd_iommu_types.h
+++ b/arch/x86/include/asm/amd_iommu_types.h
@@@@@ -194,7 -194,13 -194,15 -194,14 +194,28 @@@@@
    #define PD_DMA_OPS_MASK		(1UL << 0) /* domain used for dma_ops */
    #define PD_DEFAULT_MASK		(1UL << 1) /* domain is a default dma_ops
    					      domain for an IOMMU */
+ ++extern bool amd_iommu_dump;
+ ++#define DUMP_printk(format, arg...)					\
+ ++	do {								\
+ ++		if (amd_iommu_dump)					\
+ ++			printk(KERN_INFO "AMD IOMMU: " format, ## arg);	\
+ ++	} while(0);
+ +
++ +/*
++ + * Make iterating over all IOMMUs easier
++ + */
++ +#define for_each_iommu(iommu) \
++ +	list_for_each_entry((iommu), &amd_iommu_list, list)
++ +#define for_each_iommu_safe(iommu, next) \
++ +	list_for_each_entry_safe((iommu), (next), &amd_iommu_list, list)
++
+++ #define APERTURE_RANGE_SHIFT	27	/* 128 MB */
+++ #define APERTURE_RANGE_SIZE	(1ULL << APERTURE_RANGE_SHIFT)
+++ #define APERTURE_RANGE_PAGES	(APERTURE_RANGE_SIZE >> PAGE_SHIFT)
+++ #define APERTURE_MAX_RANGES	32	/* allows 4GB of DMA address space */
+++ #define APERTURE_RANGE_INDEX(a)	((a) >> APERTURE_RANGE_SHIFT)
+++ #define APERTURE_PAGE_INDEX(a)	(((a) >> 21) & 0x3fULL)
++
    /*
     * This structure contains generic data for IOMMU protection domains
     * independent of their use.
diff --cc arch/x86/kernel/amd_iommu.c
index d6898833c363,33565990164a,92b0e1881e09,04ff5ec4ac0e..2c63d8748133
--- a/arch/x86/kernel/amd_iommu.c
+++ b/arch/x86/kernel/amd_iommu.c
@@@@@ -55,12 -55,8 -55,8 -55,13 +55,17 @@@@@ struct iommu_cmd
    static int dma_ops_unity_map(struct dma_ops_domain *dma_dom,
    			     struct unity_map_entry *e);
    static struct dma_ops_domain *find_protection_domain(u16 devid);
-
+++ static u64* alloc_pte(struct protection_domain *dom,
+++ 		      unsigned long address, u64
+++ 		      **pte_page, gfp_t gfp);
+++ static void dma_ops_reserve_addresses(struct dma_ops_domain *dom,
+++ 				      unsigned long start_page,
+++ 				      unsigned int pages);
+++#ifndef BUS_NOTIFY_UNBOUND_DRIVER
+++#define BUS_NOTIFY_UNBOUND_DRIVER 0x0005
+++#endif
+
    #ifdef CONFIG_AMD_IOMMU_STATS

    /*
@@@@@ -845,40 -841,40 -870,40 -976,16 +1009,16 @@@@@ static struct dma_ops_domain *dma_ops_d
    	dma_dom->need_flush = false;
    	dma_dom->target_dev = 0xffff;

--- 	/* Intialize the exclusion range if necessary */
--- 	if (iommu->exclusion_start &&
--- 	    iommu->exclusion_start < dma_dom->aperture_size) {
--- 		unsigned long startpage = iommu->exclusion_start >> PAGE_SHIFT;
--- 		int pages = iommu_num_pages(iommu->exclusion_start,
--- 					    iommu->exclusion_length,
--- 					    PAGE_SIZE);
--- 		dma_ops_reserve_addresses(dma_dom, startpage, pages);
--- 	}
+++ 	if (alloc_new_range(iommu, dma_dom, true, GFP_KERNEL))
+++ 		goto free_dma_dom;

    	/*
--- 	 * At the last step, build the page tables so we don't need to
--- 	 * allocate page table pages in the dma_ops mapping/unmapping
--- 	 * path.
+++ 	 * mark the first page as allocated so we never return 0 as
+++ 	 * a valid dma-address. So we can use 0 as error value
    	 */
--- 	num_pte_pages = dma_dom->aperture_size / (PAGE_SIZE * 512);
--- 	dma_dom->pte_pages = kzalloc(num_pte_pages * sizeof(void *),
--- 			GFP_KERNEL);
--- 	if (!dma_dom->pte_pages)
-- 		goto free_dma_dom;
--
-- 	l2_pde = (u64 *)get_zeroed_page(GFP_KERNEL);
-- 	if (l2_pde == NULL)
--- 		goto free_dma_dom;
+++ 	dma_dom->aperture[0]->bitmap[0] = 1;
+++ 	dma_dom->next_address = 0;

-   	l2_pde = (u64 *)get_zeroed_page(GFP_KERNEL);
-   	if (l2_pde == NULL)
-   		goto free_dma_dom;
-
--- 	dma_dom->domain.pt_root[0] = IOMMU_L2_PDE(virt_to_phys(l2_pde));
---
--- 	for (i = 0; i < num_pte_pages; ++i) {
--- 		dma_dom->pte_pages[i] = (u64 *)get_zeroed_page(GFP_KERNEL);
--- 		if (!dma_dom->pte_pages[i])
--- 			goto free_dma_dom;
--- 		address = virt_to_phys(dma_dom->pte_pages[i]);
--- 		l2_pde[i] = IOMMU_L1_PDE(address);
--- 	}

    	return dma_dom;
@@@@@ -1013,10 -1009,11 -1038,10 -1119,10 +1152,11 @@@@@ static int device_change_notifier(struc
    		if (!dma_domain)
    			dma_domain = iommu->default_dom;
    		attach_device(iommu, &dma_domain->domain, devid);
- --		printk(KERN_INFO "AMD IOMMU: Using protection domain %d for "
- --		       "device %s\n", dma_domain->domain.id, dev_name(dev));
+ ++		DUMP_printk(KERN_INFO "AMD IOMMU: Using protection domain "
+ ++			    "%d for device %s\n",
+ ++			    dma_domain->domain.id, dev_name(dev));
    		break;
--- 	case BUS_NOTIFY_UNBIND_DRIVER:
+++ 	case BUS_NOTIFY_UNBOUND_DRIVER:
    		if (!domain)
    			goto out;
    		detach_device(domain, devid);
@@@@@ -1676,8 -1674,8 -1701,8 -1877,8 +1912,8 @@@@@ int __init amd_iommu_init_dma_ops(void
    	 * found in the system. Devices not assigned to any other
    	 * protection domain will be assigned to the default one.
    	 */
-- -	list_for_each_entry(iommu, &amd_iommu_list, list) {
-- 		iommu->default_dom = dma_ops_domain_alloc(iommu, order);
++ +	for_each_iommu(iommu) {
-   		iommu->default_dom = dma_ops_domain_alloc(iommu, order);
+++ 		iommu->default_dom = dma_ops_domain_alloc(iommu);
    		if (iommu->default_dom == NULL)
    			return -ENOMEM;
    		iommu->default_dom->domain.flags |= PD_DEFAULT_MASK;
diff --cc arch/x86/kernel/amd_iommu_init.c
index a3a2b98bb39e,66941129e9c7,4ca8fbfb68dc,762a4eefec93..238989ec077d
--- a/arch/x86/kernel/amd_iommu_init.c
+++ b/arch/x86/kernel/amd_iommu_init.c
@@@@@ -121,9 -123,15 -121,9 -121,8 +123,13 @@@@@ u16 amd_iommu_last_bdf;	/* largest PC
    					   to handle */
    LIST_HEAD(amd_iommu_unity_map);		/* a list of required unity mappings
    					   we find in ACPI */
--- unsigned amd_iommu_aperture_order = 26; /* size of aperture in power of 2 */
-
+ ++#ifdef CONFIG_IOMMU_STRESS
+ ++bool amd_iommu_isolate = false;
+ ++#else
    bool amd_iommu_isolate = true;		/* if true, device isolation is enabled */
+ ++#endif
+ ++
    bool amd_iommu_unmap_flush;		/* if true, flush on every unmap */

    LIST_HEAD(amd_iommu_list);		/* list of all AMD IOMMUs in the
@@@@@ -616,9 -652,20 -632,9 -615,9 +666,20 @@@@@ static void __init init_iommu_from_acpi
    			alias = false;
    			break;
    		case IVHD_DEV_ALIAS:
+ ++
+ ++			DUMP_printk(" DEV_ALIAS\t\t\t devid: %02x:%02x.%x "
+ ++				    "flags: %02x devid_to: %02x:%02x.%x\n",
+ ++				    PCI_BUS(e->devid),
+ ++				    PCI_SLOT(e->devid),
+ ++				    PCI_FUNC(e->devid),
+ ++				    e->flags,
+ ++				    PCI_BUS(e->ext >> 8),
+ ++				    PCI_SLOT(e->ext >> 8),
+ ++				    PCI_FUNC(e->ext >> 8));
+ ++
    			devid = e->devid;
    			devid_to = e->ext >> 8;
--- 			set_dev_entry_from_acpi(iommu, devid, e->flags, 0);
+++ 			set_dev_entry_from_acpi(iommu, devid_to, e->flags, 0);
    			amd_iommu_alias_table[devid] = devid_to;
    			break;
    		case IVHD_DEV_ALIAS_RANGE:
@@@@@ -906,9 -997,8 -875,7 -905,7 +964,10 @@@@@ static int __init init_unity_map_range(
    	switch (m->type) {
    	default:
+++ 		kfree(e);
+++ 		return 0;
    	case ACPI_IVMD_TYPE:
+ ++		s = "IVMD_TYPEi\t\t\t";
    		e->devid_start = e->devid_end = m->devid;
    		break;
    	case ACPI_IVMD_TYPE_ALL:
@@@@@ -1227,15 -1333,16 -1223,15 -1221,4 +1328,5 @@@@@ static int __init parse_amd_iommu_optio
    	return 1;
    }

--- static int __init parse_amd_iommu_size_options(char *str)
--- {
--- 	unsigned order = PAGE_SHIFT + get_order(memparse(str, &str));
---
--- 	if ((order > 24) && (order < 31))
--- 		amd_iommu_aperture_order = order;
---
--- 	return 1;
--- }
---
+ ++__setup("amd_iommu_dump", parse_amd_iommu_dump);
    __setup("amd_iommu=", parse_amd_iommu_options);
--- __setup("amd_iommu_size=", parse_amd_iommu_size_options);
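
For context on the extended-allocator hunks above: the single power-of-two aperture (and with it the amd_iommu_size= kernel parameter, removed in the last hunk) is replaced by up to 32 fixed 128 MB aperture ranges. The following standalone C sketch, not kernel code, illustrates the address split that the new APERTURE_* macros perform; it assumes 4K pages (PAGE_SHIFT == 12), and dump_split() is a hypothetical helper invented for this illustration.

	/*
	 * Standalone sketch (userspace, not kernel code) of how the new
	 * APERTURE_* macros split a DMA address.
	 */
	#include <stdio.h>
	#include <stdint.h>

	#define PAGE_SHIFT		12
	#define APERTURE_RANGE_SHIFT	27	/* 128 MB per aperture range */
	#define APERTURE_RANGE_SIZE	(1ULL << APERTURE_RANGE_SHIFT)
	#define APERTURE_RANGE_PAGES	(APERTURE_RANGE_SIZE >> PAGE_SHIFT) /* 32768 */
	#define APERTURE_MAX_RANGES	32	/* 32 * 128 MB = 4 GB of DMA space */
	#define APERTURE_RANGE_INDEX(a)	((a) >> APERTURE_RANGE_SHIFT)
	#define APERTURE_PAGE_INDEX(a)	(((a) >> 21) & 0x3fULL)

	static void dump_split(uint64_t addr)
	{
		/*
		 * Bits 31..27 select one of up to 32 aperture ranges; bits
		 * 26..21 select one of the 64 PTE pages inside that range
		 * (each 4K PTE page holds 512 entries and so maps 2 MB).
		 */
		printf("addr 0x%09llx -> range %2llu, pte page %2llu/64\n",
		       (unsigned long long)addr,
		       (unsigned long long)APERTURE_RANGE_INDEX(addr),
		       (unsigned long long)APERTURE_PAGE_INDEX(addr));
	}

	int main(void)
	{
		dump_split(0x00000000ULL);	/* range 0, pte page 0 */
		dump_split(0x08000000ULL);	/* 128 MB: range 1, pte page 0 */
		dump_split(0x0a200000ULL);	/* range 1, pte page 17 */
		return 0;
	}

This split is also why dma_ops_domain_alloc() now sets dma_dom->aperture[0]->bitmap[0] = 1 in the hunk above: page 0 of range 0 is kept permanently allocated, so a returned DMA address of 0 can safely serve as the error value.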