iommu/vt-d: Only insert alias dev_info if there is an alias
[firefly-linux-kernel-4.4.55.git] / drivers / iommu / intel-iommu.c
index 62c27eff549d062ecc678ff7517040636be4a08b..6e61b3eb47e88d84c0970d3e4daaf7bc5565c483 100644 (file)
@@ -406,7 +406,6 @@ struct dmar_domain {
        int             iommu_superpage;/* Level of superpages supported:
                                           0 == 4KiB (no superpages), 1 == 2MiB,
                                           2 == 1GiB, 3 == 512GiB, 4 == 1TiB */
-       spinlock_t      iommu_lock;     /* protect iommu set in domain */
        u64             max_addr;       /* maximum mapped address */
 
        struct iommu_domain domain;     /* generic domain data structure for
@@ -474,6 +473,7 @@ static void domain_exit(struct dmar_domain *domain);
 static void domain_remove_dev_info(struct dmar_domain *domain);
 static void dmar_remove_one_dev_info(struct dmar_domain *domain,
                                     struct device *dev);
+static void __dmar_remove_one_dev_info(struct device_domain_info *info);
 static void domain_context_clear(struct intel_iommu *iommu,
                                 struct device *dev);
 static int domain_detach_iommu(struct dmar_domain *domain,
@@ -1404,24 +1404,23 @@ iommu_support_dev_iotlb (struct dmar_domain *domain, struct intel_iommu *iommu,
                         u8 bus, u8 devfn)
 {
        bool found = false;
-       unsigned long flags;
        struct device_domain_info *info;
        struct pci_dev *pdev;
 
+       assert_spin_locked(&device_domain_lock);
+
        if (!ecap_dev_iotlb_support(iommu->ecap))
                return NULL;
 
        if (!iommu->qi)
                return NULL;
 
-       spin_lock_irqsave(&device_domain_lock, flags);
        list_for_each_entry(info, &domain->devices, link)
                if (info->iommu == iommu && info->bus == bus &&
                    info->devfn == devfn) {
                        found = true;
                        break;
                }
-       spin_unlock_irqrestore(&device_domain_lock, flags);
 
        if (!found || !info->dev || !dev_is_pci(info->dev))
                return NULL;
@@ -1616,10 +1615,12 @@ static int iommu_init_domains(struct intel_iommu *iommu)
 static void disable_dmar_iommu(struct intel_iommu *iommu)
 {
        struct device_domain_info *info, *tmp;
+       unsigned long flags;
 
        if (!iommu->domains || !iommu->domain_ids)
                return;
 
+       spin_lock_irqsave(&device_domain_lock, flags);
        list_for_each_entry_safe(info, tmp, &device_domain_list, global) {
                struct dmar_domain *domain;
 
@@ -1636,6 +1637,7 @@ static void disable_dmar_iommu(struct intel_iommu *iommu)
                if (!domain_type_is_vm_or_si(domain))
                        domain_exit(domain);
        }
+       spin_unlock_irqrestore(&device_domain_lock, flags);
 
        if (iommu->gcmd & DMA_GCMD_TE)
                iommu_disable_translation(iommu);
@@ -1672,100 +1674,64 @@ static struct dmar_domain *alloc_domain(int flags)
        memset(domain, 0, sizeof(*domain));
        domain->nid = -1;
        domain->flags = flags;
-       spin_lock_init(&domain->iommu_lock);
        INIT_LIST_HEAD(&domain->devices);
 
        return domain;
 }
 
-static int __iommu_attach_domain(struct dmar_domain *domain,
-                                struct intel_iommu *iommu)
+/* Must be called with iommu->lock */
+static int domain_attach_iommu(struct dmar_domain *domain,
+                              struct intel_iommu *iommu)
 {
-       int num;
        unsigned long ndomains;
+       int num;
 
-       num = domain->iommu_did[iommu->seq_id];
-       if (num)
-               return num;
+       assert_spin_locked(&device_domain_lock);
+       assert_spin_locked(&iommu->lock);
 
-       ndomains = cap_ndoms(iommu->cap);
-       num      = find_first_zero_bit(iommu->domain_ids, ndomains);
+       domain->iommu_refcnt[iommu->seq_id] += 1;
+       domain->iommu_count += 1;
+       if (domain->iommu_refcnt[iommu->seq_id] == 1) {
+               ndomains = cap_ndoms(iommu->cap);
+               num      = find_first_zero_bit(iommu->domain_ids, ndomains);
+
+               if (num >= ndomains) {
+                       pr_err("%s: No free domain ids\n", iommu->name);
+                       domain->iommu_refcnt[iommu->seq_id] -= 1;
+                       domain->iommu_count -= 1;
+                       return -ENOSPC;
+               }
 
-       if (num < ndomains) {
                set_bit(num, iommu->domain_ids);
                set_iommu_domain(iommu, num, domain);
-               domain->iommu_did[iommu->seq_id] = num;
-       } else {
-               num = -ENOSPC;
-       }
-
-       if (num < 0)
-               pr_err("%s: No free domain ids\n", iommu->name);
-
-       return num;
-}
 
-static int iommu_attach_domain(struct dmar_domain *domain,
-                              struct intel_iommu *iommu)
-{
-       int num;
-       unsigned long flags;
-
-       spin_lock_irqsave(&iommu->lock, flags);
-       num = __iommu_attach_domain(domain, iommu);
-       spin_unlock_irqrestore(&iommu->lock, flags);
-
-       return num;
-}
-
-static void iommu_detach_domain(struct dmar_domain *domain,
-                               struct intel_iommu *iommu)
-{
-       unsigned long flags;
-       int num;
-
-       spin_lock_irqsave(&iommu->lock, flags);
-
-       num = domain->iommu_did[iommu->seq_id];
-
-       if (num == 0)
-               return;
-
-       clear_bit(num, iommu->domain_ids);
-       set_iommu_domain(iommu, num, NULL);
-
-       spin_unlock_irqrestore(&iommu->lock, flags);
-}
-
-static void domain_attach_iommu(struct dmar_domain *domain,
-                              struct intel_iommu *iommu)
-{
-       unsigned long flags;
+               domain->iommu_did[iommu->seq_id] = num;
+               domain->nid                      = iommu->node;
 
-       spin_lock_irqsave(&domain->iommu_lock, flags);
-       domain->iommu_refcnt[iommu->seq_id] += 1;
-       domain->iommu_count += 1;
-       if (domain->iommu_refcnt[iommu->seq_id] == 1) {
-               domain->nid = iommu->node;
                domain_update_iommu_cap(domain);
        }
-       spin_unlock_irqrestore(&domain->iommu_lock, flags);
+
+       return 0;
 }
 
 static int domain_detach_iommu(struct dmar_domain *domain,
                               struct intel_iommu *iommu)
 {
-       unsigned long flags;
-       int count = INT_MAX;
+       int num, count = INT_MAX;
+
+       assert_spin_locked(&device_domain_lock);
+       assert_spin_locked(&iommu->lock);
 
-       spin_lock_irqsave(&domain->iommu_lock, flags);
        domain->iommu_refcnt[iommu->seq_id] -= 1;
        count = --domain->iommu_count;
        if (domain->iommu_refcnt[iommu->seq_id] == 0) {
+               num = domain->iommu_did[iommu->seq_id];
+               clear_bit(num, iommu->domain_ids);
+               set_iommu_domain(iommu, num, NULL);
+
                domain_update_iommu_cap(domain);
                domain->iommu_did[iommu->seq_id] = 0;
        }
-       spin_unlock_irqrestore(&domain->iommu_lock, flags);
 
        return count;
 }
@@ -1832,9 +1798,9 @@ static inline int guestwidth_to_adjustwidth(int gaw)
        return agaw;
 }
 
-static int domain_init(struct dmar_domain *domain, int guest_width)
+static int domain_init(struct dmar_domain *domain, struct intel_iommu *iommu,
+                      int guest_width)
 {
-       struct intel_iommu *iommu;
        int adjust_width, agaw;
        unsigned long sagaw;
 
@@ -1843,7 +1809,6 @@ static int domain_init(struct dmar_domain *domain, int guest_width)
        domain_reserve_special_ranges(domain);
 
        /* calculate AGAW */
-       iommu = domain_get_iommu(domain);
        if (guest_width > cap_mgaw(iommu->cap))
                guest_width = cap_mgaw(iommu->cap);
        domain->gaw = guest_width;
@@ -1887,7 +1852,6 @@ static int domain_init(struct dmar_domain *domain, int guest_width)
 static void domain_exit(struct dmar_domain *domain)
 {
        struct page *freelist = NULL;
-       int i;
 
        /* Domain 0 is reserved, so dont process it */
        if (!domain)
@@ -1897,20 +1861,16 @@ static void domain_exit(struct dmar_domain *domain)
        if (!intel_iommu_strict)
                flush_unmaps_timeout(0);
 
-       /* remove associated devices */
+       /* Remove associated devices and clear attached or cached domains */
+       rcu_read_lock();
        domain_remove_dev_info(domain);
+       rcu_read_unlock();
 
        /* destroy iovas */
        put_iova_domain(&domain->iovad);
 
        freelist = domain_unmap(domain, 0, DOMAIN_MAX_PFN(domain->gaw));
 
-       /* clear attached or cached domains */
-       rcu_read_lock();
-       for_each_domain_iommu(i, domain)
-               iommu_detach_domain(domain, g_iommus[i]);
-       rcu_read_unlock();
-
        dma_free_pagelist(freelist);
 
        free_domain_mem(domain);
@@ -1920,13 +1880,15 @@ static int domain_context_mapping_one(struct dmar_domain *domain,
                                      struct intel_iommu *iommu,
                                      u8 bus, u8 devfn)
 {
+       u16 did = domain->iommu_did[iommu->seq_id];
        int translation = CONTEXT_TT_MULTI_LEVEL;
        struct device_domain_info *info = NULL;
        struct context_entry *context;
        unsigned long flags;
        struct dma_pte *pgd;
-       int id;
-       int agaw;
+       int ret, agaw;
+
+       WARN_ON(did == 0);
 
        if (hw_pass_through && domain_type_is_si(domain))
                translation = CONTEXT_TT_PASS_THROUGH;
@@ -1936,28 +1898,22 @@ static int domain_context_mapping_one(struct dmar_domain *domain,
 
        BUG_ON(!domain->pgd);
 
-       spin_lock_irqsave(&iommu->lock, flags);
+       spin_lock_irqsave(&device_domain_lock, flags);
+       spin_lock(&iommu->lock);
+
+       ret = -ENOMEM;
        context = iommu_context_addr(iommu, bus, devfn, 1);
-       spin_unlock_irqrestore(&iommu->lock, flags);
        if (!context)
-               return -ENOMEM;
-       spin_lock_irqsave(&iommu->lock, flags);
-       if (context_present(context)) {
-               spin_unlock_irqrestore(&iommu->lock, flags);
-               return 0;
-       }
+               goto out_unlock;
 
-       pgd = domain->pgd;
+       ret = 0;
+       if (context_present(context))
+               goto out_unlock;
 
-       id = __iommu_attach_domain(domain, iommu);
-       if (id < 0) {
-               spin_unlock_irqrestore(&iommu->lock, flags);
-               pr_err("%s: No free domain ids\n", iommu->name);
-               return -EFAULT;
-       }
+       pgd = domain->pgd;
 
        context_clear_entry(context);
-       context_set_domain_id(context, id);
+       context_set_domain_id(context, did);
 
        /*
         * Skip top levels of page tables for iommu which has less agaw
@@ -1965,11 +1921,10 @@ static int domain_context_mapping_one(struct dmar_domain *domain,
         */
        if (translation != CONTEXT_TT_PASS_THROUGH) {
                for (agaw = domain->agaw; agaw != iommu->agaw; agaw--) {
+                       ret = -ENOMEM;
                        pgd = phys_to_virt(dma_pte_addr(pgd));
-                       if (!dma_pte_present(pgd)) {
-                               spin_unlock_irqrestore(&iommu->lock, flags);
-                               return -ENOMEM;
-                       }
+                       if (!dma_pte_present(pgd))
+                               goto out_unlock;
                }
 
                info = iommu_support_dev_iotlb(domain, iommu, bus, devfn);
@@ -2003,15 +1958,18 @@ static int domain_context_mapping_one(struct dmar_domain *domain,
                                           (((u16)bus) << 8) | devfn,
                                           DMA_CCMD_MASK_NOBIT,
                                           DMA_CCMD_DEVICE_INVL);
-               iommu->flush.flush_iotlb(iommu, id, 0, 0, DMA_TLB_DSI_FLUSH);
+               iommu->flush.flush_iotlb(iommu, did, 0, 0, DMA_TLB_DSI_FLUSH);
        } else {
                iommu_flush_write_buffer(iommu);
        }
        iommu_enable_dev_iotlb(info);
-       spin_unlock_irqrestore(&iommu->lock, flags);
 
-       domain_attach_iommu(domain, iommu);
+       ret = 0;
+
+out_unlock:
+       spin_unlock(&iommu->lock);
+       spin_unlock_irqrestore(&device_domain_lock, flags);
 
-       return 0;
+       /* Propagate the error code: the -ENOMEM paths above jump to
+        * out_unlock with ret set; returning 0 here would silently
+        * mask a failed context mapping from the callers. */
+       return ret;
 }
@@ -2253,9 +2211,12 @@ static inline void unlink_domain_info(struct device_domain_info *info)
 static void domain_remove_dev_info(struct dmar_domain *domain)
 {
        struct device_domain_info *info, *tmp;
+       unsigned long flags;
 
+       spin_lock_irqsave(&device_domain_lock, flags);
        list_for_each_entry_safe(info, tmp, &domain->devices, link)
-               dmar_remove_one_dev_info(domain, info->dev);
+               __dmar_remove_one_dev_info(info);
+       spin_unlock_irqrestore(&device_domain_lock, flags);
 }
 
 /*
@@ -2294,6 +2255,7 @@ static struct dmar_domain *dmar_insert_one_dev_info(struct intel_iommu *iommu,
        struct dmar_domain *found = NULL;
        struct device_domain_info *info;
        unsigned long flags;
+       int ret;
 
        info = alloc_devinfo_mem();
        if (!info)
@@ -2321,6 +2283,16 @@ static struct dmar_domain *dmar_insert_one_dev_info(struct intel_iommu *iommu,
                return found;
        }
 
+       spin_lock(&iommu->lock);
+       ret = domain_attach_iommu(domain, iommu);
+       spin_unlock(&iommu->lock);
+
+       if (ret) {
+               spin_unlock_irqrestore(&device_domain_lock, flags);
+               /* info was allocated by alloc_devinfo_mem() above and is
+                * not yet on any list; free it or it leaks on this path. */
+               free_devinfo_mem(info);
+               return NULL;
+       }
+
        list_add(&info->link, &domain->devices);
        list_add(&info->global, &device_domain_list);
        if (dev)
@@ -2348,8 +2319,8 @@ static struct dmar_domain *get_domain_for_dev(struct device *dev, int gaw)
        struct device_domain_info *info = NULL;
        struct dmar_domain *domain, *tmp;
        struct intel_iommu *iommu;
+       u16 req_id, dma_alias;
        unsigned long flags;
-       u16 dma_alias;
        u8 bus, devfn;
 
        domain = find_domain(dev);
@@ -2360,6 +2331,8 @@ static struct dmar_domain *get_domain_for_dev(struct device *dev, int gaw)
        if (!iommu)
                return NULL;
 
+       req_id = ((u16)bus << 8) | devfn;
+
        if (dev_is_pci(dev)) {
                struct pci_dev *pdev = to_pci_dev(dev);
 
@@ -2384,18 +2357,13 @@ static struct dmar_domain *get_domain_for_dev(struct device *dev, int gaw)
        domain = alloc_domain(0);
        if (!domain)
                return NULL;
-       if (iommu_attach_domain(domain, iommu) < 0) {
-               free_domain_mem(domain);
-               return NULL;
-       }
-       domain_attach_iommu(domain, iommu);
-       if (domain_init(domain, gaw)) {
+       if (domain_init(domain, iommu, gaw)) {
                domain_exit(domain);
                return NULL;
        }
 
        /* register PCI DMA alias device */
-       if (dev_is_pci(dev)) {
+       if (req_id != dma_alias && dev_is_pci(dev)) {
                tmp = dmar_insert_one_dev_info(iommu, PCI_BUS_NUM(dma_alias),
                                               dma_alias & 0xff, NULL, domain);
 
@@ -4306,11 +4274,9 @@ static int device_notifier(struct notifier_block *nb,
        if (!domain)
                return 0;
 
-       down_read(&dmar_global_lock);
        dmar_remove_one_dev_info(domain, dev);
        if (!domain_type_is_vm_or_si(domain) && list_empty(&domain->devices))
                domain_exit(domain);
-       up_read(&dmar_global_lock);
 
        return 0;
 }
@@ -4573,36 +4539,42 @@ static void domain_context_clear(struct intel_iommu *iommu, struct device *dev)
        pci_for_each_dma_alias(to_pci_dev(dev), &domain_context_clear_one_cb, iommu);
 }
 
-static void dmar_remove_one_dev_info(struct dmar_domain *domain,
-                                    struct device *dev)
+static void __dmar_remove_one_dev_info(struct device_domain_info *info)
 {
-       struct device_domain_info *info;
        struct intel_iommu *iommu;
        unsigned long flags;
-       u8 bus, devfn;
 
-       iommu = device_to_iommu(dev, &bus, &devfn);
-       if (!iommu)
-               return;
-
-       info = dev->archdata.iommu;
+       assert_spin_locked(&device_domain_lock);
 
        if (WARN_ON(!info))
                return;
 
-       spin_lock_irqsave(&device_domain_lock, flags);
+       iommu = info->iommu;
+
+       if (info->dev) {
+               iommu_disable_dev_iotlb(info);
+               domain_context_clear(iommu, info->dev);
+       }
+
        unlink_domain_info(info);
-       spin_unlock_irqrestore(&device_domain_lock, flags);
 
-       iommu_disable_dev_iotlb(info);
-       domain_context_clear(iommu, dev);
+       spin_lock_irqsave(&iommu->lock, flags);
+       domain_detach_iommu(info->domain, iommu);
+       spin_unlock_irqrestore(&iommu->lock, flags);
+
        free_devinfo_mem(info);
-       domain_detach_iommu(domain, iommu);
+}
 
-       spin_lock_irqsave(&domain->iommu_lock, flags);
-       if (!domain->iommu_refcnt[iommu->seq_id])
-               iommu_detach_domain(domain, iommu);
-       spin_unlock_irqrestore(&domain->iommu_lock, flags);
+static void dmar_remove_one_dev_info(struct dmar_domain *domain,
+                                    struct device *dev)
+{
+       struct device_domain_info *info;
+       unsigned long flags;
+
+       spin_lock_irqsave(&device_domain_lock, flags);
+       info = dev->archdata.iommu;
+       __dmar_remove_one_dev_info(info);
+       spin_unlock_irqrestore(&device_domain_lock, flags);
 }
 
 static int md_domain_init(struct dmar_domain *domain, int guest_width)
@@ -4683,10 +4655,9 @@ static int intel_iommu_attach_device(struct iommu_domain *domain,
 
                old_domain = find_domain(dev);
                if (old_domain) {
-                       if (domain_type_is_vm_or_si(dmar_domain))
-                               dmar_remove_one_dev_info(old_domain, dev);
-                       else
-                               domain_remove_dev_info(old_domain);
+                       rcu_read_lock();
+                       dmar_remove_one_dev_info(old_domain, dev);
+                       rcu_read_unlock();
 
                        if (!domain_type_is_vm_or_si(old_domain) &&
                             list_empty(&old_domain->devices))