iommu/tegra-gart: Make use of domain_alloc and domain_free
author		Joerg Roedel <jroedel@suse.de>
		Thu, 26 Mar 2015 12:43:13 +0000 (13:43 +0100)
committer	Joerg Roedel <jroedel@suse.de>
		Tue, 31 Mar 2015 13:32:12 +0000 (15:32 +0200)
Implement the domain_alloc and domain_free iommu-ops as a
replacement for domain_init/domain_destroy: the driver now
allocates its own struct gart_domain, which embeds the generic
struct iommu_domain and carries the gart pointer that used to
live in domain->priv, and recovers it with container_of().
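
For context, a minimal sketch (not part of this patch, with an
illustrative helper name) of how the core iommu layer is expected to
drive the new callbacks; only ops->domain_alloc()/ops->domain_free()
and IOMMU_DOMAIN_UNMANAGED come from the iommu-ops interface itself:

	#include <linux/iommu.h>

	/* Illustrative only -- not copied from drivers/iommu/iommu.c. */
	static struct iommu_domain *
	example_alloc_unmanaged_domain(const struct iommu_ops *ops)
	{
		struct iommu_domain *domain;

		/* The driver allocates its container and hands back the embedded handle. */
		domain = ops->domain_alloc(IOMMU_DOMAIN_UNMANAGED);
		if (!domain)
			return NULL;

		domain->ops = ops;
		return domain;
	}

	static void example_free_domain(struct iommu_domain *domain)
	{
		/* The driver frees the whole container around the handle. */
		domain->ops->domain_free(domain);
	}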

Tested-by: Thierry Reding <treding@nvidia.com>
Acked-by: Thierry Reding <treding@nvidia.com>
Signed-off-by: Joerg Roedel <jroedel@suse.de>
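
Callers of the generic API are unaffected by the conversion; a hedged
usage sketch follows (example_use_gart_domain and its error handling
are illustrative, the iommu_* calls are the existing generic entry
points that now route into the new ops):

	#include <linux/iommu.h>
	#include <linux/platform_device.h>

	static int example_use_gart_domain(struct device *dev)
	{
		struct iommu_domain *domain;
		int err;

		/* Ends up in gart_iommu_domain_alloc() rather than domain_init. */
		domain = iommu_domain_alloc(&platform_bus_type);
		if (!domain)
			return -ENOMEM;

		err = iommu_attach_device(domain, dev);	/* gart_iommu_attach_dev() */
		if (err)
			goto free;

		/* ... iommu_map()/iommu_unmap() against the domain ... */

		iommu_detach_device(domain, dev);
	free:
		/* Ends up in gart_iommu_domain_free() rather than domain_destroy. */
		iommu_domain_free(domain);
		return err;
	}
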
drivers/iommu/tegra-gart.c

index c48da057dbb1e5b6be38e6b28daced1046454687..fc588a1ffeef5a84f6c07ba4dc845b1a55d9b188 100644
@@ -63,11 +63,21 @@ struct gart_device {
        struct device           *dev;
 };
 
+struct gart_domain {
+       struct iommu_domain domain;             /* generic domain handle */
+       struct gart_device *gart;               /* link to gart device   */
+};
+
 static struct gart_device *gart_handle; /* unique for a system */
 
 #define GART_PTE(_pfn)                                         \
        (GART_ENTRY_PHYS_ADDR_VALID | ((_pfn) << PAGE_SHIFT))
 
+static struct gart_domain *to_gart_domain(struct iommu_domain *dom)
+{
+       return container_of(dom, struct gart_domain, domain);
+}
+
 /*
  * Any interaction between any block on PPSB and a block on APB or AHB
  * must have these read-back to ensure the APB/AHB bus transaction is
@@ -156,6 +166,7 @@ static inline bool gart_iova_range_valid(struct gart_device *gart,
 static int gart_iommu_attach_dev(struct iommu_domain *domain,
                                 struct device *dev)
 {
+       struct gart_domain *gart_domain = to_gart_domain(domain);
        struct gart_device *gart;
        struct gart_client *client, *c;
        int err = 0;
@@ -163,7 +174,7 @@ static int gart_iommu_attach_dev(struct iommu_domain *domain,
        gart = gart_handle;
        if (!gart)
                return -EINVAL;
-       domain->priv = gart;
+       gart_domain->gart = gart;
 
        domain->geometry.aperture_start = gart->iovmm_base;
        domain->geometry.aperture_end   = gart->iovmm_base +
@@ -198,7 +209,8 @@ fail:
 static void gart_iommu_detach_dev(struct iommu_domain *domain,
                                  struct device *dev)
 {
-       struct gart_device *gart = domain->priv;
+       struct gart_domain *gart_domain = to_gart_domain(domain);
+       struct gart_device *gart = gart_domain->gart;
        struct gart_client *c;
 
        spin_lock(&gart->client_lock);
@@ -216,33 +228,44 @@ out:
        spin_unlock(&gart->client_lock);
 }
 
-static int gart_iommu_domain_init(struct iommu_domain *domain)
+static struct iommu_domain *gart_iommu_domain_alloc(unsigned type)
 {
-       return 0;
+       struct gart_domain *gart_domain;
+
+       if (type != IOMMU_DOMAIN_UNMANAGED)
+               return NULL;
+
+       gart_domain = kzalloc(sizeof(*gart_domain), GFP_KERNEL);
+       if (!gart_domain)
+               return NULL;
+
+       return &gart_domain->domain;
 }
 
-static void gart_iommu_domain_destroy(struct iommu_domain *domain)
+static void gart_iommu_domain_free(struct iommu_domain *domain)
 {
-       struct gart_device *gart = domain->priv;
-
-       if (!gart)
-               return;
+       struct gart_domain *gart_domain = to_gart_domain(domain);
+       struct gart_device *gart = gart_domain->gart;
 
-       spin_lock(&gart->client_lock);
-       if (!list_empty(&gart->client)) {
-               struct gart_client *c;
+       if (gart) {
+               spin_lock(&gart->client_lock);
+               if (!list_empty(&gart->client)) {
+                       struct gart_client *c;
 
-               list_for_each_entry(c, &gart->client, list)
-                       gart_iommu_detach_dev(domain, c->dev);
+                       list_for_each_entry(c, &gart->client, list)
+                               gart_iommu_detach_dev(domain, c->dev);
+               }
+               spin_unlock(&gart->client_lock);
        }
-       spin_unlock(&gart->client_lock);
-       domain->priv = NULL;
+
+       kfree(gart_domain);
 }
 
 static int gart_iommu_map(struct iommu_domain *domain, unsigned long iova,
                          phys_addr_t pa, size_t bytes, int prot)
 {
-       struct gart_device *gart = domain->priv;
+       struct gart_domain *gart_domain = to_gart_domain(domain);
+       struct gart_device *gart = gart_domain->gart;
        unsigned long flags;
        unsigned long pfn;
 
@@ -265,7 +288,8 @@ static int gart_iommu_map(struct iommu_domain *domain, unsigned long iova,
 static size_t gart_iommu_unmap(struct iommu_domain *domain, unsigned long iova,
                               size_t bytes)
 {
-       struct gart_device *gart = domain->priv;
+       struct gart_domain *gart_domain = to_gart_domain(domain);
+       struct gart_device *gart = gart_domain->gart;
        unsigned long flags;
 
        if (!gart_iova_range_valid(gart, iova, bytes))
@@ -281,7 +305,8 @@ static size_t gart_iommu_unmap(struct iommu_domain *domain, unsigned long iova,
 static phys_addr_t gart_iommu_iova_to_phys(struct iommu_domain *domain,
                                           dma_addr_t iova)
 {
-       struct gart_device *gart = domain->priv;
+       struct gart_domain *gart_domain = to_gart_domain(domain);
+       struct gart_device *gart = gart_domain->gart;
        unsigned long pte;
        phys_addr_t pa;
        unsigned long flags;
@@ -310,8 +335,8 @@ static bool gart_iommu_capable(enum iommu_cap cap)
 
 static const struct iommu_ops gart_iommu_ops = {
        .capable        = gart_iommu_capable,
-       .domain_init    = gart_iommu_domain_init,
-       .domain_destroy = gart_iommu_domain_destroy,
+       .domain_alloc   = gart_iommu_domain_alloc,
+       .domain_free    = gart_iommu_domain_free,
        .attach_dev     = gart_iommu_attach_dev,
        .detach_dev     = gart_iommu_detach_dev,
        .map            = gart_iommu_map,