From: Simon
Date: Fri, 16 Dec 2016 03:48:06 +0000 (+0800)
Subject: iommu/rockchip: Add pd/clk operation in iommu
X-Git-Tag: firefly_0821_release~933
X-Git-Url: http://demsky.eecs.uci.edu/git/?a=commitdiff_plain;h=943ec345649d4752ee00de11d4e0ae0f07d07ddc;p=firefly-linux-kernel-4.4.55.git

iommu/rockchip: Add pd/clk operation in iommu

Rockchip IOMMUs share a power domain and clocks with their master
devices. To make the IOMMUs independent, they must manage the power
domain and clocks themselves through the pm_runtime_get_sync() API,
which is not atomic-safe and may sleep. Change the spinlocks to
mutexes to accommodate this; callers of rk_iommu_attach_device() and
rk_iommu_map() must guarantee they are not in an atomic path.

Change-Id: Icbe175030d36572e19740d23eae94f49fe59eb10
Signed-off-by: Simon
---

diff --git a/drivers/iommu/rockchip-iommu.c b/drivers/iommu/rockchip-iommu.c
index a681df695f5b..ad914dd17443 100644
--- a/drivers/iommu/rockchip-iommu.c
+++ b/drivers/iommu/rockchip-iommu.c
@@ -4,6 +4,7 @@
  * published by the Free Software Foundation.
  */
 
+#include <linux/clk.h>
 #include <linux/compiler.h>
 #include <linux/delay.h>
 #include <linux/device.h>
@@ -19,6 +20,7 @@
 #include <linux/of.h>
 #include <linux/of_platform.h>
 #include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
 #include <linux/slab.h>
 #include <linux/spinlock.h>
 
@@ -79,8 +81,8 @@ struct rk_iommu_domain {
 	struct platform_device *pdev;
 	u32 *dt; /* page directory table */
 	dma_addr_t dt_dma;
-	spinlock_t iommus_lock; /* lock for iommus list */
-	spinlock_t dt_lock; /* lock for modifying page directory table */
+	struct mutex iommus_lock; /* lock for iommus list */
+	struct mutex dt_lock; /* lock for modifying page directory table */
 	struct iommu_domain domain;
 };
 
@@ -93,6 +95,8 @@ struct rk_iommu {
 	bool reset_disabled; /* isp iommu reset operation would failed */
 	struct list_head node; /* entry in rk_iommu_domain.iommus */
 	struct iommu_domain *domain; /* domain to which iommu is attached */
+	struct clk *aclk; /* aclock belong to master */
+	struct clk *hclk; /* hclock belong to master */
 };
 
 static inline void rk_table_flush(struct rk_iommu_domain *dom, dma_addr_t dma,
@@ -256,6 +260,26 @@ static u32 rk_mk_pte_invalid(u32 pte)
 #define RK_IOVA_PAGE_MASK    0x00000fff
 #define RK_IOVA_PAGE_SHIFT   0
 
+static void rk_iommu_power_on(struct rk_iommu *iommu)
+{
+	if (iommu->aclk && iommu->hclk) {
+		clk_enable(iommu->aclk);
+		clk_enable(iommu->hclk);
+	}
+
+	pm_runtime_get_sync(iommu->dev);
+}
+
+static void rk_iommu_power_off(struct rk_iommu *iommu)
+{
+	pm_runtime_put_sync(iommu->dev);
+
+	if (iommu->aclk && iommu->hclk) {
+		clk_disable(iommu->aclk);
+		clk_disable(iommu->hclk);
+	}
+}
+
 static u32 rk_iova_dte_index(dma_addr_t iova)
 {
 	return (u32)(iova & RK_IOVA_DTE_MASK) >> RK_IOVA_DTE_SHIFT;
@@ -302,12 +326,17 @@ static void rk_iommu_zap_lines(struct rk_iommu *iommu, dma_addr_t iova_start,
 	 * TODO(djkurtz): Figure out when it is more efficient to shootdown the
 	 * entire iotlb rather than iterate over individual iovas.
 	 */
+
+	rk_iommu_power_on(iommu);
+
 	for (i = 0; i < iommu->num_mmu; i++) {
 		dma_addr_t iova;
 
 		for (iova = iova_start; iova < iova_end; iova += SPAGE_SIZE)
 			rk_iommu_write(iommu->bases[i], RK_MMU_ZAP_ONE_LINE, iova);
 	}
+
+	rk_iommu_power_off(iommu);
 }
 
 static bool rk_iommu_is_stall_active(struct rk_iommu *iommu)
@@ -556,12 +585,11 @@ static phys_addr_t rk_iommu_iova_to_phys(struct iommu_domain *domain,
 					 dma_addr_t iova)
 {
 	struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
-	unsigned long flags;
 	phys_addr_t pt_phys, phys = 0;
 	u32 dte, pte;
 	u32 *page_table;
 
-	spin_lock_irqsave(&rk_domain->dt_lock, flags);
+	mutex_lock(&rk_domain->dt_lock);
 
 	dte = rk_domain->dt[rk_iova_dte_index(iova)];
 	if (!rk_dte_is_pt_valid(dte))
@@ -575,7 +603,7 @@ static phys_addr_t rk_iommu_iova_to_phys(struct iommu_domain *domain,
 	phys = rk_pte_page_address(pte) + rk_iova_page_offset(iova);
 
 out:
-	spin_unlock_irqrestore(&rk_domain->dt_lock, flags);
+	mutex_unlock(&rk_domain->dt_lock);
 	return phys;
 }
 
@@ -584,16 +612,15 @@ static void rk_iommu_zap_iova(struct rk_iommu_domain *rk_domain,
 			      dma_addr_t iova, size_t size)
 {
 	struct list_head *pos;
-	unsigned long flags;
 
 	/* shootdown these iova from all iommus using this domain */
-	spin_lock_irqsave(&rk_domain->iommus_lock, flags);
+	mutex_lock(&rk_domain->iommus_lock);
 	list_for_each(pos, &rk_domain->iommus) {
 		struct rk_iommu *iommu;
 		iommu = list_entry(pos, struct rk_iommu, node);
 		rk_iommu_zap_lines(iommu, iova, size);
 	}
-	spin_unlock_irqrestore(&rk_domain->iommus_lock, flags);
+	mutex_unlock(&rk_domain->iommus_lock);
 }
 
 static void rk_iommu_zap_iova_first_last(struct rk_iommu_domain *rk_domain,
@@ -614,7 +641,7 @@ static u32 *rk_dte_get_page_table(struct rk_iommu_domain *rk_domain,
 	phys_addr_t pt_phys;
 	dma_addr_t pt_dma;
 
-	assert_spin_locked(&rk_domain->dt_lock);
+	WARN_ON(!mutex_is_locked(&rk_domain->dt_lock));
 
 	dte_index = rk_iova_dte_index(iova);
 	dte_addr = &rk_domain->dt[dte_index];
@@ -651,7 +678,7 @@ static size_t rk_iommu_unmap_iova(struct rk_iommu_domain *rk_domain,
 	unsigned int pte_count;
 	unsigned int pte_total = size / SPAGE_SIZE;
 
-	assert_spin_locked(&rk_domain->dt_lock);
+	WARN_ON(!mutex_is_locked(&rk_domain->dt_lock));
 
 	for (pte_count = 0; pte_count < pte_total; pte_count++) {
 		u32 pte = pte_addr[pte_count];
@@ -674,7 +701,7 @@ static int rk_iommu_map_iova(struct rk_iommu_domain *rk_domain, u32 *pte_addr,
 	unsigned int pte_total = size / SPAGE_SIZE;
 	phys_addr_t page_phys;
 
-	assert_spin_locked(&rk_domain->dt_lock);
+	WARN_ON(!mutex_is_locked(&rk_domain->dt_lock));
 
 	for (pte_count = 0; pte_count < pte_total; pte_count++) {
 		u32 pte = pte_addr[pte_count];
@@ -715,13 +742,12 @@ static int rk_iommu_map(struct iommu_domain *domain, unsigned long _iova,
 			phys_addr_t paddr, size_t size, int prot)
 {
 	struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
-	unsigned long flags;
 	dma_addr_t pte_dma, iova = (dma_addr_t)_iova;
 	u32 *page_table, *pte_addr;
 	u32 dte_index, pte_index;
 	int ret;
 
-	spin_lock_irqsave(&rk_domain->dt_lock, flags);
+	mutex_lock(&rk_domain->dt_lock);
 
 	/*
 	 * pgsize_bitmap specifies iova sizes that fit in one page table
@@ -732,7 +758,7 @@ static int rk_iommu_map(struct iommu_domain *domain, unsigned long _iova,
 	 */
 	page_table = rk_dte_get_page_table(rk_domain, iova);
 	if (IS_ERR(page_table)) {
-		spin_unlock_irqrestore(&rk_domain->dt_lock, flags);
+		mutex_unlock(&rk_domain->dt_lock);
 		return PTR_ERR(page_table);
 	}
 
@@ -743,7 +769,7 @@ static int rk_iommu_map(struct iommu_domain *domain, unsigned long _iova,
 	ret = rk_iommu_map_iova(rk_domain, pte_addr, pte_dma, iova,
 				paddr, size, prot);
 
-	spin_unlock_irqrestore(&rk_domain->dt_lock, flags);
+	mutex_unlock(&rk_domain->dt_lock);
 
 	return ret;
 }
@@ -752,14 +778,13 @@ static size_t rk_iommu_unmap(struct iommu_domain *domain, unsigned long _iova,
 			     size_t size)
 {
 	struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
-	unsigned long flags;
 	dma_addr_t pte_dma, iova = (dma_addr_t)_iova;
 	phys_addr_t pt_phys;
 	u32 dte;
 	u32 *pte_addr;
 	size_t unmap_size;
 
-	spin_lock_irqsave(&rk_domain->dt_lock, flags);
+	mutex_lock(&rk_domain->dt_lock);
 
 	/*
 	 * pgsize_bitmap specifies iova sizes that fit in one page table
@@ -771,7 +796,7 @@ static size_t rk_iommu_unmap(struct iommu_domain *domain, unsigned long _iova,
 	dte = rk_domain->dt[rk_iova_dte_index(iova)];
 	/* Just return 0 if iova is unmapped */
 	if (!rk_dte_is_pt_valid(dte)) {
-		spin_unlock_irqrestore(&rk_domain->dt_lock, flags);
+		mutex_unlock(&rk_domain->dt_lock);
 		return 0;
 	}
 
@@ -780,7 +805,7 @@ static size_t rk_iommu_unmap(struct iommu_domain *domain, unsigned long _iova,
 	pte_dma = pt_phys + rk_iova_pte_index(iova) * sizeof(u32);
 	unmap_size = rk_iommu_unmap_iova(rk_domain, pte_addr, pte_dma, size);
 
-	spin_unlock_irqrestore(&rk_domain->dt_lock, flags);
+	mutex_unlock(&rk_domain->dt_lock);
 
 	/* Shootdown iotlb entries for iova range that was just unmapped */
 	rk_iommu_zap_iova(rk_domain, iova, unmap_size);
@@ -814,7 +839,6 @@ static int rk_iommu_attach_device(struct iommu_domain *domain,
 {
 	struct rk_iommu *iommu;
 	struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
-	unsigned long flags;
 	int ret, i;
 
 	/*
@@ -825,6 +849,8 @@ static int rk_iommu_attach_device(struct iommu_domain *domain,
 	if (!iommu)
 		return 0;
 
+	rk_iommu_power_on(iommu);
+
 	ret = rk_iommu_enable_stall(iommu);
 	if (ret)
 		return ret;
@@ -851,9 +877,9 @@ static int rk_iommu_attach_device(struct iommu_domain *domain,
 	if (ret)
 		return ret;
 
-	spin_lock_irqsave(&rk_domain->iommus_lock, flags);
+	mutex_lock(&rk_domain->iommus_lock);
 	list_add_tail(&iommu->node, &rk_domain->iommus);
-	spin_unlock_irqrestore(&rk_domain->iommus_lock, flags);
+	mutex_unlock(&rk_domain->iommus_lock);
 
 	dev_dbg(dev, "Attached to iommu domain\n");
 
@@ -867,7 +893,6 @@ static void rk_iommu_detach_device(struct iommu_domain *domain,
 {
 	struct rk_iommu *iommu;
 	struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
-	unsigned long flags;
 	int i;
 
 	/* Allow 'virtual devices' (eg drm) to detach from domain */
@@ -875,9 +900,9 @@ static void rk_iommu_detach_device(struct iommu_domain *domain,
 	if (!iommu)
 		return;
 
-	spin_lock_irqsave(&rk_domain->iommus_lock, flags);
+	mutex_lock(&rk_domain->iommus_lock);
 	list_del_init(&iommu->node);
-	spin_unlock_irqrestore(&rk_domain->iommus_lock, flags);
+	mutex_unlock(&rk_domain->iommus_lock);
 
 	/* Ignore error while disabling, just keep going */
 	rk_iommu_enable_stall(iommu);
@@ -892,6 +917,8 @@ static void rk_iommu_detach_device(struct iommu_domain *domain,
 
 	iommu->domain = NULL;
 
+	rk_iommu_power_off(iommu);
+
 	dev_dbg(dev, "Detached from iommu domain\n");
 }
 
@@ -941,8 +968,8 @@ static struct iommu_domain *rk_iommu_domain_alloc(unsigned type)
 
 	rk_table_flush(rk_domain, rk_domain->dt_dma, NUM_DT_ENTRIES);
 
-	spin_lock_init(&rk_domain->iommus_lock);
-	spin_lock_init(&rk_domain->dt_lock);
+	mutex_init(&rk_domain->iommus_lock);
+	mutex_init(&rk_domain->dt_lock);
 	INIT_LIST_HEAD(&rk_domain->iommus);
 
 	rk_domain->domain.geometry.aperture_start = 0;
@@ -1165,11 +1192,32 @@ static int rk_iommu_probe(struct platform_device *pdev)
 	iommu->reset_disabled = device_property_read_bool(dev,
 				"rk_iommu,disable_reset_quirk");
 
+	iommu->aclk = devm_clk_get(dev, "aclk");
+	if (IS_ERR(iommu->aclk)) {
+		dev_info(dev, "can't get aclk\n");
+		iommu->aclk = NULL;
+	}
+
+	iommu->hclk = devm_clk_get(dev, "hclk");
+	if (IS_ERR(iommu->hclk)) {
+		dev_info(dev, "can't get hclk\n");
+		iommu->hclk = NULL;
+	}
+
+	if (iommu->aclk && iommu->hclk) {
+		clk_prepare(iommu->aclk);
+		clk_prepare(iommu->hclk);
+	}
+
+	pm_runtime_enable(dev);
+
 	return 0;
 }
 
 static int rk_iommu_remove(struct platform_device *pdev)
 {
+	pm_runtime_put(&pdev->dev);
+
 	return 0;
 }
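
Note for callers (illustrative sketch, not part of the commit): because
rk_iommu_map()/rk_iommu_unmap() now take mutexes and the attach path
calls pm_runtime_get_sync(), this driver's IOMMU callbacks may sleep.
The sketch below shows what that means for a hypothetical master
driver; example_map_buffer() and its parameters are invented for
illustration, while iommu_map() and the IOMMU_READ/IOMMU_WRITE flags
are existing kernel API.

/*
 * Hypothetical usage sketch: map one buffer through the IOMMU from
 * process context. After this patch, such calls must not be made from
 * an interrupt handler or while holding a spinlock.
 */
#include <linux/iommu.h>
#include <linux/kernel.h>

static int example_map_buffer(struct iommu_domain *domain,
			      unsigned long iova, phys_addr_t paddr,
			      size_t size)
{
	might_sleep();	/* rk_iommu_map() now takes a mutex and may sleep */

	/* For a Rockchip domain, iommu_map() ends up in rk_iommu_map() */
	return iommu_map(domain, iova, paddr, size,
			 IOMMU_READ | IOMMU_WRITE);
}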