* published by the Free Software Foundation.
*/
+#include <linux/clk.h>
#include <linux/compiler.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
struct platform_device *pdev;
u32 *dt; /* page directory table */
dma_addr_t dt_dma;
- spinlock_t iommus_lock; /* lock for iommus list */
- spinlock_t dt_lock; /* lock for modifying page directory table */
+ struct mutex iommus_lock; /* lock for iommus list */
+ struct mutex dt_lock; /* lock for modifying page directory table */
struct iommu_domain domain;
};
bool reset_disabled; /* isp iommu reset operation would fail */
struct list_head node; /* entry in rk_iommu_domain.iommus */
struct iommu_domain *domain; /* domain to which iommu is attached */
+ struct clk *aclk; /* aclk belonging to the master */
+ struct clk *hclk; /* hclk belonging to the master */
};
static inline void rk_table_flush(struct rk_iommu_domain *dom, dma_addr_t dma,
#define RK_IOVA_PAGE_MASK 0x00000fff
#define RK_IOVA_PAGE_SHIFT 0
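+/*
+ * Helpers that bracket MMU register accesses: enable the master's clocks
+ * (when available) and take a runtime PM reference, then undo both.
+ */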
+static void rk_iommu_power_on(struct rk_iommu *iommu)
+{
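+ /* aclk/hclk are optional; skip them unless both were found at probe time */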
+ if (iommu->aclk && iommu->hclk) {
+ clk_enable(iommu->aclk);
+ clk_enable(iommu->hclk);
+ }
+
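+ /* Take a runtime PM reference so the IOMMU is resumed for register access */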
+ pm_runtime_get_sync(iommu->dev);
+}
+
+static void rk_iommu_power_off(struct rk_iommu *iommu)
+{
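+ /* Drop the runtime PM reference first, then gate the master's clocks */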
+ pm_runtime_put_sync(iommu->dev);
+
+ if (iommu->aclk && iommu->hclk) {
+ clk_disable(iommu->aclk);
+ clk_disable(iommu->hclk);
+ }
+}
+
static u32 rk_iova_dte_index(dma_addr_t iova)
{
return (u32)(iova & RK_IOVA_DTE_MASK) >> RK_IOVA_DTE_SHIFT;
* TODO(djkurtz): Figure out when it is more efficient to shootdown the
* entire iotlb rather than iterate over individual iovas.
*/
+
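+ /* Make sure the IOMMU is powered and clocked before poking the zap registers */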
+ rk_iommu_power_on(iommu);
+
for (i = 0; i < iommu->num_mmu; i++) {
dma_addr_t iova;
for (iova = iova_start; iova < iova_end; iova += SPAGE_SIZE)
rk_iommu_write(iommu->bases[i], RK_MMU_ZAP_ONE_LINE, iova);
}
+
+ rk_iommu_power_off(iommu);
}
static bool rk_iommu_is_stall_active(struct rk_iommu *iommu)
dma_addr_t iova)
{
struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
- unsigned long flags;
phys_addr_t pt_phys, phys = 0;
u32 dte, pte;
u32 *page_table;
- spin_lock_irqsave(&rk_domain->dt_lock, flags);
+ mutex_lock(&rk_domain->dt_lock);
dte = rk_domain->dt[rk_iova_dte_index(iova)];
if (!rk_dte_is_pt_valid(dte))
phys = rk_pte_page_address(pte) + rk_iova_page_offset(iova);
out:
- spin_unlock_irqrestore(&rk_domain->dt_lock, flags);
+ mutex_unlock(&rk_domain->dt_lock);
return phys;
}
dma_addr_t iova, size_t size)
{
struct list_head *pos;
- unsigned long flags;
/* shootdown these iova from all iommus using this domain */
- spin_lock_irqsave(&rk_domain->iommus_lock, flags);
+ mutex_lock(&rk_domain->iommus_lock);
list_for_each(pos, &rk_domain->iommus) {
struct rk_iommu *iommu;
iommu = list_entry(pos, struct rk_iommu, node);
rk_iommu_zap_lines(iommu, iova, size);
}
- spin_unlock_irqrestore(&rk_domain->iommus_lock, flags);
+ mutex_unlock(&rk_domain->iommus_lock);
}
static void rk_iommu_zap_iova_first_last(struct rk_iommu_domain *rk_domain,
phys_addr_t pt_phys;
dma_addr_t pt_dma;
- assert_spin_locked(&rk_domain->dt_lock);
+ WARN_ON(!mutex_is_locked(&rk_domain->dt_lock));
dte_index = rk_iova_dte_index(iova);
dte_addr = &rk_domain->dt[dte_index];
unsigned int pte_count;
unsigned int pte_total = size / SPAGE_SIZE;
- assert_spin_locked(&rk_domain->dt_lock);
+ WARN_ON(!mutex_is_locked(&rk_domain->dt_lock));
for (pte_count = 0; pte_count < pte_total; pte_count++) {
u32 pte = pte_addr[pte_count];
unsigned int pte_total = size / SPAGE_SIZE;
phys_addr_t page_phys;
- assert_spin_locked(&rk_domain->dt_lock);
+ WARN_ON(!mutex_is_locked(&rk_domain->dt_lock));
for (pte_count = 0; pte_count < pte_total; pte_count++) {
u32 pte = pte_addr[pte_count];
phys_addr_t paddr, size_t size, int prot)
{
struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
- unsigned long flags;
dma_addr_t pte_dma, iova = (dma_addr_t)_iova;
u32 *page_table, *pte_addr;
u32 dte_index, pte_index;
int ret;
- spin_lock_irqsave(&rk_domain->dt_lock, flags);
+ mutex_lock(&rk_domain->dt_lock);
/*
* pgsize_bitmap specifies iova sizes that fit in one page table
*/
page_table = rk_dte_get_page_table(rk_domain, iova);
if (IS_ERR(page_table)) {
- spin_unlock_irqrestore(&rk_domain->dt_lock, flags);
+ mutex_unlock(&rk_domain->dt_lock);
return PTR_ERR(page_table);
}
ret = rk_iommu_map_iova(rk_domain, pte_addr, pte_dma, iova,
paddr, size, prot);
- spin_unlock_irqrestore(&rk_domain->dt_lock, flags);
+ mutex_unlock(&rk_domain->dt_lock);
return ret;
}
size_t size)
{
struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
- unsigned long flags;
dma_addr_t pte_dma, iova = (dma_addr_t)_iova;
phys_addr_t pt_phys;
u32 dte;
u32 *pte_addr;
size_t unmap_size;
- spin_lock_irqsave(&rk_domain->dt_lock, flags);
+ mutex_lock(&rk_domain->dt_lock);
/*
* pgsize_bitmap specifies iova sizes that fit in one page table
dte = rk_domain->dt[rk_iova_dte_index(iova)];
/* Just return 0 if iova is unmapped */
if (!rk_dte_is_pt_valid(dte)) {
- spin_unlock_irqrestore(&rk_domain->dt_lock, flags);
+ mutex_unlock(&rk_domain->dt_lock);
return 0;
}
pte_dma = pt_phys + rk_iova_pte_index(iova) * sizeof(u32);
unmap_size = rk_iommu_unmap_iova(rk_domain, pte_addr, pte_dma, size);
- spin_unlock_irqrestore(&rk_domain->dt_lock, flags);
+ mutex_unlock(&rk_domain->dt_lock);
/* Shootdown iotlb entries for iova range that was just unmapped */
rk_iommu_zap_iova(rk_domain, iova, unmap_size);
{
struct rk_iommu *iommu;
struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
- unsigned long flags;
int ret, i;
/*
if (!iommu)
return 0;
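+ /* Keep the IOMMU powered and its clocks running while it stays attached */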
+ rk_iommu_power_on(iommu);
+
ret = rk_iommu_enable_stall(iommu);
if (ret)
return ret;
if (ret)
return ret;
- spin_lock_irqsave(&rk_domain->iommus_lock, flags);
+ mutex_lock(&rk_domain->iommus_lock);
list_add_tail(&iommu->node, &rk_domain->iommus);
- spin_unlock_irqrestore(&rk_domain->iommus_lock, flags);
+ mutex_unlock(&rk_domain->iommus_lock);
dev_dbg(dev, "Attached to iommu domain\n");
{
struct rk_iommu *iommu;
struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
- unsigned long flags;
int i;
/* Allow 'virtual devices' (eg drm) to detach from domain */
if (!iommu)
return;
- spin_lock_irqsave(&rk_domain->iommus_lock, flags);
+ mutex_lock(&rk_domain->iommus_lock);
list_del_init(&iommu->node);
- spin_unlock_irqrestore(&rk_domain->iommus_lock, flags);
+ mutex_unlock(&rk_domain->iommus_lock);
/* Ignore error while disabling, just keep going */
rk_iommu_enable_stall(iommu);
iommu->domain = NULL;
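+ /* Drop the power/clock reference taken when the IOMMU was attached */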
+ rk_iommu_power_off(iommu);
+
dev_dbg(dev, "Detached from iommu domain\n");
}
rk_table_flush(rk_domain, rk_domain->dt_dma, NUM_DT_ENTRIES);
- spin_lock_init(&rk_domain->iommus_lock);
- spin_lock_init(&rk_domain->dt_lock);
+ mutex_init(&rk_domain->iommus_lock);
+ mutex_init(&rk_domain->dt_lock);
INIT_LIST_HEAD(&rk_domain->iommus);
rk_domain->domain.geometry.aperture_start = 0;
iommu->reset_disabled = device_property_read_bool(dev,
"rk_iommu,disable_reset_quirk");
+ iommu->aclk = devm_clk_get(dev, "aclk");
+ if (IS_ERR(iommu->aclk)) {
+ dev_info(dev, "can't get aclk\n");
+ iommu->aclk = NULL;
+ }
+
+ iommu->hclk = devm_clk_get(dev, "hclk");
+ if (IS_ERR(iommu->hclk)) {
+ dev_info(dev, "can't get hclk\n");
+ iommu->hclk = NULL;
+ }
+
+ if (iommu->aclk && iommu->hclk) {
+ clk_prepare(iommu->aclk);
+ clk_prepare(iommu->hclk);
+ }
+
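+ /* Allow the IOMMU device to be runtime suspended and resumed from now on */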
+ pm_runtime_enable(dev);
+
return 0;
}
static int rk_iommu_remove(struct platform_device *pdev)
{
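+ /* Undo the pm_runtime_enable() from rk_iommu_probe() */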
+ pm_runtime_disable(&pdev->dev);
+
return 0;
}