From: Simon
Date: Wed, 5 Nov 2014 11:22:39 +0000 (+0800)
Subject: rockchip: iommu: fix some bugs
X-Git-Tag: firefly_0821_release~4496
X-Git-Url: http://demsky.eecs.uci.edu/git/?a=commitdiff_plain;h=7e8de64476f8ff563d7cf596616aa827694d80e4;p=firefly-linux-kernel-4.4.55.git

rockchip: iommu: fix some bugs

1.rewrite irq function
2.rewrite dump_pgtable
---

diff --git a/drivers/iommu/rockchip-iommu.c b/drivers/iommu/rockchip-iommu.c
index 4386d0e88bc8..c9edbcf0d45f 100755
--- a/drivers/iommu/rockchip-iommu.c
+++ b/drivers/iommu/rockchip-iommu.c
@@ -49,35 +49,30 @@ enum iommu_entry_flags {
 	IOMMU_FLAGS_MASK = 0x1FF,
 };
 
-#define lv1ent_fault(sent) ((*(sent) & IOMMU_FLAGS_PRESENT) == 0)
-#define lv1ent_page(sent) ((*(sent) & IOMMU_FLAGS_PRESENT) == 1)
-#define lv2ent_fault(pent) ((*(pent) & IOMMU_FLAGS_PRESENT) == 0)
-#define spage_phys(pent) (*(pent) & SPAGE_MASK)
-#define spage_offs(iova) ((iova) & 0x0FFF)
+#define rockchip_lv1ent_fault(sent) ((*(sent) & IOMMU_FLAGS_PRESENT) == 0)
+#define rockchip_lv1ent_page(sent) ((*(sent) & IOMMU_FLAGS_PRESENT) == 1)
+#define rockchip_lv2ent_fault(pent) ((*(pent) & IOMMU_FLAGS_PRESENT) == 0)
+#define rockchip_spage_phys(pent) (*(pent) & SPAGE_MASK)
+#define rockchip_spage_offs(iova) ((iova) & 0x0FFF)
 
-#define lv1ent_offset(iova) (((iova)>>22) & 0x03FF)
-#define lv2ent_offset(iova) (((iova)>>12) & 0x03FF)
+#define rockchip_lv1ent_offset(iova) (((iova)>>22) & 0x03FF)
+#define rockchip_lv2ent_offset(iova) (((iova)>>12) & 0x03FF)
 
 #define NUM_LV1ENTRIES 1024
 #define NUM_LV2ENTRIES 1024
 #define LV2TABLE_SIZE (NUM_LV2ENTRIES * sizeof(long))
 
-#define lv2table_base(sent) (*(sent) & 0xFFFFFFFE)
+#define rockchip_lv2table_base(sent) (*(sent) & 0xFFFFFFFE)
 
-#define mk_lv1ent_page(pa) ((pa) | IOMMU_FLAGS_PRESENT)
+#define rockchip_mk_lv1ent_page(pa) ((pa) | IOMMU_FLAGS_PRESENT)
 /*write and read permission for level2 page default*/
-#define mk_lv2ent_spage(pa) ((pa) | IOMMU_FLAGS_PRESENT | \
+#define rockchip_mk_lv2ent_spage(pa) ((pa) | IOMMU_FLAGS_PRESENT | \
 			     IOMMU_FLAGS_READ_PERMISSION | \
 			     IOMMU_FLAGS_WRITE_PERMISSION)
 
 #define IOMMU_REG_POLL_COUNT_FAST 1000
 
-/*rk3036:vpu and hevc share ahb interface*/
-#define BIT_VCODEC_SEL_3036 (1<<3)
-#define BIT_VCODEC_SEL_312x (1<<15)
-
-
 /**
  * MMU register numbers
  * Used in the register read/write routines.
@@ -175,46 +170,17 @@ enum iommu_status_bits { static struct kmem_cache *lv2table_kmem_cache; -static void rockchip_vcodec_select(const char *string) +static unsigned long *rockchip_section_entry(unsigned long *pgtable, unsigned long iova) { - if (strstr(string,"hevc")) { - if (cpu_is_rk3036()) { - writel_relaxed(readl_relaxed(RK_GRF_VIRT + RK3036_GRF_SOC_CON1) | - (BIT_VCODEC_SEL_3036) | (BIT_VCODEC_SEL_3036 << 16), - RK_GRF_VIRT + RK3036_GRF_SOC_CON1); - } else { - writel_relaxed(readl_relaxed(RK_GRF_VIRT + RK312X_GRF_SOC_CON1) | - (BIT_VCODEC_SEL_312x) | (BIT_VCODEC_SEL_312x << 16), - RK_GRF_VIRT + RK312X_GRF_SOC_CON1); - } - } else if (strstr(string,"vpu")) { - if (cpu_is_rk3036()) { - writel_relaxed((readl_relaxed(RK_GRF_VIRT + RK3036_GRF_SOC_CON1) & - (~BIT_VCODEC_SEL_3036)) | (BIT_VCODEC_SEL_3036 << 16), - RK_GRF_VIRT + RK3036_GRF_SOC_CON1); - } else { - writel_relaxed((readl_relaxed(RK_GRF_VIRT + RK312X_GRF_SOC_CON1) & - (~BIT_VCODEC_SEL_312x)) | (BIT_VCODEC_SEL_312x << 16), - RK_GRF_VIRT + RK312X_GRF_SOC_CON1); - } - } -} -static unsigned long *section_entry(unsigned long *pgtable, unsigned long iova) -{ - return pgtable + lv1ent_offset(iova); + return pgtable + rockchip_lv1ent_offset(iova); } -static unsigned long *page_entry(unsigned long *sent, unsigned long iova) +static unsigned long *rockchip_page_entry(unsigned long *sent, unsigned long iova) { - return (unsigned long *)__va(lv2table_base(sent)) + lv2ent_offset(iova); + return (unsigned long *)__va(rockchip_lv2table_base(sent)) + + rockchip_lv2ent_offset(iova); } -static char *iommu_fault_name[IOMMU_FAULTS_NUM] = { - "PAGE FAULT", - "BUS ERROR", - "UNKNOWN FAULT" -}; - struct rk_iommu_domain { struct list_head clients; /* list of iommu_drvdata.node */ unsigned long *pgtable; /* lv1 page table, 4KB */ @@ -223,26 +189,26 @@ struct rk_iommu_domain { spinlock_t pgtablelock; /* lock for modifying page table @ pgtable */ }; -static bool set_iommu_active(struct iommu_drvdata *data) +static bool rockchip_set_iommu_active(struct iommu_drvdata *data) { /* return true if the IOMMU was not active previously and it needs to be initialized */ return ++data->activations == 1; } -static bool set_iommu_inactive(struct iommu_drvdata *data) +static bool rockchip_set_iommu_inactive(struct iommu_drvdata *data) { /* return true if the IOMMU is needed to be disabled */ BUG_ON(data->activations < 1); return --data->activations == 0; } -static bool is_iommu_active(struct iommu_drvdata *data) +static bool rockchip_is_iommu_active(struct iommu_drvdata *data) { return data->activations > 0; } -static void iommu_disable_stall(void __iomem *base) +static void rockchip_iommu_disable_stall(void __iomem *base) { int i; u32 mmu_status; @@ -252,15 +218,23 @@ static void iommu_disable_stall(void __iomem *base) } else { goto skip_vop_mmu_disable; } - if (0 == (mmu_status & IOMMU_STATUS_BIT_PAGING_ENABLED)) + + if (0 == (mmu_status & IOMMU_STATUS_BIT_PAGING_ENABLED)) { return; + } + if (mmu_status & IOMMU_STATUS_BIT_PAGE_FAULT_ACTIVE) { pr_info("Aborting MMU disable stall request since it is in pagefault state.\n"); return; } + + if (!(mmu_status & IOMMU_STATUS_BIT_STALL_ACTIVE)) { + pr_info("MMU stall already disabled\n"); + return; + } + skip_vop_mmu_disable: - __raw_writel(IOMMU_COMMAND_DISABLE_STALL, - base + IOMMU_REGISTER_COMMAND); + __raw_writel(IOMMU_COMMAND_DISABLE_STALL, base + IOMMU_REGISTER_COMMAND); for (i = 0; i < IOMMU_REG_POLL_COUNT_FAST; ++i) { u32 status; @@ -273,20 +247,24 @@ static void iommu_disable_stall(void __iomem *base) j++; return; } 
+ if (0 == (status & IOMMU_STATUS_BIT_STALL_ACTIVE)) break; + if (status & IOMMU_STATUS_BIT_PAGE_FAULT_ACTIVE) break; + if (0 == (mmu_status & IOMMU_STATUS_BIT_PAGING_ENABLED)) break; } + if (IOMMU_REG_POLL_COUNT_FAST == i) { pr_info("Disable stall request failed, MMU status is 0x%08X\n", __raw_readl(base + IOMMU_REGISTER_STATUS)); } } -static bool iommu_enable_stall(void __iomem *base) +static bool rockchip_iommu_enable_stall(void __iomem *base) { int i; @@ -297,15 +275,24 @@ static bool iommu_enable_stall(void __iomem *base) } else { goto skip_vop_mmu_enable; } - if (0 == (mmu_status & IOMMU_STATUS_BIT_PAGING_ENABLED)) + + if (0 == (mmu_status & IOMMU_STATUS_BIT_PAGING_ENABLED)) { return true; + } + + if (mmu_status & IOMMU_STATUS_BIT_STALL_ACTIVE){ + pr_info("MMU stall already enabled\n"); + return true; + } + if (mmu_status & IOMMU_STATUS_BIT_PAGE_FAULT_ACTIVE) { - pr_info("Aborting MMU stall request since it is in pagefault state.\n"); + pr_info("Aborting MMU stall request since it is in pagefault state. mmu status is 0x%08x\n", + mmu_status); return false; } + skip_vop_mmu_enable: - __raw_writel(IOMMU_COMMAND_ENABLE_STALL, - base + IOMMU_REGISTER_COMMAND); + __raw_writel(IOMMU_COMMAND_ENABLE_STALL, base + IOMMU_REGISTER_COMMAND); for (i = 0; i < IOMMU_REG_POLL_COUNT_FAST; ++i) { if (base != rk312x_vop_mmu_base) { @@ -316,27 +303,33 @@ static bool iommu_enable_stall(void __iomem *base) j++; return true; } + if (mmu_status & IOMMU_STATUS_BIT_PAGE_FAULT_ACTIVE) break; + if ((mmu_status & IOMMU_STATUS_BIT_STALL_ACTIVE) && (0 == (mmu_status & IOMMU_STATUS_BIT_STALL_NOT_ACTIVE))) break; + if (0 == (mmu_status & (IOMMU_STATUS_BIT_PAGING_ENABLED))) break; } + if (IOMMU_REG_POLL_COUNT_FAST == i) { pr_info("Enable stall request failed, MMU status is 0x%08X\n", __raw_readl(base + IOMMU_REGISTER_STATUS)); return false; } + if (mmu_status & IOMMU_STATUS_BIT_PAGE_FAULT_ACTIVE) { pr_info("Aborting MMU stall request since it has a pagefault.\n"); return false; } + return true; } -static bool iommu_enable_paging(void __iomem *base) +static bool rockchip_iommu_enable_paging(void __iomem *base) { int i; @@ -363,7 +356,7 @@ static bool iommu_enable_paging(void __iomem *base) return true; } -static bool iommu_disable_paging(void __iomem *base) +static bool rockchip_iommu_disable_paging(void __iomem *base) { int i; @@ -390,7 +383,7 @@ static bool iommu_disable_paging(void __iomem *base) return true; } -static void iommu_page_fault_done(void __iomem *base, const char *dbgname) +static void rockchip_iommu_page_fault_done(void __iomem *base, const char *dbgname) { pr_info("MMU: %s: Leaving page fault mode\n", dbgname); @@ -398,29 +391,21 @@ static void iommu_page_fault_done(void __iomem *base, const char *dbgname) base + IOMMU_REGISTER_COMMAND); } -static bool iommu_zap_tlb(void __iomem *base) +static int rockchip_iommu_zap_tlb(void __iomem *base) { - bool stall_success; + if (!rockchip_iommu_enable_stall(base)) { + pr_err("%s failed\n", __func__); + return -1; + } - if (rk312x_vop_mmu_base != base) - stall_success = iommu_enable_stall(base); - else - stall_success = true; + __raw_writel(IOMMU_COMMAND_ZAP_CACHE, base + IOMMU_REGISTER_COMMAND); - __raw_writel(IOMMU_COMMAND_ZAP_CACHE, - base + IOMMU_REGISTER_COMMAND); - if (!stall_success) - return false; - - if (rk312x_vop_mmu_base != base) - iommu_disable_stall(base); - else - return true; + rockchip_iommu_disable_stall(base); - return true; + return 0; } -extern bool __clk_is_enabled(struct clk *clk); -static inline bool iommu_raw_reset(void __iomem *base) 
+ +static inline bool rockchip_iommu_raw_reset(void __iomem *base) { int i; unsigned int ret; @@ -448,6 +433,7 @@ static inline bool iommu_raw_reset(void __iomem *base) return true; } } + if (IOMMU_REG_POLL_COUNT_FAST == i) { pr_info("%s,Reset request failed, MMU status is 0x%08X\n", __func__, __raw_readl(base + IOMMU_REGISTER_DTE_ADDR)); @@ -456,247 +442,219 @@ static inline bool iommu_raw_reset(void __iomem *base) return true; } -static void __iommu_set_ptbase(void __iomem *base, unsigned long pgd) +static void rockchip_iommu_set_ptbase(void __iomem *base, unsigned long pgd) { __raw_writel(pgd, base + IOMMU_REGISTER_DTE_ADDR); } -static bool iommu_reset(void __iomem *base, const char *dbgname) +static bool rockchip_iommu_reset(void __iomem *base, const char *dbgname) { - bool err = true; + bool ret = true; - err = iommu_enable_stall(base); - if (!err) { + ret = rockchip_iommu_enable_stall(base); + if (!ret) { pr_info("%s:stall failed: %s\n", __func__, dbgname); - return err; + return ret; } - err = iommu_raw_reset(base); - if (err) { - if (base != rk312x_vop_mmu_base) - __raw_writel(IOMMU_INTERRUPT_PAGE_FAULT | - IOMMU_INTERRUPT_READ_BUS_ERROR, - base+IOMMU_REGISTER_INT_MASK); - else - __raw_writel(0x00, base + IOMMU_REGISTER_INT_MASK); + + ret = rockchip_iommu_raw_reset(base); + if (!ret) { + pr_info("(%s), %s failed\n", dbgname, __func__); + return ret; } - iommu_disable_stall(base); - if (!err) - pr_info("%s: failed: %s\n", __func__, dbgname); - return err; + + if (base != rk312x_vop_mmu_base) + __raw_writel(IOMMU_INTERRUPT_PAGE_FAULT | + IOMMU_INTERRUPT_READ_BUS_ERROR, + base + IOMMU_REGISTER_INT_MASK); + else + __raw_writel(0x00, base + IOMMU_REGISTER_INT_MASK); + + rockchip_iommu_disable_stall(base); + + return ret; } -static inline void pgtable_flush(void *vastart, void *vaend) +static inline void rockchip_pgtable_flush(void *vastart, void *vaend) { dmac_flush_range(vastart, vaend); outer_flush_range(virt_to_phys(vastart), virt_to_phys(vaend)); } -static void set_fault_handler(struct iommu_drvdata *data, - rockchip_iommu_fault_handler_t handler) +static void dump_pagetbl(dma_addr_t fault_address, u32 addr_dte) { - unsigned long flags; - - write_lock_irqsave(&data->lock, flags); - data->fault_handler = handler; - write_unlock_irqrestore(&data->lock, flags); + u32 dte_index, pte_index, page_offset; + u32 mmu_dte_addr; + phys_addr_t mmu_dte_addr_phys, dte_addr_phys; + u32 *dte_addr; + u32 dte; + phys_addr_t pte_addr_phys = 0; + u32 *pte_addr = NULL; + u32 pte = 0; + phys_addr_t page_addr_phys = 0; + u32 page_flags = 0; + + dte_index = rockchip_lv1ent_offset(fault_address); + pte_index = rockchip_lv2ent_offset(fault_address); + page_offset = (u32)(fault_address & 0x00000fff); + + mmu_dte_addr = addr_dte; + mmu_dte_addr_phys = (phys_addr_t)mmu_dte_addr; + + dte_addr_phys = mmu_dte_addr_phys + (4 * dte_index); + dte_addr = phys_to_virt(dte_addr_phys); + dte = *dte_addr; + + if (!(IOMMU_FLAGS_PRESENT & dte)) + goto print_it; + + pte_addr_phys = ((phys_addr_t)dte & 0xfffff000) + (pte_index * 4); + pte_addr = phys_to_virt(pte_addr_phys); + pte = *pte_addr; + + if (!(IOMMU_FLAGS_PRESENT & pte)) + goto print_it; + + page_addr_phys = ((phys_addr_t)pte & 0xfffff000) + page_offset; + page_flags = pte & 0x000001fe; + +print_it: + pr_err("iova = %pad: dte_index: 0x%03x pte_index: 0x%03x page_offset: 0x%03x\n", + &fault_address, dte_index, pte_index, page_offset); + pr_err("mmu_dte_addr: %pa dte@%pa: %#08x valid: %u pte@%pa: %#08x valid: %u page@%pa flags: %#03x\n", + &mmu_dte_addr_phys, 
&dte_addr_phys, dte, + (dte & IOMMU_FLAGS_PRESENT), &pte_addr_phys, pte, + (pte & IOMMU_FLAGS_PRESENT), &page_addr_phys, page_flags); } -static int default_fault_handler(struct device *dev, - enum rk_iommu_inttype itype, - unsigned long pgtable_base, - unsigned long fault_addr, - unsigned int status) +static irqreturn_t rockchip_iommu_irq(int irq, void *dev_id) { - struct iommu_drvdata *data = dev_get_drvdata(dev->archdata.iommu); + /* SYSMMU is in blocked when interrupt occurred. */ + struct iommu_drvdata *data = dev_id; + u32 status; + u32 rawstat; + dma_addr_t fault_address; + int i; + unsigned long flags; + int ret; + u32 reg_status; - if (!data) { - dev_err(dev->archdata.iommu,"%s,iommu device not assigned yet\n", __func__); - return 0; + spin_lock_irqsave(&data->data_lock, flags); + + if (!rockchip_is_iommu_active(data)) { + spin_unlock_irqrestore(&data->data_lock, flags); + return IRQ_HANDLED; } - if ((itype >= IOMMU_FAULTS_NUM) || (itype < IOMMU_PAGEFAULT)) - itype = IOMMU_FAULT_UNKNOWN; - if (itype == IOMMU_BUSERROR) - dev_err(dev->archdata.iommu,"%s occured at 0x%lx(Page table base: 0x%lx)\n", - iommu_fault_name[itype], fault_addr, pgtable_base); + for (i = 0; i < data->num_res_mem; i++) { + status = __raw_readl(data->res_bases[i] + + IOMMU_REGISTER_INT_STATUS); + if (status == 0) + continue; - if (itype == IOMMU_PAGEFAULT) - dev_err(dev->archdata.iommu,"IOMMU:Page fault detected at 0x%lx from bus id %d of type %s on %s\n", - fault_addr, - (status >> 6) & 0x1F, - (status & 32) ? "write" : "read", - data->dbgname); + rawstat = __raw_readl(data->res_bases[i] + + IOMMU_REGISTER_INT_RAWSTAT); - dev_err(dev->archdata.iommu,"Generating Kernel OOPS... because it is unrecoverable.\n"); + reg_status = __raw_readl(data->res_bases[i] + + IOMMU_REGISTER_STATUS); - BUG(); + dev_info(data->iommu, "1.rawstat = 0x%08x,status = 0x%08x,reg_status = 0x%08x\n", + rawstat, status, reg_status); - return 0; -} + if (rawstat & IOMMU_INTERRUPT_PAGE_FAULT) { + u32 dte; + int flags; -static void dump_pagetbl(u32 fault_address, u32 addr_dte) -{ - u32 lv1_offset; - u32 lv2_offset; - - u32 *lv1_entry_pa; - u32 *lv1_entry_va; - u32 *lv1_entry_value; - - u32 *lv2_base; - u32 *lv2_entry_pa; - u32 *lv2_entry_va; - u32 *lv2_entry_value; - - - lv1_offset = lv1ent_offset(fault_address); - lv2_offset = lv2ent_offset(fault_address); - - lv1_entry_pa = (u32 *)addr_dte + lv1_offset; - lv1_entry_va = (u32 *)(__va(addr_dte)) + lv1_offset; - lv1_entry_value = (u32 *)(*lv1_entry_va); - - lv2_base = (u32 *)((*lv1_entry_va) & 0xfffffffe); - lv2_entry_pa = (u32 *)lv2_base + lv2_offset; - lv2_entry_va = (u32 *)(__va(lv2_base)) + lv2_offset; - lv2_entry_value = (u32 *)(*lv2_entry_va); - - dev_info(NULL,"fault address = 0x%08x,dte addr pa = 0x%08x,va = 0x%08x\n", - fault_address, addr_dte, (u32)__va(addr_dte)); - dev_info(NULL,"lv1_offset = 0x%x,lv1_entry_pa = 0x%08x,lv1_entry_va = 0x%08x\n", - lv1_offset, (u32)lv1_entry_pa, (u32)lv1_entry_va); - dev_info(NULL,"lv1_entry_value(*lv1_entry_va) = 0x%08x,lv2_base = 0x%08x\n", - (u32)lv1_entry_value, (u32)lv2_base); - dev_info(NULL,"lv2_offset = 0x%x,lv2_entry_pa = 0x%08x,lv2_entry_va = 0x%08x\n", - lv2_offset, (u32)lv2_entry_pa, (u32)lv2_entry_va); - dev_info(NULL,"lv2_entry value(*lv2_entry_va) = 0x%08x\n", - (u32)lv2_entry_value); -} + fault_address = __raw_readl(data->res_bases[i] + + IOMMU_REGISTER_PAGE_FAULT_ADDR); -static irqreturn_t rockchip_iommu_irq(int irq, void *dev_id) -{ - /* SYSMMU is in blocked when interrupt occurred. 
*/ - struct iommu_drvdata *data = dev_id; - struct resource *irqres; - struct platform_device *pdev; - enum rk_iommu_inttype itype = IOMMU_FAULT_UNKNOWN; - u32 status; - u32 rawstat; - u32 int_status; - u32 fault_address; - int i, ret = 0; + dte = __raw_readl(data->res_bases[i] + + IOMMU_REGISTER_DTE_ADDR); - read_lock(&data->lock); + flags = (status & 32) ? 1 : 0; - if (!is_iommu_active(data)) { - read_unlock(&data->lock); - return IRQ_HANDLED; - } - - if(cpu_is_rk312x() || cpu_is_rk3036()) - rockchip_vcodec_select(data->dbgname); - - pdev = to_platform_device(data->iommu); + dev_err(data->iommu, "Page fault detected at %pad from bus id %d of type %s on %s\n", + &fault_address, (status >> 6) & 0x1F, + (flags == 1) ? "write" : "read", data->dbgname); - for (i = 0; i < data->num_res_irq; i++) { - irqres = platform_get_resource(pdev, IORESOURCE_IRQ, i); - if (irqres && ((int)irqres->start == irq)) { - if (data->res_bases[i] == rk312x_vop_mmu_base) { - read_unlock(&data->lock); - return IRQ_HANDLED; - } - break; + dump_pagetbl(fault_address, dte); + + if (data->domain) + report_iommu_fault(data->domain, data->iommu, + fault_address, flags); + + rockchip_iommu_page_fault_done(data->res_bases[i], + data->dbgname); } - } - if (i == data->num_res_irq) { - itype = IOMMU_FAULT_UNKNOWN; - } else { - int_status = __raw_readl(data->res_bases[i] + - IOMMU_REGISTER_INT_STATUS); - - if (int_status != 0) { - /*mask status*/ - __raw_writel(0x00, data->res_bases[i] + - IOMMU_REGISTER_INT_MASK); - - rawstat = __raw_readl(data->res_bases[i] + - IOMMU_REGISTER_INT_RAWSTAT); - - if (rawstat & IOMMU_INTERRUPT_PAGE_FAULT) { - fault_address = __raw_readl(data->res_bases[i] + - IOMMU_REGISTER_PAGE_FAULT_ADDR); - itype = IOMMU_PAGEFAULT; - } else if (rawstat & IOMMU_INTERRUPT_READ_BUS_ERROR) { - itype = IOMMU_BUSERROR; - } else { - goto out; - } - dump_pagetbl(fault_address, - __raw_readl(data->res_bases[i] + - IOMMU_REGISTER_DTE_ADDR)); - } else { - goto out; + if (rawstat & IOMMU_INTERRUPT_READ_BUS_ERROR) { + dev_err(data->iommu, "bus error occured at %pad\n", + &fault_address); } - } - if (data->fault_handler) { - unsigned long base = __raw_readl(data->res_bases[i] + - IOMMU_REGISTER_DTE_ADDR); + if (rawstat & ~(IOMMU_INTERRUPT_READ_BUS_ERROR | + IOMMU_INTERRUPT_PAGE_FAULT)) { + dev_err(data->iommu, "unexpected int_status: %#08x\n\n", + rawstat); + } + + __raw_writel(rawstat, data->res_bases[i] + + IOMMU_REGISTER_INT_CLEAR); + status = __raw_readl(data->res_bases[i] + - IOMMU_REGISTER_STATUS); - ret = data->fault_handler(data->dev, itype, base, - fault_address, status); - } + IOMMU_REGISTER_INT_STATUS); - if (!ret && (itype != IOMMU_FAULT_UNKNOWN)) { - if (IOMMU_PAGEFAULT == itype) { - iommu_zap_tlb(data->res_bases[i]); - iommu_page_fault_done(data->res_bases[i], - data->dbgname); - __raw_writel(IOMMU_INTERRUPT_PAGE_FAULT | - IOMMU_INTERRUPT_READ_BUS_ERROR, - data->res_bases[i] + - IOMMU_REGISTER_INT_MASK); - } - } else { - dev_err(data->iommu,"(%s) %s is not handled.\n", - data->dbgname, iommu_fault_name[itype]); - } + rawstat = __raw_readl(data->res_bases[i] + + IOMMU_REGISTER_INT_RAWSTAT); -out: - read_unlock(&data->lock); + reg_status = __raw_readl(data->res_bases[i] + + IOMMU_REGISTER_STATUS); + + dev_info(data->iommu, "2.rawstat = 0x%08x,status = 0x%08x,reg_status = 0x%08x\n", + rawstat, status, reg_status); + + ret = rockchip_iommu_zap_tlb(data->res_bases[i]); + if (ret) + dev_err(data->iommu, "(%s) %s failed\n", data->dbgname, + __func__); + } + spin_unlock_irqrestore(&data->data_lock, flags); return 
IRQ_HANDLED; } -static bool __rockchip_iommu_disable(struct iommu_drvdata *data) +static bool rockchip_iommu_disable(struct iommu_drvdata *data) { unsigned long flags; int i; - bool disabled = false; + bool ret = false; - write_lock_irqsave(&data->lock, flags); + spin_lock_irqsave(&data->data_lock, flags); - if (!set_iommu_inactive(data)) - goto finish; + if (!rockchip_set_iommu_inactive(data)) { + spin_unlock_irqrestore(&data->data_lock, flags); + dev_info(data->iommu,"(%s) %d times left to be disabled\n", + data->dbgname, data->activations); + return ret; + } - for (i = 0; i < data->num_res_mem; i++) - iommu_disable_paging(data->res_bases[i]); + for (i = 0; i < data->num_res_mem; i++) { + __raw_writel(0, data->res_bases[i] + IOMMU_REGISTER_INT_MASK); + ret = rockchip_iommu_disable_paging(data->res_bases[i]); + if (!ret) { + spin_unlock_irqrestore(&data->data_lock, flags); + dev_info(data->iommu, "%s error\n", __func__); + return ret; + } + } - disabled = true; data->pgtable = 0; - data->domain = NULL; -finish: - write_unlock_irqrestore(&data->lock, flags); - if (disabled) - dev_info(data->iommu,"(%s) Disabled\n", data->dbgname); - else - dev_info(data->iommu,"(%s) %d times left to be disabled\n", - data->dbgname, data->activations); + spin_unlock_irqrestore(&data->data_lock, flags); - return disabled; + dev_info(data->iommu,"(%s) Disabled\n", data->dbgname); + + return ret; } /* __rk_sysmmu_enable: Enables System MMU @@ -705,89 +663,92 @@ finish: * 0 if the System MMU has been just enabled and 1 if System MMU was already * enabled before. */ -static int __rockchip_iommu_enable(struct iommu_drvdata *data, +static int rockchip_iommu_enable(struct iommu_drvdata *data, unsigned long pgtable, struct iommu_domain *domain) { int i, ret = 0; unsigned long flags; - write_lock_irqsave(&data->lock, flags); + spin_lock_irqsave(&data->data_lock, flags); - if (!set_iommu_active(data)) { + if (!rockchip_set_iommu_active(data)) { if (WARN_ON(pgtable != data->pgtable)) { ret = -EBUSY; - set_iommu_inactive(data); + rockchip_set_iommu_inactive(data); } else { ret = 1; } - dev_info(data->iommu,"(%s) Already enabled\n", data->dbgname); - goto finish; + spin_unlock_irqrestore(&data->data_lock, flags); + dev_info(data->iommu, "(%s) Already enabled\n", data->dbgname); + + return ret; } data->pgtable = pgtable; for (i = 0; i < data->num_res_mem; i++) { - bool status; - - status = iommu_enable_stall(data->res_bases[i]); - if (status) { - __iommu_set_ptbase(data->res_bases[i], pgtable); - __raw_writel(IOMMU_COMMAND_ZAP_CACHE, - data->res_bases[i] + - IOMMU_REGISTER_COMMAND); + ret = rockchip_iommu_enable_stall(data->res_bases[i]); + if (!ret) { + dev_info(data->iommu, "(%s), %s failed\n", + data->dbgname, __func__); + spin_unlock_irqrestore(&data->data_lock, flags); + return -EBUSY; } + + rockchip_iommu_set_ptbase(data->res_bases[i], pgtable); + __raw_writel(IOMMU_COMMAND_ZAP_CACHE, data->res_bases[i] + + IOMMU_REGISTER_COMMAND); __raw_writel(IOMMU_INTERRUPT_PAGE_FAULT | IOMMU_INTERRUPT_READ_BUS_ERROR, - data->res_bases[i]+IOMMU_REGISTER_INT_MASK); - iommu_enable_paging(data->res_bases[i]); - iommu_disable_stall(data->res_bases[i]); + data->res_bases[i] + IOMMU_REGISTER_INT_MASK); + ret = rockchip_iommu_enable_paging(data->res_bases[i]); + if (!ret) { + spin_unlock_irqrestore(&data->data_lock, flags); + dev_info(data->iommu, "(%s), %s failed\n", + data->dbgname, __func__); + return -EBUSY; + } + rockchip_iommu_disable_stall(data->res_bases[i]); } - data->domain = domain; - dev_info(data->iommu,"(%s) Enabled\n", 
data->dbgname); -finish: - write_unlock_irqrestore(&data->lock, flags); - return ret; -} + spin_unlock_irqrestore(&data->data_lock, flags); -bool rockchip_iommu_disable(struct device *dev) -{ - struct iommu_drvdata *data = dev_get_drvdata(dev->archdata.iommu); - bool disabled; - - disabled = __rockchip_iommu_disable(data); - - return disabled; + return 0; } -void rockchip_iommu_tlb_invalidate(struct device *dev) +int rockchip_iommu_tlb_invalidate(struct device *dev) { unsigned long flags; struct iommu_drvdata *data = dev_get_drvdata(dev->archdata.iommu); - read_lock_irqsave(&data->lock, flags); - - if(cpu_is_rk312x() || cpu_is_rk3036()) - rockchip_vcodec_select(data->dbgname); - - if (is_iommu_active(data)) { + spin_lock_irqsave(&data->data_lock, flags); + + if (rockchip_is_iommu_active(data)) { int i; + int ret; for (i = 0; i < data->num_res_mem; i++) { - if (!iommu_zap_tlb(data->res_bases[i])) - dev_err(dev->archdata.iommu,"%s,invalidating TLB failed\n", - data->dbgname); + ret = rockchip_iommu_zap_tlb(data->res_bases[i]); + if (ret) { + dev_err(dev->archdata.iommu, "(%s) %s failed\n", + data->dbgname, __func__); + spin_unlock_irqrestore(&data->data_lock, flags); + return ret; + } + } } else { - dev_dbg(dev->archdata.iommu,"(%s) Disabled. Skipping invalidating TLB.\n", + dev_dbg(dev->archdata.iommu, "(%s) Disabled. Skipping invalidating TLB.\n", data->dbgname); } - read_unlock_irqrestore(&data->lock, flags); + spin_unlock_irqrestore(&data->data_lock, flags); + + return 0; } static phys_addr_t rockchip_iommu_iova_to_phys(struct iommu_domain *domain, @@ -800,31 +761,31 @@ static phys_addr_t rockchip_iommu_iova_to_phys(struct iommu_domain *domain, spin_lock_irqsave(&priv->pgtablelock, flags); - entry = section_entry(priv->pgtable, iova); - entry = page_entry(entry, iova); - phys = spage_phys(entry) + spage_offs(iova); + entry = rockchip_section_entry(priv->pgtable, iova); + entry = rockchip_page_entry(entry, iova); + phys = rockchip_spage_phys(entry) + rockchip_spage_offs(iova); spin_unlock_irqrestore(&priv->pgtablelock, flags); return phys; } -static int lv2set_page(unsigned long *pent, phys_addr_t paddr, +static int rockchip_lv2set_page(unsigned long *pent, phys_addr_t paddr, size_t size, short *pgcnt) { - if (!lv2ent_fault(pent)) + if (!rockchip_lv2ent_fault(pent)) return -EADDRINUSE; - *pent = mk_lv2ent_spage(paddr); - pgtable_flush(pent, pent + 1); + *pent = rockchip_mk_lv2ent_spage(paddr); + rockchip_pgtable_flush(pent, pent + 1); *pgcnt -= 1; return 0; } -static unsigned long *alloc_lv2entry(unsigned long *sent, +static unsigned long *rockchip_alloc_lv2entry(unsigned long *sent, unsigned long iova, short *pgcounter) { - if (lv1ent_fault(sent)) { + if (rockchip_lv1ent_fault(sent)) { unsigned long *pent; pent = kmem_cache_zalloc(lv2table_kmem_cache, GFP_ATOMIC); @@ -832,13 +793,13 @@ static unsigned long *alloc_lv2entry(unsigned long *sent, if (!pent) return NULL; - *sent = mk_lv1ent_page(__pa(pent)); + *sent = rockchip_mk_lv1ent_page(__pa(pent)); kmemleak_ignore(pent); *pgcounter = NUM_LV2ENTRIES; - pgtable_flush(pent, pent + NUM_LV2ENTRIES); - pgtable_flush(sent, sent + 1); + rockchip_pgtable_flush(pent, pent + NUM_LV2ENTRIES); + rockchip_pgtable_flush(sent, sent + 1); } - return page_entry(sent, iova); + return rockchip_page_entry(sent, iova); } static size_t rockchip_iommu_unmap(struct iommu_domain *domain, @@ -852,9 +813,9 @@ static size_t rockchip_iommu_unmap(struct iommu_domain *domain, spin_lock_irqsave(&priv->pgtablelock, flags); - ent = section_entry(priv->pgtable, iova); + 
ent = rockchip_section_entry(priv->pgtable, iova); - if (unlikely(lv1ent_fault(ent))) { + if (unlikely(rockchip_lv1ent_fault(ent))) { if (size > SPAGE_SIZE) size = SPAGE_SIZE; goto done; @@ -862,16 +823,16 @@ static size_t rockchip_iommu_unmap(struct iommu_domain *domain, /* lv1ent_page(sent) == true here */ - ent = page_entry(ent, iova); + ent = rockchip_page_entry(ent, iova); - if (unlikely(lv2ent_fault(ent))) { + if (unlikely(rockchip_lv2ent_fault(ent))) { size = SPAGE_SIZE; goto done; } *ent = 0; size = SPAGE_SIZE; - priv->lv2entcnt[lv1ent_offset(iova)] += 1; + priv->lv2entcnt[rockchip_lv1ent_offset(iova)] += 1; goto done; done: @@ -897,15 +858,15 @@ static int rockchip_iommu_map(struct iommu_domain *domain, unsigned long iova, spin_lock_irqsave(&priv->pgtablelock, flags); - entry = section_entry(priv->pgtable, iova); + entry = rockchip_section_entry(priv->pgtable, iova); - pent = alloc_lv2entry(entry, iova, - &priv->lv2entcnt[lv1ent_offset(iova)]); + pent = rockchip_alloc_lv2entry(entry, iova, + &priv->lv2entcnt[rockchip_lv1ent_offset(iova)]); if (!pent) ret = -ENOMEM; else - ret = lv2set_page(pent, paddr, size, - &priv->lv2entcnt[lv1ent_offset(iova)]); + ret = rockchip_lv2set_page(pent, paddr, size, + &priv->lv2entcnt[rockchip_lv1ent_offset(iova)]); if (ret) { pr_info("%s: Failed to map iova 0x%lx/0x%x bytes\n", __func__, @@ -927,30 +888,28 @@ static void rockchip_iommu_detach_device(struct iommu_domain *domain, spin_lock_irqsave(&priv->lock, flags); - list_for_each(pos, &priv->clients) - { + list_for_each(pos, &priv->clients) { if (list_entry(pos, struct iommu_drvdata, node) == data) { found = true; break; } } - if (!found) - goto finish; - - if(cpu_is_rk312x() || cpu_is_rk3036()) - rockchip_vcodec_select(data->dbgname); - - if (__rockchip_iommu_disable(data)) { - dev_info(dev->archdata.iommu,"%s: Detached IOMMU with pgtable %#lx\n", + + if (!found) { + spin_unlock_irqrestore(&priv->lock, flags); + return; + } + + if (rockchip_iommu_disable(data)) { + dev_dbg(dev->archdata.iommu,"%s: Detached IOMMU with pgtable %#lx\n", __func__, __pa(priv->pgtable)); - list_del(&data->node); - INIT_LIST_HEAD(&data->node); + data->domain = NULL; + list_del_init(&data->node); } else - dev_info(dev->archdata.iommu,"%s: Detaching IOMMU with pgtable %#lx delayed", + dev_err(dev->archdata.iommu,"%s: Detaching IOMMU with pgtable %#lx delayed", __func__, __pa(priv->pgtable)); -finish: spin_unlock_irqrestore(&priv->lock, flags); } @@ -964,16 +923,13 @@ static int rockchip_iommu_attach_device(struct iommu_domain *domain, spin_lock_irqsave(&priv->lock, flags); - if(cpu_is_rk312x() || cpu_is_rk3036()) - rockchip_vcodec_select(data->dbgname); - - ret = __rockchip_iommu_enable(data, __pa(priv->pgtable), domain); + ret = rockchip_iommu_enable(data, __pa(priv->pgtable), domain); if (ret == 0) { /* 'data->node' must not be appeared in priv->clients */ BUG_ON(!list_empty(&data->node)); - data->dev = dev; list_add_tail(&data->node, &priv->clients); + data->domain = domain; } spin_unlock_irqrestore(&priv->lock, flags); @@ -995,26 +951,14 @@ static int rockchip_iommu_attach_device(struct iommu_domain *domain, static void rockchip_iommu_domain_destroy(struct iommu_domain *domain) { struct rk_iommu_domain *priv = domain->priv; - struct iommu_drvdata *data; - unsigned long flags; int i; WARN_ON(!list_empty(&priv->clients)); - spin_lock_irqsave(&priv->lock, flags); - - list_for_each_entry(data, &priv->clients, node) { - if(cpu_is_rk312x() || cpu_is_rk3036()) - rockchip_vcodec_select(data->dbgname); - while 
(!rockchip_iommu_disable(data->dev)) - ; /* until System MMU is actually disabled */ - } - spin_unlock_irqrestore(&priv->lock, flags); - for (i = 0; i < NUM_LV1ENTRIES; i++) - if (lv1ent_page(priv->pgtable + i)) + if (rockchip_lv1ent_page(priv->pgtable + i)) kmem_cache_free(lv2table_kmem_cache, - __va(lv2table_base(priv->pgtable + i))); + __va(rockchip_lv2table_base(priv->pgtable + i))); free_pages((unsigned long)priv->pgtable, 0); free_pages((unsigned long)priv->lv2entcnt, 0); @@ -1044,7 +988,7 @@ static int rockchip_iommu_domain_init(struct iommu_domain *domain) if (!priv->lv2entcnt) goto err_counter; - pgtable_flush(priv->pgtable, priv->pgtable + NUM_LV1ENTRIES); + rockchip_pgtable_flush(priv->pgtable, priv->pgtable + NUM_LV1ENTRIES); spin_lock_init(&priv->lock); spin_lock_init(&priv->pgtablelock); @@ -1071,30 +1015,6 @@ static struct iommu_ops rk_iommu_ops = { .pgsize_bitmap = SPAGE_SIZE, }; -static int rockchip_iommu_prepare(void) -{ - int ret = 0; - static int registed; - - if (registed) - return 0; - - lv2table_kmem_cache = kmem_cache_create("rk-iommu-lv2table", - LV2TABLE_SIZE, - LV2TABLE_SIZE, - 0, NULL); - if (!lv2table_kmem_cache) { - pr_info("%s: failed to create kmem cache\n", __func__); - return -ENOMEM; - } - ret = bus_set_iommu(&platform_bus_type, &rk_iommu_ops); - if (!ret) - registed = 1; - else - pr_info("%s:failed to set iommu to bus\r\n", __func__); - return ret; -} - static int rockchip_get_iommu_resource_num(struct platform_device *pdev, unsigned int type) { @@ -1115,44 +1035,6 @@ dev_info(&pdev->dev, "r[%d] start %08x end %08x flags %08lx name (%s) resource_t return num; } -static struct kobject *dump_mmu_object; - -static int dump_mmu_pagetbl(struct device *dev, struct device_attribute *attr, - const char *buf, u32 count) -{ - u32 fault_address; - u32 iommu_dte; - u32 mmu_base; - void __iomem *base; - u32 ret; - - ret = kstrtouint(buf, 0, &mmu_base); - if (ret) - dev_dbg(dev,"%s is not in hexdecimal form.\n", buf); - base = ioremap(mmu_base, 0x100); - if (base != rk312x_vop_mmu_base) { - iommu_dte = __raw_readl(base + IOMMU_REGISTER_DTE_ADDR); - fault_address = __raw_readl(base + IOMMU_REGISTER_PAGE_FAULT_ADDR); - dump_pagetbl(fault_address, iommu_dte); - } else { - dev_dbg(dev,"vop mmu not support\n"); - } - return count; -} - -static DEVICE_ATTR(dump_mmu_pgtable, 0644, NULL, dump_mmu_pagetbl); - -void dump_iommu_sysfs_init(void) -{ - u32 ret; - - dump_mmu_object = kobject_create_and_add("rk_iommu", NULL); - if (dump_mmu_object == NULL) - return; - ret = sysfs_create_file(dump_mmu_object, - &dev_attr_dump_mmu_pgtable.attr); -} - static int rockchip_iommu_probe(struct platform_device *pdev) { int i, ret; @@ -1168,50 +1050,36 @@ for (i = 0; i < pdev->num_resources; i++, res++) { pr_info("r[%d] start %08x end %08x flags %08lx name (%s) resource_type %08lx\n", i, res->start, res->end, res->flags, res->name, resource_type(res)); } #endif - ret = rockchip_iommu_prepare(); - if (ret) { - dev_err(dev,"%s,failed\r\n", __func__); - goto err_alloc; - } data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL); if (!data) { dev_dbg(dev, "Not enough memory\n"); - ret = -ENOMEM; - goto err_alloc; + return -ENOMEM; } + dev_set_drvdata(dev, data); -/* - ret = dev_set_drvdata(dev, data); - if (ret) - { - dev_dbg(dev, "Unabled to initialize driver data\n"); - goto err_init; - } -*/ - if (pdev->dev.of_node) { - of_property_read_string(pdev->dev.of_node, - "dbgname", &(data->dbgname)); - } else { - dev_dbg(dev, - "dbgname not assigned in device tree or device node not exist\r\n"); - } + + 
if (pdev->dev.of_node) + of_property_read_string(pdev->dev.of_node, "dbgname", + &(data->dbgname)); + else + dev_dbg(dev, "dbgname not assigned in device tree or device node not exist\r\n"); dev_info(dev,"(%s) Enter\n", data->dbgname); - data->num_res_mem = rockchip_get_iommu_resource_num(pdev, IORESOURCE_MEM); if (0 == data->num_res_mem) { dev_err(dev,"can't find iommu memory resource \r\n"); - goto err_init; + return -ENOMEM; } dev_dbg(dev,"data->num_res_mem=%d\n", data->num_res_mem); + data->num_res_irq = rockchip_get_iommu_resource_num(pdev, IORESOURCE_IRQ); if (0 == data->num_res_irq) { dev_err(dev,"can't find iommu irq resource \r\n"); - goto err_init; + return -ENOMEM; } dev_dbg(dev,"data->num_res_irq=%d\n", data->num_res_irq); @@ -1219,8 +1087,7 @@ for (i = 0; i < pdev->num_resources; i++, res++) { sizeof(*data->res_bases), GFP_KERNEL); if (data->res_bases == NULL) { dev_err(dev, "Not enough memory\n"); - ret = -ENOMEM; - goto err_init; + return -ENOMEM; } for (i = 0; i < data->num_res_mem; i++) { @@ -1229,69 +1096,63 @@ for (i = 0; i < pdev->num_resources; i++, res++) { res = platform_get_resource(pdev, IORESOURCE_MEM, i); if (!res) { dev_err(dev,"Unable to find IOMEM region\n"); - ret = -ENOENT; - goto err_res; + return -ENOENT; } - data->res_bases[i] = devm_ioremap(dev,res->start, resource_size(res)); - dev_dbg(dev,"res->start = 0x%08x ioremap to data->res_bases[%d] = 0x%08x\n", - res->start, i, (unsigned int)data->res_bases[i]); + + data->res_bases[i] = devm_ioremap(dev,res->start, + resource_size(res)); if (!data->res_bases[i]) { - pr_err("Unable to map IOMEM @ PA:%#x\n", res->start); - ret = -ENOENT; - goto err_res; + dev_err(dev, "Unable to map IOMEM @ PA:%#x\n", + res->start); + return -ENOMEM; } - if (cpu_is_rk312x() || cpu_is_rk3036()) { - rockchip_vcodec_select(data->dbgname); - if (strstr(data->dbgname, "vop") && cpu_is_rk312x()) { - rk312x_vop_mmu_base = data->res_bases[0]; - dev_dbg(dev,"rk312x_vop_mmu_base = 0x%08x\n",(unsigned int)rk312x_vop_mmu_base); - } - } - if (!strstr(data->dbgname, "isp")) { - if (!iommu_reset(data->res_bases[i], data->dbgname)) { - ret = -ENOENT; - goto err_res; - } + dev_dbg(dev,"res->start = 0x%08x ioremap to data->res_bases[%d] = 0x%08x\n", + res->start, i, (unsigned int)data->res_bases[i]); + + if (strstr(data->dbgname, "vop") && cpu_is_rk312x()) { + rk312x_vop_mmu_base = data->res_bases[0]; + dev_dbg(dev, "rk312x_vop_mmu_base = 0x%08x\n", + (unsigned int)rk312x_vop_mmu_base); } + + if (!strstr(data->dbgname, "isp")) + if (!rockchip_iommu_reset(data->res_bases[i], + data->dbgname)) + return -ENOENT; } for (i = 0; i < data->num_res_irq; i++) { - ret = platform_get_irq(pdev, i); - if (ret <= 0) { - dev_err(dev,"Unable to find IRQ resource\n"); - goto err_irq; - } if (cpu_is_rk312x() && strstr(data->dbgname, "vop")) { dev_info(dev, "skip request vop mmu irq\n"); continue; } + + ret = platform_get_irq(pdev, i); + if (ret <= 0) { + dev_err(dev,"Unable to find IRQ resource\n"); + return -ENOENT; + } + ret = devm_request_irq(dev, ret, rockchip_iommu_irq, IRQF_SHARED, dev_name(dev), data); if (ret) { - dev_err(dev,"Unabled to register interrupt handler\n"); - goto err_irq; + dev_err(dev, "Unabled to register interrupt handler\n"); + return -ENOENT; } } + ret = rockchip_init_iovmm(dev, &data->vmm); if (ret) - goto err_irq; + return ret; data->iommu = dev; - rwlock_init(&data->lock); + spin_lock_init(&data->data_lock); INIT_LIST_HEAD(&data->node); - set_fault_handler(data, &default_fault_handler); - dev_info(dev,"(%s) Initialized\n", 
data->dbgname); - return 0; -err_irq: -err_res: -err_init: -err_alloc: - dev_err(dev, "Failed to initialize\n"); - return ret; + return 0; } #ifdef CONFIG_OF @@ -1322,7 +1183,19 @@ static struct platform_driver rk_iommu_driver = { static int __init rockchip_iommu_init_driver(void) { - dump_iommu_sysfs_init(); + int ret; + + lv2table_kmem_cache = kmem_cache_create("rk-iommu-lv2table", + LV2TABLE_SIZE, LV2TABLE_SIZE, + 0, NULL); + if (!lv2table_kmem_cache) { + pr_info("%s: failed to create kmem cache\n", __func__); + return -ENOMEM; + } + + ret = bus_set_iommu(&platform_bus_type, &rk_iommu_ops); + if (ret) + return ret; return platform_driver_register(&rk_iommu_driver); } diff --git a/drivers/iommu/rockchip-iommu.h b/drivers/iommu/rockchip-iommu.h index 761b68120981..c5813638e3b9 100755 --- a/drivers/iommu/rockchip-iommu.h +++ b/drivers/iommu/rockchip-iommu.h @@ -29,15 +29,13 @@ struct rk_iovmm { struct iommu_drvdata { struct list_head node; /* entry of rk_iommu_domain.clients */ struct device *iommu; /* IOMMU's device descriptor */ - struct device *dev; /* Owner of IOMMU */ int num_res_mem; int num_res_irq; const char *dbgname; void __iomem **res_bases; int activations; - rwlock_t lock; + spinlock_t data_lock; struct iommu_domain *domain; /* domain given to iommu_attach_device() */ - rockchip_iommu_fault_handler_t fault_handler; unsigned long pgtable; struct rk_iovmm vmm; }; @@ -74,31 +72,18 @@ static inline int rockchip_init_iovmm(struct device *iommu, #ifdef CONFIG_ROCKCHIP_IOMMU -/** -* rockchip_iommu_disable() - disable iommu mmu of ip -* @owner: The device whose IOMMU is about to be disabled. -* -* This function disable iommu to transfer address - * from virtual address to physical address - */ -bool rockchip_iommu_disable(struct device *owner); - /** * rockchip_iommu_tlb_invalidate() - flush all TLB entry in iommu * @owner: The device whose IOMMU. 
* * This function flush all TLB entry in iommu */ -void rockchip_iommu_tlb_invalidate(struct device *owner); +int rockchip_iommu_tlb_invalidate(struct device *owner); #else /* CONFIG_ROCKCHIP_IOMMU */ -static inline bool rockchip_iommu_disable(struct device *owner) -{ - return false; -} -static inline void rockchip_iommu_tlb_invalidate(struct device *owner) +static inline int rockchip_iommu_tlb_invalidate(struct device *owner) { - return false; + return -1; } #endif diff --git a/drivers/iommu/rockchip-iovmm.c b/drivers/iommu/rockchip-iovmm.c index 1079ea17d746..94faaffce39a 100755 --- a/drivers/iommu/rockchip-iovmm.c +++ b/drivers/iommu/rockchip-iovmm.c @@ -33,12 +33,7 @@ static struct rk_vm_region *find_region(struct rk_iovmm *vmm, dma_addr_t iova) void rockchip_iovmm_set_fault_handler(struct device *dev, rockchip_iommu_fault_handler_t handler) { - unsigned long flags; - struct iommu_drvdata *data = dev_get_drvdata(dev->archdata.iommu); - - write_lock_irqsave(&data->lock, flags); - data->fault_handler = handler; - write_unlock_irqrestore(&data->lock, flags); + return; } int rockchip_iovmm_activate(struct device *dev) @@ -141,11 +136,13 @@ dma_addr_t rockchip_iovmm_map(struct device *dev, spin_unlock(&vmm->lock); - rockchip_iommu_tlb_invalidate(dev); - /* - pr_err("IOVMM: Allocated VM region @ %#x/%#X bytes.\n", + ret = rockchip_iommu_tlb_invalidate(dev); + if (ret) + goto err_map_map; + + dev_dbg(dev->archdata.iommu, "IOVMM: Allocated VM region @ %#x/%#X bytes.\n", region->start, region->size); - */ + return region->start; err_map_map: @@ -154,7 +151,7 @@ err_map_map: err_map_noiomem: kfree(region); err_map_nomem: - pr_err("IOVMM: Failed to allocated VM region for %#x bytes.\n", size); + dev_err(dev->archdata.iommu, "IOVMM: Failed to allocated VM region for %#x bytes.\n", size); return (dma_addr_t)ret; } @@ -189,10 +186,10 @@ void rockchip_iovmm_unmap(struct device *dev, dma_addr_t iova) gen_pool_free(vmm->vmm_pool, region->start, region->size); WARN_ON(unmapped_size != region->size); - /* - pr_err("IOVMM: Unmapped %#x bytes from %#x.\n", + + dev_dbg(dev->archdata.iommu, "IOVMM: Unmapped %#x bytes from %#x.\n", unmapped_size, region->start); - */ + kfree(region); } @@ -203,7 +200,7 @@ int rockchip_iovmm_map_oto(struct device *dev, phys_addr_t phys, size_t size) int ret; if (WARN_ON((phys + size) >= IOVA_START)) { - pr_err("Unable to create one to one mapping for %#x @ %#x\n", + dev_err(dev->archdata.iommu, "Unable to create one to one mapping for %#x @ %#x\n", size, phys); return -EINVAL; } @@ -232,7 +229,9 @@ int rockchip_iovmm_map_oto(struct device *dev, phys_addr_t phys, size_t size) spin_unlock(&vmm->lock); - rockchip_iommu_tlb_invalidate(dev); + ret = rockchip_iommu_tlb_invalidate(dev); + if (ret) + return ret; return 0; } @@ -262,9 +261,8 @@ void rockchip_iovmm_unmap_oto(struct device *dev, phys_addr_t phys) spin_unlock(&vmm->lock); unmapped_size = iommu_unmap(vmm->domain, region->start, region->size); - rockchip_iommu_tlb_invalidate(dev); WARN_ON(unmapped_size != region->size); - pr_err("IOVMM: Unmapped %#x bytes from %#x.\n", + dev_dbg(dev->archdata.iommu, "IOVMM: Unmapped %#x bytes from %#x.\n", unmapped_size, region->start); kfree(region); @@ -295,13 +293,13 @@ int rockchip_init_iovmm(struct device *iommu, struct rk_iovmm *vmm) INIT_LIST_HEAD(&vmm->regions_list); - pr_info("IOVMM: Created %#x B IOVMM from %#x.\n", + dev_info(iommu, "IOVMM: Created %#x B IOVMM from %#x.\n", IOVM_SIZE, IOVA_START); return 0; err_setup_domain: gen_pool_destroy(vmm->vmm_pool); err_setup_genalloc: - 
pr_err("IOVMM: Failed to create IOVMM (%d)\n", ret); + dev_err(iommu, "IOVMM: Failed to create IOVMM (%d)\n", ret); return ret; }