2 * This program is free software; you can redistribute it and/or modify
3 * it under the terms of the GNU General Public License version 2 as
4 * published by the Free Software Foundation.
7 #ifdef CONFIG_ROCKCHIP_IOMMU_DEBUG
12 #include <linux/interrupt.h>
13 #include <linux/slab.h>
14 #include <linux/clk.h>
15 #include <linux/err.h>
17 #include <linux/errno.h>
18 #include <linux/memblock.h>
19 #include <linux/export.h>
20 #include <linux/module.h>
22 #include <asm/cacheflush.h>
23 #include <asm/pgtable.h>
25 #include <linux/rockchip-iovmm.h>
26 #include <linux/rockchip/grf.h>
27 #include <linux/rockchip/cpu.h>
28 #include <linux/rockchip/iomap.h>
30 #include "rockchip-iommu.h"
32 /* We do not consider super-section (16 MB) mappings */
33 #define SPAGE_ORDER 12
34 #define SPAGE_SIZE (1 << SPAGE_ORDER)
35 #define SPAGE_MASK (~(SPAGE_SIZE - 1))
37 enum iommu_entry_flags {
38 IOMMU_FLAGS_PRESENT = 0x01,
39 IOMMU_FLAGS_READ_PERMISSION = 0x02,
40 IOMMU_FLAGS_WRITE_PERMISSION = 0x04,
41 IOMMU_FLAGS_OVERRIDE_CACHE = 0x8,
42 IOMMU_FLAGS_WRITE_CACHEABLE = 0x10,
43 IOMMU_FLAGS_WRITE_ALLOCATE = 0x20,
44 IOMMU_FLAGS_WRITE_BUFFERABLE = 0x40,
45 IOMMU_FLAGS_READ_CACHEABLE = 0x80,
46 IOMMU_FLAGS_READ_ALLOCATE = 0x100,
47 IOMMU_FLAGS_MASK = 0x1FF,
50 #define lv1ent_fault(sent) ((*(sent) & IOMMU_FLAGS_PRESENT) == 0)
51 #define lv1ent_page(sent) ((*(sent) & IOMMU_FLAGS_PRESENT) == 1)
52 #define lv2ent_fault(pent) ((*(pent) & IOMMU_FLAGS_PRESENT) == 0)
53 #define spage_phys(pent) (*(pent) & SPAGE_MASK)
54 #define spage_offs(iova) ((iova) & 0x0FFF)
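/*
 * Two-level page table walk: bits [31:22] of the IOVA index the level-1
 * directory (1024 entries), bits [21:12] index a level-2 table (1024
 * entries), and bits [11:0] are the offset within a 4 KiB page.
 */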
56 #define lv1ent_offset(iova) (((iova)>>22) & 0x03FF)
57 #define lv2ent_offset(iova) (((iova)>>12) & 0x03FF)
59 #define NUM_LV1ENTRIES 1024
60 #define NUM_LV2ENTRIES 1024
62 #define LV2TABLE_SIZE (NUM_LV2ENTRIES * sizeof(long))
64 #define lv2table_base(sent) (*(sent) & 0xFFFFFFFE)
66 #define mk_lv1ent_page(pa) ((pa) | IOMMU_FLAGS_PRESENT)
67 /* level-2 page entries get read and write permission by default */
68 #define mk_lv2ent_spage(pa) ((pa) | IOMMU_FLAGS_PRESENT | \
69 IOMMU_FLAGS_READ_PERMISSION | \
70 IOMMU_FLAGS_WRITE_PERMISSION)
72 #define IOMMU_REG_POLL_COUNT_FAST 1000
74 /* rk3036: the VPU and HEVC blocks share one AHB interface */
75 #define BIT_VCODEC_SEL (1<<3)
79 * MMU register numbers
80 * Used in the register read/write routines.
81 * See the hardware documentation for more information about each register
84 /**< Current Page Directory Pointer */
85 IOMMU_REGISTER_DTE_ADDR = 0x0000,
86 /**< Status of the MMU */
87 IOMMU_REGISTER_STATUS = 0x0004,
88 /**< Command register, used to control the MMU */
89 IOMMU_REGISTER_COMMAND = 0x0008,
90 /**< Logical address of the last page fault */
91 IOMMU_REGISTER_PAGE_FAULT_ADDR = 0x000C,
92 /**< Used to invalidate the mapping of a single page from the MMU */
93 IOMMU_REGISTER_ZAP_ONE_LINE = 0x010,
94 /**< Raw interrupt status, all interrupts visible */
95 IOMMU_REGISTER_INT_RAWSTAT = 0x0014,
96 /**< Indicate to the MMU that the interrupt has been received */
97 IOMMU_REGISTER_INT_CLEAR = 0x0018,
98 /**< Enable/disable types of interrupts */
99 IOMMU_REGISTER_INT_MASK = 0x001C,
100 /**< Interrupt status based on the mask */
101 IOMMU_REGISTER_INT_STATUS = 0x0020,
102 IOMMU_REGISTER_AUTO_GATING = 0x0024
106 /**< Enable paging (memory translation) */
107 IOMMU_COMMAND_ENABLE_PAGING = 0x00,
108 /**< Disable paging (memory translation) */
109 IOMMU_COMMAND_DISABLE_PAGING = 0x01,
110 /**< Enable stall on page fault */
111 IOMMU_COMMAND_ENABLE_STALL = 0x02,
112 /**< Disable stall on page fault */
113 IOMMU_COMMAND_DISABLE_STALL = 0x03,
114 /**< Zap the entire page table cache */
115 IOMMU_COMMAND_ZAP_CACHE = 0x04,
116 /**< Page fault processed */
117 IOMMU_COMMAND_PAGE_FAULT_DONE = 0x05,
118 /**< Reset the MMU back to power-on settings */
119 IOMMU_COMMAND_HARD_RESET = 0x06
123 * MMU interrupt register bits
124 * Each cause of the interrupt is reported
125 * through the (raw) interrupt status registers.
126 * Multiple interrupts can be pending, so multiple bits
127 * can be set at once.
129 enum iommu_interrupt {
130 IOMMU_INTERRUPT_PAGE_FAULT = 0x01, /**< A page fault occurred */
131 IOMMU_INTERRUPT_READ_BUS_ERROR = 0x02 /**< A bus read error occurred */
134 enum iommu_status_bits {
135 IOMMU_STATUS_BIT_PAGING_ENABLED = 1 << 0,
136 IOMMU_STATUS_BIT_PAGE_FAULT_ACTIVE = 1 << 1,
137 IOMMU_STATUS_BIT_STALL_ACTIVE = 1 << 2,
138 IOMMU_STATUS_BIT_IDLE = 1 << 3,
139 IOMMU_STATUS_BIT_REPLAY_BUFFER_EMPTY = 1 << 4,
140 IOMMU_STATUS_BIT_PAGE_FAULT_IS_WRITE = 1 << 5,
141 IOMMU_STATUS_BIT_STALL_NOT_ACTIVE = 1 << 31,
145 * Size of an MMU page in bytes
147 #define IOMMU_PAGE_SIZE 0x1000
150 * Size of the address space referenced by a page table page
152 #define IOMMU_VIRTUAL_PAGE_SIZE 0x400000 /* 4 MiB */
155 * Page directory index from address
156 * Calculates the page directory index from the given address
158 #define IOMMU_PDE_ENTRY(address) (((address)>>22) & 0x03FF)
161 * Page table index from address
162 * Calculates the page table index from the given address
164 #define IOMMU_PTE_ENTRY(address) (((address)>>12) & 0x03FF)
167 * Extract the memory address from a PDE/PTE entry
169 #define IOMMU_ENTRY_ADDRESS(value) ((value) & 0xFFFFFC00)
171 #define INVALID_PAGE ((u32)(~0))
173 static struct kmem_cache *lv2table_kmem_cache;
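/*
 * On RK3036/RK312x the VPU and HEVC decoders share a single AHB interface,
 * selected by BIT_VCODEC_SEL in GRF_SOC_CON1.  The value written also sets
 * the same bit in the upper half-word, which the Rockchip GRF uses as a
 * write-enable mask for the low bits.
 */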
175 static void rockchip_vcodec_select(const char *string)
177 if (strstr(string, "hevc"))
179 writel_relaxed(readl_relaxed(RK_GRF_VIRT + RK3036_GRF_SOC_CON1) |
180 (BIT_VCODEC_SEL) | (BIT_VCODEC_SEL << 16),
181 RK_GRF_VIRT + RK3036_GRF_SOC_CON1);
183 else if (strstr(string, "vpu"))
185 writel_relaxed((readl_relaxed(RK_GRF_VIRT + RK3036_GRF_SOC_CON1) &
186 (~BIT_VCODEC_SEL)) | (BIT_VCODEC_SEL << 16),
187 RK_GRF_VIRT + RK3036_GRF_SOC_CON1);
190 static unsigned long *section_entry(unsigned long *pgtable, unsigned long iova)
192 return pgtable + lv1ent_offset(iova);
195 static unsigned long *page_entry(unsigned long *sent, unsigned long iova)
197 return (unsigned long *)__va(lv2table_base(sent)) + lv2ent_offset(iova);
200 static char *iommu_fault_name[IOMMU_FAULTS_NUM] = {
206 struct rk_iommu_domain {
207 struct list_head clients; /* list of iommu_drvdata.node */
208 unsigned long *pgtable; /* lv1 page table, 4KB */
209 short *lv2entcnt; /* free lv2 entry counter for each section */
210 spinlock_t lock; /* lock for this structure */
211 spinlock_t pgtablelock; /* lock for modifying page table @ pgtable */
214 static bool set_iommu_active(struct iommu_drvdata *data)
216 /* return true if the IOMMU was not active previously
217 and it needs to be initialized */
218 return ++data->activations == 1;
221 static bool set_iommu_inactive(struct iommu_drvdata *data)
223 /* return true if the IOMMU needs to be disabled */
224 BUG_ON(data->activations < 1);
225 return --data->activations == 0;
228 static bool is_iommu_active(struct iommu_drvdata *data)
230 return data->activations > 0;
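/*
 * iommu_enable_stall()/iommu_disable_stall() issue the ENABLE_STALL/
 * DISABLE_STALL commands and poll the status register (up to
 * IOMMU_REG_POLL_COUNT_FAST reads) until the stall state changes.  Both
 * bail out early if paging is disabled or a page fault is active.
 */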
233 static void iommu_disable_stall(void __iomem *base)
236 u32 mmu_status = __raw_readl(base + IOMMU_REGISTER_STATUS);
238 if (0 == (mmu_status & IOMMU_STATUS_BIT_PAGING_ENABLED))
240 if (mmu_status & IOMMU_STATUS_BIT_PAGE_FAULT_ACTIVE) {
241 pr_err("Aborting MMU disable stall request since it is in pagefault state.\n");
244 __raw_writel(IOMMU_COMMAND_DISABLE_STALL,
245 base + IOMMU_REGISTER_COMMAND);
247 for (i = 0; i < IOMMU_REG_POLL_COUNT_FAST; ++i) {
248 u32 status = __raw_readl(base + IOMMU_REGISTER_STATUS);
250 if (0 == (status & IOMMU_STATUS_BIT_STALL_ACTIVE))
252 if (status & IOMMU_STATUS_BIT_PAGE_FAULT_ACTIVE)
254 if (0 == (mmu_status & IOMMU_STATUS_BIT_PAGING_ENABLED))
257 if (IOMMU_REG_POLL_COUNT_FAST == i)
258 pr_err("Disable stall request failed, MMU status is 0x%08X\n",
259 __raw_readl(base + IOMMU_REGISTER_STATUS));
262 static bool iommu_enable_stall(void __iomem *base)
266 u32 mmu_status = __raw_readl(base + IOMMU_REGISTER_STATUS);
268 if (0 == (mmu_status & IOMMU_STATUS_BIT_PAGING_ENABLED))
270 if (mmu_status & IOMMU_STATUS_BIT_PAGE_FAULT_ACTIVE) {
271 pr_err("Aborting MMU stall request since it is in pagefault state.\n");
274 __raw_writel(IOMMU_COMMAND_ENABLE_STALL,
275 base + IOMMU_REGISTER_COMMAND);
277 for (i = 0; i < IOMMU_REG_POLL_COUNT_FAST; ++i) {
278 mmu_status = __raw_readl(base + IOMMU_REGISTER_STATUS);
279 if (mmu_status & IOMMU_STATUS_BIT_PAGE_FAULT_ACTIVE)
281 if ((mmu_status & IOMMU_STATUS_BIT_STALL_ACTIVE) &&
282 (0 == (mmu_status & IOMMU_STATUS_BIT_STALL_NOT_ACTIVE)))
284 if (0 == (mmu_status & (IOMMU_STATUS_BIT_PAGING_ENABLED)))
287 if (IOMMU_REG_POLL_COUNT_FAST == i) {
288 pr_err("Enable stall request failed, MMU status is 0x%08X\n",
289 __raw_readl(base + IOMMU_REGISTER_STATUS));
292 if (mmu_status & IOMMU_STATUS_BIT_PAGE_FAULT_ACTIVE) {
293 pr_err("Aborting MMU stall request since it has a pagefault.\n");
299 static bool iommu_enable_paging(void __iomem *base)
303 __raw_writel(IOMMU_COMMAND_ENABLE_PAGING,
304 base + IOMMU_REGISTER_COMMAND);
306 for (i = 0; i < IOMMU_REG_POLL_COUNT_FAST; ++i) {
307 if (__raw_readl(base + IOMMU_REGISTER_STATUS) &
308 IOMMU_STATUS_BIT_PAGING_ENABLED)
311 if (IOMMU_REG_POLL_COUNT_FAST == i) {
312 pr_err("Enable paging request failed, MMU status is 0x%08X\n",
313 __raw_readl(base + IOMMU_REGISTER_STATUS));
319 static bool iommu_disable_paging(void __iomem *base)
323 __raw_writel(IOMMU_COMMAND_DISABLE_PAGING,
324 base + IOMMU_REGISTER_COMMAND);
326 for (i = 0; i < IOMMU_REG_POLL_COUNT_FAST; ++i) {
327 if (!(__raw_readl(base + IOMMU_REGISTER_STATUS) &
328 IOMMU_STATUS_BIT_PAGING_ENABLED))
331 if (IOMMU_REG_POLL_COUNT_FAST == i) {
332 pr_err("Disable paging request failed, MMU status is 0x%08X\n",
333 __raw_readl(base + IOMMU_REGISTER_STATUS));
339 static void iommu_page_fault_done(void __iomem *base, const char *dbgname)
341 pr_info("MMU: %s: Leaving page fault mode\n",
343 __raw_writel(IOMMU_COMMAND_PAGE_FAULT_DONE,
344 base + IOMMU_REGISTER_COMMAND);
347 static bool iommu_zap_tlb(void __iomem *base)
349 bool stall_success = iommu_enable_stall(base);
351 __raw_writel(IOMMU_COMMAND_ZAP_CACHE,
352 base + IOMMU_REGISTER_COMMAND);
355 iommu_disable_stall(base);
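/*
 * iommu_raw_reset() first writes a known pattern to DTE_ADDR to verify the
 * register is writable, then issues HARD_RESET and polls until DTE_ADDR
 * reads back as zero.
 */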
359 static inline bool iommu_raw_reset(void __iomem *base)
363 __raw_writel(0xCAFEBABE, base + IOMMU_REGISTER_DTE_ADDR);
365 if (__raw_readl(base + IOMMU_REGISTER_DTE_ADDR) != 0xCAFEB000) {
366 pr_err("error when %s.\n", __func__);
369 __raw_writel(IOMMU_COMMAND_HARD_RESET,
370 base + IOMMU_REGISTER_COMMAND);
372 for (i = 0; i < IOMMU_REG_POLL_COUNT_FAST; ++i) {
373 if (__raw_readl(base + IOMMU_REGISTER_DTE_ADDR) == 0)
376 if (IOMMU_REG_POLL_COUNT_FAST == i) {
377 pr_err("%s: Reset request failed, MMU status is 0x%08X\n",
378 __func__, __raw_readl(base + IOMMU_REGISTER_DTE_ADDR));
384 static void __iommu_set_ptbase(void __iomem *base, unsigned long pgd)
386 __raw_writel(pgd, base + IOMMU_REGISTER_DTE_ADDR);
389 static bool iommu_reset(void __iomem *base, const char *dbgname)
393 err = iommu_enable_stall(base);
395 pr_err("%s:stall failed: %s\n", __func__, dbgname);
398 err = iommu_raw_reset(base);
400 __raw_writel(IOMMU_INTERRUPT_PAGE_FAULT |
401 IOMMU_INTERRUPT_READ_BUS_ERROR,
402 base+IOMMU_REGISTER_INT_MASK);
403 iommu_disable_stall(base);
405 pr_err("%s: failed: %s\n", __func__, dbgname);
409 static inline void pgtable_flush(void *vastart, void *vaend)
411 dmac_flush_range(vastart, vaend);
412 outer_flush_range(virt_to_phys(vastart), virt_to_phys(vaend));
415 static void set_fault_handler(struct iommu_drvdata *data,
416 rockchip_iommu_fault_handler_t handler)
420 write_lock_irqsave(&data->lock, flags);
421 data->fault_handler = handler;
422 write_unlock_irqrestore(&data->lock, flags);
425 static int default_fault_handler(struct device *dev,
426 enum rk_iommu_inttype itype,
427 unsigned long pgtable_base,
428 unsigned long fault_addr,
431 struct iommu_drvdata *data = dev_get_drvdata(dev->archdata.iommu);
434 pr_err("%s: iommu device not assigned yet\n", __func__);
437 if ((itype >= IOMMU_FAULTS_NUM) || (itype < IOMMU_PAGEFAULT))
438 itype = IOMMU_FAULT_UNKNOWN;
440 if (itype == IOMMU_BUSERROR)
441 pr_err("%s occurred at 0x%lx (page table base: 0x%lx)\n",
442 iommu_fault_name[itype], fault_addr, pgtable_base);
444 if (itype == IOMMU_PAGEFAULT)
445 pr_err("IOMMU:Page fault detected at 0x%lx from bus id %d of type %s on %s\n",
447 (status >> 6) & 0x1F,
448 (status & 32) ? "write" : "read",
451 pr_err("Generating Kernel OOPS... because it is unrecoverable.\n");
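/*
 * dump_pagetbl() walks the two-level page table by hand for the faulting
 * address and prints the physical and virtual addresses, and the values,
 * of the level-1 and level-2 entries involved.
 */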
458 static void dump_pagetbl(u32 fault_address, u32 addr_dte)
465 u32 *lv1_entry_value;
470 u32 *lv2_entry_value;
473 lv1_offset = lv1ent_offset(fault_address);
474 lv2_offset = lv2ent_offset(fault_address);
476 lv1_entry_pa = (u32 *)addr_dte + lv1_offset;
477 lv1_entry_va = (u32 *)(__va(addr_dte)) + lv1_offset;
478 lv1_entry_value = (u32 *)(*lv1_entry_va);
480 lv2_base = (u32 *)((*lv1_entry_va) & 0xfffffffe);
481 lv2_entry_pa = (u32 *)lv2_base + lv2_offset;
482 lv2_entry_va = (u32 *)(__va(lv2_base)) + lv2_offset;
483 lv2_entry_value = (u32 *)(*lv2_entry_va);
485 pr_info("fault address = 0x%08x, dte addr pa = 0x%08x, va = 0x%08x\n",
486 fault_address, addr_dte, (u32)__va(addr_dte));
487 pr_info("lv1_offset = 0x%x, lv1_entry_pa = 0x%08x, lv1_entry_va = 0x%08x\n",
488 lv1_offset, (u32)lv1_entry_pa, (u32)lv1_entry_va);
489 pr_info("lv1_entry_value(*lv1_entry_va) = 0x%08x, lv2_base = 0x%08x\n",
490 (u32)lv1_entry_value, (u32)lv2_base);
491 pr_info("lv2_offset = 0x%x, lv2_entry_pa = 0x%08x, lv2_entry_va = 0x%08x\n",
492 lv2_offset, (u32)lv2_entry_pa, (u32)lv2_entry_va);
493 pr_info("lv2_entry value(*lv2_entry_va) = 0x%08x\n",
494 (u32)lv2_entry_value);
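/*
 * Interrupt handler: find the register block whose IRQ fired, read the raw
 * interrupt status to classify the fault (page fault or read bus error),
 * dump the page table, then hand the fault to the registered fault handler.
 * Page faults are completed with a TLB zap and a PAGE_FAULT_DONE command;
 * unhandled faults are logged.
 */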
497 static irqreturn_t rockchip_iommu_irq(int irq, void *dev_id)
499 /* The IOMMU is in a blocked (stalled) state when the interrupt occurs. */
500 struct iommu_drvdata *data = dev_id;
501 struct resource *irqres;
502 struct platform_device *pdev;
503 enum rk_iommu_inttype itype = IOMMU_FAULT_UNKNOWN;
510 read_lock(&data->lock);
512 if (!is_iommu_active(data)) {
513 read_unlock(&data->lock);
517 if (cpu_is_312x() || cpu_is_3036())
518 rockchip_vcodec_select(data->dbgname);
520 pdev = to_platform_device(data->iommu);
522 for (i = 0; i < data->num_res_irq; i++) {
523 irqres = platform_get_resource(pdev, IORESOURCE_IRQ, i);
524 if (irqres && ((int)irqres->start == irq))
528 if (i == data->num_res_irq) {
529 itype = IOMMU_FAULT_UNKNOWN;
531 int_status = __raw_readl(data->res_bases[i] +
532 IOMMU_REGISTER_INT_STATUS);
534 if (int_status != 0) {
536 __raw_writel(0x00, data->res_bases[i] +
537 IOMMU_REGISTER_INT_MASK);
539 rawstat = __raw_readl(data->res_bases[i] +
540 IOMMU_REGISTER_INT_RAWSTAT);
542 if (rawstat & IOMMU_INTERRUPT_PAGE_FAULT) {
543 fault_address = __raw_readl(data->res_bases[i] +
544 IOMMU_REGISTER_PAGE_FAULT_ADDR);
545 itype = IOMMU_PAGEFAULT;
546 } else if (rawstat & IOMMU_INTERRUPT_READ_BUS_ERROR) {
547 itype = IOMMU_BUSERROR;
551 dump_pagetbl(fault_address,
552 __raw_readl(data->res_bases[i] +
553 IOMMU_REGISTER_DTE_ADDR));
559 if (data->fault_handler) {
560 unsigned long base = __raw_readl(data->res_bases[i] +
561 IOMMU_REGISTER_DTE_ADDR);
562 status = __raw_readl(data->res_bases[i] +
563 IOMMU_REGISTER_STATUS);
564 ret = data->fault_handler(data->dev, itype, base,
565 fault_address, status);
568 if (!ret && (itype != IOMMU_FAULT_UNKNOWN)) {
569 if (IOMMU_PAGEFAULT == itype) {
570 iommu_zap_tlb(data->res_bases[i]);
571 iommu_page_fault_done(data->res_bases[i],
573 __raw_writel(IOMMU_INTERRUPT_PAGE_FAULT |
574 IOMMU_INTERRUPT_READ_BUS_ERROR,
576 IOMMU_REGISTER_INT_MASK);
579 pr_err("(%s) %s is not handled.\n",
580 data->dbgname, iommu_fault_name[itype]);
584 read_unlock(&data->lock);
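/*
 * Drop one activation reference; when the count reaches zero, disable
 * paging on every register block of this IOMMU.
 */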
589 static bool __rockchip_iommu_disable(struct iommu_drvdata *data)
593 bool disabled = false;
595 write_lock_irqsave(&data->lock, flags);
597 if (!set_iommu_inactive(data))
600 for (i = 0; i < data->num_res_mem; i++)
601 iommu_disable_paging(data->res_bases[i]);
607 write_unlock_irqrestore(&data->lock, flags);
610 pr_info("(%s) Disabled\n", data->dbgname);
612 pr_info("(%s) %d times left to be disabled\n",
613 data->dbgname, data->activations);
618 /* __rockchip_iommu_enable: Enables System MMU
620 * returns -error if an error occurred and System MMU is not enabled,
621 * 0 if the System MMU has just been enabled, and 1 if it was already enabled.
624 static int __rockchip_iommu_enable(struct iommu_drvdata *data,
625 unsigned long pgtable,
626 struct iommu_domain *domain)
631 write_lock_irqsave(&data->lock, flags);
633 if (!set_iommu_active(data)) {
634 if (WARN_ON(pgtable != data->pgtable)) {
636 set_iommu_inactive(data);
641 pr_info("(%s) Already enabled\n", data->dbgname);
645 data->pgtable = pgtable;
647 for (i = 0; i < data->num_res_mem; i++) {
650 status = iommu_enable_stall(data->res_bases[i]);
652 __iommu_set_ptbase(data->res_bases[i], pgtable);
653 __raw_writel(IOMMU_COMMAND_ZAP_CACHE,
655 IOMMU_REGISTER_COMMAND);
657 __raw_writel(IOMMU_INTERRUPT_PAGE_FAULT |
658 IOMMU_INTERRUPT_READ_BUS_ERROR,
659 data->res_bases[i]+IOMMU_REGISTER_INT_MASK);
660 iommu_enable_paging(data->res_bases[i]);
661 iommu_disable_stall(data->res_bases[i]);
664 data->domain = domain;
666 pr_info("(%s) Enabled\n", data->dbgname);
668 write_unlock_irqrestore(&data->lock, flags);
673 bool rockchip_iommu_disable(struct device *dev)
675 struct iommu_drvdata *data = dev_get_drvdata(dev->archdata.iommu);
678 disabled = __rockchip_iommu_disable(data);
683 void rockchip_iommu_tlb_invalidate(struct device *dev)
686 struct iommu_drvdata *data = dev_get_drvdata(dev->archdata.iommu);
688 read_lock_irqsave(&data->lock, flags);
690 if (cpu_is_312x() || cpu_is_3036())
691 rockchip_vcodec_select(data->dbgname);
693 if (is_iommu_active(data)) {
696 for (i = 0; i < data->num_res_mem; i++) {
697 if (!iommu_zap_tlb(data->res_bases[i]))
698 pr_err("%s: invalidating TLB failed\n",
702 pr_info("(%s) Disabled. Skipping invalidating TLB.\n",
706 read_unlock_irqrestore(&data->lock, flags);
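/* Translate an IOVA to a physical address by a software page-table walk. */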
709 static phys_addr_t rockchip_iommu_iova_to_phys(struct iommu_domain *domain,
712 struct rk_iommu_domain *priv = domain->priv;
713 unsigned long *entry;
715 phys_addr_t phys = 0;
717 spin_lock_irqsave(&priv->pgtablelock, flags);
719 entry = section_entry(priv->pgtable, iova);
720 entry = page_entry(entry, iova);
721 phys = spage_phys(entry) + spage_offs(iova);
723 spin_unlock_irqrestore(&priv->pgtablelock, flags);
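/*
 * lv2set_page() installs a single 4 KiB read/write mapping if the level-2
 * entry is free; alloc_lv2entry() populates a missing level-1 entry with a
 * zeroed level-2 table from lv2table_kmem_cache and returns the level-2
 * entry for the IOVA.
 */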
728 static int lv2set_page(unsigned long *pent, phys_addr_t paddr,
729 size_t size, short *pgcnt)
731 if (!lv2ent_fault(pent))
734 *pent = mk_lv2ent_spage(paddr);
735 pgtable_flush(pent, pent + 1);
740 static unsigned long *alloc_lv2entry(unsigned long *sent,
741 unsigned long iova, short *pgcounter)
743 if (lv1ent_fault(sent)) {
746 pent = kmem_cache_zalloc(lv2table_kmem_cache, GFP_ATOMIC);
747 BUG_ON((unsigned long)pent & (LV2TABLE_SIZE - 1));
751 *sent = mk_lv1ent_page(__pa(pent));
752 kmemleak_ignore(pent);
753 *pgcounter = NUM_LV2ENTRIES;
754 pgtable_flush(pent, pent + NUM_LV2ENTRIES);
755 pgtable_flush(sent, sent + 1);
757 return page_entry(sent, iova);
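/*
 * Unmap clears the level-2 entry for the IOVA (only SPAGE_SIZE pages are
 * supported) and gives the freed slot back to the per-section counter.
 */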
760 static size_t rockchip_iommu_unmap(struct iommu_domain *domain,
761 unsigned long iova, size_t size)
763 struct rk_iommu_domain *priv = domain->priv;
767 BUG_ON(priv->pgtable == NULL);
769 spin_lock_irqsave(&priv->pgtablelock, flags);
771 ent = section_entry(priv->pgtable, iova);
773 if (unlikely(lv1ent_fault(ent))) {
774 if (size > SPAGE_SIZE)
779 /* lv1ent_page(sent) == true here */
781 ent = page_entry(ent, iova);
783 if (unlikely(lv2ent_fault(ent))) {
790 priv->lv2entcnt[lv1ent_offset(iova)] += 1;
794 /*pr_info("%s:unmap iova 0x%lx/0x%x bytes\n",
795 __func__, iova,size);
797 spin_unlock_irqrestore(&priv->pgtablelock, flags);
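/*
 * Map allocates the level-2 table on demand and installs one small-page
 * entry; rk_iommu_ops.pgsize_bitmap limits requests to SPAGE_SIZE.
 */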
802 static int rockchip_iommu_map(struct iommu_domain *domain, unsigned long iova,
803 phys_addr_t paddr, size_t size, int prot)
805 struct rk_iommu_domain *priv = domain->priv;
806 unsigned long *entry;
811 BUG_ON(priv->pgtable == NULL);
813 spin_lock_irqsave(&priv->pgtablelock, flags);
815 entry = section_entry(priv->pgtable, iova);
817 pent = alloc_lv2entry(entry, iova,
818 &priv->lv2entcnt[lv1ent_offset(iova)]);
822 ret = lv2set_page(pent, paddr, size,
823 &priv->lv2entcnt[lv1ent_offset(iova)]);
826 pr_err("%s: Failed to map iova 0x%lx/0x%x bytes\n", __func__,
829 spin_unlock_irqrestore(&priv->pgtablelock, flags);
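/*
 * Detach: find this device's drvdata on the domain's client list and, if
 * the IOMMU is fully disabled, remove it from the list; otherwise the
 * detach is reported as delayed.
 */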
834 static void rockchip_iommu_detach_device(struct iommu_domain *domain,
837 struct iommu_drvdata *data = dev_get_drvdata(dev->archdata.iommu);
838 struct rk_iommu_domain *priv = domain->priv;
839 struct list_head *pos;
843 spin_lock_irqsave(&priv->lock, flags);
845 list_for_each(pos, &priv->clients)
847 if (list_entry(pos, struct iommu_drvdata, node) == data) {
855 if (cpu_is_312x() || cpu_is_3036())
856 rockchip_vcodec_select(data->dbgname);
858 if (__rockchip_iommu_disable(data)) {
859 pr_info("%s: Detached IOMMU with pgtable %#lx\n",
860 __func__, __pa(priv->pgtable));
861 list_del(&data->node);
862 INIT_LIST_HEAD(&data->node);
865 pr_info("%s: Detaching IOMMU with pgtable %#lx delayed\n",
866 __func__, __pa(priv->pgtable));
869 spin_unlock_irqrestore(&priv->lock, flags);
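/*
 * Attach: enable the IOMMU with the domain's page-table base; on first
 * activation the device is added to the domain's client list.
 */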
872 static int rockchip_iommu_attach_device(struct iommu_domain *domain,
875 struct iommu_drvdata *data = dev_get_drvdata(dev->archdata.iommu);
876 struct rk_iommu_domain *priv = domain->priv;
880 spin_lock_irqsave(&priv->lock, flags);
882 if (cpu_is_312x() || cpu_is_3036())
883 rockchip_vcodec_select(data->dbgname);
885 ret = __rockchip_iommu_enable(data, __pa(priv->pgtable), domain);
888 /* 'data->node' must not already be on priv->clients */
889 BUG_ON(!list_empty(&data->node));
891 list_add_tail(&data->node, &priv->clients);
894 spin_unlock_irqrestore(&priv->lock, flags);
897 pr_err("%s: Failed to attach IOMMU with pgtable %#lx\n",
898 __func__, __pa(priv->pgtable));
899 } else if (ret > 0) {
900 pr_info("%s: IOMMU with pgtable 0x%lx already attached\n",
901 __func__, __pa(priv->pgtable));
903 pr_info("%s: Attached new IOMMU with pgtable 0x%lx\n",
904 __func__, __pa(priv->pgtable));
910 static void rockchip_iommu_domain_destroy(struct iommu_domain *domain)
912 struct rk_iommu_domain *priv = domain->priv;
913 struct iommu_drvdata *data;
917 WARN_ON(!list_empty(&priv->clients));
919 spin_lock_irqsave(&priv->lock, flags);
921 list_for_each_entry(data, &priv->clients, node) {
922 if (cpu_is_312x() || cpu_is_3036())
923 rockchip_vcodec_select(data->dbgname);
924 while (!rockchip_iommu_disable(data->dev))
925 ; /* until System MMU is actually disabled */
927 spin_unlock_irqrestore(&priv->lock, flags);
929 for (i = 0; i < NUM_LV1ENTRIES; i++)
930 if (lv1ent_page(priv->pgtable + i))
931 kmem_cache_free(lv2table_kmem_cache,
932 __va(lv2table_base(priv->pgtable + i)));
934 free_pages((unsigned long)priv->pgtable, 0);
935 free_pages((unsigned long)priv->lv2entcnt, 0);
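/*
 * Domain init: allocate one page for the level-1 table and one page for
 * the per-section free-entry counters, flush the table to memory and
 * initialise the locks and client list.
 */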
940 static int rockchip_iommu_domain_init(struct iommu_domain *domain)
942 struct rk_iommu_domain *priv;
944 priv = kzalloc(sizeof(*priv), GFP_KERNEL);
948 /* The rk32xx IOMMU uses a two-level page table.
949 * Level 1 and level 2 each have 1024 entries of 4 bytes,
950 * so allocate one page for each table.
952 priv->pgtable = (unsigned long *)__get_free_pages(GFP_KERNEL |
957 priv->lv2entcnt = (short *)__get_free_pages(GFP_KERNEL |
959 if (!priv->lv2entcnt)
962 pgtable_flush(priv->pgtable, priv->pgtable + NUM_LV1ENTRIES);
964 spin_lock_init(&priv->lock);
965 spin_lock_init(&priv->pgtablelock);
966 INIT_LIST_HEAD(&priv->clients);
972 free_pages((unsigned long)priv->pgtable, 0);
978 static struct iommu_ops rk_iommu_ops = {
979 .domain_init = &rockchip_iommu_domain_init,
980 .domain_destroy = &rockchip_iommu_domain_destroy,
981 .attach_dev = &rockchip_iommu_attach_device,
982 .detach_dev = &rockchip_iommu_detach_device,
983 .map = &rockchip_iommu_map,
984 .unmap = &rockchip_iommu_unmap,
985 .iova_to_phys = &rockchip_iommu_iova_to_phys,
986 .pgsize_bitmap = SPAGE_SIZE,
989 static int rockchip_iommu_prepare(void)
997 lv2table_kmem_cache = kmem_cache_create("rk-iommu-lv2table",
1001 if (!lv2table_kmem_cache) {
1002 pr_err("%s: failed to create kmem cache\n", __func__);
1005 ret = bus_set_iommu(&platform_bus_type, &rk_iommu_ops);
1009 pr_err("%s: failed to set iommu ops for the bus\n", __func__);
1013 static int rockchip_get_iommu_resource_num(struct platform_device *pdev,
1016 struct resource *info = NULL;
1017 int num_resources = 0;
1019 /* get resource info */
1021 info = platform_get_resource(pdev, type, num_resources);
1026 return num_resources;
1029 static struct kobject *dump_mmu_object;
1031 static ssize_t dump_mmu_pagetbl(struct device *dev, struct device_attribute *attr,
1032 const char *buf, size_t count)
1040 ret = kstrtouint(buf, 0, &mmu_base);
1042 pr_info("%s is not in hexadecimal form.\n", buf);
1043 base = ioremap(mmu_base, 0x100);
1044 iommu_dte = __raw_readl(base + IOMMU_REGISTER_DTE_ADDR);
1045 fault_address = __raw_readl(base + IOMMU_REGISTER_PAGE_FAULT_ADDR);
1046 dump_pagetbl(fault_address, iommu_dte);
1050 static DEVICE_ATTR(dump_mmu_pgtable, 0644, NULL, dump_mmu_pagetbl);
1052 void dump_iommu_sysfs_init(void)
1056 dump_mmu_object = kobject_create_and_add("rk_iommu", NULL);
1057 if (dump_mmu_object == NULL)
1059 ret = sysfs_create_file(dump_mmu_object,
1060 &dev_attr_dump_mmu_pgtable.attr);
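/*
 * Probe: prepare the lv2-table cache and bus IOMMU ops, map every MEM
 * resource, reset each MMU instance (ISP excepted), request the shared
 * IRQs, and install the default fault handler.
 */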
1063 static int rockchip_iommu_probe(struct platform_device *pdev)
1067 struct iommu_drvdata *data;
1071 ret = rockchip_iommu_prepare();
1073 pr_err("%s: failed\n", __func__);
1077 data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
1079 dev_dbg(dev, "Not enough memory\n");
1083 dev_set_drvdata(dev, data);
1085 ret = dev_set_drvdata(dev, data);
1088 dev_dbg(dev, "Unable to initialize driver data\n");
1092 if (pdev->dev.of_node) {
1093 of_property_read_string(pdev->dev.of_node,
1094 "dbgname", &(data->dbgname));
1096 pr_info("dbgname not assigned in device tree or device node does not exist\n");
1099 pr_info("(%s) Enter\n", data->dbgname);
1101 data->num_res_mem = rockchip_get_iommu_resource_num(pdev,
1103 if (0 == data->num_res_mem) {
1104 pr_err("can't find iommu memory resource\n");
1107 pr_info("data->num_res_mem=%d\n", data->num_res_mem);
1108 data->num_res_irq = rockchip_get_iommu_resource_num(pdev,
1110 if (0 == data->num_res_irq) {
1111 pr_err("can't find iommu irq resource\n");
1115 data->res_bases = kmalloc_array(data->num_res_mem,
1116 sizeof(*data->res_bases), GFP_KERNEL);
1117 if (data->res_bases == NULL) {
1118 dev_dbg(dev, "Not enough memory\n");
1123 for (i = 0; i < data->num_res_mem; i++) {
1124 struct resource *res;
1126 res = platform_get_resource(pdev, IORESOURCE_MEM, i);
1128 pr_err("Unable to find IOMEM region\n");
1132 data->res_bases[i] = ioremap(res->start, resource_size(res));
1133 pr_info("res->start = 0x%08x ioremap to data->res_bases[%d] = 0x%08x\n",
1134 res->start, i, (unsigned int)data->res_bases[i]);
1135 if (!data->res_bases[i]) {
1136 pr_err("Unable to map IOMEM @ PA:%#x\n", res->start);
1141 if (cpu_is_312x() || cpu_is_3036())
1142 rockchip_vcodec_select(data->dbgname);
1144 if (!strstr(data->dbgname, "isp")) {
1145 if (!iommu_reset(data->res_bases[i], data->dbgname)) {
1152 for (i = 0; i < data->num_res_irq; i++) {
1153 ret = platform_get_irq(pdev, i);
1155 pr_err("Unable to find IRQ resource\n");
1158 ret = request_irq(ret, rockchip_iommu_irq,
1159 IRQF_SHARED, dev_name(dev), data);
1161 pr_err("Unable to register interrupt handler\n");
1165 ret = rockchip_init_iovmm(dev, &data->vmm);
1170 rwlock_init(&data->lock);
1171 INIT_LIST_HEAD(&data->node);
1173 set_fault_handler(data, &default_fault_handler);
1175 pr_info("(%s) Initialized\n", data->dbgname);
1182 irq = platform_get_irq(pdev, i);
1183 free_irq(irq, data);
1186 while (data->num_res_mem-- > 0)
1187 iounmap(data->res_bases[data->num_res_mem]);
1188 kfree(data->res_bases);
1192 dev_err(dev, "Failed to initialize\n");
1197 static const struct of_device_id iommu_dt_ids[] = {
1198 { .compatible = IEP_IOMMU_COMPATIBLE_NAME},
1199 { .compatible = VIP_IOMMU_COMPATIBLE_NAME},
1200 { .compatible = VOPB_IOMMU_COMPATIBLE_NAME},
1201 { .compatible = VOPL_IOMMU_COMPATIBLE_NAME},
1202 { .compatible = HEVC_IOMMU_COMPATIBLE_NAME},
1203 { .compatible = VPU_IOMMU_COMPATIBLE_NAME},
1204 { .compatible = ISP_IOMMU_COMPATIBLE_NAME},
1205 { .compatible = VOP_IOMMU_COMPATIBLE_NAME},
1209 MODULE_DEVICE_TABLE(of, iommu_dt_ids);
1212 static struct platform_driver rk_iommu_driver = {
1213 .probe = rockchip_iommu_probe,
1217 .owner = THIS_MODULE,
1218 .of_match_table = of_match_ptr(iommu_dt_ids),
1222 static int __init rockchip_iommu_init_driver(void)
1224 dump_iommu_sysfs_init();
1226 return platform_driver_register(&rk_iommu_driver);
1229 core_initcall(rockchip_iommu_init_driver);