/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#ifdef CONFIG_ROCKCHIP_IOMMU_DEBUG

#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/memblock.h>
#include <linux/export.h>
#include <linux/module.h>

#include <asm/cacheflush.h>
#include <asm/pgtable.h>

#include <linux/rockchip-iovmm.h>
#include <linux/rockchip/grf.h>
#include <linux/rockchip/cpu.h>
#include <linux/rockchip/iomap.h>
#include <linux/device.h>
/* We do not consider super section mapping (16 MB) */
#define SPAGE_ORDER 12
#define SPAGE_SIZE (1 << SPAGE_ORDER)
#define SPAGE_MASK (~(SPAGE_SIZE - 1))
static void __iomem *rk312x_vop_mmu_base;

enum iommu_entry_flags {
        IOMMU_FLAGS_PRESENT = 0x01,
        IOMMU_FLAGS_READ_PERMISSION = 0x02,
        IOMMU_FLAGS_WRITE_PERMISSION = 0x04,
        IOMMU_FLAGS_OVERRIDE_CACHE = 0x8,
        IOMMU_FLAGS_WRITE_CACHEABLE = 0x10,
        IOMMU_FLAGS_WRITE_ALLOCATE = 0x20,
        IOMMU_FLAGS_WRITE_BUFFERABLE = 0x40,
        IOMMU_FLAGS_READ_CACHEABLE = 0x80,
        IOMMU_FLAGS_READ_ALLOCATE = 0x100,
        IOMMU_FLAGS_MASK = 0x1FF,
#define rockchip_lv1ent_fault(sent) ((*(sent) & IOMMU_FLAGS_PRESENT) == 0)
#define rockchip_lv1ent_page(sent) ((*(sent) & IOMMU_FLAGS_PRESENT) == 1)
#define rockchip_lv2ent_fault(pent) ((*(pent) & IOMMU_FLAGS_PRESENT) == 0)
#define rockchip_spage_phys(pent) (*(pent) & SPAGE_MASK)
#define rockchip_spage_offs(iova) ((iova) & 0x0FFF)

#define rockchip_lv1ent_offset(iova) (((iova) >> 22) & 0x03FF)
#define rockchip_lv2ent_offset(iova) (((iova) >> 12) & 0x03FF)
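/*
 * Illustrative sketch (not part of the original driver; the function name is
 * made up): shows how a 32-bit IOVA decomposes under the macros above. For
 * example, 0x12345678 yields lv1 (dte) index 0x048, lv2 (pte) index 0x345 and
 * page offset 0x678, i.e. one lv1 entry covers 4 MiB and one lv2 entry covers
 * a 4 KiB page.
 */
static void __maybe_unused rockchip_iova_decompose_example(unsigned long iova)
{
        pr_debug("iova %#lx -> lv1 %#lx, lv2 %#lx, offset %#lx\n",
                 iova, rockchip_lv1ent_offset(iova),
                 rockchip_lv2ent_offset(iova), rockchip_spage_offs(iova));
}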
#define NUM_LV1ENTRIES 1024
#define NUM_LV2ENTRIES 1024

#define LV2TABLE_SIZE (NUM_LV2ENTRIES * sizeof(long))

#define rockchip_lv2table_base(sent) (*(sent) & 0xFFFFFFFE)
#define rockchip_mk_lv1ent_page(pa) ((pa) | IOMMU_FLAGS_PRESENT)
/* level-2 page entries get read and write permission by default */
#define rockchip_mk_lv2ent_spage(pa) ((pa) | IOMMU_FLAGS_PRESENT | \
                                      IOMMU_FLAGS_READ_PERMISSION | \
                                      IOMMU_FLAGS_WRITE_PERMISSION)
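/*
 * Worked example (illustrative, not from the original source): for a
 * page-aligned physical address 0x80000000, rockchip_mk_lv2ent_spage()
 * yields 0x80000007 (present + readable + writable), and
 * rockchip_spage_phys() masks the low flag/offset bits off again to
 * recover 0x80000000.
 */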
#define IOMMU_REG_POLL_COUNT_FAST 1000

/*
 * MMU register numbers
 * Used in the register read/write routines.
 * See the hardware documentation for more information about each register
 */
        /**< Current Page Directory Pointer */
        IOMMU_REGISTER_DTE_ADDR = 0x0000,
        /**< Status of the MMU */
        IOMMU_REGISTER_STATUS = 0x0004,
        /**< Command register, used to control the MMU */
        IOMMU_REGISTER_COMMAND = 0x0008,
        /**< Logical address of the last page fault */
        IOMMU_REGISTER_PAGE_FAULT_ADDR = 0x000C,
        /**< Used to invalidate the mapping of a single page from the MMU */
        IOMMU_REGISTER_ZAP_ONE_LINE = 0x010,
        /**< Raw interrupt status, all interrupts visible */
        IOMMU_REGISTER_INT_RAWSTAT = 0x0014,
        /**< Indicate to the MMU that the interrupt has been received */
        IOMMU_REGISTER_INT_CLEAR = 0x0018,
        /**< Enable/disable types of interrupts */
        IOMMU_REGISTER_INT_MASK = 0x001C,
        /**< Interrupt status based on the mask */
        IOMMU_REGISTER_INT_STATUS = 0x0020,
        IOMMU_REGISTER_AUTO_GATING = 0x0024

        /**< Enable paging (memory translation) */
        IOMMU_COMMAND_ENABLE_PAGING = 0x00,
        /**< Disable paging (memory translation) */
        IOMMU_COMMAND_DISABLE_PAGING = 0x01,
        /**< Enable stall on page fault */
        IOMMU_COMMAND_ENABLE_STALL = 0x02,
        /**< Disable stall on page fault */
        IOMMU_COMMAND_DISABLE_STALL = 0x03,
        /**< Zap the entire page table cache */
        IOMMU_COMMAND_ZAP_CACHE = 0x04,
        /**< Page fault processed */
        IOMMU_COMMAND_PAGE_FAULT_DONE = 0x05,
        /**< Reset the MMU back to power-on settings */
        IOMMU_COMMAND_HARD_RESET = 0x06
/*
 * MMU interrupt register bits
 * Each cause of the interrupt is reported
 * through the (raw) interrupt status registers.
 * Multiple interrupts can be pending, so multiple bits
 * can be set at once.
 */
enum iommu_interrupt {
        IOMMU_INTERRUPT_PAGE_FAULT = 0x01, /**< A page fault occurred */
        IOMMU_INTERRUPT_READ_BUS_ERROR = 0x02 /**< A bus read error occurred */
enum iommu_status_bits {
        IOMMU_STATUS_BIT_PAGING_ENABLED = 1 << 0,
        IOMMU_STATUS_BIT_PAGE_FAULT_ACTIVE = 1 << 1,
        IOMMU_STATUS_BIT_STALL_ACTIVE = 1 << 2,
        IOMMU_STATUS_BIT_IDLE = 1 << 3,
        IOMMU_STATUS_BIT_REPLAY_BUFFER_EMPTY = 1 << 4,
        IOMMU_STATUS_BIT_PAGE_FAULT_IS_WRITE = 1 << 5,
        IOMMU_STATUS_BIT_STALL_NOT_ACTIVE = 1 << 31,
/* Size of an MMU page in bytes */
#define IOMMU_PAGE_SIZE 0x1000

/* Size of the address space referenced by a page table page */
#define IOMMU_VIRTUAL_PAGE_SIZE 0x400000 /* 4 MiB */

/*
 * Page directory index from address
 * Calculates the page directory index from the given address
 */
#define IOMMU_PDE_ENTRY(address) (((address) >> 22) & 0x03FF)

/*
 * Page table index from address
 * Calculates the page table index from the given address
 */
#define IOMMU_PTE_ENTRY(address) (((address) >> 12) & 0x03FF)

/* Extract the memory address from a PDE/PTE entry */
#define IOMMU_ENTRY_ADDRESS(value) ((value) & 0xFFFFFC00)

#define INVALID_PAGE ((u32)(~0))
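/*
 * Minimal walk-through sketch (illustrative only; the function name is made
 * up and it assumes, as dump_pagetbl() below does, that the DTE base is a
 * lowmem physical address so phys_to_virt() is valid): resolves an IOVA to a
 * physical address using the IOMMU_PDE_ENTRY/IOMMU_PTE_ENTRY/
 * IOMMU_ENTRY_ADDRESS helpers above.
 */
static u32 __maybe_unused rockchip_walk_pgtable_example(u32 dte_base_phys, u32 iova)
{
        u32 *dte = (u32 *)phys_to_virt(dte_base_phys) + IOMMU_PDE_ENTRY(iova);
        u32 *pte;

        if (!(*dte & IOMMU_FLAGS_PRESENT))
                return INVALID_PAGE;

        pte = (u32 *)phys_to_virt(IOMMU_ENTRY_ADDRESS(*dte)) + IOMMU_PTE_ENTRY(iova);
        if (!(*pte & IOMMU_FLAGS_PRESENT))
                return INVALID_PAGE;

        return IOMMU_ENTRY_ADDRESS(*pte) | (iova & (IOMMU_PAGE_SIZE - 1));
}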
static struct kmem_cache *lv2table_kmem_cache;

static unsigned int *rockchip_section_entry(unsigned int *pgtable, unsigned long iova)
        return pgtable + rockchip_lv1ent_offset(iova);

static unsigned int *rockchip_page_entry(unsigned int *sent, unsigned long iova)
        return (unsigned int *)phys_to_virt(rockchip_lv2table_base(sent)) +
               rockchip_lv2ent_offset(iova);

struct rk_iommu_domain {
        struct list_head clients; /* list of iommu_drvdata.node */
        unsigned int *pgtable; /* lv1 page table, 4KB */
        short *lv2entcnt; /* free lv2 entry counter for each section */
        spinlock_t lock; /* lock for this structure */
        spinlock_t pgtablelock; /* lock for modifying page table @ pgtable */
        struct iommu_domain domain;

static struct rk_iommu_domain *to_rk_domain(struct iommu_domain *dom)
        return container_of(dom, struct rk_iommu_domain, domain);
static bool rockchip_set_iommu_active(struct iommu_drvdata *data)
        /* return true if the IOMMU was not active previously
         * and it needs to be initialized
         */
        return ++data->activations == 1;

static bool rockchip_set_iommu_inactive(struct iommu_drvdata *data)
        /* return true if the IOMMU needs to be disabled */
        BUG_ON(data->activations < 1);
        return --data->activations == 0;

static bool rockchip_is_iommu_active(struct iommu_drvdata *data)
        return data->activations > 0;
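/*
 * data->activations is a nesting counter: enable/disable calls may be
 * nested, the hardware is only programmed on the first enable (0 -> 1)
 * and only shut off again on the matching last disable (1 -> 0).
 */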
static void rockchip_iommu_disable_stall(void __iomem *base)
        if (base != rk312x_vop_mmu_base) {
                mmu_status = __raw_readl(base + IOMMU_REGISTER_STATUS);
                goto skip_vop_mmu_disable;

        if (0 == (mmu_status & IOMMU_STATUS_BIT_PAGING_ENABLED)) {

        if (mmu_status & IOMMU_STATUS_BIT_PAGE_FAULT_ACTIVE) {
                pr_info("Aborting MMU disable stall request since it is in pagefault state.\n");

        if (!(mmu_status & IOMMU_STATUS_BIT_STALL_ACTIVE)) {

        __raw_writel(IOMMU_COMMAND_DISABLE_STALL, base + IOMMU_REGISTER_COMMAND);

skip_vop_mmu_disable:
        for (i = 0; i < IOMMU_REG_POLL_COUNT_FAST; ++i) {
                if (base != rk312x_vop_mmu_base) {
                        status = __raw_readl(base + IOMMU_REGISTER_STATUS);

                if (0 == (status & IOMMU_STATUS_BIT_STALL_ACTIVE))

                if (status & IOMMU_STATUS_BIT_PAGE_FAULT_ACTIVE)

                if (0 == (mmu_status & IOMMU_STATUS_BIT_PAGING_ENABLED))

        if (IOMMU_REG_POLL_COUNT_FAST == i) {
                pr_info("Disable stall request failed, MMU status is 0x%08X\n",
                        __raw_readl(base + IOMMU_REGISTER_STATUS));
static bool rockchip_iommu_enable_stall(void __iomem *base)
        if (base != rk312x_vop_mmu_base) {
                mmu_status = __raw_readl(base + IOMMU_REGISTER_STATUS);
                goto skip_vop_mmu_enable;

        if (0 == (mmu_status & IOMMU_STATUS_BIT_PAGING_ENABLED)) {

        if (mmu_status & IOMMU_STATUS_BIT_STALL_ACTIVE) {
                pr_info("MMU stall already enabled\n");

        if (mmu_status & IOMMU_STATUS_BIT_PAGE_FAULT_ACTIVE) {
                pr_info("Aborting MMU stall request since it is in pagefault state. mmu status is 0x%08x\n",

        __raw_writel(IOMMU_COMMAND_ENABLE_STALL, base + IOMMU_REGISTER_COMMAND);

        for (i = 0; i < IOMMU_REG_POLL_COUNT_FAST; ++i) {
                if (base != rk312x_vop_mmu_base) {
                        mmu_status = __raw_readl(base + IOMMU_REGISTER_STATUS);

                if (mmu_status & IOMMU_STATUS_BIT_PAGE_FAULT_ACTIVE)

                if ((mmu_status & IOMMU_STATUS_BIT_STALL_ACTIVE) &&
                    (0 == (mmu_status & IOMMU_STATUS_BIT_STALL_NOT_ACTIVE)))

                if (0 == (mmu_status & (IOMMU_STATUS_BIT_PAGING_ENABLED)))

        if (IOMMU_REG_POLL_COUNT_FAST == i) {
                pr_info("Enable stall request failed, MMU status is 0x%08X\n",
                        __raw_readl(base + IOMMU_REGISTER_STATUS));

        if (mmu_status & IOMMU_STATUS_BIT_PAGE_FAULT_ACTIVE) {
                pr_info("Aborting MMU stall request since it has a pagefault.\n");
static bool rockchip_iommu_enable_paging(void __iomem *base)
        __raw_writel(IOMMU_COMMAND_ENABLE_PAGING,
                     base + IOMMU_REGISTER_COMMAND);

        for (i = 0; i < IOMMU_REG_POLL_COUNT_FAST; ++i) {
                if (base != rk312x_vop_mmu_base) {
                        if (__raw_readl(base + IOMMU_REGISTER_STATUS) &
                            IOMMU_STATUS_BIT_PAGING_ENABLED)

        if (IOMMU_REG_POLL_COUNT_FAST == i) {
                pr_info("Enable paging request failed, MMU status is 0x%08X\n",
                        __raw_readl(base + IOMMU_REGISTER_STATUS));

static bool rockchip_iommu_disable_paging(void __iomem *base)
        __raw_writel(IOMMU_COMMAND_DISABLE_PAGING,
                     base + IOMMU_REGISTER_COMMAND);

        for (i = 0; i < IOMMU_REG_POLL_COUNT_FAST; ++i) {
                if (base != rk312x_vop_mmu_base) {
                        if (!(__raw_readl(base + IOMMU_REGISTER_STATUS) &
                              IOMMU_STATUS_BIT_PAGING_ENABLED))

        if (IOMMU_REG_POLL_COUNT_FAST == i) {
                pr_info("Disable paging request failed, MMU status is 0x%08X\n",
                        __raw_readl(base + IOMMU_REGISTER_STATUS));

static void rockchip_iommu_page_fault_done(void __iomem *base, const char *dbgname)
        pr_info("MMU: %s: Leaving page fault mode\n",
        __raw_writel(IOMMU_COMMAND_PAGE_FAULT_DONE,
                     base + IOMMU_REGISTER_COMMAND);
static int rockchip_iommu_zap_tlb_without_stall(void __iomem *base)
        __raw_writel(IOMMU_COMMAND_ZAP_CACHE, base + IOMMU_REGISTER_COMMAND);

static int rockchip_iommu_zap_tlb(void __iomem *base)
        if (!rockchip_iommu_enable_stall(base)) {
                pr_err("%s failed\n", __func__);

        __raw_writel(IOMMU_COMMAND_ZAP_CACHE, base + IOMMU_REGISTER_COMMAND);

        rockchip_iommu_disable_stall(base);

static inline bool rockchip_iommu_raw_reset(void __iomem *base)
        unsigned int grf_value;

        __raw_writel(0xCAFEBABE, base + IOMMU_REGISTER_DTE_ADDR);

        if (base != rk312x_vop_mmu_base) {
                ret = __raw_readl(base + IOMMU_REGISTER_DTE_ADDR);
                if (ret != 0xCAFEB000) {
                        grf_value = readl_relaxed(RK_GRF_VIRT + RK3036_GRF_SOC_CON1);
                        pr_info("error when %s. grf = 0x%08x\n", __func__, grf_value);

        __raw_writel(IOMMU_COMMAND_HARD_RESET,
                     base + IOMMU_REGISTER_COMMAND);

        for (i = 0; i < IOMMU_REG_POLL_COUNT_FAST; ++i) {
                if (base != rk312x_vop_mmu_base) {
                        if (__raw_readl(base + IOMMU_REGISTER_DTE_ADDR) == 0)

        if (IOMMU_REG_POLL_COUNT_FAST == i) {
                pr_info("%s, Reset request failed, MMU status is 0x%08X\n",
                        __func__, __raw_readl(base + IOMMU_REGISTER_DTE_ADDR));

static void rockchip_iommu_set_ptbase(void __iomem *base, unsigned int pgd)
        __raw_writel(pgd, base + IOMMU_REGISTER_DTE_ADDR);

static bool rockchip_iommu_reset(void __iomem *base, const char *dbgname)
        ret = rockchip_iommu_raw_reset(base);
                pr_info("(%s), %s failed\n", dbgname, __func__);

        if (base != rk312x_vop_mmu_base)
                __raw_writel(IOMMU_INTERRUPT_PAGE_FAULT |
                             IOMMU_INTERRUPT_READ_BUS_ERROR,
                             base + IOMMU_REGISTER_INT_MASK);

                __raw_writel(0x00, base + IOMMU_REGISTER_INT_MASK);

static inline void rockchip_pgtable_flush(void *vastart, void *vaend)
        dmac_flush_range(vastart, vaend);
        outer_flush_range(virt_to_phys(vastart), virt_to_phys(vaend));
#elif defined(CONFIG_ARM64)
        __dma_flush_range(vastart, vaend);
static void dump_pagetbl(dma_addr_t fault_address, u32 addr_dte)
        u32 dte_index, pte_index, page_offset;
        phys_addr_t mmu_dte_addr_phys, dte_addr_phys;
        phys_addr_t pte_addr_phys = 0;
        u32 *pte_addr = NULL;
        phys_addr_t page_addr_phys = 0;

        dte_index = rockchip_lv1ent_offset(fault_address);
        pte_index = rockchip_lv2ent_offset(fault_address);
        page_offset = (u32)(fault_address & 0x00000fff);

        mmu_dte_addr = addr_dte;
        mmu_dte_addr_phys = (phys_addr_t)mmu_dte_addr;

        dte_addr_phys = mmu_dte_addr_phys + (4 * dte_index);
        dte_addr = phys_to_virt(dte_addr_phys);

        if (!(IOMMU_FLAGS_PRESENT & dte))

        pte_addr_phys = ((phys_addr_t)dte & 0xfffff000) + (pte_index * 4);
        pte_addr = phys_to_virt(pte_addr_phys);

        if (!(IOMMU_FLAGS_PRESENT & pte))

        page_addr_phys = ((phys_addr_t)pte & 0xfffff000) + page_offset;
        page_flags = pte & 0x000001fe;

        pr_err("iova = %pad: dte_index: 0x%03x pte_index: 0x%03x page_offset: 0x%03x\n",
               &fault_address, dte_index, pte_index, page_offset);
        pr_err("mmu_dte_addr: %pa dte@%pa: %#08x valid: %u pte@%pa: %#08x valid: %u page@%pa flags: %#03x\n",
               &mmu_dte_addr_phys, &dte_addr_phys, dte,
               (dte & IOMMU_FLAGS_PRESENT), &pte_addr_phys, pte,
               (pte & IOMMU_FLAGS_PRESENT), &page_addr_phys, page_flags);
static irqreturn_t rockchip_iommu_irq(int irq, void *dev_id)
        /* The SYSMMU is blocked (stalled) when this interrupt occurs. */
        struct iommu_drvdata *data = dev_id;
        dma_addr_t fault_address;

        spin_lock_irqsave(&data->data_lock, flags);

        if (!rockchip_is_iommu_active(data)) {
                spin_unlock_irqrestore(&data->data_lock, flags);

        for (i = 0; i < data->num_res_mem; i++) {
                status = __raw_readl(data->res_bases[i] +
                                     IOMMU_REGISTER_INT_STATUS);

                rawstat = __raw_readl(data->res_bases[i] +
                                      IOMMU_REGISTER_INT_RAWSTAT);

                reg_status = __raw_readl(data->res_bases[i] +
                                         IOMMU_REGISTER_STATUS);

                dev_info(data->iommu, "1. rawstat = 0x%08x, status = 0x%08x, reg_status = 0x%08x\n",
                         rawstat, status, reg_status);

                if (rawstat & IOMMU_INTERRUPT_PAGE_FAULT) {
                        fault_address = __raw_readl(data->res_bases[i] +
                                                    IOMMU_REGISTER_PAGE_FAULT_ADDR);

                        dte = __raw_readl(data->res_bases[i] +
                                          IOMMU_REGISTER_DTE_ADDR);

                        flags = (status & IOMMU_STATUS_BIT_PAGE_FAULT_IS_WRITE) ? 1 : 0;

                        dev_err(data->iommu, "Page fault detected at %pad from bus id %d of type %s on %s\n",
                                &fault_address, (status >> 6) & 0x1F,
                                (flags == 1) ? "write" : "read", data->dbgname);

                        dump_pagetbl(fault_address, dte);

                        report_iommu_fault(data->domain, data->iommu,
                                           fault_address, flags);
                        if (data->fault_handler)
                                data->fault_handler(data->master, IOMMU_PAGEFAULT, dte, fault_address, 1);

                        rockchip_iommu_page_fault_done(data->res_bases[i],

                if (rawstat & IOMMU_INTERRUPT_READ_BUS_ERROR) {
                        dev_err(data->iommu, "bus error occurred at %pad\n",

                if (rawstat & ~(IOMMU_INTERRUPT_READ_BUS_ERROR |
                                IOMMU_INTERRUPT_PAGE_FAULT)) {
                        dev_err(data->iommu, "unexpected int_status: %#08x\n\n",

                __raw_writel(rawstat, data->res_bases[i] +
                             IOMMU_REGISTER_INT_CLEAR);

                status = __raw_readl(data->res_bases[i] +
                                     IOMMU_REGISTER_INT_STATUS);

                rawstat = __raw_readl(data->res_bases[i] +
                                      IOMMU_REGISTER_INT_RAWSTAT);

                reg_status = __raw_readl(data->res_bases[i] +
                                         IOMMU_REGISTER_STATUS);

                dev_info(data->iommu, "2. rawstat = 0x%08x, status = 0x%08x, reg_status = 0x%08x\n",
                         rawstat, status, reg_status);

                ret = rockchip_iommu_zap_tlb_without_stall(data->res_bases[i]);
                        dev_err(data->iommu, "(%s) %s failed\n", data->dbgname,

        spin_unlock_irqrestore(&data->data_lock, flags);
static bool rockchip_iommu_disable(struct iommu_drvdata *data)
        spin_lock_irqsave(&data->data_lock, flags);

        if (!rockchip_set_iommu_inactive(data)) {
                spin_unlock_irqrestore(&data->data_lock, flags);
                dev_info(data->iommu, "(%s) %d times left to be disabled\n",
                         data->dbgname, data->activations);

        for (i = 0; i < data->num_res_mem; i++) {
                ret = rockchip_iommu_enable_stall(data->res_bases[i]);
                        dev_info(data->iommu, "(%s), %s failed\n",
                                 data->dbgname, __func__);
                        spin_unlock_irqrestore(&data->data_lock, flags);

                __raw_writel(0, data->res_bases[i] + IOMMU_REGISTER_INT_MASK);

                ret = rockchip_iommu_disable_paging(data->res_bases[i]);
                        rockchip_iommu_disable_stall(data->res_bases[i]);
                        spin_unlock_irqrestore(&data->data_lock, flags);
                        dev_info(data->iommu, "%s error\n", __func__);

                rockchip_iommu_disable_stall(data->res_bases[i]);

        spin_unlock_irqrestore(&data->data_lock, flags);

        dev_dbg(data->iommu, "(%s) Disabled\n", data->dbgname);
/* rockchip_iommu_enable: Enables the System MMU
 *
 * Returns -error if an error occurred and the System MMU is not enabled,
 * 0 if the System MMU has just been enabled, and 1 if the System MMU was
 * already enabled.
 */
static int rockchip_iommu_enable(struct iommu_drvdata *data, unsigned int pgtable)
        spin_lock_irqsave(&data->data_lock, flags);

        if (!rockchip_set_iommu_active(data)) {
                if (WARN_ON(pgtable != data->pgtable))

                dev_info(data->iommu, "(%s) Already enabled\n", data->dbgname);

        for (i = 0; i < data->num_res_mem; i++) {
                ret = rockchip_iommu_enable_stall(data->res_bases[i]);
                        dev_info(data->iommu, "(%s), %s failed\n",
                                 data->dbgname, __func__);

                if (!strstr(data->dbgname, "isp")) {
                        if (!rockchip_iommu_reset(data->res_bases[i],
                                rockchip_iommu_disable_stall(data->res_bases[i]);

                rockchip_iommu_set_ptbase(data->res_bases[i], pgtable);

                __raw_writel(IOMMU_COMMAND_ZAP_CACHE, data->res_bases[i] +
                             IOMMU_REGISTER_COMMAND);

                if (strstr(data->dbgname, "isp")) {
                        __raw_writel(IOMMU_INTERRUPT_PAGE_FAULT |
                                     IOMMU_INTERRUPT_READ_BUS_ERROR,
                                     data->res_bases[i] + IOMMU_REGISTER_INT_MASK);

                ret = rockchip_iommu_enable_paging(data->res_bases[i]);
                        dev_info(data->iommu, "(%s), %s failed\n",
                                 data->dbgname, __func__);
                        rockchip_iommu_disable_stall(data->res_bases[i]);

                rockchip_iommu_disable_stall(data->res_bases[i]);

        data->pgtable = pgtable;
        spin_unlock_irqrestore(&data->data_lock, flags);

        dev_dbg(data->iommu, "(%s) Enabled\n", data->dbgname);

        rockchip_set_iommu_inactive(data);
        spin_unlock_irqrestore(&data->data_lock, flags);

int rockchip_iommu_tlb_invalidate_global(struct device *dev)
        struct iommu_drvdata *data = dev_get_drvdata(dev->archdata.iommu);

        spin_lock_irqsave(&data->data_lock, flags);

        if (rockchip_is_iommu_active(data)) {
                for (i = 0; i < data->num_res_mem; i++) {
                        ret = rockchip_iommu_zap_tlb(data->res_bases[i]);
                                dev_err(dev->archdata.iommu, "(%s) %s failed\n",
                                        data->dbgname, __func__);
                dev_dbg(dev->archdata.iommu, "(%s) Disabled. Skipping invalidating TLB.\n",

        spin_unlock_irqrestore(&data->data_lock, flags);

int rockchip_iommu_tlb_invalidate(struct device *dev)
        struct iommu_drvdata *data = dev_get_drvdata(dev->archdata.iommu);

        if (strstr(data->dbgname, "vpu") || strstr(data->dbgname, "hevc"))

        spin_lock_irqsave(&data->data_lock, flags);

        if (rockchip_is_iommu_active(data)) {
                for (i = 0; i < data->num_res_mem; i++) {
                        ret = rockchip_iommu_zap_tlb(data->res_bases[i]);
                                dev_err(dev->archdata.iommu, "(%s) %s failed\n",
                                        data->dbgname, __func__);
                                spin_unlock_irqrestore(&data->data_lock, flags);

                dev_dbg(dev->archdata.iommu, "(%s) Disabled. Skipping invalidating TLB.\n",

        spin_unlock_irqrestore(&data->data_lock, flags);

static phys_addr_t rockchip_iommu_iova_to_phys(struct iommu_domain *domain,
        struct rk_iommu_domain *priv = to_rk_domain(domain);
        phys_addr_t phys = 0;

        spin_lock_irqsave(&priv->pgtablelock, flags);

        entry = rockchip_section_entry(priv->pgtable, iova);
        entry = rockchip_page_entry(entry, iova);
        phys = rockchip_spage_phys(entry) + rockchip_spage_offs(iova);

        spin_unlock_irqrestore(&priv->pgtablelock, flags);
static int rockchip_lv2set_page(unsigned int *pent, phys_addr_t paddr,
                                size_t size, short *pgcnt)
        if (!rockchip_lv2ent_fault(pent))

        *pent = rockchip_mk_lv2ent_spage(paddr);
        rockchip_pgtable_flush(pent, pent + 1);

static unsigned int *rockchip_alloc_lv2entry(unsigned int *sent,
                                             unsigned long iova, short *pgcounter)
        if (rockchip_lv1ent_fault(sent)) {
                pent = kmem_cache_zalloc(lv2table_kmem_cache, GFP_ATOMIC);
                BUG_ON((unsigned long)pent & (LV2TABLE_SIZE - 1));

                *sent = rockchip_mk_lv1ent_page(virt_to_phys(pent));
                kmemleak_ignore(pent);
                *pgcounter = NUM_LV2ENTRIES;
                rockchip_pgtable_flush(pent, pent + NUM_LV2ENTRIES);
                rockchip_pgtable_flush(sent, sent + 1);

        return rockchip_page_entry(sent, iova);

static size_t rockchip_iommu_unmap(struct iommu_domain *domain,
                                   unsigned long iova, size_t size)
        struct rk_iommu_domain *priv = to_rk_domain(domain);

        BUG_ON(priv->pgtable == NULL);

        spin_lock_irqsave(&priv->pgtablelock, flags);

        ent = rockchip_section_entry(priv->pgtable, iova);

        if (unlikely(rockchip_lv1ent_fault(ent))) {
                if (size > SPAGE_SIZE)

        /* lv1ent_page(sent) == true here */

        ent = rockchip_page_entry(ent, iova);

        if (unlikely(rockchip_lv2ent_fault(ent))) {

        priv->lv2entcnt[rockchip_lv1ent_offset(iova)] += 1;

        pr_debug("%s:unmap iova 0x%lx/%zx bytes\n",
                 __func__, iova, size);
        spin_unlock_irqrestore(&priv->pgtablelock, flags);

static int rockchip_iommu_map(struct iommu_domain *domain, unsigned long iova,
                              phys_addr_t paddr, size_t size, int prot)
        struct rk_iommu_domain *priv = to_rk_domain(domain);

        BUG_ON(priv->pgtable == NULL);

        spin_lock_irqsave(&priv->pgtablelock, flags);

        entry = rockchip_section_entry(priv->pgtable, iova);

        pent = rockchip_alloc_lv2entry(entry, iova,
                                       &priv->lv2entcnt[rockchip_lv1ent_offset(iova)]);

        ret = rockchip_lv2set_page(pent, paddr, size,
                                   &priv->lv2entcnt[rockchip_lv1ent_offset(iova)]);

                pr_info("%s: Failed to map iova 0x%lx/%zx bytes\n", __func__,

        spin_unlock_irqrestore(&priv->pgtablelock, flags);

static void rockchip_iommu_detach_device(struct iommu_domain *domain, struct device *dev)
        struct iommu_drvdata *data = dev_get_drvdata(dev->archdata.iommu);
        struct rk_iommu_domain *priv = to_rk_domain(domain);
        struct list_head *pos;

        spin_lock_irqsave(&priv->lock, flags);

        list_for_each(pos, &priv->clients) {
                if (list_entry(pos, struct iommu_drvdata, node) == data) {

        spin_unlock_irqrestore(&priv->lock, flags);

        if (rockchip_iommu_disable(data)) {
                if (!(strstr(data->dbgname, "vpu") || strstr(data->dbgname, "hevc")))
                        dev_dbg(dev->archdata.iommu, "%s: Detached IOMMU with pgtable %08lx\n",
                                __func__, (unsigned long)virt_to_phys(priv->pgtable));
                list_del_init(&data->node);

                dev_err(dev->archdata.iommu, "%s: Detaching IOMMU with pgtable %08lx delayed",
                        __func__, (unsigned long)virt_to_phys(priv->pgtable));

        spin_unlock_irqrestore(&priv->lock, flags);
static int rockchip_iommu_attach_device(struct iommu_domain *domain, struct device *dev)
        struct iommu_drvdata *data = dev_get_drvdata(dev->archdata.iommu);
        struct rk_iommu_domain *priv = to_rk_domain(domain);

        spin_lock_irqsave(&priv->lock, flags);

        ret = rockchip_iommu_enable(data, virt_to_phys(priv->pgtable));

                /* 'data->node' must not already be on priv->clients */
                BUG_ON(!list_empty(&data->node));
                list_add_tail(&data->node, &priv->clients);
                data->domain = domain;

        spin_unlock_irqrestore(&priv->lock, flags);

                dev_err(dev->archdata.iommu, "%s: Failed to attach IOMMU with pgtable %x\n",
                        __func__, (unsigned int)virt_to_phys(priv->pgtable));
        } else if (ret > 0) {
                dev_dbg(dev->archdata.iommu, "%s: IOMMU with pgtable 0x%x already attached\n",
                        __func__, (unsigned int)virt_to_phys(priv->pgtable));

                if (!(strstr(data->dbgname, "vpu") ||
                      strstr(data->dbgname, "hevc") ||
                      strstr(data->dbgname, "vdec")))
                        dev_info(dev->archdata.iommu, "%s: Attached new IOMMU with pgtable 0x%x\n",
                                 __func__, (unsigned int)virt_to_phys(priv->pgtable));
static void rockchip_iommu_domain_free(struct iommu_domain *domain)
        struct rk_iommu_domain *priv = to_rk_domain(domain);

        WARN_ON(!list_empty(&priv->clients));

        for (i = 0; i < NUM_LV1ENTRIES; i++)
                if (rockchip_lv1ent_page(priv->pgtable + i))
                        kmem_cache_free(lv2table_kmem_cache,
                                        phys_to_virt(rockchip_lv2table_base(priv->pgtable + i)));

        free_pages((unsigned long)priv->pgtable, 0);
        free_pages((unsigned long)priv->lv2entcnt, 0);

static struct iommu_domain *rockchip_iommu_domain_alloc(unsigned type)
        struct rk_iommu_domain *priv;

        if (type != IOMMU_DOMAIN_UNMANAGED)

        priv = kzalloc(sizeof(*priv), GFP_KERNEL);
        /* The rk32xx IOMMU uses a two-level page table.
         * Level 1 and level 2 each have 1024 entries and each entry
         * occupies 4 bytes, so allocate one page for each table.
         */
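        /* Sizing note: 1024 entries * 4 bytes = 4096 bytes, i.e. one page,
         * and the lv2entcnt array (1024 * sizeof(short) = 2048 bytes) also
         * fits in a single page, hence the order-0 allocations below.
         */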
        priv->pgtable = (unsigned int *)__get_free_pages(GFP_KERNEL |

        priv->lv2entcnt = (short *)__get_free_pages(GFP_KERNEL |
        if (!priv->lv2entcnt)

        rockchip_pgtable_flush(priv->pgtable, priv->pgtable + NUM_LV1ENTRIES);

        spin_lock_init(&priv->lock);
        spin_lock_init(&priv->pgtablelock);
        INIT_LIST_HEAD(&priv->clients);

        return &priv->domain;

        free_pages((unsigned long)priv->pgtable, 0);

static struct iommu_ops rk_iommu_ops = {
        .domain_alloc = rockchip_iommu_domain_alloc,
        .domain_free = rockchip_iommu_domain_free,
        .attach_dev = rockchip_iommu_attach_device,
        .detach_dev = rockchip_iommu_detach_device,
        .map = rockchip_iommu_map,
        .unmap = rockchip_iommu_unmap,
        .iova_to_phys = rockchip_iommu_iova_to_phys,
        .pgsize_bitmap = SPAGE_SIZE,

static int rockchip_get_iommu_resource_num(struct platform_device *pdev,
        for (i = 0; i < pdev->num_resources; i++) {
                struct resource *r = &pdev->resource[i];

                if (type == resource_type(r))
static int rockchip_iommu_probe(struct platform_device *pdev)
        struct iommu_drvdata *data;

        data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
                dev_dbg(dev, "Not enough memory\n");

        dev_set_drvdata(dev, data);

        if (pdev->dev.of_node)
                of_property_read_string(pdev->dev.of_node, "dbgname",
                dev_dbg(dev, "dbgname not assigned in device tree or device node does not exist\n");

        dev_info(dev, "(%s) Enter\n", data->dbgname);
        data->num_res_mem = rockchip_get_iommu_resource_num(pdev,
        if (0 == data->num_res_mem) {
                dev_err(dev, "can't find iommu memory resource\n");

        dev_dbg(dev, "data->num_res_mem=%d\n", data->num_res_mem);

        data->num_res_irq = rockchip_get_iommu_resource_num(pdev,
        if (0 == data->num_res_irq) {
                dev_err(dev, "can't find iommu irq resource\n");

        dev_dbg(dev, "data->num_res_irq=%d\n", data->num_res_irq);

        data->res_bases = devm_kmalloc_array(dev, data->num_res_mem,
                                             sizeof(*data->res_bases), GFP_KERNEL);
        if (data->res_bases == NULL) {
                dev_err(dev, "Not enough memory\n");
        for (i = 0; i < data->num_res_mem; i++) {
                struct resource *res;

                res = platform_get_resource(pdev, IORESOURCE_MEM, i);
                        dev_err(dev, "Unable to find IOMEM region\n");

                data->res_bases[i] = devm_ioremap(dev, res->start,
                                                  resource_size(res));
                if (!data->res_bases[i]) {
                        dev_err(dev, "Unable to map IOMEM @ PA:%pa\n",

                dev_dbg(dev, "res->start = 0x%pa ioremap to data->res_bases[%d] = %p\n",
                        &res->start, i, data->res_bases[i]);

                if (strstr(data->dbgname, "vop") &&
                    (soc_is_rk3128() || soc_is_rk3126())) {
                        rk312x_vop_mmu_base = data->res_bases[0];
                        dev_dbg(dev, "rk312x_vop_mmu_base = %p\n",
                                rk312x_vop_mmu_base);
        for (i = 0; i < data->num_res_irq; i++) {
                if ((soc_is_rk3128() || soc_is_rk3126()) &&
                    strstr(data->dbgname, "vop")) {
                        dev_info(dev, "skip request vop mmu irq\n");

                ret = platform_get_irq(pdev, i);
                        dev_err(dev, "Unable to find IRQ resource\n");

                ret = devm_request_irq(dev, ret, rockchip_iommu_irq,
                                       IRQF_SHARED, dev_name(dev), data);
                        dev_err(dev, "Unable to register interrupt handler\n");

        ret = rockchip_init_iovmm(dev, &data->vmm);

        spin_lock_init(&data->data_lock);
        INIT_LIST_HEAD(&data->node);

        dev_info(dev, "(%s) Initialized\n", data->dbgname);
static const struct of_device_id iommu_dt_ids[] = {
        { .compatible = IEP_IOMMU_COMPATIBLE_NAME },
        { .compatible = VIP_IOMMU_COMPATIBLE_NAME },
        { .compatible = VOPB_IOMMU_COMPATIBLE_NAME },
        { .compatible = VOPL_IOMMU_COMPATIBLE_NAME },
        { .compatible = HEVC_IOMMU_COMPATIBLE_NAME },
        { .compatible = VPU_IOMMU_COMPATIBLE_NAME },
        { .compatible = ISP_IOMMU_COMPATIBLE_NAME },
        { .compatible = ISP0_IOMMU_COMPATIBLE_NAME },
        { .compatible = ISP1_IOMMU_COMPATIBLE_NAME },
        { .compatible = VOP_IOMMU_COMPATIBLE_NAME },
        { .compatible = VDEC_IOMMU_COMPATIBLE_NAME },

MODULE_DEVICE_TABLE(of, iommu_dt_ids);
static struct platform_driver rk_iommu_driver = {
        .probe = rockchip_iommu_probe,
                .owner = THIS_MODULE,
                .of_match_table = of_match_ptr(iommu_dt_ids),
static int __init rockchip_iommu_init_driver(void)
        struct device_node *np;

        np = of_find_matching_node(NULL, iommu_dt_ids);
                pr_err("Failed to find legacy iommu devices\n");

        lv2table_kmem_cache = kmem_cache_create("rk-iommu-lv2table",
                                                LV2TABLE_SIZE, LV2TABLE_SIZE,
        if (!lv2table_kmem_cache) {
                pr_info("%s: failed to create kmem cache\n", __func__);

        ret = bus_set_iommu(&platform_bus_type, &rk_iommu_ops);

        return platform_driver_register(&rk_iommu_driver);

core_initcall(rockchip_iommu_init_driver);