/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/clk.h>
#include <linux/compiler.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dma-iommu.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iommu.h>
#include <linux/jiffies.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
/** MMU register offsets */
#define RK_MMU_DTE_ADDR 0x00 /* Directory table address */
#define RK_MMU_STATUS 0x04
#define RK_MMU_COMMAND 0x08
#define RK_MMU_PAGE_FAULT_ADDR 0x0C /* IOVA of last page fault */
#define RK_MMU_ZAP_ONE_LINE 0x10 /* Shootdown one IOTLB entry */
#define RK_MMU_INT_RAWSTAT 0x14 /* IRQ status ignoring mask */
#define RK_MMU_INT_CLEAR 0x18 /* Acknowledge and re-arm irq */
#define RK_MMU_INT_MASK 0x1C /* IRQ enable */
#define RK_MMU_INT_STATUS 0x20 /* IRQ status after masking */
#define RK_MMU_AUTO_GATING 0x24
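/*
 * Note (summarizing the code below): one rk_iommu device may contain several
 * MMU instances (iommu->num_mmu register banks); each bank has its own copy
 * of the registers above, and the driver programs all banks identically.
 */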
#define DTE_ADDR_DUMMY 0xCAFEBABE
#define FORCE_RESET_TIMEOUT 100 /* ms */
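/*
 * Illustrative note: rk_iommu_force_reset() writes DTE_ADDR_DUMMY to
 * RK_MMU_DTE_ADDR and expects only the page-aligned part to read back,
 * i.e. 0xCAFEBABE & RK_DTE_PT_ADDRESS_MASK == 0xCAFEB000 (the "upper 5
 * nybbles" mentioned there).
 */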
/* RK_MMU_STATUS fields */
#define RK_MMU_STATUS_PAGING_ENABLED BIT(0)
#define RK_MMU_STATUS_PAGE_FAULT_ACTIVE BIT(1)
#define RK_MMU_STATUS_STALL_ACTIVE BIT(2)
#define RK_MMU_STATUS_IDLE BIT(3)
#define RK_MMU_STATUS_REPLAY_BUFFER_EMPTY BIT(4)
#define RK_MMU_STATUS_PAGE_FAULT_IS_WRITE BIT(5)
#define RK_MMU_STATUS_STALL_NOT_ACTIVE BIT(31)
/* RK_MMU_COMMAND command values */
#define RK_MMU_CMD_ENABLE_PAGING 0 /* Enable memory translation */
#define RK_MMU_CMD_DISABLE_PAGING 1 /* Disable memory translation */
#define RK_MMU_CMD_ENABLE_STALL 2 /* Stall paging to allow other cmds */
#define RK_MMU_CMD_DISABLE_STALL 3 /* Stop stalling; re-enables paging */
#define RK_MMU_CMD_ZAP_CACHE 4 /* Shoot down entire IOTLB */
#define RK_MMU_CMD_PAGE_FAULT_DONE 5 /* Clear page fault */
#define RK_MMU_CMD_FORCE_RESET 6 /* Reset all registers */
/* RK_MMU_INT_* register fields */
#define RK_MMU_IRQ_PAGE_FAULT 0x01 /* page fault */
#define RK_MMU_IRQ_BUS_ERROR 0x02 /* bus read error */
#define RK_MMU_IRQ_MASK (RK_MMU_IRQ_PAGE_FAULT | RK_MMU_IRQ_BUS_ERROR)
#define NUM_DT_ENTRIES 1024
#define NUM_PT_ENTRIES 1024

#define SPAGE_ORDER 12
#define SPAGE_SIZE (1 << SPAGE_ORDER)
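/* An IOMMU "small" page (SPAGE) is 1 << 12 = 4 KiB, the granule the page tables describe */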
/*
 * Support mapping any size that fits in one page table:
 *   4 KiB to 4 MiB
 */
#define RK_IOMMU_PGSIZE_BITMAP 0x007ff000
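/*
 * Each set bit k in the bitmap advertises support for mappings of size 2^k:
 * 0x007ff000 sets bits 12..22, i.e. every power of two from 4 KiB to 4 MiB.
 */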
#define IOMMU_REG_POLL_COUNT_FAST 1000
struct rk_iommu_domain {
        struct list_head iommus;
        struct platform_device *pdev;
        u32 *dt; /* page directory table */
        dma_addr_t dt_dma;
        struct mutex iommus_lock; /* lock for iommus list */
        struct mutex dt_lock; /* lock for modifying page directory table */

        struct iommu_domain domain;
};
struct rk_iommu {
        struct device *dev;
        void __iomem **bases;
        int num_mmu;
        int irq;
        bool reset_disabled; /* the isp iommu's reset operation would fail */
        struct list_head node; /* entry in rk_iommu_domain.iommus */
        struct iommu_domain *domain; /* domain to which iommu is attached */
        struct clk *aclk; /* aclk of the iommu's master */
        struct clk *hclk; /* hclk of the iommu's master */
};
static inline void rk_table_flush(struct rk_iommu_domain *dom, dma_addr_t dma,
                                  unsigned int count)
{
        size_t size = count * sizeof(u32); /* count of u32 entry */

        dma_sync_single_for_device(&dom->pdev->dev, dma, size, DMA_TO_DEVICE);
}
static struct rk_iommu_domain *to_rk_domain(struct iommu_domain *dom)
{
        return container_of(dom, struct rk_iommu_domain, domain);
}
/*
 * Inspired by _wait_for in intel_drv.h
 * This is NOT safe for use in interrupt context.
 *
 * Note that it's important that we check the condition again after having
 * timed out, since the timeout could be due to preemption or similar and
 * we've never had a chance to check the condition before the timeout.
 */
#define rk_wait_for(COND, MS) ({ \
        unsigned long timeout__ = jiffies + msecs_to_jiffies(MS) + 1; \
        int ret__ = 0; \
        while (!(COND)) { \
                if (time_after(jiffies, timeout__)) { \
                        ret__ = (COND) ? 0 : -ETIMEDOUT; \
                        break; \
                } \
                usleep_range(50, 100); \
        } \
        ret__; \
})
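/*
 * Example usage (mirroring the callers below): poll for up to 1 ms until
 * every MMU reports the stall bit, otherwise return -ETIMEDOUT:
 *
 *      ret = rk_wait_for(rk_iommu_is_stall_active(iommu), 1);
 */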
/*
 * The Rockchip rk3288 iommu uses a 2-level page table.
 * The first level is the "Directory Table" (DT).
 * The DT consists of 1024 4-byte Directory Table Entries (DTEs), each pointing
 * to a "Page Table" (PT).
 * The second level is the 1024 Page Tables (PTs).
 * Each PT consists of 1024 4-byte Page Table Entries (PTEs), each pointing to
 * a 4 KB page of physical memory.
 *
 * The DT and each PT fit in a single 4 KB page (4 bytes * 1024 entries).
 * Each iommu device has a MMU_DTE_ADDR register that contains the physical
 * address of the start of the DT page.
 *
 * The structure of the page table is as follows: MMU_DTE_ADDR points at the
 * DT; a valid DTE points at a PT; a valid PTE in that PT points at a 4 KB
 * page of physical memory.
 */

/*
 * Each DTE has a PT address and a valid bit:
 * +---------------------+-----------+-+
 * | PT address          | Reserved  |V|
 * +---------------------+-----------+-+
 *  31:12 - PT address (PTs always start on a 4 KB boundary)
 *  11: 1 - Reserved
 *      0 - 1 if PT @ PT address is valid
 */
#define RK_DTE_PT_ADDRESS_MASK 0xfffff000
#define RK_DTE_PT_VALID BIT(0)

static inline phys_addr_t rk_dte_pt_address(u32 dte)
{
        return (phys_addr_t)dte & RK_DTE_PT_ADDRESS_MASK;
}

static inline bool rk_dte_is_pt_valid(u32 dte)
{
        return dte & RK_DTE_PT_VALID;
}

static inline u32 rk_mk_dte(dma_addr_t pt_dma)
{
        return (pt_dma & RK_DTE_PT_ADDRESS_MASK) | RK_DTE_PT_VALID;
}
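/*
 * For illustration only (hypothetical value): a page table whose DMA address
 * is 0x12345000 gives rk_mk_dte(0x12345000) == 0x12345001, i.e. the 4 KB
 * aligned PT address with the valid bit set.
 */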
/*
 * Each PTE has a Page address, some flags and a valid bit:
 * +---------------------+---+-------+-+
 * | Page address        |Rsv| Flags |V|
 * +---------------------+---+-------+-+
 *  31:12 - Page address (Pages always start on a 4 KB boundary)
 *  11: 9 - Reserved
 *   8: 1 - Flags
 *      8 - Read allocate - allocate cache space on read misses
 *      7 - Read cache - enable cache & prefetch of data
 *      6 - Write buffer - enable delaying writes on their way to memory
 *      5 - Write allocate - allocate cache space on write misses
 *      4 - Write cache - different writes can be merged together
 *      3 - Override cache attributes
 *          if 1, bits 4-8 control cache attributes
 *          if 0, the system bus defaults are used
 *      2 - Writable
 *      1 - Readable
 *      0 - 1 if Page @ Page address is valid
 */
#define RK_PTE_PAGE_ADDRESS_MASK 0xfffff000
#define RK_PTE_PAGE_FLAGS_MASK 0x000001fe
#define RK_PTE_PAGE_WRITABLE BIT(2)
#define RK_PTE_PAGE_READABLE BIT(1)
#define RK_PTE_PAGE_VALID BIT(0)

static inline phys_addr_t rk_pte_page_address(u32 pte)
{
        return (phys_addr_t)pte & RK_PTE_PAGE_ADDRESS_MASK;
}

static inline bool rk_pte_is_page_valid(u32 pte)
{
        return pte & RK_PTE_PAGE_VALID;
}
/* TODO: set cache flags per prot IOMMU_CACHE */
static u32 rk_mk_pte(phys_addr_t page, int prot)
{
        u32 flags = 0;

        flags |= (prot & IOMMU_READ) ? RK_PTE_PAGE_READABLE : 0;
        flags |= (prot & IOMMU_WRITE) ? RK_PTE_PAGE_WRITABLE : 0;
        page &= RK_PTE_PAGE_ADDRESS_MASK;
        return page | flags | RK_PTE_PAGE_VALID;
}

static u32 rk_mk_pte_invalid(u32 pte)
{
        return pte & ~RK_PTE_PAGE_VALID;
}
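/*
 * For illustration only (hypothetical values): rk_mk_pte(0x12345000,
 * IOMMU_READ | IOMMU_WRITE) == 0x12345007, i.e. the page address with the
 * readable, writable and valid bits set.
 */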
/*
 * rk3288 iova (IOMMU Virtual Address) format
 *  31       22.21       12.11          0
 * +-----------+-----------+-------------+
 * | DTE index | PTE index | Page offset |
 * +-----------+-----------+-------------+
 *  31:22 - DTE index   - index of DTE in DT
 *  21:12 - PTE index   - index of PTE in PT @ DTE.pt_address
 *  11: 0 - Page offset - offset into page @ PTE.page_address
 */
#define RK_IOVA_DTE_MASK 0xffc00000
#define RK_IOVA_DTE_SHIFT 22
#define RK_IOVA_PTE_MASK 0x003ff000
#define RK_IOVA_PTE_SHIFT 12
#define RK_IOVA_PAGE_MASK 0x00000fff
#define RK_IOVA_PAGE_SHIFT 0
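/*
 * For illustration only (hypothetical address): iova 0x12345678 decomposes
 * into dte_index 0x048, pte_index 0x345 and page_offset 0x678.
 */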
static void rk_iommu_power_on(struct rk_iommu *iommu)
{
        if (iommu->aclk && iommu->hclk) {
                clk_enable(iommu->aclk);
                clk_enable(iommu->hclk);
        }

        pm_runtime_get_sync(iommu->dev);
}

static void rk_iommu_power_off(struct rk_iommu *iommu)
{
        pm_runtime_put_sync(iommu->dev);

        if (iommu->aclk && iommu->hclk) {
                clk_disable(iommu->aclk);
                clk_disable(iommu->hclk);
        }
}
static u32 rk_iova_dte_index(dma_addr_t iova)
{
        return (u32)(iova & RK_IOVA_DTE_MASK) >> RK_IOVA_DTE_SHIFT;
}

static u32 rk_iova_pte_index(dma_addr_t iova)
{
        return (u32)(iova & RK_IOVA_PTE_MASK) >> RK_IOVA_PTE_SHIFT;
}

static u32 rk_iova_page_offset(dma_addr_t iova)
{
        return (u32)(iova & RK_IOVA_PAGE_MASK) >> RK_IOVA_PAGE_SHIFT;
}
static u32 rk_iommu_read(void __iomem *base, u32 offset)
{
        return readl(base + offset);
}

static void rk_iommu_write(void __iomem *base, u32 offset, u32 value)
{
        writel(value, base + offset);
}

static void rk_iommu_command(struct rk_iommu *iommu, u32 command)
{
        int i;

        for (i = 0; i < iommu->num_mmu; i++)
                writel(command, iommu->bases[i] + RK_MMU_COMMAND);
}

static void rk_iommu_base_command(void __iomem *base, u32 command)
{
        writel(command, base + RK_MMU_COMMAND);
}
static void rk_iommu_zap_lines(struct rk_iommu *iommu, dma_addr_t iova_start,
                               size_t size)
{
        int i;
        dma_addr_t iova_end = iova_start + size;
        /*
         * TODO(djkurtz): Figure out when it is more efficient to shootdown the
         * entire iotlb rather than iterate over individual iovas.
         */

        rk_iommu_power_on(iommu);

        for (i = 0; i < iommu->num_mmu; i++) {
                dma_addr_t iova;

                for (iova = iova_start; iova < iova_end; iova += SPAGE_SIZE)
                        rk_iommu_write(iommu->bases[i], RK_MMU_ZAP_ONE_LINE, iova);
        }

        rk_iommu_power_off(iommu);
}
static bool rk_iommu_is_stall_active(struct rk_iommu *iommu)
{
        bool active = true;
        int i;

        for (i = 0; i < iommu->num_mmu; i++)
                active &= !!(rk_iommu_read(iommu->bases[i], RK_MMU_STATUS) &
                             RK_MMU_STATUS_STALL_ACTIVE);

        return active;
}

static bool rk_iommu_is_paging_enabled(struct rk_iommu *iommu)
{
        bool enable = true;
        int i;

        for (i = 0; i < iommu->num_mmu; i++)
                enable &= !!(rk_iommu_read(iommu->bases[i], RK_MMU_STATUS) &
                             RK_MMU_STATUS_PAGING_ENABLED);

        return enable;
}
static int rk_iommu_enable_stall(struct rk_iommu *iommu)
{
        int ret, i;

        if (rk_iommu_is_stall_active(iommu))
                return 0;

        /* Stall can only be enabled if paging is enabled */
        if (!rk_iommu_is_paging_enabled(iommu))
                return 0;

        rk_iommu_command(iommu, RK_MMU_CMD_ENABLE_STALL);

        ret = rk_wait_for(rk_iommu_is_stall_active(iommu), 1);
        if (ret)
                for (i = 0; i < iommu->num_mmu; i++)
                        dev_err(iommu->dev, "Enable stall request timed out, status: %#08x\n",
                                rk_iommu_read(iommu->bases[i], RK_MMU_STATUS));

        return ret;
}
static int rk_iommu_disable_stall(struct rk_iommu *iommu)
{
        int ret, i;

        if (!rk_iommu_is_stall_active(iommu))
                return 0;

        rk_iommu_command(iommu, RK_MMU_CMD_DISABLE_STALL);

        ret = rk_wait_for(!rk_iommu_is_stall_active(iommu), 1);
        if (ret)
                for (i = 0; i < iommu->num_mmu; i++)
                        dev_err(iommu->dev, "Disable stall request timed out, status: %#08x\n",
                                rk_iommu_read(iommu->bases[i], RK_MMU_STATUS));

        return ret;
}
static int rk_iommu_enable_paging(struct rk_iommu *iommu)
{
        int ret, i;

        if (rk_iommu_is_paging_enabled(iommu))
                return 0;

        rk_iommu_command(iommu, RK_MMU_CMD_ENABLE_PAGING);

        ret = rk_wait_for(rk_iommu_is_paging_enabled(iommu), 1);
        if (ret)
                for (i = 0; i < iommu->num_mmu; i++)
                        dev_err(iommu->dev, "Enable paging request timed out, status: %#08x\n",
                                rk_iommu_read(iommu->bases[i], RK_MMU_STATUS));

        return ret;
}
static int rk_iommu_disable_paging(struct rk_iommu *iommu)
{
        int ret, i;

        if (!rk_iommu_is_paging_enabled(iommu))
                return 0;

        rk_iommu_command(iommu, RK_MMU_CMD_DISABLE_PAGING);

        ret = rk_wait_for(!rk_iommu_is_paging_enabled(iommu), 1);
        if (ret)
                for (i = 0; i < iommu->num_mmu; i++)
                        dev_err(iommu->dev, "Disable paging request timed out, status: %#08x\n",
                                rk_iommu_read(iommu->bases[i], RK_MMU_STATUS));

        return ret;
}
static int rk_iommu_force_reset(struct rk_iommu *iommu)
{
        int ret, i;
        u32 dte_addr;

        /* Workaround for isp mmus */
        if (iommu->reset_disabled)
                return 0;

        /*
         * Check if register DTE_ADDR is working by writing DTE_ADDR_DUMMY
         * and verifying that upper 5 nybbles are read back.
         */
        for (i = 0; i < iommu->num_mmu; i++) {
                rk_iommu_write(iommu->bases[i], RK_MMU_DTE_ADDR, DTE_ADDR_DUMMY);

                dte_addr = rk_iommu_read(iommu->bases[i], RK_MMU_DTE_ADDR);
                if (dte_addr != (DTE_ADDR_DUMMY & RK_DTE_PT_ADDRESS_MASK)) {
                        dev_err(iommu->dev, "Error during raw reset. MMU_DTE_ADDR is not functioning\n");
                        return -EFAULT;
                }
        }

        rk_iommu_command(iommu, RK_MMU_CMD_FORCE_RESET);

        for (i = 0; i < iommu->num_mmu; i++) {
                ret = rk_wait_for(rk_iommu_read(iommu->bases[i], RK_MMU_DTE_ADDR) == 0x00000000,
                                  FORCE_RESET_TIMEOUT);
                if (ret) {
                        dev_err(iommu->dev, "FORCE_RESET command timed out\n");
                        return ret;
                }
        }

        return 0;
}
static void log_iova(struct rk_iommu *iommu, int index, dma_addr_t iova)
{
        void __iomem *base = iommu->bases[index];
        u32 dte_index, pte_index, page_offset;
        u32 mmu_dte_addr;
        phys_addr_t mmu_dte_addr_phys, dte_addr_phys;
        u32 *dte_addr;
        u32 dte;
        phys_addr_t pte_addr_phys = 0;
        u32 *pte_addr = NULL;
        u32 pte = 0;
        phys_addr_t page_addr_phys = 0;
        u32 page_flags = 0;

        dte_index = rk_iova_dte_index(iova);
        pte_index = rk_iova_pte_index(iova);
        page_offset = rk_iova_page_offset(iova);

        mmu_dte_addr = rk_iommu_read(base, RK_MMU_DTE_ADDR);
        mmu_dte_addr_phys = (phys_addr_t)mmu_dte_addr;

        dte_addr_phys = mmu_dte_addr_phys + (4 * dte_index);
        dte_addr = phys_to_virt(dte_addr_phys);
        dte = *dte_addr;

        if (!rk_dte_is_pt_valid(dte))
                goto print_it;

        pte_addr_phys = rk_dte_pt_address(dte) + (pte_index * 4);
        pte_addr = phys_to_virt(pte_addr_phys);
        pte = *pte_addr;

        if (!rk_pte_is_page_valid(pte))
                goto print_it;

        page_addr_phys = rk_pte_page_address(pte) + page_offset;
        page_flags = pte & RK_PTE_PAGE_FLAGS_MASK;

print_it:
        dev_err(iommu->dev, "iova = %pad: dte_index: %#03x pte_index: %#03x page_offset: %#03x\n",
                &iova, dte_index, pte_index, page_offset);
        dev_err(iommu->dev, "mmu_dte_addr: %pa dte@%pa: %#08x valid: %u pte@%pa: %#08x valid: %u page@%pa flags: %#03x\n",
                &mmu_dte_addr_phys, &dte_addr_phys, dte,
                rk_dte_is_pt_valid(dte), &pte_addr_phys, pte,
                rk_pte_is_page_valid(pte), &page_addr_phys, page_flags);
}
static irqreturn_t rk_iommu_irq(int irq, void *dev_id)
{
        struct rk_iommu *iommu = dev_id;
        u32 status;
        u32 int_status;
        dma_addr_t iova;
        irqreturn_t ret = IRQ_NONE;
        int i;

        for (i = 0; i < iommu->num_mmu; i++) {
                int_status = rk_iommu_read(iommu->bases[i], RK_MMU_INT_STATUS);
                if (int_status == 0)
                        continue;

                ret = IRQ_HANDLED;
                iova = rk_iommu_read(iommu->bases[i], RK_MMU_PAGE_FAULT_ADDR);

                if (int_status & RK_MMU_IRQ_PAGE_FAULT) {
                        int flags;

                        status = rk_iommu_read(iommu->bases[i], RK_MMU_STATUS);
                        flags = (status & RK_MMU_STATUS_PAGE_FAULT_IS_WRITE) ?
                                IOMMU_FAULT_WRITE : IOMMU_FAULT_READ;

                        dev_err(iommu->dev, "Page fault at %pad of type %s\n",
                                &iova,
                                (flags == IOMMU_FAULT_WRITE) ? "write" : "read");

                        log_iova(iommu, i, iova);

                        /*
                         * Report page fault to any installed handlers.
                         * Ignore the return code, though, since we always zap cache
                         * and clear the page fault anyway.
                         */
                        if (iommu->domain)
                                report_iommu_fault(iommu->domain, iommu->dev, iova,
                                                   flags);
                        else
                                dev_err(iommu->dev, "Page fault while iommu not attached to domain?\n");

                        rk_iommu_base_command(iommu->bases[i], RK_MMU_CMD_ZAP_CACHE);
                        rk_iommu_base_command(iommu->bases[i], RK_MMU_CMD_PAGE_FAULT_DONE);
                }

                if (int_status & RK_MMU_IRQ_BUS_ERROR)
                        dev_err(iommu->dev, "BUS_ERROR occurred at %pad\n", &iova);

                if (int_status & ~RK_MMU_IRQ_MASK)
                        dev_err(iommu->dev, "unexpected int_status: %#08x\n",
                                int_status);

                rk_iommu_write(iommu->bases[i], RK_MMU_INT_CLEAR, int_status);
        }

        return ret;
}
static phys_addr_t rk_iommu_iova_to_phys(struct iommu_domain *domain,
                                         dma_addr_t iova)
{
        struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
        phys_addr_t pt_phys, phys = 0;
        u32 dte, pte;
        u32 *page_table;

        mutex_lock(&rk_domain->dt_lock);

        dte = rk_domain->dt[rk_iova_dte_index(iova)];
        if (!rk_dte_is_pt_valid(dte))
                goto out;

        pt_phys = rk_dte_pt_address(dte);
        page_table = (u32 *)phys_to_virt(pt_phys);
        pte = page_table[rk_iova_pte_index(iova)];
        if (!rk_pte_is_page_valid(pte))
                goto out;

        phys = rk_pte_page_address(pte) + rk_iova_page_offset(iova);
out:
        mutex_unlock(&rk_domain->dt_lock);

        return phys;
}
static void rk_iommu_zap_iova(struct rk_iommu_domain *rk_domain,
                              dma_addr_t iova, size_t size)
{
        struct list_head *pos;

        /* shootdown these iova from all iommus using this domain */
        mutex_lock(&rk_domain->iommus_lock);
        list_for_each(pos, &rk_domain->iommus) {
                struct rk_iommu *iommu;

                iommu = list_entry(pos, struct rk_iommu, node);
                rk_iommu_zap_lines(iommu, iova, size);
        }
        mutex_unlock(&rk_domain->iommus_lock);
}
static void rk_iommu_zap_iova_first_last(struct rk_iommu_domain *rk_domain,
                                         dma_addr_t iova, size_t size)
{
        rk_iommu_zap_iova(rk_domain, iova, SPAGE_SIZE);
        if (size > SPAGE_SIZE)
                rk_iommu_zap_iova(rk_domain, iova + size - SPAGE_SIZE,
                                  SPAGE_SIZE);
}
static u32 *rk_dte_get_page_table(struct rk_iommu_domain *rk_domain,
                                  dma_addr_t iova)
{
        struct device *dev = &rk_domain->pdev->dev;
        u32 *page_table, *dte_addr;
        u32 dte_index, dte;
        phys_addr_t pt_phys;
        dma_addr_t pt_dma;

        WARN_ON(!mutex_is_locked(&rk_domain->dt_lock));

        dte_index = rk_iova_dte_index(iova);
        dte_addr = &rk_domain->dt[dte_index];
        dte = *dte_addr;
        if (rk_dte_is_pt_valid(dte))
                goto done;

        page_table = (u32 *)get_zeroed_page(GFP_ATOMIC | GFP_DMA32);
        if (!page_table)
                return ERR_PTR(-ENOMEM);

        pt_dma = dma_map_single(dev, page_table, SPAGE_SIZE, DMA_TO_DEVICE);
        if (dma_mapping_error(dev, pt_dma)) {
                dev_err(dev, "DMA mapping error while allocating page table\n");
                free_page((unsigned long)page_table);
                return ERR_PTR(-ENOMEM);
        }

        dte = rk_mk_dte(pt_dma);
        *dte_addr = dte;

        rk_table_flush(rk_domain, pt_dma, NUM_PT_ENTRIES);
        rk_table_flush(rk_domain,
                       rk_domain->dt_dma + dte_index * sizeof(u32), 1);
done:
        pt_phys = rk_dte_pt_address(dte);
        return (u32 *)phys_to_virt(pt_phys);
}
static size_t rk_iommu_unmap_iova(struct rk_iommu_domain *rk_domain,
                                  u32 *pte_addr, dma_addr_t pte_dma,
                                  size_t size)
{
        unsigned int pte_count;
        unsigned int pte_total = size / SPAGE_SIZE;

        WARN_ON(!mutex_is_locked(&rk_domain->dt_lock));

        for (pte_count = 0; pte_count < pte_total; pte_count++) {
                u32 pte = pte_addr[pte_count];

                if (!rk_pte_is_page_valid(pte))
                        break;

                pte_addr[pte_count] = rk_mk_pte_invalid(pte);
        }

        rk_table_flush(rk_domain, pte_dma, pte_count);

        return pte_count * SPAGE_SIZE;
}
static int rk_iommu_map_iova(struct rk_iommu_domain *rk_domain, u32 *pte_addr,
                             dma_addr_t pte_dma, dma_addr_t iova,
                             phys_addr_t paddr, size_t size, int prot)
{
        unsigned int pte_count;
        unsigned int pte_total = size / SPAGE_SIZE;
        phys_addr_t page_phys;

        WARN_ON(!mutex_is_locked(&rk_domain->dt_lock));

        for (pte_count = 0; pte_count < pte_total; pte_count++) {
                u32 pte = pte_addr[pte_count];

                if (rk_pte_is_page_valid(pte))
                        goto unwind;

                pte_addr[pte_count] = rk_mk_pte(paddr, prot);

                paddr += SPAGE_SIZE;
        }

        rk_table_flush(rk_domain, pte_dma, pte_total);

        /*
         * Zap the first and last iova to evict from iotlb any previously
         * mapped cachelines holding stale values for its dte and pte.
         * We only zap the first and last iova, since only they could have
         * dte or pte shared with an existing mapping.
         */
        rk_iommu_zap_iova_first_last(rk_domain, iova, size);

        return 0;
unwind:
        /* Unmap the range of iovas that we just mapped */
        rk_iommu_unmap_iova(rk_domain, pte_addr, pte_dma,
                            pte_count * SPAGE_SIZE);

        iova += pte_count * SPAGE_SIZE;
        page_phys = rk_pte_page_address(pte_addr[pte_count]);
        pr_err("iova: %pad already mapped to %pa cannot remap to phys: %pa prot: %#x\n",
               &iova, &page_phys, &paddr, prot);

        return -EADDRINUSE;
}
static int rk_iommu_map(struct iommu_domain *domain, unsigned long _iova,
                        phys_addr_t paddr, size_t size, int prot)
{
        struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
        dma_addr_t pte_dma, iova = (dma_addr_t)_iova;
        u32 *page_table, *pte_addr;
        u32 dte_index, pte_index;
        int ret;

        mutex_lock(&rk_domain->dt_lock);

        /*
         * pgsize_bitmap specifies iova sizes that fit in one page table
         * (1024 4-KiB pages = 4 MiB).
         * So, size will always be 4096 <= size <= 4194304.
         * Since iommu_map() guarantees that both iova and size will be
         * aligned, we will always only be mapping from a single dte here.
         */
        page_table = rk_dte_get_page_table(rk_domain, iova);
        if (IS_ERR(page_table)) {
                mutex_unlock(&rk_domain->dt_lock);
                return PTR_ERR(page_table);
        }

        dte_index = rk_domain->dt[rk_iova_dte_index(iova)];
        pte_index = rk_iova_pte_index(iova);
        pte_addr = &page_table[pte_index];
        pte_dma = rk_dte_pt_address(dte_index) + pte_index * sizeof(u32);
        ret = rk_iommu_map_iova(rk_domain, pte_addr, pte_dma, iova,
                                paddr, size, prot);

        mutex_unlock(&rk_domain->dt_lock);

        return ret;
}
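/*
 * For illustration only (hypothetical values): an iommu_map() of 16 KiB at
 * iova 0x10000000 stays within the page table for dte_index 0x040 and fills
 * pte_index 0x000 through 0x003, one PTE per 4 KiB page.
 */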
static size_t rk_iommu_unmap(struct iommu_domain *domain, unsigned long _iova,
                             size_t size)
{
        struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
        dma_addr_t pte_dma, iova = (dma_addr_t)_iova;
        phys_addr_t pt_phys;
        u32 dte;
        u32 *pte_addr;
        size_t unmap_size;

        mutex_lock(&rk_domain->dt_lock);

        /*
         * pgsize_bitmap specifies iova sizes that fit in one page table
         * (1024 4-KiB pages = 4 MiB).
         * So, size will always be 4096 <= size <= 4194304.
         * Since iommu_unmap() guarantees that both iova and size will be
         * aligned, we will always only be unmapping from a single dte here.
         */
        dte = rk_domain->dt[rk_iova_dte_index(iova)];
        /* Just return 0 if iova is unmapped */
        if (!rk_dte_is_pt_valid(dte)) {
                mutex_unlock(&rk_domain->dt_lock);
                return 0;
        }

        pt_phys = rk_dte_pt_address(dte);
        pte_addr = (u32 *)phys_to_virt(pt_phys) + rk_iova_pte_index(iova);
        pte_dma = pt_phys + rk_iova_pte_index(iova) * sizeof(u32);
        unmap_size = rk_iommu_unmap_iova(rk_domain, pte_addr, pte_dma, size);

        mutex_unlock(&rk_domain->dt_lock);

        /* Shootdown iotlb entries for iova range that was just unmapped */
        rk_iommu_zap_iova(rk_domain, iova, unmap_size);

        return unmap_size;
}
static struct rk_iommu *rk_iommu_from_dev(struct device *dev)
{
        struct iommu_group *group;
        struct device *iommu_dev;
        struct rk_iommu *rk_iommu;

        group = iommu_group_get(dev);
        if (!group)
                return NULL;

        iommu_dev = iommu_group_get_iommudata(group);
        if (!iommu_dev) {
                dev_info(dev, "Possibly a virtual device\n");
                return NULL;
        }

        rk_iommu = dev_get_drvdata(iommu_dev);
        iommu_group_put(group);

        return rk_iommu;
}
static int rk_iommu_attach_device(struct iommu_domain *domain,
                                  struct device *dev)
{
        struct rk_iommu *iommu;
        struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
        int ret, i;

        /*
         * Allow 'virtual devices' (e.g., drm) to attach to domain.
         * Such a device does not belong to an iommu group.
         */
        iommu = rk_iommu_from_dev(dev);
        if (!iommu)
                return 0;

        rk_iommu_power_on(iommu);

        ret = rk_iommu_enable_stall(iommu);
        if (ret)
                return ret;

        ret = rk_iommu_force_reset(iommu);
        if (ret)
                return ret;

        iommu->domain = domain;

        ret = devm_request_irq(iommu->dev, iommu->irq, rk_iommu_irq,
                               IRQF_SHARED, dev_name(dev), iommu);
        if (ret)
                return ret;

        for (i = 0; i < iommu->num_mmu; i++) {
                rk_iommu_write(iommu->bases[i], RK_MMU_DTE_ADDR,
                               rk_domain->dt_dma);
                rk_iommu_base_command(iommu->bases[i], RK_MMU_CMD_ZAP_CACHE);
                rk_iommu_write(iommu->bases[i], RK_MMU_INT_MASK, RK_MMU_IRQ_MASK);
        }

        ret = rk_iommu_enable_paging(iommu);
        if (ret)
                return ret;

        mutex_lock(&rk_domain->iommus_lock);
        list_add_tail(&iommu->node, &rk_domain->iommus);
        mutex_unlock(&rk_domain->iommus_lock);

        dev_dbg(dev, "Attached to iommu domain\n");

        rk_iommu_disable_stall(iommu);

        return 0;
}
static void rk_iommu_detach_device(struct iommu_domain *domain,
                                   struct device *dev)
{
        struct rk_iommu *iommu;
        struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
        int i;

        /* Allow 'virtual devices' (eg drm) to detach from domain */
        iommu = rk_iommu_from_dev(dev);
        if (!iommu)
                return;

        mutex_lock(&rk_domain->iommus_lock);
        list_del_init(&iommu->node);
        mutex_unlock(&rk_domain->iommus_lock);

        /* Ignore error while disabling, just keep going */
        rk_iommu_enable_stall(iommu);
        rk_iommu_disable_paging(iommu);
        for (i = 0; i < iommu->num_mmu; i++) {
                rk_iommu_write(iommu->bases[i], RK_MMU_INT_MASK, 0);
                rk_iommu_write(iommu->bases[i], RK_MMU_DTE_ADDR, 0);
        }
        rk_iommu_disable_stall(iommu);

        devm_free_irq(iommu->dev, iommu->irq, iommu);

        iommu->domain = NULL;

        rk_iommu_power_off(iommu);

        dev_dbg(dev, "Detached from iommu domain\n");
}
static struct iommu_domain *rk_iommu_domain_alloc(unsigned type)
{
        struct rk_iommu_domain *rk_domain;
        struct platform_device *pdev;
        struct device *iommu_dev;

        if (type != IOMMU_DOMAIN_UNMANAGED && type != IOMMU_DOMAIN_DMA)
                return NULL;

        /*
         * Register a pdev per domain, so the DMA API can rely on this *dev
         * even when a virtual master has no iommu slave device of its own.
         */
        pdev = platform_device_register_simple("rk_iommu_domain",
                                               PLATFORM_DEVID_AUTO, NULL, 0);
        if (IS_ERR(pdev))
                return NULL;

        rk_domain = devm_kzalloc(&pdev->dev, sizeof(*rk_domain), GFP_KERNEL);
        if (!rk_domain)
                goto err_unreg_pdev;

        rk_domain->pdev = pdev;

        if (type == IOMMU_DOMAIN_DMA &&
            iommu_get_dma_cookie(&rk_domain->domain))
                goto err_unreg_pdev;

        /*
         * rk32xx iommus use a 2 level pagetable.
         * Each level1 (dt) and level2 (pt) table has 1024 4-byte entries.
         * Allocate one 4 KiB page for each table.
         */
        rk_domain->dt = (u32 *)get_zeroed_page(GFP_KERNEL | GFP_DMA32);
        if (!rk_domain->dt)
                goto err_put_cookie;

        iommu_dev = &pdev->dev;
        rk_domain->dt_dma = dma_map_single(iommu_dev, rk_domain->dt,
                                           SPAGE_SIZE, DMA_TO_DEVICE);
        if (dma_mapping_error(iommu_dev, rk_domain->dt_dma)) {
                dev_err(iommu_dev, "DMA map error for DT\n");
                goto err_free_dt;
        }

        rk_table_flush(rk_domain, rk_domain->dt_dma, NUM_DT_ENTRIES);

        mutex_init(&rk_domain->iommus_lock);
        mutex_init(&rk_domain->dt_lock);
        INIT_LIST_HEAD(&rk_domain->iommus);

        rk_domain->domain.geometry.aperture_start = 0;
        rk_domain->domain.geometry.aperture_end = DMA_BIT_MASK(32);
        rk_domain->domain.geometry.force_aperture = true;

        return &rk_domain->domain;

err_free_dt:
        free_page((unsigned long)rk_domain->dt);
err_put_cookie:
        if (type == IOMMU_DOMAIN_DMA)
                iommu_put_dma_cookie(&rk_domain->domain);
err_unreg_pdev:
        platform_device_unregister(pdev);

        return NULL;
}
static void rk_iommu_domain_free(struct iommu_domain *domain)
{
        struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
        int i;

        WARN_ON(!list_empty(&rk_domain->iommus));

        for (i = 0; i < NUM_DT_ENTRIES; i++) {
                u32 dte = rk_domain->dt[i];

                if (rk_dte_is_pt_valid(dte)) {
                        phys_addr_t pt_phys = rk_dte_pt_address(dte);
                        u32 *page_table = phys_to_virt(pt_phys);

                        dma_unmap_single(&rk_domain->pdev->dev, pt_phys,
                                         SPAGE_SIZE, DMA_TO_DEVICE);
                        free_page((unsigned long)page_table);
                }
        }

        dma_unmap_single(&rk_domain->pdev->dev, rk_domain->dt_dma,
                         SPAGE_SIZE, DMA_TO_DEVICE);
        free_page((unsigned long)rk_domain->dt);

        if (domain->type == IOMMU_DOMAIN_DMA)
                iommu_put_dma_cookie(&rk_domain->domain);

        platform_device_unregister(rk_domain->pdev);
}
static bool rk_iommu_is_dev_iommu_master(struct device *dev)
{
        struct device_node *np = dev->of_node;
        int ret;

        /*
         * An iommu master has an iommus property containing a list of phandles
         * to iommu nodes, each with an #iommu-cells property with value 0.
         */
        ret = of_count_phandle_with_args(np, "iommus", "#iommu-cells");
        return (ret > 0);
}
static int rk_iommu_group_set_iommudata(struct iommu_group *group,
                                        struct device *dev)
{
        struct device_node *np = dev->of_node;
        struct platform_device *pd;
        int ret;
        struct of_phandle_args args;

        /*
         * An iommu master has an iommus property containing a list of phandles
         * to iommu nodes, each with an #iommu-cells property with value 0.
         */
        ret = of_parse_phandle_with_args(np, "iommus", "#iommu-cells", 0,
                                         &args);
        if (ret) {
                dev_err(dev, "of_parse_phandle_with_args(%s) => %d\n",
                        np->full_name, ret);
                return ret;
        }
        if (args.args_count != 0) {
                dev_err(dev, "incorrect number of iommu params found for %s (found %d, expected 0)\n",
                        args.np->full_name, args.args_count);
                return -EINVAL;
        }

        pd = of_find_device_by_node(args.np);
        of_node_put(args.np);
        if (!pd) {
                dev_err(dev, "iommu %s not found\n", args.np->full_name);
                return -EPROBE_DEFER;
        }

        /* TODO(djkurtz): handle multiple slave iommus for a single master */
        iommu_group_set_iommudata(group, &pd->dev, NULL);

        return 0;
}
static int rk_iommu_add_device(struct device *dev)
{
        struct iommu_group *group;
        int ret;

        if (!rk_iommu_is_dev_iommu_master(dev))
                return -ENODEV;

        group = iommu_group_get(dev);
        if (!group) {
                group = iommu_group_alloc();
                if (IS_ERR(group)) {
                        dev_err(dev, "Failed to allocate IOMMU group\n");
                        return PTR_ERR(group);
                }
        }

        ret = iommu_group_add_device(group, dev);
        if (ret)
                goto err_put_group;

        ret = rk_iommu_group_set_iommudata(group, dev);
        if (ret)
                goto err_remove_device;

        iommu_group_put(group);

        return 0;

err_remove_device:
        iommu_group_remove_device(dev);
err_put_group:
        iommu_group_put(group);
        return ret;
}
static void rk_iommu_remove_device(struct device *dev)
{
        if (!rk_iommu_is_dev_iommu_master(dev))
                return;

        iommu_group_remove_device(dev);
}
static const struct iommu_ops rk_iommu_ops = {
        .domain_alloc = rk_iommu_domain_alloc,
        .domain_free = rk_iommu_domain_free,
        .attach_dev = rk_iommu_attach_device,
        .detach_dev = rk_iommu_detach_device,
        .map = rk_iommu_map,
        .unmap = rk_iommu_unmap,
        .map_sg = default_iommu_map_sg,
        .add_device = rk_iommu_add_device,
        .remove_device = rk_iommu_remove_device,
        .iova_to_phys = rk_iommu_iova_to_phys,
        .pgsize_bitmap = RK_IOMMU_PGSIZE_BITMAP,
};
static int rk_iommu_domain_probe(struct platform_device *pdev)
{
        struct device *dev = &pdev->dev;

        dev->dma_parms = devm_kzalloc(dev, sizeof(*dev->dma_parms), GFP_KERNEL);
        if (!dev->dma_parms)
                return -ENOMEM;

        /* Set dma_ops for dev, otherwise it would be dummy_dma_ops */
        arch_setup_dma_ops(dev, 0, DMA_BIT_MASK(32), NULL, false);

        dma_set_max_seg_size(dev, DMA_BIT_MASK(32));
        dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(32));

        return 0;
}
static struct platform_driver rk_iommu_domain_driver = {
        .probe = rk_iommu_domain_probe,
        .driver = {
                .name = "rk_iommu_domain",
        },
};
static int rk_iommu_probe(struct platform_device *pdev)
{
        struct device *dev = &pdev->dev;
        struct rk_iommu *iommu;
        struct resource *res;
        int num_res = pdev->num_resources;
        int i;

        iommu = devm_kzalloc(dev, sizeof(*iommu), GFP_KERNEL);
        if (!iommu)
                return -ENOMEM;

        platform_set_drvdata(pdev, iommu);
        iommu->dev = dev;
        iommu->num_mmu = 0;

        iommu->bases = devm_kzalloc(dev, sizeof(*iommu->bases) * num_res,
                                    GFP_KERNEL);
        if (!iommu->bases)
                return -ENOMEM;

        for (i = 0; i < num_res; i++) {
                res = platform_get_resource(pdev, IORESOURCE_MEM, i);
                if (!res)
                        continue;
                iommu->bases[i] = devm_ioremap_resource(&pdev->dev, res);
                if (IS_ERR(iommu->bases[i]))
                        continue;
                iommu->num_mmu++;
        }
        if (iommu->num_mmu == 0)
                return PTR_ERR(iommu->bases[0]);

        iommu->irq = platform_get_irq(pdev, 0);
        if (iommu->irq < 0) {
                dev_err(dev, "Failed to get IRQ, %d\n", iommu->irq);
                return -ENXIO;
        }

        iommu->reset_disabled = device_property_read_bool(dev,
                                        "rk_iommu,disable_reset_quirk");

        iommu->aclk = devm_clk_get(dev, "aclk");
        if (IS_ERR(iommu->aclk)) {
                dev_info(dev, "can't get aclk\n");
                iommu->aclk = NULL;
        }

        iommu->hclk = devm_clk_get(dev, "hclk");
        if (IS_ERR(iommu->hclk)) {
                dev_info(dev, "can't get hclk\n");
                iommu->hclk = NULL;
        }

        if (iommu->aclk && iommu->hclk) {
                clk_prepare(iommu->aclk);
                clk_prepare(iommu->hclk);
        }

        pm_runtime_enable(dev);

        return 0;
}
static int rk_iommu_remove(struct platform_device *pdev)
{
        pm_runtime_put(&pdev->dev);

        return 0;
}
static const struct of_device_id rk_iommu_dt_ids[] = {
        { .compatible = "rockchip,iommu" },
        { /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, rk_iommu_dt_ids);
static struct platform_driver rk_iommu_driver = {
        .probe = rk_iommu_probe,
        .remove = rk_iommu_remove,
        .driver = {
                .name = "rk_iommu",
                .of_match_table = rk_iommu_dt_ids,
        },
};
static int __init rk_iommu_init(void)
{
        struct device_node *np;
        int ret;

        np = of_find_matching_node(NULL, rk_iommu_dt_ids);
        if (!np)
                return 0;

        of_node_put(np);

        ret = bus_set_iommu(&platform_bus_type, &rk_iommu_ops);
        if (ret)
                return ret;

        ret = platform_driver_register(&rk_iommu_domain_driver);
        if (ret)
                return ret;

        ret = platform_driver_register(&rk_iommu_driver);
        if (ret)
                platform_driver_unregister(&rk_iommu_domain_driver);
        return ret;
}
static void __exit rk_iommu_exit(void)
{
        platform_driver_unregister(&rk_iommu_driver);
        platform_driver_unregister(&rk_iommu_domain_driver);
}
subsys_initcall(rk_iommu_init);
module_exit(rk_iommu_exit);

MODULE_DESCRIPTION("IOMMU API for Rockchip");
MODULE_AUTHOR("Simon Xue <xxm@rock-chips.com> and Daniel Kurtz <djkurtz@chromium.org>");
MODULE_ALIAS("platform:rockchip-iommu");
MODULE_LICENSE("GPL v2");