/* linux/drivers/iommu/exynos_iommu.c
 *
 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
 *		http://www.samsung.com
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifdef CONFIG_EXYNOS_IOMMU_DEBUG
#define DEBUG
#endif
#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/pm_runtime.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/of.h>
#include <linux/iommu.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/memblock.h>
#include <linux/export.h>
#include <linux/kmemleak.h>

#include <asm/cacheflush.h>
#include <asm/pgtable.h>
typedef u32 sysmmu_iova_t;
typedef u32 sysmmu_pte_t;
/* We do not consider super section mapping (16MB) */
#define SECT_ORDER 20
#define LPAGE_ORDER 16
#define SPAGE_ORDER 12

#define SECT_SIZE (1 << SECT_ORDER)
#define LPAGE_SIZE (1 << LPAGE_ORDER)
#define SPAGE_SIZE (1 << SPAGE_ORDER)

#define SECT_MASK (~(SECT_SIZE - 1))
#define LPAGE_MASK (~(LPAGE_SIZE - 1))
#define SPAGE_MASK (~(SPAGE_SIZE - 1))
#define lv1ent_fault(sent) ((*(sent) == ZERO_LV2LINK) || \
			   ((*(sent) & 3) == 0) || ((*(sent) & 3) == 3))
#define lv1ent_zero(sent) (*(sent) == ZERO_LV2LINK)
#define lv1ent_page_zero(sent) ((*(sent) & 3) == 1)
#define lv1ent_page(sent) ((*(sent) != ZERO_LV2LINK) && \
			  ((*(sent) & 3) == 1))
#define lv1ent_section(sent) ((*(sent) & 3) == 2)

#define lv2ent_fault(pent) ((*(pent) & 3) == 0)
#define lv2ent_small(pent) ((*(pent) & 2) == 2)
#define lv2ent_large(pent) ((*(pent) & 3) == 1)
static u32 sysmmu_page_offset(sysmmu_iova_t iova, u32 size)
{
	return iova & (size - 1);
}
#define section_phys(sent) (*(sent) & SECT_MASK)
#define section_offs(iova) sysmmu_page_offset((iova), SECT_SIZE)
#define lpage_phys(pent) (*(pent) & LPAGE_MASK)
#define lpage_offs(iova) sysmmu_page_offset((iova), LPAGE_SIZE)
#define spage_phys(pent) (*(pent) & SPAGE_MASK)
#define spage_offs(iova) sysmmu_page_offset((iova), SPAGE_SIZE)

#define NUM_LV1ENTRIES 4096
#define NUM_LV2ENTRIES (SECT_SIZE / SPAGE_SIZE)
static u32 lv1ent_offset(sysmmu_iova_t iova)
{
	return iova >> SECT_ORDER;
}

static u32 lv2ent_offset(sysmmu_iova_t iova)
{
	return (iova >> SPAGE_ORDER) & (NUM_LV2ENTRIES - 1);
}
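
/*
 * Worked example (illustrative, derived from the helpers above): a 32-bit
 * IOVA decomposes as
 *   lv1 index   = iova >> SECT_ORDER           (bits 31..20, 4096 entries)
 *   lv2 index   = (iova >> SPAGE_ORDER) & 255  (bits 19..12, 256 entries)
 *   page offset = iova & (SPAGE_SIZE - 1)      (bits 11..0 for a 4KiB page)
 * e.g. iova 0x12345678 -> lv1 index 0x123, lv2 index 0x45, offset 0x678.
 */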
#define LV2TABLE_SIZE (NUM_LV2ENTRIES * sizeof(sysmmu_pte_t))

#define SPAGES_PER_LPAGE (LPAGE_SIZE / SPAGE_SIZE)

#define lv2table_base(sent) (*(sent) & 0xFFFFFC00)

#define mk_lv1ent_sect(pa) ((pa) | 2)
#define mk_lv1ent_page(pa) ((pa) | 1)
#define mk_lv2ent_lpage(pa) ((pa) | 1)
#define mk_lv2ent_spage(pa) ((pa) | 2)
#define CTRL_ENABLE 0x5
#define CTRL_BLOCK 0x7
#define CTRL_DISABLE 0x0

#define CFG_QOS(n) ((n & 0xF) << 7)
#define CFG_MASK 0x0150FFFF /* Selecting bits 0-15, 20, 22 and 24 */
#define CFG_ACGEN (1 << 24) /* System MMU 3.3 only */
#define CFG_SYSSEL (1 << 22) /* System MMU 3.2 only */
#define CFG_FLPDCACHE (1 << 20) /* System MMU 3.2+ only */
#define REG_MMU_CTRL 0x000
#define REG_MMU_CFG 0x004
#define REG_MMU_STATUS 0x008
#define REG_MMU_FLUSH 0x00C
#define REG_MMU_FLUSH_ENTRY 0x010
#define REG_PT_BASE_ADDR 0x014
#define REG_INT_STATUS 0x018
#define REG_INT_CLEAR 0x01C

#define REG_PAGE_FAULT_ADDR 0x024
#define REG_AW_FAULT_ADDR 0x028
#define REG_AR_FAULT_ADDR 0x02C
#define REG_DEFAULT_SLAVE_ADDR 0x030

#define REG_MMU_VERSION 0x034

#define MMU_MAJ_VER(val) ((val) >> 7)
#define MMU_MIN_VER(val) ((val) & 0x7F)
#define MMU_RAW_VER(reg) (((reg) >> 21) & ((1 << 11) - 1)) /* 11 bits */

#define MAKE_MMU_VER(maj, min) ((((maj) & 0xF) << 7) | ((min) & 0x7F))
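
/*
 * Worked example (illustrative): the IP version sits in the top 11 bits of
 * REG_MMU_VERSION, so a raw readout of 0x30600000 gives
 *   MMU_RAW_VER(0x30600000) = (0x30600000 >> 21) & 0x7FF = 0x183
 *   MMU_MAJ_VER(0x183) = 3, MMU_MIN_VER(0x183) = 3
 * which equals MAKE_MMU_VER(3, 3) = (3 << 7) | 3 = 0x183, the value the
 * driver later compares against for the v3.3-only FLPD cache flush.
 */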
#define REG_PB0_SADDR 0x04C
#define REG_PB0_EADDR 0x050
#define REG_PB1_SADDR 0x054
#define REG_PB1_EADDR 0x058

#define has_sysmmu(dev) (dev->archdata.iommu != NULL)
static struct kmem_cache *lv2table_kmem_cache;
static sysmmu_pte_t *zero_lv2_table;
#define ZERO_LV2LINK mk_lv1ent_page(virt_to_phys(zero_lv2_table))

static sysmmu_pte_t *section_entry(sysmmu_pte_t *pgtable, sysmmu_iova_t iova)
{
	return pgtable + lv1ent_offset(iova);
}

static sysmmu_pte_t *page_entry(sysmmu_pte_t *sent, sysmmu_iova_t iova)
{
	return (sysmmu_pte_t *)phys_to_virt(
				lv2table_base(sent)) + lv2ent_offset(iova);
}
enum exynos_sysmmu_inttype {
	SYSMMU_PAGEFAULT,
	SYSMMU_AR_MULTIHIT,
	SYSMMU_AW_MULTIHIT,
	SYSMMU_BUSERROR,
	SYSMMU_AR_SECURITY,
	SYSMMU_AR_ACCESS,
	SYSMMU_AW_SECURITY,
	SYSMMU_AW_PROTECTION, /* 7 */
	SYSMMU_FAULT_UNKNOWN,
	SYSMMU_FAULTS_NUM
};

static unsigned short fault_reg_offset[SYSMMU_FAULTS_NUM] = {
	REG_PAGE_FAULT_ADDR,
	REG_AR_FAULT_ADDR,
	REG_AW_FAULT_ADDR,
	REG_DEFAULT_SLAVE_ADDR,
	REG_AR_FAULT_ADDR,
	REG_AR_FAULT_ADDR,
	REG_AW_FAULT_ADDR,
	REG_AW_FAULT_ADDR
};

static char *sysmmu_fault_name[SYSMMU_FAULTS_NUM] = {
	"PAGE FAULT",
	"AR MULTI-HIT FAULT",
	"AW MULTI-HIT FAULT",
	"BUS ERROR",
	"AR SECURITY PROTECTION FAULT",
	"AR ACCESS PROTECTION FAULT",
	"AW SECURITY PROTECTION FAULT",
	"AW ACCESS PROTECTION FAULT",
	"UNKNOWN FAULT"
};
/* attached to dev.archdata.iommu of the master device */
struct exynos_iommu_owner {
	struct list_head client; /* entry of exynos_iommu_domain.clients */
	struct device *dev;
	struct device *sysmmu;
	struct iommu_domain *domain;
	void *vmm_data;		/* IO virtual memory manager's data */
	spinlock_t lock;	/* Lock to preserve consistency of System MMU */
};

struct exynos_iommu_domain {
	struct list_head clients; /* list of sysmmu_drvdata.node */
	sysmmu_pte_t *pgtable;	/* lv1 page table, 16KB */
	short *lv2entcnt;	/* free lv2 entry counter for each section */
	spinlock_t lock;	/* lock for this structure */
	spinlock_t pgtablelock;	/* lock for modifying page table @ pgtable */
	struct iommu_domain domain; /* generic domain data structure */
};

struct sysmmu_drvdata {
	struct device *sysmmu;	/* System MMU's device descriptor */
	struct device *master;	/* Owner of system MMU */
	void __iomem *sfrbase;
	struct clk *clk;
	struct clk *clk_master;
	int activations;
	spinlock_t lock;
	struct iommu_domain *domain;
	phys_addr_t pgtable;
	unsigned int version;
};
static struct exynos_iommu_domain *to_exynos_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct exynos_iommu_domain, domain);
}

static bool set_sysmmu_active(struct sysmmu_drvdata *data)
{
	/* return true if the System MMU was not active previously
	   and it needs to be initialized */
	return ++data->activations == 1;
}

static bool set_sysmmu_inactive(struct sysmmu_drvdata *data)
{
	/* return true if the System MMU needs to be disabled */
	BUG_ON(data->activations < 1);
	return --data->activations == 0;
}

static bool is_sysmmu_active(struct sysmmu_drvdata *data)
{
	return data->activations > 0;
}
static void sysmmu_unblock(void __iomem *sfrbase)
{
	__raw_writel(CTRL_ENABLE, sfrbase + REG_MMU_CTRL);
}

static bool sysmmu_block(void __iomem *sfrbase)
{
	int i = 120;

	__raw_writel(CTRL_BLOCK, sfrbase + REG_MMU_CTRL);
	while ((i > 0) && !(__raw_readl(sfrbase + REG_MMU_STATUS) & 1))
		--i;

	if (!(__raw_readl(sfrbase + REG_MMU_STATUS) & 1)) {
		sysmmu_unblock(sfrbase);
		return false;
	}

	return true;
}
static void __sysmmu_tlb_invalidate(void __iomem *sfrbase)
{
	__raw_writel(0x1, sfrbase + REG_MMU_FLUSH);
}

static void __sysmmu_tlb_invalidate_entry(void __iomem *sfrbase,
				sysmmu_iova_t iova, unsigned int num_inv)
{
	unsigned int i;

	for (i = 0; i < num_inv; i++) {
		__raw_writel((iova & SPAGE_MASK) | 1,
				sfrbase + REG_MMU_FLUSH_ENTRY);
		iova += SPAGE_SIZE;
	}
}

static void __sysmmu_set_ptbase(void __iomem *sfrbase,
				phys_addr_t pgd)
{
	__raw_writel(pgd, sfrbase + REG_PT_BASE_ADDR);

	__sysmmu_tlb_invalidate(sfrbase);
}
static void show_fault_information(const char *name,
		enum exynos_sysmmu_inttype itype,
		phys_addr_t pgtable_base, sysmmu_iova_t fault_addr)
{
	sysmmu_pte_t *ent;

	if ((itype >= SYSMMU_FAULTS_NUM) || (itype < SYSMMU_PAGEFAULT))
		itype = SYSMMU_FAULT_UNKNOWN;

	pr_err("%s occurred at %#x by %s (Page table base: %pa)\n",
		sysmmu_fault_name[itype], fault_addr, name, &pgtable_base);

	ent = section_entry(phys_to_virt(pgtable_base), fault_addr);
	pr_err("\tLv1 entry: %#x\n", *ent);

	if (lv1ent_page(ent)) {
		ent = page_entry(ent, fault_addr);
		pr_err("\t Lv2 entry: %#x\n", *ent);
	}
}
static irqreturn_t exynos_sysmmu_irq(int irq, void *dev_id)
{
	/* SYSMMU is in blocked state when an interrupt occurs */
	struct sysmmu_drvdata *data = dev_id;
	enum exynos_sysmmu_inttype itype;
	sysmmu_iova_t addr = -1;
	int ret = -ENOSYS;

	WARN_ON(!is_sysmmu_active(data));

	spin_lock(&data->lock);

	if (!IS_ERR(data->clk_master))
		clk_enable(data->clk_master);

	itype = (enum exynos_sysmmu_inttype)
		__ffs(__raw_readl(data->sfrbase + REG_INT_STATUS));
	if (WARN_ON(!((itype >= 0) && (itype < SYSMMU_FAULT_UNKNOWN))))
		itype = SYSMMU_FAULT_UNKNOWN;
	else
		addr = __raw_readl(data->sfrbase + fault_reg_offset[itype]);

	if (itype == SYSMMU_FAULT_UNKNOWN) {
		pr_err("%s: Fault was not raised by System MMU '%s'!\n",
			__func__, dev_name(data->sysmmu));
		pr_err("%s: Please check if IRQ is correctly configured.\n",
			__func__);
		BUG();
	} else {
		unsigned int base =
				__raw_readl(data->sfrbase + REG_PT_BASE_ADDR);
		show_fault_information(dev_name(data->sysmmu),
					itype, base, addr);
		if (data->domain)
			ret = report_iommu_fault(data->domain,
					data->master, addr, itype);
	}

	/* fault is not recovered by fault handler */
	BUG_ON(ret != 0);

	__raw_writel(1 << itype, data->sfrbase + REG_INT_CLEAR);

	sysmmu_unblock(data->sfrbase);

	if (!IS_ERR(data->clk_master))
		clk_disable(data->clk_master);

	spin_unlock(&data->lock);

	return IRQ_HANDLED;
}
static void __sysmmu_disable_nocount(struct sysmmu_drvdata *data)
{
	if (!IS_ERR(data->clk_master))
		clk_enable(data->clk_master);

	__raw_writel(CTRL_DISABLE, data->sfrbase + REG_MMU_CTRL);
	__raw_writel(0, data->sfrbase + REG_MMU_CFG);

	clk_disable(data->clk);
	if (!IS_ERR(data->clk_master))
		clk_disable(data->clk_master);
}

static bool __sysmmu_disable(struct sysmmu_drvdata *data)
{
	bool disabled;
	unsigned long flags;

	spin_lock_irqsave(&data->lock, flags);

	disabled = set_sysmmu_inactive(data);

	if (disabled) {
		data->pgtable = 0;
		data->domain = NULL;

		__sysmmu_disable_nocount(data);

		dev_dbg(data->sysmmu, "Disabled\n");
	} else {
		dev_dbg(data->sysmmu, "%d times left to disable\n",
					data->activations);
	}

	spin_unlock_irqrestore(&data->lock, flags);

	return disabled;
}
static void __sysmmu_init_config(struct sysmmu_drvdata *data)
{
	unsigned int cfg = CFG_LRU | CFG_QOS(15);
	unsigned int ver;

	ver = MMU_RAW_VER(__raw_readl(data->sfrbase + REG_MMU_VERSION));
	if (MMU_MAJ_VER(ver) == 3) {
		if (MMU_MIN_VER(ver) >= 2) {
			cfg |= CFG_FLPDCACHE;
			if (MMU_MIN_VER(ver) == 3) {
				cfg |= CFG_ACGEN;
				cfg &= ~CFG_LRU;
			} else {
				cfg |= CFG_SYSSEL;
			}
		}
	}

	__raw_writel(cfg, data->sfrbase + REG_MMU_CFG);
	data->version = ver;
}
static void __sysmmu_enable_nocount(struct sysmmu_drvdata *data)
{
	if (!IS_ERR(data->clk_master))
		clk_enable(data->clk_master);
	clk_enable(data->clk);

	__raw_writel(CTRL_BLOCK, data->sfrbase + REG_MMU_CTRL);

	__sysmmu_init_config(data);

	__sysmmu_set_ptbase(data->sfrbase, data->pgtable);

	__raw_writel(CTRL_ENABLE, data->sfrbase + REG_MMU_CTRL);

	if (!IS_ERR(data->clk_master))
		clk_disable(data->clk_master);
}
static int __sysmmu_enable(struct sysmmu_drvdata *data,
			phys_addr_t pgtable, struct iommu_domain *domain)
{
	int ret = 0;
	unsigned long flags;

	spin_lock_irqsave(&data->lock, flags);
	if (set_sysmmu_active(data)) {
		data->pgtable = pgtable;
		data->domain = domain;

		__sysmmu_enable_nocount(data);

		dev_dbg(data->sysmmu, "Enabled\n");
	} else {
		ret = (pgtable == data->pgtable) ? 1 : -EBUSY;

		dev_dbg(data->sysmmu, "already enabled\n");
	}

	if (WARN_ON(ret < 0))
		set_sysmmu_inactive(data); /* decrement count */

	spin_unlock_irqrestore(&data->lock, flags);

	return ret;
}
/* __exynos_sysmmu_enable: Enables System MMU
 *
 * returns -error if an error occurred and System MMU is not enabled,
 * 0 if the System MMU has been just enabled and 1 if System MMU was already
 * enabled.
 */
static int __exynos_sysmmu_enable(struct device *dev, phys_addr_t pgtable,
				  struct iommu_domain *domain)
{
	int ret = 0;
	unsigned long flags;
	struct exynos_iommu_owner *owner = dev->archdata.iommu;
	struct sysmmu_drvdata *data;

	BUG_ON(!has_sysmmu(dev));

	spin_lock_irqsave(&owner->lock, flags);

	data = dev_get_drvdata(owner->sysmmu);

	ret = __sysmmu_enable(data, pgtable, domain);
	if (ret >= 0)
		data->master = dev;

	spin_unlock_irqrestore(&owner->lock, flags);

	return ret;
}
int exynos_sysmmu_enable(struct device *dev, phys_addr_t pgtable)
{
	BUG_ON(!memblock_is_memory(pgtable));

	return __exynos_sysmmu_enable(dev, pgtable, NULL);
}
static bool exynos_sysmmu_disable(struct device *dev)
{
	unsigned long flags;
	bool disabled = true;
	struct exynos_iommu_owner *owner = dev->archdata.iommu;
	struct sysmmu_drvdata *data;

	BUG_ON(!has_sysmmu(dev));

	spin_lock_irqsave(&owner->lock, flags);

	data = dev_get_drvdata(owner->sysmmu);

	disabled = __sysmmu_disable(data);
	if (disabled)
		data->master = NULL;

	spin_unlock_irqrestore(&owner->lock, flags);

	return disabled;
}
static void __sysmmu_tlb_invalidate_flpdcache(struct sysmmu_drvdata *data,
					      sysmmu_iova_t iova)
{
	if (data->version == MAKE_MMU_VER(3, 3))
		__raw_writel(iova | 0x1, data->sfrbase + REG_MMU_FLUSH_ENTRY);
}

static void sysmmu_tlb_invalidate_flpdcache(struct device *dev,
					    sysmmu_iova_t iova)
{
	unsigned long flags;
	struct exynos_iommu_owner *owner = dev->archdata.iommu;
	struct sysmmu_drvdata *data = dev_get_drvdata(owner->sysmmu);

	if (!IS_ERR(data->clk_master))
		clk_enable(data->clk_master);

	spin_lock_irqsave(&data->lock, flags);
	if (is_sysmmu_active(data))
		__sysmmu_tlb_invalidate_flpdcache(data, iova);
	spin_unlock_irqrestore(&data->lock, flags);

	if (!IS_ERR(data->clk_master))
		clk_disable(data->clk_master);
}
static void sysmmu_tlb_invalidate_entry(struct device *dev, sysmmu_iova_t iova,
					size_t size)
{
	struct exynos_iommu_owner *owner = dev->archdata.iommu;
	unsigned long flags;
	struct sysmmu_drvdata *data;

	data = dev_get_drvdata(owner->sysmmu);

	spin_lock_irqsave(&data->lock, flags);
	if (is_sysmmu_active(data)) {
		unsigned int num_inv = 1;

		if (!IS_ERR(data->clk_master))
			clk_enable(data->clk_master);

		/*
		 * L2TLB invalidation required
		 * 4KB page: 1 invalidation
		 * 64KB page: 16 invalidations
		 * 1MB page: 64 invalidations
		 * because the TLB is 8-way set-associative with 64 sets.
		 * A 1MB page can be cached in any of the sets;
		 * a 64KB page can be in one of 16 consecutive sets.
		 */
		if (MMU_MAJ_VER(data->version) == 2)
			num_inv = min_t(unsigned int, size / PAGE_SIZE, 64);
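
		/*
		 * Worked example (illustrative): unmapping one 64KiB large
		 * page on a v2 System MMU gives size / PAGE_SIZE = 16, so 16
		 * consecutive 4KiB entries are flushed; a 1MiB section gives
		 * 256, which min_t() above clamps to the 64-invalidation cap.
		 */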
		if (sysmmu_block(data->sfrbase)) {
			__sysmmu_tlb_invalidate_entry(
				data->sfrbase, iova, num_inv);
			sysmmu_unblock(data->sfrbase);
		}
		if (!IS_ERR(data->clk_master))
			clk_disable(data->clk_master);
	} else {
		dev_dbg(dev, "disabled. Skipping TLB invalidation @ %#x\n",
			iova);
	}
	spin_unlock_irqrestore(&data->lock, flags);
}
void exynos_sysmmu_tlb_invalidate(struct device *dev)
{
	struct exynos_iommu_owner *owner = dev->archdata.iommu;
	unsigned long flags;
	struct sysmmu_drvdata *data;

	data = dev_get_drvdata(owner->sysmmu);

	spin_lock_irqsave(&data->lock, flags);
	if (is_sysmmu_active(data)) {
		if (!IS_ERR(data->clk_master))
			clk_enable(data->clk_master);
		if (sysmmu_block(data->sfrbase)) {
			__sysmmu_tlb_invalidate(data->sfrbase);
			sysmmu_unblock(data->sfrbase);
		}
		if (!IS_ERR(data->clk_master))
			clk_disable(data->clk_master);
	} else {
		dev_dbg(dev, "disabled. Skipping TLB invalidation\n");
	}
	spin_unlock_irqrestore(&data->lock, flags);
}
static int __init exynos_sysmmu_probe(struct platform_device *pdev)
{
	int irq, ret;
	struct device *dev = &pdev->dev;
	struct sysmmu_drvdata *data;
	struct resource *res;

	data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	data->sfrbase = devm_ioremap_resource(dev, res);
	if (IS_ERR(data->sfrbase))
		return PTR_ERR(data->sfrbase);

	irq = platform_get_irq(pdev, 0);
	if (irq <= 0) {
		dev_err(dev, "Unable to find IRQ resource\n");
		return irq;
	}

	ret = devm_request_irq(dev, irq, exynos_sysmmu_irq, 0,
				dev_name(dev), data);
	if (ret) {
		dev_err(dev, "Unable to register handler of irq %d\n", irq);
		return ret;
	}

	data->clk = devm_clk_get(dev, "sysmmu");
	if (IS_ERR(data->clk)) {
		dev_err(dev, "Failed to get clock!\n");
		return PTR_ERR(data->clk);
	}

	ret = clk_prepare(data->clk);
	if (ret) {
		dev_err(dev, "Failed to prepare clk\n");
		return ret;
	}

	data->clk_master = devm_clk_get(dev, "master");
	if (!IS_ERR(data->clk_master)) {
		ret = clk_prepare(data->clk_master);
		if (ret) {
			clk_unprepare(data->clk);
			dev_err(dev, "Failed to prepare master's clk\n");
			return ret;
		}
	}

	data->sysmmu = dev;
	spin_lock_init(&data->lock);

	platform_set_drvdata(pdev, data);

	pm_runtime_enable(dev);

	return 0;
}
static const struct of_device_id sysmmu_of_match[] __initconst = {
	{ .compatible	= "samsung,exynos-sysmmu", },
	{ },
};

static struct platform_driver exynos_sysmmu_driver __refdata = {
	.probe	= exynos_sysmmu_probe,
	.driver	= {
		.name		= "exynos-sysmmu",
		.of_match_table	= sysmmu_of_match,
	}
};
static inline void pgtable_flush(void *vastart, void *vaend)
{
	dmac_flush_range(vastart, vaend);
	outer_flush_range(virt_to_phys(vastart),
				virt_to_phys(vaend));
}
static struct iommu_domain *exynos_iommu_domain_alloc(unsigned type)
{
	struct exynos_iommu_domain *exynos_domain;
	int i;

	if (type != IOMMU_DOMAIN_UNMANAGED)
		return NULL;

	exynos_domain = kzalloc(sizeof(*exynos_domain), GFP_KERNEL);
	if (!exynos_domain)
		return NULL;

	exynos_domain->pgtable = (sysmmu_pte_t *)__get_free_pages(GFP_KERNEL, 2);
	if (!exynos_domain->pgtable)
		goto err_pgtable;

	exynos_domain->lv2entcnt = (short *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 1);
	if (!exynos_domain->lv2entcnt)
		goto err_counter;

	/* Workaround for System MMU v3.3 to prevent caching 1MiB mapping */
	for (i = 0; i < NUM_LV1ENTRIES; i += 8) {
		exynos_domain->pgtable[i + 0] = ZERO_LV2LINK;
		exynos_domain->pgtable[i + 1] = ZERO_LV2LINK;
		exynos_domain->pgtable[i + 2] = ZERO_LV2LINK;
		exynos_domain->pgtable[i + 3] = ZERO_LV2LINK;
		exynos_domain->pgtable[i + 4] = ZERO_LV2LINK;
		exynos_domain->pgtable[i + 5] = ZERO_LV2LINK;
		exynos_domain->pgtable[i + 6] = ZERO_LV2LINK;
		exynos_domain->pgtable[i + 7] = ZERO_LV2LINK;
	}

	pgtable_flush(exynos_domain->pgtable, exynos_domain->pgtable + NUM_LV1ENTRIES);

	spin_lock_init(&exynos_domain->lock);
	spin_lock_init(&exynos_domain->pgtablelock);
	INIT_LIST_HEAD(&exynos_domain->clients);

	exynos_domain->domain.geometry.aperture_start = 0;
	exynos_domain->domain.geometry.aperture_end   = ~0UL;
	exynos_domain->domain.geometry.force_aperture = true;

	return &exynos_domain->domain;

err_counter:
	free_pages((unsigned long)exynos_domain->pgtable, 2);
err_pgtable:
	kfree(exynos_domain);
	return NULL;
}
static void exynos_iommu_domain_free(struct iommu_domain *domain)
{
	struct exynos_iommu_domain *priv = to_exynos_domain(domain);
	struct exynos_iommu_owner *owner;
	unsigned long flags;
	int i;

	WARN_ON(!list_empty(&priv->clients));

	spin_lock_irqsave(&priv->lock, flags);

	list_for_each_entry(owner, &priv->clients, client) {
		while (!exynos_sysmmu_disable(owner->dev))
			; /* until System MMU is actually disabled */
	}

	while (!list_empty(&priv->clients))
		list_del_init(priv->clients.next);

	spin_unlock_irqrestore(&priv->lock, flags);

	for (i = 0; i < NUM_LV1ENTRIES; i++)
		if (lv1ent_page(priv->pgtable + i))
			kmem_cache_free(lv2table_kmem_cache,
				phys_to_virt(lv2table_base(priv->pgtable + i)));

	free_pages((unsigned long)priv->pgtable, 2);
	free_pages((unsigned long)priv->lv2entcnt, 1);
}
static int exynos_iommu_attach_device(struct iommu_domain *domain,
				   struct device *dev)
{
	struct exynos_iommu_owner *owner = dev->archdata.iommu;
	struct exynos_iommu_domain *priv = to_exynos_domain(domain);
	phys_addr_t pagetable = virt_to_phys(priv->pgtable);
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&priv->lock, flags);

	ret = __exynos_sysmmu_enable(dev, pagetable, domain);
	if (ret == 0) {
		list_add_tail(&owner->client, &priv->clients);
		owner->domain = domain;
	}

	spin_unlock_irqrestore(&priv->lock, flags);

	if (ret < 0) {
		dev_err(dev, "%s: Failed to attach IOMMU with pgtable %pa\n",
					__func__, &pagetable);
		return ret;
	}

	dev_dbg(dev, "%s: Attached IOMMU with pgtable %pa %s\n",
		__func__, &pagetable, (ret == 0) ? "" : ", again");

	return ret;
}
static void exynos_iommu_detach_device(struct iommu_domain *domain,
				    struct device *dev)
{
	struct exynos_iommu_owner *owner;
	struct exynos_iommu_domain *priv = to_exynos_domain(domain);
	phys_addr_t pagetable = virt_to_phys(priv->pgtable);
	unsigned long flags;

	spin_lock_irqsave(&priv->lock, flags);

	list_for_each_entry(owner, &priv->clients, client) {
		if (owner == dev->archdata.iommu) {
			if (exynos_sysmmu_disable(dev)) {
				list_del_init(&owner->client);
				owner->domain = NULL;
			}
			break;
		}
	}

	spin_unlock_irqrestore(&priv->lock, flags);

	if (owner == dev->archdata.iommu)
		dev_dbg(dev, "%s: Detached IOMMU with pgtable %pa\n",
					__func__, &pagetable);
	else
		dev_err(dev, "%s: No IOMMU is attached\n", __func__);
}
static sysmmu_pte_t *alloc_lv2entry(struct exynos_iommu_domain *priv,
		sysmmu_pte_t *sent, sysmmu_iova_t iova, short *pgcounter)
{
	if (lv1ent_section(sent)) {
		WARN(1, "Trying mapping on %#08x mapped with 1MiB page", iova);
		return ERR_PTR(-EADDRINUSE);
	}

	if (lv1ent_fault(sent)) {
		sysmmu_pte_t *pent;
		bool need_flush_flpd_cache = lv1ent_zero(sent);

		pent = kmem_cache_zalloc(lv2table_kmem_cache, GFP_ATOMIC);
		BUG_ON((unsigned int)pent & (LV2TABLE_SIZE - 1));
		if (!pent)
			return ERR_PTR(-ENOMEM);

		*sent = mk_lv1ent_page(virt_to_phys(pent));
		kmemleak_ignore(pent);
		*pgcounter = NUM_LV2ENTRIES;
		pgtable_flush(pent, pent + NUM_LV2ENTRIES);
		pgtable_flush(sent, sent + 1);

		/*
		 * If the pre-fetched SLPD is the faulty SLPD of zero_l2_table,
		 * the FLPD cache may still hold the address of zero_l2_table.
		 * This function replaces zero_l2_table with a new L2 page
		 * table so that valid mappings can be written, but accesses
		 * to the newly valid area may still fault because the FLPD
		 * cache caches zero_l2_table instead of the new L2 page table
		 * that carries the mapping information.
		 * Thus any replacement of zero_l2_table with another valid L2
		 * page table must be followed by an FLPD cache invalidation
		 * for System MMU v3.3.
		 * The FLPD cache invalidation is performed with a TLB
		 * invalidation by VPN without blocking. It is safe to
		 * invalidate the TLB without blocking because the target
		 * address of the invalidation is not currently mapped.
		 */
		if (need_flush_flpd_cache) {
			struct exynos_iommu_owner *owner;

			spin_lock(&priv->lock);
			list_for_each_entry(owner, &priv->clients, client)
				sysmmu_tlb_invalidate_flpdcache(
							owner->dev, iova);
			spin_unlock(&priv->lock);
		}
	}

	return page_entry(sent, iova);
}
static int lv1set_section(struct exynos_iommu_domain *priv,
			  sysmmu_pte_t *sent, sysmmu_iova_t iova,
			  phys_addr_t paddr, short *pgcnt)
{
	if (lv1ent_section(sent)) {
		WARN(1, "Trying mapping on 1MiB@%#08x that is mapped",
			iova);
		return -EADDRINUSE;
	}

	if (lv1ent_page(sent)) {
		if (*pgcnt != NUM_LV2ENTRIES) {
			WARN(1, "Trying mapping on 1MiB@%#08x that is mapped",
				iova);
			return -EADDRINUSE;
		}

		kmem_cache_free(lv2table_kmem_cache, page_entry(sent, 0));
		*pgcnt = 0;
	}

	*sent = mk_lv1ent_sect(paddr);

	pgtable_flush(sent, sent + 1);

	spin_lock(&priv->lock);
	if (lv1ent_page_zero(sent)) {
		struct exynos_iommu_owner *owner;
		/*
		 * Flushing FLPD cache in System MMU v3.3 that may cache a FLPD
		 * entry by speculative prefetch of SLPD which has no mapping.
		 */
		list_for_each_entry(owner, &priv->clients, client)
			sysmmu_tlb_invalidate_flpdcache(owner->dev, iova);
	}
	spin_unlock(&priv->lock);

	return 0;
}
static int lv2set_page(sysmmu_pte_t *pent, phys_addr_t paddr, size_t size,
							short *pgcnt)
{
	if (size == SPAGE_SIZE) {
		if (WARN_ON(!lv2ent_fault(pent)))
			return -EADDRINUSE;

		*pent = mk_lv2ent_spage(paddr);
		pgtable_flush(pent, pent + 1);
		*pgcnt -= 1;
	} else { /* size == LPAGE_SIZE */
		int i;

		for (i = 0; i < SPAGES_PER_LPAGE; i++, pent++) {
			if (WARN_ON(!lv2ent_fault(pent))) {
				if (i > 0)
					memset(pent - i, 0, sizeof(*pent) * i);
				return -EADDRINUSE;
			}

			*pent = mk_lv2ent_lpage(paddr);
		}
		pgtable_flush(pent - SPAGES_PER_LPAGE, pent);
		*pgcnt -= SPAGES_PER_LPAGE;
	}

	return 0;
}
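
/*
 * Illustrative note on the lv2entcnt bookkeeping (not from the original
 * source): a freshly allocated level-2 table starts with its counter at
 * NUM_LV2ENTRIES (256 free slots). Mapping one 64KiB large page consumes
 * SPAGES_PER_LPAGE = 16 slots (counter drops to 240) and a 4KiB small page
 * consumes one; lv1set_section() only replaces a level-2 table with a 1MiB
 * section while the counter is back at 256, i.e. no live small mappings.
 */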
/*
 * *CAUTION* to the I/O virtual memory managers that support exynos-iommu:
 *
 * System MMU v3.x has advanced logic to improve address translation
 * performance by caching more page table entries obtained from a page table
 * walk. However, the logic has a bug: while caching faulty page table
 * entries, System MMU reports a page fault when the cached faulty entry is
 * hit, even though the entry has been updated to a valid one after it was
 * cached. To prevent caching of faulty page table entries which may later be
 * updated to valid entries, the virtual memory manager must apply the
 * workaround described below.
 *
 * Any two consecutive I/O virtual address regions must have a hole of 128KiB
 * at maximum to prevent misbehavior of System MMU 3.x (workaround for h/w bug).
 *
 * Precisely, the start address of any I/O virtual region must be aligned to
 * the following sizes for System MMU v3.1 and v3.2:
 * System MMU v3.1: 128KiB
 * System MMU v3.2: 256KiB
 *
 * Because System MMU v3.3 caches page table entries more aggressively, it
 * needs the following restrictions:
 * - Any two consecutive I/O virtual regions must have a hole of size larger
 *   than or equal to 128KiB.
 * - The start address of an I/O virtual region must be aligned to 128KiB.
 */
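
/*
 * Illustrative example (not from the original source) of the v3.3 rules
 * above: if one I/O virtual region ends at 0x10000000, the next region may
 * start at 0x10020000 (a 128KiB hole, and a 128KiB-aligned start), whereas
 * placing it at 0x10010000 would violate both the hole-size and the
 * alignment requirement.
 */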
static int exynos_iommu_map(struct iommu_domain *domain, unsigned long l_iova,
			 phys_addr_t paddr, size_t size, int prot)
{
	struct exynos_iommu_domain *priv = to_exynos_domain(domain);
	sysmmu_pte_t *entry;
	sysmmu_iova_t iova = (sysmmu_iova_t)l_iova;
	unsigned long flags;
	int ret = -ENOMEM;

	BUG_ON(priv->pgtable == NULL);

	spin_lock_irqsave(&priv->pgtablelock, flags);

	entry = section_entry(priv->pgtable, iova);

	if (size == SECT_SIZE) {
		ret = lv1set_section(priv, entry, iova, paddr,
					&priv->lv2entcnt[lv1ent_offset(iova)]);
	} else {
		sysmmu_pte_t *pent;

		pent = alloc_lv2entry(priv, entry, iova,
					&priv->lv2entcnt[lv1ent_offset(iova)]);

		if (IS_ERR(pent))
			ret = PTR_ERR(pent);
		else
			ret = lv2set_page(pent, paddr, size,
					&priv->lv2entcnt[lv1ent_offset(iova)]);
	}

	if (ret)
		pr_err("%s: Failed(%d) to map %#zx bytes @ %#x\n",
			__func__, ret, size, iova);

	spin_unlock_irqrestore(&priv->pgtablelock, flags);

	return ret;
}
static void exynos_iommu_tlb_invalidate_entry(struct exynos_iommu_domain *priv,
						sysmmu_iova_t iova, size_t size)
{
	struct exynos_iommu_owner *owner;
	unsigned long flags;

	spin_lock_irqsave(&priv->lock, flags);

	list_for_each_entry(owner, &priv->clients, client)
		sysmmu_tlb_invalidate_entry(owner->dev, iova, size);

	spin_unlock_irqrestore(&priv->lock, flags);
}
static size_t exynos_iommu_unmap(struct iommu_domain *domain,
					unsigned long l_iova, size_t size)
{
	struct exynos_iommu_domain *priv = to_exynos_domain(domain);
	sysmmu_iova_t iova = (sysmmu_iova_t)l_iova;
	sysmmu_pte_t *ent;
	size_t err_pgsize;
	unsigned long flags;

	BUG_ON(priv->pgtable == NULL);

	spin_lock_irqsave(&priv->pgtablelock, flags);

	ent = section_entry(priv->pgtable, iova);

	if (lv1ent_section(ent)) {
		if (WARN_ON(size < SECT_SIZE)) {
			err_pgsize = SECT_SIZE;
			goto err;
		}

		/* workaround for h/w bug in System MMU v3.3 */
		*ent = ZERO_LV2LINK;
		pgtable_flush(ent, ent + 1);
		size = SECT_SIZE;
		goto done;
	}

	if (unlikely(lv1ent_fault(ent))) {
		if (size > SECT_SIZE)
			size = SECT_SIZE;
		goto done;
	}

	/* lv1ent_page(sent) == true here */

	ent = page_entry(ent, iova);

	if (unlikely(lv2ent_fault(ent))) {
		size = SPAGE_SIZE;
		goto done;
	}

	if (lv2ent_small(ent)) {
		*ent = 0;
		size = SPAGE_SIZE;
		pgtable_flush(ent, ent + 1);
		priv->lv2entcnt[lv1ent_offset(iova)] += 1;
		goto done;
	}

	/* lv2ent_large(ent) == true here */
	if (WARN_ON(size < LPAGE_SIZE)) {
		err_pgsize = LPAGE_SIZE;
		goto err;
	}

	memset(ent, 0, sizeof(*ent) * SPAGES_PER_LPAGE);
	pgtable_flush(ent, ent + SPAGES_PER_LPAGE);

	size = LPAGE_SIZE;
	priv->lv2entcnt[lv1ent_offset(iova)] += SPAGES_PER_LPAGE;
done:
	spin_unlock_irqrestore(&priv->pgtablelock, flags);

	exynos_iommu_tlb_invalidate_entry(priv, iova, size);

	return size;
err:
	spin_unlock_irqrestore(&priv->pgtablelock, flags);

	pr_err("%s: Failed: size(%#zx) @ %#x is smaller than page size %#zx\n",
		__func__, size, iova, err_pgsize);

	return 0;
}
static phys_addr_t exynos_iommu_iova_to_phys(struct iommu_domain *domain,
					  dma_addr_t iova)
{
	struct exynos_iommu_domain *priv = to_exynos_domain(domain);
	sysmmu_pte_t *entry;
	unsigned long flags;
	phys_addr_t phys = 0;

	spin_lock_irqsave(&priv->pgtablelock, flags);

	entry = section_entry(priv->pgtable, iova);

	if (lv1ent_section(entry)) {
		phys = section_phys(entry) + section_offs(iova);
	} else if (lv1ent_page(entry)) {
		entry = page_entry(entry, iova);

		if (lv2ent_large(entry))
			phys = lpage_phys(entry) + lpage_offs(iova);
		else if (lv2ent_small(entry))
			phys = spage_phys(entry) + spage_offs(iova);
	}

	spin_unlock_irqrestore(&priv->pgtablelock, flags);

	return phys;
}
static int exynos_iommu_add_device(struct device *dev)
{
	struct iommu_group *group;
	int ret;

	group = iommu_group_get(dev);

	if (!group) {
		group = iommu_group_alloc();
		if (IS_ERR(group)) {
			dev_err(dev, "Failed to allocate IOMMU group\n");
			return PTR_ERR(group);
		}
	}

	ret = iommu_group_add_device(group, dev);
	iommu_group_put(group);

	return ret;
}

static void exynos_iommu_remove_device(struct device *dev)
{
	iommu_group_remove_device(dev);
}
static const struct iommu_ops exynos_iommu_ops = {
	.domain_alloc = exynos_iommu_domain_alloc,
	.domain_free = exynos_iommu_domain_free,
	.attach_dev = exynos_iommu_attach_device,
	.detach_dev = exynos_iommu_detach_device,
	.map = exynos_iommu_map,
	.unmap = exynos_iommu_unmap,
	.map_sg = default_iommu_map_sg,
	.iova_to_phys = exynos_iommu_iova_to_phys,
	.add_device = exynos_iommu_add_device,
	.remove_device = exynos_iommu_remove_device,
	.pgsize_bitmap = SECT_SIZE | LPAGE_SIZE | SPAGE_SIZE,
};
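
/*
 * Illustrative note (not from the original source): the IOMMU core splits
 * iommu_map()/iommu_unmap() requests according to pgsize_bitmap, so this
 * driver only ever sees chunks of SECT_SIZE (1MiB), LPAGE_SIZE (64KiB) or
 * SPAGE_SIZE (4KiB). For example, mapping 0x111000 bytes at a 1MiB-aligned
 * IOVA arrives as one 1MiB section, one 64KiB large page and one 4KiB small
 * page (0x100000 + 0x10000 + 0x1000).
 */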
static int __init exynos_iommu_init(void)
{
	struct device_node *np;
	int ret;

	np = of_find_matching_node(NULL, sysmmu_of_match);
	if (!np)
		return 0;

	of_node_put(np);

	lv2table_kmem_cache = kmem_cache_create("exynos-iommu-lv2table",
				LV2TABLE_SIZE, LV2TABLE_SIZE, 0, NULL);
	if (!lv2table_kmem_cache) {
		pr_err("%s: Failed to create kmem cache\n", __func__);
		return -ENOMEM;
	}

	ret = platform_driver_register(&exynos_sysmmu_driver);
	if (ret) {
		pr_err("%s: Failed to register driver\n", __func__);
		goto err_reg_driver;
	}

	zero_lv2_table = kmem_cache_zalloc(lv2table_kmem_cache, GFP_KERNEL);
	if (zero_lv2_table == NULL) {
		pr_err("%s: Failed to allocate zero level2 page table\n",
			__func__);
		ret = -ENOMEM;
		goto err_zero_lv2;
	}

	ret = bus_set_iommu(&platform_bus_type, &exynos_iommu_ops);
	if (ret) {
		pr_err("%s: Failed to register exynos-iommu driver.\n",
								__func__);
		goto err_set_iommu;
	}

	return 0;
err_set_iommu:
	kmem_cache_free(lv2table_kmem_cache, zero_lv2_table);
err_zero_lv2:
	platform_driver_unregister(&exynos_sysmmu_driver);
err_reg_driver:
	kmem_cache_destroy(lv2table_kmem_cache);
	return ret;
}
subsys_initcall(exynos_iommu_init);