/*
 * omap iommu: tlb and pagetable primitives
 *
 * Copyright (C) 2008-2010 Nokia Corporation
 *
 * Written by Hiroshi DOYU <Hiroshi.DOYU@nokia.com>,
 *		Paul Mundt and Toshihiro Kobayashi
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/err.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>
#include <linux/iommu.h>
#include <linux/omap-iommu.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/io.h>
#include <linux/pm_runtime.h>
#include <linux/of.h>
#include <linux/of_iommu.h>
#include <linux/of_irq.h>

#include <asm/cacheflush.h>

#include <linux/platform_data/iommu-omap.h>

#include "omap-iopgtable.h"
#include "omap-iommu.h"
#define for_each_iotlb_cr(obj, n, __i, cr)				\
	for (__i = 0;							\
	     (__i < (n)) && (cr = __iotlb_read_cr((obj), __i), true);	\
	     __i++)
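/*
 * Illustrative use of the iterator above (a minimal sketch; 'obj', 'i' and
 * 'tmp' are hypothetical locals, not part of this file): walk every TLB
 * entry and count the valid ones, the same pattern flush_iotlb_page() and
 * __dump_tlb_entries() use below:
 *
 *	int i, valid = 0;
 *	struct cr_regs tmp;
 *
 *	for_each_iotlb_cr(obj, obj->nr_tlb_entries, i, tmp)
 *		if (iotlb_cr_valid(&tmp))
 *			valid++;
 */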
/* bitmap of the page sizes currently supported */
#define OMAP_IOMMU_PGSIZES	(SZ_4K | SZ_64K | SZ_1M | SZ_16M)
/**
 * struct omap_iommu_domain - omap iommu domain
 * @pgtable:	the page table
 * @iommu_dev:	an omap iommu device attached to this domain. only a single
 *		iommu device can be attached for now.
 * @dev:	Device using this domain.
 * @lock:	domain lock, should be taken when attaching/detaching
 */
struct omap_iommu_domain {
	u32 *pgtable;
	struct omap_iommu *iommu_dev;
	struct device *dev;
	spinlock_t lock;
};
#define MMU_LOCK_BASE_SHIFT	10
#define MMU_LOCK_BASE_MASK	(0x1f << MMU_LOCK_BASE_SHIFT)
#define MMU_LOCK_BASE(x)	\
	((x & MMU_LOCK_BASE_MASK) >> MMU_LOCK_BASE_SHIFT)

#define MMU_LOCK_VICT_SHIFT	4
#define MMU_LOCK_VICT_MASK	(0x1f << MMU_LOCK_VICT_SHIFT)
#define MMU_LOCK_VICT(x)	\
	((x & MMU_LOCK_VICT_MASK) >> MMU_LOCK_VICT_SHIFT)
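/*
 * Worked example of the MMU_LOCK decoding above: with a register value of
 * val = 0x00000c20, MMU_LOCK_BASE(val) picks bits 14:10, giving base = 3,
 * and MMU_LOCK_VICT(val) picks bits 8:4, giving vict = 2. Entries below
 * 'base' are preserved (locked) in the TLB, while 'vict' selects the entry
 * the next TLB load will replace.
 */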
struct iotlb_lock {
	short base;
	short vict;
};

/* accommodate the difference between omap1 and omap2/3 */
static const struct iommu_functions *arch_iommu;

static struct platform_driver omap_iommu_driver;
static struct kmem_cache *iopte_cachep;
/**
 * omap_install_iommu_arch - Install architecture specific iommu functions
 * @ops:	a pointer to architecture specific iommu functions
 *
 * There are several kinds of iommu algorithms (tlb, pagetable) among
 * the omap series. This interface installs such an iommu algorithm.
 **/
int omap_install_iommu_arch(const struct iommu_functions *ops)
{
	if (arch_iommu)
		return -EBUSY;

	arch_iommu = ops;
	return 0;
}
EXPORT_SYMBOL_GPL(omap_install_iommu_arch);
/**
 * omap_uninstall_iommu_arch - Uninstall architecture specific iommu functions
 * @ops:	a pointer to architecture specific iommu functions
 *
 * This interface uninstalls the iommu algorithm installed previously.
 **/
void omap_uninstall_iommu_arch(const struct iommu_functions *ops)
{
	if (arch_iommu != ops)
		pr_err("%s: not your arch\n", __func__);

	arch_iommu = NULL;
}
EXPORT_SYMBOL_GPL(omap_uninstall_iommu_arch);
/**
 * omap_iommu_save_ctx - Save registers for pm off-mode support
 * @dev:	client device
 **/
void omap_iommu_save_ctx(struct device *dev)
{
	struct omap_iommu *obj = dev_to_omap_iommu(dev);

	arch_iommu->save_ctx(obj);
}
EXPORT_SYMBOL_GPL(omap_iommu_save_ctx);
/**
 * omap_iommu_restore_ctx - Restore registers for pm off-mode support
 * @dev:	client device
 **/
void omap_iommu_restore_ctx(struct device *dev)
{
	struct omap_iommu *obj = dev_to_omap_iommu(dev);

	arch_iommu->restore_ctx(obj);
}
EXPORT_SYMBOL_GPL(omap_iommu_restore_ctx);
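/*
 * A minimal usage sketch for the two context helpers above (hypothetical
 * client code, not part of this file): a client such as a camera or
 * remoteproc driver saves the MMU registers before its power domain goes
 * off and restores them afterwards:
 *
 *	omap_iommu_save_ctx(client_dev);
 *	... power domain goes off and comes back ...
 *	omap_iommu_restore_ctx(client_dev);
 *
 * 'client_dev' is the client's struct device, resolved to the iommu via
 * dev_to_omap_iommu() as in the helpers above.
 */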
/**
 * omap_iommu_arch_version - Return running iommu arch version
 **/
u32 omap_iommu_arch_version(void)
{
	return arch_iommu->version;
}
EXPORT_SYMBOL_GPL(omap_iommu_arch_version);
static int iommu_enable(struct omap_iommu *obj)
{
	int err;
	struct platform_device *pdev = to_platform_device(obj->dev);
	struct iommu_platform_data *pdata = pdev->dev.platform_data;

	if (!arch_iommu)
		return -ENODEV;

	if (pdata && pdata->deassert_reset) {
		err = pdata->deassert_reset(pdev, pdata->reset_name);
		if (err) {
			dev_err(obj->dev, "deassert_reset failed: %d\n", err);
			return err;
		}
	}

	pm_runtime_get_sync(obj->dev);

	err = arch_iommu->enable(obj);

	return err;
}
static void iommu_disable(struct omap_iommu *obj)
{
	struct platform_device *pdev = to_platform_device(obj->dev);
	struct iommu_platform_data *pdata = pdev->dev.platform_data;

	arch_iommu->disable(obj);

	pm_runtime_put_sync(obj->dev);

	if (pdata && pdata->assert_reset)
		pdata->assert_reset(pdev, pdata->reset_name);
}
/*
 *	TLB operations
 */
void omap_iotlb_cr_to_e(struct cr_regs *cr, struct iotlb_entry *e)
{
	BUG_ON(!cr || !e);

	arch_iommu->cr_to_e(cr, e);
}
EXPORT_SYMBOL_GPL(omap_iotlb_cr_to_e);
static inline int iotlb_cr_valid(struct cr_regs *cr)
{
	if (!cr)
		return -EINVAL;

	return arch_iommu->cr_valid(cr);
}
static inline struct cr_regs *iotlb_alloc_cr(struct omap_iommu *obj,
					     struct iotlb_entry *e)
{
	if (!e)
		return NULL;

	return arch_iommu->alloc_cr(obj, e);
}
static u32 iotlb_cr_to_virt(struct cr_regs *cr)
{
	return arch_iommu->cr_to_virt(cr);
}

static u32 get_iopte_attr(struct iotlb_entry *e)
{
	return arch_iommu->get_pte_attr(e);
}

static u32 iommu_report_fault(struct omap_iommu *obj, u32 *da)
{
	return arch_iommu->fault_isr(obj, da);
}
static void iotlb_lock_get(struct omap_iommu *obj, struct iotlb_lock *l)
{
	u32 val;

	val = iommu_read_reg(obj, MMU_LOCK);

	l->base = MMU_LOCK_BASE(val);
	l->vict = MMU_LOCK_VICT(val);
}
static void iotlb_lock_set(struct omap_iommu *obj, struct iotlb_lock *l)
{
	u32 val;

	val = (l->base << MMU_LOCK_BASE_SHIFT);
	val |= (l->vict << MMU_LOCK_VICT_SHIFT);

	iommu_write_reg(obj, val, MMU_LOCK);
}
static void iotlb_read_cr(struct omap_iommu *obj, struct cr_regs *cr)
{
	arch_iommu->tlb_read_cr(obj, cr);
}

static void iotlb_load_cr(struct omap_iommu *obj, struct cr_regs *cr)
{
	arch_iommu->tlb_load_cr(obj, cr);

	iommu_write_reg(obj, 1, MMU_FLUSH_ENTRY);
	iommu_write_reg(obj, 1, MMU_LD_TLB);
}
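/*
 * Note on the sequence above: tlb_load_cr() programs the CAM/RAM register
 * pair, MMU_FLUSH_ENTRY invalidates any stale entry matching the CAM, and
 * MMU_LD_TLB then latches the new CAM/RAM contents into the TLB entry
 * selected by the lock register's victim index.
 */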
/**
 * iotlb_dump_cr - Dump an iommu tlb entry into buf
 * @obj:	target iommu
 * @cr:		contents of cam and ram register
 * @buf:	output buffer
 **/
static inline ssize_t iotlb_dump_cr(struct omap_iommu *obj, struct cr_regs *cr,
				    char *buf)
{
	BUG_ON(!cr || !buf);

	return arch_iommu->dump_cr(obj, cr, buf);
}
/* only used in iotlb iteration for-loop */
static struct cr_regs __iotlb_read_cr(struct omap_iommu *obj, int n)
{
	struct cr_regs cr;
	struct iotlb_lock l;

	iotlb_lock_get(obj, &l);
	l.vict = n;
	iotlb_lock_set(obj, &l);
	iotlb_read_cr(obj, &cr);

	return cr;
}
/**
 * load_iotlb_entry - Set an iommu tlb entry
 * @obj:	target iommu
 * @e:		an iommu tlb entry info
 **/
#ifdef PREFETCH_IOTLB
static int load_iotlb_entry(struct omap_iommu *obj, struct iotlb_entry *e)
{
	int err = 0;
	struct iotlb_lock l;
	struct cr_regs *cr;

	if (!obj || !obj->nr_tlb_entries || !e)
		return -EINVAL;

	pm_runtime_get_sync(obj->dev);

	iotlb_lock_get(obj, &l);
	if (l.base == obj->nr_tlb_entries) {
		dev_warn(obj->dev, "%s: preserve entries full\n", __func__);
		err = -EBUSY;
		goto out;
	}
	if (!e->prsvd) {
		int i;
		struct cr_regs tmp;

		for_each_iotlb_cr(obj, obj->nr_tlb_entries, i, tmp)
			if (!iotlb_cr_valid(&tmp))
				break;

		if (i == obj->nr_tlb_entries) {
			dev_dbg(obj->dev, "%s: full: no entry\n", __func__);
			err = -EBUSY;
			goto out;
		}

		iotlb_lock_get(obj, &l);
	} else {
		l.vict = l.base;
		iotlb_lock_set(obj, &l);
	}

	cr = iotlb_alloc_cr(obj, e);
	if (IS_ERR(cr)) {
		pm_runtime_put_sync(obj->dev);
		return PTR_ERR(cr);
	}

	iotlb_load_cr(obj, cr);
	kfree(cr);

	if (e->prsvd)
		l.base++;
	/* increment victim for next tlb load */
	if (++l.vict == obj->nr_tlb_entries)
		l.vict = l.base;
	iotlb_lock_set(obj, &l);
out:
	pm_runtime_put_sync(obj->dev);
	return err;
}
#else	/* !PREFETCH_IOTLB */

static int load_iotlb_entry(struct omap_iommu *obj, struct iotlb_entry *e)
{
	return 0;
}

#endif	/* !PREFETCH_IOTLB */

static int prefetch_iotlb_entry(struct omap_iommu *obj, struct iotlb_entry *e)
{
	return load_iotlb_entry(obj, e);
}
/**
 * flush_iotlb_page - Clear an iommu tlb entry
 * @obj:	target iommu
 * @da:		iommu device virtual address
 *
 * Clear an iommu tlb entry which includes 'da' address.
 **/
static void flush_iotlb_page(struct omap_iommu *obj, u32 da)
{
	int i;
	struct cr_regs cr;

	pm_runtime_get_sync(obj->dev);

	for_each_iotlb_cr(obj, obj->nr_tlb_entries, i, cr) {
		u32 start;
		size_t bytes;

		if (!iotlb_cr_valid(&cr))
			continue;

		start = iotlb_cr_to_virt(&cr);
		bytes = iopgsz_to_bytes(cr.cam & 3);

		if ((start <= da) && (da < start + bytes)) {
			dev_dbg(obj->dev, "%s: %08x<=%08x(%zx)\n",
				__func__, start, da, bytes);
			iotlb_load_cr(obj, &cr);
			iommu_write_reg(obj, 1, MMU_FLUSH_ENTRY);
			break;
		}
	}
	pm_runtime_put_sync(obj->dev);

	if (i == obj->nr_tlb_entries)
		dev_dbg(obj->dev, "%s: no page for %08x\n", __func__, da);
}
/**
 * flush_iotlb_all - Clear all iommu tlb entries
 * @obj:	target iommu
 **/
static void flush_iotlb_all(struct omap_iommu *obj)
{
	struct iotlb_lock l;

	pm_runtime_get_sync(obj->dev);

	l.base = 0;
	l.vict = 0;
	iotlb_lock_set(obj, &l);

	iommu_write_reg(obj, 1, MMU_GFLUSH);

	pm_runtime_put_sync(obj->dev);
}
#if defined(CONFIG_OMAP_IOMMU_DEBUG) || defined(CONFIG_OMAP_IOMMU_DEBUG_MODULE)

ssize_t omap_iommu_dump_ctx(struct omap_iommu *obj, char *buf, ssize_t bytes)
{
	if (!obj || !buf)
		return -EINVAL;

	pm_runtime_get_sync(obj->dev);

	bytes = arch_iommu->dump_ctx(obj, buf, bytes);

	pm_runtime_put_sync(obj->dev);

	return bytes;
}
EXPORT_SYMBOL_GPL(omap_iommu_dump_ctx);
static int
__dump_tlb_entries(struct omap_iommu *obj, struct cr_regs *crs, int num)
{
	int i;
	struct iotlb_lock saved;
	struct cr_regs tmp;
	struct cr_regs *p = crs;

	pm_runtime_get_sync(obj->dev);
	iotlb_lock_get(obj, &saved);

	for_each_iotlb_cr(obj, num, i, tmp) {
		if (!iotlb_cr_valid(&tmp))
			continue;
		*p++ = tmp;
	}

	iotlb_lock_set(obj, &saved);
	pm_runtime_put_sync(obj->dev);

	return p - crs;
}
/**
 * omap_dump_tlb_entries - dump cr arrays to given buffer
 * @obj:	target iommu
 * @buf:	output buffer
 **/
size_t omap_dump_tlb_entries(struct omap_iommu *obj, char *buf, ssize_t bytes)
{
	int i, num;
	struct cr_regs *cr;
	char *p = buf;

	num = bytes / sizeof(*cr);
	num = min(obj->nr_tlb_entries, num);

	cr = kcalloc(num, sizeof(*cr), GFP_KERNEL);
	if (!cr)
		return 0;

	num = __dump_tlb_entries(obj, cr, num);
	for (i = 0; i < num; i++)
		p += iotlb_dump_cr(obj, cr + i, p);
	kfree(cr);

	return p - buf;
}
EXPORT_SYMBOL_GPL(omap_dump_tlb_entries);
int omap_foreach_iommu_device(void *data, int (*fn)(struct device *, void *))
{
	return driver_for_each_device(&omap_iommu_driver.driver,
				      NULL, data, fn);
}
EXPORT_SYMBOL_GPL(omap_foreach_iommu_device);

#endif /* CONFIG_OMAP_IOMMU_DEBUG_MODULE */
/*
 *	H/W pagetable operations
 */
static void flush_iopgd_range(u32 *first, u32 *last)
{
	/* FIXME: L2 cache should be taken care of if it exists */
	do {
		/* clean the D-cache line holding this pgd entry (DCCMVAC) */
		asm("mcr	p15, 0, %0, c7, c10, 1 @ flush_pgd"
		    : : "r" (first));
		first += L1_CACHE_BYTES / sizeof(*first);
	} while (first <= last);
}
static void flush_iopte_range(u32 *first, u32 *last)
{
	/* FIXME: L2 cache should be taken care of if it exists */
	do {
		/* clean the D-cache line holding this pte entry (DCCMVAC) */
		asm("mcr	p15, 0, %0, c7, c10, 1 @ flush_pte"
		    : : "r" (first));
		first += L1_CACHE_BYTES / sizeof(*first);
	} while (first <= last);
}
static void iopte_free(u32 *iopte)
{
	/* Note: freed iopte's must be clean ready for re-use */
	kmem_cache_free(iopte_cachep, iopte);
}
static u32 *iopte_alloc(struct omap_iommu *obj, u32 *iopgd, u32 da)
{
	u32 *iopte;

	/* a table has already existed */
	if (*iopgd)
		goto pte_ready;

	/*
	 * do the allocation outside the page table lock
	 */
	spin_unlock(&obj->page_table_lock);
	iopte = kmem_cache_zalloc(iopte_cachep, GFP_KERNEL);
	spin_lock(&obj->page_table_lock);

	if (!*iopgd) {
		if (!iopte)
			return ERR_PTR(-ENOMEM);

		*iopgd = virt_to_phys(iopte) | IOPGD_TABLE;
		flush_iopgd_range(iopgd, iopgd);

		dev_vdbg(obj->dev, "%s: a new pte:%p\n", __func__, iopte);
	} else {
		/* We raced, free the redundant table */
		iopte_free(iopte);
	}

pte_ready:
	iopte = iopte_offset(iopgd, da);

	dev_vdbg(obj->dev,
		 "%s: da:%08x pgd:%p *pgd:%08x pte:%p *pte:%08x\n",
		 __func__, da, iopgd, *iopgd, iopte, *iopte);

	return iopte;
}
static int iopgd_alloc_section(struct omap_iommu *obj, u32 da, u32 pa, u32 prot)
{
	u32 *iopgd = iopgd_offset(obj, da);

	if ((da | pa) & ~IOSECTION_MASK) {
		dev_err(obj->dev, "%s: %08x:%08x should be aligned on %08lx\n",
			__func__, da, pa, IOSECTION_SIZE);
		return -EINVAL;
	}

	*iopgd = (pa & IOSECTION_MASK) | prot | IOPGD_SECTION;
	flush_iopgd_range(iopgd, iopgd);
	return 0;
}
static int iopgd_alloc_super(struct omap_iommu *obj, u32 da, u32 pa, u32 prot)
{
	u32 *iopgd = iopgd_offset(obj, da);
	int i;

	if ((da | pa) & ~IOSUPER_MASK) {
		dev_err(obj->dev, "%s: %08x:%08x should be aligned on %08lx\n",
			__func__, da, pa, IOSUPER_SIZE);
		return -EINVAL;
	}

	for (i = 0; i < 16; i++)
		*(iopgd + i) = (pa & IOSUPER_MASK) | prot | IOPGD_SUPER;
	flush_iopgd_range(iopgd, iopgd + 15);
	return 0;
}
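/*
 * Why 16 entries above: a 16MB supersection spans 16 consecutive 1MB
 * first-level slots, so the same descriptor must be replicated into all
 * 16 pgd entries. For example, da = 0x41000000 lands in slot 0x410, and
 * slots 0x410..0x41f all receive (pa & IOSUPER_MASK) | prot | IOPGD_SUPER.
 */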
static int iopte_alloc_page(struct omap_iommu *obj, u32 da, u32 pa, u32 prot)
{
	u32 *iopgd = iopgd_offset(obj, da);
	u32 *iopte = iopte_alloc(obj, iopgd, da);

	if (IS_ERR(iopte))
		return PTR_ERR(iopte);

	*iopte = (pa & IOPAGE_MASK) | prot | IOPTE_SMALL;
	flush_iopte_range(iopte, iopte);

	dev_vdbg(obj->dev, "%s: da:%08x pa:%08x pte:%p *pte:%08x\n",
		 __func__, da, pa, iopte, *iopte);

	return 0;
}
static int iopte_alloc_large(struct omap_iommu *obj, u32 da, u32 pa, u32 prot)
{
	u32 *iopgd = iopgd_offset(obj, da);
	u32 *iopte = iopte_alloc(obj, iopgd, da);
	int i;

	if ((da | pa) & ~IOLARGE_MASK) {
		dev_err(obj->dev, "%s: %08x:%08x should be aligned on %08lx\n",
			__func__, da, pa, IOLARGE_SIZE);
		return -EINVAL;
	}

	if (IS_ERR(iopte))
		return PTR_ERR(iopte);

	for (i = 0; i < 16; i++)
		*(iopte + i) = (pa & IOLARGE_MASK) | prot | IOPTE_LARGE;
	flush_iopte_range(iopte, iopte + 15);
	return 0;
}
static int
iopgtable_store_entry_core(struct omap_iommu *obj, struct iotlb_entry *e)
{
	int (*fn)(struct omap_iommu *, u32, u32, u32);
	u32 prot;
	int err;

	if (!obj || !e)
		return -EINVAL;

	switch (e->pgsz) {
	case MMU_CAM_PGSZ_16M:
		fn = iopgd_alloc_super;
		break;
	case MMU_CAM_PGSZ_1M:
		fn = iopgd_alloc_section;
		break;
	case MMU_CAM_PGSZ_64K:
		fn = iopte_alloc_large;
		break;
	case MMU_CAM_PGSZ_4K:
		fn = iopte_alloc_page;
		break;
	default:
		fn = NULL;
		BUG();
		break;
	}

	prot = get_iopte_attr(e);

	spin_lock(&obj->page_table_lock);
	err = fn(obj, e->da, e->pa, prot);
	spin_unlock(&obj->page_table_lock);

	return err;
}
/**
 * omap_iopgtable_store_entry - Make an iommu pte entry
 * @obj:	target iommu
 * @e:		an iommu tlb entry info
 **/
int omap_iopgtable_store_entry(struct omap_iommu *obj, struct iotlb_entry *e)
{
	int err;

	flush_iotlb_page(obj, e->da);
	err = iopgtable_store_entry_core(obj, e);
	if (!err)
		prefetch_iotlb_entry(obj, e);
	return err;
}
EXPORT_SYMBOL_GPL(omap_iopgtable_store_entry);
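/*
 * A minimal sketch of how this entry point is driven (compare
 * omap_iommu_map() below, which does exactly this): build an entry from
 * da/pa plus page-size and attribute flags, then store it. 'oiommu', 'da',
 * 'pa' and 'flags' are hypothetical locals here:
 *
 *	struct iotlb_entry e;
 *	int err;
 *
 *	iotlb_init_entry(&e, da, pa, flags);
 *	err = omap_iopgtable_store_entry(oiommu, &e);
 *	if (err)
 *		dev_err(oiommu->dev, "store_entry failed: %d\n", err);
 */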
/**
 * iopgtable_lookup_entry - Lookup an iommu pte entry
 * @obj:	target iommu
 * @da:		iommu device virtual address
 * @ppgd:	iommu pgd entry pointer to be returned
 * @ppte:	iommu pte entry pointer to be returned
 **/
static void
iopgtable_lookup_entry(struct omap_iommu *obj, u32 da, u32 **ppgd, u32 **ppte)
{
	u32 *iopgd, *iopte = NULL;

	iopgd = iopgd_offset(obj, da);
	if (!*iopgd)
		goto out;

	if (iopgd_is_table(*iopgd))
		iopte = iopte_offset(iopgd, da);
out:
	*ppgd = iopgd;
	*ppte = iopte;
}
static size_t iopgtable_clear_entry_core(struct omap_iommu *obj, u32 da)
{
	size_t bytes;
	u32 *iopgd = iopgd_offset(obj, da);
	int nent = 1;

	if (!*iopgd)
		return 0;

	if (iopgd_is_table(*iopgd)) {
		int i;
		u32 *iopte = iopte_offset(iopgd, da);

		bytes = IOPTE_SIZE;
		if (*iopte & IOPTE_LARGE) {
			nent *= 16;
			/* rewind to the 1st entry */
			iopte = iopte_offset(iopgd, (da & IOLARGE_MASK));
		}
		bytes *= nent;
		memset(iopte, 0, nent * sizeof(*iopte));
		/* pointer arithmetic already scales by sizeof(*iopte) */
		flush_iopte_range(iopte, iopte + nent - 1);

		/*
		 * do table walk to check if this table is necessary or not
		 */
		iopte = iopte_offset(iopgd, 0);
		for (i = 0; i < PTRS_PER_IOPTE; i++)
			if (iopte[i])
				goto out;

		iopte_free(iopte);
		nent = 1; /* for the next L1 entry */
	} else {
		bytes = IOPGD_SIZE;
		if ((*iopgd & IOPGD_SUPER) == IOPGD_SUPER) {
			nent *= 16;
			/* rewind to the 1st entry */
			iopgd = iopgd_offset(obj, (da & IOSUPER_MASK));
		}
		bytes *= nent;
	}
	memset(iopgd, 0, nent * sizeof(*iopgd));
	flush_iopgd_range(iopgd, iopgd + nent - 1);
out:
	return bytes;
}
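/*
 * Worked example for the unmap accounting above: clearing a 64KB large
 * page sets nent = 16, rewinds 'iopte' to the first of the 16 identical
 * PTEs, zeroes all 16, and returns bytes = IOPTE_SIZE * 16 = 64KB; a 16MB
 * supersection likewise clears 16 pgd slots and returns
 * IOPGD_SIZE * 16 = 16MB.
 */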
/**
 * iopgtable_clear_entry - Remove an iommu pte entry
 * @obj:	target iommu
 * @da:		iommu device virtual address
 **/
static size_t iopgtable_clear_entry(struct omap_iommu *obj, u32 da)
{
	size_t bytes;

	spin_lock(&obj->page_table_lock);

	bytes = iopgtable_clear_entry_core(obj, da);
	flush_iotlb_page(obj, da);

	spin_unlock(&obj->page_table_lock);

	return bytes;
}
static void iopgtable_clear_entry_all(struct omap_iommu *obj)
{
	int i;

	spin_lock(&obj->page_table_lock);

	for (i = 0; i < PTRS_PER_IOPGD; i++) {
		u32 da;
		u32 *iopgd;

		da = i << IOPGD_SHIFT;
		iopgd = iopgd_offset(obj, da);

		if (!*iopgd)
			continue;

		if (iopgd_is_table(*iopgd))
			iopte_free(iopte_offset(iopgd, 0));

		*iopgd = 0;
		flush_iopgd_range(iopgd, iopgd);
	}

	flush_iotlb_all(obj);

	spin_unlock(&obj->page_table_lock);
}
/*
 *	Device IOMMU generic operations
 */
static irqreturn_t iommu_fault_handler(int irq, void *data)
{
	u32 da, errs;
	u32 *iopgd, *iopte;
	struct omap_iommu *obj = data;
	struct iommu_domain *domain = obj->domain;

	if (!obj->refcount)
		return IRQ_NONE;

	errs = iommu_report_fault(obj, &da);
	if (errs == 0)
		return IRQ_HANDLED;

	/* Fault callback or TLB/PTE Dynamic loading */
	if (!report_iommu_fault(domain, obj->dev, da, 0))
		return IRQ_HANDLED;

	iommu_disable(obj);

	iopgd = iopgd_offset(obj, da);

	if (!iopgd_is_table(*iopgd)) {
		dev_err(obj->dev, "%s: errs:0x%08x da:0x%08x pgd:0x%p *pgd:0x%08x\n",
			obj->name, errs, da, iopgd, *iopgd);
		return IRQ_NONE;
	}

	iopte = iopte_offset(iopgd, da);

	dev_err(obj->dev, "%s: errs:0x%08x da:0x%08x pgd:0x%p *pgd:0x%08x pte:0x%p *pte:0x%08x\n",
		obj->name, errs, da, iopgd, *iopgd, iopte, *iopte);

	return IRQ_NONE;
}
static int device_match_by_alias(struct device *dev, void *data)
{
	struct omap_iommu *obj = to_iommu(dev);
	const char *name = data;

	pr_debug("%s: %s %s\n", __func__, obj->name, name);

	return strcmp(obj->name, name) == 0;
}
/**
 * omap_iommu_attach() - attach iommu device to an iommu domain
 * @name:	name of target omap iommu device
 * @iopgd:	page table
 **/
static struct omap_iommu *omap_iommu_attach(const char *name, u32 *iopgd)
{
	int err = -ENOMEM;
	struct device *dev;
	struct omap_iommu *obj;

	dev = driver_find_device(&omap_iommu_driver.driver, NULL,
				 (void *)name,
				 device_match_by_alias);
	if (!dev)
		return ERR_PTR(-ENODEV);

	obj = to_iommu(dev);

	spin_lock(&obj->iommu_lock);

	/* an iommu device can only be attached once */
	if (++obj->refcount > 1) {
		dev_err(dev, "%s: already attached!\n", obj->name);
		err = -EBUSY;
		goto err_enable;
	}

	obj->iopgd = iopgd;
	err = iommu_enable(obj);
	if (err)
		goto err_enable;
	flush_iotlb_all(obj);

	if (!try_module_get(obj->owner)) {
		err = -ENODEV;
		goto err_module;
	}

	spin_unlock(&obj->iommu_lock);

	dev_dbg(obj->dev, "%s: %s\n", __func__, obj->name);
	return obj;

err_module:
	if (obj->refcount == 1)
		iommu_disable(obj);
err_enable:
	obj->refcount--;
	spin_unlock(&obj->iommu_lock);
	return ERR_PTR(err);
}
/**
 * omap_iommu_detach - release iommu device
 * @obj:	target iommu
 **/
static void omap_iommu_detach(struct omap_iommu *obj)
{
	if (!obj || IS_ERR(obj))
		return;

	spin_lock(&obj->iommu_lock);

	if (--obj->refcount == 0)
		iommu_disable(obj);

	module_put(obj->owner);

	obj->iopgd = NULL;

	spin_unlock(&obj->iommu_lock);

	dev_dbg(obj->dev, "%s: %s\n", __func__, obj->name);
}
/*
 *	OMAP Device MMU(IOMMU) detection
 */
static int omap_iommu_probe(struct platform_device *pdev)
{
	int err = -ENODEV;
	int irq;
	struct omap_iommu *obj;
	struct resource *res;
	struct iommu_platform_data *pdata = pdev->dev.platform_data;
	struct device_node *of = pdev->dev.of_node;

	obj = devm_kzalloc(&pdev->dev, sizeof(*obj) + MMU_REG_SIZE, GFP_KERNEL);
	if (!obj)
		return -ENOMEM;

	if (of) {
		obj->name = dev_name(&pdev->dev);
		obj->nr_tlb_entries = 32;
		err = of_property_read_u32(of, "ti,#tlb-entries",
					   &obj->nr_tlb_entries);
		if (err && err != -EINVAL)
			return err;
		if (obj->nr_tlb_entries != 32 && obj->nr_tlb_entries != 8)
			return -EINVAL;
		/*
		 * da_start and da_end are needed for omap-iovmm, so hardcode
		 * these values as used by OMAP3 ISP - the only user for
		 * omap-iovmm
		 */
		obj->da_start = 0;
		obj->da_end = 0xfffff000;
	} else {
		obj->nr_tlb_entries = pdata->nr_tlb_entries;
		obj->name = pdata->name;
		obj->da_start = pdata->da_start;
		obj->da_end = pdata->da_end;
	}
	if (obj->da_end <= obj->da_start)
		return -EINVAL;

	obj->dev = &pdev->dev;
	obj->ctx = (void *)obj + sizeof(*obj);

	spin_lock_init(&obj->iommu_lock);
	mutex_init(&obj->mmap_lock);
	spin_lock_init(&obj->page_table_lock);
	INIT_LIST_HEAD(&obj->mmap);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	obj->regbase = devm_ioremap_resource(obj->dev, res);
	if (IS_ERR(obj->regbase))
		return PTR_ERR(obj->regbase);

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return -ENODEV;

	err = devm_request_irq(obj->dev, irq, iommu_fault_handler, IRQF_SHARED,
			       dev_name(obj->dev), obj);
	if (err < 0)
		return err;
	platform_set_drvdata(pdev, obj);

	pm_runtime_irq_safe(obj->dev);
	pm_runtime_enable(obj->dev);

	dev_info(&pdev->dev, "%s registered\n", obj->name);
	return 0;
}
static int omap_iommu_remove(struct platform_device *pdev)
{
	struct omap_iommu *obj = platform_get_drvdata(pdev);

	iopgtable_clear_entry_all(obj);

	pm_runtime_disable(obj->dev);

	dev_info(&pdev->dev, "%s removed\n", obj->name);
	return 0;
}
static const struct of_device_id omap_iommu_of_match[] = {
	{ .compatible = "ti,omap2-iommu" },
	{ .compatible = "ti,omap4-iommu" },
	{ .compatible = "ti,dra7-iommu"	},
	{},
};
MODULE_DEVICE_TABLE(of, omap_iommu_of_match);
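/*
 * A device tree node matched by the table above might look like the
 * following sketch (illustrative only; the register/interrupt values are
 * hypothetical, while the properties are the ones omap_iommu_probe()
 * actually reads):
 *
 *	mmu_isp: mmu@480bd400 {
 *		compatible = "ti,omap4-iommu";
 *		reg = <0x480bd400 0x100>;
 *		interrupts = <100>;
 *		ti,#tlb-entries = <32>;
 *	};
 */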
static struct platform_driver omap_iommu_driver = {
	.probe	= omap_iommu_probe,
	.remove	= omap_iommu_remove,
	.driver	= {
		.name	= "omap-iommu",
		.of_match_table = of_match_ptr(omap_iommu_of_match),
	},
};
static void iopte_cachep_ctor(void *iopte)
{
	clean_dcache_area(iopte, IOPTE_TABLE_SIZE);
}
static u32 iotlb_init_entry(struct iotlb_entry *e, u32 da, u32 pa,
			    u32 flags)
{
	memset(e, 0, sizeof(*e));

	e->da		= da;
	e->pa		= pa;
	e->valid	= MMU_CAM_V;
	/* FIXME: add OMAP1 support */
	e->pgsz		= flags & MMU_CAM_PGSZ_MASK;
	e->endian	= flags & MMU_RAM_ENDIAN_MASK;
	e->elsz		= flags & MMU_RAM_ELSZ_MASK;
	e->mixed	= flags & MMU_RAM_MIXED_MASK;

	return iopgsz_to_bytes(e->pgsz);
}
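/*
 * Sketch of how 'flags' is composed for the helper above (compare
 * omap_iommu_map() below): the page-size bits come from bytes_to_iopgsz()
 * and the RAM attribute bits from the caller's prot. Assuming the usual
 * CAM/RAM bit definitions from omap-iommu.h, e.g.:
 *
 *	u32 flags = MMU_CAM_PGSZ_4K | MMU_RAM_ENDIAN_LITTLE | MMU_RAM_ELSZ_8;
 *
 *	iotlb_init_entry(&e, da, pa, flags);
 */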
static int omap_iommu_map(struct iommu_domain *domain, unsigned long da,
			  phys_addr_t pa, size_t bytes, int prot)
{
	struct omap_iommu_domain *omap_domain = domain->priv;
	struct omap_iommu *oiommu = omap_domain->iommu_dev;
	struct device *dev = oiommu->dev;
	struct iotlb_entry e;
	int omap_pgsz;
	int ret;
	u32 flags;

	/* we only support mapping a single iommu page for now */
	omap_pgsz = bytes_to_iopgsz(bytes);
	if (omap_pgsz < 0) {
		dev_err(dev, "invalid size to map: %zu\n", bytes);
		return -EINVAL;
	}

	dev_dbg(dev, "mapping da 0x%lx to pa %pa size 0x%zx\n", da, &pa, bytes);

	flags = omap_pgsz | prot;

	iotlb_init_entry(&e, da, pa, flags);

	ret = omap_iopgtable_store_entry(oiommu, &e);
	if (ret)
		dev_err(dev, "omap_iopgtable_store_entry failed: %d\n", ret);

	return ret;
}
static size_t omap_iommu_unmap(struct iommu_domain *domain, unsigned long da,
			       size_t size)
{
	struct omap_iommu_domain *omap_domain = domain->priv;
	struct omap_iommu *oiommu = omap_domain->iommu_dev;
	struct device *dev = oiommu->dev;

	dev_dbg(dev, "unmapping da 0x%lx size %zu\n", da, size);

	return iopgtable_clear_entry(oiommu, da);
}
static int
omap_iommu_attach_dev(struct iommu_domain *domain, struct device *dev)
{
	struct omap_iommu_domain *omap_domain = domain->priv;
	struct omap_iommu *oiommu;
	struct omap_iommu_arch_data *arch_data = dev->archdata.iommu;
	int ret = 0;

	spin_lock(&omap_domain->lock);

	/* only a single device is supported per domain for now */
	if (omap_domain->iommu_dev) {
		dev_err(dev, "iommu domain is already attached\n");
		ret = -EBUSY;
		goto out;
	}

	/* get a handle to and enable the omap iommu */
	oiommu = omap_iommu_attach(arch_data->name, omap_domain->pgtable);
	if (IS_ERR(oiommu)) {
		ret = PTR_ERR(oiommu);
		dev_err(dev, "can't get omap iommu: %d\n", ret);
		goto out;
	}

	omap_domain->iommu_dev = arch_data->iommu_dev = oiommu;
	omap_domain->dev = dev;
	oiommu->domain = domain;

out:
	spin_unlock(&omap_domain->lock);
	return ret;
}
static void _omap_iommu_detach_dev(struct omap_iommu_domain *omap_domain,
				   struct device *dev)
{
	struct omap_iommu *oiommu = dev_to_omap_iommu(dev);
	struct omap_iommu_arch_data *arch_data = dev->archdata.iommu;

	/* only a single device is supported per domain for now */
	if (omap_domain->iommu_dev != oiommu) {
		dev_err(dev, "invalid iommu device\n");
		return;
	}

	iopgtable_clear_entry_all(oiommu);

	omap_iommu_detach(oiommu);

	omap_domain->iommu_dev = arch_data->iommu_dev = NULL;
	omap_domain->dev = NULL;
}
static void omap_iommu_detach_dev(struct iommu_domain *domain,
				  struct device *dev)
{
	struct omap_iommu_domain *omap_domain = domain->priv;

	spin_lock(&omap_domain->lock);
	_omap_iommu_detach_dev(omap_domain, dev);
	spin_unlock(&omap_domain->lock);
}
static int omap_iommu_domain_init(struct iommu_domain *domain)
{
	struct omap_iommu_domain *omap_domain;

	omap_domain = kzalloc(sizeof(*omap_domain), GFP_KERNEL);
	if (!omap_domain) {
		pr_err("kzalloc failed\n");
		goto out;
	}

	omap_domain->pgtable = kzalloc(IOPGD_TABLE_SIZE, GFP_KERNEL);
	if (!omap_domain->pgtable) {
		pr_err("kzalloc failed\n");
		goto fail_nomem;
	}

	/*
	 * should never fail, but please keep this around to ensure
	 * we keep the hardware happy
	 */
	BUG_ON(!IS_ALIGNED((long)omap_domain->pgtable, IOPGD_TABLE_SIZE));

	clean_dcache_area(omap_domain->pgtable, IOPGD_TABLE_SIZE);
	spin_lock_init(&omap_domain->lock);

	domain->priv = omap_domain;

	domain->geometry.aperture_start = 0;
	domain->geometry.aperture_end	= (1ULL << 32) - 1;
	domain->geometry.force_aperture = true;

	return 0;

fail_nomem:
	kfree(omap_domain);
out:
	return -ENOMEM;
}
static void omap_iommu_domain_destroy(struct iommu_domain *domain)
{
	struct omap_iommu_domain *omap_domain = domain->priv;

	domain->priv = NULL;

	/*
	 * An iommu device is still attached
	 * (currently, only one device can be attached) ?
	 */
	if (omap_domain->iommu_dev)
		_omap_iommu_detach_dev(omap_domain, omap_domain->dev);

	kfree(omap_domain->pgtable);
	kfree(omap_domain);
}
static phys_addr_t omap_iommu_iova_to_phys(struct iommu_domain *domain,
					   dma_addr_t da)
{
	struct omap_iommu_domain *omap_domain = domain->priv;
	struct omap_iommu *oiommu = omap_domain->iommu_dev;
	struct device *dev = oiommu->dev;
	u32 *pgd, *pte;
	phys_addr_t ret = 0;

	iopgtable_lookup_entry(oiommu, da, &pgd, &pte);

	if (pte) {
		if (iopte_is_small(*pte))
			ret = omap_iommu_translate(*pte, da, IOPTE_MASK);
		else if (iopte_is_large(*pte))
			ret = omap_iommu_translate(*pte, da, IOLARGE_MASK);
		else
			dev_err(dev, "bogus pte 0x%x, da 0x%llx", *pte,
				(unsigned long long)da);
	} else {
		if (iopgd_is_section(*pgd))
			ret = omap_iommu_translate(*pgd, da, IOSECTION_MASK);
		else if (iopgd_is_super(*pgd))
			ret = omap_iommu_translate(*pgd, da, IOSUPER_MASK);
		else
			dev_err(dev, "bogus pgd 0x%x, da 0x%llx", *pgd,
				(unsigned long long)da);
	}

	return ret;
}
static int omap_iommu_domain_has_cap(struct iommu_domain *domain,
				     unsigned long cap)
{
	return 0;
}
static struct iommu_ops omap_iommu_ops = {
	.domain_init	= omap_iommu_domain_init,
	.domain_destroy	= omap_iommu_domain_destroy,
	.attach_dev	= omap_iommu_attach_dev,
	.detach_dev	= omap_iommu_detach_dev,
	.map		= omap_iommu_map,
	.unmap		= omap_iommu_unmap,
	.iova_to_phys	= omap_iommu_iova_to_phys,
	.domain_has_cap	= omap_iommu_domain_has_cap,
	.pgsize_bitmap	= OMAP_IOMMU_PGSIZES,
};
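/*
 * These callbacks are reached through the generic IOMMU API; a minimal
 * consumer sketch (hypothetical device and addresses) looks like:
 *
 *	struct iommu_domain *domain;
 *
 *	domain = iommu_domain_alloc(&platform_bus_type);
 *	iommu_attach_device(domain, client_dev);
 *	iommu_map(domain, 0x10000000, phys, SZ_4K, IOMMU_READ | IOMMU_WRITE);
 *	...
 *	iommu_unmap(domain, 0x10000000, SZ_4K);
 *	iommu_detach_device(domain, client_dev);
 *	iommu_domain_free(domain);
 *
 * which lands in omap_iommu_domain_init(), omap_iommu_attach_dev(),
 * omap_iommu_map() and friends above.
 */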
static int __init omap_iommu_init(void)
{
	struct kmem_cache *p;
	const unsigned long flags = SLAB_HWCACHE_ALIGN;
	size_t align = 1 << 10; /* L2 pagetable alignment */

	p = kmem_cache_create("iopte_cache", IOPTE_TABLE_SIZE, align, flags,
			      iopte_cachep_ctor);
	if (!p)
		return -ENOMEM;
	iopte_cachep = p;

	bus_set_iommu(&platform_bus_type, &omap_iommu_ops);

	return platform_driver_register(&omap_iommu_driver);
}
/* must be ready before omap3isp is probed */
subsys_initcall(omap_iommu_init);
static void __exit omap_iommu_exit(void)
{
	kmem_cache_destroy(iopte_cachep);

	platform_driver_unregister(&omap_iommu_driver);
}
module_exit(omap_iommu_exit);
MODULE_DESCRIPTION("omap iommu: tlb and pagetable primitives");
MODULE_ALIAS("platform:omap-iommu");
MODULE_AUTHOR("Hiroshi DOYU, Paul Mundt and Toshihiro Kobayashi");
MODULE_LICENSE("GPL v2");