/*
 * omap iommu: tlb and pagetable primitives
 *
 * Copyright (C) 2008-2010 Nokia Corporation
 *
 * Written by Hiroshi DOYU <Hiroshi.DOYU@nokia.com>,
 *		Paul Mundt and Toshihiro Kobayashi
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/err.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/clk.h>
#include <linux/platform_device.h>
#include <linux/iommu.h>
#include <linux/omap-iommu.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>

#include <asm/cacheflush.h>

#include <plat/iommu.h>

#include "omap-iopgtable.h"
#define for_each_iotlb_cr(obj, n, __i, cr)				\
	for (__i = 0;							\
	     (__i < (n)) && (cr = __iotlb_read_cr((obj), __i), true);	\
	     __i++)
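/*
 * Typical use of the iterator above: walk every TLB entry, skipping
 * invalid ones. This is exactly the shape flush_iotlb_page() below has
 * (sketch only; "cr" and "i" are the caller's locals):
 *
 *	struct cr_regs cr;
 *	int i;
 *
 *	for_each_iotlb_cr(obj, obj->nr_tlb_entries, i, cr) {
 *		if (!iotlb_cr_valid(&cr))
 *			continue;
 *		process_entry(&cr);
 *	}
 */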
/* bitmap of the page sizes currently supported */
#define OMAP_IOMMU_PGSIZES	(SZ_4K | SZ_64K | SZ_1M | SZ_16M)
/**
 * struct omap_iommu_domain - omap iommu domain
 * @pgtable:	the page table
 * @iommu_dev:	an omap iommu device attached to this domain. only a single
 *		iommu device can be attached for now.
 * @dev:	Device using this domain.
 * @lock:	domain lock, should be taken when attaching/detaching
 */
struct omap_iommu_domain {
	u32 *pgtable;
	struct omap_iommu *iommu_dev;
	struct device *dev;
	spinlock_t lock;
};
/* accommodate the difference between omap1 and omap2/3 */
static const struct iommu_functions *arch_iommu;

static struct platform_driver omap_iommu_driver;
static struct kmem_cache *iopte_cachep;
/**
 * omap_install_iommu_arch - Install architecture specific iommu functions
 * @ops:	a pointer to architecture specific iommu functions
 *
 * There are several kinds of iommu algorithms (tlb, pagetable) among the
 * omap series. This interface installs such an iommu algorithm.
 **/
int omap_install_iommu_arch(const struct iommu_functions *ops)
{
	if (arch_iommu)
		return -EBUSY;

	arch_iommu = ops;
	return 0;
}
EXPORT_SYMBOL_GPL(omap_install_iommu_arch);
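/*
 * A minimal registration sketch (illustrative only; "omap2_iommu_ops" and
 * "omap2_*" callback names are hypothetical here, and only the callbacks
 * this file actually invokes through arch_iommu are listed):
 *
 *	static const struct iommu_functions omap2_iommu_ops = {
 *		.version	= 0x11,		(arch-defined value)
 *		.enable		= omap2_iommu_enable,
 *		.disable	= omap2_iommu_disable,
 *		.fault_isr	= omap2_iommu_fault_isr,
 *		.tlb_read_cr	= omap2_tlb_read_cr,
 *		.tlb_load_cr	= omap2_tlb_load_cr,
 *		.cr_to_e	= omap2_cr_to_e,
 *		.cr_to_virt	= omap2_cr_to_virt,
 *		.cr_valid	= omap2_cr_valid,
 *		.alloc_cr	= omap2_alloc_cr,
 *		.dump_cr	= omap2_dump_cr,
 *		.get_pte_attr	= omap2_get_pte_attr,
 *		.save_ctx	= omap2_iommu_save_ctx,
 *		.restore_ctx	= omap2_iommu_restore_ctx,
 *		.dump_ctx	= omap2_iommu_dump_ctx,
 *	};
 *
 *	omap_install_iommu_arch(&omap2_iommu_ops);
 */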
/**
 * omap_uninstall_iommu_arch - Uninstall architecture specific iommu functions
 * @ops:	a pointer to architecture specific iommu functions
 *
 * This interface uninstalls the iommu algorithm installed previously.
 **/
void omap_uninstall_iommu_arch(const struct iommu_functions *ops)
{
	if (arch_iommu != ops)
		pr_err("%s: not your arch\n", __func__);

	arch_iommu = NULL;
}
EXPORT_SYMBOL_GPL(omap_uninstall_iommu_arch);
/**
 * omap_iommu_save_ctx - Save registers for pm off-mode support
 * @dev:	client device
 **/
void omap_iommu_save_ctx(struct device *dev)
{
	struct omap_iommu *obj = dev_to_omap_iommu(dev);

	arch_iommu->save_ctx(obj);
}
EXPORT_SYMBOL_GPL(omap_iommu_save_ctx);

/**
 * omap_iommu_restore_ctx - Restore registers for pm off-mode support
 * @dev:	client device
 **/
void omap_iommu_restore_ctx(struct device *dev)
{
	struct omap_iommu *obj = dev_to_omap_iommu(dev);

	arch_iommu->restore_ctx(obj);
}
EXPORT_SYMBOL_GPL(omap_iommu_restore_ctx);
/**
 * omap_iommu_arch_version - Return running iommu arch version
 **/
u32 omap_iommu_arch_version(void)
{
	return arch_iommu->version;
}
EXPORT_SYMBOL_GPL(omap_iommu_arch_version);
static int iommu_enable(struct omap_iommu *obj)
{
	int err;

	if (!obj)
		return -EINVAL;

	if (!arch_iommu)
		return -ENODEV;

	clk_enable(obj->clk);

	err = arch_iommu->enable(obj);

	clk_disable(obj->clk);
	return err;
}

static void iommu_disable(struct omap_iommu *obj)
{
	if (!obj)
		return;

	clk_enable(obj->clk);

	arch_iommu->disable(obj);

	clk_disable(obj->clk);
}
/*
 *	TLB operations
 */
void omap_iotlb_cr_to_e(struct cr_regs *cr, struct iotlb_entry *e)
{
	BUG_ON(!cr || !e);

	arch_iommu->cr_to_e(cr, e);
}
EXPORT_SYMBOL_GPL(omap_iotlb_cr_to_e);

static inline int iotlb_cr_valid(struct cr_regs *cr)
{
	if (!cr)
		return -EINVAL;

	return arch_iommu->cr_valid(cr);
}

static inline struct cr_regs *iotlb_alloc_cr(struct omap_iommu *obj,
					     struct iotlb_entry *e)
{
	if (!e)
		return NULL;

	return arch_iommu->alloc_cr(obj, e);
}

static u32 iotlb_cr_to_virt(struct cr_regs *cr)
{
	return arch_iommu->cr_to_virt(cr);
}

static u32 get_iopte_attr(struct iotlb_entry *e)
{
	return arch_iommu->get_pte_attr(e);
}

static u32 iommu_report_fault(struct omap_iommu *obj, u32 *da)
{
	return arch_iommu->fault_isr(obj, da);
}
static void iotlb_lock_get(struct omap_iommu *obj, struct iotlb_lock *l)
{
	u32 val;

	val = iommu_read_reg(obj, MMU_LOCK);

	l->base = MMU_LOCK_BASE(val);
	l->vict = MMU_LOCK_VICT(val);
}

static void iotlb_lock_set(struct omap_iommu *obj, struct iotlb_lock *l)
{
	u32 val;

	val = (l->base << MMU_LOCK_BASE_SHIFT);
	val |= (l->vict << MMU_LOCK_VICT_SHIFT);

	iommu_write_reg(obj, val, MMU_LOCK);
}
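/*
 * A note on the MMU_LOCK register, as derived from how it is used in this
 * file: "base" is the number of TLB entries locked down as preserved
 * entries; once base reaches nr_tlb_entries every slot is locked and
 * load_iotlb_entry() fails with "preserve entries full". "vict" selects
 * the victim slot that the next MMU_LD_TLB write overwrites, and is
 * advanced round-robin over the non-preserved range
 * [base, nr_tlb_entries).
 */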
static void iotlb_read_cr(struct omap_iommu *obj, struct cr_regs *cr)
{
	arch_iommu->tlb_read_cr(obj, cr);
}

static void iotlb_load_cr(struct omap_iommu *obj, struct cr_regs *cr)
{
	arch_iommu->tlb_load_cr(obj, cr);

	iommu_write_reg(obj, 1, MMU_FLUSH_ENTRY);
	iommu_write_reg(obj, 1, MMU_LD_TLB);
}
/**
 * iotlb_dump_cr - Dump an iommu tlb entry into buf
 * @obj:	target iommu
 * @cr:		contents of cam and ram register
 * @buf:	output buffer
 **/
static inline ssize_t iotlb_dump_cr(struct omap_iommu *obj, struct cr_regs *cr,
				    char *buf)
{
	BUG_ON(!cr || !buf);

	return arch_iommu->dump_cr(obj, cr, buf);
}

/* only used in iotlb iteration for-loop */
static struct cr_regs __iotlb_read_cr(struct omap_iommu *obj, int n)
{
	struct cr_regs cr;
	struct iotlb_lock l;

	iotlb_lock_get(obj, &l);
	l.vict = n;
	iotlb_lock_set(obj, &l);
	iotlb_read_cr(obj, &cr);

	return cr;
}
/**
 * load_iotlb_entry - Set an iommu tlb entry
 * @obj:	target iommu
 * @e:		an iommu tlb entry info
 **/
#ifdef PREFETCH_IOTLB
static int load_iotlb_entry(struct omap_iommu *obj, struct iotlb_entry *e)
{
	int err = 0;
	struct iotlb_lock l;
	struct cr_regs *cr;

	if (!obj || !obj->nr_tlb_entries || !e)
		return -EINVAL;

	clk_enable(obj->clk);

	iotlb_lock_get(obj, &l);
	if (l.base == obj->nr_tlb_entries) {
		dev_warn(obj->dev, "%s: preserve entries full\n", __func__);
		err = -EBUSY;
		goto out;
	}
	if (!e->prsvd) {
		int i;
		struct cr_regs tmp;

		for_each_iotlb_cr(obj, obj->nr_tlb_entries, i, tmp)
			if (!iotlb_cr_valid(&tmp))
				break;

		if (i == obj->nr_tlb_entries) {
			dev_dbg(obj->dev, "%s: full: no entry\n", __func__);
			err = -EBUSY;
			goto out;
		}

		iotlb_lock_get(obj, &l);
	} else {
		l.vict = l.base;
		iotlb_lock_set(obj, &l);
	}

	cr = iotlb_alloc_cr(obj, e);
	if (IS_ERR(cr)) {
		clk_disable(obj->clk);
		return PTR_ERR(cr);
	}

	iotlb_load_cr(obj, cr);
	kfree(cr);

	if (e->prsvd)
		l.base++;
	/* increment victim for next tlb load */
	if (++l.vict == obj->nr_tlb_entries)
		l.vict = l.base;
	iotlb_lock_set(obj, &l);
out:
	clk_disable(obj->clk);
	return err;
}
#else /* !PREFETCH_IOTLB */

static int load_iotlb_entry(struct omap_iommu *obj, struct iotlb_entry *e)
{
	return 0;
}

#endif /* !PREFETCH_IOTLB */

static int prefetch_iotlb_entry(struct omap_iommu *obj, struct iotlb_entry *e)
{
	return load_iotlb_entry(obj, e);
}
/**
 * flush_iotlb_page - Clear an iommu tlb entry
 * @obj:	target iommu
 * @da:		iommu device virtual address
 *
 * Clear an iommu tlb entry which includes 'da' address.
 **/
static void flush_iotlb_page(struct omap_iommu *obj, u32 da)
{
	int i;
	struct cr_regs cr;

	clk_enable(obj->clk);

	for_each_iotlb_cr(obj, obj->nr_tlb_entries, i, cr) {
		u32 start;
		size_t bytes;

		if (!iotlb_cr_valid(&cr))
			continue;

		start = iotlb_cr_to_virt(&cr);
		bytes = iopgsz_to_bytes(cr.cam & 3);

		if ((start <= da) && (da < start + bytes)) {
			dev_dbg(obj->dev, "%s: %08x<=%08x(%zx)\n",
				__func__, start, da, bytes);
			iotlb_load_cr(obj, &cr);
			iommu_write_reg(obj, 1, MMU_FLUSH_ENTRY);
		}
	}
	clk_disable(obj->clk);

	if (i == obj->nr_tlb_entries)
		dev_dbg(obj->dev, "%s: no page for %08x\n", __func__, da);
}
/**
 * flush_iotlb_all - Clear all iommu tlb entries
 * @obj:	target iommu
 **/
static void flush_iotlb_all(struct omap_iommu *obj)
{
	struct iotlb_lock l;

	clk_enable(obj->clk);

	l.base = 0;
	l.vict = 0;
	iotlb_lock_set(obj, &l);

	iommu_write_reg(obj, 1, MMU_GFLUSH);

	clk_disable(obj->clk);
}
#if defined(CONFIG_OMAP_IOMMU_DEBUG) || defined(CONFIG_OMAP_IOMMU_DEBUG_MODULE)

ssize_t omap_iommu_dump_ctx(struct omap_iommu *obj, char *buf, ssize_t bytes)
{
	if (!obj || !buf)
		return -EINVAL;

	clk_enable(obj->clk);

	bytes = arch_iommu->dump_ctx(obj, buf, bytes);

	clk_disable(obj->clk);

	return bytes;
}
EXPORT_SYMBOL_GPL(omap_iommu_dump_ctx);

static int
__dump_tlb_entries(struct omap_iommu *obj, struct cr_regs *crs, int num)
{
	int i;
	struct iotlb_lock saved;
	struct cr_regs tmp;
	struct cr_regs *p = crs;

	clk_enable(obj->clk);
	iotlb_lock_get(obj, &saved);

	for_each_iotlb_cr(obj, num, i, tmp) {
		if (!iotlb_cr_valid(&tmp))
			continue;
		*p++ = tmp;
	}

	iotlb_lock_set(obj, &saved);
	clk_disable(obj->clk);

	return p - crs;
}

/**
 * omap_dump_tlb_entries - dump cr arrays to given buffer
 * @obj:	target iommu
 * @buf:	output buffer
 **/
size_t omap_dump_tlb_entries(struct omap_iommu *obj, char *buf, ssize_t bytes)
{
	int i, num;
	struct cr_regs *cr;
	char *p = buf;

	num = bytes / sizeof(*cr);
	num = min(obj->nr_tlb_entries, num);

	cr = kcalloc(num, sizeof(*cr), GFP_KERNEL);
	if (!cr)
		return 0;

	num = __dump_tlb_entries(obj, cr, num);
	for (i = 0; i < num; i++)
		p += iotlb_dump_cr(obj, cr + i, p);
	kfree(cr);

	return p - buf;
}
EXPORT_SYMBOL_GPL(omap_dump_tlb_entries);
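/*
 * Illustrative sketch of a debugfs-style dump using the helper above
 * (buffer handling is the caller's choice; the per-entry text format is
 * whatever the arch-specific ->dump_cr() callback produces):
 *
 *	char *buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
 *	size_t n;
 *
 *	if (buf) {
 *		n = omap_dump_tlb_entries(obj, buf, PAGE_SIZE);
 *		pr_info("%.*s", (int)n, buf);
 *		kfree(buf);
 *	}
 */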
int omap_foreach_iommu_device(void *data, int (*fn)(struct device *, void *))
{
	return driver_for_each_device(&omap_iommu_driver.driver,
				      NULL, data, fn);
}
EXPORT_SYMBOL_GPL(omap_foreach_iommu_device);

#endif /* CONFIG_OMAP_IOMMU_DEBUG_MODULE */
/*
 *	H/W pagetable operations
 */
static void flush_iopgd_range(u32 *first, u32 *last)
{
	/* FIXME: L2 cache should be taken care of if it exists */
	do {
		asm("mcr	p15, 0, %0, c7, c10, 1 @ flush_pgd"
		    : : "r" (first));
		first += L1_CACHE_BYTES / sizeof(*first);
	} while (first <= last);
}

static void flush_iopte_range(u32 *first, u32 *last)
{
	/* FIXME: L2 cache should be taken care of if it exists */
	do {
		asm("mcr	p15, 0, %0, c7, c10, 1 @ flush_pte"
		    : : "r" (first));
		first += L1_CACHE_BYTES / sizeof(*first);
	} while (first <= last);
}
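/*
 * Note on the asm above: "mcr p15, 0, <reg>, c7, c10, 1" is the ARM CP15
 * operation that cleans one data-cache line by virtual address, so each
 * loop iteration pushes one cache line of page-table memory out to a
 * point where the IOMMU's hardware table walker can see it; the walker
 * does not snoop the CPU caches.
 */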
static void iopte_free(u32 *iopte)
{
	/* Note: freed iopte's must be clean ready for re-use */
	kmem_cache_free(iopte_cachep, iopte);
}

static u32 *iopte_alloc(struct omap_iommu *obj, u32 *iopgd, u32 da)
{
	u32 *iopte;

	/* a table already exists */
	if (*iopgd)
		goto pte_ready;

	/*
	 * do the allocation outside the page table lock
	 */
	spin_unlock(&obj->page_table_lock);
	iopte = kmem_cache_zalloc(iopte_cachep, GFP_KERNEL);
	spin_lock(&obj->page_table_lock);

	if (!*iopgd) {
		if (!iopte)
			return ERR_PTR(-ENOMEM);

		*iopgd = virt_to_phys(iopte) | IOPGD_TABLE;
		flush_iopgd_range(iopgd, iopgd);

		dev_vdbg(obj->dev, "%s: a new pte:%p\n", __func__, iopte);
	} else {
		/* We raced, free the redundant table */
		iopte_free(iopte);
	}

pte_ready:
	iopte = iopte_offset(iopgd, da);

	dev_vdbg(obj->dev,
		 "%s: da:%08x pgd:%p *pgd:%08x pte:%p *pte:%08x\n",
		 __func__, da, iopgd, *iopgd, iopte, *iopte);

	return iopte;
}
static int iopgd_alloc_section(struct omap_iommu *obj, u32 da, u32 pa, u32 prot)
{
	u32 *iopgd = iopgd_offset(obj, da);

	if ((da | pa) & ~IOSECTION_MASK) {
		dev_err(obj->dev, "%s: %08x:%08x should be aligned on %08lx\n",
			__func__, da, pa, IOSECTION_SIZE);
		return -EINVAL;
	}

	*iopgd = (pa & IOSECTION_MASK) | prot | IOPGD_SECTION;
	flush_iopgd_range(iopgd, iopgd);
	return 0;
}

static int iopgd_alloc_super(struct omap_iommu *obj, u32 da, u32 pa, u32 prot)
{
	u32 *iopgd = iopgd_offset(obj, da);
	int i;

	if ((da | pa) & ~IOSUPER_MASK) {
		dev_err(obj->dev, "%s: %08x:%08x should be aligned on %08lx\n",
			__func__, da, pa, IOSUPER_SIZE);
		return -EINVAL;
	}

	for (i = 0; i < 16; i++)
		*(iopgd + i) = (pa & IOSUPER_MASK) | prot | IOPGD_SUPER;
	flush_iopgd_range(iopgd, iopgd + 15);
	return 0;
}
static int iopte_alloc_page(struct omap_iommu *obj, u32 da, u32 pa, u32 prot)
{
	u32 *iopgd = iopgd_offset(obj, da);
	u32 *iopte = iopte_alloc(obj, iopgd, da);

	if (IS_ERR(iopte))
		return PTR_ERR(iopte);

	*iopte = (pa & IOPAGE_MASK) | prot | IOPTE_SMALL;
	flush_iopte_range(iopte, iopte);

	dev_vdbg(obj->dev, "%s: da:%08x pa:%08x pte:%p *pte:%08x\n",
		 __func__, da, pa, iopte, *iopte);

	return 0;
}

static int iopte_alloc_large(struct omap_iommu *obj, u32 da, u32 pa, u32 prot)
{
	u32 *iopgd = iopgd_offset(obj, da);
	u32 *iopte = iopte_alloc(obj, iopgd, da);
	int i;

	if ((da | pa) & ~IOLARGE_MASK) {
		dev_err(obj->dev, "%s: %08x:%08x should be aligned on %08lx\n",
			__func__, da, pa, IOLARGE_SIZE);
		return -EINVAL;
	}

	if (IS_ERR(iopte))
		return PTR_ERR(iopte);

	for (i = 0; i < 16; i++)
		*(iopte + i) = (pa & IOLARGE_MASK) | prot | IOPTE_LARGE;
	flush_iopte_range(iopte, iopte + 15);
	return 0;
}
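/*
 * Layout recap for the two-level table handled above (standard ARM
 * short-descriptor style): a 32-bit device address splits into a
 * first-level index (da >> IOPGD_SHIFT, 1MB granularity) and, for
 * small/large pages, a second-level index within the iopte table.
 * 16MB supersections and 64KB large pages carry no size field of their
 * own, so they are encoded as 16 identical consecutive descriptors at
 * the first and second level respectively, which is why the loops above
 * replicate the entry 16 times.
 */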
static int
iopgtable_store_entry_core(struct omap_iommu *obj, struct iotlb_entry *e)
{
	int (*fn)(struct omap_iommu *, u32, u32, u32);
	u32 prot;
	int err;

	if (!obj || !e)
		return -EINVAL;

	switch (e->pgsz) {
	case MMU_CAM_PGSZ_16M:
		fn = iopgd_alloc_super;
		break;
	case MMU_CAM_PGSZ_1M:
		fn = iopgd_alloc_section;
		break;
	case MMU_CAM_PGSZ_64K:
		fn = iopte_alloc_large;
		break;
	case MMU_CAM_PGSZ_4K:
		fn = iopte_alloc_page;
		break;
	default:
		fn = NULL;
		BUG();
		break;
	}

	prot = get_iopte_attr(e);

	spin_lock(&obj->page_table_lock);
	err = fn(obj, e->da, e->pa, prot);
	spin_unlock(&obj->page_table_lock);

	return err;
}
/**
 * omap_iopgtable_store_entry - Make an iommu pte entry
 * @obj:	target iommu
 * @e:		an iommu tlb entry info
 **/
int omap_iopgtable_store_entry(struct omap_iommu *obj, struct iotlb_entry *e)
{
	int err;

	flush_iotlb_page(obj, e->da);
	err = iopgtable_store_entry_core(obj, e);
	if (!err)
		prefetch_iotlb_entry(obj, e);
	return err;
}
EXPORT_SYMBOL_GPL(omap_iopgtable_store_entry);
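/*
 * Minimal usage sketch, mirroring what omap_iommu_map() does further
 * down: build an iotlb_entry and store it. "da", "pa" and "prot" are
 * caller-supplied; a 4KB mapping is assumed here:
 *
 *	struct iotlb_entry e;
 *	u32 flags = MMU_CAM_PGSZ_4K | prot;
 *	int err;
 *
 *	iotlb_init_entry(&e, da, pa, flags);
 *	err = omap_iopgtable_store_entry(obj, &e);
 */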
/**
 * iopgtable_lookup_entry - Lookup an iommu pte entry
 * @obj:	target iommu
 * @da:		iommu device virtual address
 * @ppgd:	iommu pgd entry pointer to be returned
 * @ppte:	iommu pte entry pointer to be returned
 **/
static void
iopgtable_lookup_entry(struct omap_iommu *obj, u32 da, u32 **ppgd, u32 **ppte)
{
	u32 *iopgd, *iopte = NULL;

	iopgd = iopgd_offset(obj, da);
	if (!*iopgd)
		goto out;

	if (iopgd_is_table(*iopgd))
		iopte = iopte_offset(iopgd, da);
out:
	*ppgd = iopgd;
	*ppte = iopte;
}
static size_t iopgtable_clear_entry_core(struct omap_iommu *obj, u32 da)
{
	size_t bytes;
	u32 *iopgd = iopgd_offset(obj, da);
	int nent = 1;

	if (!*iopgd)
		return 0;

	if (iopgd_is_table(*iopgd)) {
		int i;
		u32 *iopte = iopte_offset(iopgd, da);

		bytes = IOPTE_SIZE;
		if (*iopte & IOPTE_LARGE) {
			nent *= 16;
			/* rewind to the 1st entry */
			iopte = iopte_offset(iopgd, (da & IOLARGE_MASK));
		}
		bytes *= nent;
		memset(iopte, 0, nent * sizeof(*iopte));
		flush_iopte_range(iopte, iopte + nent - 1);

		/*
		 * do table walk to check if this table is necessary or not
		 */
		iopte = iopte_offset(iopgd, 0);
		for (i = 0; i < PTRS_PER_IOPTE; i++)
			if (iopte[i])
				goto out;

		iopte_free(iopte);
		nent = 1; /* for the next L1 entry */
	} else {
		bytes = IOPGD_SIZE;
		if ((*iopgd & IOPGD_SUPER) == IOPGD_SUPER) {
			nent *= 16;
			/* rewind to the 1st entry */
			iopgd = iopgd_offset(obj, (da & IOSUPER_MASK));
		}
		bytes *= nent;
	}
	memset(iopgd, 0, nent * sizeof(*iopgd));
	flush_iopgd_range(iopgd, iopgd + nent - 1);
out:
	return bytes;
}
/**
 * iopgtable_clear_entry - Remove an iommu pte entry
 * @obj:	target iommu
 * @da:		iommu device virtual address
 **/
static size_t iopgtable_clear_entry(struct omap_iommu *obj, u32 da)
{
	size_t bytes;

	spin_lock(&obj->page_table_lock);

	bytes = iopgtable_clear_entry_core(obj, da);
	flush_iotlb_page(obj, da);

	spin_unlock(&obj->page_table_lock);

	return bytes;
}
static void iopgtable_clear_entry_all(struct omap_iommu *obj)
{
	int i;

	spin_lock(&obj->page_table_lock);

	for (i = 0; i < PTRS_PER_IOPGD; i++) {
		u32 da;
		u32 *iopgd;

		da = i << IOPGD_SHIFT;
		iopgd = iopgd_offset(obj, da);

		if (!*iopgd)
			continue;

		if (iopgd_is_table(*iopgd))
			iopte_free(iopte_offset(iopgd, 0));

		*iopgd = 0;
		flush_iopgd_range(iopgd, iopgd);
	}

	flush_iotlb_all(obj);

	spin_unlock(&obj->page_table_lock);
}
/*
 *	Device IOMMU generic operations
 */
static irqreturn_t iommu_fault_handler(int irq, void *data)
{
	u32 da, errs;
	u32 *iopgd, *iopte;
	struct omap_iommu *obj = data;
	struct iommu_domain *domain = obj->domain;

	if (!obj->refcount)
		return IRQ_NONE;

	clk_enable(obj->clk);
	errs = iommu_report_fault(obj, &da);
	clk_disable(obj->clk);
	if (errs == 0)
		return IRQ_HANDLED;

	/* Fault callback or TLB/PTE Dynamic loading */
	if (!report_iommu_fault(domain, obj->dev, da, 0))
		return IRQ_HANDLED;

	iommu_disable(obj);

	iopgd = iopgd_offset(obj, da);

	if (!iopgd_is_table(*iopgd)) {
		dev_err(obj->dev, "%s: errs:0x%08x da:0x%08x pgd:0x%p *pgd:0x%08x\n",
			obj->name, errs, da, iopgd, *iopgd);
		return IRQ_NONE;
	}

	iopte = iopte_offset(iopgd, da);

	dev_err(obj->dev, "%s: errs:0x%08x da:0x%08x pgd:0x%p *pgd:0x%08x pte:0x%p *pte:0x%08x\n",
		obj->name, errs, da, iopgd, *iopgd, iopte, *iopte);

	return IRQ_NONE;
}
static int device_match_by_alias(struct device *dev, void *data)
{
	struct omap_iommu *obj = to_iommu(dev);
	const char *name = data;

	pr_debug("%s: %s %s\n", __func__, obj->name, name);

	return strcmp(obj->name, name) == 0;
}
/**
 * omap_iommu_attach() - attach iommu device to an iommu domain
 * @name:	name of target omap iommu device
 * @iopgd:	page table
 **/
static struct omap_iommu *omap_iommu_attach(const char *name, u32 *iopgd)
{
	int err = -ENOMEM;
	struct device *dev;
	struct omap_iommu *obj;

	dev = driver_find_device(&omap_iommu_driver.driver, NULL,
				 (void *)name,
				 device_match_by_alias);
	if (!dev)
		return ERR_PTR(-ENODEV);

	obj = to_iommu(dev);

	spin_lock(&obj->iommu_lock);

	/* an iommu device can only be attached once */
	if (++obj->refcount > 1) {
		dev_err(dev, "%s: already attached!\n", obj->name);
		err = -EBUSY;
		goto err_enable;
	}

	obj->iopgd = iopgd;
	err = iommu_enable(obj);
	if (err)
		goto err_enable;
	flush_iotlb_all(obj);

	if (!try_module_get(obj->owner))
		goto err_module;

	spin_unlock(&obj->iommu_lock);

	dev_dbg(obj->dev, "%s: %s\n", __func__, obj->name);
	return obj;

err_module:
	if (obj->refcount == 1)
		iommu_disable(obj);
err_enable:
	obj->refcount--;
	spin_unlock(&obj->iommu_lock);
	return ERR_PTR(err);
}
/**
 * omap_iommu_detach - release iommu device
 * @obj:	target iommu
 **/
static void omap_iommu_detach(struct omap_iommu *obj)
{
	if (!obj || IS_ERR(obj))
		return;

	spin_lock(&obj->iommu_lock);

	if (--obj->refcount == 0)
		iommu_disable(obj);

	module_put(obj->owner);

	obj->iopgd = NULL;

	spin_unlock(&obj->iommu_lock);

	dev_dbg(obj->dev, "%s: %s\n", __func__, obj->name);
}
/*
 *	OMAP Device MMU(IOMMU) detection
 */
static int __devinit omap_iommu_probe(struct platform_device *pdev)
{
	int err = -ENODEV;
	int irq;
	struct omap_iommu *obj;
	struct resource *res;
	struct iommu_platform_data *pdata = pdev->dev.platform_data;

	if (pdev->num_resources != 2)
		return -EINVAL;

	obj = kzalloc(sizeof(*obj) + MMU_REG_SIZE, GFP_KERNEL);
	if (!obj)
		return -ENOMEM;

	obj->clk = clk_get(&pdev->dev, pdata->clk_name);
	if (IS_ERR(obj->clk))
		goto err_clk;

	obj->nr_tlb_entries = pdata->nr_tlb_entries;
	obj->name = pdata->name;
	obj->dev = &pdev->dev;
	obj->ctx = (void *)obj + sizeof(*obj);
	obj->da_start = pdata->da_start;
	obj->da_end = pdata->da_end;

	spin_lock_init(&obj->iommu_lock);
	mutex_init(&obj->mmap_lock);
	spin_lock_init(&obj->page_table_lock);
	INIT_LIST_HEAD(&obj->mmap);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		err = -ENODEV;
		goto err_mem;
	}

	res = request_mem_region(res->start, resource_size(res),
				 dev_name(&pdev->dev));
	if (!res) {
		err = -EIO;
		goto err_mem;
	}

	obj->regbase = ioremap(res->start, resource_size(res));
	if (!obj->regbase) {
		err = -ENOMEM;
		goto err_ioremap;
	}

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		err = -ENODEV;
		goto err_irq;
	}
	err = request_irq(irq, iommu_fault_handler, IRQF_SHARED,
			  dev_name(&pdev->dev), obj);
	if (err < 0)
		goto err_irq;
	platform_set_drvdata(pdev, obj);

	dev_info(&pdev->dev, "%s registered\n", obj->name);
	return 0;

err_irq:
	iounmap(obj->regbase);
err_ioremap:
	release_mem_region(res->start, resource_size(res));
err_mem:
	clk_put(obj->clk);
err_clk:
	kfree(obj);
	return err;
}
static int __devexit omap_iommu_remove(struct platform_device *pdev)
{
	int irq;
	struct resource *res;
	struct omap_iommu *obj = platform_get_drvdata(pdev);

	platform_set_drvdata(pdev, NULL);

	iopgtable_clear_entry_all(obj);

	irq = platform_get_irq(pdev, 0);
	free_irq(irq, obj);
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	release_mem_region(res->start, resource_size(res));
	iounmap(obj->regbase);

	clk_put(obj->clk);
	dev_info(&pdev->dev, "%s removed\n", obj->name);
	kfree(obj);
	return 0;
}
static struct platform_driver omap_iommu_driver = {
	.probe	= omap_iommu_probe,
	.remove	= __devexit_p(omap_iommu_remove),
	.driver	= {
		.name	= "omap-iommu",
	},
};
static void iopte_cachep_ctor(void *iopte)
{
	clean_dcache_area(iopte, IOPTE_TABLE_SIZE);
}
static int omap_iommu_map(struct iommu_domain *domain, unsigned long da,
			  phys_addr_t pa, size_t bytes, int prot)
{
	struct omap_iommu_domain *omap_domain = domain->priv;
	struct omap_iommu *oiommu = omap_domain->iommu_dev;
	struct device *dev = oiommu->dev;
	struct iotlb_entry e;
	int omap_pgsz;
	int ret;
	u32 flags;

	/* we only support mapping a single iommu page for now */
	omap_pgsz = bytes_to_iopgsz(bytes);
	if (omap_pgsz < 0) {
		dev_err(dev, "invalid size to map: %zu\n", bytes);
		return -EINVAL;
	}

	dev_dbg(dev, "mapping da 0x%lx to pa 0x%x size 0x%zx\n", da, pa, bytes);

	flags = omap_pgsz | prot;

	iotlb_init_entry(&e, da, pa, flags);

	ret = omap_iopgtable_store_entry(oiommu, &e);
	if (ret)
		dev_err(dev, "omap_iopgtable_store_entry failed: %d\n", ret);

	return ret;
}
static size_t omap_iommu_unmap(struct iommu_domain *domain, unsigned long da,
			       size_t size)
{
	struct omap_iommu_domain *omap_domain = domain->priv;
	struct omap_iommu *oiommu = omap_domain->iommu_dev;
	struct device *dev = oiommu->dev;

	dev_dbg(dev, "unmapping da 0x%lx size %zu\n", da, size);

	return iopgtable_clear_entry(oiommu, da);
}
static int
omap_iommu_attach_dev(struct iommu_domain *domain, struct device *dev)
{
	struct omap_iommu_domain *omap_domain = domain->priv;
	struct omap_iommu *oiommu;
	struct omap_iommu_arch_data *arch_data = dev->archdata.iommu;
	int ret = 0;

	spin_lock(&omap_domain->lock);

	/* only a single device is supported per domain for now */
	if (omap_domain->iommu_dev) {
		dev_err(dev, "iommu domain is already attached\n");
		ret = -EBUSY;
		goto out;
	}

	/* get a handle to and enable the omap iommu */
	oiommu = omap_iommu_attach(arch_data->name, omap_domain->pgtable);
	if (IS_ERR(oiommu)) {
		ret = PTR_ERR(oiommu);
		dev_err(dev, "can't get omap iommu: %d\n", ret);
		goto out;
	}

	omap_domain->iommu_dev = arch_data->iommu_dev = oiommu;
	omap_domain->dev = dev;
	oiommu->domain = domain;

out:
	spin_unlock(&omap_domain->lock);
	return ret;
}
static void _omap_iommu_detach_dev(struct omap_iommu_domain *omap_domain,
				   struct device *dev)
{
	struct omap_iommu *oiommu = dev_to_omap_iommu(dev);
	struct omap_iommu_arch_data *arch_data = dev->archdata.iommu;

	/* only a single device is supported per domain for now */
	if (omap_domain->iommu_dev != oiommu) {
		dev_err(dev, "invalid iommu device\n");
		return;
	}

	iopgtable_clear_entry_all(oiommu);

	omap_iommu_detach(oiommu);

	omap_domain->iommu_dev = arch_data->iommu_dev = NULL;
	omap_domain->dev = NULL;
}

static void omap_iommu_detach_dev(struct iommu_domain *domain,
				  struct device *dev)
{
	struct omap_iommu_domain *omap_domain = domain->priv;

	spin_lock(&omap_domain->lock);
	_omap_iommu_detach_dev(omap_domain, dev);
	spin_unlock(&omap_domain->lock);
}
static int omap_iommu_domain_init(struct iommu_domain *domain)
{
	struct omap_iommu_domain *omap_domain;

	omap_domain = kzalloc(sizeof(*omap_domain), GFP_KERNEL);
	if (!omap_domain) {
		pr_err("kzalloc failed\n");
		goto out;
	}

	omap_domain->pgtable = kzalloc(IOPGD_TABLE_SIZE, GFP_KERNEL);
	if (!omap_domain->pgtable) {
		pr_err("kzalloc failed\n");
		goto fail_nomem;
	}

	/*
	 * should never fail, but please keep this around to ensure
	 * we keep the hardware happy
	 */
	BUG_ON(!IS_ALIGNED((long)omap_domain->pgtable, IOPGD_TABLE_SIZE));

	clean_dcache_area(omap_domain->pgtable, IOPGD_TABLE_SIZE);
	spin_lock_init(&omap_domain->lock);

	domain->priv = omap_domain;

	domain->geometry.aperture_start = 0;
	domain->geometry.aperture_end   = (1ULL << 32) - 1;
	domain->geometry.force_aperture = true;

	return 0;

fail_nomem:
	kfree(omap_domain);
out:
	return -ENOMEM;
}
static void omap_iommu_domain_destroy(struct iommu_domain *domain)
{
	struct omap_iommu_domain *omap_domain = domain->priv;

	domain->priv = NULL;

	/*
	 * An iommu device may still be attached
	 * (currently, only one device can be attached).
	 */
	if (omap_domain->iommu_dev)
		_omap_iommu_detach_dev(omap_domain, omap_domain->dev);

	kfree(omap_domain->pgtable);
	kfree(omap_domain);
}
static phys_addr_t omap_iommu_iova_to_phys(struct iommu_domain *domain,
					   unsigned long da)
{
	struct omap_iommu_domain *omap_domain = domain->priv;
	struct omap_iommu *oiommu = omap_domain->iommu_dev;
	struct device *dev = oiommu->dev;
	u32 *pgd, *pte;
	phys_addr_t ret = 0;

	iopgtable_lookup_entry(oiommu, da, &pgd, &pte);

	if (pte) {
		if (iopte_is_small(*pte))
			ret = omap_iommu_translate(*pte, da, IOPTE_MASK);
		else if (iopte_is_large(*pte))
			ret = omap_iommu_translate(*pte, da, IOLARGE_MASK);
		else
			dev_err(dev, "bogus pte 0x%x, da 0x%lx", *pte, da);
	} else {
		if (iopgd_is_section(*pgd))
			ret = omap_iommu_translate(*pgd, da, IOSECTION_MASK);
		else if (iopgd_is_super(*pgd))
			ret = omap_iommu_translate(*pgd, da, IOSUPER_MASK);
		else
			dev_err(dev, "bogus pgd 0x%x, da 0x%lx", *pgd, da);
	}

	return ret;
}
static int omap_iommu_domain_has_cap(struct iommu_domain *domain,
				     unsigned long cap)
{
	return 0;
}

static struct iommu_ops omap_iommu_ops = {
	.domain_init	= omap_iommu_domain_init,
	.domain_destroy	= omap_iommu_domain_destroy,
	.attach_dev	= omap_iommu_attach_dev,
	.detach_dev	= omap_iommu_detach_dev,
	.map		= omap_iommu_map,
	.unmap		= omap_iommu_unmap,
	.iova_to_phys	= omap_iommu_iova_to_phys,
	.domain_has_cap	= omap_iommu_domain_has_cap,
	.pgsize_bitmap	= OMAP_IOMMU_PGSIZES,
};
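/*
 * With these ops registered on the platform bus (see omap_iommu_init()
 * below), clients drive the mapping through the generic IOMMU API. A
 * minimal sketch, with "client_dev", "da" and "pa" supplied by the
 * caller:
 *
 *	struct iommu_domain *domain;
 *
 *	domain = iommu_domain_alloc(&platform_bus_type);
 *	iommu_attach_device(domain, client_dev);
 *	iommu_map(domain, da, pa, SZ_4K, IOMMU_READ | IOMMU_WRITE);
 *	iommu_unmap(domain, da, SZ_4K);
 *	iommu_detach_device(domain, client_dev);
 *	iommu_domain_free(domain);
 */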
static int __init omap_iommu_init(void)
{
	struct kmem_cache *p;
	const unsigned long flags = SLAB_HWCACHE_ALIGN;
	size_t align = 1 << 10; /* L2 pagetable alignment */

	p = kmem_cache_create("iopte_cache", IOPTE_TABLE_SIZE, align, flags,
			      iopte_cachep_ctor);
	if (!p)
		return -ENOMEM;
	iopte_cachep = p;

	bus_set_iommu(&platform_bus_type, &omap_iommu_ops);

	return platform_driver_register(&omap_iommu_driver);
}
/* must be ready before omap3isp is probed */
subsys_initcall(omap_iommu_init);
static void __exit omap_iommu_exit(void)
{
	kmem_cache_destroy(iopte_cachep);

	platform_driver_unregister(&omap_iommu_driver);
}
module_exit(omap_iommu_exit);
MODULE_DESCRIPTION("omap iommu: tlb and pagetable primitives");
MODULE_ALIAS("platform:omap-iommu");
MODULE_AUTHOR("Hiroshi DOYU, Paul Mundt and Toshihiro Kobayashi");
MODULE_LICENSE("GPL v2");