/*
 * omap iommu: simple virtual address space management
 *
 * Copyright (C) 2008-2009 Nokia Corporation
 *
 * Written by Hiroshi DOYU <Hiroshi.DOYU@nokia.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/device.h>
#include <linux/scatterlist.h>
#include <linux/iommu.h>
#include <linux/omap-iommu.h>

#include <asm/cacheflush.h>
#include <asm/mach/map.h>

#include <plat/iommu.h>

#include "omap-iopgtable.h"
#include "omap-iommu.h"

/*
 * IOVMF_FLAGS: attributes for an iommu virtual memory area (iovma)
 *
 * The lower 16 bits are used for h/w and the upper 16 bits for s/w.
 */
#define IOVMF_SW_SHIFT		16

/*
 * iovma: h/w flags derived from cam and ram attributes
 */
#define IOVMF_CAM_MASK		(~((1 << 10) - 1))
#define IOVMF_RAM_MASK		(~IOVMF_CAM_MASK)

#define IOVMF_PGSZ_MASK		(3 << 0)
#define IOVMF_PGSZ_1M		MMU_CAM_PGSZ_1M
#define IOVMF_PGSZ_64K		MMU_CAM_PGSZ_64K
#define IOVMF_PGSZ_4K		MMU_CAM_PGSZ_4K
#define IOVMF_PGSZ_16M		MMU_CAM_PGSZ_16M

#define IOVMF_ENDIAN_MASK	(1 << 9)
#define IOVMF_ENDIAN_BIG	MMU_RAM_ENDIAN_BIG

#define IOVMF_ELSZ_MASK		(3 << 7)
#define IOVMF_ELSZ_16		MMU_RAM_ELSZ_16
#define IOVMF_ELSZ_32		MMU_RAM_ELSZ_32
#define IOVMF_ELSZ_NONE		MMU_RAM_ELSZ_NONE

#define IOVMF_MIXED_MASK	(1 << 6)
#define IOVMF_MIXED		MMU_RAM_MIXED

/*
 * iovma: s/w flags, used for mapping and unmapping internally.
 */
#define IOVMF_MMIO		(1 << IOVMF_SW_SHIFT)
#define IOVMF_ALLOC		(2 << IOVMF_SW_SHIFT)
#define IOVMF_ALLOC_MASK	(3 << IOVMF_SW_SHIFT)

/* "superpages" are supported only with physically contiguous (linear) pages */
#define IOVMF_DISCONT		(1 << (2 + IOVMF_SW_SHIFT))
#define IOVMF_LINEAR		(2 << (2 + IOVMF_SW_SHIFT))
#define IOVMF_LINEAR_MASK	(3 << (2 + IOVMF_SW_SHIFT))

#define IOVMF_DA_FIXED		(1 << (4 + IOVMF_SW_SHIFT))
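
/*
 * Illustrative note, not part of the original source: the h/w bits describe
 * the page/element attributes programmed into the TLB, while the s/w bits
 * steer this layer's own mapping paths.  A hypothetical client asking for a
 * mapping at a fixed device address could combine them roughly like this
 * ('domain', 'dev' and 'sgt' are assumed to exist in the caller):
 *
 *	u32 flags = IOVMF_DA_FIXED;
 *	u32 da = omap_iommu_vmap(domain, dev, 0x80000000, sgt, flags);
 *
 * Without IOVMF_DA_FIXED, the requested 'da' is only a hint and the
 * allocator picks the actual device address.
 */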

static struct kmem_cache *iovm_area_cachep;

/* return the offset of the first scatterlist entry in an sg table */
static unsigned int sgtable_offset(const struct sg_table *sgt)
{
	if (!sgt || !sgt->nents)
		return 0;

	return sgt->sgl->offset;
}

/* return total bytes of sg buffers */
static size_t sgtable_len(const struct sg_table *sgt)
{
	unsigned int i, total = 0;
	struct scatterlist *sg;

	if (!sgt)
		return 0;

	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
		size_t bytes = sg->length + sg->offset;

		if (!iopgsz_ok(bytes)) {
			pr_err("%s: sg[%d] not iommu pagesize(%u %u)\n",
			       __func__, i, bytes, sg->offset);
			return 0;
		}
		if (i && sg->offset) {
			pr_err("%s: sg[%d] offset not allowed in internal entries\n",
			       __func__, i);
			return 0;
		}
		total += bytes;
	}
	return total;
}
#define sgtable_ok(x)	(!!sgtable_len(x))
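
/*
 * Illustrative note, not from the original source: sgtable_len() accepts,
 * for example, an sg list of one 64KB element followed by one 4KB element
 * (both are valid iommu page sizes, and only the first entry may carry an
 * offset), but rejects a 5KB element because 5KB is not an iommu page size;
 * in that case sgtable_ok() evaluates to false.
 */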

static unsigned max_alignment(u32 addr)
{
	int i;
	unsigned pagesize[] = { SZ_16M, SZ_1M, SZ_64K, SZ_4K, };
	for (i = 0; i < ARRAY_SIZE(pagesize) && addr & (pagesize[i] - 1); i++)
		;
	return (i < ARRAY_SIZE(pagesize)) ? pagesize[i] : 0;
}

/*
 * calculate the optimal number of sg elements from total bytes based on
 * iommu superpages
 */
static unsigned sgtable_nents(size_t bytes, u32 da, u32 pa)
{
	unsigned nr_entries = 0, ent_sz;

	if (!IS_ALIGNED(bytes, PAGE_SIZE)) {
		pr_err("%s: wrong size %08x\n", __func__, bytes);
		return 0;
	}
	while (bytes) {
		ent_sz = max_alignment(da | pa);
		ent_sz = min_t(unsigned, ent_sz, iopgsz_max(bytes));
		nr_entries++;
		da += ent_sz;
		pa += ent_sz;
		bytes -= ent_sz;
	}
	return nr_entries;
}
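
/*
 * Worked example (illustrative, not from the original source): mapping
 * 0x101000 bytes (1MB + 4KB) whose 'da' and 'pa' are both 1MB aligned is
 * split into one 1MB entry, since max_alignment(da | pa) and
 * iopgsz_max(bytes) both allow 1MB, followed by one 4KB entry for the
 * remainder, so sgtable_nents() returns 2.
 */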

/* allocate and initialize sg_table header (a kind of 'superblock') */
static struct sg_table *sgtable_alloc(const size_t bytes, u32 flags,
				      u32 da, u32 pa)
{
	unsigned int nr_entries;
	int err;
	struct sg_table *sgt;

	if (!bytes)
		return ERR_PTR(-EINVAL);
	if (!IS_ALIGNED(bytes, PAGE_SIZE))
		return ERR_PTR(-EINVAL);

	if (flags & IOVMF_LINEAR) {
		nr_entries = sgtable_nents(bytes, da, pa);
		if (!nr_entries)
			return ERR_PTR(-EINVAL);
	} else {
		nr_entries = bytes / PAGE_SIZE;
	}

	sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt)
		return ERR_PTR(-ENOMEM);

	err = sg_alloc_table(sgt, nr_entries, GFP_KERNEL);
	if (err) {
		kfree(sgt);
		return ERR_PTR(err);
	}

	pr_debug("%s: sgt:%p(%d entries)\n", __func__, sgt, nr_entries);
	return sgt;
}

/* free sg_table header (a kind of superblock) */
static void sgtable_free(struct sg_table *sgt)
{
	if (!sgt)
		return;
	sg_free_table(sgt);
	kfree(sgt);
	pr_debug("%s: sgt:%p\n", __func__, sgt);
}

/* map 'sglist' to a contiguous mpu virtual area and return 'va' */
static void *vmap_sg(const struct sg_table *sgt)
{
	u32 va;
	size_t total;
	unsigned int i;
	struct scatterlist *sg;
	struct vm_struct *new;
	const struct mem_type *mtype;

	mtype = get_mem_type(MT_DEVICE);
	if (!mtype)
		return ERR_PTR(-EINVAL);

	total = sgtable_len(sgt);
	if (!total)
		return ERR_PTR(-EINVAL);

	new = __get_vm_area(total, VM_IOREMAP, VMALLOC_START, VMALLOC_END);
	if (!new)
		return ERR_PTR(-ENOMEM);
	va = (u32)new->addr;

	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
		size_t bytes;
		u32 pa;
		int err;

		pa = sg_phys(sg) - sg->offset;
		bytes = sg->length + sg->offset;

		BUG_ON(bytes != PAGE_SIZE);

		err = ioremap_page(va, pa, mtype);
		if (err)
			goto err_out;

		va += bytes;
	}

	flush_cache_vmap((unsigned long)new->addr,
			 (unsigned long)(new->addr + total));
	return new->addr;

err_out:
	WARN_ON(1); /* FIXME: cleanup some mpu mappings */
	vunmap(new->addr);
	return ERR_PTR(-EAGAIN);
}

static inline void vunmap_sg(const void *va)
{
	vunmap(va);
}

static struct iovm_struct *__find_iovm_area(struct omap_iommu *obj,
					    const u32 da)
{
	struct iovm_struct *tmp;

	list_for_each_entry(tmp, &obj->mmap, list) {
		if ((da >= tmp->da_start) && (da < tmp->da_end)) {
			size_t len = tmp->da_end - tmp->da_start;

			dev_dbg(obj->dev, "%s: %08x-%08x-%08x(%x) %08x\n",
				__func__, tmp->da_start, da, tmp->da_end, len,
				tmp->flags);
			return tmp;
		}
	}
	return NULL;
}

/**
 * omap_find_iovm_area  -  find iovma which includes @da
 * @dev:	client device
 * @da:		iommu device virtual address
 *
 * Find the existing iovma starting at @da
 */
struct iovm_struct *omap_find_iovm_area(struct device *dev, u32 da)
{
	struct omap_iommu *obj = dev_to_omap_iommu(dev);
	struct iovm_struct *area;

	mutex_lock(&obj->mmap_lock);
	area = __find_iovm_area(obj, da);
	mutex_unlock(&obj->mmap_lock);

	return area;
}
EXPORT_SYMBOL_GPL(omap_find_iovm_area);

/*
 * This finds the hole (area) which fits the requested address and len
 * in the iovma mmap list, and returns the newly allocated iovma.
 */
static struct iovm_struct *alloc_iovm_area(struct omap_iommu *obj, u32 da,
					   size_t bytes, u32 flags)
{
	struct iovm_struct *new, *tmp;
	u32 start, prev_end, alignment;

	if (!obj || !bytes)
		return ERR_PTR(-EINVAL);

	start = da;
	alignment = PAGE_SIZE;

	if (~flags & IOVMF_DA_FIXED) {
		/* Don't map address 0 */
		start = obj->da_start ? obj->da_start : alignment;

		if (flags & IOVMF_LINEAR)
			alignment = iopgsz_max(bytes);
		start = roundup(start, alignment);
	} else if (start < obj->da_start || start > obj->da_end ||
					obj->da_end - start < bytes) {
		return ERR_PTR(-EINVAL);
	}

	tmp = NULL;
	if (list_empty(&obj->mmap))
		goto found;

	prev_end = 0;
	list_for_each_entry(tmp, &obj->mmap, list) {
		if (prev_end > start)
			break;

		if (tmp->da_start > start && (tmp->da_start - start) >= bytes)
			goto found;

		if (tmp->da_end >= start && ~flags & IOVMF_DA_FIXED)
			start = roundup(tmp->da_end + 1, alignment);

		prev_end = tmp->da_end;
	}

	if ((start >= prev_end) && (obj->da_end - start >= bytes))
		goto found;

	dev_dbg(obj->dev, "%s: no space to fit %08x(%x) flags: %08x\n",
		__func__, da, bytes, flags);

	return ERR_PTR(-EINVAL);

found:
	new = kmem_cache_zalloc(iovm_area_cachep, GFP_KERNEL);
	if (!new)
		return ERR_PTR(-ENOMEM);

	new->iommu = obj;
	new->da_start = start;
	new->da_end = start + bytes;
	new->flags = flags;

	/*
	 * keep ascending order of iovmas
	 */
	if (tmp)
		list_add_tail(&new->list, &tmp->list);
	else
		list_add(&new->list, &obj->mmap);

	dev_dbg(obj->dev, "%s: found %08x-%08x-%08x(%x) %08x\n",
		__func__, new->da_start, start, new->da_end, bytes, flags);

	return new;
}

static void free_iovm_area(struct omap_iommu *obj, struct iovm_struct *area)
{
	size_t bytes;

	BUG_ON(!obj || !area);

	bytes = area->da_end - area->da_start;

	dev_dbg(obj->dev, "%s: %08x-%08x(%x) %08x\n",
		__func__, area->da_start, area->da_end, bytes, area->flags);

	list_del(&area->list);
	kmem_cache_free(iovm_area_cachep, area);
}

/**
 * omap_da_to_va - convert (d) to (v)
 * @dev:	client device
 * @da:		iommu device virtual address
 *
 * Returns the mpu virtual address which corresponds to a given device
 * virtual address.
 */
void *omap_da_to_va(struct device *dev, u32 da)
{
	struct omap_iommu *obj = dev_to_omap_iommu(dev);
	void *va = NULL;
	struct iovm_struct *area;

	mutex_lock(&obj->mmap_lock);

	area = __find_iovm_area(obj, da);
	if (!area) {
		dev_dbg(obj->dev, "%s: no da area(%08x)\n", __func__, da);
		goto out;
	}
	va = area->va;
out:
	mutex_unlock(&obj->mmap_lock);

	return va;
}
EXPORT_SYMBOL_GPL(omap_da_to_va);

static void sgtable_fill_vmalloc(struct sg_table *sgt, void *_va)
{
	unsigned int i;
	struct scatterlist *sg;
	void *va = _va;
	void *va_end;

	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
		struct page *pg;
		const size_t bytes = PAGE_SIZE;

		/*
		 * iommu 'superpage' isn't supported with 'omap_iommu_vmalloc()'
		 */
		pg = vmalloc_to_page(va);
		BUG_ON(!pg);
		sg_set_page(sg, pg, bytes, 0);

		va += bytes;
	}

	va_end = _va + PAGE_SIZE * i;
}

static inline void sgtable_drain_vmalloc(struct sg_table *sgt)
{
	/*
	 * Actually this is not necessary at all; it just exists for
	 * consistency and code readability.
	 */
	BUG_ON(!sgt);
}

/* create 'da' <-> 'pa' mapping from 'sgt' */
static int map_iovm_area(struct iommu_domain *domain, struct iovm_struct *new,
			 const struct sg_table *sgt, u32 flags)
{
	int err = -EINVAL;
	unsigned int i, j;
	struct scatterlist *sg;
	u32 da = new->da_start;

	if (!domain || !sgt)
		return -EINVAL;

	BUG_ON(!sgtable_ok(sgt));

	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
		u32 pa;
		size_t bytes;

		pa = sg_phys(sg) - sg->offset;
		bytes = sg->length + sg->offset;

		flags &= ~IOVMF_PGSZ_MASK;

		if (bytes_to_iopgsz(bytes) < 0)
			goto err_out;

		pr_debug("%s: [%d] %08x %08x(%x)\n", __func__,
			 i, da, pa, bytes);

		err = iommu_map(domain, da, pa, bytes, flags);
		if (err)
			goto err_out;

		da += bytes;
	}
	return 0;

err_out:
	da = new->da_start;
	for_each_sg(sgt->sgl, sg, i, j) {
		size_t bytes = sg->length + sg->offset;

		/* ignore failures.. we're already handling one */
		iommu_unmap(domain, da, bytes);

		da += bytes;
	}
	return err;
}

/* release 'da' <-> 'pa' mapping */
static void unmap_iovm_area(struct iommu_domain *domain, struct omap_iommu *obj,
			    struct iovm_struct *area)
{
	u32 start;
	size_t total = area->da_end - area->da_start;
	const struct sg_table *sgt = area->sgt;
	struct scatterlist *sg;
	int i;
	size_t unmapped;

	BUG_ON(!sgtable_ok(sgt));
	BUG_ON((!total) || !IS_ALIGNED(total, PAGE_SIZE));

	start = area->da_start;
	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
		size_t bytes = sg->length + sg->offset;

		unmapped = iommu_unmap(domain, start, bytes);
		if (unmapped < bytes)
			break;

		dev_dbg(obj->dev, "%s: unmap %08x(%x) %08x\n",
			__func__, start, bytes, area->flags);

		BUG_ON(!IS_ALIGNED(bytes, PAGE_SIZE));

		total -= bytes;
		start += bytes;
	}
	BUG_ON(total);
}

/* template function for all unmapping */
static struct sg_table *unmap_vm_area(struct iommu_domain *domain,
				      struct omap_iommu *obj, const u32 da,
				      void (*fn)(const void *), u32 flags)
{
	struct sg_table *sgt = NULL;
	struct iovm_struct *area;

	if (!IS_ALIGNED(da, PAGE_SIZE)) {
		dev_err(obj->dev, "%s: alignment err(%08x)\n", __func__, da);
		return NULL;
	}

	mutex_lock(&obj->mmap_lock);

	area = __find_iovm_area(obj, da);
	if (!area) {
		dev_dbg(obj->dev, "%s: no da area(%08x)\n", __func__, da);
		goto out;
	}

	if ((area->flags & flags) != flags) {
		dev_err(obj->dev, "%s: wrong flags(%08x)\n", __func__,
			area->flags);
		goto out;
	}
	sgt = (struct sg_table *)area->sgt;

	unmap_iovm_area(domain, obj, area);

	fn(area->va);

	dev_dbg(obj->dev, "%s: %08x-%08x-%08x(%x) %08x\n", __func__,
		area->da_start, da, area->da_end,
		area->da_end - area->da_start, area->flags);

	free_iovm_area(obj, area);
out:
	mutex_unlock(&obj->mmap_lock);

	return sgt;
}

static u32 map_iommu_region(struct iommu_domain *domain, struct omap_iommu *obj,
			    u32 da, const struct sg_table *sgt, void *va,
			    size_t bytes, u32 flags)
{
	int err = -ENOMEM;
	struct iovm_struct *new;

	mutex_lock(&obj->mmap_lock);

	new = alloc_iovm_area(obj, da, bytes, flags);
	if (IS_ERR(new)) {
		err = PTR_ERR(new);
		goto err_alloc_iovma;
	}
	new->va = va;
	new->sgt = sgt;

	if (map_iovm_area(domain, new, sgt, new->flags))
		goto err_map;

	mutex_unlock(&obj->mmap_lock);

	dev_dbg(obj->dev, "%s: da:%08x(%x) flags:%08x va:%p\n",
		__func__, new->da_start, bytes, new->flags, va);

	return new->da_start;

err_map:
	free_iovm_area(obj, new);
err_alloc_iovma:
	mutex_unlock(&obj->mmap_lock);
	return err;
}

static inline u32
__iommu_vmap(struct iommu_domain *domain, struct omap_iommu *obj,
	     u32 da, const struct sg_table *sgt,
	     void *va, size_t bytes, u32 flags)
{
	return map_iommu_region(domain, obj, da, sgt, va, bytes, flags);
}

/**
 * omap_iommu_vmap  -  (d)-(p)-(v) address mapper
 * @domain:	iommu domain
 * @dev:	client device
 * @da:		requested iommu device virtual address
 * @sgt:	address of scatter gather table
 * @flags:	iovma and page property
 *
 * Creates a 1-n-1 mapping with the given @sgt and returns @da.
 * All @sgt elements must be io page size aligned.
 */
u32 omap_iommu_vmap(struct iommu_domain *domain, struct device *dev, u32 da,
		    const struct sg_table *sgt, u32 flags)
{
	struct omap_iommu *obj = dev_to_omap_iommu(dev);
	size_t bytes;
	void *va = NULL;

	if (!obj || !obj->dev || !sgt)
		return -EINVAL;

	bytes = sgtable_len(sgt);
	if (!bytes)
		return -EINVAL;
	bytes = PAGE_ALIGN(bytes);

	if (flags & IOVMF_MMIO) {
		va = vmap_sg(sgt);
		if (IS_ERR(va))
			return PTR_ERR(va);
	}

	flags |= IOVMF_DISCONT;
	flags |= IOVMF_MMIO;

	da = __iommu_vmap(domain, obj, da, sgt, va, bytes, flags);
	if (IS_ERR_VALUE(da))
		vunmap_sg(va);

	return da + sgtable_offset(sgt);
}
EXPORT_SYMBOL_GPL(omap_iommu_vmap);
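
/*
 * Illustrative usage sketch, not part of the original source: a client
 * driver that already owns an sg_table describing its buffer could pair
 * omap_iommu_vmap() with omap_iommu_vunmap() roughly as below.  The
 * 'domain', 'dev' and 'sgt' variables are assumed to exist in the caller;
 * IOVMF_DISCONT and IOVMF_MMIO are set internally by omap_iommu_vmap().
 *
 *	u32 da = omap_iommu_vmap(domain, dev, 0, sgt, 0);
 *	if (IS_ERR_VALUE(da))
 *		return (int)da;
 *	// ... hand 'da' to the device ...
 *	sgt = omap_iommu_vunmap(domain, dev, da);
 *	// the returned sg_table is the caller's to free
 */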

/**
 * omap_iommu_vunmap  -  release virtual mapping obtained by 'omap_iommu_vmap()'
 * @domain:	iommu domain
 * @dev:	client device
 * @da:		iommu device virtual address
 *
 * Free the iommu virtually contiguous memory area starting at
 * @da, which was returned by 'omap_iommu_vmap()'.
 */
struct sg_table *
omap_iommu_vunmap(struct iommu_domain *domain, struct device *dev, u32 da)
{
	struct omap_iommu *obj = dev_to_omap_iommu(dev);
	struct sg_table *sgt;
	/*
	 * 'sgt' was allocated before 'omap_iommu_vmap()' was called.
	 * Just return 'sgt' to the caller to free.
	 */
	da &= PAGE_MASK;
	sgt = unmap_vm_area(domain, obj, da, vunmap_sg,
			    IOVMF_DISCONT | IOVMF_MMIO);
	if (!sgt)
		dev_dbg(obj->dev, "%s: No sgt\n", __func__);

	return sgt;
}
EXPORT_SYMBOL_GPL(omap_iommu_vunmap);

/**
 * omap_iommu_vmalloc  -  (d)-(p)-(v) address allocator and mapper
 * @domain:	iommu domain
 * @dev:	client device
 * @da:		contiguous iommu virtual memory
 * @bytes:	allocation size
 * @flags:	iovma and page property
 *
 * Allocates @bytes linearly, creates a 1-n-1 mapping and returns
 * @da again, which might be adjusted if 'IOVMF_DA_FIXED' is not set.
 */
u32
omap_iommu_vmalloc(struct iommu_domain *domain, struct device *dev, u32 da,
		   size_t bytes, u32 flags)
{
	struct omap_iommu *obj = dev_to_omap_iommu(dev);
	void *va;
	struct sg_table *sgt;

	if (!obj || !obj->dev || !bytes)
		return -EINVAL;

	bytes = PAGE_ALIGN(bytes);
	va = vmalloc(bytes);
	if (!va)
		return -ENOMEM;

	flags |= IOVMF_DISCONT;
	flags |= IOVMF_ALLOC;

	sgt = sgtable_alloc(bytes, flags, da, 0);
	if (IS_ERR(sgt)) {
		da = PTR_ERR(sgt);
		goto err_sgt_alloc;
	}
	sgtable_fill_vmalloc(sgt, va);

	da = __iommu_vmap(domain, obj, da, sgt, va, bytes, flags);
	if (IS_ERR_VALUE(da))
		goto err_iommu_vmap;
	return da;

err_iommu_vmap:
	sgtable_drain_vmalloc(sgt);
	sgtable_free(sgt);
err_sgt_alloc:
	vfree(va);
	return da;
}
EXPORT_SYMBOL_GPL(omap_iommu_vmalloc);
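
/*
 * Illustrative usage sketch, not part of the original source: when the
 * client has no backing memory of its own, omap_iommu_vmalloc() both
 * allocates the pages (via vmalloc) and maps them, and omap_iommu_vfree()
 * tears everything down again.  'domain' and 'dev' are assumed to exist
 * in the caller.
 *
 *	u32 da = omap_iommu_vmalloc(domain, dev, 0, SZ_1M, 0);
 *	if (IS_ERR_VALUE(da))
 *		return (int)da;
 *	// ... use the area via omap_da_to_va(dev, da) on the MPU side ...
 *	omap_iommu_vfree(domain, dev, da);
 */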

/**
 * omap_iommu_vfree  -  release memory allocated by 'omap_iommu_vmalloc()'
 * @domain:	iommu domain
 * @dev:	client device
 * @da:		iommu device virtual address
 *
 * Frees the iommu virtually contiguous memory area starting at
 * @da, as obtained from 'omap_iommu_vmalloc()'.
 */
void omap_iommu_vfree(struct iommu_domain *domain, struct device *dev,
		      const u32 da)
{
	struct omap_iommu *obj = dev_to_omap_iommu(dev);
	struct sg_table *sgt;

	sgt = unmap_vm_area(domain, obj, da, vfree,
			    IOVMF_DISCONT | IOVMF_ALLOC);
	if (!sgt)
		dev_dbg(obj->dev, "%s: No sgt\n", __func__);
	sgtable_free(sgt);
}
EXPORT_SYMBOL_GPL(omap_iommu_vfree);

static int __init iovmm_init(void)
{
	const unsigned long flags = SLAB_HWCACHE_ALIGN;
	struct kmem_cache *p;

	p = kmem_cache_create("iovm_area_cache", sizeof(struct iovm_struct), 0,
			      flags, NULL);
	if (!p)
		return -ENOMEM;
	iovm_area_cachep = p;

	return 0;
}
module_init(iovmm_init);

static void __exit iovmm_exit(void)
{
	kmem_cache_destroy(iovm_area_cachep);
}
module_exit(iovmm_exit);

MODULE_DESCRIPTION("omap iommu: simple virtual address space management");
MODULE_AUTHOR("Hiroshi DOYU <Hiroshi.DOYU@nokia.com>");
MODULE_LICENSE("GPL v2");