/*
 * omap iommu: simple virtual address space management
 *
 * Copyright (C) 2008-2009 Nokia Corporation
 *
 * Written by Hiroshi DOYU <Hiroshi.DOYU@nokia.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/err.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/device.h>
#include <linux/scatterlist.h>
#include <linux/iommu.h>

#include <asm/cacheflush.h>
#include <asm/mach/map.h>

#include <plat/iommu.h>
#include <plat/iovmm.h>
#include <plat/iopgtable.h>

/*
 * A device driver needs to create address mappings between:
 *
 * - iommu/device address
 * - physical address
 * - mpu virtual address
 *
 * There are 4 possible patterns for them:
 *
 *    |iova/                      mapping         iommu_               page
 *    | da     pa      va         (d)-(p)-(v)     function             type
 * ---------------------------------------------------------------------------
 *  1 | c      c       c           1 - 1 - 1      _kmap() / _kunmap()     s
 *  2 | c      c,a     c           1 - 1 - 1      _kmalloc()/ _kfree()    s
 *  3 | c      d       c           1 - n - 1      _vmap() / _vunmap()     s
 *  4 | c      d,a     c           1 - n - 1      _vmalloc()/ _vfree()    n*
 *
 *      'iova': device iommu virtual address
 *      'da':   alias of 'iova'
 *      'pa':   physical address
 *      'va':   mpu virtual address
 *
 *      'c':    contiguous memory area
 *      'd':    discontiguous memory area
 *      'a':    anonymous memory allocation
 *      '()':   optional feature
 *
 *      'n':    a normal page (4KB) size is used.
 *      's':    multiple iommu superpage (16MB, 1MB, 64KB, 4KB) sizes are used.
 *
 *      '*':    not yet, but feasible.
 */

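/*
 * Example (illustrative sketch, not part of the original driver): how a
 * client driver might use pattern 2 above. The function name is made up;
 * 'domain' and 'obj' are assumed to come from the client's own setup code,
 * and the prototypes used are the ones declared in <plat/iovmm.h>.
 */
static u32 __maybe_unused example_iovmm_kmalloc_usage(struct iommu_domain *domain,
                                                      struct iommu *obj)
{
        u32 da;

        /* allocate 64KB of kernel memory and map it into the device space */
        da = iommu_kmalloc(domain, obj, 0, SZ_64K, 0);
        if (IS_ERR_VALUE(da))
                return da;

        /* ... hand 'da' to the device and run the job ... */

        /* tear the mapping down and free the memory again */
        iommu_kfree(domain, obj, da);
        return 0;
}
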
static struct kmem_cache *iovm_area_cachep;

/* return total bytes of sg buffers */
static size_t sgtable_len(const struct sg_table *sgt)
{
        unsigned int i, total = 0;
        struct scatterlist *sg;

        for_each_sg(sgt->sgl, sg, sgt->nents, i) {
                if (!iopgsz_ok(sg->length)) {
                        pr_err("%s: sg[%d] not iommu pagesize(%x)\n",
                               __func__, i, sg->length);
                        return 0;
                }
                total += sg->length;
        }
        return total;
}
#define sgtable_ok(x)   (!!sgtable_len(x))

static unsigned max_alignment(u32 addr)
{
        int i;
        unsigned pagesize[] = { SZ_16M, SZ_1M, SZ_64K, SZ_4K, };

        for (i = 0; i < ARRAY_SIZE(pagesize) && addr & (pagesize[i] - 1); i++)
                ;
        return (i < ARRAY_SIZE(pagesize)) ? pagesize[i] : 0;
}

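/*
 * For example, max_alignment(0x40001000) is SZ_4K, max_alignment(0x40100000)
 * is SZ_1M, and max_alignment(0) falls through to SZ_16M, the largest
 * supported iommu superpage.
 */
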
/*
 * calculate the optimal number of sg elements from total bytes,
 * based on iommu superpages
 */
static unsigned sgtable_nents(size_t bytes, u32 da, u32 pa)
{
        unsigned nr_entries = 0, ent_sz;

        if (!IS_ALIGNED(bytes, PAGE_SIZE)) {
                pr_err("%s: wrong size %08x\n", __func__, bytes);
                return 0;
        }
        while (bytes) {
                ent_sz = max_alignment(da | pa);
                ent_sz = min_t(unsigned, ent_sz, iopgsz_max(bytes));
                nr_entries++;
                da += ent_sz;
                pa += ent_sz;
                bytes -= ent_sz;
        }
        return nr_entries;
}

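/*
 * For example, 0x11000 bytes (64KB + 4KB) starting at da = pa = 0x80010000
 * take two entries: one 64KB superpage followed by one 4KB page.
 */
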
/* allocate and initialize sg_table header (a kind of 'superblock') */
static struct sg_table *sgtable_alloc(const size_t bytes, u32 flags,
                                      u32 da, u32 pa)
{
        unsigned int nr_entries;
        int err;
        struct sg_table *sgt;

        if (!bytes)
                return ERR_PTR(-EINVAL);
        if (!IS_ALIGNED(bytes, PAGE_SIZE))
                return ERR_PTR(-EINVAL);

        if (flags & IOVMF_LINEAR) {
                nr_entries = sgtable_nents(bytes, da, pa);
                if (!nr_entries)
                        return ERR_PTR(-EINVAL);
        } else {
                nr_entries = bytes / PAGE_SIZE;
        }

        sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
        if (!sgt)
                return ERR_PTR(-ENOMEM);

        err = sg_alloc_table(sgt, nr_entries, GFP_KERNEL);
        if (err) {
                kfree(sgt);
                return ERR_PTR(err);
        }

        pr_debug("%s: sgt:%p(%d entries)\n", __func__, sgt, nr_entries);
        return sgt;
}

/* free sg_table header (a kind of superblock) */
static void sgtable_free(struct sg_table *sgt)
{
        if (!sgt)
                return;
        sg_free_table(sgt);
        kfree(sgt);
        pr_debug("%s: sgt:%p\n", __func__, sgt);
}

/* map 'sglist' to a contiguous mpu virtual area and return 'va' */
static void *vmap_sg(const struct sg_table *sgt)
{
        u32 va;
        size_t total;
        unsigned int i;
        struct scatterlist *sg;
        struct vm_struct *new;
        const struct mem_type *mtype;

        mtype = get_mem_type(MT_DEVICE);
        if (!mtype)
                return ERR_PTR(-EINVAL);
        total = sgtable_len(sgt);
        if (!total)
                return ERR_PTR(-EINVAL);

        new = __get_vm_area(total, VM_IOREMAP, VMALLOC_START, VMALLOC_END);
        if (!new)
                return ERR_PTR(-ENOMEM);
        va = (u32)new->addr;

        for_each_sg(sgt->sgl, sg, sgt->nents, i) {
                size_t bytes = sg->length;
                u32 pa = sg_phys(sg);
                int err;

                BUG_ON(bytes != PAGE_SIZE);
                err = ioremap_page(va, pa, mtype);
                if (err)
                        goto err_out;
                va += bytes;
        }

        flush_cache_vmap((unsigned long)new->addr,
                         (unsigned long)(new->addr + total));
        return new->addr;

err_out:
        WARN_ON(1); /* FIXME: cleanup some mpu mappings */
        vunmap(new->addr);
        return ERR_PTR(-EAGAIN);
}

static inline void vunmap_sg(const void *va)
{
        vunmap(va);
}

static struct iovm_struct *__find_iovm_area(struct iommu *obj, const u32 da)
{
        struct iovm_struct *tmp;

        list_for_each_entry(tmp, &obj->mmap, list) {
                if ((da >= tmp->da_start) && (da < tmp->da_end)) {
                        size_t len = tmp->da_end - tmp->da_start;

                        dev_dbg(obj->dev, "%s: %08x-%08x-%08x(%x) %08x\n",
                                __func__, tmp->da_start, da, tmp->da_end, len,
                                tmp->flags);
                        return tmp;
                }
        }
        return NULL;
}

/**
 * find_iovm_area  -  find iovma which includes @da
 * @obj:	objective iommu
 * @da:	iommu device virtual address
 *
 * Find the existing iovma starting at @da
 */
struct iovm_struct *find_iovm_area(struct iommu *obj, u32 da)
{
        struct iovm_struct *area;

        mutex_lock(&obj->mmap_lock);
        area = __find_iovm_area(obj, da);
        mutex_unlock(&obj->mmap_lock);

        return area;
}
EXPORT_SYMBOL_GPL(find_iovm_area);

/*
 * This finds the hole (area) which fits the requested address and len
 * in iovmas mmap, and returns the newly allocated iovma.
 */
static struct iovm_struct *alloc_iovm_area(struct iommu *obj, u32 da,
                                           size_t bytes, u32 flags)
{
        struct iovm_struct *new, *tmp;
        u32 start, prev_end, alignment;

        if (!obj || !bytes)
                return ERR_PTR(-EINVAL);

        start = da;
        alignment = PAGE_SIZE;

        if (~flags & IOVMF_DA_FIXED) {
                /* Don't map address 0 */
                start = obj->da_start ? obj->da_start : alignment;

                if (flags & IOVMF_LINEAR)
                        alignment = iopgsz_max(bytes);
                start = roundup(start, alignment);
        } else if (start < obj->da_start || start > obj->da_end ||
                                        obj->da_end - start < bytes) {
                return ERR_PTR(-EINVAL);
        }

        tmp = NULL;
        if (list_empty(&obj->mmap))
                goto found;

        prev_end = 0;
        list_for_each_entry(tmp, &obj->mmap, list) {
                if (prev_end > start)
                        break;

                if (tmp->da_start > start && (tmp->da_start - start) >= bytes)
                        goto found;

                if (tmp->da_end >= start && ~flags & IOVMF_DA_FIXED)
                        start = roundup(tmp->da_end + 1, alignment);

                prev_end = tmp->da_end;
        }

        if ((start >= prev_end) && (obj->da_end - start >= bytes))
                goto found;

        dev_dbg(obj->dev, "%s: no space to fit %08x(%x) flags: %08x\n",
                __func__, da, bytes, flags);

        return ERR_PTR(-EINVAL);

found:
        new = kmem_cache_zalloc(iovm_area_cachep, GFP_KERNEL);
        if (!new)
                return ERR_PTR(-ENOMEM);

        new->iommu = obj;
        new->da_start = start;
        new->da_end = start + bytes;
        new->flags = flags;

        /* keep ascending order of iovmas */
        if (tmp)
                list_add_tail(&new->list, &tmp->list);
        else
                list_add(&new->list, &obj->mmap);

        dev_dbg(obj->dev, "%s: found %08x-%08x-%08x(%x) %08x\n",
                __func__, new->da_start, start, new->da_end, bytes, flags);

        return new;
}

static void free_iovm_area(struct iommu *obj, struct iovm_struct *area)
{
        size_t bytes;

        BUG_ON(!obj || !area);

        bytes = area->da_end - area->da_start;
        dev_dbg(obj->dev, "%s: %08x-%08x(%x) %08x\n",
                __func__, area->da_start, area->da_end, bytes, area->flags);

        list_del(&area->list);
        kmem_cache_free(iovm_area_cachep, area);
}

/**
 * da_to_va - convert (d) to (v)
 * @obj:	objective iommu
 * @da:	iommu device virtual address
 * @va:	mpu virtual address
 *
 * Returns the mpu virtual addr which corresponds to a given device virtual addr
 */
void *da_to_va(struct iommu *obj, u32 da)
{
        void *va = NULL;
        struct iovm_struct *area;

        mutex_lock(&obj->mmap_lock);

        area = __find_iovm_area(obj, da);
        if (!area) {
                dev_dbg(obj->dev, "%s: no da area(%08x)\n", __func__, da);
                goto out;
        }
        va = area->va;
out:
        mutex_unlock(&obj->mmap_lock);

        return va;
}
EXPORT_SYMBOL_GPL(da_to_va);

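/*
 * Example (illustrative sketch, not part of the original driver): once a
 * buffer has been set up with iommu_vmalloc()/iommu_kmalloc(), da_to_va()
 * gives the MPU-side view of it. The function name is made up; 'obj' and
 * 'da' are assumed to come from the caller.
 */
static void __maybe_unused example_da_to_va_usage(struct iommu *obj, u32 da)
{
        void *va = da_to_va(obj, da);

        if (va)
                pr_info("da %08x is mapped at mpu va %p\n", da, va);
}
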
static void sgtable_fill_vmalloc(struct sg_table *sgt, void *_va)
{
        unsigned int i;
        struct scatterlist *sg;
        void *va = _va;
        void *va_end;

        for_each_sg(sgt->sgl, sg, sgt->nents, i) {
                struct page *pg;
                const size_t bytes = PAGE_SIZE;

                /* iommu 'superpage' isn't supported with 'iommu_vmalloc()' */
                pg = vmalloc_to_page(va);
                BUG_ON(!pg);
                sg_set_page(sg, pg, bytes, 0);
                va += bytes;
        }
        va_end = _va + PAGE_SIZE * i;
}

static inline void sgtable_drain_vmalloc(struct sg_table *sgt)
{
        /* not strictly necessary; kept only for consistency and readability */
        BUG_ON(!sgt);
}

static void sgtable_fill_kmalloc(struct sg_table *sgt, u32 pa, u32 da,
                                 size_t len)
{
        unsigned int i;
        struct scatterlist *sg;

        for_each_sg(sgt->sgl, sg, sgt->nents, i) {
                unsigned bytes;

                bytes = max_alignment(da | pa);
                bytes = min_t(unsigned, bytes, iopgsz_max(len));
                BUG_ON(!iopgsz_ok(bytes));

                sg_set_buf(sg, phys_to_virt(pa), bytes);
                /* 'pa' is continuous (linear) */
                pa += bytes;
                da += bytes;
                len -= bytes;
        }
        BUG_ON(len);
}

static inline void sgtable_drain_kmalloc(struct sg_table *sgt)
{
        /* not strictly necessary; kept only for consistency and readability */
        BUG_ON(!sgt);
}

/* create 'da' <-> 'pa' mapping from 'sgt' */
static int map_iovm_area(struct iommu_domain *domain, struct iovm_struct *new,
                         const struct sg_table *sgt, u32 flags)
{
        int err = -EINVAL, order;
        unsigned int i, j;
        struct scatterlist *sg;
        u32 da = new->da_start;

        if (!domain || !sgt)
                return -EINVAL;
        BUG_ON(!sgtable_ok(sgt));

        for_each_sg(sgt->sgl, sg, sgt->nents, i) {
                u32 pa = sg_phys(sg);
                size_t bytes = sg->length;

                flags &= ~IOVMF_PGSZ_MASK;
                if (bytes_to_iopgsz(bytes) < 0)
                        goto err_out;
                order = get_order(bytes);

                pr_debug("%s: [%d] %08x %08x(%x)\n", __func__,
                         i, da, pa, bytes);

                err = iommu_map(domain, da, pa, order, flags);
                if (err)
                        goto err_out;
                da += bytes;
        }
        return 0;

err_out:
        da = new->da_start;
        for_each_sg(sgt->sgl, sg, i, j) {
                size_t bytes = sg->length;

                order = get_order(bytes);
                /* ignore failures.. we're already handling one */
                iommu_unmap(domain, da, order);
                da += bytes;
        }
        return err;
}

/* release 'da' <-> 'pa' mapping */
static void unmap_iovm_area(struct iommu_domain *domain, struct iommu *obj,
                            struct iovm_struct *area)
{
        u32 start;
        size_t total = area->da_end - area->da_start;
        const struct sg_table *sgt = area->sgt;
        struct scatterlist *sg;
        int i, err;

        BUG_ON(!sgtable_ok(sgt));
        BUG_ON((!total) || !IS_ALIGNED(total, PAGE_SIZE));

        start = area->da_start;
        for_each_sg(sgt->sgl, sg, sgt->nents, i) {
                size_t bytes = sg->length;
                int order = get_order(bytes);

                err = iommu_unmap(domain, start, order);
                if (err < 0)
                        break;

                dev_dbg(obj->dev, "%s: unmap %08x(%x) %08x\n",
                        __func__, start, bytes, area->flags);

                BUG_ON(!IS_ALIGNED(bytes, PAGE_SIZE));
                total -= bytes;
                start += bytes;
        }
        BUG_ON(total);
}

/* template function for all unmapping */
static struct sg_table *unmap_vm_area(struct iommu_domain *domain,
                                      struct iommu *obj, const u32 da,
                                      void (*fn)(const void *), u32 flags)
{
        struct sg_table *sgt = NULL;
        struct iovm_struct *area;

        if (!IS_ALIGNED(da, PAGE_SIZE)) {
                dev_err(obj->dev, "%s: alignment err(%08x)\n", __func__, da);
                return NULL;
        }

        mutex_lock(&obj->mmap_lock);

        area = __find_iovm_area(obj, da);
        if (!area) {
                dev_dbg(obj->dev, "%s: no da area(%08x)\n", __func__, da);
                goto out;
        }

        if ((area->flags & flags) != flags) {
                dev_err(obj->dev, "%s: wrong flags(%08x)\n", __func__,
                        area->flags);
                goto out;
        }
        sgt = (struct sg_table *)area->sgt;

        unmap_iovm_area(domain, obj, area);

        fn(area->va);

        dev_dbg(obj->dev, "%s: %08x-%08x-%08x(%x) %08x\n", __func__,
                area->da_start, da, area->da_end,
                area->da_end - area->da_start, area->flags);

        free_iovm_area(obj, area);
out:
        mutex_unlock(&obj->mmap_lock);

        return sgt;
}

static u32 map_iommu_region(struct iommu_domain *domain, struct iommu *obj,
                            u32 da, const struct sg_table *sgt, void *va,
                            size_t bytes, u32 flags)
{
        int err = -ENOMEM;
        struct iovm_struct *new;

        mutex_lock(&obj->mmap_lock);

        new = alloc_iovm_area(obj, da, bytes, flags);
        if (IS_ERR(new)) {
                err = PTR_ERR(new);
                goto err_alloc_iovma;
        }
        new->va = va;
        new->sgt = sgt;

        if (map_iovm_area(domain, new, sgt, new->flags))
                goto err_map;

        mutex_unlock(&obj->mmap_lock);

        dev_dbg(obj->dev, "%s: da:%08x(%x) flags:%08x va:%p\n",
                __func__, new->da_start, bytes, new->flags, va);

        return new->da_start;

err_map:
        free_iovm_area(obj, new);
err_alloc_iovma:
        mutex_unlock(&obj->mmap_lock);
        return err;
}

static inline u32 __iommu_vmap(struct iommu_domain *domain, struct iommu *obj,
                               u32 da, const struct sg_table *sgt,
                               void *va, size_t bytes, u32 flags)
{
        return map_iommu_region(domain, obj, da, sgt, va, bytes, flags);
}

/**
 * iommu_vmap  -  (d)-(p)-(v) address mapper
 * @obj:	objective iommu
 * @sgt:	address of scatter gather table
 * @flags:	iovma and page property
 *
 * Creates a 1-n-1 mapping with the given @sgt and returns @da.
 * All @sgt elements must be io page size aligned.
 */
u32 iommu_vmap(struct iommu_domain *domain, struct iommu *obj, u32 da,
               const struct sg_table *sgt, u32 flags)
{
        size_t bytes;
        void *va = NULL;

        if (!obj || !obj->dev || !sgt)
                return -EINVAL;

        bytes = sgtable_len(sgt);
        if (!bytes)
                return -EINVAL;
        bytes = PAGE_ALIGN(bytes);

        if (flags & IOVMF_MMIO) {
                va = vmap_sg(sgt);
                if (IS_ERR(va))
                        return PTR_ERR(va);
        }

        flags |= IOVMF_DISCONT;
        flags |= IOVMF_MMIO;

        da = __iommu_vmap(domain, obj, da, sgt, va, bytes, flags);
        if (IS_ERR_VALUE(da))
                vunmap_sg(va);

        return da;
}
EXPORT_SYMBOL_GPL(iommu_vmap);

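/*
 * Example (illustrative sketch, not part of the original driver): mapping a
 * caller-built scatterlist with iommu_vmap() and tearing it down again. The
 * function name is made up; 'sgt' is assumed to be filled by the caller, with
 * every element io page size aligned, and it stays owned by the caller
 * because iommu_vunmap() only hands it back.
 */
static int __maybe_unused example_iommu_vmap_usage(struct iommu_domain *domain,
                                                   struct iommu *obj,
                                                   struct sg_table *sgt)
{
        u32 da;

        da = iommu_vmap(domain, obj, 0, sgt, IOVMF_MMIO);
        if (IS_ERR_VALUE(da))
                return (int)da;

        /* ... the device works on 'da' ... */

        /* the same 'sgt' is returned here; the caller remains responsible for it */
        iommu_vunmap(domain, obj, da);
        return 0;
}
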
/**
 * iommu_vunmap  -  release virtual mapping obtained by 'iommu_vmap()'
 * @obj:	objective iommu
 * @da:	iommu device virtual address
 *
 * Frees the iommu virtually contiguous memory area starting at
 * @da, which was returned by 'iommu_vmap()'.
 */
struct sg_table *iommu_vunmap(struct iommu_domain *domain, struct iommu *obj,
                              u32 da)
{
        struct sg_table *sgt;
        /*
         * 'sgt' is allocated by the caller before 'iommu_vmap()' is called.
         * Just return 'sgt' to the caller to free.
         */
        sgt = unmap_vm_area(domain, obj, da, vunmap_sg,
                            IOVMF_DISCONT | IOVMF_MMIO);
        if (!sgt)
                dev_dbg(obj->dev, "%s: No sgt\n", __func__);
        return sgt;
}
EXPORT_SYMBOL_GPL(iommu_vunmap);

/**
 * iommu_vmalloc  -  (d)-(p)-(v) address allocator and mapper
 * @obj:	objective iommu
 * @da:	contiguous iommu virtual memory
 * @bytes:	allocation size
 * @flags:	iovma and page property
 *
 * Allocates @bytes linearly, creates a 1-n-1 mapping and returns
 * @da again, which might be adjusted if 'IOVMF_DA_FIXED' is not set.
 */
u32 iommu_vmalloc(struct iommu_domain *domain, struct iommu *obj, u32 da,
                  size_t bytes, u32 flags)
{
        void *va;
        struct sg_table *sgt;

        if (!obj || !obj->dev || !bytes)
                return -EINVAL;

        bytes = PAGE_ALIGN(bytes);
        va = vmalloc(bytes);
        if (!va)
                return -ENOMEM;

        flags |= IOVMF_DISCONT;
        flags |= IOVMF_ALLOC;

        sgt = sgtable_alloc(bytes, flags, da, 0);
        if (IS_ERR(sgt)) {
                da = PTR_ERR(sgt);
                goto err_sgt_alloc;
        }
        sgtable_fill_vmalloc(sgt, va);

        da = __iommu_vmap(domain, obj, da, sgt, va, bytes, flags);
        if (IS_ERR_VALUE(da))
                goto err_iommu_vmap;

        return da;

err_iommu_vmap:
        sgtable_drain_vmalloc(sgt);
        sgtable_free(sgt);
err_sgt_alloc:
        vfree(va);
        return da;
}
EXPORT_SYMBOL_GPL(iommu_vmalloc);

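/*
 * Example (illustrative sketch, not part of the original driver): a large,
 * physically discontiguous working buffer created with iommu_vmalloc() and
 * released with iommu_vfree(). The function name is made up; 'domain' and
 * 'obj' are assumed to come from the caller.
 */
static u32 __maybe_unused example_iommu_vmalloc_usage(struct iommu_domain *domain,
                                                      struct iommu *obj)
{
        u32 da;

        da = iommu_vmalloc(domain, obj, 0, SZ_1M, 0);
        if (IS_ERR_VALUE(da))
                return da;

        /* ... access it via da_to_va(obj, da) from the MPU, or from the device ... */

        iommu_vfree(domain, obj, da);
        return 0;
}
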
/**
 * iommu_vfree  -  release memory allocated by 'iommu_vmalloc()'
 * @obj:	objective iommu
 * @da:	iommu device virtual address
 *
 * Frees the iommu virtually contiguous memory area starting at
 * @da, as obtained from 'iommu_vmalloc()'.
 */
void iommu_vfree(struct iommu_domain *domain, struct iommu *obj, const u32 da)
{
        struct sg_table *sgt;

        sgt = unmap_vm_area(domain, obj, da, vfree,
                            IOVMF_DISCONT | IOVMF_ALLOC);
        if (!sgt)
                dev_dbg(obj->dev, "%s: No sgt\n", __func__);
        sgtable_free(sgt);
}
EXPORT_SYMBOL_GPL(iommu_vfree);

static u32 __iommu_kmap(struct iommu_domain *domain, struct iommu *obj,
                        u32 da, u32 pa, void *va, size_t bytes, u32 flags)
{
        struct sg_table *sgt;

        sgt = sgtable_alloc(bytes, flags, da, pa);
        if (IS_ERR(sgt))
                return PTR_ERR(sgt);

        sgtable_fill_kmalloc(sgt, pa, da, bytes);

        da = map_iommu_region(domain, obj, da, sgt, va, bytes, flags);
        if (IS_ERR_VALUE(da)) {
                sgtable_drain_kmalloc(sgt);
                sgtable_free(sgt);
        }

        return da;
}

/**
 * iommu_kmap  -  (d)-(p)-(v) address mapper
 * @obj:	objective iommu
 * @da:	contiguous iommu virtual memory
 * @pa:	contiguous physical memory
 * @flags:	iovma and page property
 *
 * Creates a 1-1-1 mapping and returns @da again, which can be
 * adjusted if 'IOVMF_DA_FIXED' is not set.
 */
u32 iommu_kmap(struct iommu_domain *domain, struct iommu *obj, u32 da, u32 pa,
               size_t bytes, u32 flags)
{
        void *va;

        if (!obj || !obj->dev || !bytes)
                return -EINVAL;

        bytes = PAGE_ALIGN(bytes);
        va = ioremap(pa, bytes);
        if (!va)
                return -ENOMEM;

        flags |= IOVMF_LINEAR;
        flags |= IOVMF_MMIO;

        da = __iommu_kmap(domain, obj, da, pa, va, bytes, flags);
        if (IS_ERR_VALUE(da))
                iounmap(va);

        return da;
}
EXPORT_SYMBOL_GPL(iommu_kmap);

/**
 * iommu_kunmap  -  release virtual mapping obtained by 'iommu_kmap()'
 * @obj:	objective iommu
 * @da:	iommu device virtual address
 *
 * Frees the iommu virtually contiguous memory area starting at
 * @da, which was passed to and was returned by 'iommu_kmap()'.
 */
void iommu_kunmap(struct iommu_domain *domain, struct iommu *obj, u32 da)
{
        struct sg_table *sgt;
        typedef void (*func_t)(const void *);

        sgt = unmap_vm_area(domain, obj, da, (func_t)iounmap,
                            IOVMF_LINEAR | IOVMF_MMIO);
        if (!sgt)
                dev_dbg(obj->dev, "%s: No sgt\n", __func__);
        sgtable_free(sgt);
}
EXPORT_SYMBOL_GPL(iommu_kunmap);

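/*
 * Example (illustrative sketch, not part of the original driver): exposing an
 * already physically contiguous region (e.g. a carveout starting at 'pa') to
 * the device with iommu_kmap(), then removing the mapping with iommu_kunmap().
 * The function name is made up; 'pa' is assumed to be section aligned so that
 * superpages can be used.
 */
static u32 __maybe_unused example_iommu_kmap_usage(struct iommu_domain *domain,
                                                   struct iommu *obj, u32 pa)
{
        u32 da;

        da = iommu_kmap(domain, obj, 0, pa, SZ_1M, 0);
        if (IS_ERR_VALUE(da))
                return da;

        /* ... the device accesses the region through 'da' ... */

        iommu_kunmap(domain, obj, da);
        return 0;
}
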
/**
 * iommu_kmalloc  -  (d)-(p)-(v) address allocator and mapper
 * @obj:	objective iommu
 * @da:	contiguous iommu virtual memory
 * @bytes:	bytes for allocation
 * @flags:	iovma and page property
 *
 * Allocates @bytes linearly, creates a 1-1-1 mapping and returns
 * @da again, which might be adjusted if 'IOVMF_DA_FIXED' is not set.
 */
u32 iommu_kmalloc(struct iommu_domain *domain, struct iommu *obj, u32 da,
                  size_t bytes, u32 flags)
{
        void *va;
        u32 pa;

        if (!obj || !obj->dev || !bytes)
                return -EINVAL;

        bytes = PAGE_ALIGN(bytes);
        va = kmalloc(bytes, GFP_KERNEL | GFP_DMA);
        if (!va)
                return -ENOMEM;
        pa = virt_to_phys(va);

        flags |= IOVMF_LINEAR;
        flags |= IOVMF_ALLOC;

        da = __iommu_kmap(domain, obj, da, pa, va, bytes, flags);
        if (IS_ERR_VALUE(da))
                kfree(va);

        return da;
}
EXPORT_SYMBOL_GPL(iommu_kmalloc);

/**
 * iommu_kfree  -  release virtual mapping obtained by 'iommu_kmalloc()'
 * @obj:	objective iommu
 * @da:	iommu device virtual address
 *
 * Frees the iommu virtually contiguous memory area starting at
 * @da, which was passed to and was returned by 'iommu_kmalloc()'.
 */
void iommu_kfree(struct iommu_domain *domain, struct iommu *obj, u32 da)
{
        struct sg_table *sgt;

        sgt = unmap_vm_area(domain, obj, da, kfree, IOVMF_LINEAR | IOVMF_ALLOC);
        if (!sgt)
                dev_dbg(obj->dev, "%s: No sgt\n", __func__);
        sgtable_free(sgt);
}
EXPORT_SYMBOL_GPL(iommu_kfree);

static int __init iovmm_init(void)
{
        const unsigned long flags = SLAB_HWCACHE_ALIGN;
        struct kmem_cache *p;

        p = kmem_cache_create("iovm_area_cache", sizeof(struct iovm_struct), 0,
                              flags, NULL);
        if (!p)
                return -ENOMEM;
        iovm_area_cachep = p;

        return 0;
}
module_init(iovmm_init);

static void __exit iovmm_exit(void)
{
        kmem_cache_destroy(iovm_area_cachep);
}
module_exit(iovmm_exit);

MODULE_DESCRIPTION("omap iommu: simple virtual address space management");
MODULE_AUTHOR("Hiroshi DOYU <Hiroshi.DOYU@nokia.com>");
MODULE_LICENSE("GPL v2");