/*
 * omap iommu: simple virtual address space management
 *
 * Copyright (C) 2008-2009 Nokia Corporation
 *
 * Written by Hiroshi DOYU <Hiroshi.DOYU@nokia.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/device.h>
#include <linux/scatterlist.h>
#include <linux/iommu.h>

#include <asm/cacheflush.h>
#include <asm/mach/map.h>

#include <plat/iommu.h>
#include <plat/iovmm.h>

#include <plat/iopgtable.h>
static struct kmem_cache *iovm_area_cachep;
/* return total bytes of sg buffers */
static size_t sgtable_len(const struct sg_table *sgt)
{
	unsigned int i, total = 0;
	struct scatterlist *sg;

	if (!sgt)
		return 0;

	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
		size_t bytes = sg->length;

		if (!iopgsz_ok(bytes)) {
			pr_err("%s: sg[%d] not iommu pagesize(%x)\n",
			       __func__, i, bytes);
			return 0;
		}

		total += bytes;
	}

	return total;
}
#define sgtable_ok(x)	(!!sgtable_len(x))
static unsigned max_alignment(u32 addr)
{
	int i;
	unsigned pagesize[] = { SZ_16M, SZ_1M, SZ_64K, SZ_4K, };

	for (i = 0; i < ARRAY_SIZE(pagesize) && addr & (pagesize[i] - 1); i++)
		;
	return (i < ARRAY_SIZE(pagesize)) ? pagesize[i] : 0;
}
/*
 * calculate the optimal number of sg elements from total bytes based on
 * iommu superpages
 */
static unsigned sgtable_nents(size_t bytes, u32 da, u32 pa)
{
	unsigned nr_entries = 0, ent_sz;

	if (!IS_ALIGNED(bytes, PAGE_SIZE)) {
		pr_err("%s: wrong size %08x\n", __func__, bytes);
		return 0;
	}

	while (bytes) {
		ent_sz = max_alignment(da | pa);
		ent_sz = min_t(unsigned, ent_sz, iopgsz_max(bytes));

		da += ent_sz;
		pa += ent_sz;
		bytes -= ent_sz;
		nr_entries++;
	}

	return nr_entries;
}
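
/*
 * Worked example (illustrative sketch, not part of the original file;
 * the function name is hypothetical): with da = pa = 0x800000
 * (1MiB-aligned) and bytes = SZ_1M + SZ_64K, max_alignment(da | pa)
 * yields SZ_1M for the first entry, and the 64KiB remainder becomes a
 * second entry, so sgtable_nents() returns 2.
 */
static inline void sgtable_nents_example(void)
{
	unsigned nents = sgtable_nents(SZ_1M + SZ_64K, 0x800000, 0x800000);

	pr_debug("%s: expected 2 superpage entries, got %u\n",
		 __func__, nents);
}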
/* allocate and initialize sg_table header (a kind of 'superblock') */
static struct sg_table *sgtable_alloc(const size_t bytes, u32 flags,
							u32 da, u32 pa)
{
	unsigned int nr_entries;
	int err;
	struct sg_table *sgt;

	if (!bytes)
		return ERR_PTR(-EINVAL);

	if (!IS_ALIGNED(bytes, PAGE_SIZE))
		return ERR_PTR(-EINVAL);

	if (flags & IOVMF_LINEAR) {
		nr_entries = sgtable_nents(bytes, da, pa);
		if (!nr_entries)
			return ERR_PTR(-EINVAL);
	} else
		nr_entries = bytes / PAGE_SIZE;

	sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt)
		return ERR_PTR(-ENOMEM);

	err = sg_alloc_table(sgt, nr_entries, GFP_KERNEL);
	if (err) {
		kfree(sgt);
		return ERR_PTR(err);
	}

	pr_debug("%s: sgt:%p(%d entries)\n", __func__, sgt, nr_entries);

	return sgt;
}
/* free sg_table header (a kind of 'superblock') */
static void sgtable_free(struct sg_table *sgt)
{
	if (!sgt)
		return;

	sg_free_table(sgt);
	kfree(sgt);

	pr_debug("%s: sgt:%p\n", __func__, sgt);
}
/* map 'sglist' to a contiguous mpu virtual area and return 'va' */
static void *vmap_sg(const struct sg_table *sgt)
{
	u32 va;
	size_t total;
	unsigned int i;
	struct scatterlist *sg;
	struct vm_struct *new;
	const struct mem_type *mtype;

	mtype = get_mem_type(MT_DEVICE);
	if (!mtype)
		return ERR_PTR(-EINVAL);

	total = sgtable_len(sgt);
	if (!total)
		return ERR_PTR(-EINVAL);

	new = __get_vm_area(total, VM_IOREMAP, VMALLOC_START, VMALLOC_END);
	if (!new)
		return ERR_PTR(-ENOMEM);
	va = (u32)new->addr;

	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
		size_t bytes = sg->length;
		u32 pa = sg_phys(sg);
		int err;

		BUG_ON(bytes != PAGE_SIZE);

		err = ioremap_page(va, pa, mtype);
		if (err)
			goto err_out;

		va += bytes;
	}

	flush_cache_vmap((unsigned long)new->addr,
				(unsigned long)(new->addr + total));
	return new->addr;

err_out:
	WARN_ON(1); /* FIXME: cleanup some mpu mappings */
	vunmap(new->addr);
	return ERR_PTR(-EAGAIN);
}
static inline void vunmap_sg(const void *va)
{
	vunmap(va);
}
static struct iovm_struct *__find_iovm_area(struct iommu *obj, const u32 da)
{
	struct iovm_struct *tmp;

	list_for_each_entry(tmp, &obj->mmap, list) {
		if ((da >= tmp->da_start) && (da < tmp->da_end)) {
			size_t len;

			len = tmp->da_end - tmp->da_start;

			dev_dbg(obj->dev, "%s: %08x-%08x-%08x(%x) %08x\n",
				__func__, tmp->da_start, da, tmp->da_end, len,
				tmp->flags);

			return tmp;
		}
	}

	return NULL;
}
/**
 * find_iovm_area  -  find iovma which includes @da
 * @obj:	objective iommu
 * @da:		iommu device virtual address
 *
 * Find the existing iovma starting at @da
 */
struct iovm_struct *find_iovm_area(struct iommu *obj, u32 da)
{
	struct iovm_struct *area;

	mutex_lock(&obj->mmap_lock);
	area = __find_iovm_area(obj, da);
	mutex_unlock(&obj->mmap_lock);

	return area;
}
EXPORT_SYMBOL_GPL(find_iovm_area);
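
/*
 * Usage sketch (illustrative, not part of the original file; the
 * function name is hypothetical): report the bounds of the iovma
 * covering @da.  Assumes 'obj' was obtained elsewhere, e.g. via
 * iommu_get().  Note the returned pointer is only a snapshot; the
 * area can be freed once obj->mmap_lock is dropped.
 */
static inline void find_iovm_area_example(struct iommu *obj, u32 da)
{
	struct iovm_struct *area = find_iovm_area(obj, da);

	if (!area) {
		dev_dbg(obj->dev, "no iovma covers %08x\n", da);
		return;
	}

	dev_dbg(obj->dev, "%08x lies in [%08x, %08x)\n",
		da, area->da_start, area->da_end);
}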
/*
 * This finds the hole (area) which fits the requested address and len
 * in iovmas mmap, and returns the new allocated iovma.
 */
static struct iovm_struct *alloc_iovm_area(struct iommu *obj, u32 da,
					   size_t bytes, u32 flags)
{
	struct iovm_struct *new, *tmp;
	u32 start, prev_end, alignment;

	if (!obj || !bytes)
		return ERR_PTR(-EINVAL);

	start = da;
	alignment = PAGE_SIZE;

	if (~flags & IOVMF_DA_FIXED) {
		/* Don't map address 0 */
		start = obj->da_start ? obj->da_start : alignment;

		if (flags & IOVMF_LINEAR)
			alignment = iopgsz_max(bytes);
		start = roundup(start, alignment);
	} else if (start < obj->da_start || start > obj->da_end ||
					obj->da_end - start < bytes) {
		return ERR_PTR(-EINVAL);
	}

	tmp = NULL;
	if (list_empty(&obj->mmap))
		goto found;

	prev_end = 0;
	list_for_each_entry(tmp, &obj->mmap, list) {

		if (prev_end > start)
			break;

		if (tmp->da_start > start && (tmp->da_start - start) >= bytes)
			goto found;

		if (tmp->da_end >= start && ~flags & IOVMF_DA_FIXED)
			start = roundup(tmp->da_end + 1, alignment);

		prev_end = tmp->da_end;
	}

	if ((start >= prev_end) && (obj->da_end - start >= bytes))
		goto found;

	dev_dbg(obj->dev, "%s: no space to fit %08x(%x) flags: %08x\n",
		__func__, da, bytes, flags);

	return ERR_PTR(-EINVAL);

found:
	new = kmem_cache_zalloc(iovm_area_cachep, GFP_KERNEL);
	if (!new)
		return ERR_PTR(-ENOMEM);

	new->iommu = obj;
	new->da_start = start;
	new->da_end = start + bytes;
	new->flags = flags;

	/*
	 * keep ascending order of iovmas
	 */
	if (tmp)
		list_add_tail(&new->list, &tmp->list);
	else
		list_add(&new->list, &obj->mmap);

	dev_dbg(obj->dev, "%s: found %08x-%08x-%08x(%x) %08x\n",
		__func__, new->da_start, start, new->da_end, bytes, flags);

	return new;
}
static void free_iovm_area(struct iommu *obj, struct iovm_struct *area)
{
	size_t bytes;

	BUG_ON(!obj || !area);

	bytes = area->da_end - area->da_start;

	dev_dbg(obj->dev, "%s: %08x-%08x(%x) %08x\n",
		__func__, area->da_start, area->da_end, bytes, area->flags);

	list_del(&area->list);
	kmem_cache_free(iovm_area_cachep, area);
}
/**
 * da_to_va - convert (d) to (v)
 * @obj:	objective iommu
 * @da:		iommu device virtual address
 *
 * Returns mpu virtual addr which corresponds to a given device virtual addr
 */
void *da_to_va(struct iommu *obj, u32 da)
{
	void *va = NULL;
	struct iovm_struct *area;

	mutex_lock(&obj->mmap_lock);

	area = __find_iovm_area(obj, da);
	if (!area) {
		dev_dbg(obj->dev, "%s: no da area(%08x)\n", __func__, da);
		goto out;
	}
	va = area->va;
out:
	mutex_unlock(&obj->mmap_lock);

	return va;
}
EXPORT_SYMBOL_GPL(da_to_va);
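
/*
 * Usage sketch (illustrative, not part of the original file; the
 * function name is hypothetical): CPU-side access to an iovma through
 * its MPU alias.  Assumes @da and @len stay within one mapped area.
 */
static inline void da_to_va_example(struct iommu *obj, u32 da, size_t len)
{
	void *va = da_to_va(obj, da);

	if (!va) {
		dev_warn(obj->dev, "%s: nothing mapped at %08x\n",
			 __func__, da);
		return;
	}

	memset(va, 0, len);	/* plain CPU store via the vmalloc/vmap alias */
}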
static void sgtable_fill_vmalloc(struct sg_table *sgt, void *_va)
{
	unsigned int i;
	struct scatterlist *sg;
	void *va = _va;
	void *va_end;

	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
		struct page *pg;
		const size_t bytes = PAGE_SIZE;

		/*
		 * iommu 'superpage' isn't supported with 'iommu_vmalloc()'
		 */
		pg = vmalloc_to_page(va);
		BUG_ON(!pg);
		sg_set_page(sg, pg, bytes, 0);

		va += bytes;
	}

	va_end = _va + PAGE_SIZE * i;
}
static inline void sgtable_drain_vmalloc(struct sg_table *sgt)
{
	/*
	 * Actually this is not necessary at all, just exists for
	 * consistency of the code readability.
	 */
	BUG_ON(!sgt);
}
/* create 'da' <-> 'pa' mapping from 'sgt' */
static int map_iovm_area(struct iommu_domain *domain, struct iovm_struct *new,
			 const struct sg_table *sgt, u32 flags)
{
	int err = -EINVAL;
	unsigned int i, j;
	struct scatterlist *sg;
	u32 da = new->da_start;
	int order;

	if (!domain || !sgt)
		return -EINVAL;

	BUG_ON(!sgtable_ok(sgt));

	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
		u32 pa = sg_phys(sg);
		size_t bytes = sg->length;

		flags &= ~IOVMF_PGSZ_MASK;

		if (bytes_to_iopgsz(bytes) < 0)
			goto err_out;

		order = get_order(bytes);

		pr_debug("%s: [%d] %08x %08x(%x)\n", __func__,
			 i, da, pa, bytes);

		err = iommu_map(domain, da, pa, order, flags);
		if (err)
			goto err_out;

		da += bytes;
	}
	return 0;

err_out:
	da = new->da_start;

	for_each_sg(sgt->sgl, sg, i, j) {
		size_t bytes = sg->length;

		order = get_order(bytes);

		/* ignore failures.. we're already handling one */
		iommu_unmap(domain, da, order);

		da += bytes;
	}
	return err;
}
/* release 'da' <-> 'pa' mapping */
static void unmap_iovm_area(struct iommu_domain *domain, struct iommu *obj,
						struct iovm_struct *area)
{
	u32 start;
	size_t total = area->da_end - area->da_start;
	const struct sg_table *sgt = area->sgt;
	struct scatterlist *sg;
	int i, err;

	BUG_ON(!sgtable_ok(sgt));
	BUG_ON((!total) || !IS_ALIGNED(total, PAGE_SIZE));

	start = area->da_start;
	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
		size_t bytes = sg->length;
		int order = get_order(bytes);

		err = iommu_unmap(domain, start, order);
		if (err < 0)
			break;

		dev_dbg(obj->dev, "%s: unmap %08x(%x) %08x\n",
				__func__, start, bytes, area->flags);

		BUG_ON(!IS_ALIGNED(bytes, PAGE_SIZE));

		total -= bytes;
		start += bytes;
	}
	BUG_ON(total);
}
/* template function for all unmapping */
static struct sg_table *unmap_vm_area(struct iommu_domain *domain,
				      struct iommu *obj, const u32 da,
				      void (*fn)(const void *), u32 flags)
{
	struct sg_table *sgt = NULL;
	struct iovm_struct *area;

	if (!IS_ALIGNED(da, PAGE_SIZE)) {
		dev_err(obj->dev, "%s: alignment err(%08x)\n", __func__, da);
		return NULL;
	}

	mutex_lock(&obj->mmap_lock);

	area = __find_iovm_area(obj, da);
	if (!area) {
		dev_dbg(obj->dev, "%s: no da area(%08x)\n", __func__, da);
		goto out;
	}

	if ((area->flags & flags) != flags) {
		dev_err(obj->dev, "%s: wrong flags(%08x)\n", __func__,
			area->flags);
		goto out;
	}
	sgt = (struct sg_table *)area->sgt;

	unmap_iovm_area(domain, obj, area);

	fn(area->va);

	dev_dbg(obj->dev, "%s: %08x-%08x-%08x(%x) %08x\n", __func__,
		area->da_start, da, area->da_end,
		area->da_end - area->da_start, area->flags);

	free_iovm_area(obj, area);
out:
	mutex_unlock(&obj->mmap_lock);

	return sgt;
}
static u32 map_iommu_region(struct iommu_domain *domain, struct iommu *obj,
				u32 da, const struct sg_table *sgt, void *va,
				size_t bytes, u32 flags)
{
	int err = -ENOMEM;
	struct iovm_struct *new;

	mutex_lock(&obj->mmap_lock);

	new = alloc_iovm_area(obj, da, bytes, flags);
	if (IS_ERR(new)) {
		err = PTR_ERR(new);
		goto err_alloc_iovma;
	}
	new->va = va;
	new->sgt = sgt;

	if (map_iovm_area(domain, new, sgt, new->flags))
		goto err_map;

	mutex_unlock(&obj->mmap_lock);

	dev_dbg(obj->dev, "%s: da:%08x(%x) flags:%08x va:%p\n",
		__func__, new->da_start, bytes, new->flags, va);

	return new->da_start;

err_map:
	free_iovm_area(obj, new);
err_alloc_iovma:
	mutex_unlock(&obj->mmap_lock);
	return err;
}
static inline u32 __iommu_vmap(struct iommu_domain *domain, struct iommu *obj,
				u32 da, const struct sg_table *sgt,
				void *va, size_t bytes, u32 flags)
{
	return map_iommu_region(domain, obj, da, sgt, va, bytes, flags);
}
/**
 * iommu_vmap  -  (d)-(p)-(v) address mapper
 * @domain:	iommu domain
 * @obj:	objective iommu
 * @da:		requested iommu device virtual address
 * @sgt:	address of scatter gather table
 * @flags:	iovma and page property
 *
 * Creates 1-n-1 mapping with given @sgt and returns @da.
 * All @sgt elements must be io page size aligned.
 */
u32 iommu_vmap(struct iommu_domain *domain, struct iommu *obj, u32 da,
		const struct sg_table *sgt, u32 flags)
{
	size_t bytes;
	void *va = NULL;

	if (!obj || !obj->dev || !sgt)
		return -EINVAL;

	bytes = sgtable_len(sgt);
	if (!bytes)
		return -EINVAL;
	bytes = PAGE_ALIGN(bytes);

	if (flags & IOVMF_MMIO) {
		va = vmap_sg(sgt);
		if (IS_ERR(va))
			return PTR_ERR(va);
	}

	flags |= IOVMF_DISCONT;
	flags |= IOVMF_MMIO;

	da = __iommu_vmap(domain, obj, da, sgt, va, bytes, flags);
	if (IS_ERR_VALUE(da))
		vunmap_sg(va);

	return da;
}
EXPORT_SYMBOL_GPL(iommu_vmap);
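
/*
 * Usage sketch (illustrative, not part of the original file; the
 * function name is hypothetical): map a caller-built sg_table of MMIO
 * pages, let the allocator pick 'da' (no IOVMF_DA_FIXED), then tear
 * the mapping down.  'domain' and 'obj' setup (iommu_domain_alloc(),
 * attach, iommu_get()) is assumed to happen elsewhere.
 */
static inline int iommu_vmap_example(struct iommu_domain *domain,
				     struct iommu *obj,
				     const struct sg_table *sgt)
{
	u32 da;

	da = iommu_vmap(domain, obj, 0, sgt, IOVMF_MMIO);
	if (IS_ERR_VALUE(da))
		return (int)da;

	/* ... the device can now use [da, da + sgtable_len(sgt)) ... */

	/* ownership of 'sgt' returns to the caller on unmap */
	iommu_vunmap(domain, obj, da);
	return 0;
}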
/**
 * iommu_vunmap  -  release virtual mapping obtained by 'iommu_vmap()'
 * @domain:	iommu domain
 * @obj:	objective iommu
 * @da:		iommu device virtual address
 *
 * Free the iommu virtually contiguous memory area starting at
 * @da, which was returned by 'iommu_vmap()'.
 */
struct sg_table *
iommu_vunmap(struct iommu_domain *domain, struct iommu *obj, u32 da)
{
	struct sg_table *sgt;
	/*
	 * 'sgt' is allocated before 'iommu_vmap()' is called.
	 * Just returns 'sgt' to the caller to free
	 */
	sgt = unmap_vm_area(domain, obj, da, vunmap_sg,
					IOVMF_DISCONT | IOVMF_MMIO);
	if (!sgt)
		dev_dbg(obj->dev, "%s: No sgt\n", __func__);
	return sgt;
}
EXPORT_SYMBOL_GPL(iommu_vunmap);
/**
 * iommu_vmalloc  -  (d)-(p)-(v) address allocator and mapper
 * @domain:	iommu domain
 * @obj:	objective iommu
 * @da:		contiguous iommu virtual memory
 * @bytes:	allocation size
 * @flags:	iovma and page property
 *
 * Allocates @bytes linearly and creates 1-n-1 mapping and returns
 * @da again, which might be adjusted if 'IOVMF_DA_FIXED' is not set.
 */
u32 iommu_vmalloc(struct iommu_domain *domain, struct iommu *obj, u32 da,
						size_t bytes, u32 flags)
{
	void *va;
	struct sg_table *sgt;

	if (!obj || !obj->dev || !bytes)
		return -EINVAL;

	bytes = PAGE_ALIGN(bytes);

	va = vmalloc(bytes);
	if (!va)
		return -ENOMEM;

	flags |= IOVMF_DISCONT;
	flags |= IOVMF_ALLOC;

	sgt = sgtable_alloc(bytes, flags, da, 0);
	if (IS_ERR(sgt)) {
		da = PTR_ERR(sgt);
		goto err_sgt_alloc;
	}
	sgtable_fill_vmalloc(sgt, va);

	da = __iommu_vmap(domain, obj, da, sgt, va, bytes, flags);
	if (IS_ERR_VALUE(da))
		goto err_iommu_vmap;

	return da;

err_iommu_vmap:
	sgtable_drain_vmalloc(sgt);
	sgtable_free(sgt);
err_sgt_alloc:
	vfree(va);
	return da;
}
EXPORT_SYMBOL_GPL(iommu_vmalloc);
/**
 * iommu_vfree  -  release memory allocated by 'iommu_vmalloc()'
 * @domain:	iommu domain
 * @obj:	objective iommu
 * @da:		iommu device virtual address
 *
 * Frees the iommu virtually contiguous memory area starting at
 * @da, as obtained from 'iommu_vmalloc()'.
 */
void iommu_vfree(struct iommu_domain *domain, struct iommu *obj, const u32 da)
{
	struct sg_table *sgt;

	sgt = unmap_vm_area(domain, obj, da, vfree,
						IOVMF_DISCONT | IOVMF_ALLOC);
	if (!sgt)
		dev_dbg(obj->dev, "%s: No sgt\n", __func__);
	sgtable_free(sgt);
}
EXPORT_SYMBOL_GPL(iommu_vfree);
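
/*
 * End-to-end sketch (illustrative, not part of the original file; the
 * function name is hypothetical): allocate 1MiB of discontiguous pages
 * for the device, zero them through the MPU alias, then release both
 * the mapping and the pages.  Error handling is deliberately minimal;
 * 'domain'/'obj' setup is assumed done elsewhere.
 */
static inline int iovmm_lifecycle_example(struct iommu_domain *domain,
					  struct iommu *obj)
{
	u32 da;
	void *va;

	da = iommu_vmalloc(domain, obj, 0, SZ_1M, 0);
	if (IS_ERR_VALUE(da))
		return (int)da;

	va = da_to_va(obj, da);		/* CPU-visible alias of the pages */
	if (va)
		memset(va, 0, SZ_1M);

	iommu_vfree(domain, obj, da);	/* unmaps and frees pages + sgt */
	return 0;
}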
static int __init iovmm_init(void)
{
	const unsigned long flags = SLAB_HWCACHE_ALIGN;
	struct kmem_cache *p;

	p = kmem_cache_create("iovm_area_cache", sizeof(struct iovm_struct), 0,
			      flags, NULL);
	if (!p)
		return -ENOMEM;
	iovm_area_cachep = p;

	return 0;
}
module_init(iovmm_init);
static void __exit iovmm_exit(void)
{
	kmem_cache_destroy(iovm_area_cachep);
}
module_exit(iovmm_exit);
MODULE_DESCRIPTION("omap iommu: simple virtual address space management");
MODULE_AUTHOR("Hiroshi DOYU <Hiroshi.DOYU@nokia.com>");
MODULE_LICENSE("GPL v2");