2 * This program is free software; you can redistribute it and/or modify
3 * it under the terms of the GNU General Public License version 2 as
4 * published by the Free Software Foundation.
7 #ifdef CONFIG_ROCKCHIP_IOMMU_DEBUG
11 #include <linux/kernel.h>
12 #include <linux/hardirq.h>
13 #include <linux/slab.h>
14 #include <linux/scatterlist.h>
15 #include <linux/err.h>
18 #include <linux/of_platform.h>
20 #include "rockchip-iommu.h"
/*
 * find_region() - look up the rk_vm_region whose start address equals @iova.
 *
 * Walks vmm->regions_list comparing region->start against @iova.
 * NOTE(review): callers in this file take vmm->lock before calling this —
 * presumably the list is protected by that lock; confirm at all call sites.
 * (Source is fragmentary here: the return statements are not visible.)
 */
22 static struct rk_vm_region *find_region(struct rk_iovmm *vmm, dma_addr_t iova)
24 struct rk_vm_region *region;
26 list_for_each_entry(region, &vmm->regions_list, node)
27 if (region->start == iova)
/*
 * iovmm_activate() - attach @dev to its IOVMM's IOMMU domain so that
 * subsequent iovmm_map()/iovmm_map_oto() translations take effect.
 *
 * Returns the result of iommu_attach_device() (0 on success, negative
 * errno on failure).
 */
33 int iovmm_activate(struct device *dev)
35 struct rk_iovmm *vmm = rockchip_get_iovmm(dev);
37 return iommu_attach_device(vmm->domain, dev);
/*
 * iovmm_deactivate() - detach @dev from its IOVMM's IOMMU domain,
 * undoing iovmm_activate(). Existing mappings in the domain are not
 * released here; they remain tracked in vmm->regions_list.
 */
40 void iovmm_deactivate(struct device *dev)
42 struct rk_iovmm *vmm = rockchip_get_iovmm(dev);
44 iommu_detach_device(vmm->domain, dev);
/*
 * iovmm_map() - map a scatterlist into the device's IO virtual address space.
 * @dev:    device whose IOVMM (and IOMMU domain) receives the mapping
 * @sg:     scatterlist describing the physical pages to map
 * @offset: byte offset into the scatterlist where the mapping starts
 * @size:   number of bytes to map (rounded up to page granularity below)
 *
 * Allocates an IOVA range from vmm->vmm_pool with gen_pool_alloc(), then
 * walks the scatterlist mapping each (page-aligned) physical chunk with
 * iommu_map(). Adjacent sg entries that are physically contiguous are
 * consolidated into a single iommu_map() call. On success, records the
 * mapping in a new rk_vm_region on vmm->regions_list and returns the IOVA
 * of the first mapped byte (start + in-page offset). On failure the partial
 * mapping is torn down (iommu_unmap + gen_pool_free) and an error value is
 * returned cast to dma_addr_t.
 *
 * NOTE(review): this extract is missing several lines (error checks after
 * kmalloc/gen_pool_alloc, loop-body context); comments below describe only
 * what the visible lines establish.
 */
47 dma_addr_t iovmm_map(struct device *dev,struct scatterlist *sg, off_t offset,size_t size)
50 dma_addr_t addr, start = 0;
51 size_t mapped_size = 0;
52 struct rk_vm_region *region;
53 struct rk_iovmm *vmm = rockchip_get_iovmm(dev);
/* Skip whole sg entries until @offset falls inside the current entry. */
57 for (; sg_dma_len(sg) < offset; sg = sg_next(sg))
58 offset -= sg_dma_len(sg);
/* Sub-page offset of the first byte; mapping itself is page-granular. */
60 start_off = offset_in_page(sg_phys(sg) + offset);
61 size = PAGE_ALIGN(size + start_off);
/* Alignment order capped at 1 MiB (used by the commented-out aligned alloc). */
63 order = __fls(min_t(size_t, size, SZ_1M));
65 region = kmalloc(sizeof(*region), GFP_KERNEL);
72 //start = (dma_addr_t)gen_pool_alloc_aligned(vmm->vmm_pool, size, order);
/* Carve @size bytes of IOVA space out of the device's gen_pool. */
74 start = (dma_addr_t)gen_pool_alloc(vmm->vmm_pool, size);
89 /* if back to back sg entries are contiguous consolidate them */
90 while (sg_next(sg) &&sg_phys(sg) + sg_dma_len(sg) == sg_phys(sg_next(sg)))
92 len += sg_dma_len(sg_next(sg));
/* Align the chunk's physical start down to a page boundary, growing len. */
103 if (offset_in_page(phys))
105 len += offset_in_page(phys);
106 phys = round_down(phys, PAGE_SIZE);
109 len = PAGE_ALIGN(len);
/* Never map past the requested (page-aligned) total size. */
111 if (len > (size - mapped_size))
112 len = size - mapped_size;
114 ret = iommu_map(vmm->domain, addr, phys, len, 0);
120 } while ((sg = sg_next(sg)) && (mapped_size < size));
122 BUG_ON(mapped_size > size);
/* Scatterlist exhausted before covering @size: treat as failure. */
124 if (mapped_size < size)
/* Callers get the IOVA of the first byte, not the page-aligned base. */
127 region->start = start + start_off;
130 INIT_LIST_HEAD(&region->node);
132 spin_lock(&vmm->lock);
134 list_add(&region->node, &vmm->regions_list);
136 spin_unlock(&vmm->lock);
138 dev_dbg(dev, "IOVMM: Allocated VM region @ %#x/%#X bytes.\n",region->start, region->size);
140 return region->start;
/* Error path: undo any partial mapping and return the IOVA range. */
143 iommu_unmap(vmm->domain, start, mapped_size);
144 gen_pool_free(vmm->vmm_pool, start, size);
148 dev_dbg(dev, "IOVMM: Failed to allocated VM region for %#x bytes.\n",size);
149 return (dma_addr_t)ret;
/*
 * iovmm_unmap() - tear down a mapping previously created by iovmm_map().
 * @dev:  device owning the IOVMM
 * @iova: the address returned by iovmm_map() (i.e. region->start,
 *        which may carry a sub-page offset)
 *
 * Looks up the region under vmm->lock, unlinks it, then unmaps the
 * page-aligned range from the IOMMU domain, invalidates the sysmmu TLB,
 * and returns the IOVA range to the gen_pool. WARNs if no region matches
 * @iova or if iommu_unmap() released a different size than recorded.
 * Must not be called from IRQ context (may sleep / takes non-irq locks).
 */
152 void iovmm_unmap(struct device *dev, dma_addr_t iova)
154 struct rk_vm_region *region;
155 struct rk_iovmm *vmm = rockchip_get_iovmm(dev);
156 size_t unmapped_size;
158 /* This function must not be called in IRQ handlers */
161 spin_lock(&vmm->lock);
163 region = find_region(vmm, iova);
164 if (WARN_ON(!region))
166 spin_unlock(&vmm->lock);
170 list_del(&region->node);
172 spin_unlock(&vmm->lock);
/* Mapping was made page-aligned; strip the sub-page offset back off. */
174 region->start = round_down(region->start, PAGE_SIZE);
176 unmapped_size = iommu_unmap(vmm->domain, region->start, region->size);
178 rockchip_sysmmu_tlb_invalidate(dev);
180 gen_pool_free(vmm->vmm_pool, region->start, region->size);
182 WARN_ON(unmapped_size != region->size);
183 dev_dbg(dev, "IOVMM: Unmapped %#x bytes from %#x.\n",unmapped_size, region->start);
/*
 * iovmm_map_oto() - create a one-to-one (identity) mapping: IOVA == @phys.
 * @dev:  device owning the IOVMM
 * @phys: physical start address (rounded down to a page if misaligned,
 *        with a WARN)
 * @size: length of the mapping in bytes
 *
 * Rejects (with WARN and dev_err) ranges that reach into the pool-managed
 * window at or above IOVA_START, since those IOVAs are allocated by
 * iovmm_map(). On success the region is tracked on vmm->regions_list like
 * a pool mapping. Returns 0 on success or a negative errno.
 * (Source is fragmentary: kmalloc/iommu_map failure handling not visible.)
 */
188 int iovmm_map_oto(struct device *dev, phys_addr_t phys, size_t size)
190 struct rk_vm_region *region;
191 struct rk_iovmm *vmm = rockchip_get_iovmm(dev);
/* Identity mappings must not collide with the gen_pool IOVA window. */
194 if (WARN_ON((phys + size) >= IOVA_START))
196 dev_err(dev,"Unable to create one to one mapping for %#x @ %#x\n",size, phys);
200 region = kmalloc(sizeof(*region), GFP_KERNEL);
204 if (WARN_ON(phys & ~PAGE_MASK))
205 phys = round_down(phys, PAGE_SIZE);
/* IOVA equals the physical address: one-to-one translation. */
208 ret = iommu_map(vmm->domain, (dma_addr_t)phys, phys, size, 0);
215 region->start = (dma_addr_t)phys;
217 INIT_LIST_HEAD(&region->node);
219 spin_lock(&vmm->lock);
221 list_add(&region->node, &vmm->regions_list);
223 spin_unlock(&vmm->lock);
/*
 * iovmm_unmap_oto() - remove a one-to-one mapping created by iovmm_map_oto().
 * @dev:  device owning the IOVMM
 * @phys: physical (== IO virtual) start address of the mapping
 *
 * Mirrors iovmm_unmap() but does not touch the gen_pool, since identity
 * mappings are never allocated from it. WARNs on a misaligned @phys, on a
 * missing region, and on a size mismatch from iommu_unmap().
 * Must not be called from IRQ context.
 */
230 struct rk_vm_region *region;
231 struct rk_iovmm *vmm = rockchip_get_iovmm(dev);
232 size_t unmapped_size;
234 /* This function must not be called in IRQ handlers */
237 if (WARN_ON(phys & ~PAGE_MASK))
238 phys = round_down(phys, PAGE_SIZE);
240 spin_lock(&vmm->lock);
242 region = find_region(vmm, (dma_addr_t)phys);
243 if (WARN_ON(!region))
245 spin_unlock(&vmm->lock);
249 list_del(&region->node);
251 spin_unlock(&vmm->lock);
253 unmapped_size = iommu_unmap(vmm->domain, region->start, region->size);
254 rockchip_sysmmu_tlb_invalidate(dev);
255 WARN_ON(unmapped_size != region->size);
256 dev_dbg(dev, "IOVMM: Unmapped %#x bytes from %#x.\n",unmapped_size, region->start);
/*
 * rockchip_init_iovmm() - initialise a rk_iovmm instance for @sysmmu.
 * @sysmmu: the system-MMU device (used for diagnostics)
 * @vmm:    the IOVMM state to initialise
 *
 * Creates a page-granular genalloc pool, seeds it with the
 * [IOVA_START, IOVA_START + IOVM_SIZE) IOVA window, allocates an IOMMU
 * domain on the platform bus, and initialises the region list and lock.
 * On failure the pool is destroyed and a negative errno is returned.
 * (Source is fragmentary: the specific error-check conditions between the
 * visible gotos are not shown.)
 */
261 int rockchip_init_iovmm(struct device *sysmmu, struct rk_iovmm *vmm)
/* PAGE_SHIFT granularity; -1 = no NUMA node preference. */
265 vmm->vmm_pool = gen_pool_create(PAGE_SHIFT, -1);
269 goto err_setup_genalloc;
272 /* (1GB - 4KB) addr space from 0x10000000 */
273 ret = gen_pool_add(vmm->vmm_pool, IOVA_START, IOVM_SIZE, -1);
275 goto err_setup_domain;
277 vmm->domain = iommu_domain_alloc(&platform_bus_type);
281 goto err_setup_domain;
284 spin_lock_init(&vmm->lock);
286 INIT_LIST_HEAD(&vmm->regions_list);
288 pr_info("IOVMM: Created %#x B IOVMM from %#x.\n",IOVM_SIZE, IOVA_START);
289 dev_dbg(sysmmu, "IOVMM: Created %#x B IOVMM from %#x.\n",IOVM_SIZE, IOVA_START);
/* Error unwind: the domain (if any) is released before the pool. */
292 gen_pool_destroy(vmm->vmm_pool);
294 dev_dbg(sysmmu, "IOVMM: Failed to create IOVMM (%d)\n", ret);
300 1. On success: returns a pointer to the device embedded in the matching platform device
303 struct device *rockchip_get_sysmmu_device_by_compatible(const char *compt)
305 struct device_node *dn = NULL;
306 struct platform_device *pd = NULL;
307 struct device *ret = NULL ;
310 dn = of_find_node_by_name(NULL,name);
313 dn = of_find_compatible_node(NULL,NULL,compt);
316 printk("can't find device node %s \r\n",compt);
320 pd = of_find_device_by_node(dn);
323 printk("can't find platform device in device node %s \r\n",compt);