drivers/iommu/rockchip-iovmm.c
/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

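/*
 * IO virtual memory manager (IOVMM) for the Rockchip IOMMU: hands out
 * device-visible virtual address ranges from a genalloc pool and maps
 * scatterlists (or one-to-one physical regions) into a per-device
 * IOMMU domain.
 */
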
#ifdef CONFIG_ROCKCHIP_IOMMU_DEBUG
#define DEBUG
#endif

#include <linux/kernel.h>
#include <linux/hardirq.h>
#include <linux/slab.h>
#include <linux/scatterlist.h>
#include <linux/err.h>
#include <linux/genalloc.h>	/* gen_pool_* */
#include <linux/iommu.h>	/* iommu_map()/iommu_unmap() */
#include <linux/sizes.h>	/* SZ_1M */

#include <linux/of.h>
#include <linux/of_platform.h>

#include "rockchip-iommu.h"

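/*
 * find_region - look up the region whose IOVA start matches @iova
 *
 * Caller must hold vmm->lock.
 */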
static struct rk_vm_region *find_region(struct rk_iovmm *vmm, dma_addr_t iova)
{
	struct rk_vm_region *region;

	list_for_each_entry(region, &vmm->regions_list, node)
		if (region->start == iova)
			return region;

	return NULL;
}

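/*
 * rockchip_iovmm_set_fault_handler - install a fault handler for @dev's IOMMU
 *
 * The handler pointer is swapped under data->lock with IRQs disabled, so it
 * may safely be updated while faults are being delivered.
 */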
void rockchip_iovmm_set_fault_handler(struct device *dev,
				      rockchip_iommu_fault_handler_t handler)
{
	unsigned long flags;
	struct iommu_drvdata *data = dev_get_drvdata(dev->archdata.iommu);

	write_lock_irqsave(&data->lock, flags);
	data->fault_handler = handler;
	write_unlock_irqrestore(&data->lock, flags);
}

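/* Attach @dev to its IOVMM domain so its IOVA mappings take effect. */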
int rockchip_iovmm_activate(struct device *dev)
{
	struct rk_iovmm *vmm = rockchip_get_iovmm(dev);

	return iommu_attach_device(vmm->domain, dev);
}

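/* Detach @dev from its IOVMM domain. */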
void rockchip_iovmm_deactivate(struct device *dev)
{
	struct rk_iovmm *vmm = rockchip_get_iovmm(dev);

	iommu_detach_device(vmm->domain, dev);
}

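/*
 * rockchip_iovmm_map - map a scatterlist into the device's IOVA space
 * @dev:	client device
 * @sg:		scatterlist describing the physical pages to map
 * @offset:	byte offset into the scatterlist at which the mapping starts
 * @size:	number of bytes to map
 *
 * Allocates a page-aligned IOVA range from the genalloc pool, maps the
 * scatterlist into it (merging physically contiguous entries), and records
 * the mapping in vmm->regions_list. Returns the IOVA of the first mapped
 * byte, or a negative errno cast to dma_addr_t on failure.
 */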
dma_addr_t rockchip_iovmm_map(struct device *dev,
	struct scatterlist *sg, off_t offset, size_t size)
{
	off_t start_off;
	dma_addr_t addr, start = 0;
	size_t mapped_size = 0;
	struct rk_vm_region *region;
	struct rk_iovmm *vmm = rockchip_get_iovmm(dev);
	int order;
	int ret;

	for (; sg_dma_len(sg) < offset; sg = sg_next(sg))
		offset -= sg_dma_len(sg);

	start_off = offset_in_page(sg_phys(sg) + offset);
	size = PAGE_ALIGN(size + start_off);

	order = __fls(min_t(size_t, size, SZ_1M));

	region = kmalloc(sizeof(*region), GFP_KERNEL);
	if (!region) {
		ret = -ENOMEM;
		goto err_map_nomem;
	}

	start = (dma_addr_t)gen_pool_alloc(vmm->vmm_pool, size);
	if (!start) {
		ret = -ENOMEM;
		goto err_map_noiomem;
	}

	addr = start;
	do {
		phys_addr_t phys;
		size_t len;

		phys = sg_phys(sg);
		len = sg_dma_len(sg);
		/* consolidate back-to-back physically contiguous sg entries */
		while (sg_next(sg) && sg_phys(sg) +
		       sg_dma_len(sg) == sg_phys(sg_next(sg))) {
			len += sg_dma_len(sg_next(sg));
			sg = sg_next(sg);
		}

		if (offset > 0) {
			len -= offset;
			phys += offset;
			offset = 0;
		}

		if (offset_in_page(phys)) {
			len += offset_in_page(phys);
			phys = round_down(phys, PAGE_SIZE);
		}

		len = PAGE_ALIGN(len);

		if (len > (size - mapped_size))
			len = size - mapped_size;

		ret = iommu_map(vmm->domain, addr, phys, len, 0);
		if (ret)
			break;

		addr += len;
		mapped_size += len;
	} while ((sg = sg_next(sg)) && (mapped_size < size));

	BUG_ON(mapped_size > size);

	if (mapped_size < size)
		goto err_map_map;

	region->start = start + start_off;
	region->size = size;

	INIT_LIST_HEAD(&region->node);

	spin_lock(&vmm->lock);

	list_add(&region->node, &vmm->regions_list);

	spin_unlock(&vmm->lock);

	rockchip_iommu_tlb_invalidate(dev);
	pr_debug("IOVMM: Allocated VM region @ %#llx/%#zx bytes.\n",
		 (unsigned long long)region->start, region->size);

	return region->start;

err_map_map:
	iommu_unmap(vmm->domain, start, mapped_size);
	gen_pool_free(vmm->vmm_pool, start, size);
err_map_noiomem:
	kfree(region);
err_map_nomem:
	pr_err("IOVMM: Failed to allocate VM region for %#zx bytes.\n", size);
	return (dma_addr_t)ret;
}

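/*
 * rockchip_iovmm_unmap - tear down a mapping made by rockchip_iovmm_map()
 * @dev:	client device
 * @iova:	address previously returned by rockchip_iovmm_map()
 *
 * Unmaps the region from the IOMMU domain and returns its IOVA range to
 * the pool. Must not be called from IRQ context.
 */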
void rockchip_iovmm_unmap(struct device *dev, dma_addr_t iova)
{
	struct rk_vm_region *region;
	struct rk_iovmm *vmm = rockchip_get_iovmm(dev);
	size_t unmapped_size;

	/* This function must not be called in IRQ handlers */
	BUG_ON(in_irq());

	spin_lock(&vmm->lock);

	region = find_region(vmm, iova);
	if (WARN_ON(!region)) {
		spin_unlock(&vmm->lock);
		return;
	}

	list_del(&region->node);

	spin_unlock(&vmm->lock);

	region->start = round_down(region->start, PAGE_SIZE);

	unmapped_size = iommu_unmap(vmm->domain,
				    region->start, region->size);
	/*
	rockchip_iommu_tlb_invalidate(dev);
	*/
	gen_pool_free(vmm->vmm_pool, region->start, region->size);

	WARN_ON(unmapped_size != region->size);
	pr_debug("IOVMM: Unmapped %#zx bytes from %#llx.\n",
		 unmapped_size, (unsigned long long)region->start);

	kfree(region);
}

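/*
 * rockchip_iovmm_map_oto - create a one-to-one (IOVA == physical) mapping
 * @dev:	client device
 * @phys:	physical start address (should be page aligned)
 * @size:	size of the mapping in bytes
 *
 * The region must lie entirely below IOVA_START so it cannot collide with
 * pool-allocated mappings.
 */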
int rockchip_iovmm_map_oto(struct device *dev, phys_addr_t phys, size_t size)
{
	struct rk_vm_region *region;
	struct rk_iovmm *vmm = rockchip_get_iovmm(dev);
	int ret;

	if (WARN_ON((phys + size) >= IOVA_START)) {
		pr_err("Unable to create one-to-one mapping for %#zx @ %#llx\n",
		       size, (unsigned long long)phys);
		return -EINVAL;
	}

	region = kmalloc(sizeof(*region), GFP_KERNEL);
	if (!region)
		return -ENOMEM;

	if (WARN_ON(phys & ~PAGE_MASK))
		phys = round_down(phys, PAGE_SIZE);

	ret = iommu_map(vmm->domain, (dma_addr_t)phys, phys, size, 0);
	if (ret < 0) {
		kfree(region);
		return ret;
	}

	region->start = (dma_addr_t)phys;
	region->size = size;
	INIT_LIST_HEAD(&region->node);

	spin_lock(&vmm->lock);

	list_add(&region->node, &vmm->regions_list);

	spin_unlock(&vmm->lock);

	rockchip_iommu_tlb_invalidate(dev);

	return 0;
}

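/*
 * rockchip_iovmm_unmap_oto - remove a mapping made by rockchip_iovmm_map_oto()
 * @dev:	client device
 * @phys:	physical address that was passed to rockchip_iovmm_map_oto()
 *
 * Must not be called from IRQ context.
 */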
void rockchip_iovmm_unmap_oto(struct device *dev, phys_addr_t phys)
{
	struct rk_vm_region *region;
	struct rk_iovmm *vmm = rockchip_get_iovmm(dev);
	size_t unmapped_size;

	/* This function must not be called in IRQ handlers */
	BUG_ON(in_irq());

	if (WARN_ON(phys & ~PAGE_MASK))
		phys = round_down(phys, PAGE_SIZE);

	spin_lock(&vmm->lock);

	region = find_region(vmm, (dma_addr_t)phys);
	if (WARN_ON(!region)) {
		spin_unlock(&vmm->lock);
		return;
	}

	list_del(&region->node);

	spin_unlock(&vmm->lock);

	unmapped_size = iommu_unmap(vmm->domain, region->start, region->size);
	rockchip_iommu_tlb_invalidate(dev);
	WARN_ON(unmapped_size != region->size);
	pr_debug("IOVMM: Unmapped %#zx bytes from %#llx.\n",
		 unmapped_size, (unsigned long long)region->start);

	kfree(region);
}

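/*
 * rockchip_init_iovmm - set up the IOVA pool and IOMMU domain for one IOMMU
 * @iommu:	the IOMMU device
 * @vmm:	per-IOMMU IOVMM state to initialize
 *
 * A minimal usage sketch from a client driver (the device pointer @dev and
 * the scatter-gather table @sgt below are hypothetical):
 *
 *	dma_addr_t iova;
 *
 *	if (rockchip_iovmm_activate(dev))	// attach dev to the domain
 *		return;
 *	iova = rockchip_iovmm_map(dev, sgt->sgl, 0, buf_size);
 *	// ... program the hardware with iova ...
 *	rockchip_iovmm_unmap(dev, iova);
 *	rockchip_iovmm_deactivate(dev);
 */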
int rockchip_init_iovmm(struct device *iommu, struct rk_iovmm *vmm)
{
	int ret = 0;

	vmm->vmm_pool = gen_pool_create(PAGE_SHIFT, -1);
	if (!vmm->vmm_pool) {
		ret = -ENOMEM;
		goto err_setup_genalloc;
	}

	/* (1GB - 4KB) addr space from 0x10000000 */
	ret = gen_pool_add(vmm->vmm_pool, IOVA_START, IOVM_SIZE, -1);
	if (ret)
		goto err_setup_domain;

	vmm->domain = iommu_domain_alloc(&platform_bus_type);
	if (!vmm->domain) {
		ret = -ENOMEM;
		goto err_setup_domain;
	}

	spin_lock_init(&vmm->lock);

	INIT_LIST_HEAD(&vmm->regions_list);

	pr_info("IOVMM: Created %#x-byte IOVMM starting at %#x.\n",
		IOVM_SIZE, IOVA_START);
	return 0;
err_setup_domain:
	gen_pool_destroy(vmm->vmm_pool);
err_setup_genalloc:
	pr_err("IOVMM: Failed to create IOVMM (%d)\n", ret);

	return ret;
}