/*
 * videobuf2-vmalloc.c - vmalloc memory allocator for videobuf2
 *
 * Copyright (C) 2010 Samsung Electronics
 *
 * Author: Pawel Osciak <pawel@osciak.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <linux/io.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include <media/videobuf2-core.h>
#include <media/videobuf2-vmalloc.h>
#include <media/videobuf2-memops.h>

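/*
 * Per-buffer state for the vmalloc allocator: the kernel virtual mapping,
 * the page array and VMA used for USERPTR buffers, a reference count shared
 * with the mmap handler, and the dma_buf handle used when importing.
 */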
struct vb2_vmalloc_buf {
        void                            *vaddr;
        struct page                     **pages;
        struct vm_area_struct           *vma;
        enum dma_data_direction         dma_dir;
        unsigned long                   size;
        unsigned int                    n_pages;
        atomic_t                        refcount;
        struct vb2_vmarea_handler       handler;
        struct dma_buf                  *dbuf;
};

static void vb2_vmalloc_put(void *buf_priv);

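/*
 * MMAP mode: allocate @size bytes with vmalloc_user() and take the first
 * reference; the buffer is freed in vb2_vmalloc_put() once the last user
 * (queue, userspace mapping or exported dma-buf) drops its reference.
 */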
static void *vb2_vmalloc_alloc(void *alloc_ctx, unsigned long size,
                               enum dma_data_direction dma_dir, gfp_t gfp_flags)
{
        struct vb2_vmalloc_buf *buf;

        buf = kzalloc(sizeof(*buf), GFP_KERNEL | gfp_flags);
        if (!buf)
                return NULL;

        buf->size = size;
        buf->vaddr = vmalloc_user(buf->size);
        buf->dma_dir = dma_dir;
        buf->handler.refcount = &buf->refcount;
        buf->handler.put = vb2_vmalloc_put;
        buf->handler.arg = buf;

        if (!buf->vaddr) {
                pr_debug("vmalloc of size %ld failed\n", buf->size);
                kfree(buf);
                return NULL;
        }

        atomic_inc(&buf->refcount);
        return buf;
}

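/*
 * Drop one reference; the vmalloc area and the bookkeeping structure are
 * released only when the count reaches zero.
 */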
static void vb2_vmalloc_put(void *buf_priv)
{
        struct vb2_vmalloc_buf *buf = buf_priv;

        if (atomic_dec_and_test(&buf->refcount)) {
                vfree(buf->vaddr);
                kfree(buf);
        }
}

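/*
 * USERPTR mode: a VM_PFNMAP mapping with a non-zero pgoff is treated as
 * physically contiguous memory and mapped with ioremap_nocache(), while an
 * ordinary user mapping is pinned page by page with get_user_pages() and
 * given a kernel mapping via vm_map_ram().
 */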
static void *vb2_vmalloc_get_userptr(void *alloc_ctx, unsigned long vaddr,
                                     unsigned long size,
                                     enum dma_data_direction dma_dir)
{
        struct vb2_vmalloc_buf *buf;
        unsigned long first, last;
        int n_pages, offset;
        struct vm_area_struct *vma;
        dma_addr_t physp;

        buf = kzalloc(sizeof(*buf), GFP_KERNEL);
        if (!buf)
                return NULL;

        buf->dma_dir = dma_dir;
        offset = vaddr & ~PAGE_MASK;
        buf->size = size;

        vma = find_vma(current->mm, vaddr);
        if (vma && (vma->vm_flags & VM_PFNMAP) && (vma->vm_pgoff)) {
                if (vb2_get_contig_userptr(vaddr, size, &vma, &physp))
                        goto fail_pages_array_alloc;
                buf->vma = vma;
                buf->vaddr = ioremap_nocache(physp, size);
                if (!buf->vaddr)
                        goto fail_pages_array_alloc;
        } else {
                first = vaddr >> PAGE_SHIFT;
                last  = (vaddr + size - 1) >> PAGE_SHIFT;
                buf->n_pages = last - first + 1;
                buf->pages = kzalloc(buf->n_pages * sizeof(struct page *),
                                     GFP_KERNEL);
                if (!buf->pages)
                        goto fail_pages_array_alloc;

                /* current->mm->mmap_sem is taken by videobuf2 core */
                n_pages = get_user_pages(current, current->mm,
                                         vaddr & PAGE_MASK, buf->n_pages,
                                         dma_dir == DMA_FROM_DEVICE,
                                         1, /* force */
                                         buf->pages, NULL);
                if (n_pages != buf->n_pages)
                        goto fail_get_user_pages;

                buf->vaddr = vm_map_ram(buf->pages, buf->n_pages, -1,
                                        PAGE_KERNEL);
                if (!buf->vaddr)
                        goto fail_get_user_pages;
        }

        buf->vaddr += offset;
        return buf;

fail_get_user_pages:
        pr_debug("get_user_pages requested/got: %d/%d\n", n_pages,
                 buf->n_pages);
        while (--n_pages >= 0)
                put_page(buf->pages[n_pages]);
        kfree(buf->pages);

fail_pages_array_alloc:
        kfree(buf);

        return NULL;
}

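/*
 * Undo vb2_vmalloc_get_userptr(): unmap and release the pinned pages
 * (marking them dirty for DMA_FROM_DEVICE buffers), or drop the VMA
 * reference and iounmap() the contiguous case.
 */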
static void vb2_vmalloc_put_userptr(void *buf_priv)
{
        struct vb2_vmalloc_buf *buf = buf_priv;
        unsigned long vaddr = (unsigned long)buf->vaddr & PAGE_MASK;
        unsigned int i;

        if (buf->pages) {
                if (vaddr)
                        vm_unmap_ram((void *)vaddr, buf->n_pages);
                for (i = 0; i < buf->n_pages; ++i) {
                        if (buf->dma_dir == DMA_FROM_DEVICE)
                                set_page_dirty_lock(buf->pages[i]);
                        put_page(buf->pages[i]);
                }
                kfree(buf->pages);
        } else {
                vb2_put_vma(buf->vma);
                iounmap(buf->vaddr);
        }
        kfree(buf);
}

static void *vb2_vmalloc_vaddr(void *buf_priv)
{
        struct vb2_vmalloc_buf *buf = buf_priv;

        if (!buf->vaddr) {
                pr_err("Address of an unallocated plane requested or cannot map user pointer\n");
                return NULL;
        }

        return buf->vaddr;
}

static unsigned int vb2_vmalloc_num_users(void *buf_priv)
{
        struct vb2_vmalloc_buf *buf = buf_priv;
        return atomic_read(&buf->refcount);
}

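/*
 * Map an MMAP buffer into userspace with remap_vmalloc_range() and hook up
 * vb2_common_vm_ops so that open()/close() on the VMA keep the buffer
 * refcount in sync with the lifetime of the mapping.
 */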
static int vb2_vmalloc_mmap(void *buf_priv, struct vm_area_struct *vma)
{
        struct vb2_vmalloc_buf *buf = buf_priv;
        int ret;

        if (!buf) {
                pr_err("No memory to map\n");
                return -EINVAL;
        }

        ret = remap_vmalloc_range(vma, buf->vaddr, 0);
        if (ret) {
                pr_err("Remapping vmalloc memory, error: %d\n", ret);
                return ret;
        }

        /*
         * Make sure that vm_areas for 2 buffers won't be merged together
         */
        vma->vm_flags           |= VM_DONTEXPAND;

        /*
         * Use common vm_area operations to track buffer refcount.
         */
        vma->vm_private_data    = &buf->handler;
        vma->vm_ops             = &vb2_common_vm_ops;

        vma->vm_ops->open(vma);

        return 0;
}

/*********************************************/
/*         DMABUF ops for exporters          */
/*********************************************/

struct vb2_vmalloc_attachment {
        struct sg_table sgt;
        enum dma_data_direction dma_dir;
};

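/*
 * Build a scatterlist describing the vmalloc'ed pages for the attaching
 * device; the actual DMA mapping is deferred to the map_dma_buf callback.
 */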
static int vb2_vmalloc_dmabuf_ops_attach(struct dma_buf *dbuf, struct device *dev,
        struct dma_buf_attachment *dbuf_attach)
{
        struct vb2_vmalloc_attachment *attach;
        struct vb2_vmalloc_buf *buf = dbuf->priv;
        int num_pages = PAGE_ALIGN(buf->size) / PAGE_SIZE;
        struct sg_table *sgt;
        struct scatterlist *sg;
        void *vaddr = buf->vaddr;
        int ret;
        int i;

        attach = kzalloc(sizeof(*attach), GFP_KERNEL);
        if (!attach)
                return -ENOMEM;

        sgt = &attach->sgt;
        ret = sg_alloc_table(sgt, num_pages, GFP_KERNEL);
        if (ret) {
                kfree(attach);
                return ret;
        }
        for_each_sg(sgt->sgl, sg, sgt->nents, i) {
                struct page *page = vmalloc_to_page(vaddr);

                if (!page) {
                        sg_free_table(sgt);
                        kfree(attach);
                        return -ENOMEM;
                }
                sg_set_page(sg, page, PAGE_SIZE, 0);
                vaddr += PAGE_SIZE;
        }

        attach->dma_dir = DMA_NONE;
        dbuf_attach->priv = attach;
        return 0;
}

static void vb2_vmalloc_dmabuf_ops_detach(struct dma_buf *dbuf,
        struct dma_buf_attachment *db_attach)
{
        struct vb2_vmalloc_attachment *attach = db_attach->priv;
        struct sg_table *sgt;

        if (!attach)
                return;

        sgt = &attach->sgt;

        /* release the scatterlist cache */
        if (attach->dma_dir != DMA_NONE)
                dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
                        attach->dma_dir);
        sg_free_table(sgt);
        kfree(attach);
        db_attach->priv = NULL;
}

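/*
 * Map the cached scatterlist for the importer. The mapping is cached per
 * attachment and only redone when the requested direction changes; the
 * dma_buf mutex serializes concurrent map/unmap calls.
 */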
static struct sg_table *vb2_vmalloc_dmabuf_ops_map(
        struct dma_buf_attachment *db_attach, enum dma_data_direction dma_dir)
{
        struct vb2_vmalloc_attachment *attach = db_attach->priv;
        /* stealing dmabuf mutex to serialize map/unmap operations */
        struct mutex *lock = &db_attach->dmabuf->lock;
        struct sg_table *sgt;
        int ret;

        mutex_lock(lock);

        sgt = &attach->sgt;
        /* return previously mapped sg table */
        if (attach->dma_dir == dma_dir) {
                mutex_unlock(lock);
                return sgt;
        }

        /* release any previous cache */
        if (attach->dma_dir != DMA_NONE) {
                dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
                        attach->dma_dir);
                attach->dma_dir = DMA_NONE;
        }

        /* mapping to the client with new direction */
        ret = dma_map_sg(db_attach->dev, sgt->sgl, sgt->orig_nents, dma_dir);
        if (ret <= 0) {
                pr_err("failed to map scatterlist\n");
                mutex_unlock(lock);
                return ERR_PTR(-EIO);
        }

        attach->dma_dir = dma_dir;

        mutex_unlock(lock);

        return sgt;
}

static void vb2_vmalloc_dmabuf_ops_unmap(struct dma_buf_attachment *db_attach,
        struct sg_table *sgt, enum dma_data_direction dma_dir)
{
        /* nothing to be done here */
}

static void vb2_vmalloc_dmabuf_ops_release(struct dma_buf *dbuf)
{
        /* drop reference obtained in vb2_vmalloc_get_dmabuf */
        vb2_vmalloc_put(dbuf->priv);
}

static void *vb2_vmalloc_dmabuf_ops_kmap(struct dma_buf *dbuf, unsigned long pgnum)
{
        struct vb2_vmalloc_buf *buf = dbuf->priv;

        return buf->vaddr + pgnum * PAGE_SIZE;
}

static void *vb2_vmalloc_dmabuf_ops_vmap(struct dma_buf *dbuf)
{
        struct vb2_vmalloc_buf *buf = dbuf->priv;

        return buf->vaddr;
}

static int vb2_vmalloc_dmabuf_ops_mmap(struct dma_buf *dbuf,
        struct vm_area_struct *vma)
{
        return vb2_vmalloc_mmap(dbuf->priv, vma);
}

static struct dma_buf_ops vb2_vmalloc_dmabuf_ops = {
        .attach = vb2_vmalloc_dmabuf_ops_attach,
        .detach = vb2_vmalloc_dmabuf_ops_detach,
        .map_dma_buf = vb2_vmalloc_dmabuf_ops_map,
        .unmap_dma_buf = vb2_vmalloc_dmabuf_ops_unmap,
        .kmap = vb2_vmalloc_dmabuf_ops_kmap,
        .kmap_atomic = vb2_vmalloc_dmabuf_ops_kmap,
        .vmap = vb2_vmalloc_dmabuf_ops_vmap,
        .mmap = vb2_vmalloc_dmabuf_ops_mmap,
        .release = vb2_vmalloc_dmabuf_ops_release,
};

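/*
 * Export an MMAP buffer as a dma_buf. The exported buffer holds an extra
 * reference on the vb2 buffer, dropped again from the dma_buf release op.
 */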
static struct dma_buf *vb2_vmalloc_get_dmabuf(void *buf_priv, unsigned long flags)
{
        struct vb2_vmalloc_buf *buf = buf_priv;
        struct dma_buf *dbuf;

        if (WARN_ON(!buf->vaddr))
                return NULL;

        dbuf = dma_buf_export(buf, &vb2_vmalloc_dmabuf_ops, buf->size, flags, NULL);
        if (IS_ERR(dbuf))
                return NULL;

        /* dmabuf keeps reference to vb2 buffer */
        atomic_inc(&buf->refcount);

        return dbuf;
}

/*********************************************/
/*       callbacks for DMABUF buffers        */
/*********************************************/

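/*
 * DMABUF import: the attached dma_buf is vmap'ed to obtain a kernel virtual
 * address so that the buffer can be accessed like a regular vmalloc buffer.
 */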
static int vb2_vmalloc_map_dmabuf(void *mem_priv)
{
        struct vb2_vmalloc_buf *buf = mem_priv;

        buf->vaddr = dma_buf_vmap(buf->dbuf);

        return buf->vaddr ? 0 : -EFAULT;
}

static void vb2_vmalloc_unmap_dmabuf(void *mem_priv)
{
        struct vb2_vmalloc_buf *buf = mem_priv;

        dma_buf_vunmap(buf->dbuf, buf->vaddr);
        buf->vaddr = NULL;
}

static void vb2_vmalloc_detach_dmabuf(void *mem_priv)
{
        struct vb2_vmalloc_buf *buf = mem_priv;

        if (buf->vaddr)
                dma_buf_vunmap(buf->dbuf, buf->vaddr);

        kfree(buf);
}

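/*
 * Importing only records the dma_buf handle and requested size here; the
 * buffer is actually vmap'ed later in vb2_vmalloc_map_dmabuf().
 */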
static void *vb2_vmalloc_attach_dmabuf(void *alloc_ctx, struct dma_buf *dbuf,
        unsigned long size, enum dma_data_direction dma_dir)
{
        struct vb2_vmalloc_buf *buf;

        if (dbuf->size < size)
                return ERR_PTR(-EFAULT);

        buf = kzalloc(sizeof(*buf), GFP_KERNEL);
        if (!buf)
                return ERR_PTR(-ENOMEM);

        buf->dbuf = dbuf;
        buf->dma_dir = dma_dir;
        buf->size = size;

        return buf;
}

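/*
 * Memory operations exposed to the videobuf2 core. A driver selects this
 * allocator by pointing its queue at these ops before vb2_queue_init(),
 * for example (illustrative only, the queue setup lives in each driver):
 *
 *      q->mem_ops  = &vb2_vmalloc_memops;
 *      q->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF;
 */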
const struct vb2_mem_ops vb2_vmalloc_memops = {
        .alloc          = vb2_vmalloc_alloc,
        .put            = vb2_vmalloc_put,
        .get_userptr    = vb2_vmalloc_get_userptr,
        .put_userptr    = vb2_vmalloc_put_userptr,
        .get_dmabuf     = vb2_vmalloc_get_dmabuf,
        .map_dmabuf     = vb2_vmalloc_map_dmabuf,
        .unmap_dmabuf   = vb2_vmalloc_unmap_dmabuf,
        .attach_dmabuf  = vb2_vmalloc_attach_dmabuf,
        .detach_dmabuf  = vb2_vmalloc_detach_dmabuf,
        .vaddr          = vb2_vmalloc_vaddr,
        .mmap           = vb2_vmalloc_mmap,
        .num_users      = vb2_vmalloc_num_users,
};
EXPORT_SYMBOL_GPL(vb2_vmalloc_memops);

MODULE_DESCRIPTION("vmalloc memory handling routines for videobuf2");
MODULE_AUTHOR("Pawel Osciak <pawel@osciak.com>");
MODULE_LICENSE("GPL");