/*
 * Extraction note: imported from the firefly-linux-kernel-4.4.55 tree,
 * drivers/media/video/videobuf-dma-contig.c
 * (carried on branch "fb: support 180 degree rotate").
 */
1 /*
2  * helper functions for physically contiguous capture buffers
3  *
4  * The functions support hardware lacking scatter gather support
5  * (i.e. the buffers must be linear in physical memory)
6  *
7  * Copyright (c) 2008 Magnus Damm
8  *
9  * Based on videobuf-vmalloc.c,
10  * (c) 2007 Mauro Carvalho Chehab, <mchehab@infradead.org>
11  *
12  * This program is free software; you can redistribute it and/or modify
13  * it under the terms of the GNU General Public License as published by
14  * the Free Software Foundation; either version 2
15  */
16
17 #include <linux/init.h>
18 #include <linux/module.h>
19 #include <linux/mm.h>
20 #include <linux/pagemap.h>
21 #include <linux/dma-mapping.h>
22 #include <linux/sched.h>
23 #include <media/videobuf-dma-contig.h>
24
/* Per-buffer private state kept by the dma-contig memory backend. */
struct videobuf_dma_contig_memory {
	u32 magic;		/* must hold MAGIC_DC_MEM; type-confusion guard */
	void *vaddr;		/* kernel virtual address of the coherent
				 * allocation; NULL when the buffer is backed
				 * by a user-space pointer or already freed */
	dma_addr_t dma_handle;	/* bus address handed to the device */
	unsigned long size;	/* allocation size (page aligned) */
	int is_userptr;		/* non-zero when dma_handle describes pinned
				 * user memory (V4L2_MEMORY_USERPTR) */
};
32
#define MAGIC_DC_MEM 0x0733ac61

/*
 * Verify that a private-state struct really carries our magic value; any
 * mismatch means memory corruption or a buffer owned by a different
 * videobuf backend, so crash loudly via BUG().
 *
 * Wrapped in do { } while (0) so the macro expands to exactly one
 * statement: safe inside unbraced if/else bodies (no dangling-else,
 * no surprise when followed by a semicolon).
 */
#define MAGIC_CHECK(is, should)						    \
	do {								    \
		if (unlikely((is) != (should))) {			    \
			pr_err("magic mismatch: %x expected %x\n",	    \
			       (is), (should));				    \
			BUG();						    \
		}							    \
	} while (0)
39
40 static void
41 videobuf_vm_open(struct vm_area_struct *vma)
42 {
43         struct videobuf_mapping *map = vma->vm_private_data;
44
45         dev_dbg(map->q->dev, "vm_open %p [count=%u,vma=%08lx-%08lx]\n",
46                 map, map->count, vma->vm_start, vma->vm_end);
47
48         map->count++;
49 }
50
/*
 * VMA close handler: drop one reference on the mapping.  When the last
 * reference goes away, cancel any active streaming and release every
 * MMAP buffer that was attached to this mapping, then free the mapping
 * itself.  All buffer-list manipulation happens under q->vb_lock.
 */
static void videobuf_vm_close(struct vm_area_struct *vma)
{
	struct videobuf_mapping *map = vma->vm_private_data;
	struct videobuf_queue *q = map->q;
	int i;

	dev_dbg(map->q->dev, "vm_close %p [count=%u,vma=%08lx-%08lx]\n",
		map, map->count, vma->vm_start, vma->vm_end);

	map->count--;
	if (0 == map->count) {
		struct videobuf_dma_contig_memory *mem;

		dev_dbg(map->q->dev, "munmap %p q=%p\n", map, q);
		mutex_lock(&q->vb_lock);

		/* We need first to cancel streams, before unmapping */
		if (q->streaming)
			videobuf_queue_cancel(q);

		/* Walk all queue slots and detach those owned by this map. */
		for (i = 0; i < VIDEO_MAX_FRAME; i++) {
			if (NULL == q->bufs[i])
				continue;

			if (q->bufs[i]->map != map)
				continue;

			mem = q->bufs[i]->priv;
			if (mem) {
				/* This callback is called only if kernel has
				   allocated memory and this memory is mmapped.
				   In this case, memory should be freed,
				   in order to do memory unmap.
				 */

				MAGIC_CHECK(mem->magic, MAGIC_DC_MEM);

				/* vfree is not atomic - can't be
				   called with IRQ's disabled
				 */
				dev_dbg(map->q->dev, "buf[%d] freeing %p\n",
					i, mem->vaddr);

				dma_free_coherent(q->dev, mem->size,
						  mem->vaddr, mem->dma_handle);
				/* mark freed so later paths don't double-free */
				mem->vaddr = NULL;
			}

			q->bufs[i]->map   = NULL;
			q->bufs[i]->baddr = 0;
		}

		kfree(map);

		mutex_unlock(&q->vb_lock);
	}
}
108
/*
 * VM operations installed on mmap()ed videobuf VMAs.  The mapping's
 * lifetime is reference counted by the open/close pair above.
 */
static const struct vm_operations_struct videobuf_vm_ops = {
	.open     = videobuf_vm_open,
	.close    = videobuf_vm_close,
};
113
114 /**
115  * videobuf_dma_contig_user_put() - reset pointer to user space buffer
116  * @mem: per-buffer private videobuf-dma-contig data
117  *
118  * This function resets the user space pointer
119  */
120 static void videobuf_dma_contig_user_put(struct videobuf_dma_contig_memory *mem)
121 {
122         mem->is_userptr = 0;
123         mem->dma_handle = 0;
124         mem->size = 0;
125 }
126
127 /**
128  * videobuf_dma_contig_user_get() - setup user space memory pointer
129  * @mem: per-buffer private videobuf-dma-contig data
130  * @vb: video buffer to map
131  *
132  * This function validates and sets up a pointer to user space memory.
133  * Only physically contiguous pfn-mapped memory is accepted.
134  *
135  * Returns 0 if successful.
136  */
137 static int videobuf_dma_contig_user_get(struct videobuf_dma_contig_memory *mem,
138                                         struct videobuf_buffer *vb)
139 {
140         struct mm_struct *mm = current->mm;
141         struct vm_area_struct *vma;
142         unsigned long prev_pfn, this_pfn;
143         unsigned long pages_done, user_address;
144         int ret;
145
146         mem->size = PAGE_ALIGN(vb->size);
147         mem->is_userptr = 0;
148         ret = -EINVAL;
149
150         down_read(&mm->mmap_sem);
151
152         vma = find_vma(mm, vb->baddr);
153         if (!vma)
154                 goto out_up;
155
156         if ((vb->baddr + mem->size) > vma->vm_end)
157                 goto out_up;
158
159         pages_done = 0;
160         prev_pfn = 0; /* kill warning */
161         user_address = vb->baddr;
162
163         while (pages_done < (mem->size >> PAGE_SHIFT)) {
164                 ret = follow_pfn(vma, user_address, &this_pfn);
165                 if (ret)
166                         break;
167
168                 if (pages_done == 0)
169                         mem->dma_handle = this_pfn << PAGE_SHIFT;
170                 else if (this_pfn != (prev_pfn + 1))
171                         ret = -EFAULT;
172
173                 if (ret)
174                         break;
175
176                 prev_pfn = this_pfn;
177                 user_address += PAGE_SIZE;
178                 pages_done++;
179         }
180
181         if (!ret)
182                 mem->is_userptr = 1;
183
184  out_up:
185         up_read(&current->mm->mmap_sem);
186
187         return ret;
188 }
189
190 static void *__videobuf_alloc(size_t size)
191 {
192         struct videobuf_dma_contig_memory *mem;
193         struct videobuf_buffer *vb;
194
195         vb = kzalloc(size + sizeof(*mem), GFP_KERNEL);
196         if (vb) {
197                 mem = vb->priv = ((char *)vb) + size;
198                 mem->magic = MAGIC_DC_MEM;
199         }
200
201         return vb;
202 }
203
204 static void *__videobuf_to_vmalloc(struct videobuf_buffer *buf)
205 {
206         struct videobuf_dma_contig_memory *mem = buf->priv;
207
208         BUG_ON(!mem);
209         MAGIC_CHECK(mem->magic, MAGIC_DC_MEM);
210
211         return mem->vaddr;
212 }
213
/*
 * Prepare the backing memory for @vb according to its memory type:
 *  - MMAP:    memory must already exist (set up by __videobuf_mmap_mapper)
 *  - USERPTR: pin the user pointer, or allocate a coherent buffer for
 *             the read() path when no user pointer was supplied
 *  - OVERLAY: accepted as-is (vendor modification, see comment below)
 * @fbuf is unused by this backend.  Returns 0 or a negative errno.
 */
static int __videobuf_iolock(struct videobuf_queue *q,
			     struct videobuf_buffer *vb,
			     struct v4l2_framebuffer *fbuf)
{
	struct videobuf_dma_contig_memory *mem = vb->priv;

	BUG_ON(!mem);
	MAGIC_CHECK(mem->magic, MAGIC_DC_MEM);

	switch (vb->memory) {
	case V4L2_MEMORY_MMAP:
		dev_dbg(q->dev, "%s memory method MMAP\n", __func__);

		/* All handling should be done by __videobuf_mmap_mapper() */
		if (!mem->vaddr) {
			dev_err(q->dev, "memory is not alloced/mmapped.\n");
			return -EINVAL;
		}
		break;
	case V4L2_MEMORY_USERPTR:
		dev_dbg(q->dev, "%s memory method USERPTR\n", __func__);

		/* handle pointer from user space */
		if (vb->baddr)
			return videobuf_dma_contig_user_get(mem, vb);

		/* allocate memory for the read() method */
		mem->size = PAGE_ALIGN(vb->size);
		mem->vaddr = dma_alloc_coherent(q->dev, mem->size,
						&mem->dma_handle, GFP_KERNEL);
		if (!mem->vaddr) {
			dev_err(q->dev, "dma_alloc_coherent %ld failed\n",
					 mem->size);
			return -ENOMEM;
		}

		dev_dbg(q->dev, "dma_alloc_coherent data is at %p (%ld)\n",
			mem->vaddr, mem->size);
		break;
	case V4L2_MEMORY_OVERLAY:
		break; /* ddl@rock-chips.com : nzy modify V4L2_MEMORY_OVERLAY   */
	default:
		dev_dbg(q->dev, "%s memory method OVERLAY/unknown\n",
			__func__);
		return -EINVAL;
	}

	return 0;
}
263
264 static int __videobuf_mmap_free(struct videobuf_queue *q)
265 {
266         unsigned int i;
267
268         dev_dbg(q->dev, "%s\n", __func__);
269         for (i = 0; i < VIDEO_MAX_FRAME; i++) {
270                 if (q->bufs[i] && q->bufs[i]->map)
271                         return -EBUSY;
272         }
273
274         return 0;
275 }
276
277 static int __videobuf_mmap_mapper(struct videobuf_queue *q,
278                                   struct vm_area_struct *vma)
279 {
280         struct videobuf_dma_contig_memory *mem;
281         struct videobuf_mapping *map;
282         unsigned int first;
283         int retval;
284         unsigned long size, offset = vma->vm_pgoff << PAGE_SHIFT;
285
286         dev_dbg(q->dev, "%s\n", __func__);
287         if (!(vma->vm_flags & VM_WRITE) || !(vma->vm_flags & VM_SHARED))
288                 return -EINVAL;
289
290         /* look for first buffer to map */
291         for (first = 0; first < VIDEO_MAX_FRAME; first++) {
292                 if (!q->bufs[first])
293                         continue;
294
295                 if (V4L2_MEMORY_MMAP != q->bufs[first]->memory)
296                         continue;
297                 if (q->bufs[first]->boff == offset)
298                         break;
299         }
300         if (VIDEO_MAX_FRAME == first) {
301                 dev_dbg(q->dev, "invalid user space offset [offset=0x%lx]\n",
302                         offset);
303                 return -EINVAL;
304         }
305
306         /* create mapping + update buffer list */
307         map = kzalloc(sizeof(struct videobuf_mapping), GFP_KERNEL);
308         if (!map)
309                 return -ENOMEM;
310
311         q->bufs[first]->map = map;
312         map->start = vma->vm_start;
313         map->end = vma->vm_end;
314         map->q = q;
315
316         q->bufs[first]->baddr = vma->vm_start;
317
318         mem = q->bufs[first]->priv;
319         BUG_ON(!mem);
320         MAGIC_CHECK(mem->magic, MAGIC_DC_MEM);
321
322         mem->size = PAGE_ALIGN(q->bufs[first]->bsize);
323         mem->vaddr = dma_alloc_coherent(q->dev, mem->size,
324                                         &mem->dma_handle, GFP_KERNEL);
325         if (!mem->vaddr) {
326                 dev_err(q->dev, "dma_alloc_coherent size %ld failed\n",
327                         mem->size);
328                 goto error;
329         }
330         dev_dbg(q->dev, "dma_alloc_coherent data is at addr %p (size %ld)\n",
331                 mem->vaddr, mem->size);
332
333         /* Try to remap memory */
334
335         size = vma->vm_end - vma->vm_start;
336         size = (size < mem->size) ? size : mem->size;
337
338         vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
339         retval = remap_pfn_range(vma, vma->vm_start,
340                                  mem->dma_handle >> PAGE_SHIFT,
341                                  size, vma->vm_page_prot);
342         if (retval) {
343                 dev_err(q->dev, "mmap: remap failed with error %d. ", retval);
344                 dma_free_coherent(q->dev, mem->size,
345                                   mem->vaddr, mem->dma_handle);
346                 goto error;
347         }
348
349         vma->vm_ops          = &videobuf_vm_ops;
350         vma->vm_flags       |= VM_DONTEXPAND;
351         vma->vm_private_data = map;
352
353         dev_dbg(q->dev, "mmap %p: q=%p %08lx-%08lx (%lx) pgoff %08lx buf %d\n",
354                 map, q, vma->vm_start, vma->vm_end,
355                 (long int) q->bufs[first]->bsize,
356                 vma->vm_pgoff, first);
357
358         videobuf_vm_open(vma);
359
360         return 0;
361
362 error:
363         kfree(map);
364         return -ENOMEM;
365 }
366
367 static int __videobuf_copy_to_user(struct videobuf_queue *q,
368                                    char __user *data, size_t count,
369                                    int nonblocking)
370 {
371         struct videobuf_dma_contig_memory *mem = q->read_buf->priv;
372         void *vaddr;
373
374         BUG_ON(!mem);
375         MAGIC_CHECK(mem->magic, MAGIC_DC_MEM);
376         BUG_ON(!mem->vaddr);
377
378         /* copy to userspace */
379         if (count > q->read_buf->size - q->read_off)
380                 count = q->read_buf->size - q->read_off;
381
382         vaddr = mem->vaddr;
383
384         if (copy_to_user(data, vaddr + q->read_off, count))
385                 return -EFAULT;
386
387         return count;
388 }
389
/*
 * Stream-mode copy helper: optionally stamps the frame counter into the
 * tail of a VBI block, then delegates the actual copy to
 * __videobuf_copy_to_user().  Returns bytes copied, or -EFAULT only when
 * nothing had been copied yet in this read (@pos == 0).
 */
static int __videobuf_copy_stream(struct videobuf_queue *q,
				  char __user *data, size_t count, size_t pos,
				  int vbihack, int nonblocking)
{
	unsigned int  *fc;
	struct videobuf_dma_contig_memory *mem = q->read_buf->priv;

	BUG_ON(!mem);
	MAGIC_CHECK(mem->magic, MAGIC_DC_MEM);

	if (vbihack) {
		/* dirty, undocumented hack -- pass the frame counter
			* within the last four bytes of each vbi data block.
			* We need that one to maintain backward compatibility
			* to all vbi decoding software out there ... */
		fc = (unsigned int *)mem->vaddr;
		fc += (q->read_buf->size >> 2) - 1;	/* last 32-bit word */
		*fc = q->read_buf->field_count >> 1;
		dev_dbg(q->dev, "vbihack: %d\n", *fc);
	}

	/* copy stuff using the common method */
	count = __videobuf_copy_to_user(q, data, count, nonblocking);

	/* size_t/int mixing: -EFAULT survives the round-trip unchanged */
	if ((count == -EFAULT) && (pos == 0))
		return -EFAULT;

	return count;
}
419
/* Operation table registered with videobuf-core for this backend. */
static struct videobuf_qtype_ops qops = {
	.magic        = MAGIC_QTYPE_OPS,

	.alloc        = __videobuf_alloc,
	.iolock       = __videobuf_iolock,
	.mmap_free    = __videobuf_mmap_free,
	.mmap_mapper  = __videobuf_mmap_mapper,
	.video_copy_to_user = __videobuf_copy_to_user,
	.copy_stream  = __videobuf_copy_stream,
	.vmalloc      = __videobuf_to_vmalloc,
};
431
/**
 * videobuf_queue_dma_contig_init - set up a queue using this backend
 * @q: videobuf queue to initialize
 * @ops: driver-supplied queue operations
 * @dev: device used for coherent DMA allocations
 * @irqlock: driver spinlock passed through to the core
 * @type: V4L2 buffer type handled by the queue
 * @field: initial field setting
 * @msize: size of the driver's per-buffer structure
 * @priv: driver private pointer stored in the queue
 *
 * Thin wrapper that registers the dma-contig operation table (&qops)
 * with the generic videobuf core.
 */
void videobuf_queue_dma_contig_init(struct videobuf_queue *q,
				    struct videobuf_queue_ops *ops,
				    struct device *dev,
				    spinlock_t *irqlock,
				    enum v4l2_buf_type type,
				    enum v4l2_field field,
				    unsigned int msize,
				    void *priv)
{
	videobuf_queue_core_init(q, ops, dev, irqlock, type, field, msize,
				 priv, &qops);
}
EXPORT_SYMBOL_GPL(videobuf_queue_dma_contig_init);
445
446 dma_addr_t videobuf_to_dma_contig(struct videobuf_buffer *buf)
447 {
448         struct videobuf_dma_contig_memory *mem = buf->priv;
449
450         BUG_ON(!mem);
451         MAGIC_CHECK(mem->magic, MAGIC_DC_MEM);
452
453         return mem->dma_handle;
454 }
455 EXPORT_SYMBOL_GPL(videobuf_to_dma_contig);
456
/*
 * Free the memory behind @buf, but only when this layer owns it, i.e.
 * only for USERPTR buffers: either unpin the user pointer or free the
 * coherent allocation made for the read() path.
 */
void videobuf_dma_contig_free(struct videobuf_queue *q,
			      struct videobuf_buffer *buf)
{
	struct videobuf_dma_contig_memory *mem = buf->priv;

	/* mmapped memory can't be freed here, otherwise mmapped region
	   would be released, while still needed. In this case, the memory
	   release should happen inside videobuf_vm_close().
	   So, it should free memory only if the memory were allocated for
	   read() operation.
	 */
	if (buf->memory != V4L2_MEMORY_USERPTR)
		return;

	if (!mem)
		return;

	MAGIC_CHECK(mem->magic, MAGIC_DC_MEM);

	/* handle user space pointer case */
	if (buf->baddr) {
		videobuf_dma_contig_user_put(mem);
		return;
	}

	/* read() method */
	dma_free_coherent(q->dev, mem->size, mem->vaddr, mem->dma_handle);
	mem->vaddr = NULL;	/* guard against a second free */
}
EXPORT_SYMBOL_GPL(videobuf_dma_contig_free);
487
/* Module metadata */
MODULE_DESCRIPTION("helper module to manage video4linux dma contig buffers");
MODULE_AUTHOR("Magnus Damm");
MODULE_LICENSE("GPL");