Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/sage/ceph...
[firefly-linux-kernel-4.4.55.git] / drivers / staging / comedi / comedi_buf.c
1 /*
2  * comedi_buf.c
3  *
4  * COMEDI - Linux Control and Measurement Device Interface
5  * Copyright (C) 1997-2000 David A. Schleef <ds@schleef.org>
6  *
7  * This program is free software; you can redistribute it and/or modify
8  * it under the terms of the GNU General Public License as published by
9  * the Free Software Foundation; either version 2 of the License, or
10  * (at your option) any later version.
11  *
12  * This program is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
15  * GNU General Public License for more details.
16  */
17
18 #include <linux/vmalloc.h>
19 #include <linux/slab.h>
20
21 #include "comedidev.h"
22 #include "comedi_internal.h"
23
24 #ifdef PAGE_KERNEL_NOCACHE
25 #define COMEDI_PAGE_PROTECTION          PAGE_KERNEL_NOCACHE
26 #else
27 #define COMEDI_PAGE_PROTECTION          PAGE_KERNEL
28 #endif
29
/*
 * kref release callback: frees a buffer mapping once the last reference
 * (held by the subdevice and/or an mmap of the buffer) has been dropped.
 */
static void comedi_buf_map_kref_release(struct kref *kref)
{
        struct comedi_buf_map *bm =
                container_of(kref, struct comedi_buf_map, refcount);
        struct comedi_buf_page *buf;
        unsigned int i;

        if (bm->page_list) {
                /* only the first n_pages entries were successfully allocated */
                for (i = 0; i < bm->n_pages; i++) {
                        buf = &bm->page_list[i];
                        /* undo the PG_reserved marking done at allocation */
                        clear_bit(PG_reserved,
                                  &(virt_to_page(buf->virt_addr)->flags));
                        if (bm->dma_dir != DMA_NONE) {
#ifdef CONFIG_HAS_DMA
                                /* page was allocated with dma_alloc_coherent() */
                                dma_free_coherent(bm->dma_hw_dev,
                                                  PAGE_SIZE,
                                                  buf->virt_addr,
                                                  buf->dma_addr);
#endif
                        } else {
                                free_page((unsigned long)buf->virt_addr);
                        }
                }
                vfree(bm->page_list);
        }
        /* drop the hardware device reference taken for DMA buffers */
        if (bm->dma_dir != DMA_NONE)
                put_device(bm->dma_hw_dev);
        kfree(bm);
}
59
60 static void __comedi_buf_free(struct comedi_device *dev,
61                               struct comedi_subdevice *s)
62 {
63         struct comedi_async *async = s->async;
64         struct comedi_buf_map *bm;
65         unsigned long flags;
66
67         if (async->prealloc_buf) {
68                 vunmap(async->prealloc_buf);
69                 async->prealloc_buf = NULL;
70                 async->prealloc_bufsz = 0;
71         }
72
73         spin_lock_irqsave(&s->spin_lock, flags);
74         bm = async->buf_map;
75         async->buf_map = NULL;
76         spin_unlock_irqrestore(&s->spin_lock, flags);
77         comedi_buf_map_put(bm);
78 }
79
80 static void __comedi_buf_alloc(struct comedi_device *dev,
81                                struct comedi_subdevice *s,
82                                unsigned n_pages)
83 {
84         struct comedi_async *async = s->async;
85         struct page **pages = NULL;
86         struct comedi_buf_map *bm;
87         struct comedi_buf_page *buf;
88         unsigned long flags;
89         unsigned i;
90
91         if (!IS_ENABLED(CONFIG_HAS_DMA) && s->async_dma_dir != DMA_NONE) {
92                 dev_err(dev->class_dev,
93                         "dma buffer allocation not supported\n");
94                 return;
95         }
96
97         bm = kzalloc(sizeof(*async->buf_map), GFP_KERNEL);
98         if (!bm)
99                 return;
100
101         kref_init(&bm->refcount);
102         spin_lock_irqsave(&s->spin_lock, flags);
103         async->buf_map = bm;
104         spin_unlock_irqrestore(&s->spin_lock, flags);
105         bm->dma_dir = s->async_dma_dir;
106         if (bm->dma_dir != DMA_NONE)
107                 /* Need ref to hardware device to free buffer later. */
108                 bm->dma_hw_dev = get_device(dev->hw_dev);
109
110         bm->page_list = vzalloc(sizeof(*buf) * n_pages);
111         if (bm->page_list)
112                 pages = vmalloc(sizeof(struct page *) * n_pages);
113
114         if (!pages)
115                 return;
116
117         for (i = 0; i < n_pages; i++) {
118                 buf = &bm->page_list[i];
119                 if (bm->dma_dir != DMA_NONE)
120 #ifdef CONFIG_HAS_DMA
121                         buf->virt_addr = dma_alloc_coherent(bm->dma_hw_dev,
122                                                             PAGE_SIZE,
123                                                             &buf->dma_addr,
124                                                             GFP_KERNEL |
125                                                             __GFP_COMP);
126 #else
127                         break;
128 #endif
129                 else
130                         buf->virt_addr = (void *)get_zeroed_page(GFP_KERNEL);
131                 if (!buf->virt_addr)
132                         break;
133
134                 set_bit(PG_reserved, &(virt_to_page(buf->virt_addr)->flags));
135
136                 pages[i] = virt_to_page(buf->virt_addr);
137         }
138         spin_lock_irqsave(&s->spin_lock, flags);
139         bm->n_pages = i;
140         spin_unlock_irqrestore(&s->spin_lock, flags);
141
142         /* vmap the prealloc_buf if all the pages were allocated */
143         if (i == n_pages)
144                 async->prealloc_buf = vmap(pages, n_pages, VM_MAP,
145                                            COMEDI_PAGE_PROTECTION);
146
147         vfree(pages);
148 }
149
150 void comedi_buf_map_get(struct comedi_buf_map *bm)
151 {
152         if (bm)
153                 kref_get(&bm->refcount);
154 }
155
156 int comedi_buf_map_put(struct comedi_buf_map *bm)
157 {
158         if (bm)
159                 return kref_put(&bm->refcount, comedi_buf_map_kref_release);
160         return 1;
161 }
162
163 /* returns s->async->buf_map and increments its kref refcount */
164 struct comedi_buf_map *
165 comedi_buf_map_from_subdev_get(struct comedi_subdevice *s)
166 {
167         struct comedi_async *async = s->async;
168         struct comedi_buf_map *bm = NULL;
169         unsigned long flags;
170
171         if (!async)
172                 return NULL;
173
174         spin_lock_irqsave(&s->spin_lock, flags);
175         bm = async->buf_map;
176         /* only want it if buffer pages allocated */
177         if (bm && bm->n_pages)
178                 comedi_buf_map_get(bm);
179         else
180                 bm = NULL;
181         spin_unlock_irqrestore(&s->spin_lock, flags);
182
183         return bm;
184 }
185
186 bool comedi_buf_is_mmapped(struct comedi_async *async)
187 {
188         struct comedi_buf_map *bm = async->buf_map;
189
190         return bm && (atomic_read(&bm->refcount.refcount) > 1);
191 }
192
193 int comedi_buf_alloc(struct comedi_device *dev, struct comedi_subdevice *s,
194                      unsigned long new_size)
195 {
196         struct comedi_async *async = s->async;
197
198         /* Round up new_size to multiple of PAGE_SIZE */
199         new_size = (new_size + PAGE_SIZE - 1) & PAGE_MASK;
200
201         /* if no change is required, do nothing */
202         if (async->prealloc_buf && async->prealloc_bufsz == new_size)
203                 return 0;
204
205         /* deallocate old buffer */
206         __comedi_buf_free(dev, s);
207
208         /* allocate new buffer */
209         if (new_size) {
210                 unsigned n_pages = new_size >> PAGE_SHIFT;
211
212                 __comedi_buf_alloc(dev, s, n_pages);
213
214                 if (!async->prealloc_buf) {
215                         /* allocation failed */
216                         __comedi_buf_free(dev, s);
217                         return -ENOMEM;
218                 }
219         }
220         async->prealloc_bufsz = new_size;
221
222         return 0;
223 }
224
225 void comedi_buf_reset(struct comedi_async *async)
226 {
227         async->buf_write_alloc_count = 0;
228         async->buf_write_count = 0;
229         async->buf_read_alloc_count = 0;
230         async->buf_read_count = 0;
231
232         async->buf_write_ptr = 0;
233         async->buf_read_ptr = 0;
234
235         async->cur_chan = 0;
236         async->scan_progress = 0;
237         async->munge_chan = 0;
238         async->munge_count = 0;
239         async->munge_ptr = 0;
240
241         async->events = 0;
242 }
243
244 static unsigned int comedi_buf_write_n_available(struct comedi_async *async)
245 {
246         unsigned int free_end = async->buf_read_count + async->prealloc_bufsz;
247
248         return free_end - async->buf_write_alloc_count;
249 }
250
251 static unsigned int __comedi_buf_write_alloc(struct comedi_async *async,
252                                              unsigned int nbytes,
253                                              int strict)
254 {
255         unsigned int available = comedi_buf_write_n_available(async);
256
257         if (nbytes > available)
258                 nbytes = strict ? 0 : available;
259
260         async->buf_write_alloc_count += nbytes;
261
262         /*
263          * ensure the async buffer 'counts' are read and updated
264          * before we write data to the write-alloc'ed buffer space
265          */
266         smp_mb();
267
268         return nbytes;
269 }
270
/* Non-strict reservation of free buffer space for the data producer. */
unsigned int comedi_buf_write_alloc(struct comedi_async *async,
                                    unsigned int nbytes)
{
        return __comedi_buf_write_alloc(async, nbytes, 0);
}
EXPORT_SYMBOL_GPL(comedi_buf_write_alloc);
278
/*
 * munging is applied to data by core as it passes between user
 * and kernel space
 *
 * Walks the newly written region of the ring buffer in contiguous
 * chunks, invoking the subdevice's munge callback on each, and advances
 * munge_count/munge_ptr/munge_chan accordingly.  Returns the number of
 * bytes accounted as munged.
 */
static unsigned int comedi_buf_munge(struct comedi_async *async,
                                     unsigned int num_bytes)
{
        struct comedi_subdevice *s = async->subdevice;
        unsigned int count = 0;
        const unsigned num_sample_bytes = bytes_per_sample(s);

        if (!s->munge || (async->cmd.flags & CMDF_RAWDATA)) {
                /* raw mode or no munge callback: just account the bytes */
                async->munge_count += num_bytes;
                count = num_bytes;
        } else {
                /* don't munge partial samples */
                num_bytes -= num_bytes % num_sample_bytes;
                while (count < num_bytes) {
                        int block_size = num_bytes - count;
                        unsigned int buf_end;

                        /* clamp to the contiguous span before the ring wraps */
                        buf_end = async->prealloc_bufsz - async->munge_ptr;
                        if (block_size > buf_end)
                                block_size = buf_end;

                        s->munge(s->device, s,
                                 async->prealloc_buf + async->munge_ptr,
                                 block_size, async->munge_chan);

                        /*
                         * ensure data is munged in buffer before the
                         * async buffer munge_count is incremented
                         */
                        smp_wmb();

                        async->munge_chan += block_size / num_sample_bytes;
                        async->munge_chan %= async->cmd.chanlist_len;
                        async->munge_count += block_size;
                        async->munge_ptr += block_size;
                        async->munge_ptr %= async->prealloc_bufsz;
                        count += block_size;
                }
        }

        return count;
}
325
326 unsigned int comedi_buf_write_n_allocated(struct comedi_async *async)
327 {
328         return async->buf_write_alloc_count - async->buf_write_count;
329 }
330
331 /* transfers a chunk from writer to filled buffer space */
332 unsigned int comedi_buf_write_free(struct comedi_async *async,
333                                    unsigned int nbytes)
334 {
335         unsigned int allocated = comedi_buf_write_n_allocated(async);
336
337         if (nbytes > allocated)
338                 nbytes = allocated;
339
340         async->buf_write_count += nbytes;
341         async->buf_write_ptr += nbytes;
342         comedi_buf_munge(async, async->buf_write_count - async->munge_count);
343         if (async->buf_write_ptr >= async->prealloc_bufsz)
344                 async->buf_write_ptr %= async->prealloc_bufsz;
345
346         return nbytes;
347 }
348 EXPORT_SYMBOL_GPL(comedi_buf_write_free);
349
350 unsigned int comedi_buf_read_n_available(struct comedi_async *async)
351 {
352         unsigned num_bytes;
353
354         if (!async)
355                 return 0;
356
357         num_bytes = async->munge_count - async->buf_read_count;
358
359         /*
360          * ensure the async buffer 'counts' are read before we
361          * attempt to read data from the buffer
362          */
363         smp_rmb();
364
365         return num_bytes;
366 }
367 EXPORT_SYMBOL_GPL(comedi_buf_read_n_available);
368
369 /* allocates a chunk for the reader from filled (and munged) buffer space */
370 unsigned int comedi_buf_read_alloc(struct comedi_async *async,
371                                    unsigned int nbytes)
372 {
373         unsigned int available;
374
375         available = async->munge_count - async->buf_read_alloc_count;
376         if (nbytes > available)
377                 nbytes = available;
378
379         async->buf_read_alloc_count += nbytes;
380
381         /*
382          * ensure the async buffer 'counts' are read before we
383          * attempt to read data from the read-alloc'ed buffer space
384          */
385         smp_rmb();
386
387         return nbytes;
388 }
389 EXPORT_SYMBOL_GPL(comedi_buf_read_alloc);
390
391 static unsigned int comedi_buf_read_n_allocated(struct comedi_async *async)
392 {
393         return async->buf_read_alloc_count - async->buf_read_count;
394 }
395
396 /* transfers control of a chunk from reader to free buffer space */
397 unsigned int comedi_buf_read_free(struct comedi_async *async,
398                                   unsigned int nbytes)
399 {
400         unsigned int allocated;
401
402         /*
403          * ensure data has been read out of buffer before
404          * the async read count is incremented
405          */
406         smp_mb();
407
408         allocated = comedi_buf_read_n_allocated(async);
409         if (nbytes > allocated)
410                 nbytes = allocated;
411
412         async->buf_read_count += nbytes;
413         async->buf_read_ptr += nbytes;
414         async->buf_read_ptr %= async->prealloc_bufsz;
415         return nbytes;
416 }
417 EXPORT_SYMBOL_GPL(comedi_buf_read_free);
418
419 int comedi_buf_put(struct comedi_async *async, unsigned short x)
420 {
421         unsigned int n = __comedi_buf_write_alloc(async, sizeof(short), 1);
422
423         if (n < sizeof(short)) {
424                 async->events |= COMEDI_CB_ERROR;
425                 return 0;
426         }
427         *(unsigned short *)(async->prealloc_buf + async->buf_write_ptr) = x;
428         comedi_buf_write_free(async, sizeof(short));
429         return 1;
430 }
431 EXPORT_SYMBOL_GPL(comedi_buf_put);
432
433 int comedi_buf_get(struct comedi_async *async, unsigned short *x)
434 {
435         unsigned int n = comedi_buf_read_n_available(async);
436
437         if (n < sizeof(short))
438                 return 0;
439         comedi_buf_read_alloc(async, sizeof(short));
440         *x = *(unsigned short *)(async->prealloc_buf + async->buf_read_ptr);
441         comedi_buf_read_free(async, sizeof(short));
442         return 1;
443 }
444 EXPORT_SYMBOL_GPL(comedi_buf_get);
445
446 void comedi_buf_memcpy_to(struct comedi_async *async, unsigned int offset,
447                           const void *data, unsigned int num_bytes)
448 {
449         unsigned int write_ptr = async->buf_write_ptr + offset;
450
451         if (write_ptr >= async->prealloc_bufsz)
452                 write_ptr %= async->prealloc_bufsz;
453
454         while (num_bytes) {
455                 unsigned int block_size;
456
457                 if (write_ptr + num_bytes > async->prealloc_bufsz)
458                         block_size = async->prealloc_bufsz - write_ptr;
459                 else
460                         block_size = num_bytes;
461
462                 memcpy(async->prealloc_buf + write_ptr, data, block_size);
463
464                 data += block_size;
465                 num_bytes -= block_size;
466
467                 write_ptr = 0;
468         }
469 }
470 EXPORT_SYMBOL_GPL(comedi_buf_memcpy_to);
471
472 void comedi_buf_memcpy_from(struct comedi_async *async, unsigned int offset,
473                             void *dest, unsigned int nbytes)
474 {
475         void *src;
476         unsigned int read_ptr = async->buf_read_ptr + offset;
477
478         if (read_ptr >= async->prealloc_bufsz)
479                 read_ptr %= async->prealloc_bufsz;
480
481         while (nbytes) {
482                 unsigned int block_size;
483
484                 src = async->prealloc_buf + read_ptr;
485
486                 if (nbytes >= async->prealloc_bufsz - read_ptr)
487                         block_size = async->prealloc_bufsz - read_ptr;
488                 else
489                         block_size = nbytes;
490
491                 memcpy(dest, src, block_size);
492                 nbytes -= block_size;
493                 dest += block_size;
494                 read_ptr = 0;
495         }
496 }
497 EXPORT_SYMBOL_GPL(comedi_buf_memcpy_from);