staging: most: add fair buffer distribution
[firefly-linux-kernel-4.4.55.git] / drivers / staging / most / aim-cdev / cdev.c
1 /*
2  * cdev.c - Application interfacing module for character devices
3  *
4  * Copyright (C) 2013-2015 Microchip Technology Germany II GmbH & Co. KG
5  *
6  * This program is distributed in the hope that it will be useful,
7  * but WITHOUT ANY WARRANTY; without even the implied warranty of
8  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
9  * GNU General Public License for more details.
10  *
11  * This file is licensed under GPLv2.
12  */
13
14 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
15 #include <linux/module.h>
16 #include <linux/sched.h>
17 #include <linux/fs.h>
18 #include <linux/slab.h>
19 #include <linux/device.h>
20 #include <linux/cdev.h>
21 #include <linux/kfifo.h>
22 #include <linux/uaccess.h>
23 #include <linux/idr.h>
24 #include "mostcore.h"
25
/* Character-device region and class shared by all channels of this AIM. */
static dev_t aim_devno;
static struct class *aim_class;
static struct ida minor_id;		/* allocator for per-channel minor numbers */
static unsigned int major;		/* major of the region reserved in mod_init() */
static struct most_aim cdev_aim;	/* callback set; fully defined near end of file */
31
/*
 * Per-channel state of this AIM.  One instance is allocated by aim_probe()
 * for every MOST channel linked to the cdev AIM and freed again in
 * aim_disconnect_channel(), aim_close() or mod_exit().
 */
struct aim_channel {
	wait_queue_head_t wq;		/* readers/writers sleep here for buffers */
	struct cdev cdev;		/* char device backing the /dev node */
	struct device *dev;		/* NULL once the hardware is disconnected */
	struct mutex io_mutex;		/* serializes ->dev checks against teardown */
	struct most_interface *iface;
	struct most_channel_config *cfg;
	unsigned int channel_id;
	dev_t devno;
	bool keep_mbo;			/* true: stacked_mbo still holds unread RX data */
	unsigned int mbo_offs;		/* read offset into the stacked mbo */
	struct mbo *stacked_mbo;	/* partially consumed RX buffer */
	DECLARE_KFIFO_PTR(fifo, typeof(struct mbo *));	/* completed RX mbos */
	atomic_t access_ref;		/* -1 = free, 0 = opened; single opener only */
	struct list_head list;		/* entry in global channel_list */
};
#define to_channel(d) container_of(d, struct aim_channel, cdev)
/* Registry of all channels of this AIM, protected by ch_list_lock. */
static struct list_head channel_list;
static spinlock_t ch_list_lock;
51
52 static struct aim_channel *get_channel(struct most_interface *iface, int id)
53 {
54         struct aim_channel *channel, *tmp;
55         unsigned long flags;
56         int found_channel = 0;
57
58         spin_lock_irqsave(&ch_list_lock, flags);
59         list_for_each_entry_safe(channel, tmp, &channel_list, list) {
60                 if ((channel->iface == iface) && (channel->channel_id == id)) {
61                         found_channel = 1;
62                         break;
63                 }
64         }
65         spin_unlock_irqrestore(&ch_list_lock, flags);
66         if (!found_channel)
67                 return NULL;
68         return channel;
69 }
70
/**
 * aim_open - implements the syscall to open the device
 * @inode: inode pointer
 * @filp: file pointer
 *
 * This stores the channel pointer in the private data field of
 * the file structure and activates the channel within the core.
 *
 * Only a single opener is admitted per channel, and the open mode must
 * match the channel's data direction (RX -> O_RDONLY, TX -> O_WRONLY).
 */
static int aim_open(struct inode *inode, struct file *filp)
{
	struct aim_channel *channel;
	int ret;

	channel = to_channel(inode->i_cdev);
	filp->private_data = channel;

	/* Reject access modes incompatible with the channel direction. */
	if (((channel->cfg->direction == MOST_CH_RX) &&
	     ((filp->f_flags & O_ACCMODE) != O_RDONLY))
	    || ((channel->cfg->direction == MOST_CH_TX) &&
		((filp->f_flags & O_ACCMODE) != O_WRONLY))) {
		pr_info("WARN: Access flags mismatch\n");
		return -EACCES;
	}
	/*
	 * access_ref is -1 while the channel is unused; an increment that
	 * lands exactly on 0 means we are the one permitted opener.  Any
	 * other result means someone else holds the device, so undo.
	 */
	if (!atomic_inc_and_test(&channel->access_ref)) {
		pr_info("WARN: Device is busy\n");
		atomic_dec(&channel->access_ref);
		return -EBUSY;
	}

	ret = most_start_channel(channel->iface, channel->channel_id, &cdev_aim);
	if (ret)
		atomic_dec(&channel->access_ref); /* start failed: release claim */
	return ret;
}
105
/**
 * aim_close - implements the syscall to close the device
 * @inode: inode pointer
 * @filp: file pointer
 *
 * This stops the channel within the core.
 *
 * If the hardware disappeared while the file was open (channel->dev was
 * cleared by aim_disconnect_channel()), the deferred teardown of the
 * whole channel object is performed here instead.
 */
static int aim_close(struct inode *inode, struct file *filp)
{
	int ret;
	struct mbo *mbo;
	struct aim_channel *channel = to_channel(inode->i_cdev);

	mutex_lock(&channel->io_mutex);
	if (!channel->dev) {
		/*
		 * Device is already gone; we are the last user, so free
		 * everything that aim_disconnect_channel() left behind.
		 * NOTE(review): list_del runs here without ch_list_lock,
		 * unlike the locked removal in aim_disconnect_channel()
		 * — confirm no concurrent get_channel() can race this.
		 */
		mutex_unlock(&channel->io_mutex);
		atomic_dec(&channel->access_ref);
		device_destroy(aim_class, channel->devno);
		cdev_del(&channel->cdev);
		kfifo_free(&channel->fifo);
		list_del(&channel->list);
		ida_simple_remove(&minor_id, MINOR(channel->devno));
		wake_up_interruptible(&channel->wq);
		kfree(channel);
		return 0;
	}
	mutex_unlock(&channel->io_mutex);

	/* Return every queued RX buffer to the core before stopping. */
	while (0 != kfifo_out((struct kfifo *)&channel->fifo, &mbo, 1))
		most_put_mbo(mbo);
	if (channel->keep_mbo)
		most_put_mbo(channel->stacked_mbo);
	ret = most_stop_channel(channel->iface, channel->channel_id, &cdev_aim);
	atomic_dec(&channel->access_ref);	/* back to -1: channel free again */
	wake_up_interruptible(&channel->wq);
	return ret;
}
143
/**
 * aim_write - implements the syscall to write to the device
 * @filp: file pointer
 * @buf: pointer to user buffer
 * @count: number of bytes to write
 * @offset: offset from where to start writing
 *
 * Fetches a free MBO from the core (blocking unless O_NONBLOCK), copies
 * at most one buffer's worth of user data into it and submits it.
 * Returns the number of bytes accepted or a negative error code; a
 * write larger than the channel buffer size is silently truncated.
 */
static ssize_t aim_write(struct file *filp, const char __user *buf,
			 size_t count, loff_t *offset)
{
	int ret, err;
	size_t actual_len = 0;
	size_t max_len = 0;
	ssize_t retval;
	struct mbo *mbo;
	struct aim_channel *channel = filp->private_data;

	/* Fail fast if the hardware is already gone. */
	mutex_lock(&channel->io_mutex);
	if (unlikely(!channel->dev)) {
		mutex_unlock(&channel->io_mutex);
		return -EPIPE;
	}
	mutex_unlock(&channel->io_mutex);

	mbo = most_get_mbo(channel->iface, channel->channel_id, &cdev_aim);

	if (!mbo) {
		if ((filp->f_flags & O_NONBLOCK))
			return -EAGAIN;
		/* Sleep until a buffer frees up or the device vanishes. */
		if (wait_event_interruptible(
			    channel->wq,
			    (mbo = most_get_mbo(channel->iface,
						channel->channel_id,
						&cdev_aim)) ||
			    (!channel->dev)))
			return -ERESTARTSYS;
	}

	/* Re-check: the device may have disappeared while we slept. */
	mutex_lock(&channel->io_mutex);
	if (unlikely(!channel->dev)) {
		mutex_unlock(&channel->io_mutex);
		err = -EPIPE;
		goto error;
	}
	mutex_unlock(&channel->io_mutex);

	max_len = channel->cfg->buffer_size;
	actual_len = min(count, max_len);
	mbo->buffer_length = actual_len;

	retval = copy_from_user(mbo->virt_address, buf, mbo->buffer_length);
	if (retval) {
		err = -EIO;
		goto error;
	}

	ret = most_submit_mbo(mbo);
	if (ret) {
		pr_info("submitting MBO to core failed\n");
		err = ret;
		goto error;
	}
	/* retval is necessarily 0 here, so this returns actual_len. */
	return actual_len - retval;
error:
	if (mbo)
		most_put_mbo(mbo);	/* give the unused buffer back to the core */
	return err;
}
212
213 /**
214  * aim_read - implements the syscall to read from the device
215  * @filp: file pointer
216  * @buf: pointer to user buffer
217  * @count: number of bytes to read
218  * @offset: offset from where to start reading
219  */
220 static ssize_t
221 aim_read(struct file *filp, char __user *buf, size_t count, loff_t *offset)
222 {
223         ssize_t retval;
224         size_t not_copied, proc_len;
225         struct mbo *mbo;
226         struct aim_channel *channel = filp->private_data;
227
228         if (channel->keep_mbo) {
229                 mbo = channel->stacked_mbo;
230                 channel->keep_mbo = false;
231                 goto start_copy;
232         }
233         while ((0 == kfifo_out(&channel->fifo, &mbo, 1))
234                && (channel->dev)) {
235                 if (filp->f_flags & O_NONBLOCK)
236                         return -EAGAIN;
237                 if (wait_event_interruptible(channel->wq,
238                                              (!kfifo_is_empty(&channel->fifo) ||
239                                               (!channel->dev))))
240                         return -ERESTARTSYS;
241         }
242
243 start_copy:
244         /* make sure we don't submit to gone devices */
245         mutex_lock(&channel->io_mutex);
246         if (unlikely(!channel->dev)) {
247                 mutex_unlock(&channel->io_mutex);
248                 return -EIO;
249         }
250
251         if (count < mbo->processed_length)
252                 channel->keep_mbo = true;
253
254         proc_len = min((int)count,
255                        (int)(mbo->processed_length - channel->mbo_offs));
256
257         not_copied = copy_to_user(buf,
258                                   mbo->virt_address + channel->mbo_offs,
259                                   proc_len);
260
261         retval = not_copied ? proc_len - not_copied : proc_len;
262
263         if (channel->keep_mbo) {
264                 channel->mbo_offs = retval;
265                 channel->stacked_mbo = mbo;
266         } else {
267                 most_put_mbo(mbo);
268                 channel->mbo_offs = 0;
269         }
270         mutex_unlock(&channel->io_mutex);
271         return retval;
272 }
273
/**
 * File operations backing every channel's /dev node.  Only read, write,
 * open and release are provided; there is no llseek or poll support.
 */
static const struct file_operations channel_fops = {
	.owner = THIS_MODULE,
	.read = aim_read,
	.write = aim_write,
	.open = aim_open,
	.release = aim_close,
};
284
/**
 * aim_disconnect_channel - disconnect a channel
 * @iface: pointer to interface instance
 * @channel_id: channel index
 *
 * This frees allocated memory and removes the cdev that represents this
 * channel in user space.
 *
 * If the channel is currently held open, teardown is deferred to
 * aim_close(); this function then only marks the device gone and wakes
 * any sleeping readers/writers.
 */
static int aim_disconnect_channel(struct most_interface *iface, int channel_id)
{
	struct aim_channel *channel;
	unsigned long flags;

	if (!iface) {
		pr_info("Bad interface pointer\n");
		return -EINVAL;
	}

	channel = get_channel(iface, channel_id);
	if (!channel)
		return -ENXIO;

	/* Mark the hardware gone so I/O paths fail with -EPIPE/-EIO. */
	mutex_lock(&channel->io_mutex);
	channel->dev = NULL;
	mutex_unlock(&channel->io_mutex);

	/*
	 * access_ref is -1 (i.e. non-zero) when nobody holds the device
	 * open, so the channel can be destroyed right here.  A value of
	 * 0 means an opener exists: just wake it and let aim_close()
	 * finish the job.
	 */
	if (atomic_read(&channel->access_ref)) {
		device_destroy(aim_class, channel->devno);
		cdev_del(&channel->cdev);
		kfifo_free(&channel->fifo);
		ida_simple_remove(&minor_id, MINOR(channel->devno));
		spin_lock_irqsave(&ch_list_lock, flags);
		list_del(&channel->list);
		spin_unlock_irqrestore(&ch_list_lock, flags);
		kfree(channel);
	} else {
		wake_up_interruptible(&channel->wq);
	}
	return 0;
}
325
326 /**
327  * aim_rx_completion - completion handler for rx channels
328  * @mbo: pointer to buffer object that has completed
329  *
330  * This searches for the channel linked to this MBO and stores it in the local
331  * fifo buffer.
332  */
333 static int aim_rx_completion(struct mbo *mbo)
334 {
335         struct aim_channel *channel;
336
337         if (!mbo)
338                 return -EINVAL;
339
340         channel = get_channel(mbo->ifp, mbo->hdm_channel_id);
341         if (!channel)
342                 return -ENXIO;
343
344         kfifo_in(&channel->fifo, &mbo, 1);
345 #ifdef DEBUG_MESG
346         if (kfifo_is_full(&channel->fifo))
347                 pr_info("WARN: Fifo is full\n");
348 #endif
349         wake_up_interruptible(&channel->wq);
350         return 0;
351 }
352
353 /**
354  * aim_tx_completion - completion handler for tx channels
355  * @iface: pointer to interface instance
356  * @channel_id: channel index/ID
357  *
358  * This wakes sleeping processes in the wait-queue.
359  */
360 static int aim_tx_completion(struct most_interface *iface, int channel_id)
361 {
362         struct aim_channel *channel;
363
364         if (!iface) {
365                 pr_info("Bad interface pointer\n");
366                 return -EINVAL;
367         }
368         if ((channel_id < 0) || (channel_id >= iface->num_channels)) {
369                 pr_info("Channel ID out of range\n");
370                 return -EINVAL;
371         }
372
373         channel = get_channel(iface, channel_id);
374         if (!channel)
375                 return -ENXIO;
376         wake_up_interruptible(&channel->wq);
377         return 0;
378 }
379
/* Redundant forward declaration (already declared at the top of the file);
 * the initialized definition follows aim_probe() below. */
static struct most_aim cdev_aim;
381
382 /**
383  * aim_probe - probe function of the driver module
384  * @iface: pointer to interface instance
385  * @channel_id: channel index/ID
386  * @cfg: pointer to actual channel configuration
387  * @parent: pointer to kobject (needed for sysfs hook-up)
388  * @name: name of the device to be created
389  *
390  * This allocates achannel object and creates the device node in /dev
391  *
392  * Returns 0 on success or error code otherwise.
393  */
394 static int aim_probe(struct most_interface *iface, int channel_id,
395                      struct most_channel_config *cfg,
396                      struct kobject *parent, char *name)
397 {
398         struct aim_channel *channel;
399         unsigned long cl_flags;
400         int retval;
401         int current_minor;
402
403         if ((!iface) || (!cfg) || (!parent) || (!name)) {
404                 pr_info("Probing AIM with bad arguments");
405                 return -EINVAL;
406         }
407         channel = get_channel(iface, channel_id);
408         if (channel)
409                 return -EEXIST;
410
411         current_minor = ida_simple_get(&minor_id, 0, 0, GFP_KERNEL);
412         if (current_minor < 0)
413                 return current_minor;
414
415         channel = kzalloc(sizeof(*channel), GFP_KERNEL);
416         if (!channel) {
417                 retval = -ENOMEM;
418                 goto error_alloc_channel;
419         }
420
421         channel->devno = MKDEV(major, current_minor);
422         cdev_init(&channel->cdev, &channel_fops);
423         channel->cdev.owner = THIS_MODULE;
424         cdev_add(&channel->cdev, channel->devno, 1);
425         channel->iface = iface;
426         channel->cfg = cfg;
427         channel->channel_id = channel_id;
428         channel->mbo_offs = 0;
429         atomic_set(&channel->access_ref, -1);
430         INIT_KFIFO(channel->fifo);
431         retval = kfifo_alloc(&channel->fifo, cfg->num_buffers, GFP_KERNEL);
432         if (retval) {
433                 pr_info("failed to alloc channel kfifo");
434                 goto error_alloc_kfifo;
435         }
436         init_waitqueue_head(&channel->wq);
437         mutex_init(&channel->io_mutex);
438         spin_lock_irqsave(&ch_list_lock, cl_flags);
439         list_add_tail(&channel->list, &channel_list);
440         spin_unlock_irqrestore(&ch_list_lock, cl_flags);
441         channel->dev = device_create(aim_class,
442                                      NULL,
443                                      channel->devno,
444                                      NULL,
445                                      "%s", name);
446
447         retval = IS_ERR(channel->dev);
448         if (retval) {
449                 pr_info("failed to create new device node %s\n", name);
450                 goto error_create_device;
451         }
452         kobject_uevent(&channel->dev->kobj, KOBJ_ADD);
453         return 0;
454
455 error_create_device:
456         kfifo_free(&channel->fifo);
457         list_del(&channel->list);
458 error_alloc_kfifo:
459         cdev_del(&channel->cdev);
460         kfree(channel);
461 error_alloc_channel:
462         ida_simple_remove(&minor_id, current_minor);
463         return retval;
464 }
465
/* Callback set registered with the mostcore in mod_init(). */
static struct most_aim cdev_aim = {
	.name = "cdev",
	.probe_channel = aim_probe,
	.disconnect_channel = aim_disconnect_channel,
	.rx_completion = aim_rx_completion,
	.tx_completion = aim_tx_completion,
};
473
474 static int __init mod_init(void)
475 {
476         pr_info("init()\n");
477
478         INIT_LIST_HEAD(&channel_list);
479         spin_lock_init(&ch_list_lock);
480         ida_init(&minor_id);
481
482         if (alloc_chrdev_region(&aim_devno, 0, 50, "cdev") < 0)
483                 return -EIO;
484         major = MAJOR(aim_devno);
485
486         aim_class = class_create(THIS_MODULE, "most_cdev_aim");
487         if (IS_ERR(aim_class)) {
488                 pr_err("no udev support\n");
489                 goto free_cdev;
490         }
491
492         if (most_register_aim(&cdev_aim))
493                 goto dest_class;
494         return 0;
495
496 dest_class:
497         class_destroy(aim_class);
498 free_cdev:
499         unregister_chrdev_region(aim_devno, 1);
500         return -EIO;
501 }
502
503 static void __exit mod_exit(void)
504 {
505         struct aim_channel *channel, *tmp;
506
507         pr_info("exit module\n");
508
509         most_deregister_aim(&cdev_aim);
510
511         list_for_each_entry_safe(channel, tmp, &channel_list, list) {
512                 device_destroy(aim_class, channel->devno);
513                 cdev_del(&channel->cdev);
514                 kfifo_free(&channel->fifo);
515                 list_del(&channel->list);
516                 ida_simple_remove(&minor_id, MINOR(channel->devno));
517                 kfree(channel);
518         }
519         class_destroy(aim_class);
520         unregister_chrdev_region(aim_devno, 1);
521         ida_destroy(&minor_id);
522 }
523
/* Module entry/exit hooks and metadata. */
module_init(mod_init);
module_exit(mod_exit);
MODULE_AUTHOR("Christian Gromm <christian.gromm@microchip.com>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("character device AIM for mostcore");