/* The industrial I/O core
 *
 * Copyright (c) 2008 Jonathan Cameron
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * Handling of buffer allocation / resizing.
 *
 * Things to look at here.
 * - Better memory allocation techniques?
 * - Alternative access techniques?
 */
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/cdev.h>
#include <linux/slab.h>
#include <linux/poll.h>

#include "iio.h"
#include "iio_core.h"
#include "sysfs.h"
#include "buffer.h"
static const char * const iio_endian_prefix[] = {
        [IIO_BE] = "be",
        [IIO_LE] = "le",
};
/**
 * iio_buffer_read_first_n_outer() - chrdev read for buffer access
 *
 * This function relies on all buffer implementations having an
 * iio_buffer as their first element.
 **/
ssize_t iio_buffer_read_first_n_outer(struct file *filp, char __user *buf,
                                      size_t n, loff_t *f_ps)
{
        struct iio_dev *indio_dev = filp->private_data;
        struct iio_buffer *rb = indio_dev->buffer;

        if (!rb || !rb->access->read_first_n)
                return -EINVAL;
        return rb->access->read_first_n(rb, n, buf);
}
/**
 * iio_buffer_poll() - poll the buffer to find out if it has data
 */
unsigned int iio_buffer_poll(struct file *filp,
                             struct poll_table_struct *wait)
{
        struct iio_dev *indio_dev = filp->private_data;
        struct iio_buffer *rb = indio_dev->buffer;

        poll_wait(filp, &rb->pollq, wait);
        if (rb->stufftoread)
                return POLLIN | POLLRDNORM;
        /* need a way of knowing if there may be enough data... */
        return 0;
}
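/*
 * Opening the buffer chrdev marks the buffer as in use (where the
 * implementation provides mark_in_use/unmark_in_use); this is intended to
 * stop parameter updates such as a resize happening underneath an active
 * reader.  Releasing the chrdev drops that marking again.
 */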
int iio_chrdev_buffer_open(struct iio_dev *indio_dev)
{
        struct iio_buffer *rb = indio_dev->buffer;

        if (!rb)
                return -EINVAL;
        if (rb->access->mark_in_use)
                rb->access->mark_in_use(rb);
        return 0;
}
void iio_chrdev_buffer_release(struct iio_dev *indio_dev)
{
        struct iio_buffer *rb = indio_dev->buffer;

        clear_bit(IIO_BUSY_BIT_POS, &rb->flags);
        if (rb->access->unmark_in_use)
                rb->access->unmark_in_use(rb);
}
void iio_buffer_init(struct iio_buffer *buffer, struct iio_dev *indio_dev)
{
        buffer->indio_dev = indio_dev;
        init_waitqueue_head(&buffer->pollq);
}
EXPORT_SYMBOL(iio_buffer_init);
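/*
 * Illustrative sketch (not part of this file): a buffer implementation is
 * expected to embed struct iio_buffer as its first member, so the chrdev
 * code above can treat indio_dev->buffer as the generic type, and to call
 * iio_buffer_init() once at allocation time.  The names "my_ring" and
 * "my_ring_allocate" below are hypothetical:
 *
 *	struct my_ring {
 *		struct iio_buffer buf;	// must be the first member
 *		u8 *data;		// implementation specific state
 *	};
 *
 *	static struct iio_buffer *my_ring_allocate(struct iio_dev *indio_dev)
 *	{
 *		struct my_ring *ring = kzalloc(sizeof(*ring), GFP_KERNEL);
 *
 *		if (!ring)
 *			return NULL;
 *		iio_buffer_init(&ring->buf, indio_dev);
 *		return &ring->buf;
 *	}
 */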
static ssize_t iio_show_scan_index(struct device *dev,
                                   struct device_attribute *attr,
                                   char *buf)
{
        return sprintf(buf, "%u\n", to_iio_dev_attr(attr)->c->scan_index);
}
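/*
 * The scan element "type" attribute produced below is formatted as
 * "[be|le]:[s|u]<realbits>/<storagebits>>><shift>".  For example (values
 * purely illustrative) "le:s12/16>>0" describes a little-endian, signed,
 * 12-bit sample stored in 16 bits with no shift.
 */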
static ssize_t iio_show_fixed_type(struct device *dev,
                                   struct device_attribute *attr,
                                   char *buf)
{
        struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
        u8 type = this_attr->c->scan_type.endianness;

        if (type == IIO_CPU) {
#ifdef __LITTLE_ENDIAN
                type = IIO_LE;
#else
                type = IIO_BE;
#endif
        }
        return sprintf(buf, "%s:%c%d/%d>>%u\n",
                       iio_endian_prefix[type],
                       this_attr->c->scan_type.sign,
                       this_attr->c->scan_type.realbits,
                       this_attr->c->scan_type.storagebits,
                       this_attr->c->scan_type.shift);
}
static ssize_t iio_scan_el_show(struct device *dev,
                                struct device_attribute *attr,
                                char *buf)
{
        int ret;
        struct iio_dev *indio_dev = dev_get_drvdata(dev);

        ret = iio_scan_mask_query(indio_dev->buffer,
                                  to_iio_dev_attr(attr)->address);
        if (ret < 0)
                return ret;
        return sprintf(buf, "%d\n", ret);
}
static int iio_scan_mask_clear(struct iio_buffer *buffer, int bit)
{
        clear_bit(bit, buffer->scan_mask);
        buffer->scan_count--;
        return 0;
}
static ssize_t iio_scan_el_store(struct device *dev,
                                 struct device_attribute *attr,
                                 const char *buf, size_t len)
{
        int ret = 0;
        bool state;
        struct iio_dev *indio_dev = dev_get_drvdata(dev);
        struct iio_buffer *buffer = indio_dev->buffer;
        struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);

        state = !(buf[0] == '0');
        mutex_lock(&indio_dev->mlock);
        /* Scan elements cannot be changed while the buffer is in use. */
        if (indio_dev->currentmode == INDIO_BUFFER_TRIGGERED) {
                ret = -EBUSY;
                goto error_ret;
        }
        ret = iio_scan_mask_query(buffer, this_attr->address);
        if (ret < 0)
                goto error_ret;
        if (!state && ret) {
                ret = iio_scan_mask_clear(buffer, this_attr->address);
                if (ret)
                        goto error_ret;
        } else if (state && !ret) {
                ret = iio_scan_mask_set(buffer, this_attr->address);
                if (ret)
                        goto error_ret;
        }
error_ret:
        mutex_unlock(&indio_dev->mlock);
        return ret ? ret : len;
}
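/*
 * From userspace the per-channel "en" attributes created by
 * iio_buffer_add_channel_sysfs() below live in the device's scan_elements
 * directory, so (device path and attribute name purely illustrative)
 *
 *	echo 1 > /sys/bus/iio/devices/iio:device0/scan_elements/in_voltage0_en
 *
 * ends up in iio_scan_el_store() and sets the corresponding bit in the
 * buffer's scan mask via iio_scan_mask_set().
 */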
static ssize_t iio_scan_el_ts_show(struct device *dev,
                                   struct device_attribute *attr,
                                   char *buf)
{
        struct iio_dev *indio_dev = dev_get_drvdata(dev);
        return sprintf(buf, "%d\n", indio_dev->buffer->scan_timestamp);
}
static ssize_t iio_scan_el_ts_store(struct device *dev,
                                    struct device_attribute *attr,
                                    const char *buf, size_t len)
{
        int ret = 0;
        bool state;
        struct iio_dev *indio_dev = dev_get_drvdata(dev);

        state = !(buf[0] == '0');
        mutex_lock(&indio_dev->mlock);
        if (indio_dev->currentmode == INDIO_BUFFER_TRIGGERED) {
                ret = -EBUSY;
                goto error_ret;
        }
        indio_dev->buffer->scan_timestamp = state;
error_ret:
        mutex_unlock(&indio_dev->mlock);
        return ret ? ret : len;
}
static int iio_buffer_add_channel_sysfs(struct iio_dev *indio_dev,
                                        const struct iio_chan_spec *chan)
{
        int ret, attrcount = 0;
        struct iio_buffer *buffer = indio_dev->buffer;

        ret = __iio_add_chan_devattr("index", chan, &iio_show_scan_index,
                                     NULL, 0, 0, &indio_dev->dev,
                                     &buffer->scan_el_dev_attr_list);
        if (ret)
                goto error_ret;
        attrcount++;
        ret = __iio_add_chan_devattr("type", chan, &iio_show_fixed_type,
                                     NULL, 0, 0, &indio_dev->dev,
                                     &buffer->scan_el_dev_attr_list);
        if (ret)
                goto error_ret;
        attrcount++;
        if (chan->type != IIO_TIMESTAMP)
                ret = __iio_add_chan_devattr("en", chan, &iio_scan_el_show,
                                             &iio_scan_el_store,
                                             chan->scan_index, 0,
                                             &indio_dev->dev,
                                             &buffer->scan_el_dev_attr_list);
        else
                ret = __iio_add_chan_devattr("en", chan, &iio_scan_el_ts_show,
                                             &iio_scan_el_ts_store,
                                             chan->scan_index, 0,
                                             &indio_dev->dev,
                                             &buffer->scan_el_dev_attr_list);
        attrcount++;
        ret = attrcount;
error_ret:
        return ret;
}
static void iio_buffer_remove_and_free_scan_dev_attr(struct iio_dev *indio_dev,
                                                     struct iio_dev_attr *p)
{
        kfree(p->dev_attr.attr.name);
        kfree(p);
}

static void __iio_buffer_attr_cleanup(struct iio_dev *indio_dev)
{
        struct iio_dev_attr *p, *n;
        struct iio_buffer *buffer = indio_dev->buffer;

        list_for_each_entry_safe(p, n,
                                 &buffer->scan_el_dev_attr_list, l)
                iio_buffer_remove_and_free_scan_dev_attr(indio_dev, p);
}
static const char * const iio_scan_elements_group_name = "scan_elements";
int iio_buffer_register(struct iio_dev *indio_dev,
                        const struct iio_chan_spec *channels,
                        int num_channels)
{
        struct iio_dev_attr *p;
        struct attribute **attr;
        struct iio_buffer *buffer = indio_dev->buffer;
        int ret, i, attrn, attrcount, attrcount_orig = 0;

        if (buffer->attrs)
                indio_dev->groups[indio_dev->groupcounter++] = buffer->attrs;

        if (buffer->scan_el_attrs != NULL) {
                attr = buffer->scan_el_attrs->attrs;
                while (*attr++ != NULL)
                        attrcount_orig++;
        }
        attrcount = attrcount_orig;
        INIT_LIST_HEAD(&buffer->scan_el_dev_attr_list);
        if (channels) {
                for (i = 0; i < num_channels; i++) {
                        /* Establish necessary mask length */
                        if (channels[i].scan_index >
                            (int)indio_dev->masklength - 1)
                                indio_dev->masklength
                                        = channels[i].scan_index + 1;

                        ret = iio_buffer_add_channel_sysfs(indio_dev,
                                                           &channels[i]);
                        if (ret < 0)
                                goto error_cleanup_dynamic;
                        attrcount += ret;
                }
                if (indio_dev->masklength && buffer->scan_mask == NULL) {
                        buffer->scan_mask
                                = kzalloc(sizeof(*buffer->scan_mask)*
                                          BITS_TO_LONGS(indio_dev->masklength),
                                          GFP_KERNEL);
                        if (buffer->scan_mask == NULL) {
                                ret = -ENOMEM;
                                goto error_cleanup_dynamic;
                        }
                }
        }

        buffer->scan_el_group.name = iio_scan_elements_group_name;

        buffer->scan_el_group.attrs
                = kzalloc(sizeof(buffer->scan_el_group.attrs[0])*
                          (attrcount + 1),
                          GFP_KERNEL);
        if (buffer->scan_el_group.attrs == NULL) {
                ret = -ENOMEM;
                goto error_free_scan_mask;
        }
        if (buffer->scan_el_attrs)
                memcpy(buffer->scan_el_group.attrs,
                       buffer->scan_el_attrs->attrs,
                       sizeof(buffer->scan_el_group.attrs[0])*attrcount_orig);
        attrn = attrcount_orig;

        list_for_each_entry(p, &buffer->scan_el_dev_attr_list, l)
                buffer->scan_el_group.attrs[attrn++] = &p->dev_attr.attr;
        indio_dev->groups[indio_dev->groupcounter++] = &buffer->scan_el_group;

        return 0;

error_free_scan_mask:
        kfree(buffer->scan_mask);
error_cleanup_dynamic:
        __iio_buffer_attr_cleanup(indio_dev);

        return ret;
}
EXPORT_SYMBOL(iio_buffer_register);
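/*
 * Typical use, sketched from a driver's probe path and assuming the driver
 * has already allocated indio_dev->buffer and filled in indio_dev->channels
 * and indio_dev->num_channels (the error label name is hypothetical):
 *
 *	ret = iio_buffer_register(indio_dev, indio_dev->channels,
 *				  indio_dev->num_channels);
 *	if (ret)
 *		goto error_free_buffer;
 *
 * with a matching iio_buffer_unregister(indio_dev) on the error and remove
 * paths.
 */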
void iio_buffer_unregister(struct iio_dev *indio_dev)
{
        kfree(indio_dev->buffer->scan_mask);
        kfree(indio_dev->buffer->scan_el_group.attrs);
        __iio_buffer_attr_cleanup(indio_dev);
}
EXPORT_SYMBOL(iio_buffer_unregister);
ssize_t iio_buffer_read_length(struct device *dev,
                               struct device_attribute *attr,
                               char *buf)
{
        struct iio_dev *indio_dev = dev_get_drvdata(dev);
        struct iio_buffer *buffer = indio_dev->buffer;

        if (buffer->access->get_length)
                return sprintf(buf, "%d\n",
                               buffer->access->get_length(buffer));

        return 0;
}
EXPORT_SYMBOL(iio_buffer_read_length);
ssize_t iio_buffer_write_length(struct device *dev,
                                struct device_attribute *attr,
                                const char *buf, size_t len)
{
        int ret;
        ulong val;
        struct iio_dev *indio_dev = dev_get_drvdata(dev);
        struct iio_buffer *buffer = indio_dev->buffer;

        ret = strict_strtoul(buf, 10, &val);
        if (ret)
                return ret;
        /* Nothing to do if the requested length is already set. */
        if (buffer->access->get_length)
                if (val == buffer->access->get_length(buffer))
                        return len;

        if (buffer->access->set_length) {
                buffer->access->set_length(buffer, val);
                if (buffer->access->mark_param_change)
                        buffer->access->mark_param_change(buffer);
        }

        return len;
}
EXPORT_SYMBOL(iio_buffer_write_length);
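/*
 * The "length" attribute backed by the two handlers above is what userspace
 * writes to size the ring before capture, e.g. (device path illustrative)
 *
 *	echo 256 > /sys/bus/iio/devices/iio:device0/buffer/length
 *
 * The new length is recorded via set_length()/mark_param_change() and takes
 * effect when request_update() runs on the next buffer enable.
 */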
ssize_t iio_buffer_read_bytes_per_datum(struct device *dev,
                                        struct device_attribute *attr,
                                        char *buf)
{
        struct iio_dev *indio_dev = dev_get_drvdata(dev);
        struct iio_buffer *buffer = indio_dev->buffer;

        if (buffer->access->get_bytes_per_datum)
                return sprintf(buf, "%d\n",
                               buffer->access->get_bytes_per_datum(buffer));

        return 0;
}
EXPORT_SYMBOL(iio_buffer_read_bytes_per_datum);
ssize_t iio_buffer_store_enable(struct device *dev,
                                struct device_attribute *attr,
                                const char *buf, size_t len)
{
        int ret;
        bool requested_state, current_state;
        int previous_mode;
        struct iio_dev *indio_dev = dev_get_drvdata(dev);
        struct iio_buffer *buffer = indio_dev->buffer;

        mutex_lock(&indio_dev->mlock);
        previous_mode = indio_dev->currentmode;
        requested_state = !(buf[0] == '0');
        current_state = !!(previous_mode & INDIO_ALL_BUFFER_MODES);
        if (current_state == requested_state) {
                printk(KERN_INFO "iio-buffer, current state requested again\n");
                goto done;
        }
        if (requested_state) {
                if (buffer->setup_ops->preenable) {
                        ret = buffer->setup_ops->preenable(indio_dev);
                        if (ret) {
                                printk(KERN_ERR
                                       "Buffer not started: buffer preenable failed\n");
                                goto error_ret;
                        }
                }
                if (buffer->access->request_update) {
                        ret = buffer->access->request_update(buffer);
                        if (ret) {
                                printk(KERN_INFO
                                       "Buffer not started: buffer parameter update failed\n");
                                goto error_ret;
                        }
                }
                if (buffer->access->mark_in_use)
                        buffer->access->mark_in_use(buffer);
                /* Definitely possible for devices to support both of these. */
                if (indio_dev->modes & INDIO_BUFFER_TRIGGERED) {
                        if (!indio_dev->trig) {
                                printk(KERN_INFO
                                       "Buffer not started: no trigger\n");
                                ret = -EINVAL;
                                if (buffer->access->unmark_in_use)
                                        buffer->access->unmark_in_use(buffer);
                                goto error_ret;
                        }
                        indio_dev->currentmode = INDIO_BUFFER_TRIGGERED;
                } else if (indio_dev->modes & INDIO_BUFFER_HARDWARE) {
                        indio_dev->currentmode = INDIO_BUFFER_HARDWARE;
                } else { /* should never be reached */
                        ret = -EINVAL;
                        goto error_ret;
                }

                if (buffer->setup_ops->postenable) {
                        ret = buffer->setup_ops->postenable(indio_dev);
                        if (ret) {
                                printk(KERN_INFO
                                       "Buffer not started: postenable failed\n");
                                if (buffer->access->unmark_in_use)
                                        buffer->access->unmark_in_use(buffer);
                                indio_dev->currentmode = previous_mode;
                                if (buffer->setup_ops->postdisable)
                                        buffer->setup_ops->postdisable(indio_dev);
                                goto error_ret;
                        }
                }
        } else {
                if (buffer->setup_ops->predisable) {
                        ret = buffer->setup_ops->predisable(indio_dev);
                        if (ret)
                                goto error_ret;
                }
                if (buffer->access->unmark_in_use)
                        buffer->access->unmark_in_use(buffer);
                indio_dev->currentmode = INDIO_DIRECT_MODE;
                if (buffer->setup_ops->postdisable) {
                        ret = buffer->setup_ops->postdisable(indio_dev);
                        if (ret)
                                goto error_ret;
                }
        }
done:
        mutex_unlock(&indio_dev->mlock);
        return len;

error_ret:
        mutex_unlock(&indio_dev->mlock);
        return ret;
}
EXPORT_SYMBOL(iio_buffer_store_enable);
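/*
 * Userspace drives the state machine above through the "enable" attribute,
 * e.g. (device path illustrative)
 *
 *	echo 1 > /sys/bus/iio/devices/iio:device0/buffer/enable
 *	echo 0 > /sys/bus/iio/devices/iio:device0/buffer/enable
 *
 * Enabling runs preenable -> request_update -> mark_in_use -> mode switch ->
 * postenable; disabling runs predisable -> unmark_in_use -> postdisable, as
 * implemented in iio_buffer_store_enable().
 */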
ssize_t iio_buffer_show_enable(struct device *dev,
                               struct device_attribute *attr,
                               char *buf)
{
        struct iio_dev *indio_dev = dev_get_drvdata(dev);
        return sprintf(buf, "%d\n", !!(indio_dev->currentmode
                                       & INDIO_ALL_BUFFER_MODES));
}
EXPORT_SYMBOL(iio_buffer_show_enable);
int iio_sw_buffer_preenable(struct iio_dev *indio_dev)
{
        struct iio_buffer *buffer = indio_dev->buffer;
        size_t size;

        dev_dbg(&indio_dev->dev, "%s\n", __func__);
        /* Check if there are any scan elements enabled; if not, fail. */
        if (!(buffer->scan_count || buffer->scan_timestamp))
                return -EINVAL;
        if (buffer->scan_timestamp)
                if (buffer->scan_count)
                        /* Timestamp (aligned to s64) and data */
                        size = (((buffer->scan_count * buffer->bpe)
                                        + sizeof(s64) - 1)
                                & ~(sizeof(s64) - 1))
                                + sizeof(s64);
                else /* Timestamp only */
                        size = sizeof(s64);
        else /* Data only */
                size = buffer->scan_count * buffer->bpe;
        buffer->access->set_bytes_per_datum(buffer, size);

        return 0;
}
EXPORT_SYMBOL(iio_sw_buffer_preenable);
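/*
 * Worked example of the sizing above (numbers illustrative): three enabled
 * channels with bpe = 2 give 3 * 2 = 6 bytes of data, rounded up to 8 bytes
 * for s64 alignment, plus an 8 byte timestamp, so set_bytes_per_datum() is
 * called with 16.
 */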
/* Note: NULL is used as the error indicator, as a NULL match makes sense. */
static unsigned long *iio_scan_mask_match(unsigned long *av_masks,
                                          unsigned int masklength,
                                          unsigned long *mask)
{
        if (bitmap_empty(mask, masklength))
                return NULL;
        while (*av_masks) {
                if (bitmap_subset(mask, av_masks, masklength))
                        return av_masks;
                av_masks += BITS_TO_LONGS(masklength);
        }
        return NULL;
}
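/*
 * Illustrative sketch of how the matching above is used: a driver whose
 * hardware can only capture fixed channel combinations publishes them in
 * indio_dev->available_scan_masks, e.g. (values hypothetical)
 *
 *	static unsigned long my_scan_masks[] = { 0x3, 0xf, 0 };
 *
 *	indio_dev->available_scan_masks = my_scan_masks;
 *
 * iio_scan_mask_match() then returns the first entry that is a superset of
 * the channels userspace asked for; an all-zero entry terminates the list.
 */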
/**
 * iio_scan_mask_set() - set particular bit in the scan mask
 * @buffer: the buffer whose scan mask we are interested in
 * @bit: the bit to be set.
 **/
int iio_scan_mask_set(struct iio_buffer *buffer, int bit)
{
        struct iio_dev *indio_dev = buffer->indio_dev;
        unsigned long *mask;
        unsigned long *trialmask;

        trialmask = kmalloc(sizeof(*trialmask)*
                            BITS_TO_LONGS(indio_dev->masklength),
                            GFP_KERNEL);
        if (trialmask == NULL)
                return -ENOMEM;
        if (!indio_dev->masklength) {
                WARN(1, "trying to set scanmask prior to registering buffer\n");
                kfree(trialmask);
                return -EINVAL;
        }
        bitmap_copy(trialmask, buffer->scan_mask, indio_dev->masklength);
        set_bit(bit, trialmask);

        if (indio_dev->available_scan_masks) {
                mask = iio_scan_mask_match(indio_dev->available_scan_masks,
                                           indio_dev->masklength,
                                           trialmask);
                if (!mask) {
                        kfree(trialmask);
                        return -EINVAL;
                }
        }
        bitmap_copy(buffer->scan_mask, trialmask, indio_dev->masklength);
        buffer->scan_count++;
        kfree(trialmask);

        return 0;
}
EXPORT_SYMBOL_GPL(iio_scan_mask_set);
int iio_scan_mask_query(struct iio_buffer *buffer, int bit)
{
        struct iio_dev *indio_dev = buffer->indio_dev;
        unsigned long *mask;

        if (bit >= indio_dev->masklength)
                return -EINVAL;

        if (!buffer->scan_mask)
                return 0;
        if (indio_dev->available_scan_masks)
                mask = iio_scan_mask_match(indio_dev->available_scan_masks,
                                           indio_dev->masklength,
                                           buffer->scan_mask);
        else
                mask = buffer->scan_mask;
        if (!mask)
                return 0;

        return test_bit(bit, mask);
}
EXPORT_SYMBOL_GPL(iio_scan_mask_query);