/* The industrial I/O core
 *
 * Copyright (c) 2008 Jonathan Cameron
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * Handling of buffer allocation / resizing.
 *
 * Things to look at here.
 * - Better memory allocation techniques?
 * - Alternative access techniques?
 */
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/cdev.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/sched.h>

#include <linux/iio/iio.h>
#include "iio_core.h"
#include <linux/iio/sysfs.h>
#include <linux/iio/buffer.h>
static const char * const iio_endian_prefix[] = {
        [IIO_BE] = "be",
        [IIO_LE] = "le",
};
static bool iio_buffer_is_active(struct iio_buffer *buf)
{
        return !list_empty(&buf->buffer_list);
}
static size_t iio_buffer_data_available(struct iio_buffer *buf)
{
        return buf->access->data_available(buf);
}
static int iio_buffer_flush_hwfifo(struct iio_dev *indio_dev,
                                   struct iio_buffer *buf, size_t required)
{
        if (!indio_dev->info->hwfifo_flush_to_buffer)
                return -ENODEV;

        return indio_dev->info->hwfifo_flush_to_buffer(indio_dev, required);
}
static bool iio_buffer_ready(struct iio_dev *indio_dev, struct iio_buffer *buf,
                             size_t to_wait, int to_flush)
{
        size_t avail;
        int flushed = 0;

        /* wakeup if the device was unregistered */
        if (!indio_dev->info)
                return true;

        /* drain the buffer if it was disabled */
        if (!iio_buffer_is_active(buf)) {
                to_wait = min_t(size_t, to_wait, 1);
                to_flush = 0;
        }

        avail = iio_buffer_data_available(buf);

        if (avail >= to_wait) {
                /* force a flush for non-blocking reads */
                if (!to_wait && !avail && to_flush)
                        iio_buffer_flush_hwfifo(indio_dev, buf, to_flush);
                return true;
        }

        if (to_flush)
                flushed = iio_buffer_flush_hwfifo(indio_dev, buf,
                                                  to_wait - avail);
        if (flushed <= 0)
                return false;

        if (avail + flushed >= to_wait)
                return true;

        return false;
}
/**
 * iio_buffer_read_first_n_outer() - chrdev read for buffer access
 *
 * This function relies on all buffer implementations having an
 * iio_buffer as their first element.
 */
ssize_t iio_buffer_read_first_n_outer(struct file *filp, char __user *buf,
                                      size_t n, loff_t *f_ps)
{
        struct iio_dev *indio_dev = filp->private_data;
        struct iio_buffer *rb = indio_dev->buffer;
        size_t datum_size;
        size_t to_wait = 0;
        size_t to_read;
        int ret;

        if (!indio_dev->info)
                return -ENODEV;

        if (!rb || !rb->access->read_first_n)
                return -EINVAL;

        datum_size = rb->bytes_per_datum;

        /*
         * If datum_size is 0 there will never be anything to read from the
         * buffer, so signal end of file now.
         */
        if (!datum_size)
                return 0;

        to_read = min_t(size_t, n / datum_size, rb->watermark);

        if (!(filp->f_flags & O_NONBLOCK))
                to_wait = to_read;

        do {
                ret = wait_event_interruptible(rb->pollq,
                        iio_buffer_ready(indio_dev, rb, to_wait, to_read));
                if (ret)
                        return ret;

                if (!indio_dev->info)
                        return -ENODEV;

                ret = rb->access->read_first_n(rb, n, buf);
                if (ret == 0 && (filp->f_flags & O_NONBLOCK))
                        ret = -EAGAIN;
        } while (ret == 0);

        return ret;
}
/**
 * iio_buffer_poll() - poll the buffer to find out if it has data
 */
unsigned int iio_buffer_poll(struct file *filp,
                             struct poll_table_struct *wait)
{
        struct iio_dev *indio_dev = filp->private_data;
        struct iio_buffer *rb = indio_dev->buffer;

        if (!indio_dev->info)
                return 0;

        poll_wait(filp, &rb->pollq, wait);
        if (iio_buffer_ready(indio_dev, rb, rb->watermark, 0))
                return POLLIN | POLLRDNORM;

        return 0;
}
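
/*
 * Example (illustrative userspace sketch, not part of this file): the poll()
 * and read() paths above back the /dev/iio:deviceX chrdev. A consumer that
 * has configured and enabled the buffer typically does something like the
 * following; the device index and the process_scans() helper are
 * hypothetical:
 *
 *      int fd = open("/dev/iio:device0", O_RDONLY | O_NONBLOCK);
 *      struct pollfd pfd = { .fd = fd, .events = POLLIN };
 *      char buf[4096];
 *
 *      while (poll(&pfd, 1, -1) > 0) {
 *              ssize_t n = read(fd, buf, sizeof(buf));
 *              if (n > 0)
 *                      process_scans(buf, n);  // n bytes of interleaved scans
 *      }
 */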
/**
 * iio_buffer_wakeup_poll - Wakes up the buffer waitqueue
 * @indio_dev: The IIO device
 *
 * Wakes up the event waitqueue used for poll(). Should usually
 * be called when the device is unregistered.
 */
void iio_buffer_wakeup_poll(struct iio_dev *indio_dev)
{
        if (!indio_dev->buffer)
                return;

        wake_up(&indio_dev->buffer->pollq);
}
void iio_buffer_init(struct iio_buffer *buffer)
{
        INIT_LIST_HEAD(&buffer->demux_list);
        INIT_LIST_HEAD(&buffer->buffer_list);
        init_waitqueue_head(&buffer->pollq);
        kref_init(&buffer->ref);
        buffer->watermark = 1;
}
EXPORT_SYMBOL(iio_buffer_init);
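
/*
 * Example (hedged sketch): a buffer implementation embeds struct iio_buffer
 * as its first member (the chrdev code above relies on this) and calls
 * iio_buffer_init() once at allocation time. Names prefixed my_ are
 * hypothetical:
 *
 *      struct my_buffer {
 *              struct iio_buffer buffer;       // must be the first member
 *      };
 *
 *      struct iio_buffer *my_buffer_alloc(void)
 *      {
 *              struct my_buffer *b = kzalloc(sizeof(*b), GFP_KERNEL);
 *
 *              if (!b)
 *                      return NULL;
 *              iio_buffer_init(&b->buffer);
 *              b->buffer.access = &my_buffer_access_funcs;
 *              return &b->buffer;
 *      }
 */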
static ssize_t iio_show_scan_index(struct device *dev,
                                   struct device_attribute *attr,
                                   char *buf)
{
        return sprintf(buf, "%u\n", to_iio_dev_attr(attr)->c->scan_index);
}
static ssize_t iio_show_fixed_type(struct device *dev,
                                   struct device_attribute *attr,
                                   char *buf)
{
        struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
        u8 type = this_attr->c->scan_type.endianness;

        if (type == IIO_CPU) {
#ifdef __LITTLE_ENDIAN
                type = IIO_LE;
#else
                type = IIO_BE;
#endif
        }
        if (this_attr->c->scan_type.repeat > 1)
                return sprintf(buf, "%s:%c%d/%dX%d>>%u\n",
                       iio_endian_prefix[type],
                       this_attr->c->scan_type.sign,
                       this_attr->c->scan_type.realbits,
                       this_attr->c->scan_type.storagebits,
                       this_attr->c->scan_type.repeat,
                       this_attr->c->scan_type.shift);
        else
                return sprintf(buf, "%s:%c%d/%d>>%u\n",
                       iio_endian_prefix[type],
                       this_attr->c->scan_type.sign,
                       this_attr->c->scan_type.realbits,
                       this_attr->c->scan_type.storagebits,
                       this_attr->c->scan_type.shift);
}
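
/*
 * The string built above follows the scan element "type" sysfs ABI:
 * [be|le]:[s|u]bits/storagebits[Xrepeat]>>shift. For example, a hypothetical
 * signed 12-bit little-endian channel stored in 16 bits and shifted right by
 * 4 reads as "le:s12/16>>4".
 */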
static ssize_t iio_scan_el_show(struct device *dev,
                                struct device_attribute *attr,
                                char *buf)
{
        int ret;
        struct iio_dev *indio_dev = dev_to_iio_dev(dev);

        /* Ensure ret is 0 or 1. */
        ret = !!test_bit(to_iio_dev_attr(attr)->address,
                         indio_dev->buffer->scan_mask);

        return sprintf(buf, "%d\n", ret);
}
/* Note NULL is used as an error indicator, as it never makes sense as a
 * valid match. */
static const unsigned long *iio_scan_mask_match(const unsigned long *av_masks,
                                                unsigned int masklength,
                                                const unsigned long *mask)
{
        if (bitmap_empty(mask, masklength))
                return NULL;
        while (*av_masks) {
                if (bitmap_subset(mask, av_masks, masklength))
                        return av_masks;
                av_masks += BITS_TO_LONGS(masklength);
        }
        return NULL;
}
static bool iio_validate_scan_mask(struct iio_dev *indio_dev,
                                   const unsigned long *mask)
{
        if (!indio_dev->setup_ops->validate_scan_mask)
                return true;

        return indio_dev->setup_ops->validate_scan_mask(indio_dev, mask);
}
/**
 * iio_scan_mask_set() - set particular bit in the scan mask
 * @indio_dev: the iio device
 * @buffer: the buffer whose scan mask we are interested in
 * @bit: the bit to be set.
 *
 * Note that at this point we have no way of knowing what other
 * buffers might request, hence this code only verifies that the
 * individual buffer's request is plausible.
 */
static int iio_scan_mask_set(struct iio_dev *indio_dev,
                             struct iio_buffer *buffer, int bit)
{
        const unsigned long *mask;
        unsigned long *trialmask;

        trialmask = kmalloc(sizeof(*trialmask)*
                            BITS_TO_LONGS(indio_dev->masklength),
                            GFP_KERNEL);

        if (trialmask == NULL)
                return -ENOMEM;
        if (!indio_dev->masklength) {
                WARN_ON("Trying to set scanmask prior to registering buffer\n");
                goto err_invalid_mask;
        }
        bitmap_copy(trialmask, buffer->scan_mask, indio_dev->masklength);
        set_bit(bit, trialmask);

        if (!iio_validate_scan_mask(indio_dev, trialmask))
                goto err_invalid_mask;

        if (indio_dev->available_scan_masks) {
                mask = iio_scan_mask_match(indio_dev->available_scan_masks,
                                           indio_dev->masklength,
                                           trialmask);
                if (!mask)
                        goto err_invalid_mask;
        }
        bitmap_copy(buffer->scan_mask, trialmask, indio_dev->masklength);

        kfree(trialmask);

        return 0;

err_invalid_mask:
        kfree(trialmask);
        return -EINVAL;
}
static int iio_scan_mask_clear(struct iio_buffer *buffer, int bit)
{
        clear_bit(bit, buffer->scan_mask);
        return 0;
}
static ssize_t iio_scan_el_store(struct device *dev,
                                 struct device_attribute *attr,
                                 const char *buf,
                                 size_t len)
{
        int ret;
        bool state;
        struct iio_dev *indio_dev = dev_to_iio_dev(dev);
        struct iio_buffer *buffer = indio_dev->buffer;
        struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);

        ret = strtobool(buf, &state);
        if (ret < 0)
                return ret;
        mutex_lock(&indio_dev->mlock);
        if (iio_buffer_is_active(indio_dev->buffer)) {
                ret = -EBUSY;
                goto error_ret;
        }
        ret = iio_scan_mask_query(indio_dev, buffer, this_attr->address);
        if (ret < 0)
                goto error_ret;
        if (!state && ret) {
                ret = iio_scan_mask_clear(buffer, this_attr->address);
                if (ret)
                        goto error_ret;
        } else if (state && !ret) {
                ret = iio_scan_mask_set(indio_dev, buffer, this_attr->address);
                if (ret)
                        goto error_ret;
        }

error_ret:
        mutex_unlock(&indio_dev->mlock);

        return ret < 0 ? ret : len;
}
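
/*
 * Example (illustrative): userspace drives the show/store callbacks above
 * through the scan_elements sysfs group to select which channels are
 * captured; the device index and channel names here are hypothetical:
 *
 *      echo 1 > /sys/bus/iio/devices/iio:device0/scan_elements/in_voltage0_en
 *      echo 1 > /sys/bus/iio/devices/iio:device0/scan_elements/in_timestamp_en
 */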
static ssize_t iio_scan_el_ts_show(struct device *dev,
                                   struct device_attribute *attr,
                                   char *buf)
{
        struct iio_dev *indio_dev = dev_to_iio_dev(dev);

        return sprintf(buf, "%d\n", indio_dev->buffer->scan_timestamp);
}
static ssize_t iio_scan_el_ts_store(struct device *dev,
                                    struct device_attribute *attr,
                                    const char *buf,
                                    size_t len)
{
        int ret;
        struct iio_dev *indio_dev = dev_to_iio_dev(dev);
        bool state;

        ret = strtobool(buf, &state);
        if (ret < 0)
                return ret;

        mutex_lock(&indio_dev->mlock);
        if (iio_buffer_is_active(indio_dev->buffer)) {
                ret = -EBUSY;
                goto error_ret;
        }
        indio_dev->buffer->scan_timestamp = state;
error_ret:
        mutex_unlock(&indio_dev->mlock);

        return ret ? ret : len;
}
static int iio_buffer_add_channel_sysfs(struct iio_dev *indio_dev,
                                        const struct iio_chan_spec *chan)
{
        int ret, attrcount = 0;
        struct iio_buffer *buffer = indio_dev->buffer;

        ret = __iio_add_chan_devattr("index",
                                     chan,
                                     &iio_show_scan_index,
                                     NULL,
                                     0,
                                     IIO_SEPARATE,
                                     &indio_dev->dev,
                                     &buffer->scan_el_dev_attr_list);
        if (ret)
                return ret;
        attrcount++;
        ret = __iio_add_chan_devattr("type",
                                     chan,
                                     &iio_show_fixed_type,
                                     NULL,
                                     0,
                                     0,
                                     &indio_dev->dev,
                                     &buffer->scan_el_dev_attr_list);
        if (ret)
                return ret;
        attrcount++;
        if (chan->type != IIO_TIMESTAMP)
                ret = __iio_add_chan_devattr("en",
                                             chan,
                                             &iio_scan_el_show,
                                             &iio_scan_el_store,
                                             chan->scan_index,
                                             0,
                                             &indio_dev->dev,
                                             &buffer->scan_el_dev_attr_list);
        else
                ret = __iio_add_chan_devattr("en",
                                             chan,
                                             &iio_scan_el_ts_show,
                                             &iio_scan_el_ts_store,
                                             chan->scan_index,
                                             0,
                                             &indio_dev->dev,
                                             &buffer->scan_el_dev_attr_list);
        if (ret)
                return ret;
        attrcount++;
        ret = attrcount;
        return ret;
}
static ssize_t iio_buffer_read_length(struct device *dev,
                                      struct device_attribute *attr,
                                      char *buf)
{
        struct iio_dev *indio_dev = dev_to_iio_dev(dev);
        struct iio_buffer *buffer = indio_dev->buffer;

        return sprintf(buf, "%d\n", buffer->length);
}
static ssize_t iio_buffer_write_length(struct device *dev,
                                       struct device_attribute *attr,
                                       const char *buf, size_t len)
{
        struct iio_dev *indio_dev = dev_to_iio_dev(dev);
        struct iio_buffer *buffer = indio_dev->buffer;
        unsigned int val;
        int ret;

        ret = kstrtouint(buf, 10, &val);
        if (ret)
                return ret;

        if (val == buffer->length)
                return len;

        mutex_lock(&indio_dev->mlock);
        if (iio_buffer_is_active(indio_dev->buffer)) {
                ret = -EBUSY;
        } else {
                buffer->access->set_length(buffer, val);
                ret = 0;
        }
        if (ret)
                goto out;
        if (buffer->length && buffer->length < buffer->watermark)
                buffer->watermark = buffer->length;
out:
        mutex_unlock(&indio_dev->mlock);

        return ret ? ret : len;
}
static ssize_t iio_buffer_show_enable(struct device *dev,
                                      struct device_attribute *attr,
                                      char *buf)
{
        struct iio_dev *indio_dev = dev_to_iio_dev(dev);

        return sprintf(buf, "%d\n", iio_buffer_is_active(indio_dev->buffer));
}
static int iio_compute_scan_bytes(struct iio_dev *indio_dev,
                                  const unsigned long *mask, bool timestamp)
{
        const struct iio_chan_spec *ch;
        unsigned bytes = 0;
        int length, i;

        /* How much space will the demuxed element take? */
        for_each_set_bit(i, mask,
                         indio_dev->masklength) {
                ch = iio_find_channel_from_si(indio_dev, i);
                if (ch->scan_type.repeat > 1)
                        length = ch->scan_type.storagebits / 8 *
                                ch->scan_type.repeat;
                else
                        length = ch->scan_type.storagebits / 8;
                bytes = ALIGN(bytes, length);
                bytes += length;
        }
        if (timestamp) {
                ch = iio_find_channel_from_si(indio_dev,
                                              indio_dev->scan_index_timestamp);
                if (ch->scan_type.repeat > 1)
                        length = ch->scan_type.storagebits / 8 *
                                ch->scan_type.repeat;
                else
                        length = ch->scan_type.storagebits / 8;
                bytes = ALIGN(bytes, length);
                bytes += length;
        }
        return bytes;
}
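
/*
 * Worked example (hypothetical channel layout): with two enabled channels of
 * 16 storagebits each plus a 64-bit timestamp, the samples land at bytes 0-1
 * and 2-3, ALIGN(4, 8) pads the offset to 8 for the timestamp, and the
 * function returns a scan size of 16 bytes.
 */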
static void iio_buffer_activate(struct iio_dev *indio_dev,
                                struct iio_buffer *buffer)
{
        iio_buffer_get(buffer);
        list_add(&buffer->buffer_list, &indio_dev->buffer_list);
}

static void iio_buffer_deactivate(struct iio_buffer *buffer)
{
        list_del_init(&buffer->buffer_list);
        wake_up_interruptible(&buffer->pollq);
        iio_buffer_put(buffer);
}
static void iio_buffer_deactivate_all(struct iio_dev *indio_dev)
{
        struct iio_buffer *buffer, *_buffer;

        list_for_each_entry_safe(buffer, _buffer,
                                 &indio_dev->buffer_list, buffer_list)
                iio_buffer_deactivate(buffer);
}
static void iio_buffer_update_bytes_per_datum(struct iio_dev *indio_dev,
                                              struct iio_buffer *buffer)
{
        unsigned int bytes;

        if (!buffer->access->set_bytes_per_datum)
                return;

        bytes = iio_compute_scan_bytes(indio_dev, buffer->scan_mask,
                                       buffer->scan_timestamp);

        buffer->access->set_bytes_per_datum(buffer, bytes);
}
static int iio_buffer_request_update(struct iio_dev *indio_dev,
                                     struct iio_buffer *buffer)
{
        int ret;

        iio_buffer_update_bytes_per_datum(indio_dev, buffer);
        if (buffer->access->request_update) {
                ret = buffer->access->request_update(buffer);
                if (ret) {
                        dev_dbg(&indio_dev->dev,
                                "Buffer not started: buffer parameter update failed (%d)\n",
                                ret);
                        return ret;
                }
        }

        return 0;
}
static void iio_free_scan_mask(struct iio_dev *indio_dev,
                               const unsigned long *mask)
{
        /* If the mask is dynamically allocated free it, otherwise do nothing */
        if (!indio_dev->available_scan_masks)
                kfree(mask);
}
struct iio_device_config {
        unsigned int mode;
        const unsigned long *scan_mask;
        unsigned int scan_bytes;
        bool scan_timestamp;
};
static int iio_verify_update(struct iio_dev *indio_dev,
        struct iio_buffer *insert_buffer, struct iio_buffer *remove_buffer,
        struct iio_device_config *config)
{
        unsigned long *compound_mask;
        const unsigned long *scan_mask;
        struct iio_buffer *buffer;
        bool scan_timestamp;

        memset(config, 0, sizeof(*config));

        /*
         * If there is just one buffer and we are removing it there is nothing
         * to verify.
         */
        if (remove_buffer && !insert_buffer &&
                list_is_singular(&indio_dev->buffer_list))
                        return 0;

        /* Definitely possible for devices to support both of these. */
        if ((indio_dev->modes & INDIO_BUFFER_TRIGGERED) && indio_dev->trig) {
                config->mode = INDIO_BUFFER_TRIGGERED;
        } else if (indio_dev->modes & INDIO_BUFFER_HARDWARE) {
                config->mode = INDIO_BUFFER_HARDWARE;
        } else if (indio_dev->modes & INDIO_BUFFER_SOFTWARE) {
                config->mode = INDIO_BUFFER_SOFTWARE;
        } else {
                /* Can only occur on first buffer */
                if (indio_dev->modes & INDIO_BUFFER_TRIGGERED)
                        dev_dbg(&indio_dev->dev, "Buffer not started: no trigger\n");
                return -EINVAL;
        }

        /* What scan mask do we actually have? */
        compound_mask = kcalloc(BITS_TO_LONGS(indio_dev->masklength),
                                sizeof(long), GFP_KERNEL);
        if (compound_mask == NULL)
                return -ENOMEM;

        scan_timestamp = false;

        list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list) {
                if (buffer == remove_buffer)
                        continue;
                bitmap_or(compound_mask, compound_mask, buffer->scan_mask,
                          indio_dev->masklength);
                scan_timestamp |= buffer->scan_timestamp;
        }

        if (insert_buffer) {
                bitmap_or(compound_mask, compound_mask,
                          insert_buffer->scan_mask, indio_dev->masklength);
                scan_timestamp |= insert_buffer->scan_timestamp;
        }

        if (indio_dev->available_scan_masks) {
                scan_mask = iio_scan_mask_match(indio_dev->available_scan_masks,
                                    indio_dev->masklength,
                                    compound_mask);
                kfree(compound_mask);
                if (scan_mask == NULL)
                        return -EINVAL;
        } else {
                scan_mask = compound_mask;
        }

        config->scan_bytes = iio_compute_scan_bytes(indio_dev,
                                    scan_mask, scan_timestamp);
        config->scan_mask = scan_mask;
        config->scan_timestamp = scan_timestamp;

        return 0;
}
static int iio_enable_buffers(struct iio_dev *indio_dev,
        struct iio_device_config *config)
{
        int ret;

        indio_dev->active_scan_mask = config->scan_mask;
        indio_dev->scan_timestamp = config->scan_timestamp;
        indio_dev->scan_bytes = config->scan_bytes;

        iio_update_demux(indio_dev);

        /* Wind up again */
        if (indio_dev->setup_ops->preenable) {
                ret = indio_dev->setup_ops->preenable(indio_dev);
                if (ret) {
                        dev_dbg(&indio_dev->dev,
                                "Buffer not started: buffer preenable failed (%d)\n", ret);
                        goto err_undo_config;
                }
        }

        if (indio_dev->info->update_scan_mode) {
                ret = indio_dev->info
                        ->update_scan_mode(indio_dev,
                                           indio_dev->active_scan_mask);
                if (ret < 0) {
                        dev_dbg(&indio_dev->dev,
                                "Buffer not started: update scan mode failed (%d)\n",
                                ret);
                        goto err_run_postdisable;
                }
        }

        indio_dev->currentmode = config->mode;

        if (indio_dev->setup_ops->postenable) {
                ret = indio_dev->setup_ops->postenable(indio_dev);
                if (ret) {
                        dev_dbg(&indio_dev->dev,
                                "Buffer not started: postenable failed (%d)\n", ret);
                        goto err_run_postdisable;
                }
        }

        return 0;

err_run_postdisable:
        indio_dev->currentmode = INDIO_DIRECT_MODE;
        if (indio_dev->setup_ops->postdisable)
                indio_dev->setup_ops->postdisable(indio_dev);
err_undo_config:
        indio_dev->active_scan_mask = NULL;

        return ret;
}
static int iio_disable_buffers(struct iio_dev *indio_dev)
{
        int ret = 0;
        int ret2;

        /* Wind down existing buffers - iff there are any */
        if (list_empty(&indio_dev->buffer_list))
                return 0;

        /*
         * If things go wrong at some step in disable we still need to continue
         * to perform the other steps, otherwise we leave the device in an
         * inconsistent state. We return the error code for the first error we
         * encountered.
         */

        if (indio_dev->setup_ops->predisable) {
                ret2 = indio_dev->setup_ops->predisable(indio_dev);
                if (ret2 && !ret)
                        ret = ret2;
        }

        indio_dev->currentmode = INDIO_DIRECT_MODE;

        if (indio_dev->setup_ops->postdisable) {
                ret2 = indio_dev->setup_ops->postdisable(indio_dev);
                if (ret2 && !ret)
                        ret = ret2;
        }

        iio_free_scan_mask(indio_dev, indio_dev->active_scan_mask);
        indio_dev->active_scan_mask = NULL;

        return ret;
}
static int __iio_update_buffers(struct iio_dev *indio_dev,
                       struct iio_buffer *insert_buffer,
                       struct iio_buffer *remove_buffer)
{
        struct iio_device_config new_config;
        int ret;

        ret = iio_verify_update(indio_dev, insert_buffer, remove_buffer,
                &new_config);
        if (ret)
                return ret;

        if (insert_buffer) {
                ret = iio_buffer_request_update(indio_dev, insert_buffer);
                if (ret)
                        goto err_free_config;
        }

        ret = iio_disable_buffers(indio_dev);
        if (ret)
                goto err_deactivate_all;

        if (remove_buffer)
                iio_buffer_deactivate(remove_buffer);
        if (insert_buffer)
                iio_buffer_activate(indio_dev, insert_buffer);

        /* If no buffers in list, we are done */
        if (list_empty(&indio_dev->buffer_list))
                return 0;

        ret = iio_enable_buffers(indio_dev, &new_config);
        if (ret)
                goto err_deactivate_all;

        return 0;

err_deactivate_all:
        /*
         * We've already verified that the config is valid earlier. If things go
         * wrong in either enable or disable the most likely reason is an IO
         * error from the device. In this case there is no good recovery
         * strategy. Just make sure to disable everything and leave the device
         * in a sane state. With a bit of luck the device might come back to
         * life again later and userspace can try again.
         */
        iio_buffer_deactivate_all(indio_dev);

err_free_config:
        iio_free_scan_mask(indio_dev, new_config.scan_mask);
        return ret;
}
int iio_update_buffers(struct iio_dev *indio_dev,
                       struct iio_buffer *insert_buffer,
                       struct iio_buffer *remove_buffer)
{
        int ret;

        if (insert_buffer == remove_buffer)
                return 0;

        mutex_lock(&indio_dev->info_exist_lock);
        mutex_lock(&indio_dev->mlock);

        if (insert_buffer && iio_buffer_is_active(insert_buffer))
                insert_buffer = NULL;

        if (remove_buffer && !iio_buffer_is_active(remove_buffer))
                remove_buffer = NULL;

        if (!insert_buffer && !remove_buffer) {
                ret = 0;
                goto out_unlock;
        }

        if (indio_dev->info == NULL) {
                ret = -ENODEV;
                goto out_unlock;
        }

        ret = __iio_update_buffers(indio_dev, insert_buffer, remove_buffer);

out_unlock:
        mutex_unlock(&indio_dev->mlock);
        mutex_unlock(&indio_dev->info_exist_lock);

        return ret;
}
EXPORT_SYMBOL_GPL(iio_update_buffers);
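
/*
 * Example (hedged sketch): an in-kernel consumer with an already allocated
 * and configured buffer attaches it by passing it as insert_buffer, and
 * later detaches it by passing it as remove_buffer; "buffer" here is
 * hypothetical:
 *
 *      ret = iio_update_buffers(indio_dev, buffer, NULL);     // attach
 *      ...
 *      ret = iio_update_buffers(indio_dev, NULL, buffer);     // detach
 */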
void iio_disable_all_buffers(struct iio_dev *indio_dev)
{
        iio_disable_buffers(indio_dev);
        iio_buffer_deactivate_all(indio_dev);
}
static ssize_t iio_buffer_store_enable(struct device *dev,
                                       struct device_attribute *attr,
                                       const char *buf,
                                       size_t len)
{
        int ret;
        bool requested_state;
        struct iio_dev *indio_dev = dev_to_iio_dev(dev);
        bool inlist;

        ret = strtobool(buf, &requested_state);
        if (ret < 0)
                return ret;

        mutex_lock(&indio_dev->mlock);

        /* Find out if it is in the list */
        inlist = iio_buffer_is_active(indio_dev->buffer);
        /* Already in desired state */
        if (inlist == requested_state)
                goto done;

        if (requested_state)
                ret = __iio_update_buffers(indio_dev,
                                         indio_dev->buffer, NULL);
        else
                ret = __iio_update_buffers(indio_dev,
                                         NULL, indio_dev->buffer);

done:
        mutex_unlock(&indio_dev->mlock);
        return (ret < 0) ? ret : len;
}
static const char * const iio_scan_elements_group_name = "scan_elements";
static ssize_t iio_buffer_show_watermark(struct device *dev,
                                         struct device_attribute *attr,
                                         char *buf)
{
        struct iio_dev *indio_dev = dev_to_iio_dev(dev);
        struct iio_buffer *buffer = indio_dev->buffer;

        return sprintf(buf, "%u\n", buffer->watermark);
}
static ssize_t iio_buffer_store_watermark(struct device *dev,
                                          struct device_attribute *attr,
                                          const char *buf,
                                          size_t len)
{
        struct iio_dev *indio_dev = dev_to_iio_dev(dev);
        struct iio_buffer *buffer = indio_dev->buffer;
        unsigned int val;
        int ret;

        ret = kstrtouint(buf, 10, &val);
        if (ret)
                return ret;
        if (!val)
                return -EINVAL;

        mutex_lock(&indio_dev->mlock);

        if (val > buffer->length) {
                ret = -EINVAL;
                goto out;
        }

        if (iio_buffer_is_active(indio_dev->buffer)) {
                ret = -EBUSY;
                goto out;
        }

        buffer->watermark = val;

        if (indio_dev->info->hwfifo_set_watermark)
                indio_dev->info->hwfifo_set_watermark(indio_dev, val);
out:
        mutex_unlock(&indio_dev->mlock);

        return ret ? ret : len;
}
static DEVICE_ATTR(length, S_IRUGO | S_IWUSR, iio_buffer_read_length,
                   iio_buffer_write_length);
static struct device_attribute dev_attr_length_ro = __ATTR(length,
        S_IRUGO, iio_buffer_read_length, NULL);
static DEVICE_ATTR(enable, S_IRUGO | S_IWUSR,
                   iio_buffer_show_enable, iio_buffer_store_enable);
static DEVICE_ATTR(watermark, S_IRUGO | S_IWUSR,
                   iio_buffer_show_watermark, iio_buffer_store_watermark);

static struct attribute *iio_buffer_attrs[] = {
        &dev_attr_length.attr,
        &dev_attr_enable.attr,
        &dev_attr_watermark.attr,
        NULL,
};
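
/*
 * These attributes form the device's "buffer" sysfs group set up below, e.g.
 * (device index illustrative):
 *
 *      /sys/bus/iio/devices/iio:device0/buffer/length
 *      /sys/bus/iio/devices/iio:device0/buffer/watermark
 *      /sys/bus/iio/devices/iio:device0/buffer/enable
 */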
int iio_buffer_alloc_sysfs_and_mask(struct iio_dev *indio_dev)
{
        struct iio_dev_attr *p;
        struct attribute **attr;
        struct iio_buffer *buffer = indio_dev->buffer;
        int ret, i, attrn, attrcount, attrcount_orig = 0;
        const struct iio_chan_spec *channels;

        channels = indio_dev->channels;
        if (channels) {
                int ml = indio_dev->masklength;

                for (i = 0; i < indio_dev->num_channels; i++)
                        ml = max(ml, channels[i].scan_index + 1);
                indio_dev->masklength = ml;
        }

        if (!buffer)
                return 0;

        attrcount = 0;
        if (buffer->attrs) {
                while (buffer->attrs[attrcount] != NULL)
                        attrcount++;
        }

        attr = kcalloc(attrcount + ARRAY_SIZE(iio_buffer_attrs) + 1,
                       sizeof(struct attribute *), GFP_KERNEL);
        if (!attr)
                return -ENOMEM;

        memcpy(attr, iio_buffer_attrs, sizeof(iio_buffer_attrs));
        if (!buffer->access->set_length)
                attr[0] = &dev_attr_length_ro.attr;

        if (buffer->attrs)
                memcpy(&attr[ARRAY_SIZE(iio_buffer_attrs)], buffer->attrs,
                       sizeof(struct attribute *) * attrcount);

        attr[attrcount + ARRAY_SIZE(iio_buffer_attrs)] = NULL;

        buffer->buffer_group.name = "buffer";
        buffer->buffer_group.attrs = attr;

        indio_dev->groups[indio_dev->groupcounter++] = &buffer->buffer_group;

        if (buffer->scan_el_attrs != NULL) {
                attr = buffer->scan_el_attrs->attrs;
                while (*attr++ != NULL)
                        attrcount_orig++;
        }
        attrcount = attrcount_orig;
        INIT_LIST_HEAD(&buffer->scan_el_dev_attr_list);
        channels = indio_dev->channels;
        if (channels) {
                /* Build the per-channel scan element attributes */
                for (i = 0; i < indio_dev->num_channels; i++) {
                        if (channels[i].scan_index < 0)
                                continue;

                        ret = iio_buffer_add_channel_sysfs(indio_dev,
                                                           &channels[i]);
                        if (ret < 0)
                                goto error_cleanup_dynamic;
                        attrcount += ret;
                        if (channels[i].type == IIO_TIMESTAMP)
                                indio_dev->scan_index_timestamp =
                                        channels[i].scan_index;
                }
                if (indio_dev->masklength && buffer->scan_mask == NULL) {
                        buffer->scan_mask = kcalloc(BITS_TO_LONGS(indio_dev->masklength),
                                                    sizeof(*buffer->scan_mask),
                                                    GFP_KERNEL);
                        if (buffer->scan_mask == NULL) {
                                ret = -ENOMEM;
                                goto error_cleanup_dynamic;
                        }
                }
        }

        buffer->scan_el_group.name = iio_scan_elements_group_name;

        buffer->scan_el_group.attrs = kcalloc(attrcount + 1,
                                              sizeof(buffer->scan_el_group.attrs[0]),
                                              GFP_KERNEL);
        if (buffer->scan_el_group.attrs == NULL) {
                ret = -ENOMEM;
                goto error_free_scan_mask;
        }
        if (buffer->scan_el_attrs)
                memcpy(buffer->scan_el_group.attrs, buffer->scan_el_attrs,
                       sizeof(buffer->scan_el_group.attrs[0])*attrcount_orig);
        attrn = attrcount_orig;

        list_for_each_entry(p, &buffer->scan_el_dev_attr_list, l)
                buffer->scan_el_group.attrs[attrn++] = &p->dev_attr.attr;
        indio_dev->groups[indio_dev->groupcounter++] = &buffer->scan_el_group;

        return 0;

error_free_scan_mask:
        kfree(buffer->scan_mask);
error_cleanup_dynamic:
        iio_free_chan_devattr_list(&buffer->scan_el_dev_attr_list);
        kfree(indio_dev->buffer->buffer_group.attrs);

        return ret;
}
void iio_buffer_free_sysfs_and_mask(struct iio_dev *indio_dev)
{
        if (!indio_dev->buffer)
                return;

        kfree(indio_dev->buffer->scan_mask);
        kfree(indio_dev->buffer->buffer_group.attrs);
        kfree(indio_dev->buffer->scan_el_group.attrs);
        iio_free_chan_devattr_list(&indio_dev->buffer->scan_el_dev_attr_list);
}
/**
 * iio_validate_scan_mask_onehot() - Validates that exactly one channel is selected
 * @indio_dev: the iio device
 * @mask: scan mask to be checked
 *
 * Return true if exactly one bit is set in the scan mask, false otherwise. It
 * can be used for devices where only one channel can be active for sampling at
 * a time.
 */
bool iio_validate_scan_mask_onehot(struct iio_dev *indio_dev,
                                   const unsigned long *mask)
{
        return bitmap_weight(mask, indio_dev->masklength) == 1;
}
EXPORT_SYMBOL_GPL(iio_validate_scan_mask_onehot);
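
/*
 * Example (hedged sketch): a driver whose hardware can only sample a single
 * channel at a time plugs this helper into its buffer setup ops; the ops
 * structure name is hypothetical:
 *
 *      static const struct iio_buffer_setup_ops my_setup_ops = {
 *              .validate_scan_mask = iio_validate_scan_mask_onehot,
 *      };
 */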
int iio_scan_mask_query(struct iio_dev *indio_dev,
                        struct iio_buffer *buffer, int bit)
{
        if (bit > indio_dev->masklength)
                return -EINVAL;

        if (!buffer->scan_mask)
                return 0;

        /* Ensure return value is 0 or 1. */
        return !!test_bit(bit, buffer->scan_mask);
}
EXPORT_SYMBOL_GPL(iio_scan_mask_query);
/**
 * struct iio_demux_table - table describing demux memcpy ops
 * @from: index to copy from
 * @to: index to copy to
 * @length: how many bytes to copy
 * @l: list head used for management
 */
struct iio_demux_table {
        unsigned from;
        unsigned to;
        unsigned length;
        struct list_head l;
};
static const void *iio_demux(struct iio_buffer *buffer,
                             const void *datain)
{
        struct iio_demux_table *t;

        if (list_empty(&buffer->demux_list))
                return datain;
        list_for_each_entry(t, &buffer->demux_list, l)
                memcpy(buffer->demux_bounce + t->to,
                       datain + t->from, t->length);

        return buffer->demux_bounce;
}
static int iio_push_to_buffer(struct iio_buffer *buffer, const void *data)
{
        const void *dataout = iio_demux(buffer, data);
        int ret;

        ret = buffer->access->store_to(buffer, dataout);
        if (ret)
                return ret;

        /*
         * We can't just test for watermark to decide if we wake the poll queue
         * because read may request fewer samples than the watermark.
         */
        wake_up_interruptible_poll(&buffer->pollq, POLLIN | POLLRDNORM);
        return 0;
}
static void iio_buffer_demux_free(struct iio_buffer *buffer)
{
        struct iio_demux_table *p, *q;

        list_for_each_entry_safe(p, q, &buffer->demux_list, l) {
                list_del(&p->l);
                kfree(p);
        }
}
int iio_push_to_buffers(struct iio_dev *indio_dev, const void *data)
{
        int ret;
        struct iio_buffer *buf;

        list_for_each_entry(buf, &indio_dev->buffer_list, buffer_list) {
                ret = iio_push_to_buffer(buf, data);
                if (ret < 0)
                        return ret;
        }

        return 0;
}
EXPORT_SYMBOL_GPL(iio_push_to_buffers);
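
/*
 * Example (hedged sketch): iio_push_to_buffers() is typically called from a
 * driver's trigger handler once a full scan has been assembled; the my_
 * names are hypothetical:
 *
 *      static irqreturn_t my_trigger_handler(int irq, void *p)
 *      {
 *              struct iio_poll_func *pf = p;
 *              struct iio_dev *indio_dev = pf->indio_dev;
 *              struct my_state *st = iio_priv(indio_dev);
 *
 *              my_read_scan(st, st->scan);     // fill one scan's worth of data
 *              iio_push_to_buffers(indio_dev, st->scan);
 *              iio_trigger_notify_done(indio_dev->trig);
 *              return IRQ_HANDLED;
 *      }
 */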
static int iio_buffer_add_demux(struct iio_buffer *buffer,
        struct iio_demux_table **p, unsigned int in_loc, unsigned int out_loc,
        unsigned int length)
{
        if (*p && (*p)->from + (*p)->length == in_loc &&
                (*p)->to + (*p)->length == out_loc) {
                /* Extend the previous entry instead of adding a new one */
                (*p)->length += length;
        } else {
                *p = kmalloc(sizeof(**p), GFP_KERNEL);
                if (*p == NULL)
                        return -ENOMEM;
                (*p)->from = in_loc;
                (*p)->to = out_loc;
                (*p)->length = length;
                list_add_tail(&(*p)->l, &buffer->demux_list);
        }

        return 0;
}
static int iio_buffer_update_demux(struct iio_dev *indio_dev,
                                   struct iio_buffer *buffer)
{
        const struct iio_chan_spec *ch;
        int ret, in_ind = -1, out_ind, length;
        unsigned in_loc = 0, out_loc = 0;
        struct iio_demux_table *p = NULL;

        /* Clear out any old demux */
        iio_buffer_demux_free(buffer);
        kfree(buffer->demux_bounce);
        buffer->demux_bounce = NULL;

        /* First work out which scan mode we will actually have */
        if (bitmap_equal(indio_dev->active_scan_mask,
                         buffer->scan_mask,
                         indio_dev->masklength))
                return 0;

        /* Now we have the two masks, work from least sig and build up sizes */
        for_each_set_bit(out_ind,
                         buffer->scan_mask,
                         indio_dev->masklength) {
                in_ind = find_next_bit(indio_dev->active_scan_mask,
                                       indio_dev->masklength,
                                       in_ind + 1);
                while (in_ind != out_ind) {
                        in_ind = find_next_bit(indio_dev->active_scan_mask,
                                               indio_dev->masklength,
                                               in_ind + 1);
                        ch = iio_find_channel_from_si(indio_dev, in_ind);
                        if (ch->scan_type.repeat > 1)
                                length = ch->scan_type.storagebits / 8 *
                                        ch->scan_type.repeat;
                        else
                                length = ch->scan_type.storagebits / 8;
                        /* Make sure we are aligned */
                        in_loc = roundup(in_loc, length) + length;
                }
                ch = iio_find_channel_from_si(indio_dev, in_ind);
                if (ch->scan_type.repeat > 1)
                        length = ch->scan_type.storagebits / 8 *
                                ch->scan_type.repeat;
                else
                        length = ch->scan_type.storagebits / 8;
                out_loc = roundup(out_loc, length);
                in_loc = roundup(in_loc, length);
                ret = iio_buffer_add_demux(buffer, &p, in_loc, out_loc, length);
                if (ret)
                        goto error_clear_mux_table;
                out_loc += length;
                in_loc += length;
        }
        /* Relies on scan_timestamp being last */
        if (buffer->scan_timestamp) {
                ch = iio_find_channel_from_si(indio_dev,
                        indio_dev->scan_index_timestamp);
                if (ch->scan_type.repeat > 1)
                        length = ch->scan_type.storagebits / 8 *
                                ch->scan_type.repeat;
                else
                        length = ch->scan_type.storagebits / 8;
                out_loc = roundup(out_loc, length);
                in_loc = roundup(in_loc, length);
                ret = iio_buffer_add_demux(buffer, &p, in_loc, out_loc, length);
                if (ret)
                        goto error_clear_mux_table;
                out_loc += length;
                in_loc += length;
        }
        buffer->demux_bounce = kzalloc(out_loc, GFP_KERNEL);
        if (buffer->demux_bounce == NULL) {
                ret = -ENOMEM;
                goto error_clear_mux_table;
        }
        return 0;

error_clear_mux_table:
        iio_buffer_demux_free(buffer);

        return ret;
}
int iio_update_demux(struct iio_dev *indio_dev)
{
        struct iio_buffer *buffer;
        int ret;

        list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list) {
                ret = iio_buffer_update_demux(indio_dev, buffer);
                if (ret < 0)
                        goto error_clear_mux_table;
        }
        return 0;

error_clear_mux_table:
        list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list)
                iio_buffer_demux_free(buffer);

        return ret;
}
EXPORT_SYMBOL_GPL(iio_update_demux);
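
/*
 * Worked example (hypothetical masks): if the device's active_scan_mask has
 * channels 0, 1 and 2 enabled but a given buffer's scan_mask only has 0 and
 * 2, the table built above copies channels 0 and 2 of every incoming scan
 * into the bounce buffer and skips channel 1, so the buffer only receives
 * the channels it requested.
 */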
/**
 * iio_buffer_release() - Free a buffer's resources
 * @ref: Pointer to the kref embedded in the iio_buffer struct
 *
 * This function is called when the last reference to the buffer has been
 * dropped. It will typically free all resources allocated by the buffer. Do not
 * call this function manually, always use iio_buffer_put() when done using a
 * buffer.
 */
static void iio_buffer_release(struct kref *ref)
{
        struct iio_buffer *buffer = container_of(ref, struct iio_buffer, ref);

        buffer->access->release(buffer);
}
/**
 * iio_buffer_get() - Grab a reference to the buffer
 * @buffer: The buffer to grab a reference for, may be NULL
 *
 * Returns the pointer to the buffer that was passed into the function.
 */
struct iio_buffer *iio_buffer_get(struct iio_buffer *buffer)
{
        if (buffer)
                kref_get(&buffer->ref);

        return buffer;
}
EXPORT_SYMBOL_GPL(iio_buffer_get);
/**
 * iio_buffer_put() - Release the reference to the buffer
 * @buffer: The buffer to release the reference for, may be NULL
 */
void iio_buffer_put(struct iio_buffer *buffer)
{
        if (buffer)
                kref_put(&buffer->ref, iio_buffer_release);
}
EXPORT_SYMBOL_GPL(iio_buffer_put);
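
/*
 * Example (hedged sketch): code that keeps a long-lived pointer to a buffer
 * pairs the two helpers above; "st" is hypothetical:
 *
 *      st->buffer = iio_buffer_get(buffer);    // take our own reference
 *      ...
 *      iio_buffer_put(st->buffer);             // drop it when done
 */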