/* The industrial I/O core
 *
 * Copyright (c) 2008 Jonathan Cameron
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * Handling of buffer allocation / resizing.
 *
 * Things to look at here.
 * - Better memory allocation techniques?
 * - Alternative access techniques?
 */
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/cdev.h>
#include <linux/slab.h>
#include <linux/poll.h>

#include <linux/iio/iio.h>
#include "iio_core.h"
#include <linux/iio/sysfs.h>
#include <linux/iio/buffer.h>

static const char * const iio_endian_prefix[] = {
	[IIO_BE] = "be",
	[IIO_LE] = "le",
};

static bool iio_buffer_is_active(struct iio_dev *indio_dev,
				 struct iio_buffer *buf)
{
	struct list_head *p;

	list_for_each(p, &indio_dev->buffer_list)
		if (p == &buf->buffer_list)
			return true;

	return false;
}

/**
 * iio_buffer_read_first_n_outer() - chrdev read for buffer access
 *
 * This function relies on all buffer implementations having an
 * iio_buffer as their first element.
 */
ssize_t iio_buffer_read_first_n_outer(struct file *filp, char __user *buf,
				      size_t n, loff_t *f_ps)
{
	struct iio_dev *indio_dev = filp->private_data;
	struct iio_buffer *rb = indio_dev->buffer;

	if (!rb || !rb->access->read_first_n)
		return -EINVAL;
	return rb->access->read_first_n(rb, n, buf);
}
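
/*
 * Usage sketch (not from this file; device node path illustrative):
 * buffered data is consumed from userspace through the device character
 * node, and a read() lands in iio_buffer_read_first_n_outer() above.
 * A minimal consumer:
 *
 *	int fd = open("/dev/iio:device0", O_RDONLY);
 *	char data[512];
 *	ssize_t n = read(fd, data, sizeof(data));
 *
 * Each datum returned is laid out according to the enabled scan elements.
 */
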
/**
 * iio_buffer_poll() - poll the buffer to find out if it has data
 */
unsigned int iio_buffer_poll(struct file *filp,
			     struct poll_table_struct *wait)
{
	struct iio_dev *indio_dev = filp->private_data;
	struct iio_buffer *rb = indio_dev->buffer;

	poll_wait(filp, &rb->pollq, wait);
	if (rb->stufftoread)
		return POLLIN | POLLRDNORM;
	/* need a way of knowing if there may be enough data... */
	return 0;
}

void iio_buffer_init(struct iio_buffer *buffer)
{
	INIT_LIST_HEAD(&buffer->demux_list);
	init_waitqueue_head(&buffer->pollq);
}
EXPORT_SYMBOL(iio_buffer_init);
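
/*
 * Implementation sketch (names hypothetical): a buffer implementation
 * embeds struct iio_buffer as its first member, which is what lets the
 * chrdev entry points above treat indio_dev->buffer generically, and it
 * calls iio_buffer_init() at allocation time:
 *
 *	struct my_buffer {
 *		struct iio_buffer buffer;	// must be the first member
 *		// implementation-private state follows
 *	};
 *
 *	struct my_buffer *b = kzalloc(sizeof(*b), GFP_KERNEL);
 *	if (b)
 *		iio_buffer_init(&b->buffer);
 */
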
static ssize_t iio_show_scan_index(struct device *dev,
				   struct device_attribute *attr,
				   char *buf)
{
	return sprintf(buf, "%u\n", to_iio_dev_attr(attr)->c->scan_index);
}

static ssize_t iio_show_fixed_type(struct device *dev,
				   struct device_attribute *attr,
				   char *buf)
{
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
	u8 type = this_attr->c->scan_type.endianness;

	if (type == IIO_CPU) {
#ifdef __LITTLE_ENDIAN
		type = IIO_LE;
#else
		type = IIO_BE;
#endif
	}
	return sprintf(buf, "%s:%c%d/%d>>%u\n",
		       iio_endian_prefix[type],
		       this_attr->c->scan_type.sign,
		       this_attr->c->scan_type.realbits,
		       this_attr->c->scan_type.storagebits,
		       this_attr->c->scan_type.shift);
}

static ssize_t iio_scan_el_show(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	int ret;
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);

	ret = test_bit(to_iio_dev_attr(attr)->address,
		       indio_dev->buffer->scan_mask);

	return sprintf(buf, "%d\n", ret);
}

static int iio_scan_mask_clear(struct iio_buffer *buffer, int bit)
{
	clear_bit(bit, buffer->scan_mask);
	return 0;
}

static ssize_t iio_scan_el_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf,
				 size_t len)
{
	int ret;
	bool state;
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_buffer *buffer = indio_dev->buffer;
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);

	ret = strtobool(buf, &state);
	if (ret < 0)
		return ret;
	mutex_lock(&indio_dev->mlock);
	if (iio_buffer_is_active(indio_dev, indio_dev->buffer)) {
		ret = -EBUSY;
		goto error_ret;
	}
	ret = iio_scan_mask_query(indio_dev, buffer, this_attr->address);
	if (ret < 0)
		goto error_ret;
	if (!state && ret) {
		ret = iio_scan_mask_clear(buffer, this_attr->address);
		if (ret)
			goto error_ret;
	} else if (state && !ret) {
		ret = iio_scan_mask_set(indio_dev, buffer, this_attr->address);
		if (ret)
			goto error_ret;
	}

error_ret:
	mutex_unlock(&indio_dev->mlock);

	return ret < 0 ? ret : len;
}

static ssize_t iio_scan_el_ts_show(struct device *dev,
				   struct device_attribute *attr,
				   char *buf)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);

	return sprintf(buf, "%d\n", indio_dev->buffer->scan_timestamp);
}

static ssize_t iio_scan_el_ts_store(struct device *dev,
				    struct device_attribute *attr,
				    const char *buf,
				    size_t len)
{
	int ret;
	bool state;
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);

	ret = strtobool(buf, &state);
	if (ret < 0)
		return ret;

	mutex_lock(&indio_dev->mlock);
	if (iio_buffer_is_active(indio_dev, indio_dev->buffer)) {
		ret = -EBUSY;
		goto error_ret;
	}
	indio_dev->buffer->scan_timestamp = state;
error_ret:
	mutex_unlock(&indio_dev->mlock);

	return ret ? ret : len;
}

static int iio_buffer_add_channel_sysfs(struct iio_dev *indio_dev,
					const struct iio_chan_spec *chan)
{
	int ret, attrcount = 0;
	struct iio_buffer *buffer = indio_dev->buffer;

	ret = __iio_add_chan_devattr("index", chan,
				     &iio_show_scan_index, NULL,
				     0, 0, &indio_dev->dev,
				     &buffer->scan_el_dev_attr_list);
	if (ret)
		goto error_ret;
	attrcount++;
	ret = __iio_add_chan_devattr("type", chan,
				     &iio_show_fixed_type, NULL,
				     0, 0, &indio_dev->dev,
				     &buffer->scan_el_dev_attr_list);
	if (ret)
		goto error_ret;
	attrcount++;
	if (chan->type != IIO_TIMESTAMP)
		ret = __iio_add_chan_devattr("en", chan,
					     &iio_scan_el_show,
					     &iio_scan_el_store,
					     chan->scan_index, 0,
					     &indio_dev->dev,
					     &buffer->scan_el_dev_attr_list);
	else
		ret = __iio_add_chan_devattr("en", chan,
					     &iio_scan_el_ts_show,
					     &iio_scan_el_ts_store,
					     chan->scan_index, 0,
					     &indio_dev->dev,
					     &buffer->scan_el_dev_attr_list);
	if (ret)
		goto error_ret;
	attrcount++;
	ret = attrcount;
error_ret:
	return ret;
}

static void iio_buffer_remove_and_free_scan_dev_attr(struct iio_dev *indio_dev,
						     struct iio_dev_attr *p)
{
	kfree(p->dev_attr.attr.name);
	kfree(p);
}

static void __iio_buffer_attr_cleanup(struct iio_dev *indio_dev)
{
	struct iio_dev_attr *p, *n;
	struct iio_buffer *buffer = indio_dev->buffer;

	list_for_each_entry_safe(p, n,
				 &buffer->scan_el_dev_attr_list, l)
		iio_buffer_remove_and_free_scan_dev_attr(indio_dev, p);
}

static const char * const iio_scan_elements_group_name = "scan_elements";

int iio_buffer_register(struct iio_dev *indio_dev,
			const struct iio_chan_spec *channels,
			int num_channels)
{
	struct iio_dev_attr *p;
	struct attribute **attr;
	struct iio_buffer *buffer = indio_dev->buffer;
	int ret, i, attrn, attrcount, attrcount_orig = 0;

	if (buffer->attrs)
		indio_dev->groups[indio_dev->groupcounter++] = buffer->attrs;

	if (buffer->scan_el_attrs != NULL) {
		attr = buffer->scan_el_attrs->attrs;
		while (*attr++ != NULL)
			attrcount_orig++;
	}
	attrcount = attrcount_orig;
	INIT_LIST_HEAD(&buffer->scan_el_dev_attr_list);
	if (channels) {
		/* Build sysfs attributes for each scan element */
		for (i = 0; i < num_channels; i++) {
			if (channels[i].scan_index < 0)
				continue;

			/* Establish necessary mask length */
			if (channels[i].scan_index >
			    (int)indio_dev->masklength - 1)
				indio_dev->masklength
					= channels[i].scan_index + 1;

			ret = iio_buffer_add_channel_sysfs(indio_dev,
							   &channels[i]);
			if (ret < 0)
				goto error_cleanup_dynamic;
			attrcount += ret;
			if (channels[i].type == IIO_TIMESTAMP)
				indio_dev->scan_index_timestamp =
					channels[i].scan_index;
		}
		if (indio_dev->masklength && buffer->scan_mask == NULL) {
			buffer->scan_mask = kcalloc(BITS_TO_LONGS(indio_dev->masklength),
						    sizeof(*buffer->scan_mask),
						    GFP_KERNEL);
			if (buffer->scan_mask == NULL) {
				ret = -ENOMEM;
				goto error_cleanup_dynamic;
			}
		}
	}

	buffer->scan_el_group.name = iio_scan_elements_group_name;

	buffer->scan_el_group.attrs = kcalloc(attrcount + 1,
					      sizeof(buffer->scan_el_group.attrs[0]),
					      GFP_KERNEL);
	if (buffer->scan_el_group.attrs == NULL) {
		ret = -ENOMEM;
		goto error_free_scan_mask;
	}
	if (buffer->scan_el_attrs)
		memcpy(buffer->scan_el_group.attrs, buffer->scan_el_attrs->attrs,
		       sizeof(buffer->scan_el_group.attrs[0])*attrcount_orig);
	attrn = attrcount_orig;

	list_for_each_entry(p, &buffer->scan_el_dev_attr_list, l)
		buffer->scan_el_group.attrs[attrn++] = &p->dev_attr.attr;
	indio_dev->groups[indio_dev->groupcounter++] = &buffer->scan_el_group;

	return 0;

error_free_scan_mask:
	kfree(buffer->scan_mask);
error_cleanup_dynamic:
	__iio_buffer_attr_cleanup(indio_dev);

	return ret;
}
EXPORT_SYMBOL(iio_buffer_register);
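
/*
 * Registration sketch (my_channels and the allocator are hypothetical):
 * typically called from a driver's probe() once indio_dev->buffer exists,
 * creating the scan_elements group ("en", "index", "type" per channel):
 *
 *	indio_dev->buffer = my_allocate_buffer();
 *	ret = iio_buffer_register(indio_dev, my_channels,
 *				  ARRAY_SIZE(my_channels));
 *	if (ret)
 *		goto error_free_buffer;
 */
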
void iio_buffer_unregister(struct iio_dev *indio_dev)
{
	kfree(indio_dev->buffer->scan_mask);
	kfree(indio_dev->buffer->scan_el_group.attrs);
	__iio_buffer_attr_cleanup(indio_dev);
}
EXPORT_SYMBOL(iio_buffer_unregister);

ssize_t iio_buffer_read_length(struct device *dev,
			       struct device_attribute *attr,
			       char *buf)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_buffer *buffer = indio_dev->buffer;

	if (buffer->access->get_length)
		return sprintf(buf, "%d\n",
			       buffer->access->get_length(buffer));

	return 0;
}
EXPORT_SYMBOL(iio_buffer_read_length);

ssize_t iio_buffer_write_length(struct device *dev,
				struct device_attribute *attr,
				const char *buf,
				size_t len)
{
	int ret;
	unsigned int val;
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_buffer *buffer = indio_dev->buffer;

	ret = kstrtouint(buf, 10, &val);
	if (ret)
		return ret;

	/* Nothing to do if the requested length is already set */
	if (buffer->access->get_length)
		if (val == buffer->access->get_length(buffer))
			return len;

	mutex_lock(&indio_dev->mlock);
	if (iio_buffer_is_active(indio_dev, indio_dev->buffer)) {
		ret = -EBUSY;
	} else {
		if (buffer->access->set_length)
			buffer->access->set_length(buffer, val);
		ret = 0;
	}
	mutex_unlock(&indio_dev->mlock);

	return ret ? ret : len;
}
EXPORT_SYMBOL(iio_buffer_write_length);

ssize_t iio_buffer_show_enable(struct device *dev,
			       struct device_attribute *attr,
			       char *buf)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);

	return sprintf(buf, "%d\n",
		       iio_buffer_is_active(indio_dev,
					    indio_dev->buffer));
}
EXPORT_SYMBOL(iio_buffer_show_enable);
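
/*
 * This handler (and iio_buffer_store_enable() further down) backs the
 * "enable" attribute of the buffer sysfs group: userspace starts capture
 * by writing "1" to a path of the form
 * /sys/bus/iio/devices/iio:deviceN/buffer/enable and reads the current
 * state back from the same file.
 */
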
/* Note NULL is used as the error indicator as it doesn't make sense here. */
static const unsigned long *iio_scan_mask_match(const unsigned long *av_masks,
						unsigned int masklength,
						const unsigned long *mask)
{
	if (bitmap_empty(mask, masklength))
		return NULL;
	while (*av_masks) {
		if (bitmap_subset(mask, av_masks, masklength))
			return av_masks;
		av_masks += BITS_TO_LONGS(masklength);
	}
	return NULL;
}
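
/*
 * Worked example (mask values illustrative, masklength <= BITS_PER_LONG):
 * a driver might publish
 *
 *	static const unsigned long my_avail_masks[] = {
 *		0x03,	// channels 0 and 1
 *		0x0c,	// channels 2 and 3
 *		0x00,	// terminates the list, see while (*av_masks)
 *	};
 *
 * A requested mask of 0x01 is a subset of 0x03, so the first entry is
 * returned; a request for 0x05 matches neither entry and yields NULL.
 */
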
static int iio_compute_scan_bytes(struct iio_dev *indio_dev, const long *mask,
				  bool timestamp)
{
	const struct iio_chan_spec *ch;
	unsigned bytes = 0;
	int length, i;

	/* How much space will the demuxed element take? */
	for_each_set_bit(i, mask,
			 indio_dev->masklength) {
		ch = iio_find_channel_from_si(indio_dev, i);
		length = ch->scan_type.storagebits / 8;
		bytes = ALIGN(bytes, length);
		bytes += length;
	}
	if (timestamp) {
		ch = iio_find_channel_from_si(indio_dev,
					      indio_dev->scan_index_timestamp);
		length = ch->scan_type.storagebits / 8;
		bytes = ALIGN(bytes, length);
		bytes += length;
	}
	return bytes;
}
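
/*
 * Worked example (hypothetical scan): two channels with 16 storagebits
 * each plus a 64-bit timestamp. The channel bytes land at offsets 0 and
 * 2 (each length 2, already aligned), giving bytes = 4; the 8-byte
 * timestamp is then aligned up to offset 8, for a total of 16 bytes per
 * scan.
 */
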
void iio_disable_all_buffers(struct iio_dev *indio_dev)
{
	struct iio_buffer *buffer, *_buffer;

	if (list_empty(&indio_dev->buffer_list))
		return;

	if (indio_dev->setup_ops->predisable)
		indio_dev->setup_ops->predisable(indio_dev);

	list_for_each_entry_safe(buffer, _buffer,
				 &indio_dev->buffer_list, buffer_list)
		list_del_init(&buffer->buffer_list);

	indio_dev->currentmode = INDIO_DIRECT_MODE;
	if (indio_dev->setup_ops->postdisable)
		indio_dev->setup_ops->postdisable(indio_dev);

	if (indio_dev->available_scan_masks == NULL)
		kfree(indio_dev->active_scan_mask);
}

int iio_update_buffers(struct iio_dev *indio_dev,
		       struct iio_buffer *insert_buffer,
		       struct iio_buffer *remove_buffer)
{
	int ret;
	int success = 0;
	struct iio_buffer *buffer;
	unsigned long *compound_mask;
	const unsigned long *old_mask;

	/* Wind down existing buffers - iff there are any */
	if (!list_empty(&indio_dev->buffer_list)) {
		if (indio_dev->setup_ops->predisable) {
			ret = indio_dev->setup_ops->predisable(indio_dev);
			if (ret)
				goto error_ret;
		}
		indio_dev->currentmode = INDIO_DIRECT_MODE;
		if (indio_dev->setup_ops->postdisable) {
			ret = indio_dev->setup_ops->postdisable(indio_dev);
			if (ret)
				goto error_ret;
		}
	}
	/* Keep a copy of current setup to allow roll back */
	old_mask = indio_dev->active_scan_mask;
	if (!indio_dev->available_scan_masks)
		indio_dev->active_scan_mask = NULL;

	if (remove_buffer)
		list_del(&remove_buffer->buffer_list);
	if (insert_buffer)
		list_add(&insert_buffer->buffer_list, &indio_dev->buffer_list);

	/* If no buffers in list, we are done */
	if (list_empty(&indio_dev->buffer_list)) {
		indio_dev->currentmode = INDIO_DIRECT_MODE;
		if (indio_dev->available_scan_masks == NULL)
			kfree(old_mask);
		return 0;
	}

	/* What scan mask do we actually have? */
	compound_mask = kcalloc(BITS_TO_LONGS(indio_dev->masklength),
				sizeof(long), GFP_KERNEL);
	if (compound_mask == NULL) {
		if (indio_dev->available_scan_masks == NULL)
			kfree(old_mask);
		return -ENOMEM;
	}
	indio_dev->scan_timestamp = 0;

	list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list) {
		bitmap_or(compound_mask, compound_mask, buffer->scan_mask,
			  indio_dev->masklength);
		indio_dev->scan_timestamp |= buffer->scan_timestamp;
	}
	if (indio_dev->available_scan_masks) {
		indio_dev->active_scan_mask =
			iio_scan_mask_match(indio_dev->available_scan_masks,
					    indio_dev->masklength,
					    compound_mask);
		if (indio_dev->active_scan_mask == NULL) {
			/*
			 * Roll back.
			 * Note this can only occur when adding a buffer.
			 */
			list_del(&insert_buffer->buffer_list);
			if (old_mask) {
				indio_dev->active_scan_mask = old_mask;
				success = -EINVAL;
			} else {
				kfree(compound_mask);
				ret = -EINVAL;
				goto error_ret;
			}
		}
	} else {
		indio_dev->active_scan_mask = compound_mask;
	}

	iio_update_demux(indio_dev);

	/* Wind up again */
	if (indio_dev->setup_ops->preenable) {
		ret = indio_dev->setup_ops->preenable(indio_dev);
		if (ret) {
			printk(KERN_ERR
			       "Buffer not started: buffer preenable failed (%d)\n", ret);
			goto error_remove_inserted;
		}
	}
	indio_dev->scan_bytes =
		iio_compute_scan_bytes(indio_dev,
				       indio_dev->active_scan_mask,
				       indio_dev->scan_timestamp);
	list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list)
		if (buffer->access->request_update) {
			ret = buffer->access->request_update(buffer);
			if (ret) {
				printk(KERN_INFO
				       "Buffer not started: buffer parameter update failed (%d)\n", ret);
				goto error_run_postdisable;
			}
		}
	if (indio_dev->info->update_scan_mode) {
		ret = indio_dev->info
			->update_scan_mode(indio_dev,
					   indio_dev->active_scan_mask);
		if (ret < 0) {
			printk(KERN_INFO "Buffer not started: update scan mode failed (%d)\n", ret);
			goto error_run_postdisable;
		}
	}
	/* Definitely possible for devices to support both of these. */
	if (indio_dev->modes & INDIO_BUFFER_TRIGGERED) {
		if (!indio_dev->trig) {
			printk(KERN_INFO "Buffer not started: no trigger\n");
			ret = -EINVAL;
			/* Can only occur on first buffer */
			goto error_run_postdisable;
		}
		indio_dev->currentmode = INDIO_BUFFER_TRIGGERED;
	} else if (indio_dev->modes & INDIO_BUFFER_HARDWARE) {
		indio_dev->currentmode = INDIO_BUFFER_HARDWARE;
	} else { /* Should never be reached */
		ret = -EINVAL;
		goto error_run_postdisable;
	}

	if (indio_dev->setup_ops->postenable) {
		ret = indio_dev->setup_ops->postenable(indio_dev);
		if (ret) {
			printk(KERN_INFO
			       "Buffer not started: postenable failed (%d)\n", ret);
			indio_dev->currentmode = INDIO_DIRECT_MODE;
			if (indio_dev->setup_ops->postdisable)
				indio_dev->setup_ops->postdisable(indio_dev);
			goto error_disable_all_buffers;
		}
	}

	if (indio_dev->available_scan_masks)
		kfree(compound_mask);
	else
		kfree(old_mask);

	return success;

error_disable_all_buffers:
	indio_dev->currentmode = INDIO_DIRECT_MODE;
error_run_postdisable:
	if (indio_dev->setup_ops->postdisable)
		indio_dev->setup_ops->postdisable(indio_dev);
error_remove_inserted:
	if (insert_buffer)
		list_del(&insert_buffer->buffer_list);
	indio_dev->active_scan_mask = old_mask;
	kfree(compound_mask);
error_ret:
	return ret;
}
EXPORT_SYMBOL_GPL(iio_update_buffers);
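
/*
 * Call sketch: iio_buffer_store_enable() below uses this with the
 * device's own buffer; an in-kernel consumer would attach or detach its
 * buffer the same way, under indio_dev->mlock (my_buffer hypothetical):
 *
 *	mutex_lock(&indio_dev->mlock);
 *	ret = iio_update_buffers(indio_dev, my_buffer, NULL);	// attach
 *	// ... capture runs ...
 *	ret = iio_update_buffers(indio_dev, NULL, my_buffer);	// detach
 *	mutex_unlock(&indio_dev->mlock);
 */
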
ssize_t iio_buffer_store_enable(struct device *dev,
				struct device_attribute *attr,
				const char *buf,
				size_t len)
{
	int ret;
	bool requested_state;
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_buffer *pbuf = indio_dev->buffer;
	bool inlist;

	ret = strtobool(buf, &requested_state);
	if (ret < 0)
		return ret;

	mutex_lock(&indio_dev->mlock);

	/* Find out if it is in the list */
	inlist = iio_buffer_is_active(indio_dev, pbuf);
	/* Already in desired state */
	if (inlist == requested_state)
		goto done;

	if (requested_state)
		ret = iio_update_buffers(indio_dev,
					 indio_dev->buffer, NULL);
	else
		ret = iio_update_buffers(indio_dev,
					 NULL, indio_dev->buffer);

done:
	mutex_unlock(&indio_dev->mlock);
	return (ret < 0) ? ret : len;
}
EXPORT_SYMBOL(iio_buffer_store_enable);

int iio_sw_buffer_preenable(struct iio_dev *indio_dev)
{
	struct iio_buffer *buffer;
	unsigned bytes;

	dev_dbg(&indio_dev->dev, "%s\n", __func__);

	list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list)
		if (buffer->access->set_bytes_per_datum) {
			bytes = iio_compute_scan_bytes(indio_dev,
						       buffer->scan_mask,
						       buffer->scan_timestamp);

			buffer->access->set_bytes_per_datum(buffer, bytes);
		}
	return 0;
}
EXPORT_SYMBOL(iio_sw_buffer_preenable);
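
/*
 * Wiring sketch (struct name hypothetical): this helper has the right
 * signature to serve directly as a driver's preenable callback, so each
 * buffer learns its bytes-per-datum before capture starts:
 *
 *	static const struct iio_buffer_setup_ops my_setup_ops = {
 *		.preenable = &iio_sw_buffer_preenable,
 *	};
 *
 *	indio_dev->setup_ops = &my_setup_ops;
 */
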
/**
 * iio_validate_scan_mask_onehot() - Validate that exactly one channel is selected
 * @indio_dev: the iio device
 * @mask: scan mask to be checked
 *
 * Return true if exactly one bit is set in the scan mask, false otherwise. It
 * can be used for devices where only one channel can be active for sampling at
 * a time.
 */
bool iio_validate_scan_mask_onehot(struct iio_dev *indio_dev,
				   const unsigned long *mask)
{
	return bitmap_weight(mask, indio_dev->masklength) == 1;
}
EXPORT_SYMBOL_GPL(iio_validate_scan_mask_onehot);
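
/*
 * Wiring sketch (struct name hypothetical): a device that can sample
 * only one channel at a time hangs this off its setup ops; the
 * iio_validate_scan_mask() helper below then consults it on every
 * attempt to set a scan-mask bit:
 *
 *	static const struct iio_buffer_setup_ops my_setup_ops = {
 *		.validate_scan_mask = &iio_validate_scan_mask_onehot,
 *	};
 */
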
static bool iio_validate_scan_mask(struct iio_dev *indio_dev,
				   const unsigned long *mask)
{
	if (!indio_dev->setup_ops->validate_scan_mask)
		return true;

	return indio_dev->setup_ops->validate_scan_mask(indio_dev, mask);
}

/**
 * iio_scan_mask_set() - set particular bit in the scan mask
 * @indio_dev: the iio device
 * @buffer: the buffer whose scan mask we are interested in
 * @bit: the bit to be set.
 *
 * Note that at this point we have no way of knowing what other
 * buffers might request, hence this code only verifies that the
 * individual buffer's request is plausible.
 */
int iio_scan_mask_set(struct iio_dev *indio_dev,
		      struct iio_buffer *buffer, int bit)
{
	const unsigned long *mask;
	unsigned long *trialmask;

	trialmask = kmalloc(sizeof(*trialmask)*
			    BITS_TO_LONGS(indio_dev->masklength),
			    GFP_KERNEL);
	if (trialmask == NULL)
		return -ENOMEM;
	if (!indio_dev->masklength) {
		WARN(1, "trying to set scanmask prior to registering buffer\n");
		goto err_invalid_mask;
	}
	bitmap_copy(trialmask, buffer->scan_mask, indio_dev->masklength);
	set_bit(bit, trialmask);

	if (!iio_validate_scan_mask(indio_dev, trialmask))
		goto err_invalid_mask;

	if (indio_dev->available_scan_masks) {
		mask = iio_scan_mask_match(indio_dev->available_scan_masks,
					   indio_dev->masklength,
					   trialmask);
		if (!mask)
			goto err_invalid_mask;
	}
	bitmap_copy(buffer->scan_mask, trialmask, indio_dev->masklength);

	kfree(trialmask);

	return 0;

err_invalid_mask:
	kfree(trialmask);
	return -EINVAL;
}
EXPORT_SYMBOL_GPL(iio_scan_mask_set);

int iio_scan_mask_query(struct iio_dev *indio_dev,
			struct iio_buffer *buffer, int bit)
{
	/* Valid bit indices run from 0 to masklength - 1 */
	if (bit >= indio_dev->masklength)
		return -EINVAL;

	if (!buffer->scan_mask)
		return 0;

	return test_bit(bit, buffer->scan_mask);
}
EXPORT_SYMBOL_GPL(iio_scan_mask_query);

/**
 * struct iio_demux_table - table describing demux memcpy ops
 * @from: index to copy from
 * @to: index to copy to
 * @length: how many bytes to copy
 * @l: list head used for management
 */
struct iio_demux_table {
	unsigned from;
	unsigned to;
	unsigned length;
	struct list_head l;
};
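
/*
 * Worked example (illustrative): if the device scans 16-bit channels 0,
 * 1 and 2 but a buffer only wants channels 0 and 2, the demux list holds
 * two entries, { .from = 0, .to = 0, .length = 2 } and { .from = 4,
 * .to = 2, .length = 2 }, copying around the unwanted channel 1 bytes.
 */
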
static unsigned char *iio_demux(struct iio_buffer *buffer,
				unsigned char *datain)
{
	struct iio_demux_table *t;

	if (list_empty(&buffer->demux_list))
		return datain;
	list_for_each_entry(t, &buffer->demux_list, l)
		memcpy(buffer->demux_bounce + t->to,
		       datain + t->from, t->length);

	return buffer->demux_bounce;
}

static int iio_push_to_buffer(struct iio_buffer *buffer, unsigned char *data)
{
	unsigned char *dataout = iio_demux(buffer, data);

	return buffer->access->store_to(buffer, dataout);
}

static void iio_buffer_demux_free(struct iio_buffer *buffer)
{
	struct iio_demux_table *p, *q;

	list_for_each_entry_safe(p, q, &buffer->demux_list, l) {
		list_del(&p->l);
		kfree(p);
	}
}

int iio_push_to_buffers(struct iio_dev *indio_dev, unsigned char *data)
{
	int ret;
	struct iio_buffer *buf;

	list_for_each_entry(buf, &indio_dev->buffer_list, buffer_list) {
		ret = iio_push_to_buffer(buf, data);
		if (ret < 0)
			return ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(iio_push_to_buffers);
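
/*
 * Producer sketch (handler name and scan bound hypothetical): a triggered
 * driver typically calls iio_push_to_buffers() from its pollfunc bottom
 * half once a full scan has been assembled:
 *
 *	static irqreturn_t my_trigger_handler(int irq, void *p)
 *	{
 *		struct iio_poll_func *pf = p;
 *		struct iio_dev *indio_dev = pf->indio_dev;
 *		u8 scan[MY_MAX_SCAN_BYTES];
 *
 *		// fill scan with indio_dev->scan_bytes of sample data
 *		iio_push_to_buffers(indio_dev, scan);
 *		iio_trigger_notify_done(indio_dev->trig);
 *		return IRQ_HANDLED;
 *	}
 */
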
static int iio_buffer_update_demux(struct iio_dev *indio_dev,
				   struct iio_buffer *buffer)
{
	const struct iio_chan_spec *ch;
	int ret, in_ind = -1, out_ind, length;
	unsigned in_loc = 0, out_loc = 0;
	struct iio_demux_table *p;

	/* Clear out any old demux */
	iio_buffer_demux_free(buffer);
	kfree(buffer->demux_bounce);
	buffer->demux_bounce = NULL;

	/* First work out which scan mode we will actually have */
	if (bitmap_equal(indio_dev->active_scan_mask,
			 buffer->scan_mask,
			 indio_dev->masklength))
		return 0;

	/* Now we have the two masks, work from least sig and build up sizes */
	for_each_set_bit(out_ind,
			 indio_dev->active_scan_mask,
			 indio_dev->masklength) {
		in_ind = find_next_bit(indio_dev->active_scan_mask,
				       indio_dev->masklength,
				       in_ind + 1);
		while (in_ind != out_ind) {
			in_ind = find_next_bit(indio_dev->active_scan_mask,
					       indio_dev->masklength,
					       in_ind + 1);
			ch = iio_find_channel_from_si(indio_dev, in_ind);
			length = ch->scan_type.storagebits/8;
			/* Make sure we are aligned */
			in_loc += length;
			if (in_loc % length)
				in_loc += length - in_loc % length;
		}
		p = kmalloc(sizeof(*p), GFP_KERNEL);
		if (p == NULL) {
			ret = -ENOMEM;
			goto error_clear_mux_table;
		}
		ch = iio_find_channel_from_si(indio_dev, in_ind);
		length = ch->scan_type.storagebits/8;
		if (out_loc % length)
			out_loc += length - out_loc % length;
		if (in_loc % length)
			in_loc += length - in_loc % length;
		p->from = in_loc;
		p->to = out_loc;
		p->length = length;
		list_add_tail(&p->l, &buffer->demux_list);
		out_loc += length;
		in_loc += length;
	}
	/* Relies on scan_timestamp being last */
	if (buffer->scan_timestamp) {
		p = kmalloc(sizeof(*p), GFP_KERNEL);
		if (p == NULL) {
			ret = -ENOMEM;
			goto error_clear_mux_table;
		}
		ch = iio_find_channel_from_si(indio_dev,
					      indio_dev->scan_index_timestamp);
		length = ch->scan_type.storagebits/8;
		if (out_loc % length)
			out_loc += length - out_loc % length;
		if (in_loc % length)
			in_loc += length - in_loc % length;
		p->from = in_loc;
		p->to = out_loc;
		p->length = length;
		list_add_tail(&p->l, &buffer->demux_list);
		out_loc += length;
		in_loc += length;
	}
	buffer->demux_bounce = kzalloc(out_loc, GFP_KERNEL);
	if (buffer->demux_bounce == NULL) {
		ret = -ENOMEM;
		goto error_clear_mux_table;
	}
	return 0;

error_clear_mux_table:
	iio_buffer_demux_free(buffer);

	return ret;
}

int iio_update_demux(struct iio_dev *indio_dev)
{
	struct iio_buffer *buffer;
	int ret;

	list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list) {
		ret = iio_buffer_update_demux(indio_dev, buffer);
		if (ret < 0)
			goto error_clear_mux_table;
	}
	return 0;

error_clear_mux_table:
	list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list)
		iio_buffer_demux_free(buffer);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_update_demux);