/* The industrial I/O core
 *
 * Copyright (c) 2008 Jonathan Cameron
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * Handling of buffer allocation / resizing.
 *
 * Things to look at here.
 * - Better memory allocation techniques?
 * - Alternative access techniques?
 */
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/cdev.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/sched.h>

#include <linux/iio/iio.h>
#include "iio_core.h"
#include <linux/iio/sysfs.h>
#include <linux/iio/buffer.h>

static const char * const iio_endian_prefix[] = {
	[IIO_BE] = "be",
	[IIO_LE] = "le",
};

static bool iio_buffer_is_active(struct iio_buffer *buf)
{
	return !list_empty(&buf->buffer_list);
}

static bool iio_buffer_data_available(struct iio_buffer *buf)
{
	return buf->access->data_available(buf);
}

/**
 * iio_buffer_read_first_n_outer() - chrdev read for buffer access
 *
 * This function relies on all buffer implementations having an
 * iio_buffer as their first element.
 */
ssize_t iio_buffer_read_first_n_outer(struct file *filp, char __user *buf,
				      size_t n, loff_t *f_ps)
{
	struct iio_dev *indio_dev = filp->private_data;
	struct iio_buffer *rb = indio_dev->buffer;
	int ret;

	if (!indio_dev->info)
		return -ENODEV;

	if (!rb || !rb->access->read_first_n)
		return -EINVAL;

	do {
		if (!iio_buffer_data_available(rb)) {
			if (filp->f_flags & O_NONBLOCK)
				return -EAGAIN;

			ret = wait_event_interruptible(rb->pollq,
					iio_buffer_data_available(rb) ||
					indio_dev->info == NULL);
			if (ret)
				return ret;
			if (indio_dev->info == NULL)
				return -ENODEV;
		}

		ret = rb->access->read_first_n(rb, n, buf);
		if (ret == 0 && (filp->f_flags & O_NONBLOCK))
			ret = -EAGAIN;
	} while (ret == 0);

	return ret;
}

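/*
 * Illustrative sketch (hypothetical type, not part of this file): a
 * buffer implementation satisfies the rule above by embedding
 * struct iio_buffer as its first member, so the core can treat a
 * pointer to the implementation as a pointer to the iio_buffer:
 *
 *	struct my_kfifo_buffer {
 *		struct iio_buffer buffer;	// must be the first member
 *		struct kfifo kf;
 *	};
 */
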
/**
 * iio_buffer_poll() - poll the buffer to find out if it has data
 */
unsigned int iio_buffer_poll(struct file *filp,
			     struct poll_table_struct *wait)
{
	struct iio_dev *indio_dev = filp->private_data;
	struct iio_buffer *rb = indio_dev->buffer;

	if (!indio_dev->info)
		return -ENODEV;

	poll_wait(filp, &rb->pollq, wait);
	if (iio_buffer_data_available(rb))
		return POLLIN | POLLRDNORM;
	/* need a way of knowing if there may be enough data... */
	return 0;
}

/**
 * iio_buffer_wakeup_poll - Wakes up the buffer waitqueue
 * @indio_dev: The IIO device
 *
 * Wakes up the event waitqueue used for poll(). Should usually
 * be called when the device is unregistered.
 */
void iio_buffer_wakeup_poll(struct iio_dev *indio_dev)
{
	if (!indio_dev->buffer)
		return;

	wake_up(&indio_dev->buffer->pollq);
}

void iio_buffer_init(struct iio_buffer *buffer)
{
	INIT_LIST_HEAD(&buffer->demux_list);
	INIT_LIST_HEAD(&buffer->buffer_list);
	init_waitqueue_head(&buffer->pollq);
	kref_init(&buffer->ref);
}
EXPORT_SYMBOL(iio_buffer_init);

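/*
 * Typical use (hypothetical allocation helper, not from this file): a
 * buffer implementation initialises the embedded core structure before
 * filling in its own fields:
 *
 *	struct my_kfifo_buffer *kb = kzalloc(sizeof(*kb), GFP_KERNEL);
 *
 *	if (!kb)
 *		return NULL;
 *	iio_buffer_init(&kb->buffer);
 */
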
static ssize_t iio_show_scan_index(struct device *dev,
				   struct device_attribute *attr,
				   char *buf)
{
	return sprintf(buf, "%u\n", to_iio_dev_attr(attr)->c->scan_index);
}

static ssize_t iio_show_fixed_type(struct device *dev,
				   struct device_attribute *attr,
				   char *buf)
{
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
	u8 type = this_attr->c->scan_type.endianness;

	if (type == IIO_CPU) {
#ifdef __LITTLE_ENDIAN
		type = IIO_LE;
#else
		type = IIO_BE;
#endif
	}
	if (this_attr->c->scan_type.repeat > 1)
		return sprintf(buf, "%s:%c%d/%dX%d>>%u\n",
			       iio_endian_prefix[type],
			       this_attr->c->scan_type.sign,
			       this_attr->c->scan_type.realbits,
			       this_attr->c->scan_type.storagebits,
			       this_attr->c->scan_type.repeat,
			       this_attr->c->scan_type.shift);
	else
		return sprintf(buf, "%s:%c%d/%d>>%u\n",
			       iio_endian_prefix[type],
			       this_attr->c->scan_type.sign,
			       this_attr->c->scan_type.realbits,
			       this_attr->c->scan_type.storagebits,
			       this_attr->c->scan_type.shift);
}

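/*
 * For example, a signed 12-bit sample stored little-endian in 16 bits
 * with a 4-bit right shift reads from sysfs as "le:s12/16>>4"; with
 * scan_type.repeat == 8 the same channel would read "le:s12/16X8>>4".
 */
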
static ssize_t iio_scan_el_show(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	int ret;
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);

	/* Ensure ret is 0 or 1. */
	ret = !!test_bit(to_iio_dev_attr(attr)->address,
			 indio_dev->buffer->scan_mask);

	return sprintf(buf, "%d\n", ret);
}

/* Note NULL used as error indicator as it doesn't make sense. */
static const unsigned long *iio_scan_mask_match(const unsigned long *av_masks,
						unsigned int masklength,
						const unsigned long *mask)
{
	if (bitmap_empty(mask, masklength))
		return NULL;
	while (*av_masks) {
		if (bitmap_subset(mask, av_masks, masklength))
			return av_masks;
		av_masks += BITS_TO_LONGS(masklength);
	}
	return NULL;
}

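/*
 * Hypothetical example: a device that can scan either channels {0, 1}
 * or channels {0, 1, 2, 3} would advertise
 *
 *	static const unsigned long my_scan_masks[] = { 0x3, 0xf, 0 };
 *	indio_dev->available_scan_masks = my_scan_masks;
 *
 * The zero terminator is what ends the while (*av_masks) walk above;
 * iio_scan_mask_match() returns the first entry that is a superset of
 * the requested mask.
 */
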
static bool iio_validate_scan_mask(struct iio_dev *indio_dev,
				   const unsigned long *mask)
{
	if (!indio_dev->setup_ops->validate_scan_mask)
		return true;

	return indio_dev->setup_ops->validate_scan_mask(indio_dev, mask);
}

/**
 * iio_scan_mask_set() - set particular bit in the scan mask
 * @indio_dev: the iio device
 * @buffer: the buffer whose scan mask we are interested in
 * @bit: the bit to be set.
 *
 * Note that at this point we have no way of knowing what other
 * buffers might request, hence this code only verifies that the
 * individual buffer's request is plausible.
 */
static int iio_scan_mask_set(struct iio_dev *indio_dev,
			     struct iio_buffer *buffer, int bit)
{
	const unsigned long *mask;
	unsigned long *trialmask;

	trialmask = kmalloc(sizeof(*trialmask) *
			    BITS_TO_LONGS(indio_dev->masklength),
			    GFP_KERNEL);
	if (trialmask == NULL)
		return -ENOMEM;
	if (!indio_dev->masklength) {
		WARN(1, "Trying to set scanmask prior to registering buffer\n");
		goto err_invalid_mask;
	}
	bitmap_copy(trialmask, buffer->scan_mask, indio_dev->masklength);
	set_bit(bit, trialmask);

	if (!iio_validate_scan_mask(indio_dev, trialmask))
		goto err_invalid_mask;

	if (indio_dev->available_scan_masks) {
		mask = iio_scan_mask_match(indio_dev->available_scan_masks,
					   indio_dev->masklength,
					   trialmask);
		if (!mask)
			goto err_invalid_mask;
	}
	bitmap_copy(buffer->scan_mask, trialmask, indio_dev->masklength);

	kfree(trialmask);

	return 0;

err_invalid_mask:
	kfree(trialmask);
	return -EINVAL;
}

static int iio_scan_mask_clear(struct iio_buffer *buffer, int bit)
{
	clear_bit(bit, buffer->scan_mask);
	return 0;
}

static ssize_t iio_scan_el_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf,
				 size_t len)
{
	int ret;
	bool state;
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_buffer *buffer = indio_dev->buffer;
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);

	ret = strtobool(buf, &state);
	if (ret < 0)
		return ret;
	mutex_lock(&indio_dev->mlock);
	if (iio_buffer_is_active(indio_dev->buffer)) {
		ret = -EBUSY;
		goto error_ret;
	}
	ret = iio_scan_mask_query(indio_dev, buffer, this_attr->address);
	if (ret < 0)
		goto error_ret;
	if (!state && ret) {
		ret = iio_scan_mask_clear(buffer, this_attr->address);
		if (ret)
			goto error_ret;
	} else if (state && !ret) {
		ret = iio_scan_mask_set(indio_dev, buffer, this_attr->address);
		if (ret)
			goto error_ret;
	}

error_ret:
	mutex_unlock(&indio_dev->mlock);

	return ret < 0 ? ret : len;
}

static ssize_t iio_scan_el_ts_show(struct device *dev,
				   struct device_attribute *attr,
				   char *buf)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);

	return sprintf(buf, "%d\n", indio_dev->buffer->scan_timestamp);
}

static ssize_t iio_scan_el_ts_store(struct device *dev,
				    struct device_attribute *attr,
				    const char *buf,
				    size_t len)
{
	int ret;
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	bool state;

	ret = strtobool(buf, &state);
	if (ret < 0)
		return ret;

	mutex_lock(&indio_dev->mlock);
	if (iio_buffer_is_active(indio_dev->buffer)) {
		ret = -EBUSY;
		goto error_ret;
	}
	indio_dev->buffer->scan_timestamp = state;
error_ret:
	mutex_unlock(&indio_dev->mlock);

	return ret ? ret : len;
}

static int iio_buffer_add_channel_sysfs(struct iio_dev *indio_dev,
					const struct iio_chan_spec *chan)
{
	int ret, attrcount = 0;
	struct iio_buffer *buffer = indio_dev->buffer;

	ret = __iio_add_chan_devattr("index",
				     chan,
				     &iio_show_scan_index,
				     NULL,
				     0,
				     IIO_SEPARATE,
				     &indio_dev->dev,
				     &buffer->scan_el_dev_attr_list);
	if (ret)
		return ret;
	attrcount++;
	ret = __iio_add_chan_devattr("type",
				     chan,
				     &iio_show_fixed_type,
				     NULL,
				     0,
				     0,
				     &indio_dev->dev,
				     &buffer->scan_el_dev_attr_list);
	if (ret)
		return ret;
	attrcount++;
	if (chan->type != IIO_TIMESTAMP)
		ret = __iio_add_chan_devattr("en",
					     chan,
					     &iio_scan_el_show,
					     &iio_scan_el_store,
					     chan->scan_index,
					     0,
					     &indio_dev->dev,
					     &buffer->scan_el_dev_attr_list);
	else
		ret = __iio_add_chan_devattr("en",
					     chan,
					     &iio_scan_el_ts_show,
					     &iio_scan_el_ts_store,
					     chan->scan_index,
					     0,
					     &indio_dev->dev,
					     &buffer->scan_el_dev_attr_list);
	if (ret)
		return ret;
	attrcount++;
	ret = attrcount;
	return ret;
}

static ssize_t iio_buffer_read_length(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_buffer *buffer = indio_dev->buffer;

	return sprintf(buf, "%d\n", buffer->length);
}

static ssize_t iio_buffer_write_length(struct device *dev,
				       struct device_attribute *attr,
				       const char *buf, size_t len)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_buffer *buffer = indio_dev->buffer;
	unsigned int val;
	int ret;

	ret = kstrtouint(buf, 10, &val);
	if (ret)
		return ret;

	if (val == buffer->length)
		return len;

	mutex_lock(&indio_dev->mlock);
	if (iio_buffer_is_active(indio_dev->buffer)) {
		ret = -EBUSY;
	} else {
		buffer->access->set_length(buffer, val);
		ret = 0;
	}
	mutex_unlock(&indio_dev->mlock);

	return ret ? ret : len;
}

static ssize_t iio_buffer_show_enable(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);

	return sprintf(buf, "%d\n", iio_buffer_is_active(indio_dev->buffer));
}

static int iio_compute_scan_bytes(struct iio_dev *indio_dev,
				  const unsigned long *mask, bool timestamp)
{
	const struct iio_chan_spec *ch;
	unsigned bytes = 0;
	int length, i;

	/* How much space will the demuxed element take? */
	for_each_set_bit(i, mask,
			 indio_dev->masklength) {
		ch = iio_find_channel_from_si(indio_dev, i);
		if (ch->scan_type.repeat > 1)
			length = ch->scan_type.storagebits / 8 *
				ch->scan_type.repeat;
		else
			length = ch->scan_type.storagebits / 8;
		bytes = ALIGN(bytes, length);
		bytes += length;
	}
	if (timestamp) {
		ch = iio_find_channel_from_si(indio_dev,
					      indio_dev->scan_index_timestamp);
		if (ch->scan_type.repeat > 1)
			length = ch->scan_type.storagebits / 8 *
				ch->scan_type.repeat;
		else
			length = ch->scan_type.storagebits / 8;
		bytes = ALIGN(bytes, length);
		bytes += length;
	}
	return bytes;
}

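/*
 * Worked example (hypothetical channels): two enabled channels with
 * storagebits == 16 plus the 8-byte timestamp pack as ch0 at offset 0,
 * ch1 at offset 2, and the timestamp ALIGNed up to offset 8, giving a
 * 16-byte scan rather than a 12-byte one.
 */
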
static void iio_buffer_activate(struct iio_dev *indio_dev,
				struct iio_buffer *buffer)
{
	iio_buffer_get(buffer);
	list_add(&buffer->buffer_list, &indio_dev->buffer_list);
}

static void iio_buffer_deactivate(struct iio_buffer *buffer)
{
	list_del_init(&buffer->buffer_list);
	iio_buffer_put(buffer);
}

void iio_disable_all_buffers(struct iio_dev *indio_dev)
{
	struct iio_buffer *buffer, *_buffer;

	if (list_empty(&indio_dev->buffer_list))
		return;

	if (indio_dev->setup_ops->predisable)
		indio_dev->setup_ops->predisable(indio_dev);

	list_for_each_entry_safe(buffer, _buffer,
				 &indio_dev->buffer_list, buffer_list)
		iio_buffer_deactivate(buffer);

	indio_dev->currentmode = INDIO_DIRECT_MODE;
	if (indio_dev->setup_ops->postdisable)
		indio_dev->setup_ops->postdisable(indio_dev);

	if (indio_dev->available_scan_masks == NULL)
		kfree(indio_dev->active_scan_mask);
}

static void iio_buffer_update_bytes_per_datum(struct iio_dev *indio_dev,
					      struct iio_buffer *buffer)
{
	unsigned int bytes;

	if (!buffer->access->set_bytes_per_datum)
		return;

	bytes = iio_compute_scan_bytes(indio_dev, buffer->scan_mask,
				       buffer->scan_timestamp);

	buffer->access->set_bytes_per_datum(buffer, bytes);
}

static int __iio_update_buffers(struct iio_dev *indio_dev,
				struct iio_buffer *insert_buffer,
				struct iio_buffer *remove_buffer)
{
	int ret;
	int success = 0;
	struct iio_buffer *buffer;
	unsigned long *compound_mask;
	const unsigned long *old_mask;

	/* Wind down existing buffers - iff there are any */
	if (!list_empty(&indio_dev->buffer_list)) {
		if (indio_dev->setup_ops->predisable) {
			ret = indio_dev->setup_ops->predisable(indio_dev);
			if (ret)
				return ret;
		}
		indio_dev->currentmode = INDIO_DIRECT_MODE;
		if (indio_dev->setup_ops->postdisable) {
			ret = indio_dev->setup_ops->postdisable(indio_dev);
			if (ret)
				return ret;
		}
	}
	/* Keep a copy of current setup to allow roll back */
	old_mask = indio_dev->active_scan_mask;
	if (!indio_dev->available_scan_masks)
		indio_dev->active_scan_mask = NULL;

	if (remove_buffer)
		iio_buffer_deactivate(remove_buffer);
	if (insert_buffer)
		iio_buffer_activate(indio_dev, insert_buffer);

	/* If no buffers in list, we are done */
	if (list_empty(&indio_dev->buffer_list)) {
		indio_dev->currentmode = INDIO_DIRECT_MODE;
		if (indio_dev->available_scan_masks == NULL)
			kfree(old_mask);
		return 0;
	}

	/* What scan mask do we actually have? */
	compound_mask = kcalloc(BITS_TO_LONGS(indio_dev->masklength),
				sizeof(long), GFP_KERNEL);
	if (compound_mask == NULL) {
		if (indio_dev->available_scan_masks == NULL)
			kfree(old_mask);
		return -ENOMEM;
	}
	indio_dev->scan_timestamp = 0;

	list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list) {
		bitmap_or(compound_mask, compound_mask, buffer->scan_mask,
			  indio_dev->masklength);
		indio_dev->scan_timestamp |= buffer->scan_timestamp;
	}
	if (indio_dev->available_scan_masks) {
		indio_dev->active_scan_mask =
			iio_scan_mask_match(indio_dev->available_scan_masks,
					    indio_dev->masklength,
					    compound_mask);
		if (indio_dev->active_scan_mask == NULL) {
			/*
			 * Roll back.
			 * Note can only occur when adding a buffer.
			 */
			iio_buffer_deactivate(insert_buffer);
			if (old_mask) {
				indio_dev->active_scan_mask = old_mask;
				success = -EINVAL;
			} else {
				kfree(compound_mask);
				ret = -EINVAL;
				return ret;
			}
		}
	} else {
		indio_dev->active_scan_mask = compound_mask;
	}

	iio_update_demux(indio_dev);

	/* Wind up again */
	if (indio_dev->setup_ops->preenable) {
		ret = indio_dev->setup_ops->preenable(indio_dev);
		if (ret) {
			printk(KERN_ERR
			       "Buffer not started: buffer preenable failed (%d)\n", ret);
			goto error_remove_inserted;
		}
	}
	indio_dev->scan_bytes =
		iio_compute_scan_bytes(indio_dev,
				       indio_dev->active_scan_mask,
				       indio_dev->scan_timestamp);
	list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list) {
		iio_buffer_update_bytes_per_datum(indio_dev, buffer);
		if (buffer->access->request_update) {
			ret = buffer->access->request_update(buffer);
			if (ret) {
				printk(KERN_INFO
				       "Buffer not started: buffer parameter update failed (%d)\n", ret);
				goto error_run_postdisable;
			}
		}
	}
	if (indio_dev->info->update_scan_mode) {
		ret = indio_dev->info
			->update_scan_mode(indio_dev,
					   indio_dev->active_scan_mask);
		if (ret < 0) {
			printk(KERN_INFO "Buffer not started: update scan mode failed (%d)\n", ret);
			goto error_run_postdisable;
		}
	}
	/* Definitely possible for devices to support both of these. */
	if (indio_dev->modes & INDIO_BUFFER_TRIGGERED) {
		if (!indio_dev->trig) {
			printk(KERN_INFO "Buffer not started: no trigger\n");
			ret = -EINVAL;
			/* Can only occur on first buffer */
			goto error_run_postdisable;
		}
		indio_dev->currentmode = INDIO_BUFFER_TRIGGERED;
	} else if (indio_dev->modes & INDIO_BUFFER_HARDWARE) {
		indio_dev->currentmode = INDIO_BUFFER_HARDWARE;
	} else if (indio_dev->modes & INDIO_BUFFER_SOFTWARE) {
		indio_dev->currentmode = INDIO_BUFFER_SOFTWARE;
	} else { /* Should never be reached */
		ret = -EINVAL;
		goto error_run_postdisable;
	}

	if (indio_dev->setup_ops->postenable) {
		ret = indio_dev->setup_ops->postenable(indio_dev);
		if (ret) {
			printk(KERN_INFO
			       "Buffer not started: postenable failed (%d)\n", ret);
			indio_dev->currentmode = INDIO_DIRECT_MODE;
			if (indio_dev->setup_ops->postdisable)
				indio_dev->setup_ops->postdisable(indio_dev);
			goto error_disable_all_buffers;
		}
	}

	if (indio_dev->available_scan_masks)
		kfree(compound_mask);
	else
		kfree(old_mask);

	return success;

error_disable_all_buffers:
	indio_dev->currentmode = INDIO_DIRECT_MODE;
error_run_postdisable:
	if (indio_dev->setup_ops->postdisable)
		indio_dev->setup_ops->postdisable(indio_dev);
error_remove_inserted:
	if (insert_buffer)
		iio_buffer_deactivate(insert_buffer);
	indio_dev->active_scan_mask = old_mask;
	kfree(compound_mask);
	return ret;
}

int iio_update_buffers(struct iio_dev *indio_dev,
		       struct iio_buffer *insert_buffer,
		       struct iio_buffer *remove_buffer)
{
	int ret;

	if (insert_buffer == remove_buffer)
		return 0;

	mutex_lock(&indio_dev->info_exist_lock);
	mutex_lock(&indio_dev->mlock);

	if (insert_buffer && iio_buffer_is_active(insert_buffer))
		insert_buffer = NULL;

	if (remove_buffer && !iio_buffer_is_active(remove_buffer))
		remove_buffer = NULL;

	if (!insert_buffer && !remove_buffer) {
		ret = 0;
		goto out_unlock;
	}

	if (indio_dev->info == NULL) {
		ret = -ENODEV;
		goto out_unlock;
	}

	ret = __iio_update_buffers(indio_dev, insert_buffer, remove_buffer);

out_unlock:
	mutex_unlock(&indio_dev->mlock);
	mutex_unlock(&indio_dev->info_exist_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_update_buffers);

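/*
 * Illustrative call sequence (hypothetical driver code): attach an
 * extra buffer to a live device and detach it again later; an insert
 * and a remove may also be given in a single call:
 *
 *	int ret = iio_update_buffers(indio_dev, new_buf, NULL);
 *
 *	if (ret)
 *		return ret;
 *	// later, when done with the buffer:
 *	iio_update_buffers(indio_dev, NULL, new_buf);
 */
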
static ssize_t iio_buffer_store_enable(struct device *dev,
				       struct device_attribute *attr,
				       const char *buf,
				       size_t len)
{
	int ret;
	bool requested_state;
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	bool inlist;

	ret = strtobool(buf, &requested_state);
	if (ret < 0)
		return ret;

	mutex_lock(&indio_dev->mlock);

	/* Find out if it is in the list */
	inlist = iio_buffer_is_active(indio_dev->buffer);
	/* Already in desired state */
	if (inlist == requested_state)
		goto done;

	if (requested_state)
		ret = __iio_update_buffers(indio_dev,
					   indio_dev->buffer, NULL);
	else
		ret = __iio_update_buffers(indio_dev,
					   NULL, indio_dev->buffer);

done:
	mutex_unlock(&indio_dev->mlock);
	return (ret < 0) ? ret : len;
}

static const char * const iio_scan_elements_group_name = "scan_elements";

static DEVICE_ATTR(length, S_IRUGO | S_IWUSR, iio_buffer_read_length,
		   iio_buffer_write_length);
static struct device_attribute dev_attr_length_ro = __ATTR(length,
	S_IRUGO, iio_buffer_read_length, NULL);
static DEVICE_ATTR(enable, S_IRUGO | S_IWUSR,
		   iio_buffer_show_enable, iio_buffer_store_enable);

int iio_buffer_alloc_sysfs_and_mask(struct iio_dev *indio_dev)
{
	struct iio_dev_attr *p;
	struct attribute **attr;
	struct iio_buffer *buffer = indio_dev->buffer;
	int ret, i, attrn, attrcount, attrcount_orig = 0;
	const struct iio_chan_spec *channels;

	if (!buffer)
		return 0;

	attrcount = 0;
	if (buffer->attrs) {
		while (buffer->attrs[attrcount] != NULL)
			attrcount++;
	}

	buffer->buffer_group.name = "buffer";
	buffer->buffer_group.attrs = kcalloc(attrcount + 3,
			sizeof(*buffer->buffer_group.attrs), GFP_KERNEL);
	if (!buffer->buffer_group.attrs)
		return -ENOMEM;

	if (buffer->access->set_length)
		buffer->buffer_group.attrs[0] = &dev_attr_length.attr;
	else
		buffer->buffer_group.attrs[0] = &dev_attr_length_ro.attr;
	buffer->buffer_group.attrs[1] = &dev_attr_enable.attr;
	if (buffer->attrs)
		memcpy(&buffer->buffer_group.attrs[2], buffer->attrs,
		       sizeof(*buffer->buffer_group.attrs) * attrcount);
	buffer->buffer_group.attrs[attrcount + 2] = NULL;

	indio_dev->groups[indio_dev->groupcounter++] = &buffer->buffer_group;

	if (buffer->scan_el_attrs != NULL) {
		attr = buffer->scan_el_attrs->attrs;
		while (*attr++ != NULL)
			attrcount_orig++;
	}
	attrcount = attrcount_orig;
	INIT_LIST_HEAD(&buffer->scan_el_dev_attr_list);
	channels = indio_dev->channels;
	if (channels) {
		for (i = 0; i < indio_dev->num_channels; i++) {
			if (channels[i].scan_index < 0)
				continue;

			/* Establish necessary mask length */
			if (channels[i].scan_index >
			    (int)indio_dev->masklength - 1)
				indio_dev->masklength
					= channels[i].scan_index + 1;

			ret = iio_buffer_add_channel_sysfs(indio_dev,
							   &channels[i]);
			if (ret < 0)
				goto error_cleanup_dynamic;
			attrcount += ret;
			if (channels[i].type == IIO_TIMESTAMP)
				indio_dev->scan_index_timestamp =
					channels[i].scan_index;
		}
		if (indio_dev->masklength && buffer->scan_mask == NULL) {
			buffer->scan_mask = kcalloc(BITS_TO_LONGS(indio_dev->masklength),
						    sizeof(*buffer->scan_mask),
						    GFP_KERNEL);
			if (buffer->scan_mask == NULL) {
				ret = -ENOMEM;
				goto error_cleanup_dynamic;
			}
		}
	}

	buffer->scan_el_group.name = iio_scan_elements_group_name;

	buffer->scan_el_group.attrs = kcalloc(attrcount + 1,
					      sizeof(buffer->scan_el_group.attrs[0]),
					      GFP_KERNEL);
	if (buffer->scan_el_group.attrs == NULL) {
		ret = -ENOMEM;
		goto error_free_scan_mask;
	}
	if (buffer->scan_el_attrs)
		memcpy(buffer->scan_el_group.attrs, buffer->scan_el_attrs->attrs,
		       sizeof(buffer->scan_el_group.attrs[0]) * attrcount_orig);
	attrn = attrcount_orig;

	list_for_each_entry(p, &buffer->scan_el_dev_attr_list, l)
		buffer->scan_el_group.attrs[attrn++] = &p->dev_attr.attr;
	indio_dev->groups[indio_dev->groupcounter++] = &buffer->scan_el_group;

	return 0;

error_free_scan_mask:
	kfree(buffer->scan_mask);
error_cleanup_dynamic:
	iio_free_chan_devattr_list(&buffer->scan_el_dev_attr_list);
	kfree(indio_dev->buffer->buffer_group.attrs);

	return ret;
}

void iio_buffer_free_sysfs_and_mask(struct iio_dev *indio_dev)
{
	if (!indio_dev->buffer)
		return;

	kfree(indio_dev->buffer->scan_mask);
	kfree(indio_dev->buffer->buffer_group.attrs);
	kfree(indio_dev->buffer->scan_el_group.attrs);
	iio_free_chan_devattr_list(&indio_dev->buffer->scan_el_dev_attr_list);
}

/**
 * iio_validate_scan_mask_onehot() - Validates that exactly one channel is selected
 * @indio_dev: the iio device
 * @mask: scan mask to be checked
 *
 * Return true if exactly one bit is set in the scan mask, false otherwise. It
 * can be used for devices where only one channel can be active for sampling at
 * a time.
 */
bool iio_validate_scan_mask_onehot(struct iio_dev *indio_dev,
				   const unsigned long *mask)
{
	return bitmap_weight(mask, indio_dev->masklength) == 1;
}
EXPORT_SYMBOL_GPL(iio_validate_scan_mask_onehot);

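/*
 * Typical use (hypothetical driver code): plug this helper into the
 * buffer setup ops so that the core rejects any scan mask with more
 * than one channel enabled:
 *
 *	static const struct iio_buffer_setup_ops my_setup_ops = {
 *		.validate_scan_mask = iio_validate_scan_mask_onehot,
 *	};
 */
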
int iio_scan_mask_query(struct iio_dev *indio_dev,
			struct iio_buffer *buffer, int bit)
{
	if (bit > indio_dev->masklength)
		return -EINVAL;

	if (!buffer->scan_mask)
		return 0;

	/* Ensure return value is 0 or 1. */
	return !!test_bit(bit, buffer->scan_mask);
}
EXPORT_SYMBOL_GPL(iio_scan_mask_query);

/**
 * struct iio_demux_table - table describing demux memcpy ops
 * @from: index to copy from
 * @to: index to copy to
 * @length: how many bytes to copy
 * @l: list head used for management
 */
struct iio_demux_table {
	unsigned from;
	unsigned to;
	unsigned length;
	struct list_head l;
};

static const void *iio_demux(struct iio_buffer *buffer,
			     const void *datain)
{
	struct iio_demux_table *t;

	if (list_empty(&buffer->demux_list))
		return datain;
	list_for_each_entry(t, &buffer->demux_list, l)
		memcpy(buffer->demux_bounce + t->to,
		       datain + t->from, t->length);

	return buffer->demux_bounce;
}

static int iio_push_to_buffer(struct iio_buffer *buffer, const void *data)
{
	const void *dataout = iio_demux(buffer, data);

	return buffer->access->store_to(buffer, dataout);
}

static void iio_buffer_demux_free(struct iio_buffer *buffer)
{
	struct iio_demux_table *p, *q;

	list_for_each_entry_safe(p, q, &buffer->demux_list, l) {
		list_del(&p->l);
		kfree(p);
	}
}

int iio_push_to_buffers(struct iio_dev *indio_dev, const void *data)
{
	int ret;
	struct iio_buffer *buf;

	list_for_each_entry(buf, &indio_dev->buffer_list, buffer_list) {
		ret = iio_push_to_buffer(buf, data);
		if (ret < 0)
			return ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(iio_push_to_buffers);

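/*
 * Illustrative sketch (hypothetical trigger handler): drivers usually
 * call this from their pollfunc with one complete scan assembled in
 * driver state. my_read_scan() and struct my_state are assumptions:
 *
 *	static irqreturn_t my_trigger_handler(int irq, void *p)
 *	{
 *		struct iio_poll_func *pf = p;
 *		struct iio_dev *indio_dev = pf->indio_dev;
 *		struct my_state *st = iio_priv(indio_dev);
 *
 *		my_read_scan(st);	// fill st->scan with one sample set
 *		iio_push_to_buffers(indio_dev, st->scan);
 *		iio_trigger_notify_done(indio_dev->trig);
 *		return IRQ_HANDLED;
 *	}
 */
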
static int iio_buffer_add_demux(struct iio_buffer *buffer,
	struct iio_demux_table **p, unsigned int in_loc, unsigned int out_loc,
	unsigned int length)
{
	if (*p && (*p)->from + (*p)->length == in_loc &&
	    (*p)->to + (*p)->length == out_loc) {
		/* Extend the last entry rather than adding a new one */
		(*p)->length += length;
	} else {
		*p = kmalloc(sizeof(**p), GFP_KERNEL);
		if (*p == NULL)
			return -ENOMEM;
		(*p)->from = in_loc;
		(*p)->to = out_loc;
		(*p)->length = length;
		list_add_tail(&(*p)->l, &buffer->demux_list);
	}

	return 0;
}

static int iio_buffer_update_demux(struct iio_dev *indio_dev,
				   struct iio_buffer *buffer)
{
	const struct iio_chan_spec *ch;
	int ret, in_ind = -1, out_ind, length;
	unsigned in_loc = 0, out_loc = 0;
	struct iio_demux_table *p = NULL;

	/* Clear out any old demux */
	iio_buffer_demux_free(buffer);
	kfree(buffer->demux_bounce);
	buffer->demux_bounce = NULL;

	/* First work out which scan mode we will actually have */
	if (bitmap_equal(indio_dev->active_scan_mask,
			 buffer->scan_mask,
			 indio_dev->masklength))
		return 0;

	/* Now we have the two masks, work from least sig and build up sizes */
	for_each_set_bit(out_ind,
			 buffer->scan_mask,
			 indio_dev->masklength) {
		in_ind = find_next_bit(indio_dev->active_scan_mask,
				       indio_dev->masklength,
				       in_ind + 1);
		while (in_ind != out_ind) {
			/* Account for the skipped channel before advancing */
			ch = iio_find_channel_from_si(indio_dev, in_ind);
			if (ch->scan_type.repeat > 1)
				length = ch->scan_type.storagebits / 8 *
					ch->scan_type.repeat;
			else
				length = ch->scan_type.storagebits / 8;
			/* Make sure we are aligned */
			in_loc = roundup(in_loc, length) + length;
			in_ind = find_next_bit(indio_dev->active_scan_mask,
					       indio_dev->masklength,
					       in_ind + 1);
		}
		ch = iio_find_channel_from_si(indio_dev, in_ind);
		if (ch->scan_type.repeat > 1)
			length = ch->scan_type.storagebits / 8 *
				ch->scan_type.repeat;
		else
			length = ch->scan_type.storagebits / 8;
		out_loc = roundup(out_loc, length);
		in_loc = roundup(in_loc, length);
		ret = iio_buffer_add_demux(buffer, &p, in_loc, out_loc, length);
		if (ret)
			goto error_clear_mux_table;
		out_loc += length;
		in_loc += length;
	}
	/* Relies on scan_timestamp being last */
	if (buffer->scan_timestamp) {
		ch = iio_find_channel_from_si(indio_dev,
					      indio_dev->scan_index_timestamp);
		if (ch->scan_type.repeat > 1)
			length = ch->scan_type.storagebits / 8 *
				ch->scan_type.repeat;
		else
			length = ch->scan_type.storagebits / 8;
		out_loc = roundup(out_loc, length);
		in_loc = roundup(in_loc, length);
		ret = iio_buffer_add_demux(buffer, &p, in_loc, out_loc, length);
		if (ret)
			goto error_clear_mux_table;
		out_loc += length;
		in_loc += length;
	}
	buffer->demux_bounce = kzalloc(out_loc, GFP_KERNEL);
	if (buffer->demux_bounce == NULL) {
		ret = -ENOMEM;
		goto error_clear_mux_table;
	}
	return 0;

error_clear_mux_table:
	iio_buffer_demux_free(buffer);

	return ret;
}

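/*
 * Worked example (hypothetical masks): with three 16-bit channels
 * active on the device but only channels 0 and 2 requested by this
 * buffer, the loop above builds two entries:
 *
 *	{ .from = 0, .to = 0, .length = 2 }	// channel 0
 *	{ .from = 4, .to = 2, .length = 2 }	// channel 2, skipping ch 1
 *
 * and demux_bounce is sized to the 4 bytes of the demuxed scan.
 */
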
int iio_update_demux(struct iio_dev *indio_dev)
{
	struct iio_buffer *buffer;
	int ret;

	list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list) {
		ret = iio_buffer_update_demux(indio_dev, buffer);
		if (ret < 0)
			goto error_clear_mux_table;
	}
	return 0;

error_clear_mux_table:
	list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list)
		iio_buffer_demux_free(buffer);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_update_demux);

/**
 * iio_buffer_release() - Free a buffer's resources
 * @ref: Pointer to the kref embedded in the iio_buffer struct
 *
 * This function is called when the last reference to the buffer has been
 * dropped. It will typically free all resources allocated by the buffer. Do not
 * call this function manually, always use iio_buffer_put() when done using a
 * buffer.
 */
static void iio_buffer_release(struct kref *ref)
{
	struct iio_buffer *buffer = container_of(ref, struct iio_buffer, ref);

	buffer->access->release(buffer);
}

/**
 * iio_buffer_get() - Grab a reference to the buffer
 * @buffer: The buffer to grab a reference for, may be NULL
 *
 * Returns the pointer to the buffer that was passed into the function.
 */
struct iio_buffer *iio_buffer_get(struct iio_buffer *buffer)
{
	if (buffer)
		kref_get(&buffer->ref);

	return buffer;
}
EXPORT_SYMBOL_GPL(iio_buffer_get);

/**
 * iio_buffer_put() - Release the reference to the buffer
 * @buffer: The buffer to release the reference for, may be NULL
 */
void iio_buffer_put(struct iio_buffer *buffer)
{
	if (buffer)
		kref_put(&buffer->ref, iio_buffer_release);
}
EXPORT_SYMBOL_GPL(iio_buffer_put);