// SPDX-License-Identifier: GPL-2.0-only
/* The industrial I/O core
 *
 * Copyright (c) 2008 Jonathan Cameron
 *
 * Handling of buffer allocation / resizing.
 *
 * Things to look at here.
 * - Better memory allocation techniques?
 * - Alternative access techniques?
 */
#include <linux/anon_inodes.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/device.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/cdev.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/sched/signal.h>

#include <linux/iio/iio.h>
#include <linux/iio/iio-opaque.h>
#include "iio_core.h"
#include "iio_core_trigger.h"
#include <linux/iio/sysfs.h>
#include <linux/iio/buffer.h>
#include <linux/iio/buffer_impl.h>

static const char * const iio_endian_prefix[] = {
	[IIO_BE] = "be",
	[IIO_LE] = "le",
};

static bool iio_buffer_is_active(struct iio_buffer *buf)
{
	return !list_empty(&buf->buffer_list);
}

static size_t iio_buffer_data_available(struct iio_buffer *buf)
{
	return buf->access->data_available(buf);
}

static int iio_buffer_flush_hwfifo(struct iio_dev *indio_dev,
				   struct iio_buffer *buf, size_t required)
{
	if (!indio_dev->info->hwfifo_flush_to_buffer)
		return -ENODEV;

	return indio_dev->info->hwfifo_flush_to_buffer(indio_dev, required);
}

static bool iio_buffer_ready(struct iio_dev *indio_dev, struct iio_buffer *buf,
			     size_t to_wait, int to_flush)
{
	size_t avail;
	int flushed = 0;

	/* wakeup if the device was unregistered */
	if (!indio_dev->info)
		return true;

	/* drain the buffer if it was disabled */
	if (!iio_buffer_is_active(buf)) {
		to_wait = min_t(size_t, to_wait, 1);
		to_flush = 0;
	}

	avail = iio_buffer_data_available(buf);

	if (avail >= to_wait) {
		/* force a flush for non-blocking reads */
		if (!to_wait && avail < to_flush)
			iio_buffer_flush_hwfifo(indio_dev, buf,
						to_flush - avail);
		return true;
	}

	if (to_flush)
		flushed = iio_buffer_flush_hwfifo(indio_dev, buf,
						  to_wait - avail);
	if (flushed <= 0)
		return false;

	if (avail + flushed >= to_wait)
		return true;

	return false;
}

/**
 * iio_buffer_read() - chrdev read for buffer access
 * @filp:	File structure pointer for the char device
 * @buf:	Destination buffer for iio buffer read
 * @n:		First n bytes to read
 * @f_ps:	Long offset provided by the user as a seek position
 *
 * This function relies on all buffer implementations having an
 * iio_buffer as their first element.
 *
 * Return: negative values corresponding to error codes or ret != 0
 *	   for ending the reading activity
 */
static ssize_t iio_buffer_read(struct file *filp, char __user *buf,
			       size_t n, loff_t *f_ps)
{
	struct iio_dev_buffer_pair *ib = filp->private_data;
	struct iio_buffer *rb = ib->buffer;
	struct iio_dev *indio_dev = ib->indio_dev;
	DEFINE_WAIT_FUNC(wait, woken_wake_function);
	size_t datum_size;
	size_t to_wait;
	int ret = 0;

	if (!indio_dev->info)
		return -ENODEV;

	if (!rb || !rb->access->read)
		return -EINVAL;

	datum_size = rb->bytes_per_datum;

	/*
	 * If datum_size is 0 there will never be anything to read from the
	 * buffer, so signal end of file now.
	 */
	if (!datum_size)
		return 0;

	if (filp->f_flags & O_NONBLOCK)
		to_wait = 0;
	else
		to_wait = min_t(size_t, n / datum_size, rb->watermark);

	add_wait_queue(&rb->pollq, &wait);
	do {
		if (!indio_dev->info) {
			ret = -ENODEV;
			break;
		}

		if (!iio_buffer_ready(indio_dev, rb, to_wait, n / datum_size)) {
			if (signal_pending(current)) {
				ret = -ERESTARTSYS;
				break;
			}

			wait_woken(&wait, TASK_INTERRUPTIBLE,
				   MAX_SCHEDULE_TIMEOUT);
			continue;
		}

		ret = rb->access->read(rb, n, buf);
		if (ret == 0 && (filp->f_flags & O_NONBLOCK))
			ret = -EAGAIN;
	} while (ret == 0);
	remove_wait_queue(&rb->pollq, &wait);

	return ret;
}

/**
 * iio_buffer_poll() - poll the buffer to find out if it has data
 * @filp:	File structure pointer for device access
 * @wait:	Poll table structure pointer for which the driver adds
 *		a wait queue
 *
 * Return: (EPOLLIN | EPOLLRDNORM) if data is available for reading
 *	   or 0 for other cases
 */
static __poll_t iio_buffer_poll(struct file *filp,
				struct poll_table_struct *wait)
{
	struct iio_dev_buffer_pair *ib = filp->private_data;
	struct iio_buffer *rb = ib->buffer;
	struct iio_dev *indio_dev = ib->indio_dev;

	if (!indio_dev->info || rb == NULL)
		return 0;

	poll_wait(filp, &rb->pollq, wait);
	if (iio_buffer_ready(indio_dev, rb, rb->watermark, 0))
		return EPOLLIN | EPOLLRDNORM;

	return 0;
}

ssize_t iio_buffer_read_wrapper(struct file *filp, char __user *buf,
				size_t n, loff_t *f_ps)
{
	struct iio_dev_buffer_pair *ib = filp->private_data;
	struct iio_buffer *rb = ib->buffer;

	/* check if buffer was opened through new API */
	if (test_bit(IIO_BUSY_BIT_POS, &rb->flags))
		return -EBUSY;

	return iio_buffer_read(filp, buf, n, f_ps);
}

__poll_t iio_buffer_poll_wrapper(struct file *filp,
				 struct poll_table_struct *wait)
{
	struct iio_dev_buffer_pair *ib = filp->private_data;
	struct iio_buffer *rb = ib->buffer;

	/* check if buffer was opened through new API */
	if (test_bit(IIO_BUSY_BIT_POS, &rb->flags))
		return 0;

	return iio_buffer_poll(filp, wait);
}

/**
 * iio_buffer_wakeup_poll - Wakes up the buffer waitqueue
 * @indio_dev: The IIO device
 *
 * Wakes up the event waitqueue used for poll(). Should usually
 * be called when the device is unregistered.
 */
void iio_buffer_wakeup_poll(struct iio_dev *indio_dev)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
	struct iio_buffer *buffer;
	unsigned int i;

	for (i = 0; i < iio_dev_opaque->attached_buffers_cnt; i++) {
		buffer = iio_dev_opaque->attached_buffers[i];
		wake_up(&buffer->pollq);
	}
}

void iio_buffer_init(struct iio_buffer *buffer)
{
	INIT_LIST_HEAD(&buffer->demux_list);
	INIT_LIST_HEAD(&buffer->buffer_list);
	init_waitqueue_head(&buffer->pollq);
	kref_init(&buffer->ref);
	if (!buffer->watermark)
		buffer->watermark = 1;
}
EXPORT_SYMBOL(iio_buffer_init);

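/*
 * Example (illustrative sketch, not part of this file): a buffer
 * implementation embeds a struct iio_buffer as its first member, points
 * ->access at its struct iio_buffer_access_funcs and calls iio_buffer_init()
 * once at allocation time. All "my_*" names below are hypothetical.
 *
 *	struct my_buffer {
 *		struct iio_buffer buffer;	// must stay the first element
 *		...
 *	};
 *
 *	static struct iio_buffer *my_buffer_allocate(void)
 *	{
 *		struct my_buffer *mb = kzalloc(sizeof(*mb), GFP_KERNEL);
 *
 *		if (!mb)
 *			return NULL;
 *		mb->buffer.access = &my_buffer_access_funcs;
 *		iio_buffer_init(&mb->buffer);
 *		return &mb->buffer;
 *	}
 */
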
void iio_device_detach_buffers(struct iio_dev *indio_dev)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
	struct iio_buffer *buffer;
	unsigned int i;

	for (i = 0; i < iio_dev_opaque->attached_buffers_cnt; i++) {
		buffer = iio_dev_opaque->attached_buffers[i];
		iio_buffer_put(buffer);
	}

	kfree(iio_dev_opaque->attached_buffers);
}

static ssize_t iio_show_scan_index(struct device *dev,
				   struct device_attribute *attr,
				   char *buf)
{
	return sysfs_emit(buf, "%u\n", to_iio_dev_attr(attr)->c->scan_index);
}

static ssize_t iio_show_fixed_type(struct device *dev,
				   struct device_attribute *attr,
				   char *buf)
{
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
	u8 type = this_attr->c->scan_type.endianness;

	if (type == IIO_CPU) {
#ifdef __LITTLE_ENDIAN
		type = IIO_LE;
#else
		type = IIO_BE;
#endif
	}
	if (this_attr->c->scan_type.repeat > 1)
		return sysfs_emit(buf, "%s:%c%d/%dX%d>>%u\n",
				  iio_endian_prefix[type],
				  this_attr->c->scan_type.sign,
				  this_attr->c->scan_type.realbits,
				  this_attr->c->scan_type.storagebits,
				  this_attr->c->scan_type.repeat,
				  this_attr->c->scan_type.shift);
	else
		return sysfs_emit(buf, "%s:%c%d/%d>>%u\n",
				  iio_endian_prefix[type],
				  this_attr->c->scan_type.sign,
				  this_attr->c->scan_type.realbits,
				  this_attr->c->scan_type.storagebits,
				  this_attr->c->scan_type.shift);
}

static ssize_t iio_scan_el_show(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	int ret;
	struct iio_buffer *buffer = to_iio_dev_attr(attr)->buffer;

	/* Ensure ret is 0 or 1. */
	ret = !!test_bit(to_iio_dev_attr(attr)->address,
			 buffer->scan_mask);

	return sysfs_emit(buf, "%d\n", ret);
}

/* Note: NULL is used as the error indicator, as a valid match can never be NULL. */
static const unsigned long *iio_scan_mask_match(const unsigned long *av_masks,
						unsigned int masklength,
						const unsigned long *mask,
						bool strict)
{
	if (bitmap_empty(mask, masklength))
		return NULL;
	while (*av_masks) {
		if (strict) {
			if (bitmap_equal(mask, av_masks, masklength))
				return av_masks;
		} else {
			if (bitmap_subset(mask, av_masks, masklength))
				return av_masks;
		}
		av_masks += BITS_TO_LONGS(masklength);
	}
	return NULL;
}

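/*
 * Example (hypothetical driver data, for illustration only): a device that
 * can capture either channel 0 alone or channels 0-2 together would publish
 * a zero-terminated list of masks - one unsigned long per mask here, assuming
 * masklength <= BITS_PER_LONG:
 *
 *	static const unsigned long my_scan_masks[] = {
 *		BIT(0),		// channel 0 only
 *		GENMASK(2, 0),	// channels 0, 1 and 2
 *		0,		// terminator
 *	};
 *	indio_dev->available_scan_masks = my_scan_masks;
 *
 * iio_scan_mask_match() then returns the first mask that is a superset of the
 * requested one (or an exact match in strict mode), or NULL if none fits.
 */
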
static bool iio_validate_scan_mask(struct iio_dev *indio_dev,
				   const unsigned long *mask)
{
	if (!indio_dev->setup_ops->validate_scan_mask)
		return true;

	return indio_dev->setup_ops->validate_scan_mask(indio_dev, mask);
}

/**
 * iio_scan_mask_set() - set particular bit in the scan mask
 * @indio_dev: the iio device
 * @buffer: the buffer whose scan mask we are interested in
 * @bit: the bit to be set.
 *
 * Note that at this point we have no way of knowing what other
 * buffers might request, hence this code only verifies that the
 * individual buffer's request is plausible.
 */
static int iio_scan_mask_set(struct iio_dev *indio_dev,
			     struct iio_buffer *buffer, int bit)
{
	const unsigned long *mask;
	unsigned long *trialmask;

	trialmask = bitmap_zalloc(indio_dev->masklength, GFP_KERNEL);
	if (trialmask == NULL)
		return -ENOMEM;
	if (!indio_dev->masklength) {
		WARN(1, "Trying to set scanmask prior to registering buffer\n");
		goto err_invalid_mask;
	}
	bitmap_copy(trialmask, buffer->scan_mask, indio_dev->masklength);
	set_bit(bit, trialmask);

	if (!iio_validate_scan_mask(indio_dev, trialmask))
		goto err_invalid_mask;

	if (indio_dev->available_scan_masks) {
		mask = iio_scan_mask_match(indio_dev->available_scan_masks,
					   indio_dev->masklength,
					   trialmask, false);
		if (!mask)
			goto err_invalid_mask;
	}
	bitmap_copy(buffer->scan_mask, trialmask, indio_dev->masklength);

	bitmap_free(trialmask);

	return 0;

err_invalid_mask:
	bitmap_free(trialmask);
	return -EINVAL;
}

static int iio_scan_mask_clear(struct iio_buffer *buffer, int bit)
{
	clear_bit(bit, buffer->scan_mask);
	return 0;
}

static int iio_scan_mask_query(struct iio_dev *indio_dev,
			       struct iio_buffer *buffer, int bit)
{
	if (bit > indio_dev->masklength)
		return -EINVAL;

	if (!buffer->scan_mask)
		return 0;

	/* Ensure return value is 0 or 1. */
	return !!test_bit(bit, buffer->scan_mask);
}

static ssize_t iio_scan_el_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf,
				 size_t len)
{
	int ret;
	bool state;
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
	struct iio_buffer *buffer = this_attr->buffer;

	ret = strtobool(buf, &state);
	if (ret < 0)
		return ret;
	mutex_lock(&indio_dev->mlock);
	if (iio_buffer_is_active(buffer)) {
		ret = -EBUSY;
		goto error_ret;
	}
	ret = iio_scan_mask_query(indio_dev, buffer, this_attr->address);
	if (ret < 0)
		goto error_ret;
	if (!state && ret) {
		ret = iio_scan_mask_clear(buffer, this_attr->address);
		if (ret)
			goto error_ret;
	} else if (state && !ret) {
		ret = iio_scan_mask_set(indio_dev, buffer, this_attr->address);
		if (ret)
			goto error_ret;
	}

error_ret:
	mutex_unlock(&indio_dev->mlock);

	return ret < 0 ? ret : len;
}

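/*
 * Example (userspace, illustrative; device and channel names are
 * hypothetical): the show/store pair above backs the per-channel *_en
 * files, so enabling scan elements before starting a capture looks like:
 *
 *	echo 1 > /sys/bus/iio/devices/iio:device0/scan_elements/in_voltage0_en
 *	echo 1 > /sys/bus/iio/devices/iio:device0/scan_elements/in_timestamp_en
 *	echo 1 > /sys/bus/iio/devices/iio:device0/buffer/enable
 */
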
static ssize_t iio_scan_el_ts_show(struct device *dev,
				   struct device_attribute *attr,
				   char *buf)
{
	struct iio_buffer *buffer = to_iio_dev_attr(attr)->buffer;

	return sysfs_emit(buf, "%d\n", buffer->scan_timestamp);
}

static ssize_t iio_scan_el_ts_store(struct device *dev,
				    struct device_attribute *attr,
				    const char *buf,
				    size_t len)
{
	int ret;
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_buffer *buffer = to_iio_dev_attr(attr)->buffer;
	bool state;

	ret = strtobool(buf, &state);
	if (ret < 0)
		return ret;

	mutex_lock(&indio_dev->mlock);
	if (iio_buffer_is_active(buffer)) {
		ret = -EBUSY;
		goto error_ret;
	}
	buffer->scan_timestamp = state;
error_ret:
	mutex_unlock(&indio_dev->mlock);

	return ret ? ret : len;
}

static int iio_buffer_add_channel_sysfs(struct iio_dev *indio_dev,
					struct iio_buffer *buffer,
					const struct iio_chan_spec *chan)
{
	int ret, attrcount = 0;

	ret = __iio_add_chan_devattr("index",
				     chan,
				     &iio_show_scan_index,
				     NULL,
				     0,
				     IIO_SEPARATE,
				     &indio_dev->dev,
				     buffer,
				     &buffer->buffer_attr_list);
	if (ret)
		return ret;
	attrcount++;
	ret = __iio_add_chan_devattr("type",
				     chan,
				     &iio_show_fixed_type,
				     NULL,
				     0,
				     IIO_SEPARATE,
				     &indio_dev->dev,
				     buffer,
				     &buffer->buffer_attr_list);
	if (ret)
		return ret;
	attrcount++;
	if (chan->type != IIO_TIMESTAMP)
		ret = __iio_add_chan_devattr("en",
					     chan,
					     &iio_scan_el_show,
					     &iio_scan_el_store,
					     chan->scan_index,
					     IIO_SEPARATE,
					     &indio_dev->dev,
					     buffer,
					     &buffer->buffer_attr_list);
	else
		ret = __iio_add_chan_devattr("en",
					     chan,
					     &iio_scan_el_ts_show,
					     &iio_scan_el_ts_store,
					     chan->scan_index,
					     IIO_SEPARATE,
					     &indio_dev->dev,
					     buffer,
					     &buffer->buffer_attr_list);
	if (ret)
		return ret;
	attrcount++;
	return attrcount;
}

static ssize_t iio_buffer_read_length(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	struct iio_buffer *buffer = to_iio_dev_attr(attr)->buffer;

	return sysfs_emit(buf, "%d\n", buffer->length);
}

static ssize_t iio_buffer_write_length(struct device *dev,
				       struct device_attribute *attr,
				       const char *buf, size_t len)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_buffer *buffer = to_iio_dev_attr(attr)->buffer;
	unsigned int val;
	int ret;

	ret = kstrtouint(buf, 10, &val);
	if (ret)
		return ret;

	if (val == buffer->length)
		return len;

	mutex_lock(&indio_dev->mlock);
	if (iio_buffer_is_active(buffer)) {
		ret = -EBUSY;
	} else {
		buffer->access->set_length(buffer, val);
		ret = 0;
	}
	if (ret)
		goto out;
	if (buffer->length && buffer->length < buffer->watermark)
		buffer->watermark = buffer->length;
out:
	mutex_unlock(&indio_dev->mlock);

	return ret ? ret : len;
}

static ssize_t iio_buffer_show_enable(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	struct iio_buffer *buffer = to_iio_dev_attr(attr)->buffer;

	return sysfs_emit(buf, "%d\n", iio_buffer_is_active(buffer));
}

static unsigned int iio_storage_bytes_for_si(struct iio_dev *indio_dev,
					     unsigned int scan_index)
{
	const struct iio_chan_spec *ch;
	unsigned int bytes;

	ch = iio_find_channel_from_si(indio_dev, scan_index);
	bytes = ch->scan_type.storagebits / 8;
	if (ch->scan_type.repeat > 1)
		bytes *= ch->scan_type.repeat;
	return bytes;
}

static unsigned int iio_storage_bytes_for_timestamp(struct iio_dev *indio_dev)
{
	return iio_storage_bytes_for_si(indio_dev,
					indio_dev->scan_index_timestamp);
}

static int iio_compute_scan_bytes(struct iio_dev *indio_dev,
				  const unsigned long *mask, bool timestamp)
{
	unsigned int bytes = 0;
	int length, i, largest = 0;

	/* How much space will the demuxed element take? */
	for_each_set_bit(i, mask,
			 indio_dev->masklength) {
		length = iio_storage_bytes_for_si(indio_dev, i);
		bytes = ALIGN(bytes, length);
		bytes += length;
		largest = max(largest, length);
	}

	if (timestamp) {
		length = iio_storage_bytes_for_timestamp(indio_dev);
		bytes = ALIGN(bytes, length);
		bytes += length;
		largest = max(largest, length);
	}

	bytes = ALIGN(bytes, largest);
	return bytes;
}

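/*
 * Worked example (illustrative): with two enabled channels storing 2 and 4
 * bytes plus an 8-byte timestamp, the loop above computes
 *	bytes = ALIGN(0, 2) + 2 = 2
 *	bytes = ALIGN(2, 4) + 4 = 8
 *	bytes = ALIGN(8, 8) + 8 = 16
 * and the final ALIGN(bytes, largest) = ALIGN(16, 8) = 16 keeps each scan
 * naturally aligned when scans are stored back to back.
 */
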
static void iio_buffer_activate(struct iio_dev *indio_dev,
				struct iio_buffer *buffer)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);

	iio_buffer_get(buffer);
	list_add(&buffer->buffer_list, &iio_dev_opaque->buffer_list);
}

static void iio_buffer_deactivate(struct iio_buffer *buffer)
{
	list_del_init(&buffer->buffer_list);
	wake_up_interruptible(&buffer->pollq);
	iio_buffer_put(buffer);
}

static void iio_buffer_deactivate_all(struct iio_dev *indio_dev)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
	struct iio_buffer *buffer, *_buffer;

	list_for_each_entry_safe(buffer, _buffer,
				 &iio_dev_opaque->buffer_list, buffer_list)
		iio_buffer_deactivate(buffer);
}

static int iio_buffer_enable(struct iio_buffer *buffer,
			     struct iio_dev *indio_dev)
{
	if (!buffer->access->enable)
		return 0;
	return buffer->access->enable(buffer, indio_dev);
}

static int iio_buffer_disable(struct iio_buffer *buffer,
			      struct iio_dev *indio_dev)
{
	if (!buffer->access->disable)
		return 0;
	return buffer->access->disable(buffer, indio_dev);
}

static void iio_buffer_update_bytes_per_datum(struct iio_dev *indio_dev,
					      struct iio_buffer *buffer)
{
	unsigned int bytes;

	if (!buffer->access->set_bytes_per_datum)
		return;

	bytes = iio_compute_scan_bytes(indio_dev, buffer->scan_mask,
				       buffer->scan_timestamp);

	buffer->access->set_bytes_per_datum(buffer, bytes);
}

static int iio_buffer_request_update(struct iio_dev *indio_dev,
				     struct iio_buffer *buffer)
{
	int ret;

	iio_buffer_update_bytes_per_datum(indio_dev, buffer);
	if (buffer->access->request_update) {
		ret = buffer->access->request_update(buffer);
		if (ret) {
			dev_dbg(&indio_dev->dev,
				"Buffer not started: buffer parameter update failed (%d)\n",
				ret);
			return ret;
		}
	}

	return 0;
}

static void iio_free_scan_mask(struct iio_dev *indio_dev,
			       const unsigned long *mask)
{
	/* If the mask is dynamically allocated free it, otherwise do nothing */
	if (!indio_dev->available_scan_masks)
		bitmap_free(mask);
}

struct iio_device_config {
	unsigned int mode;
	unsigned int watermark;
	const unsigned long *scan_mask;
	unsigned int scan_bytes;
	bool scan_timestamp;
};

static int iio_verify_update(struct iio_dev *indio_dev,
	struct iio_buffer *insert_buffer, struct iio_buffer *remove_buffer,
	struct iio_device_config *config)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
	unsigned long *compound_mask;
	const unsigned long *scan_mask;
	bool strict_scanmask = false;
	struct iio_buffer *buffer;
	bool scan_timestamp;
	unsigned int modes;

	if (insert_buffer &&
	    bitmap_empty(insert_buffer->scan_mask, indio_dev->masklength)) {
		dev_dbg(&indio_dev->dev,
			"At least one scan element must be enabled first\n");
		return -EINVAL;
	}

	memset(config, 0, sizeof(*config));
	config->watermark = ~0;

	/*
	 * If there is just one buffer and we are removing it there is nothing
	 * to verify.
	 */
	if (remove_buffer && !insert_buffer &&
	    list_is_singular(&iio_dev_opaque->buffer_list))
		return 0;

	modes = indio_dev->modes;

	list_for_each_entry(buffer, &iio_dev_opaque->buffer_list, buffer_list) {
		if (buffer == remove_buffer)
			continue;
		modes &= buffer->access->modes;
		config->watermark = min(config->watermark, buffer->watermark);
	}

	if (insert_buffer) {
		modes &= insert_buffer->access->modes;
		config->watermark = min(config->watermark,
					insert_buffer->watermark);
	}

	/* Definitely possible for devices to support both of these. */
	if ((modes & INDIO_BUFFER_TRIGGERED) && indio_dev->trig) {
		config->mode = INDIO_BUFFER_TRIGGERED;
	} else if (modes & INDIO_BUFFER_HARDWARE) {
		/*
		 * Keep things simple for now and only allow a single buffer to
		 * be connected in hardware mode.
		 */
		if (insert_buffer && !list_empty(&iio_dev_opaque->buffer_list))
			return -EINVAL;
		config->mode = INDIO_BUFFER_HARDWARE;
		strict_scanmask = true;
	} else if (modes & INDIO_BUFFER_SOFTWARE) {
		config->mode = INDIO_BUFFER_SOFTWARE;
	} else {
		/* Can only occur on first buffer */
		if (indio_dev->modes & INDIO_BUFFER_TRIGGERED)
			dev_dbg(&indio_dev->dev, "Buffer not started: no trigger\n");
		return -EINVAL;
	}

	/* What scan mask do we actually have? */
	compound_mask = bitmap_zalloc(indio_dev->masklength, GFP_KERNEL);
	if (compound_mask == NULL)
		return -ENOMEM;

	scan_timestamp = false;

	list_for_each_entry(buffer, &iio_dev_opaque->buffer_list, buffer_list) {
		if (buffer == remove_buffer)
			continue;
		bitmap_or(compound_mask, compound_mask, buffer->scan_mask,
			  indio_dev->masklength);
		scan_timestamp |= buffer->scan_timestamp;
	}

	if (insert_buffer) {
		bitmap_or(compound_mask, compound_mask,
			  insert_buffer->scan_mask, indio_dev->masklength);
		scan_timestamp |= insert_buffer->scan_timestamp;
	}

	if (indio_dev->available_scan_masks) {
		scan_mask = iio_scan_mask_match(indio_dev->available_scan_masks,
						indio_dev->masklength,
						compound_mask,
						strict_scanmask);
		bitmap_free(compound_mask);
		if (scan_mask == NULL)
			return -EINVAL;
	} else {
		scan_mask = compound_mask;
	}

	config->scan_bytes = iio_compute_scan_bytes(indio_dev,
						    scan_mask, scan_timestamp);
	config->scan_mask = scan_mask;
	config->scan_timestamp = scan_timestamp;

	return 0;
}

/**
 * struct iio_demux_table - table describing demux memcpy ops
 * @from:	index to copy from
 * @to:		index to copy to
 * @length:	how many bytes to copy
 * @l:		list head used for management
 */
struct iio_demux_table {
	unsigned int from;
	unsigned int to;
	unsigned int length;
	struct list_head l;
};

static void iio_buffer_demux_free(struct iio_buffer *buffer)
{
	struct iio_demux_table *p, *q;

	list_for_each_entry_safe(p, q, &buffer->demux_list, l) {
		list_del(&p->l);
		kfree(p);
	}
}

static int iio_buffer_add_demux(struct iio_buffer *buffer,
	struct iio_demux_table **p, unsigned int in_loc, unsigned int out_loc,
	unsigned int length)
{
	if (*p && (*p)->from + (*p)->length == in_loc &&
	    (*p)->to + (*p)->length == out_loc) {
		/* Extend the previous entry when the copies are contiguous */
		(*p)->length += length;
	} else {
		*p = kmalloc(sizeof(**p), GFP_KERNEL);
		if (*p == NULL)
			return -ENOMEM;
		(*p)->from = in_loc;
		(*p)->to = out_loc;
		(*p)->length = length;
		list_add_tail(&(*p)->l, &buffer->demux_list);
	}

	return 0;
}

static int iio_buffer_update_demux(struct iio_dev *indio_dev,
				   struct iio_buffer *buffer)
{
	int ret, in_ind = -1, out_ind, length;
	unsigned int in_loc = 0, out_loc = 0;
	struct iio_demux_table *p = NULL;

	/* Clear out any old demux */
	iio_buffer_demux_free(buffer);
	kfree(buffer->demux_bounce);
	buffer->demux_bounce = NULL;

	/* First work out which scan mode we will actually have */
	if (bitmap_equal(indio_dev->active_scan_mask,
			 buffer->scan_mask,
			 indio_dev->masklength))
		return 0;

	/* Now we have the two masks, work from least sig and build up sizes */
	for_each_set_bit(out_ind,
			 buffer->scan_mask,
			 indio_dev->masklength) {
		in_ind = find_next_bit(indio_dev->active_scan_mask,
				       indio_dev->masklength,
				       in_ind + 1);
		while (in_ind != out_ind) {
			length = iio_storage_bytes_for_si(indio_dev, in_ind);
			/* Make sure we are aligned */
			in_loc = roundup(in_loc, length) + length;
			in_ind = find_next_bit(indio_dev->active_scan_mask,
					       indio_dev->masklength,
					       in_ind + 1);
		}
		length = iio_storage_bytes_for_si(indio_dev, in_ind);
		out_loc = roundup(out_loc, length);
		in_loc = roundup(in_loc, length);
		ret = iio_buffer_add_demux(buffer, &p, in_loc, out_loc, length);
		if (ret)
			goto error_clear_mux_table;
		out_loc += length;
		in_loc += length;
	}
	/* Relies on scan_timestamp being last */
	if (buffer->scan_timestamp) {
		length = iio_storage_bytes_for_timestamp(indio_dev);
		out_loc = roundup(out_loc, length);
		in_loc = roundup(in_loc, length);
		ret = iio_buffer_add_demux(buffer, &p, in_loc, out_loc, length);
		if (ret)
			goto error_clear_mux_table;
		out_loc += length;
		in_loc += length;
	}
	buffer->demux_bounce = kzalloc(out_loc, GFP_KERNEL);
	if (buffer->demux_bounce == NULL) {
		ret = -ENOMEM;
		goto error_clear_mux_table;
	}
	return 0;

error_clear_mux_table:
	iio_buffer_demux_free(buffer);

	return ret;
}

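/*
 * Worked example (illustrative): assume three 2-byte channels, an active
 * scan mask of 0b111 (device layout: ch0 at offset 0, ch1 at 2, ch2 at 4)
 * and a buffer scan mask of 0b101. The loop above then builds two demux
 * entries, { .from = 0, .to = 0, .length = 2 } and
 * { .from = 4, .to = 2, .length = 2 }, plus a 4-byte bounce buffer.
 */
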
static int iio_update_demux(struct iio_dev *indio_dev)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
	struct iio_buffer *buffer;
	int ret;

	list_for_each_entry(buffer, &iio_dev_opaque->buffer_list, buffer_list) {
		ret = iio_buffer_update_demux(indio_dev, buffer);
		if (ret < 0)
			goto error_clear_mux_table;
	}
	return 0;

error_clear_mux_table:
	list_for_each_entry(buffer, &iio_dev_opaque->buffer_list, buffer_list)
		iio_buffer_demux_free(buffer);

	return ret;
}

static int iio_enable_buffers(struct iio_dev *indio_dev,
			      struct iio_device_config *config)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
	struct iio_buffer *buffer;
	int ret;

	indio_dev->active_scan_mask = config->scan_mask;
	indio_dev->scan_timestamp = config->scan_timestamp;
	indio_dev->scan_bytes = config->scan_bytes;
	indio_dev->currentmode = config->mode;

	iio_update_demux(indio_dev);

	/* Wind up again */
	if (indio_dev->setup_ops->preenable) {
		ret = indio_dev->setup_ops->preenable(indio_dev);
		if (ret) {
			dev_dbg(&indio_dev->dev,
				"Buffer not started: buffer preenable failed (%d)\n", ret);
			goto err_undo_config;
		}
	}

	if (indio_dev->info->update_scan_mode) {
		ret = indio_dev->info
			->update_scan_mode(indio_dev,
					   indio_dev->active_scan_mask);
		if (ret < 0) {
			dev_dbg(&indio_dev->dev,
				"Buffer not started: update scan mode failed (%d)\n",
				ret);
			goto err_run_postdisable;
		}
	}

	if (indio_dev->info->hwfifo_set_watermark)
		indio_dev->info->hwfifo_set_watermark(indio_dev,
			config->watermark);

	list_for_each_entry(buffer, &iio_dev_opaque->buffer_list, buffer_list) {
		ret = iio_buffer_enable(buffer, indio_dev);
		if (ret)
			goto err_disable_buffers;
	}

	if (indio_dev->currentmode == INDIO_BUFFER_TRIGGERED) {
		ret = iio_trigger_attach_poll_func(indio_dev->trig,
						   indio_dev->pollfunc);
		if (ret)
			goto err_disable_buffers;
	}

	if (indio_dev->setup_ops->postenable) {
		ret = indio_dev->setup_ops->postenable(indio_dev);
		if (ret) {
			dev_dbg(&indio_dev->dev,
				"Buffer not started: postenable failed (%d)\n", ret);
			goto err_detach_pollfunc;
		}
	}

	return 0;

err_detach_pollfunc:
	if (indio_dev->currentmode == INDIO_BUFFER_TRIGGERED) {
		iio_trigger_detach_poll_func(indio_dev->trig,
					     indio_dev->pollfunc);
	}
err_disable_buffers:
	list_for_each_entry_continue_reverse(buffer, &iio_dev_opaque->buffer_list,
					     buffer_list)
		iio_buffer_disable(buffer, indio_dev);
err_run_postdisable:
	if (indio_dev->setup_ops->postdisable)
		indio_dev->setup_ops->postdisable(indio_dev);
err_undo_config:
	indio_dev->currentmode = INDIO_DIRECT_MODE;
	indio_dev->active_scan_mask = NULL;

	return ret;
}

static int iio_disable_buffers(struct iio_dev *indio_dev)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
	struct iio_buffer *buffer;
	int ret = 0;
	int ret2;

	/* Wind down existing buffers - iff there are any */
	if (list_empty(&iio_dev_opaque->buffer_list))
		return 0;

	/*
	 * If things go wrong at some step in disable we still need to continue
	 * to perform the other steps, otherwise we leave the device in an
	 * inconsistent state. We return the error code for the first error we
	 * encountered.
	 */

	if (indio_dev->setup_ops->predisable) {
		ret2 = indio_dev->setup_ops->predisable(indio_dev);
		if (ret2 && !ret)
			ret = ret2;
	}

	if (indio_dev->currentmode == INDIO_BUFFER_TRIGGERED) {
		iio_trigger_detach_poll_func(indio_dev->trig,
					     indio_dev->pollfunc);
	}

	list_for_each_entry(buffer, &iio_dev_opaque->buffer_list, buffer_list) {
		ret2 = iio_buffer_disable(buffer, indio_dev);
		if (ret2 && !ret)
			ret = ret2;
	}

	if (indio_dev->setup_ops->postdisable) {
		ret2 = indio_dev->setup_ops->postdisable(indio_dev);
		if (ret2 && !ret)
			ret = ret2;
	}

	iio_free_scan_mask(indio_dev, indio_dev->active_scan_mask);
	indio_dev->active_scan_mask = NULL;
	indio_dev->currentmode = INDIO_DIRECT_MODE;

	return ret;
}

static int __iio_update_buffers(struct iio_dev *indio_dev,
				struct iio_buffer *insert_buffer,
				struct iio_buffer *remove_buffer)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
	struct iio_device_config new_config;
	int ret;

	ret = iio_verify_update(indio_dev, insert_buffer, remove_buffer,
				&new_config);
	if (ret)
		return ret;

	if (insert_buffer) {
		ret = iio_buffer_request_update(indio_dev, insert_buffer);
		if (ret)
			goto err_free_config;
	}

	ret = iio_disable_buffers(indio_dev);
	if (ret)
		goto err_deactivate_all;

	if (remove_buffer)
		iio_buffer_deactivate(remove_buffer);
	if (insert_buffer)
		iio_buffer_activate(indio_dev, insert_buffer);

	/* If no buffers in list, we are done */
	if (list_empty(&iio_dev_opaque->buffer_list))
		return 0;

	ret = iio_enable_buffers(indio_dev, &new_config);
	if (ret)
		goto err_deactivate_all;

	return 0;

err_deactivate_all:
	/*
	 * We've already verified that the config is valid earlier. If things go
	 * wrong in either enable or disable the most likely reason is an IO
	 * error from the device. In this case there is no good recovery
	 * strategy. Just make sure to disable everything and leave the device
	 * in a sane state. With a bit of luck the device might come back to
	 * life again later and userspace can try again.
	 */
	iio_buffer_deactivate_all(indio_dev);

err_free_config:
	iio_free_scan_mask(indio_dev, new_config.scan_mask);
	return ret;
}

int iio_update_buffers(struct iio_dev *indio_dev,
		       struct iio_buffer *insert_buffer,
		       struct iio_buffer *remove_buffer)
{
	int ret;

	if (insert_buffer == remove_buffer)
		return 0;

	mutex_lock(&indio_dev->info_exist_lock);
	mutex_lock(&indio_dev->mlock);

	if (insert_buffer && iio_buffer_is_active(insert_buffer))
		insert_buffer = NULL;

	if (remove_buffer && !iio_buffer_is_active(remove_buffer))
		remove_buffer = NULL;

	if (!insert_buffer && !remove_buffer) {
		ret = 0;
		goto out_unlock;
	}

	if (indio_dev->info == NULL) {
		ret = -ENODEV;
		goto out_unlock;
	}

	ret = __iio_update_buffers(indio_dev, insert_buffer, remove_buffer);

out_unlock:
	mutex_unlock(&indio_dev->mlock);
	mutex_unlock(&indio_dev->info_exist_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_update_buffers);

void iio_disable_all_buffers(struct iio_dev *indio_dev)
{
	iio_disable_buffers(indio_dev);
	iio_buffer_deactivate_all(indio_dev);
}

static ssize_t iio_buffer_store_enable(struct device *dev,
				       struct device_attribute *attr,
				       const char *buf,
				       size_t len)
{
	int ret;
	bool requested_state;
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_buffer *buffer = to_iio_dev_attr(attr)->buffer;
	bool inlist;

	ret = strtobool(buf, &requested_state);
	if (ret < 0)
		return ret;

	mutex_lock(&indio_dev->mlock);

	/* Find out if it is in the list */
	inlist = iio_buffer_is_active(buffer);
	/* Already in desired state */
	if (inlist == requested_state)
		goto done;

	if (requested_state)
		ret = __iio_update_buffers(indio_dev, buffer, NULL);
	else
		ret = __iio_update_buffers(indio_dev, NULL, buffer);

done:
	mutex_unlock(&indio_dev->mlock);
	return (ret < 0) ? ret : len;
}

static ssize_t iio_buffer_show_watermark(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct iio_buffer *buffer = to_iio_dev_attr(attr)->buffer;

	return sysfs_emit(buf, "%u\n", buffer->watermark);
}

static ssize_t iio_buffer_store_watermark(struct device *dev,
					  struct device_attribute *attr,
					  const char *buf,
					  size_t len)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_buffer *buffer = to_iio_dev_attr(attr)->buffer;
	unsigned int val;
	int ret;

	ret = kstrtouint(buf, 10, &val);
	if (ret)
		return ret;
	if (!val)
		return -EINVAL;

	mutex_lock(&indio_dev->mlock);

	if (val > buffer->length) {
		ret = -EINVAL;
		goto out;
	}

	if (iio_buffer_is_active(buffer)) {
		ret = -EBUSY;
		goto out;
	}

	buffer->watermark = val;
out:
	mutex_unlock(&indio_dev->mlock);

	return ret ? ret : len;
}

static ssize_t iio_dma_show_data_available(struct device *dev,
					   struct device_attribute *attr,
					   char *buf)
{
	struct iio_buffer *buffer = to_iio_dev_attr(attr)->buffer;

	return sysfs_emit(buf, "%zu\n", iio_buffer_data_available(buffer));
}

static DEVICE_ATTR(length, S_IRUGO | S_IWUSR, iio_buffer_read_length,
		   iio_buffer_write_length);
static struct device_attribute dev_attr_length_ro = __ATTR(length,
	S_IRUGO, iio_buffer_read_length, NULL);
static DEVICE_ATTR(enable, S_IRUGO | S_IWUSR,
		   iio_buffer_show_enable, iio_buffer_store_enable);
static DEVICE_ATTR(watermark, S_IRUGO | S_IWUSR,
		   iio_buffer_show_watermark, iio_buffer_store_watermark);
static struct device_attribute dev_attr_watermark_ro = __ATTR(watermark,
	S_IRUGO, iio_buffer_show_watermark, NULL);
static DEVICE_ATTR(data_available, S_IRUGO,
		   iio_dma_show_data_available, NULL);

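/*
 * Example (userspace, illustrative): the attributes above surface under
 * /sys/bus/iio/devices/iio:deviceX/, e.g. for the legacy first buffer:
 *
 *	echo 256 > buffer/length	# ring size in scans
 *	echo  64 > buffer/watermark	# wake readers once 64 scans queue up
 *	echo   1 > buffer/enable	# start capturing
 *	cat buffer/data_available	# scans currently queued
 */
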
static struct attribute *iio_buffer_attrs[] = {
	&dev_attr_length.attr,
	&dev_attr_enable.attr,
	&dev_attr_watermark.attr,
	&dev_attr_data_available.attr,
};

#define to_dev_attr(_attr) container_of(_attr, struct device_attribute, attr)

static struct attribute *iio_buffer_wrap_attr(struct iio_buffer *buffer,
					      struct attribute *attr)
{
	struct device_attribute *dattr = to_dev_attr(attr);
	struct iio_dev_attr *iio_attr;

	iio_attr = kzalloc(sizeof(*iio_attr), GFP_KERNEL);
	if (!iio_attr)
		return NULL;

	iio_attr->buffer = buffer;
	memcpy(&iio_attr->dev_attr, dattr, sizeof(iio_attr->dev_attr));
	iio_attr->dev_attr.attr.name = kstrdup_const(attr->name, GFP_KERNEL);

	list_add(&iio_attr->l, &buffer->buffer_attr_list);

	return &iio_attr->dev_attr.attr;
}

static int iio_buffer_register_legacy_sysfs_groups(struct iio_dev *indio_dev,
						   struct attribute **buffer_attrs,
						   int buffer_attrcount,
						   int scan_el_attrcount)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
	struct attribute_group *group;
	struct attribute **attrs;
	int ret;

	attrs = kcalloc(buffer_attrcount + 1, sizeof(*attrs), GFP_KERNEL);
	if (!attrs)
		return -ENOMEM;

	memcpy(attrs, buffer_attrs, buffer_attrcount * sizeof(*attrs));

	group = &iio_dev_opaque->legacy_buffer_group;
	group->attrs = attrs;
	group->name = "buffer";

	ret = iio_device_register_sysfs_group(indio_dev, group);
	if (ret)
		goto error_free_buffer_attrs;

	attrs = kcalloc(scan_el_attrcount + 1, sizeof(*attrs), GFP_KERNEL);
	if (!attrs) {
		ret = -ENOMEM;
		goto error_free_buffer_attrs;
	}

	memcpy(attrs, &buffer_attrs[buffer_attrcount],
	       scan_el_attrcount * sizeof(*attrs));

	group = &iio_dev_opaque->legacy_scan_el_group;
	group->attrs = attrs;
	group->name = "scan_elements";

	ret = iio_device_register_sysfs_group(indio_dev, group);
	if (ret)
		goto error_free_scan_el_attrs;

	return 0;

error_free_buffer_attrs:
	kfree(iio_dev_opaque->legacy_buffer_group.attrs);
error_free_scan_el_attrs:
	kfree(iio_dev_opaque->legacy_scan_el_group.attrs);

	return ret;
}

static void iio_buffer_unregister_legacy_sysfs_groups(struct iio_dev *indio_dev)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);

	kfree(iio_dev_opaque->legacy_buffer_group.attrs);
	kfree(iio_dev_opaque->legacy_scan_el_group.attrs);
}

static int iio_buffer_chrdev_release(struct inode *inode, struct file *filep)
{
	struct iio_dev_buffer_pair *ib = filep->private_data;
	struct iio_dev *indio_dev = ib->indio_dev;
	struct iio_buffer *buffer = ib->buffer;

	wake_up(&buffer->pollq);

	kfree(ib);
	clear_bit(IIO_BUSY_BIT_POS, &buffer->flags);
	iio_device_put(indio_dev);

	return 0;
}

static const struct file_operations iio_buffer_chrdev_fileops = {
	.owner = THIS_MODULE,
	.llseek = noop_llseek,
	.read = iio_buffer_read,
	.poll = iio_buffer_poll,
	.release = iio_buffer_chrdev_release,
};

static long iio_device_buffer_getfd(struct iio_dev *indio_dev, unsigned long arg)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
	int __user *ival = (int __user *)arg;
	struct iio_dev_buffer_pair *ib;
	struct iio_buffer *buffer;
	int fd, idx, ret;

	if (copy_from_user(&idx, ival, sizeof(idx)))
		return -EFAULT;

	if (idx >= iio_dev_opaque->attached_buffers_cnt)
		return -ENODEV;

	iio_device_get(indio_dev);

	buffer = iio_dev_opaque->attached_buffers[idx];

	if (test_and_set_bit(IIO_BUSY_BIT_POS, &buffer->flags)) {
		ret = -EBUSY;
		goto error_iio_dev_put;
	}

	ib = kzalloc(sizeof(*ib), GFP_KERNEL);
	if (!ib) {
		ret = -ENOMEM;
		goto error_clear_busy_bit;
	}

	ib->indio_dev = indio_dev;
	ib->buffer = buffer;

	fd = anon_inode_getfd("iio:buffer", &iio_buffer_chrdev_fileops,
			      ib, O_RDWR | O_CLOEXEC);
	if (fd < 0) {
		ret = fd;
		goto error_free_ib;
	}

	if (copy_to_user(ival, &fd, sizeof(fd))) {
		put_unused_fd(fd);
		ret = -EFAULT;
		goto error_free_ib;
	}

	return fd;

error_free_ib:
	kfree(ib);
error_clear_busy_bit:
	clear_bit(IIO_BUSY_BIT_POS, &buffer->flags);
error_iio_dev_put:
	iio_device_put(indio_dev);
	return ret;
}

static long iio_device_buffer_ioctl(struct iio_dev *indio_dev, struct file *filp,
				    unsigned int cmd, unsigned long arg)
{
	switch (cmd) {
	case IIO_BUFFER_GET_FD_IOCTL:
		return iio_device_buffer_getfd(indio_dev, arg);
	default:
		return IIO_IOCTL_UNHANDLED;
	}
}

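/*
 * Example (userspace, illustrative): obtaining a dedicated file descriptor
 * for an attached buffer via the ioctl dispatched above.
 * IIO_BUFFER_GET_FD_IOCTL comes from the UAPI header <linux/iio/buffer.h>;
 * the int argument carries the buffer index in and the new fd out.
 *
 *	int fd = open("/dev/iio:device0", O_RDONLY | O_CLOEXEC);
 *	int buf_fd = 1;		// in: index of the attached buffer
 *	char scan[64];		// sized for the enabled scan elements
 *
 *	if (fd >= 0 && ioctl(fd, IIO_BUFFER_GET_FD_IOCTL, &buf_fd) == 0) {
 *		// buf_fd is now an anonymous fd; read()/poll() scans on it
 *		read(buf_fd, scan, sizeof(scan));
 *		close(buf_fd);
 *	}
 */
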
static int __iio_buffer_alloc_sysfs_and_mask(struct iio_buffer *buffer,
					     struct iio_dev *indio_dev,
					     int index)
{
	struct iio_dev_attr *p;
	struct attribute **attr;
	int ret, i, attrn, scan_el_attrcount, buffer_attrcount;
	const struct iio_chan_spec *channels;

	buffer_attrcount = 0;
	if (buffer->attrs) {
		while (buffer->attrs[buffer_attrcount] != NULL)
			buffer_attrcount++;
	}

	scan_el_attrcount = 0;
	INIT_LIST_HEAD(&buffer->buffer_attr_list);
	channels = indio_dev->channels;
	if (channels) {
		/* new magic */
		for (i = 0; i < indio_dev->num_channels; i++) {
			if (channels[i].scan_index < 0)
				continue;

			ret = iio_buffer_add_channel_sysfs(indio_dev, buffer,
							   &channels[i]);
			if (ret < 0)
				goto error_cleanup_dynamic;
			scan_el_attrcount += ret;
			if (channels[i].type == IIO_TIMESTAMP)
				indio_dev->scan_index_timestamp =
					channels[i].scan_index;
		}
		if (indio_dev->masklength && buffer->scan_mask == NULL) {
			buffer->scan_mask = bitmap_zalloc(indio_dev->masklength,
							  GFP_KERNEL);
			if (buffer->scan_mask == NULL) {
				ret = -ENOMEM;
				goto error_cleanup_dynamic;
			}
		}
	}

	attrn = buffer_attrcount + scan_el_attrcount + ARRAY_SIZE(iio_buffer_attrs);
	attr = kcalloc(attrn + 1, sizeof(*attr), GFP_KERNEL);
	if (!attr) {
		ret = -ENOMEM;
		goto error_free_scan_mask;
	}

	memcpy(attr, iio_buffer_attrs, sizeof(iio_buffer_attrs));
	if (!buffer->access->set_length)
		attr[0] = &dev_attr_length_ro.attr;

	if (buffer->access->flags & INDIO_BUFFER_FLAG_FIXED_WATERMARK)
		attr[2] = &dev_attr_watermark_ro.attr;

	if (buffer->attrs)
		memcpy(&attr[ARRAY_SIZE(iio_buffer_attrs)], buffer->attrs,
		       sizeof(struct attribute *) * buffer_attrcount);

	buffer_attrcount += ARRAY_SIZE(iio_buffer_attrs);

	for (i = 0; i < buffer_attrcount; i++) {
		struct attribute *wrapped;

		wrapped = iio_buffer_wrap_attr(buffer, attr[i]);
		if (!wrapped) {
			ret = -ENOMEM;
			goto error_free_scan_mask;
		}
		attr[i] = wrapped;
	}

	attrn = 0;
	list_for_each_entry(p, &buffer->buffer_attr_list, l)
		attr[attrn++] = &p->dev_attr.attr;

	buffer->buffer_group.name = kasprintf(GFP_KERNEL, "buffer%d", index);
	if (!buffer->buffer_group.name) {
		ret = -ENOMEM;
		goto error_free_buffer_attrs;
	}

	buffer->buffer_group.attrs = attr;

	ret = iio_device_register_sysfs_group(indio_dev, &buffer->buffer_group);
	if (ret)
		goto error_free_buffer_attr_group_name;

	/* we only need to register the legacy groups for the first buffer */
	if (index > 0)
		return 0;

	ret = iio_buffer_register_legacy_sysfs_groups(indio_dev, attr,
						      buffer_attrcount,
						      scan_el_attrcount);
	if (ret)
		goto error_free_buffer_attr_group_name;

	return 0;

error_free_buffer_attr_group_name:
	kfree(buffer->buffer_group.name);
error_free_buffer_attrs:
	kfree(buffer->buffer_group.attrs);
error_free_scan_mask:
	bitmap_free(buffer->scan_mask);
error_cleanup_dynamic:
	iio_free_chan_devattr_list(&buffer->buffer_attr_list);

	return ret;
}

static void __iio_buffer_free_sysfs_and_mask(struct iio_buffer *buffer)
{
	bitmap_free(buffer->scan_mask);
	kfree(buffer->buffer_group.name);
	kfree(buffer->buffer_group.attrs);
	iio_free_chan_devattr_list(&buffer->buffer_attr_list);
}

int iio_buffers_alloc_sysfs_and_mask(struct iio_dev *indio_dev)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
	const struct iio_chan_spec *channels;
	struct iio_buffer *buffer;
	int unwind_idx;
	int ret, i;
	size_t sz;

	channels = indio_dev->channels;
	if (channels) {
		int ml = indio_dev->masklength;

		for (i = 0; i < indio_dev->num_channels; i++)
			ml = max(ml, channels[i].scan_index + 1);
		indio_dev->masklength = ml;
	}

	if (!iio_dev_opaque->attached_buffers_cnt)
		return 0;

	for (i = 0; i < iio_dev_opaque->attached_buffers_cnt; i++) {
		buffer = iio_dev_opaque->attached_buffers[i];
		ret = __iio_buffer_alloc_sysfs_and_mask(buffer, indio_dev, i);
		if (ret) {
			unwind_idx = i - 1;
			goto error_unwind_sysfs_and_mask;
		}
	}
	unwind_idx = iio_dev_opaque->attached_buffers_cnt - 1;

	sz = sizeof(*(iio_dev_opaque->buffer_ioctl_handler));
	iio_dev_opaque->buffer_ioctl_handler = kzalloc(sz, GFP_KERNEL);
	if (!iio_dev_opaque->buffer_ioctl_handler) {
		ret = -ENOMEM;
		goto error_unwind_sysfs_and_mask;
	}

	iio_dev_opaque->buffer_ioctl_handler->ioctl = iio_device_buffer_ioctl;
	iio_device_ioctl_handler_register(indio_dev,
					  iio_dev_opaque->buffer_ioctl_handler);

	return 0;

error_unwind_sysfs_and_mask:
	for (; unwind_idx >= 0; unwind_idx--) {
		buffer = iio_dev_opaque->attached_buffers[unwind_idx];
		__iio_buffer_free_sysfs_and_mask(buffer);
	}
	return ret;
}

void iio_buffers_free_sysfs_and_mask(struct iio_dev *indio_dev)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
	struct iio_buffer *buffer;
	int i;

	if (!iio_dev_opaque->attached_buffers_cnt)
		return;

	iio_device_ioctl_handler_unregister(iio_dev_opaque->buffer_ioctl_handler);
	kfree(iio_dev_opaque->buffer_ioctl_handler);

	iio_buffer_unregister_legacy_sysfs_groups(indio_dev);

	for (i = iio_dev_opaque->attached_buffers_cnt - 1; i >= 0; i--) {
		buffer = iio_dev_opaque->attached_buffers[i];
		__iio_buffer_free_sysfs_and_mask(buffer);
	}
}

/**
 * iio_validate_scan_mask_onehot() - Validates that exactly one channel is selected
 * @indio_dev: the iio device
 * @mask: scan mask to be checked
 *
 * Return true if exactly one bit is set in the scan mask, false otherwise. It
 * can be used for devices where only one channel can be active for sampling at
 * a time.
 */
bool iio_validate_scan_mask_onehot(struct iio_dev *indio_dev,
				   const unsigned long *mask)
{
	return bitmap_weight(mask, indio_dev->masklength) == 1;
}
EXPORT_SYMBOL_GPL(iio_validate_scan_mask_onehot);

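/*
 * Example (hypothetical driver, for illustration): hardware that can only
 * stream a single channel at a time plugs this helper into its buffer
 * setup ops:
 *
 *	static const struct iio_buffer_setup_ops my_setup_ops = {
 *		.validate_scan_mask = iio_validate_scan_mask_onehot,
 *	};
 *
 *	indio_dev->setup_ops = &my_setup_ops;
 */
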
static const void *iio_demux(struct iio_buffer *buffer,
			     const void *datain)
{
	struct iio_demux_table *t;

	if (list_empty(&buffer->demux_list))
		return datain;
	list_for_each_entry(t, &buffer->demux_list, l)
		memcpy(buffer->demux_bounce + t->to,
		       datain + t->from, t->length);

	return buffer->demux_bounce;
}

static int iio_push_to_buffer(struct iio_buffer *buffer, const void *data)
{
	const void *dataout = iio_demux(buffer, data);
	int ret;

	ret = buffer->access->store_to(buffer, dataout);
	if (ret)
		return ret;

	/*
	 * We can't just test for watermark to decide if we wake the poll queue
	 * because read may request fewer samples than the watermark.
	 */
	wake_up_interruptible_poll(&buffer->pollq, EPOLLIN | EPOLLRDNORM);
	return 0;
}

/**
 * iio_push_to_buffers() - push to all registered buffers
 * @indio_dev:	iio_dev structure for device.
 * @data:	Full scan.
 */
int iio_push_to_buffers(struct iio_dev *indio_dev, const void *data)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
	int ret;
	struct iio_buffer *buf;

	list_for_each_entry(buf, &iio_dev_opaque->buffer_list, buffer_list) {
		ret = iio_push_to_buffer(buf, data);
		if (ret < 0)
			return ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(iio_push_to_buffers);

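/*
 * Example (illustrative sketch; "my_*" names are hypothetical): the typical
 * caller is a pollfunc bottom half that assembles one scan laid out as
 * computed by iio_compute_scan_bytes() and pushes it to all active buffers.
 * Many drivers instead use the iio_push_to_buffers_with_timestamp() helper
 * from <linux/iio/buffer.h>, which fills in the trailing timestamp.
 *
 *	static irqreturn_t my_trigger_handler(int irq, void *p)
 *	{
 *		struct iio_poll_func *pf = p;
 *		struct iio_dev *indio_dev = pf->indio_dev;
 *		struct my_state *st = iio_priv(indio_dev);
 *
 *		my_device_read_scan(st, st->scan);
 *		iio_push_to_buffers(indio_dev, st->scan);
 *		iio_trigger_notify_done(indio_dev->trig);
 *		return IRQ_HANDLED;
 *	}
 */
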
/**
 * iio_buffer_release() - Free a buffer's resources
 * @ref: Pointer to the kref embedded in the iio_buffer struct
 *
 * This function is called when the last reference to the buffer has been
 * dropped. It will typically free all resources allocated by the buffer. Do not
 * call this function manually, always use iio_buffer_put() when done using a
 * buffer.
 */
static void iio_buffer_release(struct kref *ref)
{
	struct iio_buffer *buffer = container_of(ref, struct iio_buffer, ref);

	buffer->access->release(buffer);
}

/**
 * iio_buffer_get() - Grab a reference to the buffer
 * @buffer: The buffer to grab a reference for, may be NULL
 *
 * Returns the pointer to the buffer that was passed into the function.
 */
struct iio_buffer *iio_buffer_get(struct iio_buffer *buffer)
{
	if (buffer)
		kref_get(&buffer->ref);

	return buffer;
}
EXPORT_SYMBOL_GPL(iio_buffer_get);

/**
 * iio_buffer_put() - Release the reference to the buffer
 * @buffer: The buffer to release the reference for, may be NULL
 */
void iio_buffer_put(struct iio_buffer *buffer)
{
	if (buffer)
		kref_put(&buffer->ref, iio_buffer_release);
}
EXPORT_SYMBOL_GPL(iio_buffer_put);

/**
 * iio_device_attach_buffer - Attach a buffer to an IIO device
 * @indio_dev: The device the buffer should be attached to
 * @buffer: The buffer to attach to the device
 *
 * Return 0 if successful, negative if error.
 *
 * This function attaches a buffer to an IIO device. The buffer stays attached
 * to the device until the device is freed. For legacy reasons, the first
 * attached buffer will also be assigned to 'indio_dev->buffer'.
 * The array allocated here will be freed via the iio_device_detach_buffers()
 * call, which is handled by iio_device_free().
 */
int iio_device_attach_buffer(struct iio_dev *indio_dev,
			     struct iio_buffer *buffer)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
	struct iio_buffer **new, **old = iio_dev_opaque->attached_buffers;
	unsigned int cnt = iio_dev_opaque->attached_buffers_cnt;

	cnt++;

	new = krealloc(old, sizeof(*new) * cnt, GFP_KERNEL);
	if (!new)
		return -ENOMEM;
	iio_dev_opaque->attached_buffers = new;

	buffer = iio_buffer_get(buffer);

	/* first buffer is legacy; attach it to the IIO device directly */
	if (!indio_dev->buffer)
		indio_dev->buffer = buffer;

	iio_dev_opaque->attached_buffers[cnt - 1] = buffer;
	iio_dev_opaque->attached_buffers_cnt = cnt;

	return 0;
}
EXPORT_SYMBOL_GPL(iio_device_attach_buffer);

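/*
 * Example (illustrative): drivers rarely call iio_device_attach_buffer()
 * directly; helpers such as devm_iio_kfifo_buffer_setup() allocate a kfifo
 * buffer, wire up the setup ops and attach it here. A hand-rolled probe
 * would do roughly:
 *
 *	buffer = iio_kfifo_allocate();
 *	if (!buffer)
 *		return -ENOMEM;
 *	ret = iio_device_attach_buffer(indio_dev, buffer);
 */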