1 #include <linux/interrupt.h>
2 #include <linux/mutex.h>
3 #include <linux/kernel.h>
4 #include <linux/spi/spi.h>
5 #include <linux/slab.h>
6 #include <linux/bitops.h>
7 #include <linux/export.h>
9 #include <linux/iio/iio.h>
10 #include "../ring_sw.h"
11 #include <linux/iio/trigger_consumer.h>
12 #include "adis16400.h"
15 * adis16400_spi_read_burst() - read all data registers
16 * @dev: device associated with child of actual device (iio_dev or iio_trig)
17 * @rx: somewhere to pass back the value read (min size is 24 bytes)
/*
 * NOTE(review): this extract is incomplete — the kernel-doc opening marker,
 * the spi_transfer initializers, the error check around spi_sync() and the
 * final return are not visible here.  Comments below describe only what the
 * visible lines establish.
 */
19 static int adis16400_spi_read_burst(struct device *dev, u8 *rx)
21 struct spi_message msg;
22 struct iio_dev *indio_dev = dev_get_drvdata(dev);
23 struct adis16400_state *st = iio_priv(indio_dev);
/* Remember the configured bus speed so it can be restored afterwards. */
24 u32 old_speed_hz = st->us->max_speed_hz;
/* Two transfers follow: the burst-read command, then the burst payload
 * (initializer lines not visible in this extract). */
27 struct spi_transfer xfers[] = {
/* Serialise access to the st->tx scratch buffer across this message. */
39 mutex_lock(&st->buf_lock);
/* Burst mode is started by reading GLOB_CMD. */
40 st->tx[0] = ADIS16400_READ_REG(ADIS16400_GLOB_CMD);
43 spi_message_init(&msg);
44 spi_message_add_tail(&xfers[0], &msg);
45 spi_message_add_tail(&xfers[1], &msg);
/* Burst reads have a lower maximum SPI clock than normal register
 * access; clamp, but never raise above the caller's configured speed. */
47 st->us->max_speed_hz = min(ADIS16400_SPI_BURST, old_speed_hz);
50 ret = spi_sync(st->us, &msg);
52 dev_err(&st->us->dev, "problem when burst reading");
/* Restore the original bus speed before releasing the lock. */
54 st->us->max_speed_hz = old_speed_hz;
56 mutex_unlock(&st->buf_lock);
/* Pre-computed big-endian SPI read commands, one per scannable channel.
 * Index order matches the scan-mask bit order consumed by
 * adis16350_spi_read_all().
 * NOTE(review): the closing "};" of this array is not visible in this
 * extract. */
60 static const u16 read_all_tx_array[] = {
61 cpu_to_be16(ADIS16400_READ_REG(ADIS16400_SUPPLY_OUT)),
62 cpu_to_be16(ADIS16400_READ_REG(ADIS16400_XGYRO_OUT)),
63 cpu_to_be16(ADIS16400_READ_REG(ADIS16400_YGYRO_OUT)),
64 cpu_to_be16(ADIS16400_READ_REG(ADIS16400_ZGYRO_OUT)),
65 cpu_to_be16(ADIS16400_READ_REG(ADIS16400_XACCL_OUT)),
66 cpu_to_be16(ADIS16400_READ_REG(ADIS16400_YACCL_OUT)),
67 cpu_to_be16(ADIS16400_READ_REG(ADIS16400_ZACCL_OUT)),
68 cpu_to_be16(ADIS16400_READ_REG(ADIS16350_XTEMP_OUT)),
69 cpu_to_be16(ADIS16400_READ_REG(ADIS16350_YTEMP_OUT)),
70 cpu_to_be16(ADIS16400_READ_REG(ADIS16350_ZTEMP_OUT)),
71 cpu_to_be16(ADIS16400_READ_REG(ADIS16400_AUX_ADC)),
/*
 * adis16350_spi_read_all() - read each enabled data register individually
 * @dev: device associated with the iio_dev
 * @rx: destination buffer, 2 bytes per enabled scan element
 *
 * Used by parts that cannot do a burst read (ADIS16400_NO_BURST).
 * NOTE(review): several lines (error handling, j increment, .len fields,
 * kfree and return) are not visible in this extract.
 */
74 static int adis16350_spi_read_all(struct device *dev, u8 *rx)
76 struct iio_dev *indio_dev = dev_get_drvdata(dev);
77 struct adis16400_state *st = iio_priv(indio_dev);
79 struct spi_message msg;
81 struct spi_transfer *xfers;
/* One transfer per enabled channel plus one trailing transfer to clock
 * out the response to the final read command. */
82 int scan_count = bitmap_weight(indio_dev->active_scan_mask,
83 indio_dev->masklength);
85 xfers = kzalloc(sizeof(*xfers)*(scan_count + 1),
/* Walk all possible channels; only those set in the scan mask get a
 * transfer.  Responses are pipelined: the reply to the command sent in
 * transfer j arrives during transfer j + 1, hence rx_buf on xfers[j+1]. */
90 for (i = 0; i < ARRAY_SIZE(read_all_tx_array); i++)
91 if (test_bit(i, indio_dev->active_scan_mask)) {
92 xfers[j].tx_buf = &read_all_tx_array[i];
93 xfers[j].bits_per_word = 16;
95 xfers[j + 1].rx_buf = rx + j*2;
/* Final rx-only transfer also runs at 16 bits per word. */
98 xfers[j].bits_per_word = 16;
101 spi_message_init(&msg);
102 for (j = 0; j < scan_count + 1; j++)
103 spi_message_add_tail(&xfers[j], &msg);
105 ret = spi_sync(st->us, &msg);
111 /* Whilst this makes a lot of calls to iio_sw_ring functions - it is too
112 * device specific to be rolled into the core.
/*
 * adis16400_trigger_handler() - bottom half of the pollfunc; reads one scan
 * and pushes it (plus optional timestamp) into the software ring buffer.
 * NOTE(review): this extract is missing several lines (kmalloc NULL check
 * body, the else keyword, __ffs/mask manipulation for j, kfree and the
 * IRQ_HANDLED return).
 */
114 static irqreturn_t adis16400_trigger_handler(int irq, void *p)
116 struct iio_poll_func *pf = p;
117 struct iio_dev *indio_dev = pf->indio_dev;
118 struct adis16400_state *st = iio_priv(indio_dev);
119 struct iio_buffer *ring = indio_dev->buffer;
120 int i = 0, j, ret = 0;
123 /* Assumption that long is enough for maximum channels */
124 unsigned long mask = *indio_dev->active_scan_mask;
125 int scan_count = bitmap_weight(indio_dev->active_scan_mask,
126 indio_dev->masklength);
127 data = kmalloc(indio_dev->scan_bytes, GFP_KERNEL);
129 dev_err(&st->us->dev, "memory alloc failed in ring bh");
/* Parts that cannot burst-read fetch each register separately;
 * everything else uses a single burst transfer. */
134 if (st->variant->flags & ADIS16400_NO_BURST) {
135 ret = adis16350_spi_read_all(&indio_dev->dev, st->rx);
/* read_all already delivered only the enabled channels in order:
 * copy the raw 16-bit words straight across. */
138 for (; i < scan_count; i++)
139 data[i] = *(s16 *)(st->rx + i*2);
141 ret = adis16400_spi_read_burst(&indio_dev->dev, st->rx);
/* Burst data is big-endian and in fixed register order; the scan
 * mask selects which slots (index j) to copy and byte-swap. */
144 for (; i < scan_count; i++) {
147 data[i] = be16_to_cpup(
148 (__be16 *)&(st->rx[j*2]));
/* Timestamp goes after the channel data, rounded up so the s64 lands on
 * an 8-byte boundary ((i + 3)/4)*4 s16 slots == multiple of 8 bytes). */
152 /* Guaranteed to be aligned with 8 byte boundary */
153 if (ring->scan_timestamp)
154 *((s64 *)(data + ((i + 3)/4)*4)) = pf->timestamp;
155 ring->access->store_to(indio_dev->buffer, (u8 *) data, pf->timestamp);
/* Re-arm the trigger now that this sample has been consumed. */
157 iio_trigger_notify_done(indio_dev->trig);
/*
 * adis16400_unconfigure_ring() - tear down buffer resources.
 * Releases in reverse order of allocation in adis16400_configure_ring():
 * poll function first, then the software ring buffer.
 */
167 void adis16400_unconfigure_ring(struct iio_dev *indio_dev)
169 iio_dealloc_pollfunc(indio_dev->pollfunc);
170 iio_sw_rb_free(indio_dev->buffer);
/* Buffer setup callbacks: standard software-ring preenable combined with
 * the generic triggered-buffer enable/disable helpers. */
173 static const struct iio_buffer_setup_ops adis16400_ring_setup_ops = {
174 .preenable = &iio_sw_buffer_preenable,
175 .postenable = &iio_triggered_buffer_postenable,
176 .predisable = &iio_triggered_buffer_predisable,
/*
 * adis16400_configure_ring() - allocate and wire up the software ring
 * buffer and its triggered poll function.
 * Returns 0 on success or a negative errno (error-return lines are not
 * visible in this extract).
 * NOTE(review): the ring NULL check, remaining iio_alloc_pollfunc()
 * arguments and the success return are missing from this view.
 */
179 int adis16400_configure_ring(struct iio_dev *indio_dev)
182 struct iio_buffer *ring;
184 ring = iio_sw_rb_allocate(indio_dev);
189 indio_dev->buffer = ring;
/* Always capture a timestamp alongside each scan. */
190 ring->scan_timestamp = true;
191 indio_dev->setup_ops = &adis16400_ring_setup_ops;
/* Top half stores the timestamp; bottom half does the SPI reads. */
193 indio_dev->pollfunc = iio_alloc_pollfunc(&iio_pollfunc_store_time,
194 &adis16400_trigger_handler,
200 if (indio_dev->pollfunc == NULL) {
202 goto error_iio_sw_rb_free;
205 indio_dev->modes |= INDIO_BUFFER_TRIGGERED;
/* Error path: release the ring allocated above. */
207 error_iio_sw_rb_free:
208 iio_sw_rb_free(indio_dev->buffer);