#include <linux/interrupt.h>
#include <linux/mutex.h>
#include <linux/kernel.h>
#include <linux/spi/spi.h>
#include <linux/slab.h>
#include <linux/bitops.h>

#include "../iio.h"
#include "../ring_sw.h"
#include "../trigger_consumer.h"
#include "adis16400.h"

/**
 * adis16400_spi_read_burst() - read all data registers
 * @dev: device associated with child of actual device (iio_dev or iio_trig)
 * @rx: buffer to pass back the data read (min size is 24 bytes)
 **/
static int adis16400_spi_read_burst(struct device *dev, u8 *rx)
{
	struct spi_message msg;
	struct iio_dev *indio_dev = dev_get_drvdata(dev);
	struct adis16400_state *st = iio_priv(indio_dev);
	u32 old_speed_hz = st->us->max_speed_hz;
	int ret;

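	/*
	 * Two chained transfers: the first clocks out the two byte
	 * GLOB_CMD read command, the second clocks in the 24 byte burst
	 * of data registers that follows it.
	 */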
	struct spi_transfer xfers[] = {
		{
			.tx_buf = st->tx,
			.bits_per_word = 8,
			.len = 2,
		}, {
			.rx_buf = rx,
			.bits_per_word = 8,
			.len = 24,
		},
	};

	mutex_lock(&st->buf_lock);
	st->tx[0] = ADIS16400_READ_REG(ADIS16400_GLOB_CMD);
	st->tx[1] = 0;

	spi_message_init(&msg);
	spi_message_add_tail(&xfers[0], &msg);
	spi_message_add_tail(&xfers[1], &msg);

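	/*
	 * Burst reads run at a reduced maximum SPI clock, so clamp the bus
	 * speed to ADIS16400_SPI_BURST for the transfer and restore the
	 * original rate once it has completed.
	 */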
	st->us->max_speed_hz = min(ADIS16400_SPI_BURST, old_speed_hz);
	spi_setup(st->us);

	ret = spi_sync(st->us, &msg);
	if (ret)
		dev_err(&st->us->dev, "problem when burst reading\n");

	st->us->max_speed_hz = old_speed_hz;
	spi_setup(st->us);
	mutex_unlock(&st->buf_lock);
	return ret;
}

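/*
 * Read commands for each scannable output register, pre-converted to the
 * big-endian wire format and listed in scan element order.
 */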
static const u16 read_all_tx_array[] = {
	cpu_to_be16(ADIS16400_READ_REG(ADIS16400_SUPPLY_OUT)),
	cpu_to_be16(ADIS16400_READ_REG(ADIS16400_XGYRO_OUT)),
	cpu_to_be16(ADIS16400_READ_REG(ADIS16400_YGYRO_OUT)),
	cpu_to_be16(ADIS16400_READ_REG(ADIS16400_ZGYRO_OUT)),
	cpu_to_be16(ADIS16400_READ_REG(ADIS16400_XACCL_OUT)),
	cpu_to_be16(ADIS16400_READ_REG(ADIS16400_YACCL_OUT)),
	cpu_to_be16(ADIS16400_READ_REG(ADIS16400_ZACCL_OUT)),
	cpu_to_be16(ADIS16400_READ_REG(ADIS16350_XTEMP_OUT)),
	cpu_to_be16(ADIS16400_READ_REG(ADIS16350_YTEMP_OUT)),
	cpu_to_be16(ADIS16400_READ_REG(ADIS16350_ZTEMP_OUT)),
	cpu_to_be16(ADIS16400_READ_REG(ADIS16400_AUX_ADC)),
};

static int adis16350_spi_read_all(struct device *dev, u8 *rx)
{
	struct iio_dev *indio_dev = dev_get_drvdata(dev);
	struct adis16400_state *st = iio_priv(indio_dev);

	struct spi_message msg;
	int i, j = 0, ret;
	struct spi_transfer *xfers;

	xfers = kzalloc(sizeof(*xfers) * (indio_dev->ring->scan_count + 1),
			GFP_KERNEL);
	if (xfers == NULL)
		return -ENOMEM;

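	/*
	 * Build one command transfer per enabled channel.  The response to
	 * each read command is clocked out during the following transfer,
	 * so the rx buffer for command j is attached to transfer j + 1 and
	 * a trailing dummy transfer collects the final response.
	 */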
	for (i = 0; i < ARRAY_SIZE(read_all_tx_array); i++)
		if (test_bit(i, indio_dev->ring->scan_mask)) {
			xfers[j].tx_buf = &read_all_tx_array[i];
			xfers[j].bits_per_word = 16;
			xfers[j].len = 2;
			xfers[j + 1].rx_buf = rx + j*2;
			j++;
		}
	xfers[j].bits_per_word = 16;
	xfers[j].len = 2;

	spi_message_init(&msg);
	for (j = 0; j < indio_dev->ring->scan_count + 1; j++)
		spi_message_add_tail(&xfers[j], &msg);

	ret = spi_sync(st->us, &msg);
	kfree(xfers);

	return ret;
}

/* Whilst this makes a lot of calls to iio_sw_ring functions - it is too
 * device specific to be rolled into the core.
 */
static irqreturn_t adis16400_trigger_handler(int irq, void *p)
{
	struct iio_poll_func *pf = p;
	struct iio_dev *indio_dev = pf->indio_dev;
	struct adis16400_state *st = iio_priv(indio_dev);
	struct iio_ring_buffer *ring = indio_dev->ring;
	int i = 0, j, ret = 0;
	s16 *data;
	size_t datasize = ring->access->get_bytes_per_datum(ring);
	/* Assumes an unsigned long is wide enough to hold the full scan mask */
	unsigned long mask = *ring->scan_mask;

	data = kmalloc(datasize, GFP_KERNEL);
	if (data == NULL) {
		dev_err(&st->us->dev, "memory alloc failed in ring bh\n");
		return IRQ_HANDLED;
	}

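	/*
	 * Devices flagged ADIS16400_NO_BURST are read register by register;
	 * everything else pulls all of the output registers in a single
	 * burst and then extracts the enabled channels from the result.
	 */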
	if (ring->scan_count) {
		if (st->variant->flags & ADIS16400_NO_BURST) {
			ret = adis16350_spi_read_all(&indio_dev->dev, st->rx);
			if (ret < 0)
				goto err;
			for (; i < ring->scan_count; i++)
				data[i] = *(s16 *)(st->rx + i*2);
		} else {
			ret = adis16400_spi_read_burst(&indio_dev->dev, st->rx);
			if (ret < 0)
				goto err;
			for (; i < indio_dev->ring->scan_count; i++) {
				j = __ffs(mask);
				mask &= ~(1 << j);
				data[i] = be16_to_cpup(
					(__be16 *)&(st->rx[j*2]));
			}
		}
	}
	/* Pad the sample count to an 8 byte boundary so the s64 timestamp is aligned */
	if (ring->scan_timestamp)
		*((s64 *)(data + ((i + 3)/4)*4)) = pf->timestamp;
	ring->access->store_to(indio_dev->ring, (u8 *)data, pf->timestamp);

	iio_trigger_notify_done(indio_dev->trig);

	kfree(data);
	return IRQ_HANDLED;

err:
	kfree(data);
	return IRQ_HANDLED;
}

void adis16400_unconfigure_ring(struct iio_dev *indio_dev)
{
	iio_dealloc_pollfunc(indio_dev->pollfunc);
	iio_sw_rb_free(indio_dev->ring);
}

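/*
 * Ring setup ops: preenable sizes the software ring buffer, while the
 * triggered buffer postenable/predisable helpers attach and detach the
 * poll function from the device's trigger around capture.
 */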
static const struct iio_ring_setup_ops adis16400_ring_setup_ops = {
	.preenable = &iio_sw_ring_preenable,
	.postenable = &iio_triggered_buffer_postenable,
	.predisable = &iio_triggered_buffer_predisable,
};

int adis16400_configure_ring(struct iio_dev *indio_dev)
{
	int ret = 0;
	struct adis16400_state *st = iio_priv(indio_dev);
	struct iio_ring_buffer *ring;

	ring = iio_sw_rb_allocate(indio_dev);
	if (!ring) {
		ret = -ENOMEM;
		return ret;
	}
	indio_dev->ring = ring;
	/* Effectively select the ring buffer implementation */
	ring->access = &ring_sw_access_funcs;
	ring->bpe = 2;
	ring->scan_timestamp = true;
	ring->setup_ops = &adis16400_ring_setup_ops;
	ring->owner = THIS_MODULE;

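	/*
	 * Two stage capture: iio_pollfunc_store_time records the timestamp
	 * in the top half and adis16400_trigger_handler then runs in the
	 * IRQF_ONESHOT thread to read and push the sample.
	 */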
	indio_dev->pollfunc = iio_alloc_pollfunc(&iio_pollfunc_store_time,
						 &adis16400_trigger_handler,
						 IRQF_ONESHOT,
						 indio_dev,
						 "%s_consumer%d",
						 indio_dev->name,
						 indio_dev->id);
	if (indio_dev->pollfunc == NULL) {
		ret = -ENOMEM;
		goto error_iio_sw_rb_free;
	}

	indio_dev->modes |= INDIO_RING_TRIGGERED;
	return 0;
error_iio_sw_rb_free:
	iio_sw_rb_free(indio_dev->ring);
	return ret;
}