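/*
 * Software ring buffer (triggered capture) support for the ADIS16400
 * family of inertial sensors.
 */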
#include <linux/interrupt.h>
#include <linux/mutex.h>
#include <linux/kernel.h>
#include <linux/spi/spi.h>
#include <linux/slab.h>
#include <linux/bitops.h>
#include <linux/export.h>

#include "../iio.h"
#include "../ring_sw.h"
#include "../trigger_consumer.h"
#include "adis16400.h"

/**
 * adis16400_spi_read_burst() - read all data registers
 * @dev: device associated with child of actual device (iio_dev or iio_trig)
 * @rx: somewhere to pass back the values read (min size is 24 bytes)
 */
static int adis16400_spi_read_burst(struct device *dev, u8 *rx)
{
	struct spi_message msg;
	struct iio_dev *indio_dev = dev_get_drvdata(dev);
	struct adis16400_state *st = iio_priv(indio_dev);
	u32 old_speed_hz = st->us->max_speed_hz;
	int ret;

	struct spi_transfer xfers[] = {
		{
			.tx_buf = st->tx,
			.bits_per_word = 8,
			.len = 2,
		}, {
			.rx_buf = rx,
			.bits_per_word = 8,
			.len = 24,
		},
	};

	mutex_lock(&st->buf_lock);
	st->tx[0] = ADIS16400_READ_REG(ADIS16400_GLOB_CMD);
	st->tx[1] = 0;

	spi_message_init(&msg);
	spi_message_add_tail(&xfers[0], &msg);
	spi_message_add_tail(&xfers[1], &msg);

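	/*
	 * Burst reads are run at no more than ADIS16400_SPI_BURST Hz; cap
	 * the bus speed for this message and restore the original rate
	 * afterwards.
	 */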
	st->us->max_speed_hz = min(ADIS16400_SPI_BURST, old_speed_hz);
	spi_setup(st->us);

	ret = spi_sync(st->us, &msg);
	if (ret)
		dev_err(&st->us->dev, "problem when burst reading\n");

	st->us->max_speed_hz = old_speed_hz;
	spi_setup(st->us);
	mutex_unlock(&st->buf_lock);
	return ret;
}

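/*
 * Big-endian read commands for each output register, indexed to match the
 * scan elements.  Used on devices that cannot burst read (ADIS16400_NO_BURST).
 */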
static const u16 read_all_tx_array[] = {
	cpu_to_be16(ADIS16400_READ_REG(ADIS16400_SUPPLY_OUT)),
	cpu_to_be16(ADIS16400_READ_REG(ADIS16400_XGYRO_OUT)),
	cpu_to_be16(ADIS16400_READ_REG(ADIS16400_YGYRO_OUT)),
	cpu_to_be16(ADIS16400_READ_REG(ADIS16400_ZGYRO_OUT)),
	cpu_to_be16(ADIS16400_READ_REG(ADIS16400_XACCL_OUT)),
	cpu_to_be16(ADIS16400_READ_REG(ADIS16400_YACCL_OUT)),
	cpu_to_be16(ADIS16400_READ_REG(ADIS16400_ZACCL_OUT)),
	cpu_to_be16(ADIS16400_READ_REG(ADIS16350_XTEMP_OUT)),
	cpu_to_be16(ADIS16400_READ_REG(ADIS16350_YTEMP_OUT)),
	cpu_to_be16(ADIS16400_READ_REG(ADIS16350_ZTEMP_OUT)),
	cpu_to_be16(ADIS16400_READ_REG(ADIS16400_AUX_ADC)),
};

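/*
 * Read the enabled channels one register at a time.  Each register address
 * is clocked out in transfer N whilst its data is clocked back in during
 * transfer N + 1, so one extra trailing transfer is needed.
 */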
static int adis16350_spi_read_all(struct device *dev, u8 *rx)
{
	struct iio_dev *indio_dev = dev_get_drvdata(dev);
	struct adis16400_state *st = iio_priv(indio_dev);

	struct spi_message msg;
	int i, j = 0, ret;
	struct spi_transfer *xfers;

	xfers = kzalloc(sizeof(*xfers) * (indio_dev->buffer->scan_count + 1),
			GFP_KERNEL);
	if (xfers == NULL)
		return -ENOMEM;

	for (i = 0; i < ARRAY_SIZE(read_all_tx_array); i++)
		if (test_bit(i, indio_dev->buffer->scan_mask)) {
			xfers[j].tx_buf = &read_all_tx_array[i];
			xfers[j].bits_per_word = 16;
			xfers[j].len = 2;
			xfers[j + 1].rx_buf = rx + j*2;
			j++;
		}
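	/* Trailing transfer only clocks in the data for the last address. */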
	xfers[j].bits_per_word = 16;
	xfers[j].len = 2;

	spi_message_init(&msg);
	for (j = 0; j < indio_dev->buffer->scan_count + 1; j++)
		spi_message_add_tail(&xfers[j], &msg);

	ret = spi_sync(st->us, &msg);
	kfree(xfers);

	return ret;
}

/* Whilst this makes a lot of calls to iio_sw_ring functions - it is too
 * device specific to be rolled into the core.
 */
static irqreturn_t adis16400_trigger_handler(int irq, void *p)
{
	struct iio_poll_func *pf = p;
	struct iio_dev *indio_dev = pf->indio_dev;
	struct adis16400_state *st = iio_priv(indio_dev);
	struct iio_buffer *ring = indio_dev->buffer;
	int i = 0, j, ret = 0;
	s16 *data;
	size_t datasize = ring->access->get_bytes_per_datum(ring);
	/* Assumption that long is enough for maximum channels */
	unsigned long mask = *ring->scan_mask;

	data = kmalloc(datasize, GFP_KERNEL);
	if (data == NULL) {
		dev_err(&st->us->dev, "memory alloc failed in ring bh\n");
		return IRQ_HANDLED;
	}

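	/*
	 * Devices that cannot burst read fetch each enabled register
	 * individually; everything else grabs the full register set in a
	 * single burst transaction.
	 */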
	if (ring->scan_count) {
		if (st->variant->flags & ADIS16400_NO_BURST) {
			ret = adis16350_spi_read_all(&indio_dev->dev, st->rx);
			if (ret < 0)
				goto err;
			for (; i < ring->scan_count; i++)
				data[i] = *(s16 *)(st->rx + i*2);
		} else {
			ret = adis16400_spi_read_burst(&indio_dev->dev, st->rx);
			if (ret < 0)
				goto err;
			for (; i < indio_dev->buffer->scan_count; i++) {
				j = __ffs(mask);
				mask &= ~(1 << j);
				data[i] = be16_to_cpup(
					(__be16 *)&(st->rx[j*2]));
			}
		}
	}
	/* Timestamp is stored at the next 8 byte aligned offset after the data */
	if (ring->scan_timestamp)
		*((s64 *)(data + ((i + 3)/4)*4)) = pf->timestamp;
	ring->access->store_to(indio_dev->buffer, (u8 *) data, pf->timestamp);

	iio_trigger_notify_done(indio_dev->trig);

	kfree(data);
	return IRQ_HANDLED;

err:
	kfree(data);
	return IRQ_HANDLED;
}

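/* Undo adis16400_configure_ring(): free the pollfunc and software ring. */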
void adis16400_unconfigure_ring(struct iio_dev *indio_dev)
{
	iio_dealloc_pollfunc(indio_dev->pollfunc);
	iio_sw_rb_free(indio_dev->buffer);
}

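/* Standard triggered buffer enable/disable hooks, nothing device specific. */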
static const struct iio_buffer_setup_ops adis16400_ring_setup_ops = {
	.preenable = &iio_sw_buffer_preenable,
	.postenable = &iio_triggered_buffer_postenable,
	.predisable = &iio_triggered_buffer_predisable,
};

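/*
 * Allocate the software ring buffer and the poll function that fills it,
 * then mark the device as supporting triggered buffer capture.
 */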
int adis16400_configure_ring(struct iio_dev *indio_dev)
{
	int ret = 0;
	struct iio_buffer *ring;

	ring = iio_sw_rb_allocate(indio_dev);
	if (!ring) {
		ret = -ENOMEM;
		return ret;
	}
	indio_dev->buffer = ring;
	/* Effectively select the ring buffer implementation */
	ring->access = &ring_sw_access_funcs;
	ring->bpe = 2;
	ring->scan_timestamp = true;
	ring->setup_ops = &adis16400_ring_setup_ops;
	ring->owner = THIS_MODULE;

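	/*
	 * The poll function captures a timestamp in its top half and runs
	 * adis16400_trigger_handler() as the threaded bottom half of the
	 * trigger interrupt.
	 */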
	indio_dev->pollfunc = iio_alloc_pollfunc(&iio_pollfunc_store_time,
						 &adis16400_trigger_handler,
						 IRQF_ONESHOT,
						 indio_dev,
						 "%s_consumer%d",
						 indio_dev->name,
						 indio_dev->id);
	if (indio_dev->pollfunc == NULL) {
		ret = -ENOMEM;
		goto error_iio_sw_rb_free;
	}

	indio_dev->modes |= INDIO_BUFFER_TRIGGERED;
	return 0;
error_iio_sw_rb_free:
	iio_sw_rb_free(indio_dev->buffer);
	return ret;
}