/*
 * sca3000_ring.c -- support VTI sca3000 series accelerometers via SPI
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * Copyright (c) 2009 Jonathan Cameron <jic23@cam.ac.uk>
 *
 */

#include <linux/interrupt.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/spi/spi.h>
#include <linux/sysfs.h>
#include <linux/sched.h>
#include <linux/poll.h>
#include <linux/uaccess.h>

#include "../iio.h"
#include "../sysfs.h"
#include "../buffer.h"
#include "../ring_hw.h"
#include "sca3000.h"

/* RFC / future work
 *
 * The internal ring buffer doesn't actually change what it holds depending
 * on which signals are enabled etc, merely whether you can read them.
 * As such the scan mode selection is somewhat different from that of a
 * software ring buffer, and changing it also applies to any data already
 * in the buffer. Currently scan elements aren't configured so it doesn't
 * matter.
 */

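/**
 * sca3000_read_data() - read a block of data starting at a given register
 * @st:			device instance specific state
 * @reg_address_high:	register address from which to start the read
 * @rx_p:		returns a freshly allocated receive buffer which the
 *			caller is responsible for freeing
 * @len:		number of bytes to read
 **/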
static int sca3000_read_data(struct sca3000_state *st,
			     uint8_t reg_address_high,
			     u8 **rx_p,
			     int len)
{
	int ret;
	struct spi_message msg;
	struct spi_transfer xfer[2] = {
		{
			.len = 1,
			.tx_buf = st->tx,
		}, {
			.len = len,
		}
	};
	*rx_p = kmalloc(len, GFP_KERNEL);
	if (*rx_p == NULL) {
		ret = -ENOMEM;
		goto error_ret;
	}
	xfer[1].rx_buf = *rx_p;
	st->tx[0] = SCA3000_READ_REG(reg_address_high);
	spi_message_init(&msg);
	spi_message_add_tail(&xfer[0], &msg);
	spi_message_add_tail(&xfer[1], &msg);
	ret = spi_sync(st->us, &msg);
	if (ret) {
		dev_err(&st->us->dev, "problem reading register\n");
		goto error_free_rx;
	}

	return 0;
error_free_rx:
	kfree(*rx_p);
error_ret:
	return ret;
}

/**
 * sca3000_read_first_n_hw_rb() - main ring access, pulls data from ring
 * @r:		the ring
 * @count:	number of bytes to try and read from the ring
 * @buf:	userspace buffer into which the samples are copied
 *
 * Currently does not provide timestamps. As the hardware doesn't add them they
 * can only be inferred approximately from ring buffer events such as 50% full
 * and knowledge of when the buffer was last emptied. This is left to userspace.
 **/
static int sca3000_read_first_n_hw_rb(struct iio_buffer *r,
				      size_t count, char __user *buf)
{
	struct iio_hw_buffer *hw_ring = iio_to_hw_buf(r);
	struct iio_dev *indio_dev = hw_ring->private;
	struct sca3000_state *st = iio_priv(indio_dev);
	u8 *rx;
	int ret, i, num_available, num_read = 0;
	int bytes_per_sample = 1;

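	/* In 11 bit mode each sample occupies two bytes in the ring. */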
	if (st->bpse == 11)
		bytes_per_sample = 2;

	mutex_lock(&st->lock);
	if (count % bytes_per_sample) {
		ret = -EINVAL;
		goto error_ret;
	}

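	/* Query how many samples the hardware ring currently holds. */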
	ret = sca3000_read_data_short(st, SCA3000_REG_ADDR_BUF_COUNT, 1);
	if (ret)
		goto error_ret;
	else
		num_available = st->rx[0];
	/*
	 * num_available is the total number of samples available
	 * i.e. number of time points * number of channels.
	 */
	if (count > num_available * bytes_per_sample)
		num_read = num_available * bytes_per_sample;
	else
		num_read = count;

	ret = sca3000_read_data(st,
				SCA3000_REG_ADDR_RING_OUT,
				&rx, num_read);
	if (ret)
		goto error_ret;

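	/* Samples are returned big endian; convert them to cpu order in place. */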
	for (i = 0; i < num_read / sizeof(u16); i++)
		*(((u16 *)rx) + i) = be16_to_cpup((__be16 *)rx + i);

	if (copy_to_user(buf, rx, num_read))
		ret = -EFAULT;
	kfree(rx);
	r->stufftoread = 0;
error_ret:
	mutex_unlock(&st->lock);

	return ret ? ret : num_read;
}

/* This is only valid with all 3 elements enabled */
static int sca3000_ring_get_length(struct iio_buffer *r)
{
	return 64;
}

/* only valid if resolution is kept at 11 bits */
static int sca3000_ring_get_bytes_per_datum(struct iio_buffer *r)
{
	return 6;
}

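/*
 * Standard buffer attributes (enable, bytes_per_datum and length) provided by
 * the core macros below; they are exposed via the attribute group further down.
 */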
static IIO_BUFFER_ENABLE_ATTR;
static IIO_BUFFER_BYTES_PER_DATUM_ATTR;
static IIO_BUFFER_LENGTH_ATTR;

/**
 * sca3000_query_ring_int() - query whether the hardware ring status interrupt
 *			      is enabled
 **/
static ssize_t sca3000_query_ring_int(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
	int ret, val;
	struct iio_buffer *ring = dev_get_drvdata(dev);
	struct iio_dev *indio_dev = ring->indio_dev;
	struct sca3000_state *st = iio_priv(indio_dev);

	mutex_lock(&st->lock);
	ret = sca3000_read_data_short(st, SCA3000_REG_ADDR_INT_MASK, 1);
	val = st->rx[0];
	mutex_unlock(&st->lock);
	if (ret)
		return ret;

	return sprintf(buf, "%d\n", !!(val & this_attr->address));
}

/**
 * sca3000_set_ring_int() - set the state of the ring status interrupt
 **/
static ssize_t sca3000_set_ring_int(struct device *dev,
				    struct device_attribute *attr,
				    const char *buf,
				    size_t len)
{
	struct iio_buffer *ring = dev_get_drvdata(dev);
	struct iio_dev *indio_dev = ring->indio_dev;
	struct sca3000_state *st = iio_priv(indio_dev);
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
	long val;
	int ret;

	mutex_lock(&st->lock);
	ret = strict_strtol(buf, 10, &val);
	if (ret)
		goto error_ret;
	ret = sca3000_read_data_short(st, SCA3000_REG_ADDR_INT_MASK, 1);
	if (ret)
		goto error_ret;
	if (val)
		ret = sca3000_write_reg(st,
					SCA3000_REG_ADDR_INT_MASK,
					st->rx[0] | this_attr->address);
	else
		ret = sca3000_write_reg(st,
					SCA3000_REG_ADDR_INT_MASK,
					st->rx[0] & ~this_attr->address);
error_ret:
	mutex_unlock(&st->lock);

	return ret ? ret : len;
}

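/*
 * Sysfs controls for the 50% and 75% ring watermark interrupts; the attribute
 * address carries the corresponding bit in the interrupt mask register.
 */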
static IIO_DEVICE_ATTR(50_percent, S_IRUGO | S_IWUSR,
		       sca3000_query_ring_int,
		       sca3000_set_ring_int,
		       SCA3000_INT_MASK_RING_HALF);

static IIO_DEVICE_ATTR(75_percent, S_IRUGO | S_IWUSR,
		       sca3000_query_ring_int,
		       sca3000_set_ring_int,
		       SCA3000_INT_MASK_RING_THREE_QUARTER);

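/**
 * sca3000_show_buffer_scale() - print the scale of data read via the buffer
 *
 * Data from the ring has lower resolution than direct register reads, hence
 * the factor of 4 applied to the device scale.
 **/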
static ssize_t sca3000_show_buffer_scale(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct iio_buffer *ring = dev_get_drvdata(dev);
	struct iio_dev *indio_dev = ring->indio_dev;
	struct sca3000_state *st = iio_priv(indio_dev);

	return sprintf(buf, "0.%06d\n", 4 * st->info->scale);
}

static IIO_DEVICE_ATTR(in_accel_scale,
		       S_IRUGO,
		       sca3000_show_buffer_scale,
		       NULL,
		       0);

/*
 * Ring buffer attributes
 * This device is a bit unusual in that the sampling frequency and bpse
 * only apply to the ring buffer. At all times full rate and accuracy
 * are available via direct reading from registers.
 */
static struct attribute *sca3000_ring_attributes[] = {
	&dev_attr_length.attr,
	&dev_attr_bytes_per_datum.attr,
	&dev_attr_enable.attr,
	&iio_dev_attr_50_percent.dev_attr.attr,
	&iio_dev_attr_75_percent.dev_attr.attr,
	&iio_dev_attr_in_accel_scale.dev_attr.attr,
	NULL,
};

static struct attribute_group sca3000_ring_attr = {
	.attrs = sca3000_ring_attributes,
	.name = "buffer",
};

static struct iio_buffer *sca3000_rb_allocate(struct iio_dev *indio_dev)
{
	struct iio_buffer *buf;
	struct iio_hw_buffer *ring;

	ring = kzalloc(sizeof(*ring), GFP_KERNEL);
	if (!ring)
		return NULL;

	ring->private = indio_dev;
	buf = &ring->buf;
	buf->stufftoread = 0;
	buf->attrs = &sca3000_ring_attr;
	iio_buffer_init(buf, indio_dev);

	return buf;
}

static inline void sca3000_rb_free(struct iio_buffer *r)
{
	kfree(iio_to_hw_buf(r));
}

static const struct iio_buffer_access_funcs sca3000_ring_access_funcs = {
	.read_first_n = &sca3000_read_first_n_hw_rb,
	.get_length = &sca3000_ring_get_length,
	.get_bytes_per_datum = &sca3000_ring_get_bytes_per_datum,
};

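/**
 * sca3000_configure_ring() - allocate the hardware ring buffer and attach
 *			      its access functions
 **/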
int sca3000_configure_ring(struct iio_dev *indio_dev)
{
	indio_dev->buffer = sca3000_rb_allocate(indio_dev);
	if (indio_dev->buffer == NULL)
		return -ENOMEM;
	indio_dev->modes |= INDIO_BUFFER_HARDWARE;

	indio_dev->buffer->access = &sca3000_ring_access_funcs;

	return 0;
}

void sca3000_unconfigure_ring(struct iio_dev *indio_dev)
{
	sca3000_rb_free(indio_dev->buffer);
}

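/**
 * __sca3000_hw_ring_state_set() - start or stop the hardware ring buffer
 *
 * Read-modify-write of the ring buffer enable bit in the mode register,
 * performed under the state lock.
 **/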
static inline
int __sca3000_hw_ring_state_set(struct iio_dev *indio_dev, bool state)
{
	struct sca3000_state *st = iio_priv(indio_dev);
	int ret;

	mutex_lock(&st->lock);
	ret = sca3000_read_data_short(st, SCA3000_REG_ADDR_MODE, 1);
	if (ret)
		goto error_ret;
	if (state) {
		printk(KERN_INFO "supposedly enabling ring buffer\n");
		ret = sca3000_write_reg(st,
					SCA3000_REG_ADDR_MODE,
					(st->rx[0] | SCA3000_RING_BUF_ENABLE));
	} else
		ret = sca3000_write_reg(st,
					SCA3000_REG_ADDR_MODE,
					(st->rx[0] & ~SCA3000_RING_BUF_ENABLE));
error_ret:
	mutex_unlock(&st->lock);

	return ret;
}

/**
 * sca3000_hw_ring_preenable() - hw ring buffer preenable function
 *
 * Very simple enable function as the chip allows normal reads during ring
 * buffer operation, so as long as the ring is indeed running before we
 * notify the core, the precise ordering does not matter.
 **/
static int sca3000_hw_ring_preenable(struct iio_dev *indio_dev)
{
	return __sca3000_hw_ring_state_set(indio_dev, 1);
}

static int sca3000_hw_ring_postdisable(struct iio_dev *indio_dev)
{
	return __sca3000_hw_ring_state_set(indio_dev, 0);
}

static const struct iio_buffer_setup_ops sca3000_ring_setup_ops = {
	.preenable = &sca3000_hw_ring_preenable,
	.postdisable = &sca3000_hw_ring_postdisable,
};

void sca3000_register_ring_funcs(struct iio_dev *indio_dev)
{
	indio_dev->buffer->setup_ops = &sca3000_ring_setup_ops;
}

/**
 * sca3000_ring_int_process() - ring specific interrupt handling
 *
 * This is only split from the main interrupt handler so as to
 * reduce the amount of code if the ring buffer is not enabled.
 **/
void sca3000_ring_int_process(u8 val, struct iio_buffer *ring)
{
	if (val & (SCA3000_INT_STATUS_THREE_QUARTERS |
		   SCA3000_INT_STATUS_HALF)) {
		ring->stufftoread = true;
		wake_up_interruptible(&ring->pollq);
	}
}