/*
 * Copyright 2010 Analog Devices Inc.
 * Copyright (C) 2008 Jonathan Cameron
 *
 * Licensed under the GPL-2 or later.
 *
 * ad7476_ring.c
 */

#include <linux/interrupt.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/spi/spi.h>

#include "../iio.h"
#include "../buffer_generic.h"
#include "../ring_sw.h"
#include "../trigger_consumer.h"

#include "ad7476.h"

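/*
 * Pull the most recently stored scan element back out of the software
 * ring buffer and reassemble the raw sample from its two bytes, MSB
 * first.
 */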
int ad7476_scan_from_ring(struct iio_dev *indio_dev)
{
	struct iio_ring_buffer *ring = indio_dev->ring;
	int ret;
	u8 *ring_data;

	ring_data = kmalloc(ring->access->get_bytes_per_datum(ring),
			    GFP_KERNEL);
	if (ring_data == NULL) {
		ret = -ENOMEM;
		goto error_ret;
	}
	ret = ring->access->read_last(ring, ring_data);
	if (ret)
		goto error_free_ring_data;

	ret = (ring_data[0] << 8) | ring_data[1];

error_free_ring_data:
	kfree(ring_data);
error_ret:
	return ret;
}

/**
 * ad7476_ring_preenable() - setup the parameters of the ring before enabling
 *
 * The complexity in setting the number of bytes per datum comes from this
 * driver currently ensuring that the timestamp is stored at an 8 byte
 * boundary.
 **/
static int ad7476_ring_preenable(struct iio_dev *indio_dev)
{
	struct ad7476_state *st = iio_priv(indio_dev);
	struct iio_ring_buffer *ring = indio_dev->ring;

	st->d_size = ring->scan_count *
		st->chip_info->channel[0].scan_type.storagebits / 8;

	if (ring->scan_timestamp) {
		st->d_size += sizeof(s64);

		if (st->d_size % sizeof(s64))
			st->d_size += sizeof(s64) - (st->d_size % sizeof(s64));
	}

	if (indio_dev->ring->access->set_bytes_per_datum)
		indio_dev->ring->access->set_bytes_per_datum(indio_dev->ring,
							     st->d_size);

	return 0;
}
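/*
 * Worked example (illustrative only, assuming a single enabled channel
 * with scan_type.storagebits = 16): the sample data takes 1 * 16 / 8 = 2
 * bytes; with the timestamp enabled this grows to 2 + 8 = 10 bytes and
 * is then padded up to 16 so that the trailing s64 timestamp lands on
 * an 8 byte boundary (offset 8).
 */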

/*
 * Triggered capture: runs in the poll function's threaded context for
 * each trigger event, reads one sample over SPI, optionally appends the
 * timestamp at the end of the (padded) datum and pushes the result into
 * the ring.
 */
static irqreturn_t ad7476_trigger_handler(int irq, void *p)
{
	struct iio_poll_func *pf = p;
	struct iio_dev *indio_dev = pf->indio_dev;
	struct ad7476_state *st = iio_priv(indio_dev);
	s64 time_ns;
	__u8 *rxbuf;
	int b_sent;

	rxbuf = kzalloc(st->d_size, GFP_KERNEL);
	if (rxbuf == NULL) {
		/*
		 * An irq handler must return an irqreturn_t rather than an
		 * errno; jump to the common exit so the trigger is still
		 * notified (kfree(NULL) is a no-op).
		 */
		goto done;
	}

	b_sent = spi_read(st->spi, rxbuf,
			  st->chip_info->channel[0].scan_type.storagebits / 8);
	if (b_sent < 0)
		goto done;

	time_ns = iio_get_time_ns();

	if (indio_dev->ring->scan_timestamp)
		memcpy(rxbuf + st->d_size - sizeof(s64),
		       &time_ns, sizeof(time_ns));

	indio_dev->ring->access->store_to(indio_dev->ring, rxbuf, time_ns);
done:
	iio_trigger_notify_done(indio_dev->trig);
	kfree(rxbuf);

	return IRQ_HANDLED;
}

static const struct iio_ring_setup_ops ad7476_ring_setup_ops = {
	.preenable = &ad7476_ring_preenable,
	.postenable = &iio_triggered_buffer_postenable,
	.predisable = &iio_triggered_buffer_predisable,
};

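/*
 * Allocate the software ring buffer and the triggered-capture poll
 * function, then hook up the setup ops, timestamp scanning and the
 * INDIO_BUFFER_TRIGGERED mode for this device.
 */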
int ad7476_register_ring_funcs_and_init(struct iio_dev *indio_dev)
{
	struct ad7476_state *st = iio_priv(indio_dev);
	int ret = 0;

	indio_dev->ring = iio_sw_rb_allocate(indio_dev);
	if (!indio_dev->ring) {
		ret = -ENOMEM;
		goto error_ret;
	}
	/* Effectively select the ring buffer implementation */
	indio_dev->ring->access = &ring_sw_access_funcs;
	indio_dev->pollfunc
		= iio_alloc_pollfunc(NULL,
				     &ad7476_trigger_handler,
				     IRQF_ONESHOT,
				     indio_dev,
				     "%s_consumer%d",
				     spi_get_device_id(st->spi)->name,
				     indio_dev->id);
	if (indio_dev->pollfunc == NULL) {
		ret = -ENOMEM;
		goto error_deallocate_sw_rb;
	}

	/* Ring buffer functions - here trigger setup related */
	indio_dev->ring->setup_ops = &ad7476_ring_setup_ops;
	indio_dev->ring->scan_timestamp = true;

	/* Flag that triggered ring buffering is possible */
	indio_dev->modes |= INDIO_BUFFER_TRIGGERED;
	return 0;

error_deallocate_sw_rb:
	iio_sw_rb_free(indio_dev->ring);
error_ret:
	return ret;
}

void ad7476_ring_cleanup(struct iio_dev *indio_dev)
{
	iio_dealloc_pollfunc(indio_dev->pollfunc);
	iio_sw_rb_free(indio_dev->ring);
}
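
/*
 * Usage note: the core ad7476 driver is expected to call
 * ad7476_register_ring_funcs_and_init() from probe before registering the
 * ring buffer with the IIO core, and ad7476_ring_cleanup() from the
 * remove/error path after the buffer has been unregistered.
 */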