/*
 * Copyright 2010-2011 Analog Devices Inc.
 * Copyright (C) 2008 Jonathan Cameron
 *
 * Licensed under the GPL-2.
 *
 * ad7887_ring.c
 */

#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/spi/spi.h>

#include "../iio.h"
#include "../buffer.h"
#include "../ring_sw.h"
#include "../trigger_consumer.h"

#include "ad7887.h"

int ad7887_scan_from_ring(struct ad7887_state *st, int channum)
{
	struct iio_buffer *ring = iio_priv_to_dev(st)->buffer;
	int count = 0, ret;
	u16 *ring_data;

	if (!(test_bit(channum, ring->scan_mask))) {
		ret = -EBUSY;
		goto error_ret;
	}

	ring_data = kmalloc(ring->access->get_bytes_per_datum(ring),
			    GFP_KERNEL);
	if (ring_data == NULL) {
		ret = -ENOMEM;
		goto error_ret;
	}
	ret = ring->access->read_last(ring, (u8 *) ring_data);
	if (ret)
		goto error_free_ring_data;

	/* for single channel scan the result is stored with zero offset */
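	/*
	 * A scan of both channels stores the samples back to back as
	 * { CH0, CH1 }, so reading CH1 from such a scan needs index 1;
	 * a single channel scan keeps its only sample at index 0.
	 */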
	if (test_bit(0, ring->scan_mask) && test_bit(1, ring->scan_mask) &&
	    (channum == 1))
		count = 1;

	ret = be16_to_cpu(ring_data[count]);

error_free_ring_data:
	kfree(ring_data);
error_ret:
	return ret;
}

/**
 * ad7887_ring_preenable() - set up the parameters of the ring before enabling
 *
 * The complex nature of the setting of the number of bytes per datum is due
 * to this driver currently ensuring that the timestamp is stored at an 8
 * byte boundary.
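 *
 * Illustrative example (assumes 16 bit storagebits, not taken from the
 * datasheet): with both channels enabled the samples occupy 2 * 16 / 8 = 4
 * bytes; adding the 8 byte timestamp gives 12, which is then padded up to
 * 16 so the s64 timestamp lands on an 8 byte boundary.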
 **/
static int ad7887_ring_preenable(struct iio_dev *indio_dev)
{
	struct ad7887_state *st = iio_priv(indio_dev);
	struct iio_buffer *ring = indio_dev->buffer;

	st->d_size = ring->scan_count *
		st->chip_info->channel[0].scan_type.storagebits / 8;

	if (ring->scan_timestamp) {
		st->d_size += sizeof(s64);

		if (st->d_size % sizeof(s64))
			st->d_size += sizeof(s64) - (st->d_size % sizeof(s64));
	}

	if (indio_dev->buffer->access->set_bytes_per_datum)
		indio_dev->buffer->access->
			set_bytes_per_datum(indio_dev->buffer, st->d_size);

	/* We know this is a single long so can 'cheat' */
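	/*
	 * The scan_mask bitmap for this two channel part always fits in its
	 * first unsigned long, so dereferencing it directly covers every
	 * possible channel combination.
	 */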
	switch (*ring->scan_mask) {
	case (1 << 0):
		st->ring_msg = &st->msg[AD7887_CH0];
		break;
	case (1 << 1):
		st->ring_msg = &st->msg[AD7887_CH1];
		/* Dummy read: push CH1 setting down to hardware */
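		/*
		 * The control word shifted out during one transfer selects
		 * the channel for the following conversion, so a throwaway
		 * transfer here is used to make sure the first buffered
		 * sample really comes from CH1.
		 */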
		spi_sync(st->spi, st->ring_msg);
		break;
	case ((1 << 1) | (1 << 0)):
		st->ring_msg = &st->msg[AD7887_CH0_CH1];
		break;
	}

	return 0;
}

static int ad7887_ring_postdisable(struct iio_dev *indio_dev)
{
	struct ad7887_state *st = iio_priv(indio_dev);

	/* dummy read: restore default CH0 setting */
	return spi_sync(st->spi, &st->msg[AD7887_CH0]);
}

/**
 * ad7887_trigger_handler() - bh of trigger launched polling to ring buffer
 *
 * Currently there is no option in this driver to disable the saving of
 * timestamps within the ring.
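 *
 * The datum pushed to the buffer holds the raw big endian samples first,
 * with the s64 timestamp (when enabled) at the padded tail computed in
 * ad7887_ring_preenable().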
Michael Hennerich2b4756a2010-11-22 14:35:32 +0100113 **/
Jonathan Camerone362dfb2011-05-18 14:41:39 +0100114static irqreturn_t ad7887_trigger_handler(int irq, void *p)
Michael Hennerich2b4756a2010-11-22 14:35:32 +0100115{
Jonathan Camerone362dfb2011-05-18 14:41:39 +0100116 struct iio_poll_func *pf = p;
Jonathan Camerone65bc6a2011-08-24 17:28:36 +0100117 struct iio_dev *indio_dev = pf->indio_dev;
Jonathan Cameronf490f422011-06-27 13:07:19 +0100118 struct ad7887_state *st = iio_priv(indio_dev);
Jonathan Cameron14555b12011-09-21 11:15:57 +0100119 struct iio_buffer *ring = indio_dev->buffer;
Michael Hennerich2b4756a2010-11-22 14:35:32 +0100120 s64 time_ns;
121 __u8 *buf;
122 int b_sent;
Michael Hennerich2b4756a2010-11-22 14:35:32 +0100123
Michael Hennerich596d0602011-05-18 14:41:50 +0100124 unsigned int bytes = ring->scan_count *
125 st->chip_info->channel[0].scan_type.storagebits / 8;
Michael Hennerich2b4756a2010-11-22 14:35:32 +0100126
	/*
	 * On allocation failure still take the cleanup path so the trigger
	 * is notified; kfree(NULL) is a harmless no-op.
	 */
	buf = kzalloc(st->d_size, GFP_KERNEL);
	if (buf == NULL)
		goto done;

	b_sent = spi_sync(st->spi, st->ring_msg);
	if (b_sent)
		goto done;

	time_ns = iio_get_time_ns();

	memcpy(buf, st->data, bytes);
	if (ring->scan_timestamp)
		memcpy(buf + st->d_size - sizeof(s64),
		       &time_ns, sizeof(time_ns));

	indio_dev->buffer->access->store_to(indio_dev->buffer, buf, time_ns);
done:
	kfree(buf);
	iio_trigger_notify_done(indio_dev->trig);

	return IRQ_HANDLED;
}

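/*
 * Buffer setup callbacks: the IIO core runs preenable before the trigger is
 * attached and postdisable after it has been detached, while the generic
 * triggered buffer helpers handle attaching and detaching the poll function
 * in postenable/predisable.
 */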
static const struct iio_buffer_setup_ops ad7887_ring_setup_ops = {
	.preenable = &ad7887_ring_preenable,
	.postenable = &iio_triggered_buffer_postenable,
	.predisable = &iio_triggered_buffer_predisable,
	.postdisable = &ad7887_ring_postdisable,
};

int ad7887_register_ring_funcs_and_init(struct iio_dev *indio_dev)
{
	int ret;

	indio_dev->buffer = iio_sw_rb_allocate(indio_dev);
	if (!indio_dev->buffer) {
		ret = -ENOMEM;
		goto error_ret;
	}
	/* Effectively select the ring buffer implementation */
	indio_dev->buffer->access = &ring_sw_access_funcs;
	indio_dev->pollfunc = iio_alloc_pollfunc(&iio_pollfunc_store_time,
						 &ad7887_trigger_handler,
						 IRQF_ONESHOT,
						 indio_dev,
						 "ad7887_consumer%d",
						 indio_dev->id);
	if (indio_dev->pollfunc == NULL) {
		ret = -ENOMEM;
		goto error_deallocate_sw_rb;
	}
	/* Ring buffer functions - here trigger setup related */
	indio_dev->buffer->setup_ops = &ad7887_ring_setup_ops;

	/* Flag that polled ring buffering is possible */
	indio_dev->modes |= INDIO_BUFFER_TRIGGERED;
	return 0;

error_deallocate_sw_rb:
	iio_sw_rb_free(indio_dev->buffer);
error_ret:
	return ret;
}

void ad7887_ring_cleanup(struct iio_dev *indio_dev)
{
	iio_dealloc_pollfunc(indio_dev->pollfunc);
	iio_sw_rb_free(indio_dev->buffer);
}
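
/*
 * Expected usage (a sketch of how the matching core driver is typically
 * structured, not verified against ad7887_core.c): the probe path calls
 * ad7887_register_ring_funcs_and_init() before registering the buffer with
 * the IIO core, roughly
 *
 *	ret = ad7887_register_ring_funcs_and_init(indio_dev);
 *	if (ret)
 *		goto error_cleanup;	// hypothetical label
 *	ret = iio_buffer_register(indio_dev, indio_dev->channels,
 *				  indio_dev->num_channels);
 *
 * with ad7887_ring_cleanup() undoing the allocations on the error unwind
 * and remove paths after iio_buffer_unregister().
 */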