blob: 9201022945e966719c4322e9242d0582316a9eee [file] [log] [blame]
Jonathan Cameron92d10792012-06-30 20:06:00 +01001#include <linux/kernel.h>
2#include <linux/slab.h>
3#include <linux/err.h>
4#include <linux/export.h>
5#include <linux/iio/buffer.h>
6#include <linux/iio/consumer.h>
7
/*
 * Per-consumer state for a callback-driven buffer: embeds an iio_buffer
 * whose store_to op hands each pushed datum to a caller-supplied callback.
 */
struct iio_cb_buffer {
	struct iio_buffer buffer;	/* embedded; recovered via container_of() in the store_to op */
	int (*cb)(u8 *data, void *private);	/* invoked for every datum stored to the buffer */
	void *private;			/* opaque context forwarded to cb */
	/*
	 * Array from iio_channel_get_all(); walked until an entry with a
	 * NULL indio_dev (see the scan-mask loop in iio_channel_get_all_cb).
	 */
	struct iio_channel *channels;
};
14
15static int iio_buffer_cb_store_to(struct iio_buffer *buffer, u8 *data)
16{
17 struct iio_cb_buffer *cb_buff = container_of(buffer,
18 struct iio_cb_buffer,
19 buffer);
20
21 return cb_buff->cb(data, cb_buff->private);
22}
23
/*
 * Only store_to is implemented: data is passed straight through to the
 * consumer callback rather than being queued for later reads.
 */
static struct iio_buffer_access_funcs iio_cb_access = {
	.store_to = &iio_buffer_cb_store_to,
};
27
Guenter Roeckca7d98d2013-01-31 21:43:00 +000028struct iio_cb_buffer *iio_channel_get_all_cb(struct device *dev,
Jonathan Cameron92d10792012-06-30 20:06:00 +010029 int (*cb)(u8 *data,
30 void *private),
31 void *private)
32{
33 int ret;
34 struct iio_cb_buffer *cb_buff;
35 struct iio_dev *indio_dev;
36 struct iio_channel *chan;
37
38 cb_buff = kzalloc(sizeof(*cb_buff), GFP_KERNEL);
39 if (cb_buff == NULL) {
40 ret = -ENOMEM;
41 goto error_ret;
42 }
43
44 cb_buff->private = private;
45 cb_buff->cb = cb;
46 cb_buff->buffer.access = &iio_cb_access;
47 INIT_LIST_HEAD(&cb_buff->buffer.demux_list);
48
Guenter Roeckca7d98d2013-01-31 21:43:00 +000049 cb_buff->channels = iio_channel_get_all(dev);
Jonathan Cameron92d10792012-06-30 20:06:00 +010050 if (IS_ERR(cb_buff->channels)) {
51 ret = PTR_ERR(cb_buff->channels);
52 goto error_free_cb_buff;
53 }
54
55 indio_dev = cb_buff->channels[0].indio_dev;
56 cb_buff->buffer.scan_mask
57 = kcalloc(BITS_TO_LONGS(indio_dev->masklength), sizeof(long),
58 GFP_KERNEL);
59 if (cb_buff->buffer.scan_mask == NULL) {
60 ret = -ENOMEM;
61 goto error_release_channels;
62 }
63 chan = &cb_buff->channels[0];
64 while (chan->indio_dev) {
65 if (chan->indio_dev != indio_dev) {
66 ret = -EINVAL;
67 goto error_release_channels;
68 }
69 set_bit(chan->channel->scan_index,
70 cb_buff->buffer.scan_mask);
71 chan++;
72 }
73
74 return cb_buff;
75
76error_release_channels:
77 iio_channel_release_all(cb_buff->channels);
78error_free_cb_buff:
79 kfree(cb_buff);
80error_ret:
81 return ERR_PTR(ret);
82}
83EXPORT_SYMBOL_GPL(iio_channel_get_all_cb);
84
85int iio_channel_start_all_cb(struct iio_cb_buffer *cb_buff)
86{
87 return iio_update_buffers(cb_buff->channels[0].indio_dev,
88 &cb_buff->buffer,
89 NULL);
90}
91EXPORT_SYMBOL_GPL(iio_channel_start_all_cb);
92
93void iio_channel_stop_all_cb(struct iio_cb_buffer *cb_buff)
94{
95 iio_update_buffers(cb_buff->channels[0].indio_dev,
96 NULL,
97 &cb_buff->buffer);
98}
99EXPORT_SYMBOL_GPL(iio_channel_stop_all_cb);
100
101void iio_channel_release_all_cb(struct iio_cb_buffer *cb_buff)
102{
103 iio_channel_release_all(cb_buff->channels);
104 kfree(cb_buff);
105}
106EXPORT_SYMBOL_GPL(iio_channel_release_all_cb);
107
108struct iio_channel
109*iio_channel_cb_get_channels(const struct iio_cb_buffer *cb_buffer)
110{
111 return cb_buffer->channels;
112}
113EXPORT_SYMBOL_GPL(iio_channel_cb_get_channels);