#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/iio/buffer.h>
#include <linux/iio/consumer.h>

struct iio_cb_buffer {
	struct iio_buffer buffer;
	int (*cb)(u8 *data, void *private);
	void *private;
	struct iio_channel *channels;
};

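/*
 * The callback buffer poses as an ordinary iio_buffer attached to the
 * producer device; its ->store_to() simply forwards each scan pushed by
 * the producer to the consumer's callback.
 */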
static int iio_buffer_cb_store_to(struct iio_buffer *buffer, u8 *data)
{
	struct iio_cb_buffer *cb_buff = container_of(buffer,
						     struct iio_cb_buffer,
						     buffer);

	return cb_buff->cb(data, cb_buff->private);
}

static const struct iio_buffer_access_funcs iio_cb_access = {
	.store_to = &iio_buffer_cb_store_to,
};

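/*
 * Allocate a callback buffer and attach it to all IIO channels mapped to
 * the consumer device @dev.  All channels must belong to the same producer
 * device; their scan indices are collected into the buffer's scan mask.
 * Returns the callback buffer on success or an ERR_PTR() on failure.
 */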
struct iio_cb_buffer *iio_channel_get_all_cb(struct device *dev,
					     int (*cb)(u8 *data,
						       void *private),
					     void *private)
{
	int ret;
	struct iio_cb_buffer *cb_buff;
	struct iio_dev *indio_dev;
	struct iio_channel *chan;

	cb_buff = kzalloc(sizeof(*cb_buff), GFP_KERNEL);
	if (cb_buff == NULL) {
		ret = -ENOMEM;
		goto error_ret;
	}

	cb_buff->private = private;
	cb_buff->cb = cb;
	cb_buff->buffer.access = &iio_cb_access;
	INIT_LIST_HEAD(&cb_buff->buffer.demux_list);

	cb_buff->channels = iio_channel_get_all(dev);
	if (IS_ERR(cb_buff->channels)) {
		ret = PTR_ERR(cb_buff->channels);
		goto error_free_cb_buff;
	}

	indio_dev = cb_buff->channels[0].indio_dev;
	cb_buff->buffer.scan_mask
		= kcalloc(BITS_TO_LONGS(indio_dev->masklength), sizeof(long),
			  GFP_KERNEL);
	if (cb_buff->buffer.scan_mask == NULL) {
		ret = -ENOMEM;
		goto error_release_channels;
	}
	chan = &cb_buff->channels[0];
	while (chan->indio_dev) {
		if (chan->indio_dev != indio_dev) {
			ret = -EINVAL;
			goto error_free_scan_mask;
		}
		set_bit(chan->channel->scan_index,
			cb_buff->buffer.scan_mask);
		chan++;
	}

	return cb_buff;

error_free_scan_mask:
	kfree(cb_buff->buffer.scan_mask);
error_release_channels:
	iio_channel_release_all(cb_buff->channels);
error_free_cb_buff:
	kfree(cb_buff);
error_ret:
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(iio_channel_get_all_cb);

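/*
 * Start capture: insert the callback buffer into the producer device's
 * buffer list so the callback begins receiving data.
 */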
int iio_channel_start_all_cb(struct iio_cb_buffer *cb_buff)
{
	return iio_update_buffers(cb_buff->channels[0].indio_dev,
				  &cb_buff->buffer,
				  NULL);
}
EXPORT_SYMBOL_GPL(iio_channel_start_all_cb);

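/*
 * Stop capture: remove the callback buffer from the producer device's
 * buffer list.
 */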
void iio_channel_stop_all_cb(struct iio_cb_buffer *cb_buff)
{
	iio_update_buffers(cb_buff->channels[0].indio_dev,
			   NULL,
			   &cb_buff->buffer);
}
EXPORT_SYMBOL_GPL(iio_channel_stop_all_cb);

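/*
 * Release the channels and free all resources acquired by
 * iio_channel_get_all_cb().
 */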
void iio_channel_release_all_cb(struct iio_cb_buffer *cb_buff)
{
	kfree(cb_buff->buffer.scan_mask);
	iio_channel_release_all(cb_buff->channels);
	kfree(cb_buff);
}
EXPORT_SYMBOL_GPL(iio_channel_release_all_cb);

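/*
 * Give the consumer access to the underlying channel array, e.g. to query
 * per-channel information such as scale.
 */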
struct iio_channel
*iio_channel_cb_get_channels(const struct iio_cb_buffer *cb_buffer)
{
	return cb_buffer->channels;
}
EXPORT_SYMBOL_GPL(iio_channel_cb_get_channels);
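
/*
 * Minimal usage sketch (illustrative only): how a consumer driver might use
 * the functions above from its setup and teardown paths.  The callback,
 * device pointer and function names below are assumptions made for the
 * example, not part of this file.
 */
static struct iio_cb_buffer *example_cb_buff;

static int example_consumer_cb(u8 *data, void *private)
{
	/* Called once for each scan pushed by the producer device. */
	return 0;
}

static int example_consumer_start(struct device *dev)
{
	int ret;

	example_cb_buff = iio_channel_get_all_cb(dev, example_consumer_cb,
						 NULL);
	if (IS_ERR(example_cb_buff))
		return PTR_ERR(example_cb_buff);

	ret = iio_channel_start_all_cb(example_cb_buff);
	if (ret)
		iio_channel_release_all_cb(example_cb_buff);

	return ret;
}

static void example_consumer_stop(void)
{
	iio_channel_stop_all_cb(example_cb_buff);
	iio_channel_release_all_cb(example_cb_buff);
}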