#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/iio/buffer.h>
#include <linux/iio/consumer.h>

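/*
 * In-kernel buffer consumer: wraps a struct iio_buffer and hands every scan
 * pushed into it to a consumer-supplied callback.
 */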
struct iio_cb_buffer {
	struct iio_buffer buffer;
	int (*cb)(const void *data, void *private);
	void *private;
	struct iio_channel *channels;
};

static struct iio_cb_buffer *buffer_to_cb_buffer(struct iio_buffer *buffer)
{
	return container_of(buffer, struct iio_cb_buffer, buffer);
}

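/*
 * Called by the IIO core for each scan pushed to this buffer; forward the
 * scan to the consumer's callback.
 */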
static int iio_buffer_cb_store_to(struct iio_buffer *buffer, const void *data)
{
	struct iio_cb_buffer *cb_buff = buffer_to_cb_buffer(buffer);
	return cb_buff->cb(data, cb_buff->private);
}

static void iio_buffer_cb_release(struct iio_buffer *buffer)
{
	struct iio_cb_buffer *cb_buff = buffer_to_cb_buffer(buffer);
	kfree(cb_buff->buffer.scan_mask);
	kfree(cb_buff);
}

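/*
 * Minimal access functions for a push-only buffer: store incoming scans and
 * free the buffer once its last reference is dropped.
 */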
static const struct iio_buffer_access_funcs iio_cb_access = {
	.store_to = &iio_buffer_cb_store_to,
	.release = &iio_buffer_cb_release,
};

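/**
 * iio_channel_get_all_cb() - get a callback buffer covering all channels
 *			      mapped to a consumer device
 * @dev:	Consumer device the channel map belongs to.
 * @cb:		Callback invoked for every scan pushed to the buffer.
 * @private:	Opaque pointer passed back to @cb.
 *
 * All mapped channels must belong to the same IIO device; if they do not,
 * -EINVAL is returned.  Returns the new callback buffer on success or an
 * ERR_PTR() on failure.
 */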
struct iio_cb_buffer *iio_channel_get_all_cb(struct device *dev,
					     int (*cb)(const void *data,
						       void *private),
					     void *private)
{
	int ret;
	struct iio_cb_buffer *cb_buff;
	struct iio_dev *indio_dev;
	struct iio_channel *chan;

	cb_buff = kzalloc(sizeof(*cb_buff), GFP_KERNEL);
	if (cb_buff == NULL) {
		ret = -ENOMEM;
		goto error_ret;
	}

	iio_buffer_init(&cb_buff->buffer);

	cb_buff->private = private;
	cb_buff->cb = cb;
	cb_buff->buffer.access = &iio_cb_access;
	INIT_LIST_HEAD(&cb_buff->buffer.demux_list);

	cb_buff->channels = iio_channel_get_all(dev);
	if (IS_ERR(cb_buff->channels)) {
		ret = PTR_ERR(cb_buff->channels);
		goto error_free_cb_buff;
	}

	indio_dev = cb_buff->channels[0].indio_dev;
	cb_buff->buffer.scan_mask
		= kcalloc(BITS_TO_LONGS(indio_dev->masklength), sizeof(long),
			  GFP_KERNEL);
	if (cb_buff->buffer.scan_mask == NULL) {
		ret = -ENOMEM;
		goto error_release_channels;
	}
	chan = &cb_buff->channels[0];
	while (chan->indio_dev) {
		if (chan->indio_dev != indio_dev) {
			ret = -EINVAL;
			goto error_free_scan_mask;
		}
		set_bit(chan->channel->scan_index,
			cb_buff->buffer.scan_mask);
		chan++;
	}

	return cb_buff;

error_free_scan_mask:
	kfree(cb_buff->buffer.scan_mask);
error_release_channels:
	iio_channel_release_all(cb_buff->channels);
error_free_cb_buff:
	kfree(cb_buff);
error_ret:
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(iio_channel_get_all_cb);

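/**
 * iio_channel_start_all_cb() - start capturing data into the callback buffer
 * @cb_buff:	Callback buffer obtained from iio_channel_get_all_cb().
 *
 * Attaches the buffer to the device so captured scans are delivered to the
 * registered callback.
 */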
int iio_channel_start_all_cb(struct iio_cb_buffer *cb_buff)
{
	return iio_update_buffers(cb_buff->channels[0].indio_dev,
				  &cb_buff->buffer,
				  NULL);
}
EXPORT_SYMBOL_GPL(iio_channel_start_all_cb);

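/**
 * iio_channel_stop_all_cb() - stop capturing data into the callback buffer
 * @cb_buff:	Callback buffer obtained from iio_channel_get_all_cb().
 *
 * Detaches the buffer from the device so the callback is no longer invoked.
 */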
void iio_channel_stop_all_cb(struct iio_cb_buffer *cb_buff)
{
	iio_update_buffers(cb_buff->channels[0].indio_dev,
			   NULL,
			   &cb_buff->buffer);
}
EXPORT_SYMBOL_GPL(iio_channel_stop_all_cb);

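/**
 * iio_channel_release_all_cb() - release the channels and the callback buffer
 * @cb_buff:	Callback buffer obtained from iio_channel_get_all_cb().
 *
 * Releases the channel map and drops the buffer reference; the buffer and its
 * scan mask are freed once the last reference is gone.
 */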
void iio_channel_release_all_cb(struct iio_cb_buffer *cb_buff)
{
	iio_channel_release_all(cb_buff->channels);
	iio_buffer_put(&cb_buff->buffer);
}
EXPORT_SYMBOL_GPL(iio_channel_release_all_cb);

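/**
 * iio_channel_cb_get_channels() - get the channels backing a callback buffer
 * @cb_buffer:	Callback buffer obtained from iio_channel_get_all_cb().
 *
 * Returns the channel array (terminated by an entry with a NULL indio_dev)
 * so callers can inspect the individual channels.
 */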
struct iio_channel
*iio_channel_cb_get_channels(const struct iio_cb_buffer *cb_buffer)
{
	return cb_buffer->channels;
}
EXPORT_SYMBOL_GPL(iio_channel_cb_get_channels);
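
/*
 * Illustrative usage sketch, not part of the original file: one way a
 * consumer driver might wire this API up.  The function names example_cb()
 * and example_setup() below are hypothetical; "data" passed to the callback
 * is one scan of the enabled channels.
 *
 *	static int example_cb(const void *data, void *private)
 *	{
 *		return 0;
 *	}
 *
 *	static int example_setup(struct device *dev)
 *	{
 *		struct iio_cb_buffer *cb;
 *
 *		cb = iio_channel_get_all_cb(dev, example_cb, NULL);
 *		if (IS_ERR(cb))
 *			return PTR_ERR(cb);
 *
 *		return iio_channel_start_all_cb(cb);
 *	}
 *
 * Teardown is the reverse: iio_channel_stop_all_cb() followed by
 * iio_channel_release_all_cb().
 */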