/* The industrial I/O core
 *
 * Copyright (c) 2008 Jonathan Cameron
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * Handling of buffer allocation / resizing.
 *
 *
 * Things to look at here.
 * - Better memory allocation techniques?
 * - Alternative access techniques?
 */
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/cdev.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/sched.h>

#include <linux/iio/iio.h>
#include "iio_core.h"
#include <linux/iio/sysfs.h>
#include <linux/iio/buffer.h>

static const char * const iio_endian_prefix[] = {
	[IIO_BE] = "be",
	[IIO_LE] = "le",
};

static bool iio_buffer_is_active(struct iio_buffer *buf)
{
	return !list_empty(&buf->buffer_list);
}

static bool iio_buffer_data_available(struct iio_buffer *buf)
{
	if (buf->access->data_available)
		return buf->access->data_available(buf);

	return buf->stufftoread;
}

/**
 * iio_buffer_read_first_n_outer() - chrdev read for buffer access
 *
 * This function relies on all buffer implementations having an
 * iio_buffer as their first element.
 **/
ssize_t iio_buffer_read_first_n_outer(struct file *filp, char __user *buf,
				      size_t n, loff_t *f_ps)
{
	struct iio_dev *indio_dev = filp->private_data;
	struct iio_buffer *rb = indio_dev->buffer;

	if (!indio_dev->info)
		return -ENODEV;

	if (!rb || !rb->access->read_first_n)
		return -EINVAL;
	return rb->access->read_first_n(rb, n, buf);
}

/**
 * iio_buffer_poll() - poll the buffer to find out if it has data
 */
unsigned int iio_buffer_poll(struct file *filp,
			     struct poll_table_struct *wait)
{
	struct iio_dev *indio_dev = filp->private_data;
	struct iio_buffer *rb = indio_dev->buffer;

	if (!indio_dev->info)
		return -ENODEV;

	poll_wait(filp, &rb->pollq, wait);
	if (iio_buffer_data_available(rb))
		return POLLIN | POLLRDNORM;
	/* need a way of knowing if there may be enough data... */
	return 0;
}

/**
 * iio_buffer_wakeup_poll - Wakes up the buffer waitqueue
 * @indio_dev: The IIO device
 *
 * Wakes up the event waitqueue used for poll(). Should usually
 * be called when the device is unregistered.
 */
void iio_buffer_wakeup_poll(struct iio_dev *indio_dev)
{
	if (!indio_dev->buffer)
		return;

	wake_up(&indio_dev->buffer->pollq);
}

void iio_buffer_init(struct iio_buffer *buffer)
{
	INIT_LIST_HEAD(&buffer->demux_list);
	INIT_LIST_HEAD(&buffer->buffer_list);
	init_waitqueue_head(&buffer->pollq);
	kref_init(&buffer->ref);
}
EXPORT_SYMBOL(iio_buffer_init);
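
/*
 * Illustrative sketch (not from this file): a buffer implementation
 * typically embeds struct iio_buffer as its first member and calls
 * iio_buffer_init() from its allocation routine. Names such as my_buffer
 * and my_buffer_alloc below are hypothetical:
 *
 *	struct my_buffer {
 *		struct iio_buffer buffer;
 *		// implementation specific fields
 *	};
 *
 *	struct iio_buffer *my_buffer_alloc(void)
 *	{
 *		struct my_buffer *mb = kzalloc(sizeof(*mb), GFP_KERNEL);
 *
 *		if (!mb)
 *			return NULL;
 *		iio_buffer_init(&mb->buffer);
 *		return &mb->buffer;
 *	}
 *
 * Embedding iio_buffer first is what lets the chrdev code above treat
 * every implementation uniformly through indio_dev->buffer.
 */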

static ssize_t iio_show_scan_index(struct device *dev,
				   struct device_attribute *attr,
				   char *buf)
{
	return sprintf(buf, "%u\n", to_iio_dev_attr(attr)->c->scan_index);
}

static ssize_t iio_show_fixed_type(struct device *dev,
				   struct device_attribute *attr,
				   char *buf)
{
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
	u8 type = this_attr->c->scan_type.endianness;

	if (type == IIO_CPU) {
#ifdef __LITTLE_ENDIAN
		type = IIO_LE;
#else
		type = IIO_BE;
#endif
	}
	return sprintf(buf, "%s:%c%d/%d>>%u\n",
		       iio_endian_prefix[type],
		       this_attr->c->scan_type.sign,
		       this_attr->c->scan_type.realbits,
		       this_attr->c->scan_type.storagebits,
		       this_attr->c->scan_type.shift);
}
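
/*
 * For reference, the string produced above follows the
 * [be|le]:[s|u]bits/storagebits>>shift convention of the IIO ABI. For
 * example, a hypothetical channel with scan_type sign 's', realbits 12,
 * storagebits 16, shift 4 and IIO_LE endianness would read back as:
 *
 *	le:s12/16>>4
 *
 * i.e. a little-endian, signed, 12-bit value stored in 16 bits and
 * right-shifted by 4 before use.
 */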

static ssize_t iio_scan_el_show(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	int ret;
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);

	ret = test_bit(to_iio_dev_attr(attr)->address,
		       indio_dev->buffer->scan_mask);

	return sprintf(buf, "%d\n", ret);
}

static int iio_scan_mask_clear(struct iio_buffer *buffer, int bit)
{
	clear_bit(bit, buffer->scan_mask);
	return 0;
}

static ssize_t iio_scan_el_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf,
				 size_t len)
{
	int ret;
	bool state;
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_buffer *buffer = indio_dev->buffer;
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);

	ret = strtobool(buf, &state);
	if (ret < 0)
		return ret;
	mutex_lock(&indio_dev->mlock);
	if (iio_buffer_is_active(indio_dev->buffer)) {
		ret = -EBUSY;
		goto error_ret;
	}
	ret = iio_scan_mask_query(indio_dev, buffer, this_attr->address);
	if (ret < 0)
		goto error_ret;
	if (!state && ret) {
		ret = iio_scan_mask_clear(buffer, this_attr->address);
		if (ret)
			goto error_ret;
	} else if (state && !ret) {
		ret = iio_scan_mask_set(indio_dev, buffer, this_attr->address);
		if (ret)
			goto error_ret;
	}

error_ret:
	mutex_unlock(&indio_dev->mlock);

	return ret < 0 ? ret : len;
}

static ssize_t iio_scan_el_ts_show(struct device *dev,
				   struct device_attribute *attr,
				   char *buf)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	return sprintf(buf, "%d\n", indio_dev->buffer->scan_timestamp);
}

static ssize_t iio_scan_el_ts_store(struct device *dev,
				    struct device_attribute *attr,
				    const char *buf,
				    size_t len)
{
	int ret;
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	bool state;

	ret = strtobool(buf, &state);
	if (ret < 0)
		return ret;

	mutex_lock(&indio_dev->mlock);
	if (iio_buffer_is_active(indio_dev->buffer)) {
		ret = -EBUSY;
		goto error_ret;
	}
	indio_dev->buffer->scan_timestamp = state;
error_ret:
	mutex_unlock(&indio_dev->mlock);

	return ret ? ret : len;
}

static int iio_buffer_add_channel_sysfs(struct iio_dev *indio_dev,
					const struct iio_chan_spec *chan)
{
	int ret, attrcount = 0;
	struct iio_buffer *buffer = indio_dev->buffer;

	ret = __iio_add_chan_devattr("index",
				     chan,
				     &iio_show_scan_index,
				     NULL,
				     0,
				     IIO_SEPARATE,
				     &indio_dev->dev,
				     &buffer->scan_el_dev_attr_list);
	if (ret)
		goto error_ret;
	attrcount++;
	ret = __iio_add_chan_devattr("type",
				     chan,
				     &iio_show_fixed_type,
				     NULL,
				     0,
				     0,
				     &indio_dev->dev,
				     &buffer->scan_el_dev_attr_list);
	if (ret)
		goto error_ret;
	attrcount++;
	if (chan->type != IIO_TIMESTAMP)
		ret = __iio_add_chan_devattr("en",
					     chan,
					     &iio_scan_el_show,
					     &iio_scan_el_store,
					     chan->scan_index,
					     0,
					     &indio_dev->dev,
					     &buffer->scan_el_dev_attr_list);
	else
		ret = __iio_add_chan_devattr("en",
					     chan,
					     &iio_scan_el_ts_show,
					     &iio_scan_el_ts_store,
					     chan->scan_index,
					     0,
					     &indio_dev->dev,
					     &buffer->scan_el_dev_attr_list);
	if (ret)
		goto error_ret;
	attrcount++;
	ret = attrcount;
error_ret:
	return ret;
}
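
/*
 * The net effect of the above (a sketch of the resulting ABI, assuming a
 * hypothetical voltage channel with a valid scan index) is three files
 * under the device's scan_elements directory:
 *
 *	scan_elements/in_voltage0_index
 *	scan_elements/in_voltage0_type
 *	scan_elements/in_voltage0_en
 *
 * with the timestamp channel getting the iio_scan_el_ts_* handlers for
 * its "en" attribute instead.
 */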

static const char * const iio_scan_elements_group_name = "scan_elements";

int iio_buffer_register(struct iio_dev *indio_dev,
			const struct iio_chan_spec *channels,
			int num_channels)
{
	struct iio_dev_attr *p;
	struct attribute **attr;
	struct iio_buffer *buffer = indio_dev->buffer;
	int ret, i, attrn, attrcount, attrcount_orig = 0;

	if (buffer->attrs)
		indio_dev->groups[indio_dev->groupcounter++] = buffer->attrs;

	if (buffer->scan_el_attrs != NULL) {
		attr = buffer->scan_el_attrs->attrs;
		while (*attr++ != NULL)
			attrcount_orig++;
	}
	attrcount = attrcount_orig;
	INIT_LIST_HEAD(&buffer->scan_el_dev_attr_list);
	if (channels) {
		/* Build the scan element attributes for each channel */
		for (i = 0; i < num_channels; i++) {
			if (channels[i].scan_index < 0)
				continue;

			/* Establish necessary mask length */
			if (channels[i].scan_index >
			    (int)indio_dev->masklength - 1)
				indio_dev->masklength
					= channels[i].scan_index + 1;

			ret = iio_buffer_add_channel_sysfs(indio_dev,
							   &channels[i]);
			if (ret < 0)
				goto error_cleanup_dynamic;
			attrcount += ret;
			if (channels[i].type == IIO_TIMESTAMP)
				indio_dev->scan_index_timestamp =
					channels[i].scan_index;
		}
		if (indio_dev->masklength && buffer->scan_mask == NULL) {
			buffer->scan_mask = kcalloc(BITS_TO_LONGS(indio_dev->masklength),
						    sizeof(*buffer->scan_mask),
						    GFP_KERNEL);
			if (buffer->scan_mask == NULL) {
				ret = -ENOMEM;
				goto error_cleanup_dynamic;
			}
		}
	}

	buffer->scan_el_group.name = iio_scan_elements_group_name;

	buffer->scan_el_group.attrs = kcalloc(attrcount + 1,
					      sizeof(buffer->scan_el_group.attrs[0]),
					      GFP_KERNEL);
	if (buffer->scan_el_group.attrs == NULL) {
		ret = -ENOMEM;
		goto error_free_scan_mask;
	}
	if (buffer->scan_el_attrs)
		memcpy(buffer->scan_el_group.attrs, buffer->scan_el_attrs,
		       sizeof(buffer->scan_el_group.attrs[0])*attrcount_orig);
	attrn = attrcount_orig;

	list_for_each_entry(p, &buffer->scan_el_dev_attr_list, l)
		buffer->scan_el_group.attrs[attrn++] = &p->dev_attr.attr;
	indio_dev->groups[indio_dev->groupcounter++] = &buffer->scan_el_group;

	return 0;

error_free_scan_mask:
	kfree(buffer->scan_mask);
error_cleanup_dynamic:
	iio_free_chan_devattr_list(&buffer->scan_el_dev_attr_list);

	return ret;
}
EXPORT_SYMBOL(iio_buffer_register);

void iio_buffer_unregister(struct iio_dev *indio_dev)
{
	kfree(indio_dev->buffer->scan_mask);
	kfree(indio_dev->buffer->scan_el_group.attrs);
	iio_free_chan_devattr_list(&indio_dev->buffer->scan_el_dev_attr_list);
}
EXPORT_SYMBOL(iio_buffer_unregister);

ssize_t iio_buffer_read_length(struct device *dev,
			       struct device_attribute *attr,
			       char *buf)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_buffer *buffer = indio_dev->buffer;

	if (buffer->access->get_length)
		return sprintf(buf, "%d\n",
			       buffer->access->get_length(buffer));

	return 0;
}
EXPORT_SYMBOL(iio_buffer_read_length);

ssize_t iio_buffer_write_length(struct device *dev,
				struct device_attribute *attr,
				const char *buf,
				size_t len)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_buffer *buffer = indio_dev->buffer;
	unsigned int val;
	int ret;

	ret = kstrtouint(buf, 10, &val);
	if (ret)
		return ret;

	if (buffer->access->get_length)
		if (val == buffer->access->get_length(buffer))
			return len;

	mutex_lock(&indio_dev->mlock);
	if (iio_buffer_is_active(indio_dev->buffer)) {
		ret = -EBUSY;
	} else {
		if (buffer->access->set_length)
			buffer->access->set_length(buffer, val);
		ret = 0;
	}
	mutex_unlock(&indio_dev->mlock);

	return ret ? ret : len;
}
EXPORT_SYMBOL(iio_buffer_write_length);

ssize_t iio_buffer_show_enable(struct device *dev,
			       struct device_attribute *attr,
			       char *buf)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	return sprintf(buf, "%d\n", iio_buffer_is_active(indio_dev->buffer));
}
EXPORT_SYMBOL(iio_buffer_show_enable);

/* Note: NULL is used as the error indicator, since a NULL mask makes no sense. */
static const unsigned long *iio_scan_mask_match(const unsigned long *av_masks,
						unsigned int masklength,
						const unsigned long *mask)
{
	if (bitmap_empty(mask, masklength))
		return NULL;
	while (*av_masks) {
		if (bitmap_subset(mask, av_masks, masklength))
			return av_masks;
		av_masks += BITS_TO_LONGS(masklength);
	}
	return NULL;
}
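
/*
 * A worked illustration (hypothetical masks): available_scan_masks is a
 * zero-terminated array of bitmaps, each BITS_TO_LONGS(masklength) longs
 * wide. Assuming masklength fits in one long and
 *
 *	static const unsigned long my_avail_masks[] = { 0x3, 0xf, 0 };
 *
 * a request for channels {0} or {0,1} matches 0x3, a request for {2,3}
 * matches 0xf, and a request for {4} returns NULL because no advertised
 * mask is a superset of it.
 */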

static int iio_compute_scan_bytes(struct iio_dev *indio_dev,
				  const unsigned long *mask, bool timestamp)
{
	const struct iio_chan_spec *ch;
	unsigned bytes = 0;
	int length, i;

	/* How much space will the demuxed element take? */
	for_each_set_bit(i, mask,
			 indio_dev->masklength) {
		ch = iio_find_channel_from_si(indio_dev, i);
		length = ch->scan_type.storagebits / 8;
		bytes = ALIGN(bytes, length);
		bytes += length;
	}
	if (timestamp) {
		ch = iio_find_channel_from_si(indio_dev,
					      indio_dev->scan_index_timestamp);
		length = ch->scan_type.storagebits / 8;
		bytes = ALIGN(bytes, length);
		bytes += length;
	}
	return bytes;
}
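
/*
 * Worked example of the alignment rules above (hypothetical scan): for a
 * 16-bit channel followed by a 32-bit channel plus a 64-bit timestamp,
 * the running total goes 0 -> 2 (u16), ALIGN(2, 4) = 4 -> 8 (u32),
 * ALIGN(8, 8) = 8 -> 16 (timestamp), so scan_bytes would be 16. Each
 * element is naturally aligned to its own storage size, which may insert
 * padding between elements.
 */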

static void iio_buffer_activate(struct iio_dev *indio_dev,
				struct iio_buffer *buffer)
{
	iio_buffer_get(buffer);
	list_add(&buffer->buffer_list, &indio_dev->buffer_list);
}

static void iio_buffer_deactivate(struct iio_buffer *buffer)
{
	list_del_init(&buffer->buffer_list);
	iio_buffer_put(buffer);
}

void iio_disable_all_buffers(struct iio_dev *indio_dev)
{
	struct iio_buffer *buffer, *_buffer;

	if (list_empty(&indio_dev->buffer_list))
		return;

	if (indio_dev->setup_ops->predisable)
		indio_dev->setup_ops->predisable(indio_dev);

	list_for_each_entry_safe(buffer, _buffer,
				 &indio_dev->buffer_list, buffer_list)
		iio_buffer_deactivate(buffer);

	indio_dev->currentmode = INDIO_DIRECT_MODE;
	if (indio_dev->setup_ops->postdisable)
		indio_dev->setup_ops->postdisable(indio_dev);

	if (indio_dev->available_scan_masks == NULL)
		kfree(indio_dev->active_scan_mask);
}

static void iio_buffer_update_bytes_per_datum(struct iio_dev *indio_dev,
	struct iio_buffer *buffer)
{
	unsigned int bytes;

	if (!buffer->access->set_bytes_per_datum)
		return;

	bytes = iio_compute_scan_bytes(indio_dev, buffer->scan_mask,
		buffer->scan_timestamp);

	buffer->access->set_bytes_per_datum(buffer, bytes);
}

static int __iio_update_buffers(struct iio_dev *indio_dev,
		       struct iio_buffer *insert_buffer,
		       struct iio_buffer *remove_buffer)
{
	int ret;
	int success = 0;
	struct iio_buffer *buffer;
	unsigned long *compound_mask;
	const unsigned long *old_mask;

	/* Wind down existing buffers - iff there are any */
	if (!list_empty(&indio_dev->buffer_list)) {
		if (indio_dev->setup_ops->predisable) {
			ret = indio_dev->setup_ops->predisable(indio_dev);
			if (ret)
				goto error_ret;
		}
		indio_dev->currentmode = INDIO_DIRECT_MODE;
		if (indio_dev->setup_ops->postdisable) {
			ret = indio_dev->setup_ops->postdisable(indio_dev);
			if (ret)
				goto error_ret;
		}
	}
	/* Keep a copy of current setup to allow roll back */
	old_mask = indio_dev->active_scan_mask;
	if (!indio_dev->available_scan_masks)
		indio_dev->active_scan_mask = NULL;

	if (remove_buffer)
		iio_buffer_deactivate(remove_buffer);
	if (insert_buffer)
		iio_buffer_activate(indio_dev, insert_buffer);

	/* If no buffers in list, we are done */
	if (list_empty(&indio_dev->buffer_list)) {
		indio_dev->currentmode = INDIO_DIRECT_MODE;
		if (indio_dev->available_scan_masks == NULL)
			kfree(old_mask);
		return 0;
	}

	/* What scan mask do we actually have? */
	compound_mask = kcalloc(BITS_TO_LONGS(indio_dev->masklength),
				sizeof(long), GFP_KERNEL);
	if (compound_mask == NULL) {
		if (indio_dev->available_scan_masks == NULL)
			kfree(old_mask);
		return -ENOMEM;
	}
	indio_dev->scan_timestamp = 0;

	list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list) {
		bitmap_or(compound_mask, compound_mask, buffer->scan_mask,
			  indio_dev->masklength);
		indio_dev->scan_timestamp |= buffer->scan_timestamp;
	}
	if (indio_dev->available_scan_masks) {
		indio_dev->active_scan_mask =
			iio_scan_mask_match(indio_dev->available_scan_masks,
					    indio_dev->masklength,
					    compound_mask);
		if (indio_dev->active_scan_mask == NULL) {
			/*
			 * Roll back.
			 * Note can only occur when adding a buffer.
			 */
			iio_buffer_deactivate(insert_buffer);
			if (old_mask) {
				indio_dev->active_scan_mask = old_mask;
				success = -EINVAL;
			} else {
				kfree(compound_mask);
				ret = -EINVAL;
				goto error_ret;
			}
		}
	} else {
		indio_dev->active_scan_mask = compound_mask;
	}

	iio_update_demux(indio_dev);

	/* Wind up again */
	if (indio_dev->setup_ops->preenable) {
		ret = indio_dev->setup_ops->preenable(indio_dev);
		if (ret) {
			printk(KERN_ERR
			       "Buffer not started: buffer preenable failed (%d)\n", ret);
			goto error_remove_inserted;
		}
	}
	indio_dev->scan_bytes =
		iio_compute_scan_bytes(indio_dev,
				       indio_dev->active_scan_mask,
				       indio_dev->scan_timestamp);
	list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list) {
		iio_buffer_update_bytes_per_datum(indio_dev, buffer);
		if (buffer->access->request_update) {
			ret = buffer->access->request_update(buffer);
			if (ret) {
				printk(KERN_INFO
				       "Buffer not started: buffer parameter update failed (%d)\n", ret);
				goto error_run_postdisable;
			}
		}
	}
	if (indio_dev->info->update_scan_mode) {
		ret = indio_dev->info
			->update_scan_mode(indio_dev,
					   indio_dev->active_scan_mask);
		if (ret < 0) {
			printk(KERN_INFO "Buffer not started: update scan mode failed (%d)\n", ret);
			goto error_run_postdisable;
		}
	}
	/* Definitely possible for devices to support both of these. */
	if (indio_dev->modes & INDIO_BUFFER_TRIGGERED) {
		if (!indio_dev->trig) {
			printk(KERN_INFO "Buffer not started: no trigger\n");
			ret = -EINVAL;
			/* Can only occur on first buffer */
			goto error_run_postdisable;
		}
		indio_dev->currentmode = INDIO_BUFFER_TRIGGERED;
	} else if (indio_dev->modes & INDIO_BUFFER_HARDWARE) {
		indio_dev->currentmode = INDIO_BUFFER_HARDWARE;
	} else { /* Should never be reached */
		ret = -EINVAL;
		goto error_run_postdisable;
	}

	if (indio_dev->setup_ops->postenable) {
		ret = indio_dev->setup_ops->postenable(indio_dev);
		if (ret) {
			printk(KERN_INFO
			       "Buffer not started: postenable failed (%d)\n", ret);
			indio_dev->currentmode = INDIO_DIRECT_MODE;
			if (indio_dev->setup_ops->postdisable)
				indio_dev->setup_ops->postdisable(indio_dev);
			goto error_disable_all_buffers;
		}
	}

	if (indio_dev->available_scan_masks)
		kfree(compound_mask);
	else
		kfree(old_mask);

	return success;

error_disable_all_buffers:
	indio_dev->currentmode = INDIO_DIRECT_MODE;
error_run_postdisable:
	if (indio_dev->setup_ops->postdisable)
		indio_dev->setup_ops->postdisable(indio_dev);
error_remove_inserted:
	if (insert_buffer)
		iio_buffer_deactivate(insert_buffer);
	indio_dev->active_scan_mask = old_mask;
	kfree(compound_mask);
error_ret:
	return ret;
}

int iio_update_buffers(struct iio_dev *indio_dev,
		       struct iio_buffer *insert_buffer,
		       struct iio_buffer *remove_buffer)
{
	int ret;

	if (insert_buffer == remove_buffer)
		return 0;

	mutex_lock(&indio_dev->info_exist_lock);
	mutex_lock(&indio_dev->mlock);

	if (insert_buffer && iio_buffer_is_active(insert_buffer))
		insert_buffer = NULL;

	if (remove_buffer && !iio_buffer_is_active(remove_buffer))
		remove_buffer = NULL;

	if (!insert_buffer && !remove_buffer) {
		ret = 0;
		goto out_unlock;
	}

	if (indio_dev->info == NULL) {
		ret = -ENODEV;
		goto out_unlock;
	}

	ret = __iio_update_buffers(indio_dev, insert_buffer, remove_buffer);

out_unlock:
	mutex_unlock(&indio_dev->mlock);
	mutex_unlock(&indio_dev->info_exist_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_update_buffers);
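
/*
 * In-kernel usage sketch (hypothetical caller): a consumer that wants to
 * attach its own buffer would do something like
 *
 *	ret = iio_update_buffers(indio_dev, my_buffer, NULL);
 *	if (ret)
 *		return ret;
 *	...
 *	iio_update_buffers(indio_dev, NULL, my_buffer);
 *
 * inserting the buffer on the way in and removing it again on the way
 * out; the core handles the disable/enable sequencing and locking around
 * the list change.
 */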

ssize_t iio_buffer_store_enable(struct device *dev,
				struct device_attribute *attr,
				const char *buf,
				size_t len)
{
	int ret;
	bool requested_state;
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	bool inlist;

	ret = strtobool(buf, &requested_state);
	if (ret < 0)
		return ret;

	mutex_lock(&indio_dev->mlock);

	/* Find out if it is in the list */
	inlist = iio_buffer_is_active(indio_dev->buffer);
	/* Already in desired state */
	if (inlist == requested_state)
		goto done;

	if (requested_state)
		ret = __iio_update_buffers(indio_dev,
					   indio_dev->buffer, NULL);
	else
		ret = __iio_update_buffers(indio_dev,
					   NULL, indio_dev->buffer);

done:
	mutex_unlock(&indio_dev->mlock);
	return (ret < 0) ? ret : len;
}
EXPORT_SYMBOL(iio_buffer_store_enable);

/**
 * iio_validate_scan_mask_onehot() - Validates that exactly one channel is selected
 * @indio_dev: the iio device
 * @mask: scan mask to be checked
 *
 * Return true if exactly one bit is set in the scan mask, false otherwise. It
 * can be used for devices where only one channel can be active for sampling at
 * a time.
 */
bool iio_validate_scan_mask_onehot(struct iio_dev *indio_dev,
				   const unsigned long *mask)
{
	return bitmap_weight(mask, indio_dev->masklength) == 1;
}
EXPORT_SYMBOL_GPL(iio_validate_scan_mask_onehot);
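
/*
 * Usage sketch: a driver whose hardware can sample only one channel at a
 * time plugs this helper into its setup ops (struct name hypothetical):
 *
 *	static const struct iio_buffer_setup_ops my_setup_ops = {
 *		.validate_scan_mask = &iio_validate_scan_mask_onehot,
 *	};
 *
 * so that any attempt to enable more than one scan element is rejected
 * before the buffer is started.
 */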

static bool iio_validate_scan_mask(struct iio_dev *indio_dev,
				   const unsigned long *mask)
{
	if (!indio_dev->setup_ops->validate_scan_mask)
		return true;

	return indio_dev->setup_ops->validate_scan_mask(indio_dev, mask);
}

/**
 * iio_scan_mask_set() - set particular bit in the scan mask
 * @indio_dev: the iio device
 * @buffer: the buffer whose scan mask we are interested in
 * @bit: the bit to be set.
 *
 * Note that at this point we have no way of knowing what other
 * buffers might request, hence this code only verifies that the
 * individual buffer's request is plausible.
 */
int iio_scan_mask_set(struct iio_dev *indio_dev,
		      struct iio_buffer *buffer, int bit)
{
	const unsigned long *mask;
	unsigned long *trialmask;

	trialmask = kmalloc(sizeof(*trialmask)*
			    BITS_TO_LONGS(indio_dev->masklength),
			    GFP_KERNEL);

	if (trialmask == NULL)
		return -ENOMEM;
	if (!indio_dev->masklength) {
		WARN(1, "Trying to set scan mask prior to registering buffer\n");
		goto err_invalid_mask;
	}
	bitmap_copy(trialmask, buffer->scan_mask, indio_dev->masklength);
	set_bit(bit, trialmask);

	if (!iio_validate_scan_mask(indio_dev, trialmask))
		goto err_invalid_mask;

	if (indio_dev->available_scan_masks) {
		mask = iio_scan_mask_match(indio_dev->available_scan_masks,
					   indio_dev->masklength,
					   trialmask);
		if (!mask)
			goto err_invalid_mask;
	}
	bitmap_copy(buffer->scan_mask, trialmask, indio_dev->masklength);

	kfree(trialmask);

	return 0;

err_invalid_mask:
	kfree(trialmask);
	return -EINVAL;
}
EXPORT_SYMBOL_GPL(iio_scan_mask_set);

int iio_scan_mask_query(struct iio_dev *indio_dev,
			struct iio_buffer *buffer, int bit)
{
	if (bit > indio_dev->masklength)
		return -EINVAL;

	if (!buffer->scan_mask)
		return 0;

	return test_bit(bit, buffer->scan_mask);
}
EXPORT_SYMBOL_GPL(iio_scan_mask_query);

/**
 * struct iio_demux_table - table describing demux memcpy ops
 * @from: index to copy from
 * @to: index to copy to
 * @length: how many bytes to copy
 * @l: list head used for management
 */
struct iio_demux_table {
	unsigned from;
	unsigned to;
	unsigned length;
	struct list_head l;
};

static const void *iio_demux(struct iio_buffer *buffer,
			     const void *datain)
{
	struct iio_demux_table *t;

	if (list_empty(&buffer->demux_list))
		return datain;
	list_for_each_entry(t, &buffer->demux_list, l)
		memcpy(buffer->demux_bounce + t->to,
		       datain + t->from, t->length);

	return buffer->demux_bounce;
}

static int iio_push_to_buffer(struct iio_buffer *buffer, const void *data)
{
	const void *dataout = iio_demux(buffer, data);

	return buffer->access->store_to(buffer, dataout);
}

static void iio_buffer_demux_free(struct iio_buffer *buffer)
{
	struct iio_demux_table *p, *q;
	list_for_each_entry_safe(p, q, &buffer->demux_list, l) {
		list_del(&p->l);
		kfree(p);
	}
}

int iio_push_to_buffers(struct iio_dev *indio_dev, const void *data)
{
	int ret;
	struct iio_buffer *buf;

	list_for_each_entry(buf, &indio_dev->buffer_list, buffer_list) {
		ret = iio_push_to_buffer(buf, data);
		if (ret < 0)
			return ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(iio_push_to_buffers);
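
/*
 * Typical call path sketch (hypothetical driver): a trigger handler
 * assembles one scan in a driver-owned array (my_data below is made up)
 * and pushes it to all attached buffers:
 *
 *	static irqreturn_t my_trigger_handler(int irq, void *p)
 *	{
 *		struct iio_poll_func *pf = p;
 *		struct iio_dev *indio_dev = pf->indio_dev;
 *
 *		// fill my_data with one scan's worth of samples
 *		iio_push_to_buffers(indio_dev, my_data);
 *		iio_trigger_notify_done(indio_dev->trig);
 *		return IRQ_HANDLED;
 *	}
 *
 * Any per-buffer demuxing happens inside iio_push_to_buffer().
 */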

static int iio_buffer_update_demux(struct iio_dev *indio_dev,
				   struct iio_buffer *buffer)
{
	const struct iio_chan_spec *ch;
	int ret, in_ind = -1, out_ind, length;
	unsigned in_loc = 0, out_loc = 0;
	struct iio_demux_table *p;

	/* Clear out any old demux */
	iio_buffer_demux_free(buffer);
	kfree(buffer->demux_bounce);
	buffer->demux_bounce = NULL;

	/* First work out which scan mode we will actually have */
	if (bitmap_equal(indio_dev->active_scan_mask,
			 buffer->scan_mask,
			 indio_dev->masklength))
		return 0;

	/* Now we have the two masks, work from least sig and build up sizes */
	for_each_set_bit(out_ind,
			 buffer->scan_mask,
			 indio_dev->masklength) {
		in_ind = find_next_bit(indio_dev->active_scan_mask,
				       indio_dev->masklength,
				       in_ind + 1);
		while (in_ind != out_ind) {
			in_ind = find_next_bit(indio_dev->active_scan_mask,
					       indio_dev->masklength,
					       in_ind + 1);
			ch = iio_find_channel_from_si(indio_dev, in_ind);
			length = ch->scan_type.storagebits/8;
			/* Make sure we are aligned */
			in_loc += length;
			if (in_loc % length)
				in_loc += length - in_loc % length;
		}
		p = kmalloc(sizeof(*p), GFP_KERNEL);
		if (p == NULL) {
			ret = -ENOMEM;
			goto error_clear_mux_table;
		}
		ch = iio_find_channel_from_si(indio_dev, in_ind);
		length = ch->scan_type.storagebits/8;
		if (out_loc % length)
			out_loc += length - out_loc % length;
		if (in_loc % length)
			in_loc += length - in_loc % length;
		p->from = in_loc;
		p->to = out_loc;
		p->length = length;
		list_add_tail(&p->l, &buffer->demux_list);
		out_loc += length;
		in_loc += length;
	}
	/* Relies on scan_timestamp being last */
	if (buffer->scan_timestamp) {
		p = kmalloc(sizeof(*p), GFP_KERNEL);
		if (p == NULL) {
			ret = -ENOMEM;
			goto error_clear_mux_table;
		}
		ch = iio_find_channel_from_si(indio_dev,
					      indio_dev->scan_index_timestamp);
		length = ch->scan_type.storagebits/8;
		if (out_loc % length)
			out_loc += length - out_loc % length;
		if (in_loc % length)
			in_loc += length - in_loc % length;
		p->from = in_loc;
		p->to = out_loc;
		p->length = length;
		list_add_tail(&p->l, &buffer->demux_list);
		out_loc += length;
		in_loc += length;
	}
	buffer->demux_bounce = kzalloc(out_loc, GFP_KERNEL);
	if (buffer->demux_bounce == NULL) {
		ret = -ENOMEM;
		goto error_clear_mux_table;
	}
	return 0;

error_clear_mux_table:
	iio_buffer_demux_free(buffer);

	return ret;
}
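
/*
 * Demux illustration (hypothetical masks, assuming four 16-bit channels):
 * with an active_scan_mask of 0xf and a buffer scan_mask of 0x5, the loop
 * above builds entries copying bytes 0-1 -> 0-1 (channel 0) and bytes
 * 4-5 -> 2-3 (channel 2), so the bounce buffer receives a densely packed
 * scan containing only the channels this buffer asked for.
 */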

int iio_update_demux(struct iio_dev *indio_dev)
{
	struct iio_buffer *buffer;
	int ret;

	list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list) {
		ret = iio_buffer_update_demux(indio_dev, buffer);
		if (ret < 0)
			goto error_clear_mux_table;
	}
	return 0;

error_clear_mux_table:
	list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list)
		iio_buffer_demux_free(buffer);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_update_demux);

/**
 * iio_buffer_release() - Free a buffer's resources
 * @ref: Pointer to the kref embedded in the iio_buffer struct
 *
 * This function is called when the last reference to the buffer has been
 * dropped. It will typically free all resources allocated by the buffer. Do not
 * call this function manually, always use iio_buffer_put() when done using a
 * buffer.
 */
static void iio_buffer_release(struct kref *ref)
{
	struct iio_buffer *buffer = container_of(ref, struct iio_buffer, ref);

	buffer->access->release(buffer);
}

/**
 * iio_buffer_get() - Grab a reference to the buffer
 * @buffer: The buffer to grab a reference for, may be NULL
 *
 * Returns the pointer to the buffer that was passed into the function.
 */
struct iio_buffer *iio_buffer_get(struct iio_buffer *buffer)
{
	if (buffer)
		kref_get(&buffer->ref);

	return buffer;
}
EXPORT_SYMBOL_GPL(iio_buffer_get);

/**
 * iio_buffer_put() - Release the reference to the buffer
 * @buffer: The buffer to release the reference for, may be NULL
 */
void iio_buffer_put(struct iio_buffer *buffer)
{
	if (buffer)
		kref_put(&buffer->ref, iio_buffer_release);
}
EXPORT_SYMBOL_GPL(iio_buffer_put);
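
/*
 * Lifetime sketch: code that stores a buffer pointer beyond the current
 * call should pair these helpers, e.g.
 *
 *	my_state->buffer = iio_buffer_get(indio_dev->buffer);
 *	...
 *	iio_buffer_put(my_state->buffer);
 *
 * (my_state is hypothetical). The final put drops the kref and lands in
 * iio_buffer_release(), which delegates to the implementation's
 * release() callback.
 */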