/* The industrial I/O core
 *
 * Copyright (c) 2008 Jonathan Cameron
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * Handling of buffer allocation / resizing.
 *
 * Things to look at here.
 * - Better memory allocation techniques?
 * - Alternative access techniques?
 */
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/cdev.h>
#include <linux/slab.h>
#include <linux/poll.h>

#include <linux/iio/iio.h>
#include "iio_core.h"
#include <linux/iio/sysfs.h>
#include <linux/iio/buffer.h>

static const char * const iio_endian_prefix[] = {
	[IIO_BE] = "be",
	[IIO_LE] = "le",
};

static bool iio_buffer_is_active(struct iio_buffer *buf)
{
	return !list_empty(&buf->buffer_list);
}

/**
 * iio_buffer_read_first_n_outer() - chrdev read for buffer access
 * @filp:	File structure pointer for the char device
 * @buf:	Destination buffer for iio buffer read
 * @n:		First n bytes to read
 * @f_ps:	Long offset provided by the user as a seek position
 *
 * This function relies on all buffer implementations having an
 * iio_buffer as their first element.
 **/
ssize_t iio_buffer_read_first_n_outer(struct file *filp, char __user *buf,
				      size_t n, loff_t *f_ps)
{
	struct iio_dev *indio_dev = filp->private_data;
	struct iio_buffer *rb = indio_dev->buffer;

	if (!indio_dev->info)
		return -ENODEV;

	if (!rb || !rb->access->read_first_n)
		return -EINVAL;
	return rb->access->read_first_n(rb, n, buf);
}

/**
 * iio_buffer_poll() - poll the buffer to find out if it has data
 * @filp:	File structure pointer for the char device
 * @wait:	Poll table structure pointer to which the wait queue is added
 */
unsigned int iio_buffer_poll(struct file *filp,
			     struct poll_table_struct *wait)
{
	struct iio_dev *indio_dev = filp->private_data;
	struct iio_buffer *rb = indio_dev->buffer;

	if (!indio_dev->info)
		return -ENODEV;

	poll_wait(filp, &rb->pollq, wait);
	if (rb->stufftoread)
		return POLLIN | POLLRDNORM;
	/* need a way of knowing if there may be enough data... */
	return 0;
}
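
/*
 * Illustrative sketch (not part of this file): a userspace reader would
 * typically block on the character device with poll() and then read whole
 * scans. The device path and scan size here are assumptions for the example.
 *
 *	struct pollfd pfd = { .fd = fd, .events = POLLIN };
 *
 *	poll(&pfd, 1, -1);
 *	if (pfd.revents & POLLIN)
 *		read(fd, scan, scan_bytes);
 */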

void iio_buffer_init(struct iio_buffer *buffer)
{
	INIT_LIST_HEAD(&buffer->demux_list);
	INIT_LIST_HEAD(&buffer->buffer_list);
	init_waitqueue_head(&buffer->pollq);
	kref_init(&buffer->ref);
}
EXPORT_SYMBOL(iio_buffer_init);
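
/*
 * Illustrative sketch (buffer implementation assumed, not defined here): a
 * buffer backend calls iio_buffer_init() on its embedded struct iio_buffer
 * before handing it to the core, e.g. in its allocation routine:
 *
 *	struct my_buffer *mb = kzalloc(sizeof(*mb), GFP_KERNEL);
 *
 *	if (!mb)
 *		return NULL;
 *	iio_buffer_init(&mb->buffer);
 *	mb->buffer.access = &my_buffer_access_funcs;
 *	return &mb->buffer;
 */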

static ssize_t iio_show_scan_index(struct device *dev,
				   struct device_attribute *attr,
				   char *buf)
{
	return sprintf(buf, "%u\n", to_iio_dev_attr(attr)->c->scan_index);
}

static ssize_t iio_show_fixed_type(struct device *dev,
				   struct device_attribute *attr,
				   char *buf)
{
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
	u8 type = this_attr->c->scan_type.endianness;

	if (type == IIO_CPU) {
#ifdef __LITTLE_ENDIAN
		type = IIO_LE;
#else
		type = IIO_BE;
#endif
	}
	return sprintf(buf, "%s:%c%d/%d>>%u\n",
		       iio_endian_prefix[type],
		       this_attr->c->scan_type.sign,
		       this_attr->c->scan_type.realbits,
		       this_attr->c->scan_type.storagebits,
		       this_attr->c->scan_type.shift);
}
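
/*
 * Example (illustrative, channel declaration assumed): a channel declared as
 *
 *	.scan_type = { .sign = 's', .realbits = 12, .storagebits = 16,
 *		       .shift = 4, .endianness = IIO_LE },
 *
 * reads back through the 'type' attribute above as "le:s12/16>>4".
 */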

static ssize_t iio_scan_el_show(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	int ret;
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);

	ret = test_bit(to_iio_dev_attr(attr)->address,
		       indio_dev->buffer->scan_mask);

	return sprintf(buf, "%d\n", ret);
}

static int iio_scan_mask_clear(struct iio_buffer *buffer, int bit)
{
	clear_bit(bit, buffer->scan_mask);
	return 0;
}

static ssize_t iio_scan_el_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf,
				 size_t len)
{
	int ret;
	bool state;
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_buffer *buffer = indio_dev->buffer;
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);

	ret = strtobool(buf, &state);
	if (ret < 0)
		return ret;
	mutex_lock(&indio_dev->mlock);
	if (iio_buffer_is_active(indio_dev->buffer)) {
		ret = -EBUSY;
		goto error_ret;
	}
	ret = iio_scan_mask_query(indio_dev, buffer, this_attr->address);
	if (ret < 0)
		goto error_ret;
	if (!state && ret) {
		ret = iio_scan_mask_clear(buffer, this_attr->address);
		if (ret)
			goto error_ret;
	} else if (state && !ret) {
		ret = iio_scan_mask_set(indio_dev, buffer, this_attr->address);
		if (ret)
			goto error_ret;
	}

error_ret:
	mutex_unlock(&indio_dev->mlock);

	return ret < 0 ? ret : len;
}
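
/*
 * Illustrative sysfs usage (device number and channel name assumed): the
 * store/show pair above backs the per-channel scan_elements files, e.g.
 *
 *	echo 1 > /sys/bus/iio/devices/iio:device0/scan_elements/in_voltage0_en
 *	cat /sys/bus/iio/devices/iio:device0/scan_elements/in_voltage0_en
 *
 * Writes fail with -EBUSY while any buffer is active.
 */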

static ssize_t iio_scan_el_ts_show(struct device *dev,
				   struct device_attribute *attr,
				   char *buf)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);

	return sprintf(buf, "%d\n", indio_dev->buffer->scan_timestamp);
}

static ssize_t iio_scan_el_ts_store(struct device *dev,
				    struct device_attribute *attr,
				    const char *buf,
				    size_t len)
{
	int ret;
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	bool state;

	ret = strtobool(buf, &state);
	if (ret < 0)
		return ret;

	mutex_lock(&indio_dev->mlock);
	if (iio_buffer_is_active(indio_dev->buffer)) {
		ret = -EBUSY;
		goto error_ret;
	}
	indio_dev->buffer->scan_timestamp = state;
error_ret:
	mutex_unlock(&indio_dev->mlock);

	return ret ? ret : len;
}

static int iio_buffer_add_channel_sysfs(struct iio_dev *indio_dev,
					const struct iio_chan_spec *chan)
{
	int ret, attrcount = 0;
	struct iio_buffer *buffer = indio_dev->buffer;

	ret = __iio_add_chan_devattr("index",
				     chan,
				     &iio_show_scan_index,
				     NULL,
				     0,
				     IIO_SEPARATE,
				     &indio_dev->dev,
				     &buffer->scan_el_dev_attr_list);
	if (ret)
		goto error_ret;
	attrcount++;
	ret = __iio_add_chan_devattr("type",
				     chan,
				     &iio_show_fixed_type,
				     NULL,
				     0,
				     0,
				     &indio_dev->dev,
				     &buffer->scan_el_dev_attr_list);
	if (ret)
		goto error_ret;
	attrcount++;
	if (chan->type != IIO_TIMESTAMP)
		ret = __iio_add_chan_devattr("en",
					     chan,
					     &iio_scan_el_show,
					     &iio_scan_el_store,
					     chan->scan_index,
					     0,
					     &indio_dev->dev,
					     &buffer->scan_el_dev_attr_list);
	else
		ret = __iio_add_chan_devattr("en",
					     chan,
					     &iio_scan_el_ts_show,
					     &iio_scan_el_ts_store,
					     chan->scan_index,
					     0,
					     &indio_dev->dev,
					     &buffer->scan_el_dev_attr_list);
	if (ret)
		goto error_ret;
	attrcount++;
	ret = attrcount;
error_ret:
	return ret;
}

static void iio_buffer_remove_and_free_scan_dev_attr(struct iio_dev *indio_dev,
						     struct iio_dev_attr *p)
{
	kfree(p->dev_attr.attr.name);
	kfree(p);
}

static void __iio_buffer_attr_cleanup(struct iio_dev *indio_dev)
{
	struct iio_dev_attr *p, *n;
	struct iio_buffer *buffer = indio_dev->buffer;

	list_for_each_entry_safe(p, n,
				 &buffer->scan_el_dev_attr_list, l)
		iio_buffer_remove_and_free_scan_dev_attr(indio_dev, p);
}

static const char * const iio_scan_elements_group_name = "scan_elements";

int iio_buffer_register(struct iio_dev *indio_dev,
			const struct iio_chan_spec *channels,
			int num_channels)
{
	struct iio_dev_attr *p;
	struct attribute **attr;
	struct iio_buffer *buffer = indio_dev->buffer;
	int ret, i, attrn, attrcount, attrcount_orig = 0;

	if (buffer->attrs)
		indio_dev->groups[indio_dev->groupcounter++] = buffer->attrs;

	if (buffer->scan_el_attrs != NULL) {
		attr = buffer->scan_el_attrs->attrs;
		while (*attr++ != NULL)
			attrcount_orig++;
	}
	attrcount = attrcount_orig;
	INIT_LIST_HEAD(&buffer->scan_el_dev_attr_list);
	if (channels) {
		/* Build scan element attributes for each channel */
		for (i = 0; i < num_channels; i++) {
			if (channels[i].scan_index < 0)
				continue;

			/* Establish necessary mask length */
			if (channels[i].scan_index >
			    (int)indio_dev->masklength - 1)
				indio_dev->masklength
					= channels[i].scan_index + 1;

			ret = iio_buffer_add_channel_sysfs(indio_dev,
							   &channels[i]);
			if (ret < 0)
				goto error_cleanup_dynamic;
			attrcount += ret;
			if (channels[i].type == IIO_TIMESTAMP)
				indio_dev->scan_index_timestamp =
					channels[i].scan_index;
		}
		if (indio_dev->masklength && buffer->scan_mask == NULL) {
			buffer->scan_mask = kcalloc(BITS_TO_LONGS(indio_dev->masklength),
						    sizeof(*buffer->scan_mask),
						    GFP_KERNEL);
			if (buffer->scan_mask == NULL) {
				ret = -ENOMEM;
				goto error_cleanup_dynamic;
			}
		}
	}

	buffer->scan_el_group.name = iio_scan_elements_group_name;

	buffer->scan_el_group.attrs = kcalloc(attrcount + 1,
					      sizeof(buffer->scan_el_group.attrs[0]),
					      GFP_KERNEL);
	if (buffer->scan_el_group.attrs == NULL) {
		ret = -ENOMEM;
		goto error_free_scan_mask;
	}
	if (buffer->scan_el_attrs)
		memcpy(buffer->scan_el_group.attrs, buffer->scan_el_attrs->attrs,
		       sizeof(buffer->scan_el_group.attrs[0]) * attrcount_orig);
	attrn = attrcount_orig;

	list_for_each_entry(p, &buffer->scan_el_dev_attr_list, l)
		buffer->scan_el_group.attrs[attrn++] = &p->dev_attr.attr;
	indio_dev->groups[indio_dev->groupcounter++] = &buffer->scan_el_group;

	return 0;

error_free_scan_mask:
	kfree(buffer->scan_mask);
error_cleanup_dynamic:
	__iio_buffer_attr_cleanup(indio_dev);

	return ret;
}
EXPORT_SYMBOL(iio_buffer_register);

void iio_buffer_unregister(struct iio_dev *indio_dev)
{
	kfree(indio_dev->buffer->scan_mask);
	kfree(indio_dev->buffer->scan_el_group.attrs);
	__iio_buffer_attr_cleanup(indio_dev);
}
EXPORT_SYMBOL(iio_buffer_unregister);

ssize_t iio_buffer_read_length(struct device *dev,
			       struct device_attribute *attr,
			       char *buf)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_buffer *buffer = indio_dev->buffer;

	if (buffer->access->get_length)
		return sprintf(buf, "%d\n",
			       buffer->access->get_length(buffer));

	return 0;
}
EXPORT_SYMBOL(iio_buffer_read_length);

ssize_t iio_buffer_write_length(struct device *dev,
				struct device_attribute *attr,
				const char *buf,
				size_t len)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_buffer *buffer = indio_dev->buffer;
	unsigned int val;
	int ret;

	ret = kstrtouint(buf, 10, &val);
	if (ret)
		return ret;

	if (buffer->access->get_length)
		if (val == buffer->access->get_length(buffer))
			return len;

	mutex_lock(&indio_dev->mlock);
	if (iio_buffer_is_active(indio_dev->buffer)) {
		ret = -EBUSY;
	} else {
		if (buffer->access->set_length)
			buffer->access->set_length(buffer, val);
		ret = 0;
	}
	mutex_unlock(&indio_dev->mlock);

	return ret ? ret : len;
}
EXPORT_SYMBOL(iio_buffer_write_length);

ssize_t iio_buffer_show_enable(struct device *dev,
			       struct device_attribute *attr,
			       char *buf)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);

	return sprintf(buf, "%d\n", iio_buffer_is_active(indio_dev->buffer));
}
EXPORT_SYMBOL(iio_buffer_show_enable);

/* Note NULL is used as the error indicator, as a NULL match makes no sense. */
static const unsigned long *iio_scan_mask_match(const unsigned long *av_masks,
						unsigned int masklength,
						const unsigned long *mask)
{
	if (bitmap_empty(mask, masklength))
		return NULL;
	while (*av_masks) {
		if (bitmap_subset(mask, av_masks, masklength))
			return av_masks;
		av_masks += BITS_TO_LONGS(masklength);
	}
	return NULL;
}
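
/*
 * Illustrative sketch (mask values assumed): available_scan_masks is a flat
 * array of bitmaps, BITS_TO_LONGS(masklength) longs per entry, terminated by
 * an all-zero entry. For a 4-channel device that can sample either the first
 * pair or all four channels:
 *
 *	static const unsigned long my_scan_masks[] = { 0x3, 0xf, 0 };
 *
 * A request for channel {0} matches 0x3, the first listed mask that is a
 * superset of the request (drivers conventionally list masks smallest first).
 */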

static int iio_compute_scan_bytes(struct iio_dev *indio_dev,
				  const unsigned long *mask, bool timestamp)
{
	const struct iio_chan_spec *ch;
	unsigned bytes = 0;
	int length, i;

	/* How much space will the demuxed element take? */
	for_each_set_bit(i, mask,
			 indio_dev->masklength) {
		ch = iio_find_channel_from_si(indio_dev, i);
		length = ch->scan_type.storagebits / 8;
		bytes = ALIGN(bytes, length);
		bytes += length;
	}
	if (timestamp) {
		ch = iio_find_channel_from_si(indio_dev,
					      indio_dev->scan_index_timestamp);
		length = ch->scan_type.storagebits / 8;
		bytes = ALIGN(bytes, length);
		bytes += length;
	}
	return bytes;
}
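
/*
 * Worked example (channel layout assumed): two 16-bit channels plus the
 * 64-bit timestamp give
 *
 *	2 + 2 = 4 bytes of data, then ALIGN(4, 8) = 8 for the timestamp,
 *	so scan_bytes = 8 + 8 = 16,
 *
 * i.e. 4 bytes of padding are inserted so the s64 timestamp stays naturally
 * aligned.
 */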

static void iio_buffer_activate(struct iio_dev *indio_dev,
				struct iio_buffer *buffer)
{
	iio_buffer_get(buffer);
	list_add(&buffer->buffer_list, &indio_dev->buffer_list);
}

static void iio_buffer_deactivate(struct iio_buffer *buffer)
{
	list_del_init(&buffer->buffer_list);
	iio_buffer_put(buffer);
}

void iio_disable_all_buffers(struct iio_dev *indio_dev)
{
	struct iio_buffer *buffer, *_buffer;

	if (list_empty(&indio_dev->buffer_list))
		return;

	if (indio_dev->setup_ops->predisable)
		indio_dev->setup_ops->predisable(indio_dev);

	list_for_each_entry_safe(buffer, _buffer,
				 &indio_dev->buffer_list, buffer_list)
		iio_buffer_deactivate(buffer);

	indio_dev->currentmode = INDIO_DIRECT_MODE;
	if (indio_dev->setup_ops->postdisable)
		indio_dev->setup_ops->postdisable(indio_dev);
}

int iio_update_buffers(struct iio_dev *indio_dev,
		       struct iio_buffer *insert_buffer,
		       struct iio_buffer *remove_buffer)
{
	int ret;
	int success = 0;
	struct iio_buffer *buffer;
	unsigned long *compound_mask;
	const unsigned long *old_mask;

	/* Wind down existing buffers - iff there are any */
	if (!list_empty(&indio_dev->buffer_list)) {
		if (indio_dev->setup_ops->predisable) {
			ret = indio_dev->setup_ops->predisable(indio_dev);
			if (ret)
				goto error_ret;
		}
		indio_dev->currentmode = INDIO_DIRECT_MODE;
		if (indio_dev->setup_ops->postdisable) {
			ret = indio_dev->setup_ops->postdisable(indio_dev);
			if (ret)
				goto error_ret;
		}
	}
	/* Keep a copy of current setup to allow roll back */
	old_mask = indio_dev->active_scan_mask;
	if (!indio_dev->available_scan_masks)
		indio_dev->active_scan_mask = NULL;

	if (remove_buffer)
		iio_buffer_deactivate(remove_buffer);
	if (insert_buffer)
		iio_buffer_activate(indio_dev, insert_buffer);

	/* If no buffers in list, we are done */
	if (list_empty(&indio_dev->buffer_list)) {
		indio_dev->currentmode = INDIO_DIRECT_MODE;
		if (indio_dev->available_scan_masks == NULL)
			kfree(old_mask);
		return 0;
	}

	/* What scan mask do we actually have? */
	compound_mask = kcalloc(BITS_TO_LONGS(indio_dev->masklength),
				sizeof(long), GFP_KERNEL);
	if (compound_mask == NULL) {
		if (indio_dev->available_scan_masks == NULL)
			kfree(old_mask);
		return -ENOMEM;
	}
	indio_dev->scan_timestamp = 0;

	list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list) {
		bitmap_or(compound_mask, compound_mask, buffer->scan_mask,
			  indio_dev->masklength);
		indio_dev->scan_timestamp |= buffer->scan_timestamp;
	}
	if (indio_dev->available_scan_masks) {
		indio_dev->active_scan_mask =
			iio_scan_mask_match(indio_dev->available_scan_masks,
					    indio_dev->masklength,
					    compound_mask);
		if (indio_dev->active_scan_mask == NULL) {
			/*
			 * Roll back.
			 * Note can only occur when adding a buffer.
			 */
			iio_buffer_deactivate(insert_buffer);
			if (old_mask) {
				indio_dev->active_scan_mask = old_mask;
				success = -EINVAL;
			} else {
				kfree(compound_mask);
				ret = -EINVAL;
				goto error_ret;
			}
		}
	} else {
		indio_dev->active_scan_mask = compound_mask;
	}

	iio_update_demux(indio_dev);

	/* Wind up again */
	if (indio_dev->setup_ops->preenable) {
		ret = indio_dev->setup_ops->preenable(indio_dev);
		if (ret) {
			printk(KERN_ERR
			       "Buffer not started: buffer preenable failed (%d)\n", ret);
			goto error_remove_inserted;
		}
	}
	indio_dev->scan_bytes =
		iio_compute_scan_bytes(indio_dev,
				       indio_dev->active_scan_mask,
				       indio_dev->scan_timestamp);
	list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list)
		if (buffer->access->request_update) {
			ret = buffer->access->request_update(buffer);
			if (ret) {
				printk(KERN_INFO
				       "Buffer not started: buffer parameter update failed (%d)\n", ret);
				goto error_run_postdisable;
			}
		}
	if (indio_dev->info->update_scan_mode) {
		ret = indio_dev->info
			->update_scan_mode(indio_dev,
					   indio_dev->active_scan_mask);
		if (ret < 0) {
			printk(KERN_INFO "Buffer not started: update scan mode failed (%d)\n", ret);
			goto error_run_postdisable;
		}
	}
	/* Definitely possible for devices to support both of these. */
	if (indio_dev->modes & INDIO_BUFFER_TRIGGERED) {
		if (!indio_dev->trig) {
			printk(KERN_INFO "Buffer not started: no trigger\n");
			ret = -EINVAL;
			/* Can only occur on first buffer */
			goto error_run_postdisable;
		}
		indio_dev->currentmode = INDIO_BUFFER_TRIGGERED;
	} else if (indio_dev->modes & INDIO_BUFFER_HARDWARE) {
		indio_dev->currentmode = INDIO_BUFFER_HARDWARE;
	} else { /* Should never be reached */
		ret = -EINVAL;
		goto error_run_postdisable;
	}

	if (indio_dev->setup_ops->postenable) {
		ret = indio_dev->setup_ops->postenable(indio_dev);
		if (ret) {
			printk(KERN_INFO
			       "Buffer not started: postenable failed (%d)\n", ret);
			indio_dev->currentmode = INDIO_DIRECT_MODE;
			if (indio_dev->setup_ops->postdisable)
				indio_dev->setup_ops->postdisable(indio_dev);
			goto error_disable_all_buffers;
		}
	}

	if (indio_dev->available_scan_masks)
		kfree(compound_mask);
	else
		kfree(old_mask);

	return success;

error_disable_all_buffers:
	indio_dev->currentmode = INDIO_DIRECT_MODE;
error_run_postdisable:
	if (indio_dev->setup_ops->postdisable)
		indio_dev->setup_ops->postdisable(indio_dev);
error_remove_inserted:
	if (insert_buffer)
		iio_buffer_deactivate(insert_buffer);
	indio_dev->active_scan_mask = old_mask;
	kfree(compound_mask);
error_ret:
	return ret;
}
EXPORT_SYMBOL_GPL(iio_update_buffers);
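
/*
 * Illustrative in-kernel usage (caller and buffer name assumed): a consumer
 * attaches its own buffer while holding mlock, mirroring what
 * iio_buffer_store_enable() does below for the device's default buffer:
 *
 *	mutex_lock(&indio_dev->mlock);
 *	ret = iio_update_buffers(indio_dev, my_buffer, NULL);
 *	mutex_unlock(&indio_dev->mlock);
 */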

ssize_t iio_buffer_store_enable(struct device *dev,
				struct device_attribute *attr,
				const char *buf,
				size_t len)
{
	int ret;
	bool requested_state;
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	bool inlist;

	ret = strtobool(buf, &requested_state);
	if (ret < 0)
		return ret;

	mutex_lock(&indio_dev->mlock);

	/* Find out if it is in the list */
	inlist = iio_buffer_is_active(indio_dev->buffer);
	/* Already in desired state */
	if (inlist == requested_state)
		goto done;

	if (requested_state)
		ret = iio_update_buffers(indio_dev,
					 indio_dev->buffer, NULL);
	else
		ret = iio_update_buffers(indio_dev,
					 NULL, indio_dev->buffer);

done:
	mutex_unlock(&indio_dev->mlock);
	return (ret < 0) ? ret : len;
}
EXPORT_SYMBOL(iio_buffer_store_enable);

int iio_sw_buffer_preenable(struct iio_dev *indio_dev)
{
	struct iio_buffer *buffer;
	unsigned bytes;

	dev_dbg(&indio_dev->dev, "%s\n", __func__);

	list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list)
		if (buffer->access->set_bytes_per_datum) {
			bytes = iio_compute_scan_bytes(indio_dev,
						       buffer->scan_mask,
						       buffer->scan_timestamp);

			buffer->access->set_bytes_per_datum(buffer, bytes);
		}
	return 0;
}
EXPORT_SYMBOL(iio_sw_buffer_preenable);

/**
 * iio_validate_scan_mask_onehot() - Validates that exactly one channel is selected
 * @indio_dev: the iio device
 * @mask: scan mask to be checked
 *
 * Return true if exactly one bit is set in the scan mask, false otherwise. It
 * can be used for devices where only one channel can be active for sampling at
 * a time.
 */
bool iio_validate_scan_mask_onehot(struct iio_dev *indio_dev,
				   const unsigned long *mask)
{
	return bitmap_weight(mask, indio_dev->masklength) == 1;
}
EXPORT_SYMBOL_GPL(iio_validate_scan_mask_onehot);
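
/*
 * Illustrative sketch (ops structure assumed): a driver that can sample only
 * one channel at a time plugs this helper into its buffer setup ops:
 *
 *	static const struct iio_buffer_setup_ops my_setup_ops = {
 *		.preenable = iio_sw_buffer_preenable,
 *		.validate_scan_mask = iio_validate_scan_mask_onehot,
 *	};
 */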

static bool iio_validate_scan_mask(struct iio_dev *indio_dev,
				   const unsigned long *mask)
{
	if (!indio_dev->setup_ops->validate_scan_mask)
		return true;

	return indio_dev->setup_ops->validate_scan_mask(indio_dev, mask);
}

/**
 * iio_scan_mask_set() - set particular bit in the scan mask
 * @indio_dev: the iio device
 * @buffer: the buffer whose scan mask we are interested in
 * @bit: the bit to be set.
 *
 * Note that at this point we have no way of knowing what other
 * buffers might request, hence this code only verifies that the
 * individual buffer's request is plausible.
 */
int iio_scan_mask_set(struct iio_dev *indio_dev,
		      struct iio_buffer *buffer, int bit)
{
	const unsigned long *mask;
	unsigned long *trialmask;

	trialmask = kmalloc(sizeof(*trialmask)*
			    BITS_TO_LONGS(indio_dev->masklength),
			    GFP_KERNEL);

	if (trialmask == NULL)
		return -ENOMEM;
	if (!indio_dev->masklength) {
		WARN(1, "Trying to set scan mask prior to registering buffer\n");
		goto err_invalid_mask;
	}
	bitmap_copy(trialmask, buffer->scan_mask, indio_dev->masklength);
	set_bit(bit, trialmask);

	if (!iio_validate_scan_mask(indio_dev, trialmask))
		goto err_invalid_mask;

	if (indio_dev->available_scan_masks) {
		mask = iio_scan_mask_match(indio_dev->available_scan_masks,
					   indio_dev->masklength,
					   trialmask);
		if (!mask)
			goto err_invalid_mask;
	}
	bitmap_copy(buffer->scan_mask, trialmask, indio_dev->masklength);

	kfree(trialmask);

	return 0;

err_invalid_mask:
	kfree(trialmask);
	return -EINVAL;
}
EXPORT_SYMBOL_GPL(iio_scan_mask_set);

int iio_scan_mask_query(struct iio_dev *indio_dev,
			struct iio_buffer *buffer, int bit)
{
	if (bit >= indio_dev->masklength)
		return -EINVAL;

	if (!buffer->scan_mask)
		return 0;

	return test_bit(bit, buffer->scan_mask);
}
EXPORT_SYMBOL_GPL(iio_scan_mask_query);

/**
 * struct iio_demux_table - table describing demux memcpy ops
 * @from: index to copy from
 * @to: index to copy to
 * @length: how many bytes to copy
 * @l: list head used for management
 */
struct iio_demux_table {
	unsigned from;
	unsigned to;
	unsigned length;
	struct list_head l;
};
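
/*
 * Worked example (masks assumed): with an active scan of channels {0, 1, 2}
 * of 2 bytes each and a buffer that requested only {0, 2}, the demux table
 * built below would contain two entries,
 *
 *	{ .from = 0, .to = 0, .length = 2 }	(channel 0)
 *	{ .from = 4, .to = 2, .length = 2 }	(channel 2)
 *
 * so each buffer sees a densely packed scan of just its own channels.
 */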

static const void *iio_demux(struct iio_buffer *buffer,
			     const void *datain)
{
	struct iio_demux_table *t;

	if (list_empty(&buffer->demux_list))
		return datain;
	list_for_each_entry(t, &buffer->demux_list, l)
		memcpy(buffer->demux_bounce + t->to,
		       datain + t->from, t->length);

	return buffer->demux_bounce;
}

static int iio_push_to_buffer(struct iio_buffer *buffer, const void *data)
{
	const void *dataout = iio_demux(buffer, data);

	return buffer->access->store_to(buffer, dataout);
}

static void iio_buffer_demux_free(struct iio_buffer *buffer)
{
	struct iio_demux_table *p, *q;

	list_for_each_entry_safe(p, q, &buffer->demux_list, l) {
		list_del(&p->l);
		kfree(p);
	}
}

int iio_push_to_buffers(struct iio_dev *indio_dev, const void *data)
{
	int ret;
	struct iio_buffer *buf;

	list_for_each_entry(buf, &indio_dev->buffer_list, buffer_list) {
		ret = iio_push_to_buffer(buf, data);
		if (ret < 0)
			return ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(iio_push_to_buffers);
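
/*
 * Illustrative sketch (driver names and scan storage assumed): a typical
 * caller is a trigger handler that captures one scan and pushes it to all
 * attached buffers:
 *
 *	static irqreturn_t my_trigger_handler(int irq, void *p)
 *	{
 *		struct iio_poll_func *pf = p;
 *		struct iio_dev *indio_dev = pf->indio_dev;
 *		u8 scan[32];	// sized for the assumed scan_bytes
 *
 *		my_read_scan(indio_dev, scan);	// assumed helper, fills scan
 *		iio_push_to_buffers(indio_dev, scan);
 *		iio_trigger_notify_done(indio_dev->trig);
 *		return IRQ_HANDLED;
 *	}
 */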

static int iio_buffer_update_demux(struct iio_dev *indio_dev,
				   struct iio_buffer *buffer)
{
	const struct iio_chan_spec *ch;
	int ret, in_ind = -1, out_ind, length;
	unsigned in_loc = 0, out_loc = 0;
	struct iio_demux_table *p;

	/* Clear out any old demux */
	iio_buffer_demux_free(buffer);
	kfree(buffer->demux_bounce);
	buffer->demux_bounce = NULL;

	/* First work out which scan mode we will actually have */
	if (bitmap_equal(indio_dev->active_scan_mask,
			 buffer->scan_mask,
			 indio_dev->masklength))
		return 0;

	/* Now we have the two masks, work from least sig and build up sizes */
	for_each_set_bit(out_ind,
			 indio_dev->active_scan_mask,
			 indio_dev->masklength) {
		in_ind = find_next_bit(indio_dev->active_scan_mask,
				       indio_dev->masklength,
				       in_ind + 1);
		while (in_ind != out_ind) {
			in_ind = find_next_bit(indio_dev->active_scan_mask,
					       indio_dev->masklength,
					       in_ind + 1);
			ch = iio_find_channel_from_si(indio_dev, in_ind);
			length = ch->scan_type.storagebits/8;
			/* Make sure we are aligned */
			in_loc += length;
			if (in_loc % length)
				in_loc += length - in_loc % length;
		}
		p = kmalloc(sizeof(*p), GFP_KERNEL);
		if (p == NULL) {
			ret = -ENOMEM;
			goto error_clear_mux_table;
		}
		ch = iio_find_channel_from_si(indio_dev, in_ind);
		length = ch->scan_type.storagebits/8;
		if (out_loc % length)
			out_loc += length - out_loc % length;
		if (in_loc % length)
			in_loc += length - in_loc % length;
		p->from = in_loc;
		p->to = out_loc;
		p->length = length;
		list_add_tail(&p->l, &buffer->demux_list);
		out_loc += length;
		in_loc += length;
	}
	/* Relies on scan_timestamp being last */
	if (buffer->scan_timestamp) {
		p = kmalloc(sizeof(*p), GFP_KERNEL);
		if (p == NULL) {
			ret = -ENOMEM;
			goto error_clear_mux_table;
		}
		ch = iio_find_channel_from_si(indio_dev,
					      indio_dev->scan_index_timestamp);
		length = ch->scan_type.storagebits/8;
		if (out_loc % length)
			out_loc += length - out_loc % length;
		if (in_loc % length)
			in_loc += length - in_loc % length;
		p->from = in_loc;
		p->to = out_loc;
		p->length = length;
		list_add_tail(&p->l, &buffer->demux_list);
		out_loc += length;
		in_loc += length;
	}
	buffer->demux_bounce = kzalloc(out_loc, GFP_KERNEL);
	if (buffer->demux_bounce == NULL) {
		ret = -ENOMEM;
		goto error_clear_mux_table;
	}
	return 0;

error_clear_mux_table:
	iio_buffer_demux_free(buffer);

	return ret;
}

int iio_update_demux(struct iio_dev *indio_dev)
{
	struct iio_buffer *buffer;
	int ret;

	list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list) {
		ret = iio_buffer_update_demux(indio_dev, buffer);
		if (ret < 0)
			goto error_clear_mux_table;
	}
	return 0;

error_clear_mux_table:
	list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list)
		iio_buffer_demux_free(buffer);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_update_demux);

/**
 * iio_buffer_release() - Free a buffer's resources
 * @ref: Pointer to the kref embedded in the iio_buffer struct
 *
 * This function is called when the last reference to the buffer has been
 * dropped. It will typically free all resources allocated by the buffer. Do
 * not call this function manually, always use iio_buffer_put() when done
 * using a buffer.
 */
static void iio_buffer_release(struct kref *ref)
{
	struct iio_buffer *buffer = container_of(ref, struct iio_buffer, ref);

	buffer->access->release(buffer);
}

/**
 * iio_buffer_get() - Grab a reference to the buffer
 * @buffer: The buffer to grab a reference for, may be NULL
 *
 * Returns the pointer to the buffer that was passed into the function.
 */
struct iio_buffer *iio_buffer_get(struct iio_buffer *buffer)
{
	if (buffer)
		kref_get(&buffer->ref);

	return buffer;
}
EXPORT_SYMBOL_GPL(iio_buffer_get);

/**
 * iio_buffer_put() - Release the reference to the buffer
 * @buffer: The buffer to release the reference for, may be NULL
 */
void iio_buffer_put(struct iio_buffer *buffer)
{
	if (buffer)
		kref_put(&buffer->ref, iio_buffer_release);
}
EXPORT_SYMBOL_GPL(iio_buffer_put);
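
/*
 * Illustrative sketch (ownership flow assumed): each holder that stores a
 * pointer to a buffer takes its own reference, and drops it when the pointer
 * goes away, mirroring iio_buffer_activate()/iio_buffer_deactivate() above:
 *
 *	my_consumer->buffer = iio_buffer_get(buffer);
 *	...
 *	iio_buffer_put(my_consumer->buffer);
 *	my_consumer->buffer = NULL;
 */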