/* The industrial I/O core
 *
 * Copyright (c) 2008 Jonathan Cameron
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * Handling of buffer allocation / resizing.
 *
 *
 * Things to look at here.
 * - Better memory allocation techniques?
 * - Alternative access techniques?
 */
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/cdev.h>
#include <linux/slab.h>
#include <linux/poll.h>

#include <linux/iio/iio.h>
#include "iio_core.h"
#include <linux/iio/sysfs.h>
#include <linux/iio/buffer.h>

static const char * const iio_endian_prefix[] = {
	[IIO_BE] = "be",
	[IIO_LE] = "le",
};

static bool iio_buffer_is_active(struct iio_buffer *buf)
{
	return !list_empty(&buf->buffer_list);
}

/**
 * iio_buffer_read_first_n_outer() - chrdev read for buffer access
 * @filp:	file structure pointer for the chrdev
 * @buf:	destination buffer in userspace
 * @n:		maximum number of bytes to read
 * @f_ps:	file position (unused)
 *
 * This function relies on all buffer implementations having an
 * iio_buffer as their first element.
 */
ssize_t iio_buffer_read_first_n_outer(struct file *filp, char __user *buf,
				      size_t n, loff_t *f_ps)
{
	struct iio_dev *indio_dev = filp->private_data;
	struct iio_buffer *rb = indio_dev->buffer;

	if (!rb || !rb->access->read_first_n)
		return -EINVAL;
	return rb->access->read_first_n(rb, n, buf);
}

/**
 * iio_buffer_poll() - poll the buffer to find out if it has data
 * @filp:	file structure pointer for the chrdev
 * @wait:	poll table to which the buffer's wait queue is added
 *
 * Returns POLLIN | POLLRDNORM when data is available for reading,
 * 0 otherwise.
 */
unsigned int iio_buffer_poll(struct file *filp,
			     struct poll_table_struct *wait)
{
	struct iio_dev *indio_dev = filp->private_data;
	struct iio_buffer *rb = indio_dev->buffer;

	poll_wait(filp, &rb->pollq, wait);
	if (rb->stufftoread)
		return POLLIN | POLLRDNORM;
	/* need a way of knowing if there may be enough data... */
	return 0;
}

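/*
 * A minimal userspace sketch of how this poll hook is typically consumed,
 * assuming a hypothetical, already-enabled buffer on /dev/iio:device0
 * (illustrative only; device name and sizes depend on the system):
 *
 *	struct pollfd pfd = {
 *		.fd = open("/dev/iio:device0", O_RDONLY),
 *		.events = POLLIN,
 *	};
 *	poll(&pfd, 1, -1);		// sleeps on rb->pollq until stufftoread
 *	read(pfd.fd, data, scan_bytes);	// drains via read_first_n above
 */
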
void iio_buffer_init(struct iio_buffer *buffer)
{
	INIT_LIST_HEAD(&buffer->demux_list);
	INIT_LIST_HEAD(&buffer->buffer_list);
	init_waitqueue_head(&buffer->pollq);
}
EXPORT_SYMBOL(iio_buffer_init);

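/*
 * A hedged sketch of the expected call site: a buffer implementation embeds
 * struct iio_buffer as its first member (see the read path above) and
 * initialises the core part here. The type and allocator below are
 * hypothetical, not part of this file:
 *
 *	struct my_ring {
 *		struct iio_buffer buffer;	// must be the first member
 *		// implementation-private state follows
 *	};
 *
 *	struct iio_buffer *my_ring_allocate(void)
 *	{
 *		struct my_ring *ring = kzalloc(sizeof(*ring), GFP_KERNEL);
 *
 *		if (!ring)
 *			return NULL;
 *		iio_buffer_init(&ring->buffer);
 *		return &ring->buffer;
 *	}
 */
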
static ssize_t iio_show_scan_index(struct device *dev,
				   struct device_attribute *attr,
				   char *buf)
{
	return sprintf(buf, "%u\n", to_iio_dev_attr(attr)->c->scan_index);
}

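/*
 * The "type" attribute below encodes endianness, sign, real and storage bit
 * widths and the right shift. For example, a signed 12-bit sample stored in
 * 16 bits, shifted right by 4, on a little-endian device is reported as
 * "le:s12/16>>4" (values here are illustrative).
 */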
static ssize_t iio_show_fixed_type(struct device *dev,
				   struct device_attribute *attr,
				   char *buf)
{
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
	u8 type = this_attr->c->scan_type.endianness;

	if (type == IIO_CPU) {
#ifdef __LITTLE_ENDIAN
		type = IIO_LE;
#else
		type = IIO_BE;
#endif
	}
	return sprintf(buf, "%s:%c%d/%d>>%u\n",
		       iio_endian_prefix[type],
		       this_attr->c->scan_type.sign,
		       this_attr->c->scan_type.realbits,
		       this_attr->c->scan_type.storagebits,
		       this_attr->c->scan_type.shift);
}

static ssize_t iio_scan_el_show(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	int ret;
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);

	ret = test_bit(to_iio_dev_attr(attr)->address,
		       indio_dev->buffer->scan_mask);

	return sprintf(buf, "%d\n", ret);
}

static int iio_scan_mask_clear(struct iio_buffer *buffer, int bit)
{
	clear_bit(bit, buffer->scan_mask);
	return 0;
}

static ssize_t iio_scan_el_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf,
				 size_t len)
{
	int ret;
	bool state;
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_buffer *buffer = indio_dev->buffer;
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);

	ret = strtobool(buf, &state);
	if (ret < 0)
		return ret;
	mutex_lock(&indio_dev->mlock);
	if (iio_buffer_is_active(indio_dev->buffer)) {
		ret = -EBUSY;
		goto error_ret;
	}
	ret = iio_scan_mask_query(indio_dev, buffer, this_attr->address);
	if (ret < 0)
		goto error_ret;
	if (!state && ret) {
		ret = iio_scan_mask_clear(buffer, this_attr->address);
		if (ret)
			goto error_ret;
	} else if (state && !ret) {
		ret = iio_scan_mask_set(indio_dev, buffer, this_attr->address);
		if (ret)
			goto error_ret;
	}

error_ret:
	mutex_unlock(&indio_dev->mlock);

	return ret < 0 ? ret : len;
}

static ssize_t iio_scan_el_ts_show(struct device *dev,
				   struct device_attribute *attr,
				   char *buf)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);

	return sprintf(buf, "%d\n", indio_dev->buffer->scan_timestamp);
}

static ssize_t iio_scan_el_ts_store(struct device *dev,
				    struct device_attribute *attr,
				    const char *buf,
				    size_t len)
{
	int ret;
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	bool state;

	ret = strtobool(buf, &state);
	if (ret < 0)
		return ret;

	mutex_lock(&indio_dev->mlock);
	if (iio_buffer_is_active(indio_dev->buffer)) {
		ret = -EBUSY;
		goto error_ret;
	}
	indio_dev->buffer->scan_timestamp = state;
error_ret:
	mutex_unlock(&indio_dev->mlock);

	return ret ? ret : len;
}

static int iio_buffer_add_channel_sysfs(struct iio_dev *indio_dev,
					const struct iio_chan_spec *chan)
{
	int ret, attrcount = 0;
	struct iio_buffer *buffer = indio_dev->buffer;

	ret = __iio_add_chan_devattr("index",
				     chan,
				     &iio_show_scan_index,
				     NULL,
				     0,
				     IIO_SEPARATE,
				     &indio_dev->dev,
				     &buffer->scan_el_dev_attr_list);
	if (ret)
		goto error_ret;
	attrcount++;
	ret = __iio_add_chan_devattr("type",
				     chan,
				     &iio_show_fixed_type,
				     NULL,
				     0,
				     0,
				     &indio_dev->dev,
				     &buffer->scan_el_dev_attr_list);
	if (ret)
		goto error_ret;
	attrcount++;
	if (chan->type != IIO_TIMESTAMP)
		ret = __iio_add_chan_devattr("en",
					     chan,
					     &iio_scan_el_show,
					     &iio_scan_el_store,
					     chan->scan_index,
					     0,
					     &indio_dev->dev,
					     &buffer->scan_el_dev_attr_list);
	else
		ret = __iio_add_chan_devattr("en",
					     chan,
					     &iio_scan_el_ts_show,
					     &iio_scan_el_ts_store,
					     chan->scan_index,
					     0,
					     &indio_dev->dev,
					     &buffer->scan_el_dev_attr_list);
	if (ret)
		goto error_ret;
	attrcount++;
	ret = attrcount;
error_ret:
	return ret;
}

static void iio_buffer_remove_and_free_scan_dev_attr(struct iio_dev *indio_dev,
						     struct iio_dev_attr *p)
{
	kfree(p->dev_attr.attr.name);
	kfree(p);
}

static void __iio_buffer_attr_cleanup(struct iio_dev *indio_dev)
{
	struct iio_dev_attr *p, *n;
	struct iio_buffer *buffer = indio_dev->buffer;

	list_for_each_entry_safe(p, n,
				 &buffer->scan_el_dev_attr_list, l)
		iio_buffer_remove_and_free_scan_dev_attr(indio_dev, p);
}

static const char * const iio_scan_elements_group_name = "scan_elements";

int iio_buffer_register(struct iio_dev *indio_dev,
			const struct iio_chan_spec *channels,
			int num_channels)
{
	struct iio_dev_attr *p;
	struct attribute **attr;
	struct iio_buffer *buffer = indio_dev->buffer;
	int ret, i, attrn, attrcount, attrcount_orig = 0;

	if (buffer->attrs)
		indio_dev->groups[indio_dev->groupcounter++] = buffer->attrs;

	if (buffer->scan_el_attrs != NULL) {
		attr = buffer->scan_el_attrs->attrs;
		while (*attr++ != NULL)
			attrcount_orig++;
	}
	attrcount = attrcount_orig;
	INIT_LIST_HEAD(&buffer->scan_el_dev_attr_list);
	if (channels) {
		/* Build the scan element sysfs attributes per channel. */
		for (i = 0; i < num_channels; i++) {
			if (channels[i].scan_index < 0)
				continue;

			/* Establish necessary mask length */
			if (channels[i].scan_index >
			    (int)indio_dev->masklength - 1)
				indio_dev->masklength
					= channels[i].scan_index + 1;

			ret = iio_buffer_add_channel_sysfs(indio_dev,
							   &channels[i]);
			if (ret < 0)
				goto error_cleanup_dynamic;
			attrcount += ret;
			if (channels[i].type == IIO_TIMESTAMP)
				indio_dev->scan_index_timestamp =
					channels[i].scan_index;
		}
		if (indio_dev->masklength && buffer->scan_mask == NULL) {
			buffer->scan_mask = kcalloc(BITS_TO_LONGS(indio_dev->masklength),
						    sizeof(*buffer->scan_mask),
						    GFP_KERNEL);
			if (buffer->scan_mask == NULL) {
				ret = -ENOMEM;
				goto error_cleanup_dynamic;
			}
		}
	}

	buffer->scan_el_group.name = iio_scan_elements_group_name;

	buffer->scan_el_group.attrs = kcalloc(attrcount + 1,
					      sizeof(buffer->scan_el_group.attrs[0]),
					      GFP_KERNEL);
	if (buffer->scan_el_group.attrs == NULL) {
		ret = -ENOMEM;
		goto error_free_scan_mask;
	}
	if (buffer->scan_el_attrs)
		memcpy(buffer->scan_el_group.attrs, buffer->scan_el_attrs,
		       sizeof(buffer->scan_el_group.attrs[0])*attrcount_orig);
	attrn = attrcount_orig;

	list_for_each_entry(p, &buffer->scan_el_dev_attr_list, l)
		buffer->scan_el_group.attrs[attrn++] = &p->dev_attr.attr;
	indio_dev->groups[indio_dev->groupcounter++] = &buffer->scan_el_group;

	return 0;

error_free_scan_mask:
	kfree(buffer->scan_mask);
error_cleanup_dynamic:
	__iio_buffer_attr_cleanup(indio_dev);

	return ret;
}
EXPORT_SYMBOL(iio_buffer_register);

void iio_buffer_unregister(struct iio_dev *indio_dev)
{
	kfree(indio_dev->buffer->scan_mask);
	kfree(indio_dev->buffer->scan_el_group.attrs);
	__iio_buffer_attr_cleanup(indio_dev);
}
EXPORT_SYMBOL(iio_buffer_unregister);

ssize_t iio_buffer_read_length(struct device *dev,
			       struct device_attribute *attr,
			       char *buf)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_buffer *buffer = indio_dev->buffer;

	if (buffer->access->get_length)
		return sprintf(buf, "%d\n",
			       buffer->access->get_length(buffer));

	return 0;
}
EXPORT_SYMBOL(iio_buffer_read_length);

ssize_t iio_buffer_write_length(struct device *dev,
				struct device_attribute *attr,
				const char *buf,
				size_t len)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_buffer *buffer = indio_dev->buffer;
	unsigned int val;
	int ret;

	ret = kstrtouint(buf, 10, &val);
	if (ret)
		return ret;

	if (buffer->access->get_length)
		if (val == buffer->access->get_length(buffer))
			return len;

	mutex_lock(&indio_dev->mlock);
	if (iio_buffer_is_active(indio_dev->buffer)) {
		ret = -EBUSY;
	} else {
		if (buffer->access->set_length)
			buffer->access->set_length(buffer, val);
		ret = 0;
	}
	mutex_unlock(&indio_dev->mlock);

	return ret ? ret : len;
}
EXPORT_SYMBOL(iio_buffer_write_length);

ssize_t iio_buffer_show_enable(struct device *dev,
			       struct device_attribute *attr,
			       char *buf)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);

	return sprintf(buf, "%d\n", iio_buffer_is_active(indio_dev->buffer));
}
EXPORT_SYMBOL(iio_buffer_show_enable);

/* Note: a NULL return is used as the error indicator, since an empty or
 * unmatched mask makes no sense as a result. */
static const unsigned long *iio_scan_mask_match(const unsigned long *av_masks,
						unsigned int masklength,
						const unsigned long *mask)
{
	if (bitmap_empty(mask, masklength))
		return NULL;
	while (*av_masks) {
		if (bitmap_subset(mask, av_masks, masklength))
			return av_masks;
		av_masks += BITS_TO_LONGS(masklength);
	}
	return NULL;
}

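/*
 * A hedged sketch of the av_masks layout the helper above walks: drivers
 * that restrict scan combinations publish a zero-terminated array of
 * bitmaps in indio_dev->available_scan_masks. The masks below are
 * illustrative and assume masklength <= BITS_PER_LONG:
 *
 *	static const unsigned long my_scan_masks[] = {
 *		BIT(0) | BIT(1),	// channels 0 and 1 together
 *		BIT(2),			// channel 2 alone
 *		0,			// terminator the while loop relies on
 *	};
 *	indio_dev->available_scan_masks = my_scan_masks;
 */
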
static int iio_compute_scan_bytes(struct iio_dev *indio_dev, const long *mask,
				  bool timestamp)
{
	const struct iio_chan_spec *ch;
	unsigned bytes = 0;
	int length, i;

	/* How much space will the demuxed element take? */
	for_each_set_bit(i, mask,
			 indio_dev->masklength) {
		ch = iio_find_channel_from_si(indio_dev, i);
		length = ch->scan_type.storagebits / 8;
		bytes = ALIGN(bytes, length);
		bytes += length;
	}
	if (timestamp) {
		ch = iio_find_channel_from_si(indio_dev,
					      indio_dev->scan_index_timestamp);
		length = ch->scan_type.storagebits / 8;
		bytes = ALIGN(bytes, length);
		bytes += length;
	}
	return bytes;
}

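/*
 * Worked example of the alignment rule above (channel set is illustrative):
 * two 16-bit channels followed by the 64-bit timestamp give
 *	ch0: ALIGN(0, 2) + 2 -> bytes = 2
 *	ch1: ALIGN(2, 2) + 2 -> bytes = 4
 *	ts:  ALIGN(4, 8) + 8 -> bytes = 16
 * so the demuxed scan occupies 16 bytes, with 4 bytes of padding before the
 * timestamp.
 */
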
int iio_update_buffers(struct iio_dev *indio_dev,
		       struct iio_buffer *insert_buffer,
		       struct iio_buffer *remove_buffer)
{
	int ret;
	int success = 0;
	struct iio_buffer *buffer;
	unsigned long *compound_mask;
	const unsigned long *old_mask;

	/* Wind down existing buffers - iff there are any */
	if (!list_empty(&indio_dev->buffer_list)) {
		if (indio_dev->setup_ops->predisable) {
			ret = indio_dev->setup_ops->predisable(indio_dev);
			if (ret)
				goto error_ret;
		}
		indio_dev->currentmode = INDIO_DIRECT_MODE;
		if (indio_dev->setup_ops->postdisable) {
			ret = indio_dev->setup_ops->postdisable(indio_dev);
			if (ret)
				goto error_ret;
		}
	}
	/* Keep a copy of current setup to allow roll back */
	old_mask = indio_dev->active_scan_mask;
	if (!indio_dev->available_scan_masks)
		indio_dev->active_scan_mask = NULL;

	if (remove_buffer)
		list_del_init(&remove_buffer->buffer_list);
	if (insert_buffer)
		list_add(&insert_buffer->buffer_list, &indio_dev->buffer_list);

	/* If no buffers in list, we are done */
	if (list_empty(&indio_dev->buffer_list)) {
		indio_dev->currentmode = INDIO_DIRECT_MODE;
		if (indio_dev->available_scan_masks == NULL)
			kfree(old_mask);
		return 0;
	}

	/* What scan mask do we actually have? */
	compound_mask = kcalloc(BITS_TO_LONGS(indio_dev->masklength),
				sizeof(long), GFP_KERNEL);
	if (compound_mask == NULL) {
		if (indio_dev->available_scan_masks == NULL)
			kfree(old_mask);
		return -ENOMEM;
	}
	indio_dev->scan_timestamp = 0;

	list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list) {
		bitmap_or(compound_mask, compound_mask, buffer->scan_mask,
			  indio_dev->masklength);
		indio_dev->scan_timestamp |= buffer->scan_timestamp;
	}
	if (indio_dev->available_scan_masks) {
		indio_dev->active_scan_mask =
			iio_scan_mask_match(indio_dev->available_scan_masks,
					    indio_dev->masklength,
					    compound_mask);
		if (indio_dev->active_scan_mask == NULL) {
			/*
			 * Roll back.
			 * Note can only occur when adding a buffer.
			 */
			list_del_init(&insert_buffer->buffer_list);
			indio_dev->active_scan_mask = old_mask;
			success = -EINVAL;
		}
	} else {
		indio_dev->active_scan_mask = compound_mask;
	}

	iio_update_demux(indio_dev);

	/* Wind up again */
	if (indio_dev->setup_ops->preenable) {
		ret = indio_dev->setup_ops->preenable(indio_dev);
		if (ret) {
			printk(KERN_ERR
			       "Buffer not started: buffer preenable failed (%d)\n", ret);
			goto error_remove_inserted;
		}
	}
	indio_dev->scan_bytes =
		iio_compute_scan_bytes(indio_dev,
				       indio_dev->active_scan_mask,
				       indio_dev->scan_timestamp);
	list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list)
		if (buffer->access->request_update) {
			ret = buffer->access->request_update(buffer);
			if (ret) {
				printk(KERN_INFO
				       "Buffer not started: buffer parameter update failed (%d)\n", ret);
				goto error_run_postdisable;
			}
		}
	if (indio_dev->info->update_scan_mode) {
		ret = indio_dev->info
			->update_scan_mode(indio_dev,
					   indio_dev->active_scan_mask);
		if (ret < 0) {
			printk(KERN_INFO "Buffer not started: update scan mode failed (%d)\n", ret);
			goto error_run_postdisable;
		}
	}
	/* Definitely possible for devices to support both of these. */
	if (indio_dev->modes & INDIO_BUFFER_TRIGGERED) {
		if (!indio_dev->trig) {
			printk(KERN_INFO "Buffer not started: no trigger\n");
			ret = -EINVAL;
			/* Can only occur on first buffer */
			goto error_run_postdisable;
		}
		indio_dev->currentmode = INDIO_BUFFER_TRIGGERED;
	} else if (indio_dev->modes & INDIO_BUFFER_HARDWARE) {
		indio_dev->currentmode = INDIO_BUFFER_HARDWARE;
	} else { /* Should never be reached */
		ret = -EINVAL;
		goto error_run_postdisable;
	}

	if (indio_dev->setup_ops->postenable) {
		ret = indio_dev->setup_ops->postenable(indio_dev);
		if (ret) {
			printk(KERN_INFO
			       "Buffer not started: postenable failed (%d)\n", ret);
			indio_dev->currentmode = INDIO_DIRECT_MODE;
			if (indio_dev->setup_ops->postdisable)
				indio_dev->setup_ops->postdisable(indio_dev);
			goto error_disable_all_buffers;
		}
	}

	if (indio_dev->available_scan_masks)
		kfree(compound_mask);
	else
		kfree(old_mask);

	return success;

error_disable_all_buffers:
	indio_dev->currentmode = INDIO_DIRECT_MODE;
error_run_postdisable:
	if (indio_dev->setup_ops->postdisable)
		indio_dev->setup_ops->postdisable(indio_dev);
error_remove_inserted:
	if (insert_buffer)
		list_del_init(&insert_buffer->buffer_list);
	indio_dev->active_scan_mask = old_mask;
	kfree(compound_mask);
error_ret:
	return ret;
}
EXPORT_SYMBOL_GPL(iio_update_buffers);

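/*
 * A hedged sketch of a typical caller: attaching or detaching a single
 * buffer while holding mlock, as iio_buffer_store_enable() does below.
 * The "client_buffer" consumer is hypothetical:
 *
 *	mutex_lock(&indio_dev->mlock);
 *	ret = iio_update_buffers(indio_dev, client_buffer, NULL);  // attach
 *	...
 *	ret = iio_update_buffers(indio_dev, NULL, client_buffer);  // detach
 *	mutex_unlock(&indio_dev->mlock);
 */
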
ssize_t iio_buffer_store_enable(struct device *dev,
				struct device_attribute *attr,
				const char *buf,
				size_t len)
{
	int ret;
	bool requested_state;
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	bool inlist;

	ret = strtobool(buf, &requested_state);
	if (ret < 0)
		return ret;

	mutex_lock(&indio_dev->mlock);

	/* Find out if it is in the list */
	inlist = iio_buffer_is_active(indio_dev->buffer);
	/* Already in desired state */
	if (inlist == requested_state)
		goto done;

	if (requested_state)
		ret = iio_update_buffers(indio_dev,
					 indio_dev->buffer, NULL);
	else
		ret = iio_update_buffers(indio_dev,
					 NULL, indio_dev->buffer);

done:
	mutex_unlock(&indio_dev->mlock);
	return (ret < 0) ? ret : len;
}
EXPORT_SYMBOL(iio_buffer_store_enable);

int iio_sw_buffer_preenable(struct iio_dev *indio_dev)
{
	struct iio_buffer *buffer;
	unsigned bytes;

	dev_dbg(&indio_dev->dev, "%s\n", __func__);

	list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list)
		if (buffer->access->set_bytes_per_datum) {
			bytes = iio_compute_scan_bytes(indio_dev,
						       buffer->scan_mask,
						       buffer->scan_timestamp);

			buffer->access->set_bytes_per_datum(buffer, bytes);
		}
	return 0;
}
EXPORT_SYMBOL(iio_sw_buffer_preenable);

/**
 * iio_validate_scan_mask_onehot() - Validates that exactly one channel is selected
 * @indio_dev: the iio device
 * @mask: scan mask to be checked
 *
 * Return true if exactly one bit is set in the scan mask, false otherwise. It
 * can be used for devices where only one channel can be active for sampling at
 * a time.
 */
bool iio_validate_scan_mask_onehot(struct iio_dev *indio_dev,
				   const unsigned long *mask)
{
	return bitmap_weight(mask, indio_dev->masklength) == 1;
}
EXPORT_SYMBOL_GPL(iio_validate_scan_mask_onehot);

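/*
 * Typical use is as a driver's validate_scan_mask callback; the setup_ops
 * structure below is an illustrative sketch, not taken from this file:
 *
 *	static const struct iio_buffer_setup_ops my_setup_ops = {
 *		.preenable = &iio_sw_buffer_preenable,
 *		.validate_scan_mask = &iio_validate_scan_mask_onehot,
 *	};
 *	indio_dev->setup_ops = &my_setup_ops;
 */
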
static bool iio_validate_scan_mask(struct iio_dev *indio_dev,
				   const unsigned long *mask)
{
	if (!indio_dev->setup_ops->validate_scan_mask)
		return true;

	return indio_dev->setup_ops->validate_scan_mask(indio_dev, mask);
}

/**
 * iio_scan_mask_set() - set particular bit in the scan mask
 * @indio_dev: the iio device
 * @buffer: the buffer whose scan mask we are interested in
 * @bit: the bit to be set.
 *
 * Note that at this point we have no way of knowing what other
 * buffers might request, hence this code only verifies that the
 * individual buffer's request is plausible.
 */
int iio_scan_mask_set(struct iio_dev *indio_dev,
		      struct iio_buffer *buffer, int bit)
{
	const unsigned long *mask;
	unsigned long *trialmask;

	trialmask = kmalloc(sizeof(*trialmask)*
			    BITS_TO_LONGS(indio_dev->masklength),
			    GFP_KERNEL);

	if (trialmask == NULL)
		return -ENOMEM;
	if (!indio_dev->masklength) {
		WARN(1, "Trying to set scanmask prior to registering buffer\n");
		goto err_invalid_mask;
	}
	bitmap_copy(trialmask, buffer->scan_mask, indio_dev->masklength);
	set_bit(bit, trialmask);

	if (!iio_validate_scan_mask(indio_dev, trialmask))
		goto err_invalid_mask;

	if (indio_dev->available_scan_masks) {
		mask = iio_scan_mask_match(indio_dev->available_scan_masks,
					   indio_dev->masklength,
					   trialmask);
		if (!mask)
			goto err_invalid_mask;
	}
	bitmap_copy(buffer->scan_mask, trialmask, indio_dev->masklength);

	kfree(trialmask);

	return 0;

err_invalid_mask:
	kfree(trialmask);
	return -EINVAL;
}
EXPORT_SYMBOL_GPL(iio_scan_mask_set);

int iio_scan_mask_query(struct iio_dev *indio_dev,
			struct iio_buffer *buffer, int bit)
{
	if (bit >= indio_dev->masklength)
		return -EINVAL;

	if (!buffer->scan_mask)
		return 0;

	return test_bit(bit, buffer->scan_mask);
}
EXPORT_SYMBOL_GPL(iio_scan_mask_query);

/**
 * struct iio_demux_table - table describing demux memcpy ops
 * @from: index to copy from
 * @to: index to copy to
 * @length: how many bytes to copy
 * @l: list head used for management
 */
struct iio_demux_table {
	unsigned from;
	unsigned to;
	unsigned length;
	struct list_head l;
};

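/*
 * Illustrative demux plan for one buffer (values hypothetical): with an
 * active scan of channels {0, 1, 2} of 2 bytes each, but this buffer only
 * requesting {0, 2}, iio_buffer_update_demux() below would build
 *
 *	{ .from = 0, .to = 0, .length = 2 }	// channel 0
 *	{ .from = 4, .to = 2, .length = 2 }	// channel 2, skipping channel 1
 *
 * and iio_demux() copies those spans into demux_bounce for every scan.
 */
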
static const void *iio_demux(struct iio_buffer *buffer,
			     const void *datain)
{
	struct iio_demux_table *t;

	if (list_empty(&buffer->demux_list))
		return datain;
	list_for_each_entry(t, &buffer->demux_list, l)
		memcpy(buffer->demux_bounce + t->to,
		       datain + t->from, t->length);

	return buffer->demux_bounce;
}

static int iio_push_to_buffer(struct iio_buffer *buffer, const void *data)
{
	const void *dataout = iio_demux(buffer, data);

	return buffer->access->store_to(buffer, dataout);
}

static void iio_buffer_demux_free(struct iio_buffer *buffer)
{
	struct iio_demux_table *p, *q;

	list_for_each_entry_safe(p, q, &buffer->demux_list, l) {
		list_del(&p->l);
		kfree(p);
	}
}

int iio_push_to_buffers(struct iio_dev *indio_dev, const void *data)
{
	int ret;
	struct iio_buffer *buf;

	list_for_each_entry(buf, &indio_dev->buffer_list, buffer_list) {
		ret = iio_push_to_buffer(buf, data);
		if (ret < 0)
			return ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(iio_push_to_buffers);

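/*
 * A hedged sketch of the usual producer: a trigger handler assembling one
 * scan and pushing it to all attached buffers. Names below are hypothetical;
 * only iio_push_to_buffers() comes from this file:
 *
 *	static irqreturn_t my_trigger_handler(int irq, void *p)
 *	{
 *		struct iio_poll_func *pf = p;
 *		struct iio_dev *indio_dev = pf->indio_dev;
 *		u8 scan[32];	// must be able to hold indio_dev->scan_bytes
 *
 *		my_read_scan(indio_dev, scan);
 *		iio_push_to_buffers(indio_dev, scan);
 *		iio_trigger_notify_done(indio_dev->trig);
 *		return IRQ_HANDLED;
 *	}
 */
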
static int iio_buffer_update_demux(struct iio_dev *indio_dev,
				   struct iio_buffer *buffer)
{
	const struct iio_chan_spec *ch;
	int ret, in_ind = -1, out_ind, length;
	unsigned in_loc = 0, out_loc = 0;
	struct iio_demux_table *p;

	/* Clear out any old demux */
	iio_buffer_demux_free(buffer);
	kfree(buffer->demux_bounce);
	buffer->demux_bounce = NULL;

	/* First work out which scan mode we will actually have */
	if (bitmap_equal(indio_dev->active_scan_mask,
			 buffer->scan_mask,
			 indio_dev->masklength))
		return 0;

	/* Now we have the two masks, work from least sig and build up sizes */
	for_each_set_bit(out_ind,
			 indio_dev->active_scan_mask,
			 indio_dev->masklength) {
		in_ind = find_next_bit(indio_dev->active_scan_mask,
				       indio_dev->masklength,
				       in_ind + 1);
		while (in_ind != out_ind) {
			in_ind = find_next_bit(indio_dev->active_scan_mask,
					       indio_dev->masklength,
					       in_ind + 1);
			ch = iio_find_channel_from_si(indio_dev, in_ind);
			length = ch->scan_type.storagebits/8;
			/* Make sure we are aligned */
			in_loc += length;
			if (in_loc % length)
				in_loc += length - in_loc % length;
		}
		p = kmalloc(sizeof(*p), GFP_KERNEL);
		if (p == NULL) {
			ret = -ENOMEM;
			goto error_clear_mux_table;
		}
		ch = iio_find_channel_from_si(indio_dev, in_ind);
		length = ch->scan_type.storagebits/8;
		if (out_loc % length)
			out_loc += length - out_loc % length;
		if (in_loc % length)
			in_loc += length - in_loc % length;
		p->from = in_loc;
		p->to = out_loc;
		p->length = length;
		list_add_tail(&p->l, &buffer->demux_list);
		out_loc += length;
		in_loc += length;
	}
	/* Relies on scan_timestamp being last */
	if (buffer->scan_timestamp) {
		p = kmalloc(sizeof(*p), GFP_KERNEL);
		if (p == NULL) {
			ret = -ENOMEM;
			goto error_clear_mux_table;
		}
		ch = iio_find_channel_from_si(indio_dev,
					      indio_dev->scan_index_timestamp);
		length = ch->scan_type.storagebits/8;
		if (out_loc % length)
			out_loc += length - out_loc % length;
		if (in_loc % length)
			in_loc += length - in_loc % length;
		p->from = in_loc;
		p->to = out_loc;
		p->length = length;
		list_add_tail(&p->l, &buffer->demux_list);
		out_loc += length;
		in_loc += length;
	}
	buffer->demux_bounce = kzalloc(out_loc, GFP_KERNEL);
	if (buffer->demux_bounce == NULL) {
		ret = -ENOMEM;
		goto error_clear_mux_table;
	}
	return 0;

error_clear_mux_table:
	iio_buffer_demux_free(buffer);

	return ret;
}

int iio_update_demux(struct iio_dev *indio_dev)
{
	struct iio_buffer *buffer;
	int ret;

	list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list) {
		ret = iio_buffer_update_demux(indio_dev, buffer);
		if (ret < 0)
			goto error_clear_mux_table;
	}
	return 0;

error_clear_mux_table:
	list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list)
		iio_buffer_demux_free(buffer);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_update_demux);