/* The industrial I/O core
 *
 * Copyright (c) 2008 Jonathan Cameron
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * Handling of buffer allocation / resizing.
 *
 * Things to look at here.
 * - Better memory allocation techniques?
 * - Alternative access techniques?
 */
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/cdev.h>
#include <linux/slab.h>
#include <linux/poll.h>

#include "iio.h"
#include "iio_core.h"
#include "sysfs.h"
#include "buffer_generic.h"

static const char * const iio_endian_prefix[] = {
        [IIO_BE] = "be",
        [IIO_LE] = "le",
};

/**
 * iio_buffer_read_first_n_outer() - chrdev read for buffer access
 *
 * This function relies on all buffer implementations having an
 * iio_buffer as their first element.
 **/
ssize_t iio_buffer_read_first_n_outer(struct file *filp, char __user *buf,
                                      size_t n, loff_t *f_ps)
{
        struct iio_dev *indio_dev = filp->private_data;
        struct iio_buffer *rb = indio_dev->buffer;

        if (!rb->access->read_first_n)
                return -EINVAL;
        return rb->access->read_first_n(rb, n, buf);
}

/**
 * iio_buffer_poll() - poll the buffer to find out if it has data
 */
unsigned int iio_buffer_poll(struct file *filp,
                             struct poll_table_struct *wait)
{
        struct iio_dev *indio_dev = filp->private_data;
        struct iio_buffer *rb = indio_dev->buffer;

        poll_wait(filp, &rb->pollq, wait);
        if (rb->stufftoread)
                return POLLIN | POLLRDNORM;
        /* need a way of knowing if there may be enough data... */
        return 0;
}

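/**
 * iio_chrdev_buffer_open() - chrdev open for buffer access
 *
 * Marks the buffer as in use where the access functions support it.
 * Fails with -EINVAL if the device has no buffer.
 **/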
int iio_chrdev_buffer_open(struct iio_dev *indio_dev)
{
        struct iio_buffer *rb = indio_dev->buffer;

        if (!rb)
                return -EINVAL;
        if (rb->access->mark_in_use)
                rb->access->mark_in_use(rb);
        return 0;
}

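/**
 * iio_chrdev_buffer_release() - chrdev release for buffer access
 *
 * Clears the busy flag and drops the in-use marking taken on open.
 **/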
void iio_chrdev_buffer_release(struct iio_dev *indio_dev)
{
        struct iio_buffer *rb = indio_dev->buffer;

        clear_bit(IIO_BUSY_BIT_POS, &rb->flags);
        if (rb->access->unmark_in_use)
                rb->access->unmark_in_use(rb);
}

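/**
 * iio_buffer_init() - basic initialization of a new buffer
 *
 * Ties the buffer to its device and sets up the poll wait queue.
 * Must be run before the buffer is otherwise used.
 **/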
void iio_buffer_init(struct iio_buffer *buffer, struct iio_dev *dev_info)
{
        buffer->indio_dev = dev_info;
        init_waitqueue_head(&buffer->pollq);
}
EXPORT_SYMBOL(iio_buffer_init);

static ssize_t iio_show_scan_index(struct device *dev,
                                   struct device_attribute *attr,
                                   char *buf)
{
        return sprintf(buf, "%u\n", to_iio_dev_attr(attr)->c->scan_index);
}

static ssize_t iio_show_fixed_type(struct device *dev,
                                   struct device_attribute *attr,
                                   char *buf)
{
        struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
        u8 type = this_attr->c->scan_type.endianness;

        if (type == IIO_CPU) {
                /* In kernel space __LITTLE_ENDIAN is only defined on
                 * little endian builds, so resolve this at
                 * preprocessor time rather than in an if ().
                 */
#ifdef __LITTLE_ENDIAN
                type = IIO_LE;
#else
                type = IIO_BE;
#endif
        }
        return sprintf(buf, "%s:%c%d/%d>>%u\n",
                       iio_endian_prefix[type],
                       this_attr->c->scan_type.sign,
                       this_attr->c->scan_type.realbits,
                       this_attr->c->scan_type.storagebits,
                       this_attr->c->scan_type.shift);
}

static ssize_t iio_scan_el_show(struct device *dev,
                                struct device_attribute *attr,
                                char *buf)
{
        int ret;
        struct iio_dev *dev_info = dev_get_drvdata(dev);

        ret = iio_scan_mask_query(dev_info->buffer,
                                  to_iio_dev_attr(attr)->address);
        if (ret < 0)
                return ret;
        return sprintf(buf, "%d\n", ret);
}

static int iio_scan_mask_clear(struct iio_buffer *buffer, int bit)
{
        clear_bit(bit, buffer->scan_mask);
        buffer->scan_count--;
        return 0;
}

static ssize_t iio_scan_el_store(struct device *dev,
                                 struct device_attribute *attr,
                                 const char *buf,
                                 size_t len)
{
        int ret = 0;
        bool state;
        struct iio_dev *indio_dev = dev_get_drvdata(dev);
        struct iio_buffer *buffer = indio_dev->buffer;
        struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);

        state = !(buf[0] == '0');
        mutex_lock(&indio_dev->mlock);
        if (indio_dev->currentmode == INDIO_BUFFER_TRIGGERED) {
                ret = -EBUSY;
                goto error_ret;
        }
        ret = iio_scan_mask_query(buffer, this_attr->address);
        if (ret < 0)
                goto error_ret;
        if (!state && ret) {
                ret = iio_scan_mask_clear(buffer, this_attr->address);
                if (ret)
                        goto error_ret;
        } else if (state && !ret) {
                ret = iio_scan_mask_set(buffer, this_attr->address);
                if (ret)
                        goto error_ret;
        }

error_ret:
        mutex_unlock(&indio_dev->mlock);

        /* The mask query may legitimately leave ret == 1, so only
         * negative values are errors here. */
        return ret < 0 ? ret : len;
}

static ssize_t iio_scan_el_ts_show(struct device *dev,
                                   struct device_attribute *attr,
                                   char *buf)
{
        struct iio_dev *dev_info = dev_get_drvdata(dev);

        return sprintf(buf, "%d\n", dev_info->buffer->scan_timestamp);
}

static ssize_t iio_scan_el_ts_store(struct device *dev,
                                    struct device_attribute *attr,
                                    const char *buf,
                                    size_t len)
{
        int ret = 0;
        struct iio_dev *indio_dev = dev_get_drvdata(dev);
        bool state;

        state = !(buf[0] == '0');
        mutex_lock(&indio_dev->mlock);
        if (indio_dev->currentmode == INDIO_BUFFER_TRIGGERED) {
                ret = -EBUSY;
                goto error_ret;
        }
        indio_dev->buffer->scan_timestamp = state;
error_ret:
        mutex_unlock(&indio_dev->mlock);

        return ret ? ret : len;
}

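/*
 * Create the index, type and en(able) sysfs attributes for one channel's
 * scan element.  The timestamp channel gets its own en show/store pair.
 * Returns the number of attributes created or a negative error code.
 */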
static int iio_buffer_add_channel_sysfs(struct iio_dev *indio_dev,
                                        const struct iio_chan_spec *chan)
{
        int ret, attrcount = 0;
        struct iio_buffer *buffer = indio_dev->buffer;

        ret = __iio_add_chan_devattr("index",
                                     chan,
                                     &iio_show_scan_index,
                                     NULL,
                                     0,
                                     0,
                                     &indio_dev->dev,
                                     &buffer->scan_el_dev_attr_list);
        if (ret)
                goto error_ret;
        attrcount++;
        ret = __iio_add_chan_devattr("type",
                                     chan,
                                     &iio_show_fixed_type,
                                     NULL,
                                     0,
                                     0,
                                     &indio_dev->dev,
                                     &buffer->scan_el_dev_attr_list);
        if (ret)
                goto error_ret;
        attrcount++;
        if (chan->type != IIO_TIMESTAMP)
                ret = __iio_add_chan_devattr("en",
                                             chan,
                                             &iio_scan_el_show,
                                             &iio_scan_el_store,
                                             chan->scan_index,
                                             0,
                                             &indio_dev->dev,
                                             &buffer->scan_el_dev_attr_list);
        else
                ret = __iio_add_chan_devattr("en",
                                             chan,
                                             &iio_scan_el_ts_show,
                                             &iio_scan_el_ts_store,
                                             chan->scan_index,
                                             0,
                                             &indio_dev->dev,
                                             &buffer->scan_el_dev_attr_list);
        /* Check the en attr too; otherwise an error here would be
         * silently overwritten by ret = attrcount below. */
        if (ret)
                goto error_ret;
        attrcount++;
        ret = attrcount;
error_ret:
        return ret;
}

static void iio_buffer_remove_and_free_scan_dev_attr(struct iio_dev *indio_dev,
                                                     struct iio_dev_attr *p)
{
        kfree(p->dev_attr.attr.name);
        kfree(p);
}

static void __iio_buffer_attr_cleanup(struct iio_dev *indio_dev)
{
        struct iio_dev_attr *p, *n;
        struct iio_buffer *buffer = indio_dev->buffer;

        list_for_each_entry_safe(p, n,
                                 &buffer->scan_el_dev_attr_list, l)
                iio_buffer_remove_and_free_scan_dev_attr(indio_dev, p);
}

static const char * const iio_scan_elements_group_name = "scan_elements";

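/**
 * iio_buffer_register() - register the buffer with the IIO core
 * @indio_dev: device whose buffer is being registered
 * @channels: channel descriptions used to build the scan elements
 * @num_channels: number of entries in @channels
 *
 * Builds the scan_elements sysfs group, combining any attributes
 * supplied by the buffer implementation with per-channel ones
 * created here, and sizes the scan mask to cover every scan index.
 **/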
int iio_buffer_register(struct iio_dev *indio_dev,
                        const struct iio_chan_spec *channels,
                        int num_channels)
{
        struct iio_dev_attr *p;
        struct attribute **attr;
        struct iio_buffer *buffer = indio_dev->buffer;
        int ret, i, attrn, attrcount, attrcount_orig = 0;

        if (buffer->attrs)
                indio_dev->groups[indio_dev->groupcounter++] = buffer->attrs;

        if (buffer->scan_el_attrs != NULL) {
                attr = buffer->scan_el_attrs->attrs;
                while (*attr++ != NULL)
                        attrcount_orig++;
        }
        attrcount = attrcount_orig;
        INIT_LIST_HEAD(&buffer->scan_el_dev_attr_list);
        if (channels) {
                /* Build the scan element attributes dynamically */
                for (i = 0; i < num_channels; i++) {
                        /* Establish necessary mask length.  Note this must
                         * use the channels argument, which need not be
                         * indio_dev->channels. */
                        if (channels[i].scan_index >
                            (int)indio_dev->masklength - 1)
                                indio_dev->masklength
                                        = channels[i].scan_index + 1;

                        ret = iio_buffer_add_channel_sysfs(indio_dev,
                                                           &channels[i]);
                        if (ret < 0)
                                goto error_cleanup_dynamic;
                        attrcount += ret;
                }
                if (indio_dev->masklength && buffer->scan_mask == NULL) {
                        buffer->scan_mask
                                = kzalloc(sizeof(*buffer->scan_mask)*
                                          BITS_TO_LONGS(indio_dev->masklength),
                                          GFP_KERNEL);
                        if (buffer->scan_mask == NULL) {
                                ret = -ENOMEM;
                                goto error_cleanup_dynamic;
                        }
                }
        }

        buffer->scan_el_group.name = iio_scan_elements_group_name;

        buffer->scan_el_group.attrs
                = kzalloc(sizeof(buffer->scan_el_group.attrs[0])*
                          (attrcount + 1),
                          GFP_KERNEL);
        if (buffer->scan_el_group.attrs == NULL) {
                ret = -ENOMEM;
                goto error_free_scan_mask;
        }
        /* Copy from the attrs array, not the attribute_group itself */
        if (buffer->scan_el_attrs)
                memcpy(buffer->scan_el_group.attrs,
                       buffer->scan_el_attrs->attrs,
                       sizeof(buffer->scan_el_group.attrs[0])*attrcount_orig);
        attrn = attrcount_orig;

        list_for_each_entry(p, &buffer->scan_el_dev_attr_list, l)
                buffer->scan_el_group.attrs[attrn++] = &p->dev_attr.attr;
        indio_dev->groups[indio_dev->groupcounter++] = &buffer->scan_el_group;

        return 0;

error_free_scan_mask:
        kfree(buffer->scan_mask);
error_cleanup_dynamic:
        __iio_buffer_attr_cleanup(indio_dev);

        return ret;
}
EXPORT_SYMBOL(iio_buffer_register);

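/**
 * iio_buffer_unregister() - unregister the buffer from the IIO core
 *
 * Frees the scan mask and the scan element attribute array allocated
 * by iio_buffer_register().
 **/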
void iio_buffer_unregister(struct iio_dev *indio_dev)
{
        kfree(indio_dev->buffer->scan_mask);
        kfree(indio_dev->buffer->scan_el_group.attrs);
        __iio_buffer_attr_cleanup(indio_dev);
}
EXPORT_SYMBOL(iio_buffer_unregister);

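/*
 * Sysfs read of the buffer length (in datums), deferring to the
 * buffer implementation's get_length() where one is provided.
 */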
ssize_t iio_buffer_read_length(struct device *dev,
                               struct device_attribute *attr,
                               char *buf)
{
        struct iio_dev *indio_dev = dev_get_drvdata(dev);
        struct iio_buffer *buffer = indio_dev->buffer;

        if (buffer->access->get_length)
                return sprintf(buf, "%d\n",
                               buffer->access->get_length(buffer));

        return 0;
}
EXPORT_SYMBOL(iio_buffer_read_length);

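/*
 * Sysfs write of the buffer length.  A request matching the current
 * length is a no-op; otherwise the new length is set and a parameter
 * change is flagged so the implementation can act on it.
 */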
ssize_t iio_buffer_write_length(struct device *dev,
                                struct device_attribute *attr,
                                const char *buf,
                                size_t len)
{
        int ret;
        ulong val;
        struct iio_dev *indio_dev = dev_get_drvdata(dev);
        struct iio_buffer *buffer = indio_dev->buffer;

        ret = strict_strtoul(buf, 10, &val);
        if (ret)
                return ret;

        if (buffer->access->get_length &&
            val == buffer->access->get_length(buffer))
                return len;

        if (buffer->access->set_length) {
                buffer->access->set_length(buffer, val);
                if (buffer->access->mark_param_change)
                        buffer->access->mark_param_change(buffer);
        }

        return len;
}
EXPORT_SYMBOL(iio_buffer_write_length);

ssize_t iio_buffer_read_bytes_per_datum(struct device *dev,
                                        struct device_attribute *attr,
                                        char *buf)
{
        struct iio_dev *indio_dev = dev_get_drvdata(dev);
        struct iio_buffer *buffer = indio_dev->buffer;

        if (buffer->access->get_bytes_per_datum)
                return sprintf(buf, "%d\n",
                               buffer->access->get_bytes_per_datum(buffer));

        return 0;
}
EXPORT_SYMBOL(iio_buffer_read_bytes_per_datum);

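/*
 * Sysfs write that starts or stops the buffer.  Walks the setup_ops
 * callbacks (preenable/postenable or predisable/postdisable) around
 * the mode change, unwinding the partial transition if postenable
 * fails.
 */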
ssize_t iio_buffer_store_enable(struct device *dev,
                                struct device_attribute *attr,
                                const char *buf,
                                size_t len)
{
        int ret;
        bool requested_state, current_state;
        int previous_mode;
        struct iio_dev *dev_info = dev_get_drvdata(dev);
        struct iio_buffer *buffer = dev_info->buffer;

        mutex_lock(&dev_info->mlock);
        previous_mode = dev_info->currentmode;
        requested_state = !(buf[0] == '0');
        current_state = !!(previous_mode & INDIO_ALL_BUFFER_MODES);
        if (current_state == requested_state) {
                printk(KERN_INFO "iio-buffer, current state requested again\n");
                goto done;
        }
        if (requested_state) {
                if (buffer->setup_ops->preenable) {
                        ret = buffer->setup_ops->preenable(dev_info);
                        if (ret) {
                                printk(KERN_ERR
                                       "Buffer not started: buffer preenable failed\n");
                                goto error_ret;
                        }
                }
                if (buffer->access->request_update) {
                        ret = buffer->access->request_update(buffer);
                        if (ret) {
                                printk(KERN_INFO
                                       "Buffer not started: buffer parameter update failed\n");
                                goto error_ret;
                        }
                }
                if (buffer->access->mark_in_use)
                        buffer->access->mark_in_use(buffer);
                /* Definitely possible for devices to support both of these. */
                if (dev_info->modes & INDIO_BUFFER_TRIGGERED) {
                        if (!dev_info->trig) {
                                printk(KERN_INFO
                                       "Buffer not started: no trigger\n");
                                ret = -EINVAL;
                                if (buffer->access->unmark_in_use)
                                        buffer->access->unmark_in_use(buffer);
                                goto error_ret;
                        }
                        dev_info->currentmode = INDIO_BUFFER_TRIGGERED;
                } else if (dev_info->modes & INDIO_BUFFER_HARDWARE) {
                        dev_info->currentmode = INDIO_BUFFER_HARDWARE;
                } else { /* should never be reached */
                        ret = -EINVAL;
                        goto error_ret;
                }

                if (buffer->setup_ops->postenable) {
                        ret = buffer->setup_ops->postenable(dev_info);
                        if (ret) {
                                printk(KERN_INFO
                                       "Buffer not started: postenable failed\n");
                                if (buffer->access->unmark_in_use)
                                        buffer->access->unmark_in_use(buffer);
                                dev_info->currentmode = previous_mode;
                                if (buffer->setup_ops->postdisable)
                                        buffer->setup_ops->postdisable(dev_info);
                                goto error_ret;
                        }
                }
        } else {
                if (buffer->setup_ops->predisable) {
                        ret = buffer->setup_ops->predisable(dev_info);
                        if (ret)
                                goto error_ret;
                }
                if (buffer->access->unmark_in_use)
                        buffer->access->unmark_in_use(buffer);
                dev_info->currentmode = INDIO_DIRECT_MODE;
                if (buffer->setup_ops->postdisable) {
                        ret = buffer->setup_ops->postdisable(dev_info);
                        if (ret)
                                goto error_ret;
                }
        }
done:
        mutex_unlock(&dev_info->mlock);
        return len;

error_ret:
        mutex_unlock(&dev_info->mlock);
        return ret;
}
EXPORT_SYMBOL(iio_buffer_store_enable);

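/*
 * Sysfs read of the buffer enable state: 1 when the device is in any
 * buffered mode, 0 otherwise.
 */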
ssize_t iio_buffer_show_enable(struct device *dev,
                               struct device_attribute *attr,
                               char *buf)
{
        struct iio_dev *dev_info = dev_get_drvdata(dev);

        return sprintf(buf, "%d\n", !!(dev_info->currentmode
                                       & INDIO_ALL_BUFFER_MODES));
}
EXPORT_SYMBOL(iio_buffer_show_enable);

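/**
 * iio_sw_buffer_preenable() - generic preenable for software buffers
 *
 * Computes the bytes per datum from the enabled scan elements, with
 * the timestamp (when enabled) aligned to sizeof(s64), and pushes the
 * result into the buffer implementation.
 **/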
int iio_sw_buffer_preenable(struct iio_dev *indio_dev)
{
        struct iio_buffer *buffer = indio_dev->buffer;
        size_t size;

        dev_dbg(&indio_dev->dev, "%s\n", __func__);
        /* Fail if no scan elements are enabled */
        if (!(buffer->scan_count || buffer->scan_timestamp))
                return -EINVAL;
        if (buffer->scan_timestamp) {
                if (buffer->scan_count)
                        /* Timestamp (aligned to s64) and data */
                        size = (((buffer->scan_count * buffer->bpe)
                                 + sizeof(s64) - 1)
                                & ~(sizeof(s64) - 1))
                                + sizeof(s64);
                else /* Timestamp only */
                        size = sizeof(s64);
        } else { /* Data only */
                size = buffer->scan_count * buffer->bpe;
        }
        buffer->access->set_bytes_per_datum(buffer, size);

        return 0;
}
EXPORT_SYMBOL(iio_sw_buffer_preenable);

/* Note: NULL is used as the error indicator here, since an empty mask
 * can never be a valid match. */
static unsigned long *iio_scan_mask_match(unsigned long *av_masks,
                                          unsigned int masklength,
                                          unsigned long *mask)
{
        if (bitmap_empty(mask, masklength))
                return NULL;
        while (*av_masks) {
                if (bitmap_subset(mask, av_masks, masklength))
                        return av_masks;
                av_masks += BITS_TO_LONGS(masklength);
        }
        return NULL;
}

/**
 * iio_scan_mask_set() - set particular bit in the scan mask
 * @buffer: the buffer whose scan mask we are interested in
 * @bit: the bit to be set.
 **/
int iio_scan_mask_set(struct iio_buffer *buffer, int bit)
{
        struct iio_dev *dev_info = buffer->indio_dev;
        unsigned long *mask;
        unsigned long *trialmask;

        /* Check the mask length before allocating a trial mask of
         * that length.  WARN_ON on a string literal is always true,
         * so use WARN(1, ...) to actually report the message. */
        if (!dev_info->masklength) {
                WARN(1, "trying to set scanmask prior to registering buffer\n");
                return -EINVAL;
        }
        trialmask = kmalloc(sizeof(*trialmask)*
                            BITS_TO_LONGS(dev_info->masklength),
                            GFP_KERNEL);
        if (trialmask == NULL)
                return -ENOMEM;
        bitmap_copy(trialmask, buffer->scan_mask, dev_info->masklength);
        set_bit(bit, trialmask);

        if (dev_info->available_scan_masks) {
                mask = iio_scan_mask_match(dev_info->available_scan_masks,
                                           dev_info->masklength,
                                           trialmask);
                if (!mask) {
                        kfree(trialmask);
                        return -EINVAL;
                }
        }
        bitmap_copy(buffer->scan_mask, trialmask, dev_info->masklength);
        buffer->scan_count++;

        kfree(trialmask);

        return 0;
}
EXPORT_SYMBOL_GPL(iio_scan_mask_set);

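/**
 * iio_scan_mask_query() - query state of a bit in the scan mask
 * @buffer: the buffer whose scan mask we are interested in
 * @bit: the bit to be queried
 *
 * Returns 1 if the bit is set, 0 if not (or if no scan mask has been
 * allocated yet) and a negative error code for an out of range bit.
 **/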
int iio_scan_mask_query(struct iio_buffer *buffer, int bit)
{
        struct iio_dev *dev_info = buffer->indio_dev;
        unsigned long *mask;

        /* Valid bits run from 0 to masklength - 1 */
        if (bit >= dev_info->masklength)
                return -EINVAL;

        if (!buffer->scan_mask)
                return 0;
        if (dev_info->available_scan_masks)
                mask = iio_scan_mask_match(dev_info->available_scan_masks,
                                           dev_info->masklength,
                                           buffer->scan_mask);
        else
                mask = buffer->scan_mask;
        if (!mask)
                return 0;

        return test_bit(bit, mask);
}
EXPORT_SYMBOL_GPL(iio_scan_mask_query);