/* The industrial I/O simple minimally locked ring buffer.
 *
 * Copyright (c) 2008 Jonathan Cameron
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 */

#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/workqueue.h>
#include <linux/sched.h>
#include <linux/poll.h>
#include "ring_sw.h"
#include <linux/iio/trigger.h>

/**
 * struct iio_sw_ring_buffer - software ring buffer
 * @buf:		generic ring buffer elements
 * @data:		the ring buffer memory
 * @read_p:		read pointer (oldest available)
 * @write_p:		write pointer
 * @half_p:		half buffer length behind write_p (event generation)
 * @update_needed:	flag to indicate change in size requested
 *
 * Note that the first element of all ring buffers must be a
 * struct iio_buffer.
 **/
struct iio_sw_ring_buffer {
	struct iio_buffer buf;
	unsigned char *data;
	unsigned char *read_p;
	unsigned char *write_p;
	/* used to act as a point at which to signal an event */
	unsigned char *half_p;
	int update_needed;
};

#define iio_to_sw_ring(r) container_of(r, struct iio_sw_ring_buffer, buf)

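/*
 * Overview of the locking scheme: each ring has exactly one writer
 * (the driver's fill function, iio_store_to_sw_ring() below) which is
 * never blocked by readers.  Ordering in the store path is enforced
 * with barrier() calls; readers cope with concurrent writes by
 * re-sampling the read/write pointers until they see a consistent
 * pair, and by discarding whatever the writer overran mid-copy.
 */
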
static inline int __iio_allocate_sw_ring_buffer(struct iio_sw_ring_buffer *ring,
						int bytes_per_datum, int length)
{
	if ((length == 0) || (bytes_per_datum == 0))
		return -EINVAL;
	__iio_update_buffer(&ring->buf, bytes_per_datum, length);
	ring->data = kmalloc(length*ring->buf.bytes_per_datum, GFP_ATOMIC);
	ring->read_p = NULL;
	ring->write_p = NULL;
	ring->half_p = NULL;
	return ring->data ? 0 : -ENOMEM;
}

static inline void __iio_free_sw_ring_buffer(struct iio_sw_ring_buffer *ring)
{
	kfree(ring->data);
}

/* Ring buffer related functionality */
/* Store to ring is typically called in the bh of a data ready interrupt handler
 * in the device driver */
/* Lock always held if there is a chance this may be called */
/* Only one of these per ring may run concurrently - enforced by drivers */
static int iio_store_to_sw_ring(struct iio_sw_ring_buffer *ring,
				unsigned char *data)
{
	int ret = 0;
	unsigned char *temp_ptr, *change_test_ptr;

	/* initial store */
	if (unlikely(ring->write_p == NULL)) {
		ring->write_p = ring->data;
		/* Doesn't actually matter if this is out of the set
		 * as long as the read pointer is valid before this
		 * passes it - guaranteed as set later in this function.
		 */
		ring->half_p = ring->data - ring->buf.length*ring->buf.bytes_per_datum/2;
	}
	/* Copy data to wherever the current write pointer says */
	memcpy(ring->write_p, data, ring->buf.bytes_per_datum);
	barrier();
	/* Update the pointer used to get the most recent value.
	 * Always valid as it points to either the latest or second latest value.
	 * Before this runs it is null and read attempts fail with -EAGAIN.
	 */
	barrier();
	/* temp_ptr used to ensure we never have an invalid pointer;
	 * it may be slightly lagging, but never invalid
	 */
	temp_ptr = ring->write_p + ring->buf.bytes_per_datum;
	/* End of ring, back to the beginning */
	if (temp_ptr == ring->data + ring->buf.length*ring->buf.bytes_per_datum)
		temp_ptr = ring->data;
	/* Update the write pointer.
	 * Always valid as long as this is the only function able to write.
	 * Care needed with smp systems to ensure more than one ring fill
	 * is never scheduled.
	 */
	ring->write_p = temp_ptr;

	if (ring->read_p == NULL)
		ring->read_p = ring->data;
	/* Buffer full - move the read pointer and create / escalate
	 * ring event */
	/* Tricky case - if the read pointer moves before we adjust it.
	 * Handle by not pushing if it has moved - may result in occasional
	 * unnecessary buffer full events when it wasn't quite true.
	 */
	else if (ring->write_p == ring->read_p) {
		change_test_ptr = ring->read_p;
		temp_ptr = change_test_ptr + ring->buf.bytes_per_datum;
		if (temp_ptr
		    == ring->data + ring->buf.length*ring->buf.bytes_per_datum) {
			temp_ptr = ring->data;
		}
		/* We are moving the pointer on by one because the ring is full.
		 * Any change to the read pointer will be this or greater.
		 */
		if (change_test_ptr == ring->read_p)
			ring->read_p = temp_ptr;
	}
	/* investigate if our event barrier has been passed */
	/* There are definite 'issues' with this and chances of
	 * simultaneous read */
	/* Also need to use loop count to ensure this only happens once */
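	/* half_p trails the write pointer by half a buffer length; when
	 * it catches up with read_p, at least half of the ring holds
	 * unread data, so flag data as available and wake any pollers.
	 */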
	ring->half_p += ring->buf.bytes_per_datum;
	if (ring->half_p == ring->data + ring->buf.length*ring->buf.bytes_per_datum)
		ring->half_p = ring->data;
	if (ring->half_p == ring->read_p) {
		ring->buf.stufftoread = true;
		wake_up_interruptible(&ring->buf.pollq);
	}
	return ret;
}
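
/*
 * Illustrative sketch only (not part of this file): a client driver's
 * data-ready bottom half would typically capture one scan and push it
 * into the ring via the access functions wired up at the bottom of
 * this file.  The names my_dev_data_rdy_bh and st->rx_buf are
 * hypothetical.
 *
 *	static void my_dev_data_rdy_bh(struct iio_dev *indio_dev)
 *	{
 *		struct my_dev_state *st = iio_priv(indio_dev);
 *
 *		// fill st->rx_buf with one scan (bytes_per_datum bytes)
 *		indio_dev->buffer->access->store_to(indio_dev->buffer,
 *						    st->rx_buf);
 *	}
 */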

static int iio_read_first_n_sw_rb(struct iio_buffer *r,
				  size_t n, char __user *buf)
{
	struct iio_sw_ring_buffer *ring = iio_to_sw_ring(r);

	u8 *initial_read_p, *initial_write_p, *current_read_p, *end_read_p;
	u8 *data;
	int ret, max_copied, bytes_to_rip, dead_offset;
	size_t data_available, buffer_size;

	/* A userspace program has probably made an error if it tries to
	 * read something that is not a whole number of bpds.
	 * Return an error.
	 */
	if (n % ring->buf.bytes_per_datum) {
		ret = -EINVAL;
		printk(KERN_INFO "Ring buffer read request not whole number of "
		       "samples: Request bytes %zd, Current bytes per datum %d\n",
		       n, ring->buf.bytes_per_datum);
		goto error_ret;
	}

	buffer_size = ring->buf.bytes_per_datum*ring->buf.length;

	/* Limit size to whole of ring buffer */
	bytes_to_rip = min_t(size_t, buffer_size, n);

	data = kmalloc(bytes_to_rip, GFP_KERNEL);
	if (data == NULL) {
		ret = -ENOMEM;
		goto error_ret;
	}

	/* build local copy */
	initial_read_p = ring->read_p;
	if (unlikely(initial_read_p == NULL)) { /* No data here as yet */
		ret = 0;
		goto error_free_data_cpy;
	}

	initial_write_p = ring->write_p;

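	/* The writer may move read_p/write_p at any moment, so loop
	 * until two consecutive samples of the pair agree: that gives a
	 * coherent (if slightly stale) snapshot without locking out the
	 * store path.
	 */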
	/* Need a consistent pair */
	while ((initial_read_p != ring->read_p)
	       || (initial_write_p != ring->write_p)) {
		initial_read_p = ring->read_p;
		initial_write_p = ring->write_p;
	}
	if (initial_write_p == initial_read_p) {
		/* No new data available. */
		ret = 0;
		goto error_free_data_cpy;
	}

	if (initial_write_p >= initial_read_p)
		data_available = initial_write_p - initial_read_p;
	else
		data_available = buffer_size - (initial_read_p - initial_write_p);

	if (data_available < bytes_to_rip)
		bytes_to_rip = data_available;

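	/* Copy out in at most two chunks: from the snapshot read pointer
	 * to the physical end of the ring, then any remainder from the
	 * start of the ring (the wraparound case).
	 */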
	if (initial_read_p + bytes_to_rip >= ring->data + buffer_size) {
		max_copied = ring->data + buffer_size - initial_read_p;
		memcpy(data, initial_read_p, max_copied);
		memcpy(data + max_copied, ring->data, bytes_to_rip - max_copied);
		end_read_p = ring->data + bytes_to_rip - max_copied;
	} else {
		memcpy(data, initial_read_p, bytes_to_rip);
		end_read_p = initial_read_p + bytes_to_rip;
	}

	/* Now to verify which section was cleanly copied - i.e. how far
	 * the read pointer has been pushed */
	current_read_p = ring->read_p;

	if (initial_read_p <= current_read_p)
		dead_offset = current_read_p - initial_read_p;
	else
		dead_offset = buffer_size - (initial_read_p - current_read_p);

	/* possible issue if the initial write has been lapped or indeed
	 * the point we were reading to has been passed */
	/* No valid data read.
	 * In this case the read pointer is already correct, having been
	 * pushed further than we would look. */
	if (bytes_to_rip - dead_offset < 0) {
		ret = 0;
		goto error_free_data_cpy;
	}

	/* Set up the next read position.
	 * Beware, this may fail due to concurrency fun and games.
	 * Possible that sufficient fill commands have run to push the read
	 * pointer past where we would be after the rip. If this occurs, leave
	 * it be.
	 */
	/* Tricky - deal with loops */

	while (ring->read_p != end_read_p)
		ring->read_p = end_read_p;

	ret = bytes_to_rip - dead_offset;

	if (copy_to_user(buf, data + dead_offset, ret)) {
		ret = -EFAULT;
		goto error_free_data_cpy;
	}

	if (bytes_to_rip >= ring->buf.length*ring->buf.bytes_per_datum/2)
		ring->buf.stufftoread = false;

error_free_data_cpy:
	kfree(data);
error_ret:

	return ret;
}

static int iio_store_to_sw_rb(struct iio_buffer *r,
			      u8 *data)
{
	struct iio_sw_ring_buffer *ring = iio_to_sw_ring(r);

	return iio_store_to_sw_ring(ring, data);
}

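/*
 * Note that set_length()/set_bytes_per_datum() below only mark the ring
 * as needing an update; the backing store is actually freed and
 * reallocated here, when the core calls request_update(), typically as
 * the buffer is being enabled.
 */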
static int iio_request_update_sw_rb(struct iio_buffer *r)
{
	int ret = 0;
	struct iio_sw_ring_buffer *ring = iio_to_sw_ring(r);

	r->stufftoread = false;
	if (!ring->update_needed)
		goto error_ret;
	__iio_free_sw_ring_buffer(ring);
	ret = __iio_allocate_sw_ring_buffer(ring, ring->buf.bytes_per_datum,
					    ring->buf.length);
error_ret:
	return ret;
}

static int iio_get_bytes_per_datum_sw_rb(struct iio_buffer *r)
{
	struct iio_sw_ring_buffer *ring = iio_to_sw_ring(r);

	return ring->buf.bytes_per_datum;
}

static int iio_mark_update_needed_sw_rb(struct iio_buffer *r)
{
	struct iio_sw_ring_buffer *ring = iio_to_sw_ring(r);

	ring->update_needed = true;
	return 0;
}

static int iio_set_bytes_per_datum_sw_rb(struct iio_buffer *r, size_t bpd)
{
	if (r->bytes_per_datum != bpd) {
		r->bytes_per_datum = bpd;
		iio_mark_update_needed_sw_rb(r);
	}
	return 0;
}

static int iio_get_length_sw_rb(struct iio_buffer *r)
{
	return r->length;
}

static int iio_set_length_sw_rb(struct iio_buffer *r, int length)
{
	if (r->length != length) {
		r->length = length;
		iio_mark_update_needed_sw_rb(r);
	}
	return 0;
}

static IIO_BUFFER_ENABLE_ATTR;
static IIO_BUFFER_LENGTH_ATTR;

/* Standard set of ring buffer attributes */
static struct attribute *iio_ring_attributes[] = {
	&dev_attr_length.attr,
	&dev_attr_enable.attr,
	NULL,
};

static struct attribute_group iio_ring_attribute_group = {
	.attrs = iio_ring_attributes,
	.name = "buffer",
};

static const struct iio_buffer_access_funcs ring_sw_access_funcs = {
	.store_to = &iio_store_to_sw_rb,
	.read_first_n = &iio_read_first_n_sw_rb,
	.request_update = &iio_request_update_sw_rb,
	.get_bytes_per_datum = &iio_get_bytes_per_datum_sw_rb,
	.set_bytes_per_datum = &iio_set_bytes_per_datum_sw_rb,
	.get_length = &iio_get_length_sw_rb,
	.set_length = &iio_set_length_sw_rb,
};

struct iio_buffer *iio_sw_rb_allocate(struct iio_dev *indio_dev)
{
	struct iio_buffer *buf;
	struct iio_sw_ring_buffer *ring;

	ring = kzalloc(sizeof(*ring), GFP_KERNEL);
	if (!ring)
		return NULL;
	ring->update_needed = true;
	buf = &ring->buf;
	iio_buffer_init(buf);
	buf->attrs = &iio_ring_attribute_group;
	buf->access = &ring_sw_access_funcs;

	return buf;
}
EXPORT_SYMBOL(iio_sw_rb_allocate);

void iio_sw_rb_free(struct iio_buffer *r)
{
	kfree(iio_to_sw_ring(r));
}
EXPORT_SYMBOL(iio_sw_rb_free);
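
/*
 * Typical driver usage (sketch; error handling elided and indio_dev
 * assumed to come from the usual IIO device allocation path):
 *
 *	indio_dev->buffer = iio_sw_rb_allocate(indio_dev);
 *	if (!indio_dev->buffer)
 *		return -ENOMEM;
 *	...
 *	iio_sw_rb_free(indio_dev->buffer);
 */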

MODULE_DESCRIPTION("Industrial I/O software ring buffer");
MODULE_LICENSE("GPL");