blob: a541a73a2105836b30208895c6999253c21a78aa [file] [log] [blame]
/* The industrial I/O simple minimally locked ring buffer.
 *
 * Copyright (c) 2008 Jonathan Cameron
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 */
9
Tejun Heo5a0e3ad2010-03-24 17:04:11 +090010#include <linux/slab.h>
Jonathan Cameron2235acb2009-08-18 18:06:27 +010011#include <linux/kernel.h>
Jonathan Cameron2235acb2009-08-18 18:06:27 +010012#include <linux/module.h>
13#include <linux/device.h>
14#include <linux/workqueue.h>
Jonathan Camerona7348342011-05-18 14:40:55 +010015#include <linux/sched.h>
Jonathan Camerond5857d62011-02-11 13:09:09 +000016#include <linux/poll.h>
Jonathan Cameron2235acb2009-08-18 18:06:27 +010017#include "ring_sw.h"
Jonathan Cameron59883ba2010-07-11 16:39:18 +010018#include "trigger.h"
Jonathan Cameron2235acb2009-08-18 18:06:27 +010019
/**
 * struct iio_sw_ring_buffer - software ring buffer
 * @buf:		generic ring buffer elements; must be the first
 *			element so iio_to_sw_ring() can recover the
 *			containing structure via container_of()
 * @data:		the ring buffer memory
 * @read_p:		read pointer (oldest available)
 * @write_p:		write pointer
 * @half_p:		half buffer length behind write_p (event generation)
 * @use_count:		reference count to prevent resizing when in use
 * @update_needed:	flag to indicate a change in size requested
 * @use_lock:		lock to prevent change in size when in use
 *
 * Note that the first element of all ring buffers must be a
 * struct iio_buffer.
**/
struct iio_sw_ring_buffer {
	struct iio_buffer buf;
	unsigned char *data;
	unsigned char *read_p;
	unsigned char *write_p;
	/* used to act as a point at which to signal an event */
	unsigned char *half_p;
	int use_count;
	int update_needed;
	spinlock_t use_lock;
};

/* Recover the software ring from the embedded generic buffer structure. */
#define iio_to_sw_ring(r) container_of(r, struct iio_sw_ring_buffer, buf)
47
Jonathan Cameron6f2dfb32010-03-02 13:35:35 +000048static inline int __iio_allocate_sw_ring_buffer(struct iio_sw_ring_buffer *ring,
49 int bytes_per_datum, int length)
Jonathan Cameron2235acb2009-08-18 18:06:27 +010050{
51 if ((length == 0) || (bytes_per_datum == 0))
52 return -EINVAL;
Jonathan Cameron14555b12011-09-21 11:15:57 +010053 __iio_update_buffer(&ring->buf, bytes_per_datum, length);
Manuel Stahlffcab072010-08-31 11:32:50 +020054 ring->data = kmalloc(length*ring->buf.bytes_per_datum, GFP_ATOMIC);
Greg Kroah-Hartman19ca92e2010-05-04 22:33:27 -070055 ring->read_p = NULL;
56 ring->write_p = NULL;
Greg Kroah-Hartman19ca92e2010-05-04 22:33:27 -070057 ring->half_p = NULL;
Jonathan Cameron2235acb2009-08-18 18:06:27 +010058 return ring->data ? 0 : -ENOMEM;
59}
60
/* One-time initialisation of the lock protecting the ring's geometry
 * (resize vs. in-use accounting). */
static inline void __iio_init_sw_ring_buffer(struct iio_sw_ring_buffer *ring)
{
	spin_lock_init(&ring->use_lock);
}
65
/* Release the ring data area only; the containing structure is freed
 * separately (see iio_sw_rb_free()). */
static inline void __iio_free_sw_ring_buffer(struct iio_sw_ring_buffer *ring)
{
	kfree(ring->data);
}
70
/* Take a use reference on the ring.  While use_count is non-zero,
 * iio_request_update_sw_rb() refuses to free/reallocate the data area
 * (it returns -EAGAIN), so readers/writers see a stable buffer. */
static void iio_mark_sw_rb_in_use(struct iio_buffer *r)
{
	struct iio_sw_ring_buffer *ring = iio_to_sw_ring(r);
	spin_lock(&ring->use_lock);
	ring->use_count++;
	spin_unlock(&ring->use_lock);
}
Jonathan Cameron2235acb2009-08-18 18:06:27 +010078
/* Drop a use reference taken by iio_mark_sw_rb_in_use(); once the count
 * reaches zero a pending resize may proceed. */
static void iio_unmark_sw_rb_in_use(struct iio_buffer *r)
{
	struct iio_sw_ring_buffer *ring = iio_to_sw_ring(r);
	spin_lock(&ring->use_lock);
	ring->use_count--;
	spin_unlock(&ring->use_lock);
}
Jonathan Cameron2235acb2009-08-18 18:06:27 +010086
87
/* Ring buffer related functionality */
/* Store to ring is typically called in the bh of a data ready interrupt handler
 * in the device driver */
/* Lock always held if there is a chance this may be called */
/* Only one of these per ring may run concurrently - enforced by drivers */
/*
 * iio_store_to_sw_ring() - push one datum into the ring
 * @ring:	the software ring buffer
 * @data:	one datum of ring->buf.bytes_per_datum bytes
 * @timestamp:	currently unused by this implementation
 *
 * Minimally locked: relies on there being exactly one writer and on the
 * exact statement ordering below (pointers are only ever moved to valid
 * positions).  Returns 0 (ret is never set to anything else here).
 */
static int iio_store_to_sw_ring(struct iio_sw_ring_buffer *ring,
				unsigned char *data, s64 timestamp)
{
	int ret = 0;
	unsigned char *temp_ptr, *change_test_ptr;

	/* initial store */
	if (unlikely(ring->write_p == NULL)) {
		ring->write_p = ring->data;
		/* Doesn't actually matter if this is out of the set
		 * as long as the read pointer is valid before this
		 * passes it - guaranteed as set later in this function.
		 */
		ring->half_p = ring->data - ring->buf.length*ring->buf.bytes_per_datum/2;
	}
	/* Copy data to where ever the current write pointer says */
	memcpy(ring->write_p, data, ring->buf.bytes_per_datum);
	barrier();
	/* Update the pointer used to get most recent value.
	 * Always valid as either points to latest or second latest value.
	 * Before this runs it is null and read attempts fail with -EAGAIN.
	 */
	barrier();
	/* temp_ptr used to ensure we never have an invalid pointer
	 * it may be slightly lagging, but never invalid
	 */
	temp_ptr = ring->write_p + ring->buf.bytes_per_datum;
	/* End of ring, back to the beginning */
	if (temp_ptr == ring->data + ring->buf.length*ring->buf.bytes_per_datum)
		temp_ptr = ring->data;
	/* Update the write pointer
	 * always valid as long as this is the only function able to write.
	 * Care needed with smp systems to ensure more than one ring fill
	 * is never scheduled.
	 */
	ring->write_p = temp_ptr;

	if (ring->read_p == NULL)
		ring->read_p = ring->data;
	/* Buffer full - move the read pointer and create / escalate
	 * ring event */
	/* Tricky case - if the read pointer moves before we adjust it.
	 * Handle by not pushing if it has moved - may result in occasional
	 * unnecessary buffer full events when it wasn't quite true.
	 */
	else if (ring->write_p == ring->read_p) {
		change_test_ptr = ring->read_p;
		temp_ptr = change_test_ptr + ring->buf.bytes_per_datum;
		if (temp_ptr
		    == ring->data + ring->buf.length*ring->buf.bytes_per_datum) {
			temp_ptr = ring->data;
		}
		/* We are moving pointer on one because the ring is full.  Any
		 * change to the read pointer will be this or greater.
		 */
		if (change_test_ptr == ring->read_p)
			ring->read_p = temp_ptr;
	}
	/* investigate if our event barrier has been passed */
	/* There are definite 'issues' with this and chances of
	 * simultaneous read */
	/* Also need to use loop count to ensure this only happens once */
	ring->half_p += ring->buf.bytes_per_datum;
	if (ring->half_p == ring->data + ring->buf.length*ring->buf.bytes_per_datum)
		ring->half_p = ring->data;
	if (ring->half_p == ring->read_p) {
		/* Half-full watermark reached: flag data available and wake
		 * any poll()/read() waiters on the buffer's wait queue. */
		ring->buf.stufftoread = true;
		wake_up_interruptible(&ring->buf.pollq);
	}
	return ret;
}
164
Jonathan Cameron14555b12011-09-21 11:15:57 +0100165static int iio_read_first_n_sw_rb(struct iio_buffer *r,
Jonathan Cameron5565a452011-05-18 14:42:24 +0100166 size_t n, char __user *buf)
Jonathan Cameron2235acb2009-08-18 18:06:27 +0100167{
168 struct iio_sw_ring_buffer *ring = iio_to_sw_ring(r);
169
170 u8 *initial_read_p, *initial_write_p, *current_read_p, *end_read_p;
Jonathan Camerond5857d62011-02-11 13:09:09 +0000171 u8 *data;
Jonathan Cameronb26a2182011-05-18 14:41:02 +0100172 int ret, max_copied, bytes_to_rip, dead_offset;
Jonathan Cameron2235acb2009-08-18 18:06:27 +0100173
174 /* A userspace program has probably made an error if it tries to
175 * read something that is not a whole number of bpds.
176 * Return an error.
177 */
Jonathan Cameronb4281732011-04-15 18:55:55 +0100178 if (n % ring->buf.bytes_per_datum) {
Jonathan Cameron2235acb2009-08-18 18:06:27 +0100179 ret = -EINVAL;
180 printk(KERN_INFO "Ring buffer read request not whole number of"
Manuel Stahlffcab072010-08-31 11:32:50 +0200181 "samples: Request bytes %zd, Current bytes per datum %d\n",
Jonathan Cameronb4281732011-04-15 18:55:55 +0100182 n, ring->buf.bytes_per_datum);
Jonathan Cameron2235acb2009-08-18 18:06:27 +0100183 goto error_ret;
184 }
185 /* Limit size to whole of ring buffer */
Jonathan Cameronb4281732011-04-15 18:55:55 +0100186 bytes_to_rip = min((size_t)(ring->buf.bytes_per_datum*ring->buf.length),
187 n);
Jonathan Cameron2235acb2009-08-18 18:06:27 +0100188
Jonathan Camerond5857d62011-02-11 13:09:09 +0000189 data = kmalloc(bytes_to_rip, GFP_KERNEL);
190 if (data == NULL) {
Jonathan Cameron2235acb2009-08-18 18:06:27 +0100191 ret = -ENOMEM;
192 goto error_ret;
193 }
194
195 /* build local copy */
196 initial_read_p = ring->read_p;
Greg Kroah-Hartman19ca92e2010-05-04 22:33:27 -0700197 if (unlikely(initial_read_p == NULL)) { /* No data here as yet */
Jonathan Cameron2235acb2009-08-18 18:06:27 +0100198 ret = 0;
199 goto error_free_data_cpy;
200 }
201
202 initial_write_p = ring->write_p;
203
204 /* Need a consistent pair */
205 while ((initial_read_p != ring->read_p)
206 || (initial_write_p != ring->write_p)) {
207 initial_read_p = ring->read_p;
208 initial_write_p = ring->write_p;
209 }
210 if (initial_write_p == initial_read_p) {
211 /* No new data available.*/
212 ret = 0;
213 goto error_free_data_cpy;
214 }
215
216 if (initial_write_p >= initial_read_p + bytes_to_rip) {
217 /* write_p is greater than necessary, all is easy */
218 max_copied = bytes_to_rip;
Jonathan Camerond5857d62011-02-11 13:09:09 +0000219 memcpy(data, initial_read_p, max_copied);
Jonathan Cameron2235acb2009-08-18 18:06:27 +0100220 end_read_p = initial_read_p + max_copied;
221 } else if (initial_write_p > initial_read_p) {
222 /*not enough data to cpy */
223 max_copied = initial_write_p - initial_read_p;
Jonathan Camerond5857d62011-02-11 13:09:09 +0000224 memcpy(data, initial_read_p, max_copied);
Jonathan Cameron2235acb2009-08-18 18:06:27 +0100225 end_read_p = initial_write_p;
226 } else {
227 /* going through 'end' of ring buffer */
228 max_copied = ring->data
Manuel Stahlffcab072010-08-31 11:32:50 +0200229 + ring->buf.length*ring->buf.bytes_per_datum - initial_read_p;
Jonathan Camerond5857d62011-02-11 13:09:09 +0000230 memcpy(data, initial_read_p, max_copied);
Jonathan Cameron2235acb2009-08-18 18:06:27 +0100231 /* possible we are done if we align precisely with end */
232 if (max_copied == bytes_to_rip)
233 end_read_p = ring->data;
234 else if (initial_write_p
235 > ring->data + bytes_to_rip - max_copied) {
236 /* enough data to finish */
Jonathan Camerond5857d62011-02-11 13:09:09 +0000237 memcpy(data + max_copied, ring->data,
Jonathan Cameron2235acb2009-08-18 18:06:27 +0100238 bytes_to_rip - max_copied);
239 max_copied = bytes_to_rip;
240 end_read_p = ring->data + (bytes_to_rip - max_copied);
241 } else { /* not enough data */
Jonathan Camerond5857d62011-02-11 13:09:09 +0000242 memcpy(data + max_copied, ring->data,
Jonathan Cameron2235acb2009-08-18 18:06:27 +0100243 initial_write_p - ring->data);
244 max_copied += initial_write_p - ring->data;
245 end_read_p = initial_write_p;
246 }
247 }
248 /* Now to verify which section was cleanly copied - i.e. how far
249 * read pointer has been pushed */
250 current_read_p = ring->read_p;
251
252 if (initial_read_p <= current_read_p)
Jonathan Cameronb26a2182011-05-18 14:41:02 +0100253 dead_offset = current_read_p - initial_read_p;
Jonathan Cameron2235acb2009-08-18 18:06:27 +0100254 else
Jonathan Cameronb26a2182011-05-18 14:41:02 +0100255 dead_offset = ring->buf.length*ring->buf.bytes_per_datum
Jonathan Cameron2235acb2009-08-18 18:06:27 +0100256 - (initial_read_p - current_read_p);
257
258 /* possible issue if the initial write has been lapped or indeed
259 * the point we were reading to has been passed */
260 /* No valid data read.
261 * In this case the read pointer is already correct having been
262 * pushed further than we would look. */
Jonathan Cameronb26a2182011-05-18 14:41:02 +0100263 if (max_copied - dead_offset < 0) {
Jonathan Cameron2235acb2009-08-18 18:06:27 +0100264 ret = 0;
265 goto error_free_data_cpy;
266 }
267
268 /* setup the next read position */
269 /* Beware, this may fail due to concurrency fun and games.
270 * Possible that sufficient fill commands have run to push the read
271 * pointer past where we would be after the rip. If this occurs, leave
272 * it be.
273 */
274 /* Tricky - deal with loops */
275
276 while (ring->read_p != end_read_p)
277 ring->read_p = end_read_p;
278
Jonathan Cameronb26a2182011-05-18 14:41:02 +0100279 ret = max_copied - dead_offset;
Jonathan Cameron2235acb2009-08-18 18:06:27 +0100280
Jonathan Cameronb26a2182011-05-18 14:41:02 +0100281 if (copy_to_user(buf, data + dead_offset, ret)) {
Jonathan Camerond5857d62011-02-11 13:09:09 +0000282 ret = -EFAULT;
283 goto error_free_data_cpy;
284 }
Jonathan Camerona7348342011-05-18 14:40:55 +0100285
286 if (bytes_to_rip >= ring->buf.length*ring->buf.bytes_per_datum/2)
287 ring->buf.stufftoread = 0;
288
Jonathan Cameron2235acb2009-08-18 18:06:27 +0100289error_free_data_cpy:
Jonathan Camerond5857d62011-02-11 13:09:09 +0000290 kfree(data);
Jonathan Cameron2235acb2009-08-18 18:06:27 +0100291error_ret:
Jonathan Camerond5857d62011-02-11 13:09:09 +0000292
Jonathan Cameron2235acb2009-08-18 18:06:27 +0100293 return ret;
294}
Jonathan Cameron2235acb2009-08-18 18:06:27 +0100295
Jonathan Cameron14555b12011-09-21 11:15:57 +0100296static int iio_store_to_sw_rb(struct iio_buffer *r,
Jonathan Cameron5565a452011-05-18 14:42:24 +0100297 u8 *data,
298 s64 timestamp)
Jonathan Cameron2235acb2009-08-18 18:06:27 +0100299{
300 struct iio_sw_ring_buffer *ring = iio_to_sw_ring(r);
301 return iio_store_to_sw_ring(ring, data, timestamp);
302}
Jonathan Cameron2235acb2009-08-18 18:06:27 +0100303
/* Apply a pending geometry change (length / bytes_per_datum) by freeing
 * and reallocating the data area.  Skipped when nothing changed; fails
 * with -EAGAIN while the ring is marked in use.  The allocation below
 * must be GFP_ATOMIC because use_lock (a spinlock) is held. */
static int iio_request_update_sw_rb(struct iio_buffer *r)
{
	int ret = 0;
	struct iio_sw_ring_buffer *ring = iio_to_sw_ring(r);

	/* Any previously flagged data is discarded by the rebuild. */
	r->stufftoread = false;
	spin_lock(&ring->use_lock);
	if (!ring->update_needed)
		goto error_ret;
	if (ring->use_count) {
		ret = -EAGAIN;
		goto error_ret;
	}
	__iio_free_sw_ring_buffer(ring);
	ret = __iio_allocate_sw_ring_buffer(ring, ring->buf.bytes_per_datum,
					    ring->buf.length);
error_ret:
	spin_unlock(&ring->use_lock);
	return ret;
}
Jonathan Cameron2235acb2009-08-18 18:06:27 +0100324
Jonathan Cameron14555b12011-09-21 11:15:57 +0100325static int iio_get_bytes_per_datum_sw_rb(struct iio_buffer *r)
Jonathan Cameron2235acb2009-08-18 18:06:27 +0100326{
327 struct iio_sw_ring_buffer *ring = iio_to_sw_ring(r);
Manuel Stahlffcab072010-08-31 11:32:50 +0200328 return ring->buf.bytes_per_datum;
Jonathan Cameron2235acb2009-08-18 18:06:27 +0100329}
Jonathan Cameron2235acb2009-08-18 18:06:27 +0100330
Jonathan Cameron14555b12011-09-21 11:15:57 +0100331static int iio_set_bytes_per_datum_sw_rb(struct iio_buffer *r, size_t bpd)
Jonathan Cameron2235acb2009-08-18 18:06:27 +0100332{
Manuel Stahlffcab072010-08-31 11:32:50 +0200333 if (r->bytes_per_datum != bpd) {
334 r->bytes_per_datum = bpd;
Jonathan Cameron5565a452011-05-18 14:42:24 +0100335 if (r->access->mark_param_change)
336 r->access->mark_param_change(r);
Jonathan Cameron2235acb2009-08-18 18:06:27 +0100337 }
338 return 0;
339}
Jonathan Cameron2235acb2009-08-18 18:06:27 +0100340
/* Report the configured ring length in datums. */
static int iio_get_length_sw_rb(struct iio_buffer *r)
{
	return r->length;
}
Jonathan Cameron2235acb2009-08-18 18:06:27 +0100345
Jonathan Cameron14555b12011-09-21 11:15:57 +0100346static int iio_set_length_sw_rb(struct iio_buffer *r, int length)
Jonathan Cameron2235acb2009-08-18 18:06:27 +0100347{
348 if (r->length != length) {
349 r->length = length;
Jonathan Cameron5565a452011-05-18 14:42:24 +0100350 if (r->access->mark_param_change)
351 r->access->mark_param_change(r);
Jonathan Cameron2235acb2009-08-18 18:06:27 +0100352 }
353 return 0;
354}
Jonathan Cameron2235acb2009-08-18 18:06:27 +0100355
Jonathan Cameron14555b12011-09-21 11:15:57 +0100356static int iio_mark_update_needed_sw_rb(struct iio_buffer *r)
Jonathan Cameron2235acb2009-08-18 18:06:27 +0100357{
358 struct iio_sw_ring_buffer *ring = iio_to_sw_ring(r);
359 ring->update_needed = true;
360 return 0;
361}
Jonathan Cameron2235acb2009-08-18 18:06:27 +0100362
/* sysfs 'enable' and 'length' attributes supplied by the core buffer
 * macros (expand to dev_attr_enable / dev_attr_length). */
static IIO_BUFFER_ENABLE_ATTR;
static IIO_BUFFER_LENGTH_ATTR;

/* Standard set of ring buffer attributes */
static struct attribute *iio_ring_attributes[] = {
	&dev_attr_length.attr,
	&dev_attr_enable.attr,
	NULL,
};

/* Grouped under the "buffer" sysfs directory. */
static struct attribute_group iio_ring_attribute_group = {
	.attrs = iio_ring_attributes,
	.name = "buffer",
};
377
Jonathan Cameron14555b12011-09-21 11:15:57 +0100378struct iio_buffer *iio_sw_rb_allocate(struct iio_dev *indio_dev)
Jonathan Cameron2235acb2009-08-18 18:06:27 +0100379{
Jonathan Cameron14555b12011-09-21 11:15:57 +0100380 struct iio_buffer *buf;
Jonathan Cameron2235acb2009-08-18 18:06:27 +0100381 struct iio_sw_ring_buffer *ring;
382
383 ring = kzalloc(sizeof *ring, GFP_KERNEL);
384 if (!ring)
Greg Kroah-Hartman19ca92e2010-05-04 22:33:27 -0700385 return NULL;
Jonathan Cameron5565a452011-05-18 14:42:24 +0100386 ring->update_needed = true;
Jonathan Cameron2235acb2009-08-18 18:06:27 +0100387 buf = &ring->buf;
Jonathan Cameronf79a9092011-12-05 22:18:29 +0000388 iio_buffer_init(buf);
Jonathan Cameron6f2dfb32010-03-02 13:35:35 +0000389 __iio_init_sw_ring_buffer(ring);
Jonathan Cameron1aa04272011-08-30 12:32:47 +0100390 buf->attrs = &iio_ring_attribute_group;
Jonathan Cameron2235acb2009-08-18 18:06:27 +0100391
392 return buf;
393}
394EXPORT_SYMBOL(iio_sw_rb_allocate);
395
void iio_sw_rb_free(struct iio_buffer *r)
{
	/* Frees the containing structure only; the data area is released
	 * in __iio_free_sw_ring_buffer() via the update path.
	 * NOTE(review): if the ring still owns ->data here it would leak -
	 * TODO confirm against the buffer teardown callers. */
	kfree(iio_to_sw_ring(r));
}
EXPORT_SYMBOL(iio_sw_rb_free);
Barry Songad577f82010-07-11 16:39:16 +0100401
/* Software-ring implementation of the generic buffer access operations;
 * exported so drivers can select this buffer type. */
const struct iio_buffer_access_funcs ring_sw_access_funcs = {
	.mark_in_use = &iio_mark_sw_rb_in_use,
	.unmark_in_use = &iio_unmark_sw_rb_in_use,
	.store_to = &iio_store_to_sw_rb,
	.read_first_n = &iio_read_first_n_sw_rb,
	.mark_param_change = &iio_mark_update_needed_sw_rb,
	.request_update = &iio_request_update_sw_rb,
	.get_bytes_per_datum = &iio_get_bytes_per_datum_sw_rb,
	.set_bytes_per_datum = &iio_set_bytes_per_datum_sw_rb,
	.get_length = &iio_get_length_sw_rb,
	.set_length = &iio_set_length_sw_rb,
};
EXPORT_SYMBOL(ring_sw_access_funcs);
415
/* Module metadata. */
MODULE_DESCRIPTION("Industrialio I/O software ring buffer");
MODULE_LICENSE("GPL");