/* The industrial I/O simple minimally locked ring buffer.
 *
 * Copyright (c) 2008 Jonathan Cameron
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 */

#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/workqueue.h>
#include <linux/sched.h>
#include <linux/poll.h>
#include "ring_sw.h"
#include "trigger.h"

/**
 * struct iio_sw_ring_buffer - software ring buffer
 * @buf:		generic ring buffer elements
 * @data:		the ring buffer memory
 * @read_p:		read pointer (oldest available)
 * @write_p:		write pointer
 * @last_written_p:	pointer to the newest available datum
 * @half_p:		half buffer length behind write_p (event generation)
 * @use_count:		reference count to prevent resizing when in use
 * @update_needed:	flag to indicate a requested change in size
 * @use_lock:		lock to prevent change in size when in use
 *
 * Note that the first element of all ring buffers must be a
 * struct iio_buffer.
 **/
struct iio_sw_ring_buffer {
	struct iio_buffer buf;
	unsigned char *data;
	unsigned char *read_p;
	unsigned char *write_p;
	unsigned char *last_written_p;
	/* used to act as a point at which to signal an event */
	unsigned char *half_p;
	int use_count;
	int update_needed;
	spinlock_t use_lock;
};

#define iio_to_sw_ring(r) container_of(r, struct iio_sw_ring_buffer, buf)

static inline int __iio_allocate_sw_ring_buffer(struct iio_sw_ring_buffer *ring,
						int bytes_per_datum, int length)
{
	if ((length == 0) || (bytes_per_datum == 0))
		return -EINVAL;
	__iio_update_buffer(&ring->buf, bytes_per_datum, length);
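	/* GFP_ATOMIC rather than GFP_KERNEL as this can be called from
	 * iio_request_update_sw_rb() with the use_lock spinlock held,
	 * so the allocation must not sleep.
	 */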
	ring->data = kmalloc(length*ring->buf.bytes_per_datum, GFP_ATOMIC);
	ring->read_p = NULL;
	ring->write_p = NULL;
	ring->last_written_p = NULL;
	ring->half_p = NULL;
	return ring->data ? 0 : -ENOMEM;
}

static inline void __iio_init_sw_ring_buffer(struct iio_sw_ring_buffer *ring)
{
	spin_lock_init(&ring->use_lock);
}

static inline void __iio_free_sw_ring_buffer(struct iio_sw_ring_buffer *ring)
{
	kfree(ring->data);
}

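/* Readers and writers take a reference under use_lock so that
 * iio_request_update_sw_rb() cannot free and reallocate the data
 * area while it is being accessed.
 */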
static void iio_mark_sw_rb_in_use(struct iio_buffer *r)
{
	struct iio_sw_ring_buffer *ring = iio_to_sw_ring(r);
	spin_lock(&ring->use_lock);
	ring->use_count++;
	spin_unlock(&ring->use_lock);
}

static void iio_unmark_sw_rb_in_use(struct iio_buffer *r)
{
	struct iio_sw_ring_buffer *ring = iio_to_sw_ring(r);
	spin_lock(&ring->use_lock);
	ring->use_count--;
	spin_unlock(&ring->use_lock);
}

/* Ring buffer related functionality */
/* Store to ring is typically called in the bh of a data ready interrupt handler
 * in the device driver */
/* The lock is always held if there is a chance this may be called */
/* Only one of these per ring may run concurrently - enforced by drivers */
static int iio_store_to_sw_ring(struct iio_sw_ring_buffer *ring,
				unsigned char *data, s64 timestamp)
{
	int ret = 0;
	unsigned char *temp_ptr, *change_test_ptr;

	/* initial store */
	if (unlikely(ring->write_p == NULL)) {
		ring->write_p = ring->data;
		/* Doesn't actually matter if this is out of the set
		 * as long as the read pointer is valid before this
		 * passes it - guaranteed as set later in this function.
		 */
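		/* Starting half a buffer length behind the data area means
		 * the watermark check below first fires once the buffer is
		 * half full.
		 */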
		ring->half_p = ring->data - ring->buf.length*ring->buf.bytes_per_datum/2;
	}
	/* Copy data to wherever the current write pointer says */
	memcpy(ring->write_p, data, ring->buf.bytes_per_datum);
	barrier();
	/* Update the pointer used to get the most recent value.
	 * Always valid as it points to either the latest or the second
	 * latest value. Before this runs it is NULL and read attempts
	 * fail with -EAGAIN.
	 */
	ring->last_written_p = ring->write_p;
	barrier();
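	/* The barrier()s are intended to ensure the datum is fully copied
	 * before last_written_p is published to readers. Note barrier()
	 * is a compiler barrier only; correctness also leans on the
	 * single-writer rule stated above.
	 */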
	/* temp_ptr is used to ensure we never have an invalid pointer;
	 * it may be slightly lagging, but is never invalid
	 */
	temp_ptr = ring->write_p + ring->buf.bytes_per_datum;
	/* End of ring, back to the beginning */
	if (temp_ptr == ring->data + ring->buf.length*ring->buf.bytes_per_datum)
		temp_ptr = ring->data;
	/* Update the write pointer
	 * always valid as long as this is the only function able to write.
	 * Care needed with smp systems to ensure more than one ring fill
	 * is never scheduled.
	 */
	ring->write_p = temp_ptr;

	if (ring->read_p == NULL)
		ring->read_p = ring->data;
	/* Buffer full - move the read pointer and create / escalate
	 * ring event */
	/* Tricky case - if the read pointer moves before we adjust it.
	 * Handle by not pushing if it has moved - may result in occasional
	 * unnecessary buffer full events when it wasn't quite true.
	 */
	else if (ring->write_p == ring->read_p) {
		change_test_ptr = ring->read_p;
		temp_ptr = change_test_ptr + ring->buf.bytes_per_datum;
		if (temp_ptr
		    == ring->data + ring->buf.length*ring->buf.bytes_per_datum) {
			temp_ptr = ring->data;
		}
		/* We are moving the pointer on by one because the ring is
		 * full. Any change to the read pointer will be this or
		 * greater.
		 */
		if (change_test_ptr == ring->read_p)
			ring->read_p = temp_ptr;
	}
	/* investigate if our event barrier has been passed */
	/* There are definite 'issues' with this and chances of
	 * simultaneous read */
	/* Also need to use loop count to ensure this only happens once */
	ring->half_p += ring->buf.bytes_per_datum;
	if (ring->half_p == ring->data + ring->buf.length*ring->buf.bytes_per_datum)
		ring->half_p = ring->data;
	if (ring->half_p == ring->read_p) {
		ring->buf.stufftoread = true;
		wake_up_interruptible(&ring->buf.pollq);
	}
	return ret;
}

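/* Strategy: snapshot a consistent read/write pointer pair, copy up to
 * n bytes into a bounce buffer, then discard any leading bytes the
 * writer overran during the copy before handing the rest to userspace.
 */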
static int iio_read_first_n_sw_rb(struct iio_buffer *r,
				  size_t n, char __user *buf)
{
	struct iio_sw_ring_buffer *ring = iio_to_sw_ring(r);

	u8 *initial_read_p, *initial_write_p, *current_read_p, *end_read_p;
	u8 *data;
	int ret, max_copied, bytes_to_rip, dead_offset;
	size_t data_available, buffer_size;

	/* A userspace program has probably made an error if it tries to
	 * read something that is not a whole number of bpds.
	 * Return an error.
	 */
	if (n % ring->buf.bytes_per_datum) {
		ret = -EINVAL;
		printk(KERN_INFO "Ring buffer read request not whole number of "
		       "samples: Request bytes %zd, Current bytes per datum %d\n",
		       n, ring->buf.bytes_per_datum);
		goto error_ret;
	}

	buffer_size = ring->buf.bytes_per_datum*ring->buf.length;

	/* Limit size to whole of ring buffer */
	bytes_to_rip = min_t(size_t, buffer_size, n);

	data = kmalloc(bytes_to_rip, GFP_KERNEL);
	if (data == NULL) {
		ret = -ENOMEM;
		goto error_ret;
	}

	/* build local copy */
	initial_read_p = ring->read_p;
	if (unlikely(initial_read_p == NULL)) { /* No data here as yet */
		ret = 0;
		goto error_free_data_cpy;
	}

	initial_write_p = ring->write_p;

	/* Need a consistent pair */
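	/* A concurrent store can move both pointers between our reads;
	 * loop until a snapshot is observed unchanged, as the
	 * data_available arithmetic below relies on the pair being
	 * coherent.
	 */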
	while ((initial_read_p != ring->read_p)
	       || (initial_write_p != ring->write_p)) {
		initial_read_p = ring->read_p;
		initial_write_p = ring->write_p;
	}
	if (initial_write_p == initial_read_p) {
		/* No new data available. */
		ret = 0;
		goto error_free_data_cpy;
	}

	if (initial_write_p >= initial_read_p)
		data_available = initial_write_p - initial_read_p;
	else
		data_available = buffer_size - (initial_read_p - initial_write_p);

	if (data_available < bytes_to_rip)
		bytes_to_rip = data_available;

	if (initial_read_p + bytes_to_rip >= ring->data + buffer_size) {
		max_copied = ring->data + buffer_size - initial_read_p;
		memcpy(data, initial_read_p, max_copied);
		memcpy(data + max_copied, ring->data, bytes_to_rip - max_copied);
		end_read_p = ring->data + bytes_to_rip - max_copied;
	} else {
		memcpy(data, initial_read_p, bytes_to_rip);
		end_read_p = initial_read_p + bytes_to_rip;
	}

	/* Now to verify which section was cleanly copied - i.e. how far
	 * the read pointer has been pushed */
	current_read_p = ring->read_p;

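	/* dead_offset is how far the writer pushed read_p while we were
	 * copying: that many bytes at the head of the local copy may
	 * have been overwritten mid-copy and must be discarded.
	 */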
	if (initial_read_p <= current_read_p)
		dead_offset = current_read_p - initial_read_p;
	else
		dead_offset = buffer_size - (initial_read_p - current_read_p);

	/* possible issue if the initial write has been lapped or indeed
	 * the point we were reading to has been passed */
	/* No valid data read.
	 * In this case the read pointer is already correct having been
	 * pushed further than we would look. */
	if (bytes_to_rip - dead_offset < 0) {
		ret = 0;
		goto error_free_data_cpy;
	}

	/* setup the next read position */
	/* Beware, this may fail due to concurrency fun and games.
	 * It is possible that sufficient fill commands have run to push the
	 * read pointer past where we would be after the rip. If this occurs,
	 * leave it be.
	 */
	/* Tricky - deal with loops */

	while (ring->read_p != end_read_p)
		ring->read_p = end_read_p;

	ret = bytes_to_rip - dead_offset;

	if (copy_to_user(buf, data + dead_offset, ret)) {
		ret = -EFAULT;
		goto error_free_data_cpy;
	}

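	/* Only clear the poll flag if this read drained at least half
	 * the buffer, mirroring the half_p watermark that set it.
	 */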
	if (bytes_to_rip >= ring->buf.length*ring->buf.bytes_per_datum/2)
		ring->buf.stufftoread = false;

error_free_data_cpy:
	kfree(data);
error_ret:

	return ret;
}

static int iio_store_to_sw_rb(struct iio_buffer *r,
			      u8 *data,
			      s64 timestamp)
{
	struct iio_sw_ring_buffer *ring = iio_to_sw_ring(r);
	return iio_store_to_sw_ring(ring, data, timestamp);
}

static int iio_read_last_from_sw_ring(struct iio_sw_ring_buffer *ring,
				      unsigned char *data)
{
	unsigned char *last_written_p_copy;

	iio_mark_sw_rb_in_use(&ring->buf);
again:
	barrier();
	last_written_p_copy = ring->last_written_p;
	barrier(); /* unnecessary? */
	/* Check there is anything here */
	if (last_written_p_copy == NULL) {
		/* Drop the reference taken above before bailing out */
		iio_unmark_sw_rb_in_use(&ring->buf);
		return -EAGAIN;
	}
	memcpy(data, last_written_p_copy, ring->buf.bytes_per_datum);

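	/* If the writer moved last_written_p while we copied, the datum
	 * may have been overwritten mid-copy, so go round again.
	 */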
	if (unlikely(ring->last_written_p != last_written_p_copy))
		goto again;

	iio_unmark_sw_rb_in_use(&ring->buf);
	return 0;
}

static int iio_read_last_from_sw_rb(struct iio_buffer *r,
				    unsigned char *data)
{
	return iio_read_last_from_sw_ring(iio_to_sw_ring(r), data);
}

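/* Reallocate the data area if a new length or bytes_per_datum has been
 * requested. Refused with -EAGAIN while any user still holds a
 * reference taken via mark_in_use.
 */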
static int iio_request_update_sw_rb(struct iio_buffer *r)
{
	int ret = 0;
	struct iio_sw_ring_buffer *ring = iio_to_sw_ring(r);

	r->stufftoread = false;
	spin_lock(&ring->use_lock);
	if (!ring->update_needed)
		goto error_ret;
	if (ring->use_count) {
		ret = -EAGAIN;
		goto error_ret;
	}
	__iio_free_sw_ring_buffer(ring);
	ret = __iio_allocate_sw_ring_buffer(ring, ring->buf.bytes_per_datum,
					    ring->buf.length);
error_ret:
	spin_unlock(&ring->use_lock);
	return ret;
}

static int iio_get_bytes_per_datum_sw_rb(struct iio_buffer *r)
{
	struct iio_sw_ring_buffer *ring = iio_to_sw_ring(r);
	return ring->buf.bytes_per_datum;
}

static int iio_set_bytes_per_datum_sw_rb(struct iio_buffer *r, size_t bpd)
{
	if (r->bytes_per_datum != bpd) {
		r->bytes_per_datum = bpd;
		if (r->access->mark_param_change)
			r->access->mark_param_change(r);
	}
	return 0;
}

static int iio_get_length_sw_rb(struct iio_buffer *r)
{
	return r->length;
}

static int iio_set_length_sw_rb(struct iio_buffer *r, int length)
{
	if (r->length != length) {
		r->length = length;
		if (r->access->mark_param_change)
			r->access->mark_param_change(r);
	}
	return 0;
}

static int iio_mark_update_needed_sw_rb(struct iio_buffer *r)
{
	struct iio_sw_ring_buffer *ring = iio_to_sw_ring(r);
	ring->update_needed = true;
	return 0;
}

static IIO_BUFFER_ENABLE_ATTR;
static IIO_BUFFER_LENGTH_ATTR;

/* Standard set of ring buffer attributes */
static struct attribute *iio_ring_attributes[] = {
	&dev_attr_length.attr,
	&dev_attr_enable.attr,
	NULL,
};

static struct attribute_group iio_ring_attribute_group = {
	.attrs = iio_ring_attributes,
	.name = "buffer",
};

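/* Typical driver usage (a sketch - exact setup varies by driver):
 *
 *	indio_dev->buffer = iio_sw_rb_allocate(indio_dev);
 *	if (!indio_dev->buffer)
 *		return -ENOMEM;
 *	indio_dev->buffer->access = &ring_sw_access_funcs;
 */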
struct iio_buffer *iio_sw_rb_allocate(struct iio_dev *indio_dev)
{
	struct iio_buffer *buf;
	struct iio_sw_ring_buffer *ring;

	ring = kzalloc(sizeof *ring, GFP_KERNEL);
	if (!ring)
		return NULL;
	ring->update_needed = true;
	buf = &ring->buf;
	iio_buffer_init(buf, indio_dev);
	__iio_init_sw_ring_buffer(ring);
	buf->attrs = &iio_ring_attribute_group;

	return buf;
}
EXPORT_SYMBOL(iio_sw_rb_allocate);

void iio_sw_rb_free(struct iio_buffer *r)
{
	kfree(iio_to_sw_ring(r));
}
EXPORT_SYMBOL(iio_sw_rb_free);

const struct iio_buffer_access_funcs ring_sw_access_funcs = {
	.mark_in_use = &iio_mark_sw_rb_in_use,
	.unmark_in_use = &iio_unmark_sw_rb_in_use,
	.store_to = &iio_store_to_sw_rb,
	.read_last = &iio_read_last_from_sw_rb,
	.read_first_n = &iio_read_first_n_sw_rb,
	.mark_param_change = &iio_mark_update_needed_sw_rb,
	.request_update = &iio_request_update_sw_rb,
	.get_bytes_per_datum = &iio_get_bytes_per_datum_sw_rb,
	.set_bytes_per_datum = &iio_set_bytes_per_datum_sw_rb,
	.get_length = &iio_get_length_sw_rb,
	.set_length = &iio_set_length_sw_rb,
};
EXPORT_SYMBOL(ring_sw_access_funcs);

MODULE_DESCRIPTION("Industrial I/O software ring buffer");
MODULE_LICENSE("GPL");