/*
 *	Definitions for the 'struct ptr_ring' data structure.
 *
 *	Author:
 *		Michael S. Tsirkin <mst@redhat.com>
 *
 *	Copyright (C) 2016 Red Hat, Inc.
 *
 *	This program is free software; you can redistribute it and/or modify it
 *	under the terms of the GNU General Public License as published by the
 *	Free Software Foundation; either version 2 of the License, or (at your
 *	option) any later version.
 *
 *	This is a limited-size FIFO maintaining pointers in FIFO order, with
 *	one CPU producing entries and another consuming entries from the FIFO.
 *
 *	This implementation tries to minimize cache contention when there is a
 *	single producer and a single consumer CPU.
 */

#ifndef _LINUX_PTR_RING_H
#define _LINUX_PTR_RING_H 1

#ifdef __KERNEL__
#include <linux/spinlock.h>
#include <linux/cache.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <asm/errno.h>
#endif

struct ptr_ring {
	int producer ____cacheline_aligned_in_smp;
	spinlock_t producer_lock;
	int consumer ____cacheline_aligned_in_smp;
	spinlock_t consumer_lock;
	/* Shared consumer/producer data */
	/* Read-only by both the producer and the consumer */
	int size ____cacheline_aligned_in_smp; /* max entries in queue */
	void **queue;
};
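
/* An unused slot is encoded as a NULL entry: __ptr_ring_full() and
 * __ptr_ring_peek() simply test the entry at the producer/consumer index,
 * and __ptr_ring_discard_one() clears consumed slots back to NULL.
 * A consequence of this encoding is that NULL pointers cannot be queued.
 */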

/* Note: callers invoking this in a loop must use a compiler barrier,
 * for example cpu_relax(). If ring is ever resized, callers must hold
 * producer_lock - see e.g. ptr_ring_full. Otherwise, if callers don't hold
 * producer_lock, the next call to __ptr_ring_produce may fail.
 */
static inline bool __ptr_ring_full(struct ptr_ring *r)
{
	return r->queue[r->producer];
}

static inline bool ptr_ring_full(struct ptr_ring *r)
{
	bool ret;

	spin_lock(&r->producer_lock);
	ret = __ptr_ring_full(r);
	spin_unlock(&r->producer_lock);

	return ret;
}

static inline bool ptr_ring_full_irq(struct ptr_ring *r)
{
	bool ret;

	spin_lock_irq(&r->producer_lock);
	ret = __ptr_ring_full(r);
	spin_unlock_irq(&r->producer_lock);

	return ret;
}

static inline bool ptr_ring_full_any(struct ptr_ring *r)
{
	unsigned long flags;
	bool ret;

	spin_lock_irqsave(&r->producer_lock, flags);
	ret = __ptr_ring_full(r);
	spin_unlock_irqrestore(&r->producer_lock, flags);

	return ret;
}

static inline bool ptr_ring_full_bh(struct ptr_ring *r)
{
	bool ret;

	spin_lock_bh(&r->producer_lock);
	ret = __ptr_ring_full(r);
	spin_unlock_bh(&r->producer_lock);

	return ret;
}

/* Note: callers invoking this in a loop must use a compiler barrier,
 * for example cpu_relax(). Callers must hold producer_lock.
 */
static inline int __ptr_ring_produce(struct ptr_ring *r, void *ptr)
{
	if (unlikely(!r->size) || r->queue[r->producer])
		return -ENOSPC;

	r->queue[r->producer++] = ptr;
	if (unlikely(r->producer >= r->size))
		r->producer = 0;
	return 0;
}
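
/* Illustrative sketch: per the notes above, a caller that wants to wait for
 * space (assuming a ring created with a non-zero size and a consumer that
 * keeps making progress on another CPU) could hold producer_lock and spin
 * with cpu_relax() as the compiler barrier:
 *
 *	spin_lock(&r->producer_lock);
 *	while (__ptr_ring_full(r))
 *		cpu_relax();
 *	__ptr_ring_produce(r, ptr);
 *	spin_unlock(&r->producer_lock);
 */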

/*
 * Note: resize (below) nests producer lock within consumer lock, so if you
 * consume in interrupt or BH context, you must disable interrupts/BH when
 * calling this.
 */
static inline int ptr_ring_produce(struct ptr_ring *r, void *ptr)
{
	int ret;

	spin_lock(&r->producer_lock);
	ret = __ptr_ring_produce(r, ptr);
	spin_unlock(&r->producer_lock);

	return ret;
}

static inline int ptr_ring_produce_irq(struct ptr_ring *r, void *ptr)
{
	int ret;

	spin_lock_irq(&r->producer_lock);
	ret = __ptr_ring_produce(r, ptr);
	spin_unlock_irq(&r->producer_lock);

	return ret;
}

static inline int ptr_ring_produce_any(struct ptr_ring *r, void *ptr)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&r->producer_lock, flags);
	ret = __ptr_ring_produce(r, ptr);
	spin_unlock_irqrestore(&r->producer_lock, flags);

	return ret;
}

static inline int ptr_ring_produce_bh(struct ptr_ring *r, void *ptr)
{
	int ret;

	spin_lock_bh(&r->producer_lock);
	ret = __ptr_ring_produce(r, ptr);
	spin_unlock_bh(&r->producer_lock);

	return ret;
}
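
/* Illustrative sketch (priv->ring, obj and drop_obj() are hypothetical):
 * when entries are consumed from BH context - e.g. a NAPI poll calling
 * ptr_ring_consume() - a producer in process context should use the _bh
 * variant so that BH is disabled while producer_lock is held, as required
 * by the note above:
 *
 *	if (ptr_ring_produce_bh(&priv->ring, obj))
 *		drop_obj(obj);
 *
 * A non-zero return (-ENOSPC) means the ring was full and obj was not queued.
 */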

/* Note: callers invoking this in a loop must use a compiler barrier,
 * for example cpu_relax(). Callers must take consumer_lock
 * if they dereference the pointer - see e.g. PTR_RING_PEEK_CALL.
 * If ring is never resized, and if the pointer is merely
 * tested, there's no need to take the lock - see e.g. __ptr_ring_empty.
 */
static inline void *__ptr_ring_peek(struct ptr_ring *r)
{
	if (likely(r->size))
		return r->queue[r->consumer];
	return NULL;
}

/* Note: callers invoking this in a loop must use a compiler barrier,
 * for example cpu_relax(). Callers must take consumer_lock
 * if the ring is ever resized - see e.g. ptr_ring_empty.
 */
static inline bool __ptr_ring_empty(struct ptr_ring *r)
{
	return !__ptr_ring_peek(r);
}

static inline bool ptr_ring_empty(struct ptr_ring *r)
{
	bool ret;

	spin_lock(&r->consumer_lock);
	ret = __ptr_ring_empty(r);
	spin_unlock(&r->consumer_lock);

	return ret;
}

static inline bool ptr_ring_empty_irq(struct ptr_ring *r)
{
	bool ret;

	spin_lock_irq(&r->consumer_lock);
	ret = __ptr_ring_empty(r);
	spin_unlock_irq(&r->consumer_lock);

	return ret;
}

static inline bool ptr_ring_empty_any(struct ptr_ring *r)
{
	unsigned long flags;
	bool ret;

	spin_lock_irqsave(&r->consumer_lock, flags);
	ret = __ptr_ring_empty(r);
	spin_unlock_irqrestore(&r->consumer_lock, flags);

	return ret;
}

static inline bool ptr_ring_empty_bh(struct ptr_ring *r)
{
	bool ret;

	spin_lock_bh(&r->consumer_lock);
	ret = __ptr_ring_empty(r);
	spin_unlock_bh(&r->consumer_lock);

	return ret;
}

/* Must only be called after __ptr_ring_peek returned !NULL */
static inline void __ptr_ring_discard_one(struct ptr_ring *r)
{
	r->queue[r->consumer++] = NULL;
	if (unlikely(r->consumer >= r->size))
		r->consumer = 0;
}

static inline void *__ptr_ring_consume(struct ptr_ring *r)
{
	void *ptr;

	ptr = __ptr_ring_peek(r);
	if (ptr)
		__ptr_ring_discard_one(r);

	return ptr;
}

/*
 * Note: resize (below) nests producer lock within consumer lock, so if you
 * call this in interrupt or BH context, you must disable interrupts/BH when
 * producing.
 */
static inline void *ptr_ring_consume(struct ptr_ring *r)
{
	void *ptr;

	spin_lock(&r->consumer_lock);
	ptr = __ptr_ring_consume(r);
	spin_unlock(&r->consumer_lock);

	return ptr;
}

static inline void *ptr_ring_consume_irq(struct ptr_ring *r)
{
	void *ptr;

	spin_lock_irq(&r->consumer_lock);
	ptr = __ptr_ring_consume(r);
	spin_unlock_irq(&r->consumer_lock);

	return ptr;
}

static inline void *ptr_ring_consume_any(struct ptr_ring *r)
{
	unsigned long flags;
	void *ptr;

	spin_lock_irqsave(&r->consumer_lock, flags);
	ptr = __ptr_ring_consume(r);
	spin_unlock_irqrestore(&r->consumer_lock, flags);

	return ptr;
}

static inline void *ptr_ring_consume_bh(struct ptr_ring *r)
{
	void *ptr;

	spin_lock_bh(&r->consumer_lock);
	ptr = __ptr_ring_consume(r);
	spin_unlock_bh(&r->consumer_lock);

	return ptr;
}
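
/* Illustrative sketch (priv->ring, budget and handle_obj() are hypothetical):
 * a consumer draining up to a budget of entries per call, e.g. from a
 * workqueue or NAPI-style poll loop. Per the note above, if this runs in
 * interrupt or BH context, producers must disable interrupts/BH, e.g. by
 * using ptr_ring_produce_bh():
 *
 *	int done = 0;
 *	void *obj;
 *
 *	while (done < budget && (obj = ptr_ring_consume(&priv->ring))) {
 *		handle_obj(obj);
 *		done++;
 *	}
 */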

/* Cast to structure type and call a function without discarding from FIFO.
 * Function must return a value.
 * Callers must take consumer_lock.
 */
#define __PTR_RING_PEEK_CALL(r, f) ((f)(__ptr_ring_peek(r)))

#define PTR_RING_PEEK_CALL(r, f) ({ \
	typeof((f)(NULL)) __PTR_RING_PEEK_CALL_v; \
	\
	spin_lock(&(r)->consumer_lock); \
	__PTR_RING_PEEK_CALL_v = __PTR_RING_PEEK_CALL(r, f); \
	spin_unlock(&(r)->consumer_lock); \
	__PTR_RING_PEEK_CALL_v; \
})

#define PTR_RING_PEEK_CALL_IRQ(r, f) ({ \
	typeof((f)(NULL)) __PTR_RING_PEEK_CALL_v; \
	\
	spin_lock_irq(&(r)->consumer_lock); \
	__PTR_RING_PEEK_CALL_v = __PTR_RING_PEEK_CALL(r, f); \
	spin_unlock_irq(&(r)->consumer_lock); \
	__PTR_RING_PEEK_CALL_v; \
})

#define PTR_RING_PEEK_CALL_BH(r, f) ({ \
	typeof((f)(NULL)) __PTR_RING_PEEK_CALL_v; \
	\
	spin_lock_bh(&(r)->consumer_lock); \
	__PTR_RING_PEEK_CALL_v = __PTR_RING_PEEK_CALL(r, f); \
	spin_unlock_bh(&(r)->consumer_lock); \
	__PTR_RING_PEEK_CALL_v; \
})

#define PTR_RING_PEEK_CALL_ANY(r, f) ({ \
	typeof((f)(NULL)) __PTR_RING_PEEK_CALL_v; \
	unsigned long __PTR_RING_PEEK_CALL_f; \
	\
	spin_lock_irqsave(&(r)->consumer_lock, __PTR_RING_PEEK_CALL_f); \
	__PTR_RING_PEEK_CALL_v = __PTR_RING_PEEK_CALL(r, f); \
	spin_unlock_irqrestore(&(r)->consumer_lock, __PTR_RING_PEEK_CALL_f); \
	__PTR_RING_PEEK_CALL_v; \
})
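
/* Illustrative sketch (peek_len() and the use of sk_buff are hypothetical):
 * a function passed to these macros must accept the queued pointer, which
 * may be NULL when the ring is empty, and return a value - e.g. reporting
 * the length of the next queued packet without consuming it:
 *
 *	static int peek_len(void *ptr)
 *	{
 *		struct sk_buff *skb = ptr;
 *
 *		return skb ? skb->len : 0;
 *	}
 *
 *	len = PTR_RING_PEEK_CALL(&priv->ring, peek_len);
 */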

static inline void **__ptr_ring_init_queue_alloc(unsigned int size, gfp_t gfp)
{
	return kcalloc(size, sizeof(void *), gfp);
}

static inline int ptr_ring_init(struct ptr_ring *r, int size, gfp_t gfp)
{
	r->queue = __ptr_ring_init_queue_alloc(size, gfp);
	if (!r->queue)
		return -ENOMEM;

	r->size = size;
	r->producer = r->consumer = 0;
	spin_lock_init(&r->producer_lock);
	spin_lock_init(&r->consumer_lock);

	return 0;
}
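
/* Illustrative sketch of the overall life cycle (obj and free_obj() are
 * hypothetical), with one CPU producing and another consuming, and teardown
 * only after both sides have stopped touching the ring:
 *
 *	struct ptr_ring ring;
 *
 *	if (ptr_ring_init(&ring, 128, GFP_KERNEL))
 *		return -ENOMEM;
 *
 *	if (ptr_ring_produce(&ring, obj))	(producer: -ENOSPC means full)
 *		free_obj(obj);
 *
 *	obj = ptr_ring_consume(&ring);		(consumer: NULL means empty)
 *
 *	ptr_ring_cleanup(&ring, free_obj);	(frees any leftover entries)
 */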

static inline void **__ptr_ring_swap_queue(struct ptr_ring *r, void **queue,
					   int size, gfp_t gfp,
					   void (*destroy)(void *))
{
	int producer = 0;
	void **old;
	void *ptr;

	while ((ptr = __ptr_ring_consume(r)))
		if (producer < size)
			queue[producer++] = ptr;
		else if (destroy)
			destroy(ptr);

	r->size = size;
	r->producer = producer;
	r->consumer = 0;
	old = r->queue;
	r->queue = queue;

	return old;
}

/*
 * Note: producer lock is nested within consumer lock, so if you
 * resize you must make sure all uses nest correctly.
 * In particular if you consume ring in interrupt or BH context, you must
 * disable interrupts/BH when doing so.
 */
static inline int ptr_ring_resize(struct ptr_ring *r, int size, gfp_t gfp,
				  void (*destroy)(void *))
{
	unsigned long flags;
	void **queue = __ptr_ring_init_queue_alloc(size, gfp);
	void **old;

	if (!queue)
		return -ENOMEM;

	spin_lock_irqsave(&(r)->consumer_lock, flags);
	spin_lock(&(r)->producer_lock);

	old = __ptr_ring_swap_queue(r, queue, size, gfp, destroy);

	spin_unlock(&(r)->producer_lock);
	spin_unlock_irqrestore(&(r)->consumer_lock, flags);

	kfree(old);

	return 0;
}
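
/* Illustrative sketch (assuming kfree() is a valid destructor for whatever
 * the ring stores): growing a ring to 256 entries. ptr_ring_resize() takes
 * both locks itself, so producers and consumers may keep running, provided
 * the nesting rules in the comment above are respected; entries that do not
 * fit in the new queue are passed to the destructor:
 *
 *	err = ptr_ring_resize(&priv->ring, 256, GFP_KERNEL, kfree);
 */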

/*
 * Note: producer lock is nested within consumer lock, so if you
 * resize you must make sure all uses nest correctly.
 * In particular if you consume ring in interrupt or BH context, you must
 * disable interrupts/BH when doing so.
 */
static inline int ptr_ring_resize_multiple(struct ptr_ring **rings,
					   unsigned int nrings,
					   int size,
					   gfp_t gfp, void (*destroy)(void *))
{
	unsigned long flags;
	void ***queues;
	int i;

	queues = kmalloc_array(nrings, sizeof(*queues), gfp);
	if (!queues)
		goto noqueues;

	for (i = 0; i < nrings; ++i) {
		queues[i] = __ptr_ring_init_queue_alloc(size, gfp);
		if (!queues[i])
			goto nomem;
	}

	for (i = 0; i < nrings; ++i) {
		spin_lock_irqsave(&(rings[i])->consumer_lock, flags);
		spin_lock(&(rings[i])->producer_lock);
		queues[i] = __ptr_ring_swap_queue(rings[i], queues[i],
						  size, gfp, destroy);
		spin_unlock(&(rings[i])->producer_lock);
		spin_unlock_irqrestore(&(rings[i])->consumer_lock, flags);
	}

	for (i = 0; i < nrings; ++i)
		kfree(queues[i]);

	kfree(queues);

	return 0;

nomem:
	while (--i >= 0)
		kfree(queues[i]);

	kfree(queues);

noqueues:
	return -ENOMEM;
}

static inline void ptr_ring_cleanup(struct ptr_ring *r, void (*destroy)(void *))
{
	void *ptr;

	if (destroy)
		while ((ptr = ptr_ring_consume(r)))
			destroy(ptr);
	kfree(r->queue);
}

#endif /* _LINUX_PTR_RING_H */