/*
 * Definitions for the 'struct skb_array' data structure.
 *
 * Author:
 *	Michael S. Tsirkin <mst@redhat.com>
 *
 * Copyright (C) 2016 Red Hat, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * Limited-size FIFO of skbs. Can be used more or less whenever
 * sk_buff_head can be used, except you need to know the queue size in
 * advance.
 * Implemented as a type-safe wrapper around ptr_ring.
 */
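
/*
 * Minimal usage sketch (illustrative, not part of the original header):
 * a fixed-size queue of 128 skbs, error handling elided. produce()
 * returns non-zero when the ring is full, consume() returns NULL when
 * it is empty, and cleanup() frees any skbs still queued.
 *
 *	struct skb_array q;
 *
 *	if (skb_array_init(&q, 128, GFP_KERNEL))
 *		return -ENOMEM;
 *
 *	if (skb_array_produce(&q, skb))
 *		kfree_skb(skb);
 *
 *	skb = skb_array_consume(&q);
 *
 *	skb_array_cleanup(&q);
 */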

#ifndef _LINUX_SKB_ARRAY_H
#define _LINUX_SKB_ARRAY_H 1

#ifdef __KERNEL__
#include <linux/ptr_ring.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#endif

struct skb_array {
	struct ptr_ring ring;
};

/* Might be slightly faster than skb_array_full below, but callers invoking
 * this in a loop must use a compiler barrier, for example cpu_relax().
 */
static inline bool __skb_array_full(struct skb_array *a)
{
	return __ptr_ring_full(&a->ring);
}

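/*
 * For example, a producer busy-waiting for space with __skb_array_full()
 * might look like the sketch below (illustrative, not from the original
 * header); cpu_relax() doubles as the required compiler barrier:
 *
 *	while (__skb_array_full(a))
 *		cpu_relax();
 */
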
static inline bool skb_array_full(struct skb_array *a)
{
	return ptr_ring_full(&a->ring);
}

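/*
 * A note on the suffixes, added for clarity (the authoritative rules are
 * the ptr_ring.h primitives these wrap): the plain variants take the ring
 * lock with spin_lock(), _irq variants with spin_lock_irq(), _bh variants
 * with spin_lock_bh(), and _any variants with spin_lock_irqsave(), making
 * _any safe to call from any context. The same convention applies to the
 * consume, empty and peek_len helpers below.
 */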
static inline int skb_array_produce(struct skb_array *a, struct sk_buff *skb)
{
	return ptr_ring_produce(&a->ring, skb);
}

static inline int skb_array_produce_irq(struct skb_array *a, struct sk_buff *skb)
{
	return ptr_ring_produce_irq(&a->ring, skb);
}

static inline int skb_array_produce_bh(struct skb_array *a, struct sk_buff *skb)
{
	return ptr_ring_produce_bh(&a->ring, skb);
}

static inline int skb_array_produce_any(struct skb_array *a, struct sk_buff *skb)
{
	return ptr_ring_produce_any(&a->ring, skb);
}

/* Might be slightly faster than skb_array_empty below, but only safe if the
 * array is never resized. Also, callers invoking this in a loop must take care
 * to use a compiler barrier, for example cpu_relax().
 */
static inline bool __skb_array_empty(struct skb_array *a)
{
	return __ptr_ring_empty(&a->ring);
}

static inline struct sk_buff *__skb_array_peek(struct skb_array *a)
{
	return __ptr_ring_peek(&a->ring);
}

static inline bool skb_array_empty(struct skb_array *a)
{
	return ptr_ring_empty(&a->ring);
}

static inline bool skb_array_empty_bh(struct skb_array *a)
{
	return ptr_ring_empty_bh(&a->ring);
}

static inline bool skb_array_empty_irq(struct skb_array *a)
{
	return ptr_ring_empty_irq(&a->ring);
}

static inline bool skb_array_empty_any(struct skb_array *a)
{
	return ptr_ring_empty_any(&a->ring);
}

static inline struct sk_buff *skb_array_consume(struct skb_array *a)
{
	return ptr_ring_consume(&a->ring);
}

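/*
 * The batched variants below dequeue up to n skbs into array[] and return
 * the number actually consumed, possibly 0. (Note added for clarity; the
 * behaviour comes from ptr_ring_consume_batched() and friends.)
 */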
static inline int skb_array_consume_batched(struct skb_array *a,
					    struct sk_buff **array, int n)
{
	return ptr_ring_consume_batched(&a->ring, (void **)array, n);
}

static inline struct sk_buff *skb_array_consume_irq(struct skb_array *a)
{
	return ptr_ring_consume_irq(&a->ring);
}

static inline int skb_array_consume_batched_irq(struct skb_array *a,
						struct sk_buff **array, int n)
{
	return ptr_ring_consume_batched_irq(&a->ring, (void **)array, n);
}

static inline struct sk_buff *skb_array_consume_any(struct skb_array *a)
{
	return ptr_ring_consume_any(&a->ring);
}

static inline int skb_array_consume_batched_any(struct skb_array *a,
						struct sk_buff **array, int n)
{
	return ptr_ring_consume_batched_any(&a->ring, (void **)array, n);
}

static inline struct sk_buff *skb_array_consume_bh(struct skb_array *a)
{
	return ptr_ring_consume_bh(&a->ring);
}

static inline int skb_array_consume_batched_bh(struct skb_array *a,
					       struct sk_buff **array, int n)
{
	return ptr_ring_consume_batched_bh(&a->ring, (void **)array, n);
}

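/*
 * Worked example (illustrative): for a 1500 byte skb whose VLAN tag is
 * still held out of band (skb_vlan_tag_present()), the helper below
 * reports 1500 + VLAN_HLEN = 1504 bytes, accounting for the 4 tag bytes
 * that will be inserted into the frame on transmit.
 */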
static inline int __skb_array_len_with_tag(struct sk_buff *skb)
{
	if (likely(skb)) {
		int len = skb->len;

		if (skb_vlan_tag_present(skb))
			len += VLAN_HLEN;

		return len;
	} else {
		return 0;
	}
}

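/*
 * Note (added for clarity; see PTR_RING_PEEK_CALL() in ptr_ring.h): these
 * helpers report the length of the head skb without dequeueing it. The
 * macro runs __skb_array_len_with_tag() on the peeked pointer under the
 * consumer lock, and that helper's NULL check makes an empty ring
 * report 0.
 */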
static inline int skb_array_peek_len(struct skb_array *a)
{
	return PTR_RING_PEEK_CALL(&a->ring, __skb_array_len_with_tag);
}

static inline int skb_array_peek_len_irq(struct skb_array *a)
{
	return PTR_RING_PEEK_CALL_IRQ(&a->ring, __skb_array_len_with_tag);
}

static inline int skb_array_peek_len_bh(struct skb_array *a)
{
	return PTR_RING_PEEK_CALL_BH(&a->ring, __skb_array_len_with_tag);
}

static inline int skb_array_peek_len_any(struct skb_array *a)
{
	return PTR_RING_PEEK_CALL_ANY(&a->ring, __skb_array_len_with_tag);
}

static inline int skb_array_init(struct skb_array *a, int size, gfp_t gfp)
{
	return ptr_ring_init(&a->ring, size, gfp);
}

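/* Destructor callback passed to the ptr_ring teardown and resize helpers
 * below; it frees any skbs still left in the ring. (Comment added for
 * clarity.)
 */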
static void __skb_array_destroy_skb(void *ptr)
{
	kfree_skb(ptr);
}

static inline void skb_array_unconsume(struct skb_array *a,
				       struct sk_buff **skbs, int n)
{
	ptr_ring_unconsume(&a->ring, (void **)skbs, n, __skb_array_destroy_skb);
}

static inline int skb_array_resize(struct skb_array *a, int size, gfp_t gfp)
{
	return ptr_ring_resize(&a->ring, size, gfp, __skb_array_destroy_skb);
}

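/*
 * Note (added for clarity): the cast below treats an array of
 * struct skb_array pointers as an array of struct ptr_ring pointers,
 * which is only valid because 'ring' is the first (and only) member;
 * the BUILD_BUG_ON() enforces that it stays at offset zero.
 */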
static inline int skb_array_resize_multiple(struct skb_array **rings,
					    int nrings, unsigned int size,
					    gfp_t gfp)
{
	BUILD_BUG_ON(offsetof(struct skb_array, ring));
	return ptr_ring_resize_multiple((struct ptr_ring **)rings,
					nrings, size, gfp,
					__skb_array_destroy_skb);
}

static inline void skb_array_cleanup(struct skb_array *a)
{
	ptr_ring_cleanup(&a->ring, __skb_array_destroy_skb);
}

#endif /* _LINUX_SKB_ARRAY_H */