blob: 35226cd4efb0f32afcf38cc4a97441e9524424f0 [file] [log] [blame]
/*
 *	Definitions for the 'struct skb_array' datastructure.
 *
 *	Author:
 *		Michael S. Tsirkin <mst@redhat.com>
 *
 *	Copyright (C) 2016 Red Hat, Inc.
 *
 *	This program is free software; you can redistribute it and/or modify it
 *	under the terms of the GNU General Public License as published by the
 *	Free Software Foundation; either version 2 of the License, or (at your
 *	option) any later version.
 *
 *	Limited-size FIFO of skbs. Can be used more or less whenever
 *	sk_buff_head can be used, except you need to know the queue size in
 *	advance.
 *	Implemented as a type-safe wrapper around ptr_ring.
 */
19
#ifndef _LINUX_SKB_ARRAY_H
#define _LINUX_SKB_ARRAY_H 1

#ifdef __KERNEL__
#include <linux/ptr_ring.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#endif
28
/* Type-safe FIFO of struct sk_buff pointers.  The only member is an
 * embedded ptr_ring; every helper below simply delegates to the
 * correspondingly named ptr_ring primitive on &a->ring.
 */
struct skb_array {
	struct ptr_ring ring;
};
32
/* Might be slightly faster than skb_array_full below, but callers invoking
 * this in a loop must use a compiler barrier, for example cpu_relax().
 * Lockless: delegates to __ptr_ring_full().
 */
static inline bool __skb_array_full(struct skb_array *a)
{
	return __ptr_ring_full(&a->ring);
}
40
/* Locked variant of __skb_array_full(); delegates to ptr_ring_full(). */
static inline bool skb_array_full(struct skb_array *a)
{
	return ptr_ring_full(&a->ring);
}
45
/* Enqueue @skb onto the ring; delegates to ptr_ring_produce() and passes
 * its return value (0 on success, see ptr_ring.h) through unchanged.
 */
static inline int skb_array_produce(struct skb_array *a, struct sk_buff *skb)
{
	return ptr_ring_produce(&a->ring, skb);
}
50
/* As skb_array_produce(), via the _irq variant of the ptr_ring primitive. */
static inline int skb_array_produce_irq(struct skb_array *a, struct sk_buff *skb)
{
	return ptr_ring_produce_irq(&a->ring, skb);
}
55
/* As skb_array_produce(), via the _bh variant of the ptr_ring primitive. */
static inline int skb_array_produce_bh(struct skb_array *a, struct sk_buff *skb)
{
	return ptr_ring_produce_bh(&a->ring, skb);
}
60
/* As skb_array_produce(), via the _any variant of the ptr_ring primitive. */
static inline int skb_array_produce_any(struct skb_array *a, struct sk_buff *skb)
{
	return ptr_ring_produce_any(&a->ring, skb);
}
65
/* Might be slightly faster than skb_array_empty below, but only safe if the
 * array is never resized. Also, callers invoking this in a loop must take care
 * to use a compiler barrier, for example cpu_relax().
 * Lockless: empty iff __ptr_ring_peek() sees no entry at the head.
 */
static inline bool __skb_array_empty(struct skb_array *a)
{
	return !__ptr_ring_peek(&a->ring);
}
74
/* Locked variant of __skb_array_empty(); delegates to ptr_ring_empty(). */
static inline bool skb_array_empty(struct skb_array *a)
{
	return ptr_ring_empty(&a->ring);
}
79
/* As skb_array_empty(), via the _bh variant of the ptr_ring primitive. */
static inline bool skb_array_empty_bh(struct skb_array *a)
{
	return ptr_ring_empty_bh(&a->ring);
}
84
/* As skb_array_empty(), via the _irq variant of the ptr_ring primitive. */
static inline bool skb_array_empty_irq(struct skb_array *a)
{
	return ptr_ring_empty_irq(&a->ring);
}
89
/* As skb_array_empty(), via the _any variant of the ptr_ring primitive. */
static inline bool skb_array_empty_any(struct skb_array *a)
{
	return ptr_ring_empty_any(&a->ring);
}
94
/* Dequeue and return the oldest skb, or NULL if the ring is empty;
 * delegates to ptr_ring_consume().
 */
static inline struct sk_buff *skb_array_consume(struct skb_array *a)
{
	return ptr_ring_consume(&a->ring);
}
99
/* Dequeue up to @n skbs into @array; returns the count actually consumed
 * (see ptr_ring_consume_batched()).  The cast to void ** is safe because
 * the ring stores the skbs as plain pointers.
 */
static inline int skb_array_consume_batched(struct skb_array *a,
					    struct sk_buff **array, int n)
{
	return ptr_ring_consume_batched(&a->ring, (void **)array, n);
}
105
/* As skb_array_consume(), via the _irq variant of the ptr_ring primitive. */
static inline struct sk_buff *skb_array_consume_irq(struct skb_array *a)
{
	return ptr_ring_consume_irq(&a->ring);
}
110
/* As skb_array_consume_batched(), via the _irq ptr_ring variant. */
static inline int skb_array_consume_batched_irq(struct skb_array *a,
						struct sk_buff **array, int n)
{
	return ptr_ring_consume_batched_irq(&a->ring, (void **)array, n);
}
116
/* As skb_array_consume(), via the _any variant of the ptr_ring primitive. */
static inline struct sk_buff *skb_array_consume_any(struct skb_array *a)
{
	return ptr_ring_consume_any(&a->ring);
}
121
/* As skb_array_consume_batched(), via the _any ptr_ring variant. */
static inline int skb_array_consume_batched_any(struct skb_array *a,
						struct sk_buff **array, int n)
{
	return ptr_ring_consume_batched_any(&a->ring, (void **)array, n);
}
127
128
/* As skb_array_consume(), via the _bh variant of the ptr_ring primitive. */
static inline struct sk_buff *skb_array_consume_bh(struct skb_array *a)
{
	return ptr_ring_consume_bh(&a->ring);
}
133
/* As skb_array_consume_batched(), via the _bh ptr_ring variant. */
static inline int skb_array_consume_batched_bh(struct skb_array *a,
					       struct sk_buff **array, int n)
{
	return ptr_ring_consume_batched_bh(&a->ring, (void **)array, n);
}
139
Michael S. Tsirkinad69f352016-06-13 23:54:41 +0300140static inline int __skb_array_len_with_tag(struct sk_buff *skb)
141{
142 if (likely(skb)) {
143 int len = skb->len;
144
145 if (skb_vlan_tag_present(skb))
146 len += VLAN_HLEN;
147
148 return len;
149 } else {
150 return 0;
151 }
152}
153
/* Length (with VLAN tag, see __skb_array_len_with_tag()) of the skb at the
 * head of the queue without dequeuing it; applied via PTR_RING_PEEK_CALL.
 */
static inline int skb_array_peek_len(struct skb_array *a)
{
	return PTR_RING_PEEK_CALL(&a->ring, __skb_array_len_with_tag);
}
158
/* As skb_array_peek_len(), via the _IRQ peek-call variant. */
static inline int skb_array_peek_len_irq(struct skb_array *a)
{
	return PTR_RING_PEEK_CALL_IRQ(&a->ring, __skb_array_len_with_tag);
}
163
/* As skb_array_peek_len(), via the _BH peek-call variant. */
static inline int skb_array_peek_len_bh(struct skb_array *a)
{
	return PTR_RING_PEEK_CALL_BH(&a->ring, __skb_array_len_with_tag);
}
168
/* As skb_array_peek_len(), via the _ANY peek-call variant. */
static inline int skb_array_peek_len_any(struct skb_array *a)
{
	return PTR_RING_PEEK_CALL_ANY(&a->ring, __skb_array_len_with_tag);
}
173
/* Allocate the ring storage for @size entries with allocation flags @gfp;
 * returns ptr_ring_init()'s result (0 on success).
 */
static inline int skb_array_init(struct skb_array *a, int size, gfp_t gfp)
{
	return ptr_ring_init(&a->ring, size, gfp);
}
178
/* Destructor callback handed to the ptr_ring resize/unconsume/cleanup
 * helpers below: frees an skb that is dropped from the ring.  Takes
 * void * to match the ptr_ring destructor signature.
 *
 * Marked inline as well as static so that translation units which include
 * this header but never use the resize/cleanup helpers do not get a
 * -Wunused-function warning for it; taking its address below still works.
 */
static inline void __skb_array_destroy_skb(void *ptr)
{
	kfree_skb(ptr);
}
183
/* Push @n previously consumed skbs from @skbs back onto the ring; entries
 * that no longer fit are freed via __skb_array_destroy_skb() (see
 * ptr_ring_unconsume()).
 */
static inline void skb_array_unconsume(struct skb_array *a,
				       struct sk_buff **skbs, int n)
{
	ptr_ring_unconsume(&a->ring, (void **)skbs, n, __skb_array_destroy_skb);
}
189
/* Resize the ring to @size entries; skbs that do not fit in the new ring
 * are freed via __skb_array_destroy_skb() (see ptr_ring_resize()).
 */
static inline int skb_array_resize(struct skb_array *a, int size, gfp_t gfp)
{
	return ptr_ring_resize(&a->ring, size, gfp, __skb_array_destroy_skb);
}
194
/* Resize @nrings rings at once (see ptr_ring_resize_multiple()).
 *
 * The cast of skb_array ** to ptr_ring ** is only valid because the
 * embedded ring is the first (offset-0) member of struct skb_array;
 * the BUILD_BUG_ON breaks the build if that layout ever changes.
 */
static inline int skb_array_resize_multiple(struct skb_array **rings,
					    int nrings, int size, gfp_t gfp)
{
	BUILD_BUG_ON(offsetof(struct skb_array, ring));
	return ptr_ring_resize_multiple((struct ptr_ring **)rings,
					nrings, size, gfp,
					__skb_array_destroy_skb);
}
203
/* Free the ring storage and any skbs still queued, each released via
 * __skb_array_destroy_skb() (see ptr_ring_cleanup()).
 */
static inline void skb_array_cleanup(struct skb_array *a)
{
	ptr_ring_cleanup(&a->ring, __skb_array_destroy_skb);
}
208
#endif /* _LINUX_SKB_ARRAY_H */