/* SPDX-License-Identifier: GPL-2.0 */
/* XDP user-space ring structure
 * Copyright(c) 2018 Intel Corporation.
 */

#ifndef _LINUX_XSK_QUEUE_H
#define _LINUX_XSK_QUEUE_H

#include <linux/types.h>
#include <linux/if_xdp.h>

#include "xdp_umem_props.h"

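/* Number of entries pulled from the shared ring at a time when the
 * cached consumer window runs dry; see xskq_peek_id()/xskq_peek_desc().
 */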
#define RX_BATCH_SIZE 16

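/* Producer/consumer indices shared with user space. Each index gets its
 * own cache line so the two sides do not false-share.
 */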
struct xdp_ring {
	u32 producer ____cacheline_aligned_in_smp;
	u32 consumer ____cacheline_aligned_in_smp;
};

/* Used for the RX and TX queues for packets */
struct xdp_rxtx_ring {
	struct xdp_ring ptrs;
	struct xdp_desc desc[0] ____cacheline_aligned_in_smp;
};

/* Used for the fill and completion queues for buffers */
struct xdp_umem_ring {
	struct xdp_ring ptrs;
	u32 desc[0] ____cacheline_aligned_in_smp;
};

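/* Kernel-side state of one ring. prod_head/prod_tail and
 * cons_head/cons_tail are local caches of the shared indices; the shared
 * ring->producer/consumer words are only read when the caches run out
 * and only written when completed work is published.
 */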
struct xsk_queue {
	struct xdp_umem_props umem_props;
	u32 ring_mask;
	u32 nentries;
	u32 prod_head;
	u32 prod_tail;
	u32 cons_head;
	u32 cons_tail;
	struct xdp_ring *ring;
	u64 invalid_descs;
};

/* Common functions operating for both RXTX and umem queues */

static inline u64 xskq_nb_invalid_descs(struct xsk_queue *q)
{
	return q ? q->invalid_descs : 0;
}

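/* Return how many entries are ready for consumption, capped at @dcnt,
 * re-reading the shared producer index only when the cache looks empty.
 */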
static inline u32 xskq_nb_avail(struct xsk_queue *q, u32 dcnt)
{
	u32 entries = q->prod_tail - q->cons_tail;

	if (entries == 0) {
		/* Refresh the local pointer */
		q->prod_tail = READ_ONCE(q->ring->producer);
		entries = q->prod_tail - q->cons_tail;
	}

	return (entries > dcnt) ? dcnt : entries;
}

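/* Return the number of free slots in front of @producer, re-reading the
 * shared consumer index only when fewer than @dcnt slots appear free.
 */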
static inline u32 xskq_nb_free(struct xsk_queue *q, u32 producer, u32 dcnt)
{
	u32 free_entries = q->nentries - (producer - q->cons_tail);

	if (free_entries >= dcnt)
		return free_entries;

	/* Refresh the local tail pointer */
	q->cons_tail = READ_ONCE(q->ring->consumer);
	return q->nentries - (producer - q->cons_tail);
}

/* UMEM queue */

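/* A frame id is valid only if it refers to one of the umem's frames;
 * anything else is counted in invalid_descs and dropped.
 */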
static inline bool xskq_is_valid_id(struct xsk_queue *q, u32 idx)
{
	if (unlikely(idx >= q->umem_props.nframes)) {
		q->invalid_descs++;
		return false;
	}
	return true;
}

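/* Walk the cached window [cons_tail, cons_head) and return a pointer to
 * the first valid frame id, skipping (and counting) invalid entries.
 * Returns NULL when the window is exhausted.
 */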
static inline u32 *xskq_validate_id(struct xsk_queue *q)
{
	while (q->cons_tail != q->cons_head) {
		struct xdp_umem_ring *ring = (struct xdp_umem_ring *)q->ring;
		unsigned int idx = q->cons_tail & q->ring_mask;

		if (xskq_is_valid_id(q, ring->desc[idx]))
			return &ring->desc[idx];

		q->cons_tail++;
	}

	return NULL;
}

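/* Peek at the next frame id without consuming it. When the cached window
 * is empty, publish the consumer index, refill the window and validate
 * the first new entry.
 */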
static inline u32 *xskq_peek_id(struct xsk_queue *q)
{
	struct xdp_umem_ring *ring;

	if (q->cons_tail == q->cons_head) {
		WRITE_ONCE(q->ring->consumer, q->cons_tail);
		q->cons_head = q->cons_tail + xskq_nb_avail(q, RX_BATCH_SIZE);

		/* Order consumer and data */
		smp_rmb();

		return xskq_validate_id(q);
	}

	ring = (struct xdp_umem_ring *)q->ring;
	return &ring->desc[q->cons_tail & q->ring_mask];
}

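/* Consume the id returned by the last peek and advance past any invalid
 * ids that follow it.
 */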
static inline void xskq_discard_id(struct xsk_queue *q)
{
	q->cons_tail++;
	(void)xskq_validate_id(q);
}

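/* Write @id into the next slot and immediately make it visible to the
 * consumer side; the data is ordered before the producer index update.
 */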
static inline int xskq_produce_id(struct xsk_queue *q, u32 id)
{
	struct xdp_umem_ring *ring = (struct xdp_umem_ring *)q->ring;

	ring->desc[q->prod_tail++ & q->ring_mask] = id;

	/* Order producer and data */
	smp_wmb();

	WRITE_ONCE(q->ring->producer, q->prod_tail);
	return 0;
}

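/* Reserve one slot for a later xskq_produce_id(); -ENOSPC if the ring
 * is full.
 */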
static inline int xskq_reserve_id(struct xsk_queue *q)
{
	if (xskq_nb_free(q, q->prod_head, 1) == 0)
		return -ENOSPC;

	q->prod_head++;
	return 0;
}

/* Rx/Tx queue */

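/* A descriptor is valid only if its frame id exists, its length is
 * non-zero and its offset/length window lies entirely within a single
 * frame; anything else bumps invalid_descs.
 */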
static inline bool xskq_is_valid_desc(struct xsk_queue *q, struct xdp_desc *d)
{
	u32 buff_len;

	if (unlikely(d->idx >= q->umem_props.nframes)) {
		q->invalid_descs++;
		return false;
	}

	buff_len = q->umem_props.frame_size;
	if (unlikely(d->len > buff_len || d->len == 0 ||
		     d->offset > buff_len || d->offset + d->len > buff_len)) {
		q->invalid_descs++;
		return false;
	}

	return true;
}

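/* Rx/Tx counterpart of xskq_validate_id(): copy the first valid
 * descriptor in the cached window into @desc, or return NULL if none is
 * left.
 */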
static inline struct xdp_desc *xskq_validate_desc(struct xsk_queue *q,
						  struct xdp_desc *desc)
{
	while (q->cons_tail != q->cons_head) {
		struct xdp_rxtx_ring *ring = (struct xdp_rxtx_ring *)q->ring;
		unsigned int idx = q->cons_tail & q->ring_mask;

		if (xskq_is_valid_desc(q, &ring->desc[idx])) {
			if (desc)
				*desc = ring->desc[idx];
			return desc;
		}

		q->cons_tail++;
	}

	return NULL;
}

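/* Peek at the next descriptor without consuming it, refilling and
 * re-validating the cached window when it is empty.
 */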
static inline struct xdp_desc *xskq_peek_desc(struct xsk_queue *q,
					      struct xdp_desc *desc)
{
	struct xdp_rxtx_ring *ring;

	if (q->cons_tail == q->cons_head) {
		WRITE_ONCE(q->ring->consumer, q->cons_tail);
		q->cons_head = q->cons_tail + xskq_nb_avail(q, RX_BATCH_SIZE);

		/* Order consumer and data */
		smp_rmb();

		return xskq_validate_desc(q, desc);
	}

	ring = (struct xdp_rxtx_ring *)q->ring;
	*desc = ring->desc[q->cons_tail & q->ring_mask];
	return desc;
}

static inline void xskq_discard_desc(struct xsk_queue *q)
{
	q->cons_tail++;
	(void)xskq_validate_desc(q, NULL);
}

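/* Stage one descriptor at prod_head. It only becomes visible to user
 * space once xskq_produce_flush_desc() publishes the batch.
 */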
static inline int xskq_produce_batch_desc(struct xsk_queue *q,
					  u32 id, u32 len, u16 offset)
{
	struct xdp_rxtx_ring *ring = (struct xdp_rxtx_ring *)q->ring;
	unsigned int idx;

	if (xskq_nb_free(q, q->prod_head, 1) == 0)
		return -ENOSPC;

	idx = (q->prod_head++) & q->ring_mask;
	ring->desc[idx].idx = id;
	ring->desc[idx].len = len;
	ring->desc[idx].offset = offset;

	return 0;
}

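/* Publish every descriptor staged since the last flush. */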
static inline void xskq_produce_flush_desc(struct xsk_queue *q)
{
	/* Order producer and data */
	smp_wmb();

	q->prod_tail = q->prod_head;
	WRITE_ONCE(q->ring->producer, q->prod_tail);
}

static inline bool xskq_full_desc(struct xsk_queue *q)
{
	return xskq_nb_avail(q, q->nentries) == q->nentries;
}

static inline bool xskq_empty_desc(struct xsk_queue *q)
{
	return xskq_nb_free(q, q->prod_tail, 1) == q->nentries;
}

void xskq_set_umem(struct xsk_queue *q, struct xdp_umem_props *umem_props);
struct xsk_queue *xskq_create(u32 nentries, bool umem_queue);
void xskq_destroy(struct xsk_queue *q_ops);

#endif /* _LINUX_XSK_QUEUE_H */