/* SPDX-License-Identifier: GPL-2.0
 * XDP user-space ring structure
 * Copyright(c) 2018 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */

#ifndef _LINUX_XSK_QUEUE_H
#define _LINUX_XSK_QUEUE_H

#include <linux/types.h>
#include <linux/if_xdp.h>

#include "xdp_umem_props.h"

#define RX_BATCH_SIZE 16

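/* Kernel-side state for one ring shared with user space. prod_head/prod_tail
 * and cons_head/cons_tail are the kernel's cached copies of the ring
 * positions; the shared ring->producer and ring->consumer words are only
 * read with READ_ONCE() and published with WRITE_ONCE() by the helpers
 * below. ring_mask wraps ring indices, so it is presumably nentries - 1
 * with nentries a power of two, and invalid_descs counts descriptors that
 * failed validation.
 */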
struct xsk_queue {
	struct xdp_umem_props umem_props;
	u32 ring_mask;
	u32 nentries;
	u32 prod_head;
	u32 prod_tail;
	u32 cons_head;
	u32 cons_tail;
	struct xdp_ring *ring;
	u64 invalid_descs;
};

/* Common functions operating on both Rx/Tx and umem queues */

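/* Both helpers below work on the cached positions and only re-read the
 * shared ring words (with READ_ONCE()) once the cached view looks empty
 * (xskq_nb_avail) or too full (xskq_nb_free).
 */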
static inline u32 xskq_nb_avail(struct xsk_queue *q, u32 dcnt)
{
	u32 entries = q->prod_tail - q->cons_tail;

	if (entries == 0) {
		/* Refresh the local pointer */
		q->prod_tail = READ_ONCE(q->ring->producer);
		entries = q->prod_tail - q->cons_tail;
	}

	return (entries > dcnt) ? dcnt : entries;
}

static inline u32 xskq_nb_free(struct xsk_queue *q, u32 producer, u32 dcnt)
{
	u32 free_entries = q->nentries - (producer - q->cons_tail);

	if (free_entries >= dcnt)
		return free_entries;

	/* Refresh the local tail pointer */
	q->cons_tail = READ_ONCE(q->ring->consumer);
	return q->nentries - (producer - q->cons_tail);
}

/* UMEM queue */

static inline bool xskq_is_valid_id(struct xsk_queue *q, u32 idx)
{
	if (unlikely(idx >= q->umem_props.nframes)) {
		q->invalid_descs++;
		return false;
	}
	return true;
}

static inline u32 *xskq_validate_id(struct xsk_queue *q)
{
	while (q->cons_tail != q->cons_head) {
		struct xdp_umem_ring *ring = (struct xdp_umem_ring *)q->ring;
		unsigned int idx = q->cons_tail & q->ring_mask;

		if (xskq_is_valid_id(q, ring->desc[idx]))
			return &ring->desc[idx];

		q->cons_tail++;
	}

	return NULL;
}

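/* Typical consumer-side pattern for a umem queue, shown here only as an
 * illustrative sketch (use_frame() is a hypothetical stand-in for the
 * caller's work on a validated frame id):
 *
 *	u32 *id;
 *
 *	while ((id = xskq_peek_id(q))) {
 *		use_frame(q, *id);
 *		xskq_discard_id(q);
 *	}
 *
 * xskq_peek_id() publishes the consumer pointer and pulls in a new batch
 * of at most RX_BATCH_SIZE entries only when the previous batch has been
 * fully consumed.
 */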
static inline u32 *xskq_peek_id(struct xsk_queue *q)
{
	struct xdp_umem_ring *ring;

	if (q->cons_tail == q->cons_head) {
		WRITE_ONCE(q->ring->consumer, q->cons_tail);
		q->cons_head = q->cons_tail + xskq_nb_avail(q, RX_BATCH_SIZE);

		/* Order consumer and data */
		smp_rmb();

		return xskq_validate_id(q);
	}

	ring = (struct xdp_umem_ring *)q->ring;
	return &ring->desc[q->cons_tail & q->ring_mask];
}

static inline void xskq_discard_id(struct xsk_queue *q)
{
	q->cons_tail++;
	(void)xskq_validate_id(q);
}

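/* xskq_produce_id() below writes and publishes unconditionally, so the
 * caller is expected to have guaranteed space beforehand, e.g. with
 * xskq_reserve_id(). An illustrative sketch, where id is the frame id
 * being handed to the other side:
 *
 *	if (xskq_reserve_id(q))
 *		return -ENOSPC;
 *	...
 *	xskq_produce_id(q, id);
 *
 * xskq_reserve_id() only advances the local prod_head; nothing becomes
 * visible to the consumer until xskq_produce_id() publishes prod_tail.
 */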
static inline int xskq_produce_id(struct xsk_queue *q, u32 id)
{
	struct xdp_umem_ring *ring = (struct xdp_umem_ring *)q->ring;

	ring->desc[q->prod_tail++ & q->ring_mask] = id;

	/* Order producer and data */
	smp_wmb();

	WRITE_ONCE(q->ring->producer, q->prod_tail);
	return 0;
}

static inline int xskq_reserve_id(struct xsk_queue *q)
{
	if (xskq_nb_free(q, q->prod_head, 1) == 0)
		return -ENOSPC;

	q->prod_head++;
	return 0;
}

/* Rx/Tx queue */

static inline bool xskq_is_valid_desc(struct xsk_queue *q, struct xdp_desc *d)
{
	u32 buff_len;

	if (unlikely(d->idx >= q->umem_props.nframes)) {
		q->invalid_descs++;
		return false;
	}

	buff_len = q->umem_props.frame_size;
	if (unlikely(d->len > buff_len || d->len == 0 ||
		     d->offset > buff_len || d->offset + d->len > buff_len)) {
		q->invalid_descs++;
		return false;
	}

	return true;
}

static inline struct xdp_desc *xskq_validate_desc(struct xsk_queue *q,
						  struct xdp_desc *desc)
{
	while (q->cons_tail != q->cons_head) {
		struct xdp_rxtx_ring *ring = (struct xdp_rxtx_ring *)q->ring;
		unsigned int idx = q->cons_tail & q->ring_mask;

		if (xskq_is_valid_desc(q, &ring->desc[idx])) {
			if (desc)
				*desc = ring->desc[idx];
			return desc;
		}

		q->cons_tail++;
	}

	return NULL;
}

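/* Typical consumer-side pattern for an Rx/Tx queue, again only an
 * illustrative sketch (xmit_frame() is a hypothetical stand-in for the
 * caller's work). Unlike the id helpers above, the descriptor is copied
 * out to the caller:
 *
 *	struct xdp_desc desc;
 *
 *	while (xskq_peek_desc(q, &desc)) {
 *		xmit_frame(&desc);
 *		xskq_discard_desc(q);
 *	}
 */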
static inline struct xdp_desc *xskq_peek_desc(struct xsk_queue *q,
					      struct xdp_desc *desc)
{
	struct xdp_rxtx_ring *ring;

	if (q->cons_tail == q->cons_head) {
		WRITE_ONCE(q->ring->consumer, q->cons_tail);
		q->cons_head = q->cons_tail + xskq_nb_avail(q, RX_BATCH_SIZE);

		/* Order consumer and data */
		smp_rmb();

		return xskq_validate_desc(q, desc);
	}

	ring = (struct xdp_rxtx_ring *)q->ring;
	*desc = ring->desc[q->cons_tail & q->ring_mask];
	return desc;
}

static inline void xskq_discard_desc(struct xsk_queue *q)
{
	q->cons_tail++;
	(void)xskq_validate_desc(q, NULL);
}

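/* Descriptors staged with xskq_produce_batch_desc() only become visible
 * to user space after xskq_produce_flush_desc(), which issues the write
 * barrier and publishes prod_tail. An illustrative sketch:
 *
 *	err = xskq_produce_batch_desc(q, id, len, offset);
 *	if (!err)
 *		xskq_produce_flush_desc(q);
 *
 * Batching several produce calls before one flush amortizes the barrier
 * and the store to ring->producer.
 */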
static inline int xskq_produce_batch_desc(struct xsk_queue *q,
					  u32 id, u32 len, u16 offset)
{
	struct xdp_rxtx_ring *ring = (struct xdp_rxtx_ring *)q->ring;
	unsigned int idx;

	if (xskq_nb_free(q, q->prod_head, 1) == 0)
		return -ENOSPC;

	idx = (q->prod_head++) & q->ring_mask;
	ring->desc[idx].idx = id;
	ring->desc[idx].len = len;
	ring->desc[idx].offset = offset;

	return 0;
}

static inline void xskq_produce_flush_desc(struct xsk_queue *q)
{
	/* Order producer and data */
	smp_wmb();

	q->prod_tail = q->prod_head;
	WRITE_ONCE(q->ring->producer, q->prod_tail);
}

static inline bool xskq_full_desc(struct xsk_queue *q)
{
	return (xskq_nb_avail(q, q->nentries) == q->nentries);
}

static inline bool xskq_empty_desc(struct xsk_queue *q)
{
	return (xskq_nb_free(q, q->prod_tail, 1) == q->nentries);
}

void xskq_set_umem(struct xsk_queue *q, struct xdp_umem_props *umem_props);
struct xsk_queue *xskq_create(u32 nentries, bool umem_queue);
void xskq_destroy(struct xsk_queue *q_ops);

#endif /* _LINUX_XSK_QUEUE_H */