/* SPDX-License-Identifier: GPL-2.0
 * XDP user-space ring structure
 * Copyright(c) 2018 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 */

#ifndef _LINUX_XSK_QUEUE_H
#define _LINUX_XSK_QUEUE_H

#include <linux/types.h>
#include <linux/if_xdp.h>

#include "xdp_umem_props.h"

#define RX_BATCH_SIZE 16

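/* Kernel-side state for one descriptor ring. prod_head/prod_tail and
 * cons_head/cons_tail are locally cached copies of the producer and
 * consumer pointers; the values shared with user space live in
 * struct xdp_ring and are only accessed via READ_ONCE()/WRITE_ONCE()
 * together with the appropriate memory barriers.
 */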
struct xsk_queue {
	struct xdp_umem_props umem_props;
	u32 ring_mask;
	u32 nentries;
	u32 prod_head;
	u32 prod_tail;
	u32 cons_head;
	u32 cons_tail;
	struct xdp_ring *ring;
	u64 invalid_descs;
};

/* Common functions operating on both Rx/Tx and umem queues */

static inline u64 xskq_nb_invalid_descs(struct xsk_queue *q)
{
	return q ? q->invalid_descs : 0;
}

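/* Return how many entries are available for consumption, capped at dcnt.
 * The cached producer pointer is only refreshed from the shared ring
 * when the cached view says the ring is empty.
 */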
static inline u32 xskq_nb_avail(struct xsk_queue *q, u32 dcnt)
{
	u32 entries = q->prod_tail - q->cons_tail;

	if (entries == 0) {
		/* Refresh the local pointer */
		q->prod_tail = READ_ONCE(q->ring->producer);
		entries = q->prod_tail - q->cons_tail;
	}

	return (entries > dcnt) ? dcnt : entries;
}

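/* Return how many entries can still be produced at 'producer'. The
 * cached consumer pointer is only refreshed from the shared ring when
 * the cached view reports fewer than dcnt free entries.
 */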
static inline u32 xskq_nb_free(struct xsk_queue *q, u32 producer, u32 dcnt)
{
	u32 free_entries = q->nentries - (producer - q->cons_tail);

	if (free_entries >= dcnt)
		return free_entries;

	/* Refresh the local tail pointer */
	q->cons_tail = READ_ONCE(q->ring->consumer);
	return q->nentries - (producer - q->cons_tail);
}

/* UMEM queue */

static inline bool xskq_is_valid_id(struct xsk_queue *q, u32 idx)
{
	if (unlikely(idx >= q->umem_props.nframes)) {
		q->invalid_descs++;
		return false;
	}
	return true;
}

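/* Scan from cons_tail to cons_head and return a pointer to the first
 * valid frame id; invalid ids are counted and skipped. Returns NULL if
 * no valid id is found.
 */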
static inline u32 *xskq_validate_id(struct xsk_queue *q)
{
	while (q->cons_tail != q->cons_head) {
		struct xdp_umem_ring *ring = (struct xdp_umem_ring *)q->ring;
		unsigned int idx = q->cons_tail & q->ring_mask;

		if (xskq_is_valid_id(q, ring->desc[idx]))
			return &ring->desc[idx];

		q->cons_tail++;
	}

	return NULL;
}

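/* Return a pointer to the next id to consume without advancing
 * cons_tail. When the cached window is empty, the consumer pointer is
 * published to the shared ring and a new batch of up to RX_BATCH_SIZE
 * entries is pulled in and validated.
 */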
static inline u32 *xskq_peek_id(struct xsk_queue *q)
{
	struct xdp_umem_ring *ring;

	if (q->cons_tail == q->cons_head) {
		WRITE_ONCE(q->ring->consumer, q->cons_tail);
		q->cons_head = q->cons_tail + xskq_nb_avail(q, RX_BATCH_SIZE);

		/* Order consumer and data */
		smp_rmb();

		return xskq_validate_id(q);
	}

	ring = (struct xdp_umem_ring *)q->ring;
	return &ring->desc[q->cons_tail & q->ring_mask];
}

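/* Advance past the id returned by xskq_peek_id() and pre-validate the
 * next one, if any.
 */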
static inline void xskq_discard_id(struct xsk_queue *q)
{
	q->cons_tail++;
	(void)xskq_validate_id(q);
}

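/* Publish a single frame id: write the entry, then make it visible by
 * updating the shared producer pointer after a write barrier.
 */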
static inline int xskq_produce_id(struct xsk_queue *q, u32 id)
{
	struct xdp_umem_ring *ring = (struct xdp_umem_ring *)q->ring;

	ring->desc[q->prod_tail++ & q->ring_mask] = id;

	/* Order producer and data */
	smp_wmb();

	WRITE_ONCE(q->ring->producer, q->prod_tail);
	return 0;
}

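/* Reserve room for one id by advancing the local producer head;
 * nothing becomes visible on the ring until xskq_produce_id() runs.
 */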
static inline int xskq_reserve_id(struct xsk_queue *q)
{
	if (xskq_nb_free(q, q->prod_head, 1) == 0)
		return -ENOSPC;

	q->prod_head++;
	return 0;
}

/* Rx/Tx queue */

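/* A descriptor is valid if its frame id is in range and its non-zero
 * length and offset stay within a single frame; anything else is
 * counted in invalid_descs.
 */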
static inline bool xskq_is_valid_desc(struct xsk_queue *q, struct xdp_desc *d)
{
	u32 buff_len;

	if (unlikely(d->idx >= q->umem_props.nframes)) {
		q->invalid_descs++;
		return false;
	}

	buff_len = q->umem_props.frame_size;
	if (unlikely(d->len > buff_len || d->len == 0 ||
		     d->offset > buff_len || d->offset + d->len > buff_len)) {
		q->invalid_descs++;
		return false;
	}

	return true;
}

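/* Scan from cons_tail to cons_head for the first valid descriptor and
 * copy it into *desc when desc is non-NULL; invalid descriptors are
 * counted and skipped. Returns NULL if none is found.
 */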
static inline struct xdp_desc *xskq_validate_desc(struct xsk_queue *q,
						  struct xdp_desc *desc)
{
	while (q->cons_tail != q->cons_head) {
		struct xdp_rxtx_ring *ring = (struct xdp_rxtx_ring *)q->ring;
		unsigned int idx = q->cons_tail & q->ring_mask;

		if (xskq_is_valid_desc(q, &ring->desc[idx])) {
			if (desc)
				*desc = ring->desc[idx];
			return desc;
		}

		q->cons_tail++;
	}

	return NULL;
}

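/* Copy the next descriptor to consume into *desc without advancing
 * cons_tail. When the cached window is empty, the consumer pointer is
 * published and up to RX_BATCH_SIZE new entries are pulled in and
 * validated.
 */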
static inline struct xdp_desc *xskq_peek_desc(struct xsk_queue *q,
					      struct xdp_desc *desc)
{
	struct xdp_rxtx_ring *ring;

	if (q->cons_tail == q->cons_head) {
		WRITE_ONCE(q->ring->consumer, q->cons_tail);
		q->cons_head = q->cons_tail + xskq_nb_avail(q, RX_BATCH_SIZE);

		/* Order consumer and data */
		smp_rmb();

		return xskq_validate_desc(q, desc);
	}

	ring = (struct xdp_rxtx_ring *)q->ring;
	*desc = ring->desc[q->cons_tail & q->ring_mask];
	return desc;
}

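/* Advance past the descriptor returned by xskq_peek_desc() and
 * pre-validate the next one, if any.
 */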
static inline void xskq_discard_desc(struct xsk_queue *q)
{
	q->cons_tail++;
	(void)xskq_validate_desc(q, NULL);
}

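/* Write one Rx/Tx descriptor at the local producer head. The entry is
 * not visible on the ring until xskq_produce_flush_desc() is called.
 */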
static inline int xskq_produce_batch_desc(struct xsk_queue *q,
					  u32 id, u32 len, u16 offset)
{
	struct xdp_rxtx_ring *ring = (struct xdp_rxtx_ring *)q->ring;
	unsigned int idx;

	if (xskq_nb_free(q, q->prod_head, 1) == 0)
		return -ENOSPC;

	idx = (q->prod_head++) & q->ring_mask;
	ring->desc[idx].idx = id;
	ring->desc[idx].len = len;
	ring->desc[idx].offset = offset;

	return 0;
}

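/* Make all descriptors written since the last flush visible: order the
 * descriptor stores before the shared producer pointer update.
 */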
static inline void xskq_produce_flush_desc(struct xsk_queue *q)
{
	/* Order producer and data */
	smp_wmb();

	q->prod_tail = q->prod_head;
	WRITE_ONCE(q->ring->producer, q->prod_tail);
}

static inline bool xskq_full_desc(struct xsk_queue *q)
{
	return (xskq_nb_avail(q, q->nentries) == q->nentries);
}

static inline bool xskq_empty_desc(struct xsk_queue *q)
{
	return (xskq_nb_free(q, q->prod_tail, 1) == q->nentries);
}

void xskq_set_umem(struct xsk_queue *q, struct xdp_umem_props *umem_props);
struct xsk_queue *xskq_create(u32 nentries, bool umem_queue);
void xskq_destroy(struct xsk_queue *q_ops);

#endif /* _LINUX_XSK_QUEUE_H */