/* SPDX-License-Identifier: GPL-2.0 */
/* XDP user-space ring structure
 * Copyright(c) 2018 Intel Corporation.
 */

#ifndef _LINUX_XSK_QUEUE_H
#define _LINUX_XSK_QUEUE_H

#include <linux/types.h>
#include <linux/if_xdp.h>
#include <net/xdp_sock.h>

#define RX_BATCH_SIZE 16
#define LAZY_UPDATE_THRESHOLD 128

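/* Shared ring header. The producer and consumer indices sit on separate
 * cache lines so that the two sides of the ring do not false-share.
 */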
struct xdp_ring {
	u32 producer ____cacheline_aligned_in_smp;
	u32 consumer ____cacheline_aligned_in_smp;
};

/* Used for the RX and TX queues for packets */
struct xdp_rxtx_ring {
	struct xdp_ring ptrs;
	struct xdp_desc desc[0] ____cacheline_aligned_in_smp;
};

/* Used for the fill and completion queues for buffers */
struct xdp_umem_ring {
	struct xdp_ring ptrs;
	u64 desc[0] ____cacheline_aligned_in_smp;
};

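/* Kernel-side state for one ring. The prod_* and cons_* members are local
 * copies of the shared producer/consumer indices: the *_head values track
 * entries reserved or batched locally, while the *_tail values are what is
 * synchronized with the shared ring (published when producing, re-read when
 * the local view runs out of entries or space).
 */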
struct xsk_queue {
	struct xdp_umem_props umem_props;
	u32 ring_mask;
	u32 nentries;
	u32 prod_head;
	u32 prod_tail;
	u32 cons_head;
	u32 cons_tail;
	struct xdp_ring *ring;
	u64 invalid_descs;
};

/* Common functions operating for both RXTX and umem queues */

static inline u64 xskq_nb_invalid_descs(struct xsk_queue *q)
{
	return q ? q->invalid_descs : 0;
}

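/* Return the number of entries available for consumption, capped at dcnt.
 * The cached producer index is only re-read from the shared ring when the
 * local view is empty.
 */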
static inline u32 xskq_nb_avail(struct xsk_queue *q, u32 dcnt)
{
	u32 entries = q->prod_tail - q->cons_tail;

	if (entries == 0) {
		/* Refresh the local pointer */
		q->prod_tail = READ_ONCE(q->ring->producer);
		entries = q->prod_tail - q->cons_tail;
	}

	return (entries > dcnt) ? dcnt : entries;
}

static inline u32 xskq_nb_free_lazy(struct xsk_queue *q, u32 producer)
{
	return q->nentries - (producer - q->cons_tail);
}

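/* Return the number of free entries seen by a producer at "producer". The
 * cheap lazy estimate is tried first; the shared consumer index is only
 * re-read when the estimate cannot cover the dcnt entries the caller needs.
 */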
static inline u32 xskq_nb_free(struct xsk_queue *q, u32 producer, u32 dcnt)
{
	u32 free_entries = xskq_nb_free_lazy(q, producer);

	if (free_entries >= dcnt)
		return free_entries;

	/* Refresh the local tail pointer */
	q->cons_tail = READ_ONCE(q->ring->consumer);
	return q->nentries - (producer - q->cons_tail);
}

/* UMEM queue */

static inline bool xskq_is_valid_addr(struct xsk_queue *q, u64 addr)
{
	if (addr >= q->umem_props.size) {
		q->invalid_descs++;
		return false;
	}

	return true;
}

static inline u64 *xskq_validate_addr(struct xsk_queue *q, u64 *addr)
{
	while (q->cons_tail != q->cons_head) {
		struct xdp_umem_ring *ring = (struct xdp_umem_ring *)q->ring;
		unsigned int idx = q->cons_tail & q->ring_mask;

		*addr = READ_ONCE(ring->desc[idx]) & q->umem_props.chunk_mask;
		if (xskq_is_valid_addr(q, *addr))
			return addr;

		q->cons_tail++;
	}

	return NULL;
}

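/* Peek at the next valid address without consuming it. When the current
 * batch is exhausted, the consumer index is published back to the shared
 * ring and up to RX_BATCH_SIZE new entries are claimed. Callers consume the
 * returned address by following up with xskq_discard_addr().
 */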
static inline u64 *xskq_peek_addr(struct xsk_queue *q, u64 *addr)
{
	if (q->cons_tail == q->cons_head) {
		WRITE_ONCE(q->ring->consumer, q->cons_tail);
		q->cons_head = q->cons_tail + xskq_nb_avail(q, RX_BATCH_SIZE);

		/* Order consumer and data */
		smp_rmb();
	}

	return xskq_validate_addr(q, addr);
}

static inline void xskq_discard_addr(struct xsk_queue *q)
{
	q->cons_tail++;
}

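/* Produce a single address and publish it immediately. The write barrier
 * orders the descriptor store against the producer index update.
 */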
static inline int xskq_produce_addr(struct xsk_queue *q, u64 addr)
{
	struct xdp_umem_ring *ring = (struct xdp_umem_ring *)q->ring;

	if (xskq_nb_free(q, q->prod_tail, LAZY_UPDATE_THRESHOLD) == 0)
		return -ENOSPC;

	ring->desc[q->prod_tail++ & q->ring_mask] = addr;

	/* Order producer and data */
	smp_wmb();

	WRITE_ONCE(q->ring->producer, q->prod_tail);
	return 0;
}

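/* Produce an address without publishing it: only the local prod_head is
 * advanced. The entries become visible to the consumer once
 * xskq_produce_flush_addr_n() updates the shared producer index.
 */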
static inline int xskq_produce_addr_lazy(struct xsk_queue *q, u64 addr)
{
	struct xdp_umem_ring *ring = (struct xdp_umem_ring *)q->ring;

	if (xskq_nb_free(q, q->prod_head, LAZY_UPDATE_THRESHOLD) == 0)
		return -ENOSPC;

	ring->desc[q->prod_head++ & q->ring_mask] = addr;
	return 0;
}

static inline void xskq_produce_flush_addr_n(struct xsk_queue *q,
					     u32 nb_entries)
{
	/* Order producer and data */
	smp_wmb();

	q->prod_tail += nb_entries;
	WRITE_ONCE(q->ring->producer, q->prod_tail);
}

static inline int xskq_reserve_addr(struct xsk_queue *q)
{
	if (xskq_nb_free(q, q->prod_head, 1) == 0)
		return -ENOSPC;

	q->prod_head++;
	return 0;
}

/* Rx/Tx queue */

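/* A Tx/Rx descriptor is valid if its address lies inside the umem and the
 * buffer does not cross a chunk boundary.
 */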
static inline bool xskq_is_valid_desc(struct xsk_queue *q, struct xdp_desc *d)
{
	if (!xskq_is_valid_addr(q, d->addr))
		return false;

	if (((d->addr + d->len) & q->umem_props.chunk_mask) !=
	    (d->addr & q->umem_props.chunk_mask)) {
		q->invalid_descs++;
		return false;
	}

	return true;
}

static inline struct xdp_desc *xskq_validate_desc(struct xsk_queue *q,
						  struct xdp_desc *desc)
{
	while (q->cons_tail != q->cons_head) {
		struct xdp_rxtx_ring *ring = (struct xdp_rxtx_ring *)q->ring;
		unsigned int idx = q->cons_tail & q->ring_mask;

		*desc = READ_ONCE(ring->desc[idx]);
		if (xskq_is_valid_desc(q, desc))
			return desc;

		q->cons_tail++;
	}

	return NULL;
}

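/* Peek/discard for Rx/Tx descriptors, mirroring xskq_peek_addr() and
 * xskq_discard_addr() on the umem queues.
 */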
static inline struct xdp_desc *xskq_peek_desc(struct xsk_queue *q,
					      struct xdp_desc *desc)
{
	if (q->cons_tail == q->cons_head) {
		WRITE_ONCE(q->ring->consumer, q->cons_tail);
		q->cons_head = q->cons_tail + xskq_nb_avail(q, RX_BATCH_SIZE);

		/* Order consumer and data */
		smp_rmb();
	}

	return xskq_validate_desc(q, desc);
}

static inline void xskq_discard_desc(struct xsk_queue *q)
{
	q->cons_tail++;
}

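/* Queue a descriptor at the local prod_head without making it visible;
 * xskq_produce_flush_desc() issues the write barrier and publishes all
 * queued descriptors at once.
 */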
static inline int xskq_produce_batch_desc(struct xsk_queue *q,
					  u64 addr, u32 len)
{
	struct xdp_rxtx_ring *ring = (struct xdp_rxtx_ring *)q->ring;
	unsigned int idx;

	if (xskq_nb_free(q, q->prod_head, 1) == 0)
		return -ENOSPC;

	idx = (q->prod_head++) & q->ring_mask;
	ring->desc[idx].addr = addr;
	ring->desc[idx].len = len;

	return 0;
}

static inline void xskq_produce_flush_desc(struct xsk_queue *q)
{
	/* Order producer and data */
	smp_wmb();

	q->prod_tail = q->prod_head;
	WRITE_ONCE(q->ring->producer, q->prod_tail);
}

static inline bool xskq_full_desc(struct xsk_queue *q)
{
	return xskq_nb_avail(q, q->nentries) == q->nentries;
}

static inline bool xskq_empty_desc(struct xsk_queue *q)
{
	return xskq_nb_free(q, q->prod_tail, 1) == q->nentries;
}

void xskq_set_umem(struct xsk_queue *q, struct xdp_umem_props *umem_props);
struct xsk_queue *xskq_create(u32 nentries, bool umem_queue);
void xskq_destroy(struct xsk_queue *q_ops);

#endif /* _LINUX_XSK_QUEUE_H */