/* SPDX-License-Identifier: GPL-2.0 */
/* XDP user-space ring structure
 * Copyright(c) 2018 Intel Corporation.
 */

#ifndef _LINUX_XSK_QUEUE_H
#define _LINUX_XSK_QUEUE_H

#include <linux/types.h>
#include <linux/if_xdp.h>

#include "xdp_umem_props.h"

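/* Maximum number of entries the consumer side pulls into its local
 * window per refresh of the shared producer pointer; see
 * xskq_peek_addr() and xskq_peek_desc().
 */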
#define RX_BATCH_SIZE 16

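/* Ring header shared with user space: the producer and consumer
 * indices live on separate cache lines to avoid false sharing, and the
 * descriptor array follows directly after the header in memory.
 */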
struct xdp_ring {
        u32 producer ____cacheline_aligned_in_smp;
        u32 consumer ____cacheline_aligned_in_smp;
};

/* Used for the RX and TX queues for packets */
struct xdp_rxtx_ring {
        struct xdp_ring ptrs;
        struct xdp_desc desc[0] ____cacheline_aligned_in_smp;
};

/* Used for the fill and completion queues for buffers */
struct xdp_umem_ring {
        struct xdp_ring ptrs;
        u64 desc[0] ____cacheline_aligned_in_smp;
};

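/* Kernel-side bookkeeping for one ring. prod_head/prod_tail and
 * cons_head/cons_tail are local, free-running copies of the ring
 * positions; the shared ring->producer and ring->consumer words are
 * only accessed with READ_ONCE()/WRITE_ONCE() plus the barriers in
 * the helpers below.
 */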
struct xsk_queue {
        struct xdp_umem_props umem_props;
        u32 ring_mask;
        u32 nentries;
        u32 prod_head;
        u32 prod_tail;
        u32 cons_head;
        u32 cons_tail;
        struct xdp_ring *ring;
        u64 invalid_descs;
};

/* Common functions operating for both RXTX and umem queues */

static inline u64 xskq_nb_invalid_descs(struct xsk_queue *q)
{
        return q ? q->invalid_descs : 0;
}

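/* The indices are free-running u32 counters that are never masked when
 * compared, so the producer/consumer differences below stay correct
 * across wrap-around. ring_mask (nentries - 1, with nentries a power
 * of two) is only applied when indexing into the descriptor array.
 */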
static inline u32 xskq_nb_avail(struct xsk_queue *q, u32 dcnt)
{
        u32 entries = q->prod_tail - q->cons_tail;

        if (entries == 0) {
                /* Refresh the local pointer */
                q->prod_tail = READ_ONCE(q->ring->producer);
                entries = q->prod_tail - q->cons_tail;
        }

        return (entries > dcnt) ? dcnt : entries;
}

static inline u32 xskq_nb_free(struct xsk_queue *q, u32 producer, u32 dcnt)
{
        u32 free_entries = q->nentries - (producer - q->cons_tail);

        if (free_entries >= dcnt)
                return free_entries;

        /* Refresh the local tail pointer */
        q->cons_tail = READ_ONCE(q->ring->consumer);
        return q->nentries - (producer - q->cons_tail);
}

/* UMEM queue */

static inline bool xskq_is_valid_addr(struct xsk_queue *q, u64 addr)
{
        if (addr >= q->umem_props.size) {
                q->invalid_descs++;
                return false;
        }

        return true;
}

static inline u64 *xskq_validate_addr(struct xsk_queue *q, u64 *addr)
{
        while (q->cons_tail != q->cons_head) {
                struct xdp_umem_ring *ring = (struct xdp_umem_ring *)q->ring;
                unsigned int idx = q->cons_tail & q->ring_mask;

                *addr = READ_ONCE(ring->desc[idx]) & q->umem_props.chunk_mask;
                if (xskq_is_valid_addr(q, *addr))
                        return addr;

                q->cons_tail++;
        }

        return NULL;
}

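/* Peek at the next valid address without consuming it. When the local
 * window is empty, the shared consumer pointer is published and up to
 * RX_BATCH_SIZE newly produced entries are pulled in before the data
 * is read (hence the smp_rmb()). The caller consumes the entry with
 * xskq_discard_addr() once it is done with it.
 */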
static inline u64 *xskq_peek_addr(struct xsk_queue *q, u64 *addr)
{
        if (q->cons_tail == q->cons_head) {
                WRITE_ONCE(q->ring->consumer, q->cons_tail);
                q->cons_head = q->cons_tail + xskq_nb_avail(q, RX_BATCH_SIZE);

                /* Order consumer and data */
                smp_rmb();
        }

        return xskq_validate_addr(q, addr);
}

static inline void xskq_discard_addr(struct xsk_queue *q)
{
        q->cons_tail++;
}

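/* Two ways to add entries on the producer side: xskq_produce_addr()
 * writes one address at prod_tail and publishes it immediately, while
 * xskq_reserve_addr() only claims a slot by advancing prod_head so
 * that the address can be filled in and published later.
 */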
static inline int xskq_produce_addr(struct xsk_queue *q, u64 addr)
{
        struct xdp_umem_ring *ring = (struct xdp_umem_ring *)q->ring;

        ring->desc[q->prod_tail++ & q->ring_mask] = addr;

        /* Order producer and data */
        smp_wmb();

        WRITE_ONCE(q->ring->producer, q->prod_tail);
        return 0;
}

static inline int xskq_reserve_addr(struct xsk_queue *q)
{
        if (xskq_nb_free(q, q->prod_head, 1) == 0)
                return -ENOSPC;

        q->prod_head++;
        return 0;
}

/* Rx/Tx queue */

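/* An Rx/Tx descriptor is valid if its address lies inside the umem and
 * the buffer does not cross a chunk boundary, i.e. addr and addr + len
 * fall within the same chunk under chunk_mask.
 */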
static inline bool xskq_is_valid_desc(struct xsk_queue *q, struct xdp_desc *d)
{
        if (!xskq_is_valid_addr(q, d->addr))
                return false;

        if (((d->addr + d->len) & q->umem_props.chunk_mask) !=
            (d->addr & q->umem_props.chunk_mask)) {
                q->invalid_descs++;
                return false;
        }

        return true;
}

static inline struct xdp_desc *xskq_validate_desc(struct xsk_queue *q,
                                                  struct xdp_desc *desc)
{
        while (q->cons_tail != q->cons_head) {
                struct xdp_rxtx_ring *ring = (struct xdp_rxtx_ring *)q->ring;
                unsigned int idx = q->cons_tail & q->ring_mask;

                *desc = READ_ONCE(ring->desc[idx]);
                if (xskq_is_valid_desc(q, desc))
                        return desc;

                q->cons_tail++;
        }

        return NULL;
}

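/* Rx/Tx counterpart of xskq_peek_addr(): returns the next valid
 * descriptor without consuming it, refreshing the local window in
 * batches of up to RX_BATCH_SIZE entries.
 */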
static inline struct xdp_desc *xskq_peek_desc(struct xsk_queue *q,
                                              struct xdp_desc *desc)
{
        if (q->cons_tail == q->cons_head) {
                WRITE_ONCE(q->ring->consumer, q->cons_tail);
                q->cons_head = q->cons_tail + xskq_nb_avail(q, RX_BATCH_SIZE);

                /* Order consumer and data */
                smp_rmb();
        }

        return xskq_validate_desc(q, desc);
}

static inline void xskq_discard_desc(struct xsk_queue *q)
{
        q->cons_tail++;
}

static inline int xskq_produce_batch_desc(struct xsk_queue *q,
                                          u64 addr, u32 len)
{
        struct xdp_rxtx_ring *ring = (struct xdp_rxtx_ring *)q->ring;
        unsigned int idx;

        if (xskq_nb_free(q, q->prod_head, 1) == 0)
                return -ENOSPC;

        idx = (q->prod_head++) & q->ring_mask;
        ring->desc[idx].addr = addr;
        ring->desc[idx].len = len;

        return 0;
}

static inline void xskq_produce_flush_desc(struct xsk_queue *q)
{
        /* Order producer and data */
        smp_wmb();

        q->prod_tail = q->prod_head;
        WRITE_ONCE(q->ring->producer, q->prod_tail);
}

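/* Rough sketch of how the helpers above are combined on the Rx path
 * (illustrative only; the real callers live in net/xdp/xsk.c, and
 * "fq"/"rx" are just placeholder names for a fill queue and an Rx
 * queue belonging to the same socket):
 *
 *	u64 addr;
 *	int err;
 *
 *	if (!xskq_peek_addr(fq, &addr))
 *		return -ENOSPC;		// no free buffer available
 *	// ... copy the received frame into the umem chunk at addr ...
 *	err = xskq_produce_batch_desc(rx, addr, len);
 *	if (!err)
 *		xskq_discard_addr(fq);	// buffer handed over to the Rx ring
 *	// later, when the batch is complete:
 *	xskq_produce_flush_desc(rx);	// publish the new descriptors
 */
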
static inline bool xskq_full_desc(struct xsk_queue *q)
{
        return xskq_nb_avail(q, q->nentries) == q->nentries;
}

static inline bool xskq_empty_desc(struct xsk_queue *q)
{
        return xskq_nb_free(q, q->prod_tail, 1) == q->nentries;
}

void xskq_set_umem(struct xsk_queue *q, struct xdp_umem_props *umem_props);
struct xsk_queue *xskq_create(u32 nentries, bool umem_queue);
void xskq_destroy(struct xsk_queue *q_ops);

#endif /* _LINUX_XSK_QUEUE_H */