// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2017 - 2018 Intel Corporation. */

#include <assert.h>
#include <errno.h>
#include <getopt.h>
#include <libgen.h>
#include <linux/bpf.h>
#include <linux/if_link.h>
#include <linux/if_xdp.h>
#include <linux/if_ether.h>
#include <net/if.h>
#include <signal.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <net/ethernet.h>
#include <sys/resource.h>
#include <sys/socket.h>
#include <sys/mman.h>
#include <time.h>
#include <unistd.h>
#include <pthread.h>
#include <locale.h>
#include <sys/types.h>
#include <poll.h>

#include "bpf_load.h"
#include "bpf_util.h"
#include <bpf/bpf.h>

#include "xdpsock.h"

#ifndef SOL_XDP
#define SOL_XDP 283
#endif

#ifndef AF_XDP
#define AF_XDP 44
#endif

#ifndef PF_XDP
#define PF_XDP AF_XDP
#endif

#define NUM_FRAMES 131072
#define FRAME_HEADROOM 0
#define FRAME_SHIFT 11
#define FRAME_SIZE 2048
#define NUM_DESCS 1024
#define BATCH_SIZE 16

#define FQ_NUM_DESCS 1024
#define CQ_NUM_DESCS 1024

#define DEBUG_HEXDUMP 0

typedef __u64 u64;
typedef __u32 u32;

static unsigned long prev_time;

enum benchmark_type {
	BENCH_RXDROP = 0,
	BENCH_TXONLY = 1,
	BENCH_L2FWD = 2,
};

static enum benchmark_type opt_bench = BENCH_RXDROP;
static u32 opt_xdp_flags;
static const char *opt_if = "";
static int opt_ifindex;
static int opt_queue;
static int opt_poll;
static int opt_shared_packet_buffer;
static int opt_interval = 1;

struct xdp_umem_uqueue {
	u32 cached_prod;
	u32 cached_cons;
	u32 mask;
	u32 size;
	u32 *producer;
	u32 *consumer;
	u64 *ring;
	void *map;
};

struct xdp_umem {
	char *frames;
	struct xdp_umem_uqueue fq;
	struct xdp_umem_uqueue cq;
	int fd;
};

struct xdp_uqueue {
	u32 cached_prod;
	u32 cached_cons;
	u32 mask;
	u32 size;
	u32 *producer;
	u32 *consumer;
	struct xdp_desc *ring;
	void *map;
};

struct xdpsock {
	struct xdp_uqueue rx;
	struct xdp_uqueue tx;
	int sfd;
	struct xdp_umem *umem;
	u32 outstanding_tx;
	unsigned long rx_npkts;
	unsigned long tx_npkts;
	unsigned long prev_rx_npkts;
	unsigned long prev_tx_npkts;
};

#define MAX_SOCKS 4
static int num_socks;
struct xdpsock *xsks[MAX_SOCKS];

static unsigned long get_nsecs(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return ts.tv_sec * 1000000000UL + ts.tv_nsec;
}

static void dump_stats(void);

#define lassert(expr)							\
	do {								\
		if (!(expr)) {						\
			fprintf(stderr, "%s:%s:%i: Assertion failed: "	\
				#expr ": errno: %d/\"%s\"\n",		\
				__FILE__, __func__, __LINE__,		\
				errno, strerror(errno));		\
			dump_stats();					\
			exit(EXIT_FAILURE);				\
		}							\
	} while (0)

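/* The rings shared with the kernel are single-producer/single-consumer.
 * The barriers below are compiler barriers only: this sample assumes a
 * strongly ordered CPU such as x86, where that is enough to order the
 * descriptor reads/writes against the producer/consumer index updates.
 * Weakly ordered architectures would need real memory barriers here.
 */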
#define barrier() __asm__ __volatile__("": : :"memory")
#define u_smp_rmb() barrier()
#define u_smp_wmb() barrier()
#define likely(x) __builtin_expect(!!(x), 1)
#define unlikely(x) __builtin_expect(!!(x), 0)

static const char pkt_data[] =
	"\x3c\xfd\xfe\x9e\x7f\x71\xec\xb1\xd7\x98\x3a\xc0\x08\x00\x45\x00"
	"\x00\x2e\x00\x00\x00\x00\x40\x11\x88\x97\x05\x08\x07\x08\xc8\x14"
	"\x1e\x04\x10\x92\x10\x92\x00\x1a\x6d\xa3\x34\x33\x1f\x69\x40\x6b"
	"\x54\x59\xb6\x14\x2d\x11\x44\xbf\xaf\xd9\xbe\xaa";

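/* The queues keep cached copies of the shared producer/consumer indices
 * and only re-read the shared values when the cached view is exhausted.
 * On the producer side, q->size is folded into cached_cons so that
 * cached_cons - cached_prod directly yields the number of free slots.
 */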
static inline u32 umem_nb_free(struct xdp_umem_uqueue *q, u32 nb)
{
	u32 free_entries = q->cached_cons - q->cached_prod;

	if (free_entries >= nb)
		return free_entries;

	/* Refresh the local tail pointer */
	q->cached_cons = *q->consumer + q->size;

	return q->cached_cons - q->cached_prod;
}

static inline u32 xq_nb_free(struct xdp_uqueue *q, u32 ndescs)
{
	u32 free_entries = q->cached_cons - q->cached_prod;

	if (free_entries >= ndescs)
		return free_entries;

	/* Refresh the local tail pointer */
	q->cached_cons = *q->consumer + q->size;
	return q->cached_cons - q->cached_prod;
}

static inline u32 umem_nb_avail(struct xdp_umem_uqueue *q, u32 nb)
{
	u32 entries = q->cached_prod - q->cached_cons;

	if (entries == 0) {
		q->cached_prod = *q->producer;
		entries = q->cached_prod - q->cached_cons;
	}

	return (entries > nb) ? nb : entries;
}

static inline u32 xq_nb_avail(struct xdp_uqueue *q, u32 ndescs)
{
	u32 entries = q->cached_prod - q->cached_cons;

	if (entries == 0) {
		q->cached_prod = *q->producer;
		entries = q->cached_prod - q->cached_cons;
	}

	return (entries > ndescs) ? ndescs : entries;
}

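/* Post frame addresses to the fill ring: write the descriptors first,
 * then a write barrier, and only then publish the new producer index so
 * the kernel never sees uninitialized ring entries.
 */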
static inline int umem_fill_to_kernel_ex(struct xdp_umem_uqueue *fq,
					 struct xdp_desc *d,
					 size_t nb)
{
	u32 i;

	if (umem_nb_free(fq, nb) < nb)
		return -ENOSPC;

	for (i = 0; i < nb; i++) {
		u32 idx = fq->cached_prod++ & fq->mask;

		fq->ring[idx] = d[i].addr;
	}

	u_smp_wmb();

	*fq->producer = fq->cached_prod;

	return 0;
}

static inline int umem_fill_to_kernel(struct xdp_umem_uqueue *fq, u64 *d,
				      size_t nb)
{
	u32 i;

	if (umem_nb_free(fq, nb) < nb)
		return -ENOSPC;

	for (i = 0; i < nb; i++) {
		u32 idx = fq->cached_prod++ & fq->mask;

		fq->ring[idx] = d[i];
	}

	u_smp_wmb();

	*fq->producer = fq->cached_prod;

	return 0;
}

static inline size_t umem_complete_from_kernel(struct xdp_umem_uqueue *cq,
					       u64 *d, size_t nb)
{
	u32 idx, i, entries = umem_nb_avail(cq, nb);

	u_smp_rmb();

	for (i = 0; i < entries; i++) {
		idx = cq->cached_cons++ & cq->mask;
		d[i] = cq->ring[idx];
	}

	if (entries > 0) {
		u_smp_wmb();

		*cq->consumer = cq->cached_cons;
	}

	return entries;
}

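/* A UMEM descriptor address is an offset into the frame buffer area, so
 * translating it to a virtual address is a single array lookup.
 */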
static inline void *xq_get_data(struct xdpsock *xsk, u64 addr)
{
	return &xsk->umem->frames[addr];
}

static inline int xq_enq(struct xdp_uqueue *uq,
			 const struct xdp_desc *descs,
			 unsigned int ndescs)
{
	struct xdp_desc *r = uq->ring;
	unsigned int i;

	if (xq_nb_free(uq, ndescs) < ndescs)
		return -ENOSPC;

	for (i = 0; i < ndescs; i++) {
		u32 idx = uq->cached_prod++ & uq->mask;

		r[idx].addr = descs[i].addr;
		r[idx].len = descs[i].len;
	}

	u_smp_wmb();

	*uq->producer = uq->cached_prod;
	return 0;
}

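/* Tx-only variant: descriptor addresses are synthesized from a running
 * frame index, (id + i) << FRAME_SHIFT, pointing at the pre-generated
 * frames in the UMEM, and every descriptor carries the same test packet
 * length.
 */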
static inline int xq_enq_tx_only(struct xdp_uqueue *uq,
				 unsigned int id, unsigned int ndescs)
{
	struct xdp_desc *r = uq->ring;
	unsigned int i;

	if (xq_nb_free(uq, ndescs) < ndescs)
		return -ENOSPC;

	for (i = 0; i < ndescs; i++) {
		u32 idx = uq->cached_prod++ & uq->mask;

		r[idx].addr = (id + i) << FRAME_SHIFT;
		r[idx].len = sizeof(pkt_data) - 1;
	}

	u_smp_wmb();

	*uq->producer = uq->cached_prod;
	return 0;
}

static inline int xq_deq(struct xdp_uqueue *uq,
			 struct xdp_desc *descs,
			 int ndescs)
{
	struct xdp_desc *r = uq->ring;
	unsigned int idx;
	int i, entries;

	entries = xq_nb_avail(uq, ndescs);

	u_smp_rmb();

	for (i = 0; i < entries; i++) {
		idx = uq->cached_cons++ & uq->mask;
		descs[i] = r[idx];
	}

	if (entries > 0) {
		u_smp_wmb();

		*uq->consumer = uq->cached_cons;
	}

	return entries;
}

static void swap_mac_addresses(void *data)
{
	struct ether_header *eth = (struct ether_header *)data;
	struct ether_addr *src_addr = (struct ether_addr *)&eth->ether_shost;
	struct ether_addr *dst_addr = (struct ether_addr *)&eth->ether_dhost;
	struct ether_addr tmp;

	tmp = *src_addr;
	*src_addr = *dst_addr;
	*dst_addr = tmp;
}

static void hex_dump(void *pkt, size_t length, u64 addr)
{
	const unsigned char *address = (unsigned char *)pkt;
	const unsigned char *line = address;
	size_t line_size = 32;
	unsigned char c;
	char buf[32];
	int i = 0;

	if (!DEBUG_HEXDUMP)
		return;

	sprintf(buf, "addr=%llu", addr);
	printf("length = %zu\n", length);
	printf("%s | ", buf);
	while (length-- > 0) {
		printf("%02X ", *address++);
		if (!(++i % line_size) || (length == 0 && i % line_size)) {
			if (length == 0) {
				while (i++ % line_size)
					printf("__ ");
			}
			printf(" | ");	/* right close */
			while (line < address) {
				c = *line++;
				printf("%c", (c < 33 || c == 255) ? 0x2E : c);
			}
			printf("\n");
			if (length > 0)
				printf("%s | ", buf);
		}
	}
	printf("\n");
}

static size_t gen_eth_frame(char *frame)
{
	memcpy(frame, pkt_data, sizeof(pkt_data) - 1);
	return sizeof(pkt_data) - 1;
}

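/* UMEM setup: register a page-aligned buffer with XDP_UMEM_REG, size
 * the fill and completion rings via setsockopt(), query the ring mmap
 * offsets with XDP_MMAP_OFFSETS, and mmap() both rings.  The fill
 * ring's cached consumer starts at FQ_NUM_DESCS (see umem_nb_free()),
 * since every slot of an empty fill ring is free to the producer.
 */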
static struct xdp_umem *xdp_umem_configure(int sfd)
{
	int fq_size = FQ_NUM_DESCS, cq_size = CQ_NUM_DESCS;
	struct xdp_mmap_offsets off;
	struct xdp_umem_reg mr;
	struct xdp_umem *umem;
	socklen_t optlen;
	void *bufs;

	umem = calloc(1, sizeof(*umem));
	lassert(umem);

	lassert(posix_memalign(&bufs, getpagesize(), /* PAGE_SIZE aligned */
			       NUM_FRAMES * FRAME_SIZE) == 0);

	mr.addr = (__u64)bufs;
	mr.len = NUM_FRAMES * FRAME_SIZE;
	mr.chunk_size = FRAME_SIZE;
	mr.headroom = FRAME_HEADROOM;

	lassert(setsockopt(sfd, SOL_XDP, XDP_UMEM_REG, &mr, sizeof(mr)) == 0);
	lassert(setsockopt(sfd, SOL_XDP, XDP_UMEM_FILL_RING, &fq_size,
			   sizeof(int)) == 0);
	lassert(setsockopt(sfd, SOL_XDP, XDP_UMEM_COMPLETION_RING, &cq_size,
			   sizeof(int)) == 0);

	optlen = sizeof(off);
	lassert(getsockopt(sfd, SOL_XDP, XDP_MMAP_OFFSETS, &off,
			   &optlen) == 0);

	umem->fq.map = mmap(0, off.fr.desc +
			    FQ_NUM_DESCS * sizeof(u64),
			    PROT_READ | PROT_WRITE,
			    MAP_SHARED | MAP_POPULATE, sfd,
			    XDP_UMEM_PGOFF_FILL_RING);
	lassert(umem->fq.map != MAP_FAILED);

	umem->fq.mask = FQ_NUM_DESCS - 1;
	umem->fq.size = FQ_NUM_DESCS;
	umem->fq.producer = umem->fq.map + off.fr.producer;
	umem->fq.consumer = umem->fq.map + off.fr.consumer;
	umem->fq.ring = umem->fq.map + off.fr.desc;
	umem->fq.cached_cons = FQ_NUM_DESCS;

	umem->cq.map = mmap(0, off.cr.desc +
			    CQ_NUM_DESCS * sizeof(u64),
			    PROT_READ | PROT_WRITE,
			    MAP_SHARED | MAP_POPULATE, sfd,
			    XDP_UMEM_PGOFF_COMPLETION_RING);
	lassert(umem->cq.map != MAP_FAILED);

	umem->cq.mask = CQ_NUM_DESCS - 1;
	umem->cq.size = CQ_NUM_DESCS;
	umem->cq.producer = umem->cq.map + off.cr.producer;
	umem->cq.consumer = umem->cq.map + off.cr.consumer;
	umem->cq.ring = umem->cq.map + off.cr.desc;

	umem->frames = bufs;
	umem->fd = sfd;

	if (opt_bench == BENCH_TXONLY) {
		int i;

		for (i = 0; i < NUM_FRAMES * FRAME_SIZE; i += FRAME_SIZE)
			(void)gen_eth_frame(&umem->frames[i]);
	}

	return umem;
}

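/* Socket setup: create an AF_XDP socket, attach a UMEM (or share the
 * one passed in), size and mmap the Rx/Tx rings, then bind() to the
 * chosen interface and queue.  Sharing sockets bind with
 * XDP_SHARED_UMEM and the fd of the socket owning the UMEM.
 */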
static struct xdpsock *xsk_configure(struct xdp_umem *umem)
{
	struct sockaddr_xdp sxdp = {};
	struct xdp_mmap_offsets off;
	int sfd, ndescs = NUM_DESCS;
	struct xdpsock *xsk;
	bool shared = true;
	socklen_t optlen;
	u64 i;

	sfd = socket(PF_XDP, SOCK_RAW, 0);
	lassert(sfd >= 0);

	xsk = calloc(1, sizeof(*xsk));
	lassert(xsk);

	xsk->sfd = sfd;
	xsk->outstanding_tx = 0;

	if (!umem) {
		shared = false;
		xsk->umem = xdp_umem_configure(sfd);
	} else {
		xsk->umem = umem;
	}

	lassert(setsockopt(sfd, SOL_XDP, XDP_RX_RING,
			   &ndescs, sizeof(int)) == 0);
	lassert(setsockopt(sfd, SOL_XDP, XDP_TX_RING,
			   &ndescs, sizeof(int)) == 0);
	optlen = sizeof(off);
	lassert(getsockopt(sfd, SOL_XDP, XDP_MMAP_OFFSETS, &off,
			   &optlen) == 0);

	/* Rx */
	xsk->rx.map = mmap(NULL,
			   off.rx.desc +
			   NUM_DESCS * sizeof(struct xdp_desc),
			   PROT_READ | PROT_WRITE,
			   MAP_SHARED | MAP_POPULATE, sfd,
			   XDP_PGOFF_RX_RING);
	lassert(xsk->rx.map != MAP_FAILED);

	if (!shared) {
		for (i = 0; i < NUM_DESCS * FRAME_SIZE; i += FRAME_SIZE)
			lassert(umem_fill_to_kernel(&xsk->umem->fq, &i, 1)
				== 0);
	}

	/* Tx */
	xsk->tx.map = mmap(NULL,
			   off.tx.desc +
			   NUM_DESCS * sizeof(struct xdp_desc),
			   PROT_READ | PROT_WRITE,
			   MAP_SHARED | MAP_POPULATE, sfd,
			   XDP_PGOFF_TX_RING);
	lassert(xsk->tx.map != MAP_FAILED);

	xsk->rx.mask = NUM_DESCS - 1;
	xsk->rx.size = NUM_DESCS;
	xsk->rx.producer = xsk->rx.map + off.rx.producer;
	xsk->rx.consumer = xsk->rx.map + off.rx.consumer;
	xsk->rx.ring = xsk->rx.map + off.rx.desc;

	xsk->tx.mask = NUM_DESCS - 1;
	xsk->tx.size = NUM_DESCS;
	xsk->tx.producer = xsk->tx.map + off.tx.producer;
	xsk->tx.consumer = xsk->tx.map + off.tx.consumer;
	xsk->tx.ring = xsk->tx.map + off.tx.desc;
	xsk->tx.cached_cons = NUM_DESCS;

	sxdp.sxdp_family = PF_XDP;
	sxdp.sxdp_ifindex = opt_ifindex;
	sxdp.sxdp_queue_id = opt_queue;
	if (shared) {
		sxdp.sxdp_flags = XDP_SHARED_UMEM;
		sxdp.sxdp_shared_umem_fd = umem->fd;
	}

	lassert(bind(sfd, (struct sockaddr *)&sxdp, sizeof(sxdp)) == 0);

	return xsk;
}

static void print_benchmark(bool running)
{
	const char *bench_str = "INVALID";

	if (opt_bench == BENCH_RXDROP)
		bench_str = "rxdrop";
	else if (opt_bench == BENCH_TXONLY)
		bench_str = "txonly";
	else if (opt_bench == BENCH_L2FWD)
		bench_str = "l2fwd";

	printf("%s:%d %s ", opt_if, opt_queue, bench_str);
	if (opt_xdp_flags & XDP_FLAGS_SKB_MODE)
		printf("xdp-skb ");
	else if (opt_xdp_flags & XDP_FLAGS_DRV_MODE)
		printf("xdp-drv ");
	else
		printf("	");

	if (opt_poll)
		printf("poll() ");

	if (running) {
		printf("running...");
		fflush(stdout);
	}
}

static void dump_stats(void)
{
	unsigned long now = get_nsecs();
	long dt = now - prev_time;
	int i;

	prev_time = now;

	for (i = 0; i < num_socks; i++) {
		char *fmt = "%-15s %'-11.0f %'-11lu\n";
		double rx_pps, tx_pps;

		rx_pps = (xsks[i]->rx_npkts - xsks[i]->prev_rx_npkts) *
			 1000000000. / dt;
		tx_pps = (xsks[i]->tx_npkts - xsks[i]->prev_tx_npkts) *
			 1000000000. / dt;

		printf("\n sock%d@", i);
		print_benchmark(false);
		printf("\n");

		printf("%-15s %-11s %-11s %-11.2f\n", "", "pps", "pkts",
		       dt / 1000000000.);
		printf(fmt, "rx", rx_pps, xsks[i]->rx_npkts);
		printf(fmt, "tx", tx_pps, xsks[i]->tx_npkts);

		xsks[i]->prev_rx_npkts = xsks[i]->rx_npkts;
		xsks[i]->prev_tx_npkts = xsks[i]->tx_npkts;
	}
}

static void *poller(void *arg)
{
	(void)arg;
	for (;;) {
		sleep(opt_interval);
		dump_stats();
	}

	return NULL;
}

static void int_exit(int sig)
{
	(void)sig;
	dump_stats();
	bpf_set_link_xdp_fd(opt_ifindex, -1, opt_xdp_flags);
	exit(EXIT_SUCCESS);
}

static struct option long_options[] = {
	{"rxdrop", no_argument, 0, 'r'},
	{"txonly", no_argument, 0, 't'},
	{"l2fwd", no_argument, 0, 'l'},
	{"interface", required_argument, 0, 'i'},
	{"queue", required_argument, 0, 'q'},
	{"poll", no_argument, 0, 'p'},
	{"shared-buffer", no_argument, 0, 's'},
	{"xdp-skb", no_argument, 0, 'S'},
	{"xdp-native", no_argument, 0, 'N'},
	{"interval", required_argument, 0, 'n'},
	{0, 0, 0, 0}
};

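/* Note: -s/--shared-buffer sets opt_shared_packet_buffer, but nothing
 * reads it; UMEM sharing across sockets is instead compiled in via the
 * RR_LB define in main().
 */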
static void usage(const char *prog)
{
	const char *str =
		"  Usage: %s [OPTIONS]\n"
		"  Options:\n"
		"  -r, --rxdrop		Discard all incoming packets (default)\n"
		"  -t, --txonly		Only send packets\n"
		"  -l, --l2fwd		MAC swap L2 forwarding\n"
		"  -i, --interface=n	Run on interface n\n"
		"  -q, --queue=n	Use queue n (default 0)\n"
		"  -p, --poll		Use poll syscall\n"
		"  -s, --shared-buffer	Use shared packet buffer\n"
658 " -S, --xdp-skb=n Use XDP skb-mod\n"
659 " -N, --xdp-native=n Enfore XDP native mode\n"
660 " -n, --interval=n Specify statistics update interval (default 1 sec).\n"
661 "\n";
662 fprintf(stderr, str, prog);
663 exit(EXIT_FAILURE);
664}
665
666static void parse_command_line(int argc, char **argv)
667{
668 int option_index, c;
669
670 opterr = 0;
671
672 for (;;) {
673 c = getopt_long(argc, argv, "rtli:q:psSNn:", long_options,
674 &option_index);
675 if (c == -1)
676 break;
677
678 switch (c) {
679 case 'r':
680 opt_bench = BENCH_RXDROP;
681 break;
682 case 't':
683 opt_bench = BENCH_TXONLY;
684 break;
685 case 'l':
686 opt_bench = BENCH_L2FWD;
687 break;
688 case 'i':
689 opt_if = optarg;
690 break;
691 case 'q':
692 opt_queue = atoi(optarg);
693 break;
694 case 's':
695 opt_shared_packet_buffer = 1;
696 break;
697 case 'p':
698 opt_poll = 1;
699 break;
700 case 'S':
701 opt_xdp_flags |= XDP_FLAGS_SKB_MODE;
702 break;
703 case 'N':
704 opt_xdp_flags |= XDP_FLAGS_DRV_MODE;
705 break;
706 case 'n':
707 opt_interval = atoi(optarg);
708 break;
709 default:
710 usage(basename(argv[0]));
711 }
712 }
713
714 opt_ifindex = if_nametoindex(opt_if);
715 if (!opt_ifindex) {
716 fprintf(stderr, "ERROR: interface \"%s\" does not exist\n",
717 opt_if);
718 usage(basename(argv[0]));
719 }
720}
721
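/* Kick the kernel Tx path with a zero-length non-blocking sendto();
 * ENOBUFS and EAGAIN just mean the kernel is busy and the kick can be
 * retried on the next pass, so they are not treated as errors.
 */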
static void kick_tx(int fd)
{
	int ret;

	ret = sendto(fd, NULL, 0, MSG_DONTWAIT, NULL, 0);
	if (ret >= 0 || errno == ENOBUFS || errno == EAGAIN)
		return;
	lassert(0);
}

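/* l2fwd completion: frames the kernel has finished transmitting are
 * drained from the completion ring and immediately recycled into the
 * fill ring so they can be used for reception again.
 */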
static inline void complete_tx_l2fwd(struct xdpsock *xsk)
{
	u64 descs[BATCH_SIZE];
	unsigned int rcvd;
	size_t ndescs;

	if (!xsk->outstanding_tx)
		return;

	kick_tx(xsk->sfd);
	ndescs = (xsk->outstanding_tx > BATCH_SIZE) ? BATCH_SIZE :
		 xsk->outstanding_tx;

	/* re-add completed Tx buffers */
	rcvd = umem_complete_from_kernel(&xsk->umem->cq, descs, ndescs);
	if (rcvd > 0) {
		umem_fill_to_kernel(&xsk->umem->fq, descs, rcvd);
		xsk->outstanding_tx -= rcvd;
		xsk->tx_npkts += rcvd;
	}
}

static inline void complete_tx_only(struct xdpsock *xsk)
{
	u64 descs[BATCH_SIZE];
	unsigned int rcvd;

	if (!xsk->outstanding_tx)
		return;

	kick_tx(xsk->sfd);

	rcvd = umem_complete_from_kernel(&xsk->umem->cq, descs, BATCH_SIZE);
	if (rcvd > 0) {
		xsk->outstanding_tx -= rcvd;
		xsk->tx_npkts += rcvd;
	}
}

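/* rxdrop data path: dequeue up to BATCH_SIZE Rx descriptors, hex dump
 * the payloads if DEBUG_HEXDUMP is set, and hand the frames straight
 * back to the kernel via the fill ring.
 */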
static void rx_drop(struct xdpsock *xsk)
{
	struct xdp_desc descs[BATCH_SIZE];
	unsigned int rcvd, i;

	rcvd = xq_deq(&xsk->rx, descs, BATCH_SIZE);
	if (!rcvd)
		return;

	for (i = 0; i < rcvd; i++) {
		char *pkt = xq_get_data(xsk, descs[i].addr);

		hex_dump(pkt, descs[i].len, descs[i].addr);
	}

	xsk->rx_npkts += rcvd;

	umem_fill_to_kernel_ex(&xsk->umem->fq, descs, rcvd);
}

static void rx_drop_all(void)
{
	struct pollfd fds[MAX_SOCKS + 1];
	int i, ret, timeout, nfds = 1;

	memset(fds, 0, sizeof(fds));

	for (i = 0; i < num_socks; i++) {
		fds[i].fd = xsks[i]->sfd;
		fds[i].events = POLLIN;
		timeout = 1000; /* 1 second */
	}

	for (;;) {
		if (opt_poll) {
			ret = poll(fds, nfds, timeout);
			if (ret <= 0)
				continue;
		}

		for (i = 0; i < num_socks; i++)
			rx_drop(xsks[i]);
	}
}

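/* txonly data path: whenever a full batch of Tx slots is free, enqueue
 * BATCH_SIZE descriptors that cycle through the pre-generated frames,
 * then reap completions.  With --poll, wait for POLLOUT first.
 */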
static void tx_only(struct xdpsock *xsk)
{
	int timeout, ret, nfds = 1;
	struct pollfd fds[nfds + 1];
	unsigned int idx = 0;

	memset(fds, 0, sizeof(fds));
	fds[0].fd = xsk->sfd;
	fds[0].events = POLLOUT;
	timeout = 1000; /* 1 second */

	for (;;) {
		if (opt_poll) {
			ret = poll(fds, nfds, timeout);
			if (ret <= 0)
				continue;

			if (fds[0].fd != xsk->sfd ||
			    !(fds[0].revents & POLLOUT))
				continue;
		}

		if (xq_nb_free(&xsk->tx, BATCH_SIZE) >= BATCH_SIZE) {
			lassert(xq_enq_tx_only(&xsk->tx, idx, BATCH_SIZE) == 0);

			xsk->outstanding_tx += BATCH_SIZE;
			idx += BATCH_SIZE;
			idx %= NUM_FRAMES;
		}

		complete_tx_only(xsk);
	}
}

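/* l2fwd data path: busy-poll until a batch of frames arrives (reaping
 * Tx completions while waiting), swap the source and destination MAC
 * addresses in place, and enqueue the same frames on the Tx ring.
 */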
static void l2fwd(struct xdpsock *xsk)
{
	for (;;) {
		struct xdp_desc descs[BATCH_SIZE];
		unsigned int rcvd, i;
		int ret;

		for (;;) {
			complete_tx_l2fwd(xsk);

			rcvd = xq_deq(&xsk->rx, descs, BATCH_SIZE);
			if (rcvd > 0)
				break;
		}

		for (i = 0; i < rcvd; i++) {
			char *pkt = xq_get_data(xsk, descs[i].addr);

			swap_mac_addresses(pkt);

			hex_dump(pkt, descs[i].len, descs[i].addr);
		}

		xsk->rx_npkts += rcvd;

		ret = xq_enq(&xsk->tx, descs, rcvd);
		lassert(ret == 0);
		xsk->outstanding_tx += rcvd;
	}
}

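/* main: raise RLIMIT_MEMLOCK for the UMEM allocation, load and attach
 * the XDP program (<prog>_kern.o), point the qidconf map at the chosen
 * queue, create the AF_XDP socket(s), publish their fds in the socket
 * map the XDP program redirects into, and run the selected benchmark.
 */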
int main(int argc, char **argv)
{
	struct rlimit r = {RLIM_INFINITY, RLIM_INFINITY};
	char xdp_filename[256];
	int i, ret, key = 0;
	pthread_t pt;

	parse_command_line(argc, argv);

	if (setrlimit(RLIMIT_MEMLOCK, &r)) {
		fprintf(stderr, "ERROR: setrlimit(RLIMIT_MEMLOCK) \"%s\"\n",
			strerror(errno));
		exit(EXIT_FAILURE);
	}

	snprintf(xdp_filename, sizeof(xdp_filename), "%s_kern.o", argv[0]);

	if (load_bpf_file(xdp_filename)) {
		fprintf(stderr, "ERROR: load_bpf_file %s\n", bpf_log_buf);
		exit(EXIT_FAILURE);
	}

	if (!prog_fd[0]) {
		fprintf(stderr, "ERROR: load_bpf_file: \"%s\"\n",
			strerror(errno));
		exit(EXIT_FAILURE);
	}

	if (bpf_set_link_xdp_fd(opt_ifindex, prog_fd[0], opt_xdp_flags) < 0) {
		fprintf(stderr, "ERROR: link set xdp fd failed\n");
		exit(EXIT_FAILURE);
	}

	ret = bpf_map_update_elem(map_fd[0], &key, &opt_queue, 0);
	if (ret) {
		fprintf(stderr, "ERROR: bpf_map_update_elem qidconf\n");
		exit(EXIT_FAILURE);
	}

	/* Create sockets... */
	xsks[num_socks++] = xsk_configure(NULL);

#if RR_LB
	for (i = 0; i < MAX_SOCKS - 1; i++)
		xsks[num_socks++] = xsk_configure(xsks[0]->umem);
#endif

	/* ...and insert them into the map. */
	for (i = 0; i < num_socks; i++) {
		key = i;
		ret = bpf_map_update_elem(map_fd[1], &key, &xsks[i]->sfd, 0);
		if (ret) {
			fprintf(stderr, "ERROR: bpf_map_update_elem %d\n", i);
			exit(EXIT_FAILURE);
		}
	}

	signal(SIGINT, int_exit);
	signal(SIGTERM, int_exit);
	signal(SIGABRT, int_exit);

	setlocale(LC_ALL, "");

	ret = pthread_create(&pt, NULL, poller, NULL);
	lassert(ret == 0);

	prev_time = get_nsecs();

	if (opt_bench == BENCH_RXDROP)
		rx_drop_all();
	else if (opt_bench == BENCH_TXONLY)
		tx_only(xsks[0]);
	else
		l2fwd(xsks[0]);

	return 0;
}