// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2017 - 2018 Intel Corporation. */

#include <assert.h>
#include <errno.h>
#include <getopt.h>
#include <libgen.h>
#include <linux/bpf.h>
#include <linux/if_link.h>
#include <linux/if_xdp.h>
#include <linux/if_ether.h>
#include <net/if.h>
#include <signal.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <net/ethernet.h>
#include <sys/resource.h>
#include <sys/socket.h>
#include <sys/mman.h>
#include <time.h>
#include <unistd.h>
#include <pthread.h>
#include <locale.h>
#include <sys/types.h>
#include <poll.h>

#include "bpf/libbpf.h"
#include "bpf_util.h"
#include <bpf/bpf.h>

#include "xdpsock.h"

#ifndef SOL_XDP
#define SOL_XDP 283
#endif

#ifndef AF_XDP
#define AF_XDP 44
#endif

#ifndef PF_XDP
#define PF_XDP AF_XDP
#endif

#define NUM_FRAMES 131072
#define FRAME_HEADROOM 0
#define FRAME_SHIFT 11
#define FRAME_SIZE 2048
#define NUM_DESCS 1024
#define BATCH_SIZE 16

#define FQ_NUM_DESCS 1024
#define CQ_NUM_DESCS 1024

#define DEBUG_HEXDUMP 0

typedef __u64 u64;
typedef __u32 u32;

static unsigned long prev_time;

enum benchmark_type {
	BENCH_RXDROP = 0,
	BENCH_TXONLY = 1,
	BENCH_L2FWD = 2,
};

static enum benchmark_type opt_bench = BENCH_RXDROP;
static u32 opt_xdp_flags;
static const char *opt_if = "";
static int opt_ifindex;
static int opt_queue;
static int opt_poll;
static int opt_shared_packet_buffer;
static int opt_interval = 1;
static u32 opt_xdp_bind_flags;

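/* Userspace shadow of one kernel ring (fill, completion, Rx or Tx). The
 * producer/consumer indices and the descriptor array live in memory
 * mmap()ed from the kernel; cached_prod/cached_cons are local copies
 * that let us batch ring updates instead of touching the shared cache
 * lines on every descriptor.
 */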
struct xdp_umem_uqueue {
	u32 cached_prod;
	u32 cached_cons;
	u32 mask;
	u32 size;
	u32 *producer;
	u32 *consumer;
	u64 *ring;
	void *map;
};

struct xdp_umem {
	char *frames;
	struct xdp_umem_uqueue fq;
	struct xdp_umem_uqueue cq;
	int fd;
};

struct xdp_uqueue {
	u32 cached_prod;
	u32 cached_cons;
	u32 mask;
	u32 size;
	u32 *producer;
	u32 *consumer;
	struct xdp_desc *ring;
	void *map;
};

struct xdpsock {
	struct xdp_uqueue rx;
	struct xdp_uqueue tx;
	int sfd;
	struct xdp_umem *umem;
	u32 outstanding_tx;
	unsigned long rx_npkts;
	unsigned long tx_npkts;
	unsigned long prev_rx_npkts;
	unsigned long prev_tx_npkts;
};

#define MAX_SOCKS 4
static int num_socks;
struct xdpsock *xsks[MAX_SOCKS];

static unsigned long get_nsecs(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return ts.tv_sec * 1000000000UL + ts.tv_nsec;
}

static void dump_stats(void);

#define lassert(expr)							\
	do {								\
		if (!(expr)) {						\
			fprintf(stderr, "%s:%s:%i: Assertion failed: "	\
				#expr ": errno: %d/\"%s\"\n",		\
				__FILE__, __func__, __LINE__,		\
				errno, strerror(errno));		\
			dump_stats();					\
			exit(EXIT_FAILURE);				\
		}							\
	} while (0)

#define barrier() __asm__ __volatile__("": : :"memory")
#ifdef __aarch64__
#define u_smp_rmb() __asm__ __volatile__("dmb ishld": : :"memory")
#define u_smp_wmb() __asm__ __volatile__("dmb ishst": : :"memory")
#else
#define u_smp_rmb() barrier()
#define u_smp_wmb() barrier()
#endif
#define likely(x) __builtin_expect(!!(x), 1)
#define unlikely(x) __builtin_expect(!!(x), 0)

static const char pkt_data[] =
	"\x3c\xfd\xfe\x9e\x7f\x71\xec\xb1\xd7\x98\x3a\xc0\x08\x00\x45\x00"
	"\x00\x2e\x00\x00\x00\x00\x40\x11\x88\x97\x05\x08\x07\x08\xc8\x14"
	"\x1e\x04\x10\x92\x10\x92\x00\x1a\x6d\xa3\x34\x33\x1f\x69\x40\x6b"
	"\x54\x59\xb6\x14\x2d\x11\x44\xbf\xaf\xd9\xbe\xaa";

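/* Ring accounting: the indices are free-running u32 values, so unsigned
 * wraparound makes the subtractions below safe. On the producer side the
 * consumer index is cached with q->size added, so cached_cons -
 * cached_prod directly yields the number of free slots; the shared
 * pointer is only re-read when the cached view runs dry.
 */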
static inline u32 umem_nb_free(struct xdp_umem_uqueue *q, u32 nb)
{
	u32 free_entries = q->cached_cons - q->cached_prod;

	if (free_entries >= nb)
		return free_entries;

	/* Refresh the local tail pointer */
	q->cached_cons = *q->consumer + q->size;

	return q->cached_cons - q->cached_prod;
}

static inline u32 xq_nb_free(struct xdp_uqueue *q, u32 ndescs)
{
	u32 free_entries = q->cached_cons - q->cached_prod;

	if (free_entries >= ndescs)
		return free_entries;

	/* Refresh the local tail pointer */
	q->cached_cons = *q->consumer + q->size;
	return q->cached_cons - q->cached_prod;
}

static inline u32 umem_nb_avail(struct xdp_umem_uqueue *q, u32 nb)
{
	u32 entries = q->cached_prod - q->cached_cons;

	if (entries == 0) {
		q->cached_prod = *q->producer;
		entries = q->cached_prod - q->cached_cons;
	}

	return (entries > nb) ? nb : entries;
}

static inline u32 xq_nb_avail(struct xdp_uqueue *q, u32 ndescs)
{
	u32 entries = q->cached_prod - q->cached_cons;

	if (entries == 0) {
		q->cached_prod = *q->producer;
		entries = q->cached_prod - q->cached_cons;
	}

	return (entries > ndescs) ? ndescs : entries;
}

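/* Producer-side protocol: write the descriptors into the ring, issue a
 * write barrier so the kernel can never observe the new producer index
 * before the descriptor contents, then publish the producer index.
 */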
static inline int umem_fill_to_kernel_ex(struct xdp_umem_uqueue *fq,
					 struct xdp_desc *d,
					 size_t nb)
{
	u32 i;

	if (umem_nb_free(fq, nb) < nb)
		return -ENOSPC;

	for (i = 0; i < nb; i++) {
		u32 idx = fq->cached_prod++ & fq->mask;

		fq->ring[idx] = d[i].addr;
	}

	u_smp_wmb();

	*fq->producer = fq->cached_prod;

	return 0;
}

static inline int umem_fill_to_kernel(struct xdp_umem_uqueue *fq, u64 *d,
				      size_t nb)
{
	u32 i;

	if (umem_nb_free(fq, nb) < nb)
		return -ENOSPC;

	for (i = 0; i < nb; i++) {
		u32 idx = fq->cached_prod++ & fq->mask;

		fq->ring[idx] = d[i];
	}

	u_smp_wmb();

	*fq->producer = fq->cached_prod;

	return 0;
}

static inline size_t umem_complete_from_kernel(struct xdp_umem_uqueue *cq,
					       u64 *d, size_t nb)
{
	u32 idx, i, entries = umem_nb_avail(cq, nb);

	u_smp_rmb();

	for (i = 0; i < entries; i++) {
		idx = cq->cached_cons++ & cq->mask;
		d[i] = cq->ring[idx];
	}

	if (entries > 0) {
		u_smp_wmb();

		*cq->consumer = cq->cached_cons;
	}

	return entries;
}

static inline void *xq_get_data(struct xdpsock *xsk, u64 addr)
{
	return &xsk->umem->frames[addr];
}

static inline int xq_enq(struct xdp_uqueue *uq,
			 const struct xdp_desc *descs,
			 unsigned int ndescs)
{
	struct xdp_desc *r = uq->ring;
	unsigned int i;

	if (xq_nb_free(uq, ndescs) < ndescs)
		return -ENOSPC;

	for (i = 0; i < ndescs; i++) {
		u32 idx = uq->cached_prod++ & uq->mask;

		r[idx].addr = descs[i].addr;
		r[idx].len = descs[i].len;
	}

	u_smp_wmb();

	*uq->producer = uq->cached_prod;
	return 0;
}

static inline int xq_enq_tx_only(struct xdp_uqueue *uq,
				 unsigned int id, unsigned int ndescs)
{
	struct xdp_desc *r = uq->ring;
	unsigned int i;

	if (xq_nb_free(uq, ndescs) < ndescs)
		return -ENOSPC;

	for (i = 0; i < ndescs; i++) {
		u32 idx = uq->cached_prod++ & uq->mask;

		r[idx].addr = (id + i) << FRAME_SHIFT;
		r[idx].len = sizeof(pkt_data) - 1;
	}

	u_smp_wmb();

	*uq->producer = uq->cached_prod;
	return 0;
}

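/* Consumer-side protocol: read how many entries are available, issue a
 * read barrier before touching the descriptors, copy them out, then
 * publish the consumer index so the kernel can reuse the slots.
 */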
static inline int xq_deq(struct xdp_uqueue *uq,
			 struct xdp_desc *descs,
			 int ndescs)
{
	struct xdp_desc *r = uq->ring;
	unsigned int idx;
	int i, entries;

	entries = xq_nb_avail(uq, ndescs);

	u_smp_rmb();

	for (i = 0; i < entries; i++) {
		idx = uq->cached_cons++ & uq->mask;
		descs[i] = r[idx];
	}

	if (entries > 0) {
		u_smp_wmb();

		*uq->consumer = uq->cached_cons;
	}

	return entries;
}

static void swap_mac_addresses(void *data)
{
	struct ether_header *eth = (struct ether_header *)data;
	struct ether_addr *src_addr = (struct ether_addr *)&eth->ether_shost;
	struct ether_addr *dst_addr = (struct ether_addr *)&eth->ether_dhost;
	struct ether_addr tmp;

	tmp = *src_addr;
	*src_addr = *dst_addr;
	*dst_addr = tmp;
}

static void hex_dump(void *pkt, size_t length, u64 addr)
{
	const unsigned char *address = (unsigned char *)pkt;
	const unsigned char *line = address;
	size_t line_size = 32;
	unsigned char c;
	char buf[32];
	int i = 0;

	if (!DEBUG_HEXDUMP)
		return;

	sprintf(buf, "addr=%llu", addr);
	printf("length = %zu\n", length);
	printf("%s | ", buf);
	while (length-- > 0) {
		printf("%02X ", *address++);
		if (!(++i % line_size) || (length == 0 && i % line_size)) {
			if (length == 0) {
				while (i++ % line_size)
					printf("__ ");
			}
			printf(" | ");	/* right close */
			while (line < address) {
				c = *line++;
				printf("%c", (c < 33 || c == 255) ? 0x2E : c);
			}
			printf("\n");
			if (length > 0)
				printf("%s | ", buf);
		}
	}
	printf("\n");
}

static size_t gen_eth_frame(char *frame)
{
	memcpy(frame, pkt_data, sizeof(pkt_data) - 1);
	return sizeof(pkt_data) - 1;
}

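/* Register a page-aligned buffer as UMEM with the kernel, size the fill
 * and completion rings, then mmap() both rings at the offsets the
 * kernel reports via the XDP_MMAP_OFFSETS socket option.
 */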
static struct xdp_umem *xdp_umem_configure(int sfd)
{
	int fq_size = FQ_NUM_DESCS, cq_size = CQ_NUM_DESCS;
	struct xdp_mmap_offsets off;
	struct xdp_umem_reg mr;
	struct xdp_umem *umem;
	socklen_t optlen;
	void *bufs;

	umem = calloc(1, sizeof(*umem));
	lassert(umem);

	lassert(posix_memalign(&bufs, getpagesize(), /* PAGE_SIZE aligned */
			       NUM_FRAMES * FRAME_SIZE) == 0);

	mr.addr = (__u64)bufs;
	mr.len = NUM_FRAMES * FRAME_SIZE;
	mr.chunk_size = FRAME_SIZE;
	mr.headroom = FRAME_HEADROOM;

	lassert(setsockopt(sfd, SOL_XDP, XDP_UMEM_REG, &mr, sizeof(mr)) == 0);
	lassert(setsockopt(sfd, SOL_XDP, XDP_UMEM_FILL_RING, &fq_size,
			   sizeof(int)) == 0);
	lassert(setsockopt(sfd, SOL_XDP, XDP_UMEM_COMPLETION_RING, &cq_size,
			   sizeof(int)) == 0);

	optlen = sizeof(off);
	lassert(getsockopt(sfd, SOL_XDP, XDP_MMAP_OFFSETS, &off,
			   &optlen) == 0);

	umem->fq.map = mmap(0, off.fr.desc +
			    FQ_NUM_DESCS * sizeof(u64),
			    PROT_READ | PROT_WRITE,
			    MAP_SHARED | MAP_POPULATE, sfd,
			    XDP_UMEM_PGOFF_FILL_RING);
	lassert(umem->fq.map != MAP_FAILED);

	umem->fq.mask = FQ_NUM_DESCS - 1;
	umem->fq.size = FQ_NUM_DESCS;
	umem->fq.producer = umem->fq.map + off.fr.producer;
	umem->fq.consumer = umem->fq.map + off.fr.consumer;
	umem->fq.ring = umem->fq.map + off.fr.desc;
	umem->fq.cached_cons = FQ_NUM_DESCS;

	umem->cq.map = mmap(0, off.cr.desc +
			    CQ_NUM_DESCS * sizeof(u64),
			    PROT_READ | PROT_WRITE,
			    MAP_SHARED | MAP_POPULATE, sfd,
			    XDP_UMEM_PGOFF_COMPLETION_RING);
	lassert(umem->cq.map != MAP_FAILED);

	umem->cq.mask = CQ_NUM_DESCS - 1;
	umem->cq.size = CQ_NUM_DESCS;
	umem->cq.producer = umem->cq.map + off.cr.producer;
	umem->cq.consumer = umem->cq.map + off.cr.consumer;
	umem->cq.ring = umem->cq.map + off.cr.desc;

	umem->frames = bufs;
	umem->fd = sfd;

	if (opt_bench == BENCH_TXONLY) {
		int i;

		for (i = 0; i < NUM_FRAMES * FRAME_SIZE; i += FRAME_SIZE)
			(void)gen_eth_frame(&umem->frames[i]);
	}

	return umem;
}

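/* Create an AF_XDP socket: set up (or share) the UMEM, size and mmap()
 * the Rx and Tx rings, pre-populate the fill ring so the kernel has
 * frames to receive into, and finally bind() to the chosen
 * interface/queue pair.
 */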
static struct xdpsock *xsk_configure(struct xdp_umem *umem)
{
	struct sockaddr_xdp sxdp = {};
	struct xdp_mmap_offsets off;
	int sfd, ndescs = NUM_DESCS;
	struct xdpsock *xsk;
	bool shared = true;
	socklen_t optlen;
	u64 i;

	sfd = socket(PF_XDP, SOCK_RAW, 0);
	lassert(sfd >= 0);

	xsk = calloc(1, sizeof(*xsk));
	lassert(xsk);

	xsk->sfd = sfd;
	xsk->outstanding_tx = 0;

	if (!umem) {
		shared = false;
		xsk->umem = xdp_umem_configure(sfd);
	} else {
		xsk->umem = umem;
	}

	lassert(setsockopt(sfd, SOL_XDP, XDP_RX_RING,
			   &ndescs, sizeof(int)) == 0);
	lassert(setsockopt(sfd, SOL_XDP, XDP_TX_RING,
			   &ndescs, sizeof(int)) == 0);
	optlen = sizeof(off);
	lassert(getsockopt(sfd, SOL_XDP, XDP_MMAP_OFFSETS, &off,
			   &optlen) == 0);

	/* Rx */
	xsk->rx.map = mmap(NULL,
			   off.rx.desc +
			   NUM_DESCS * sizeof(struct xdp_desc),
			   PROT_READ | PROT_WRITE,
			   MAP_SHARED | MAP_POPULATE, sfd,
			   XDP_PGOFF_RX_RING);
	lassert(xsk->rx.map != MAP_FAILED);

	if (!shared) {
		for (i = 0; i < NUM_DESCS * FRAME_SIZE; i += FRAME_SIZE)
			lassert(umem_fill_to_kernel(&xsk->umem->fq, &i, 1)
				== 0);
	}

	/* Tx */
	xsk->tx.map = mmap(NULL,
			   off.tx.desc +
			   NUM_DESCS * sizeof(struct xdp_desc),
			   PROT_READ | PROT_WRITE,
			   MAP_SHARED | MAP_POPULATE, sfd,
			   XDP_PGOFF_TX_RING);
	lassert(xsk->tx.map != MAP_FAILED);

	xsk->rx.mask = NUM_DESCS - 1;
	xsk->rx.size = NUM_DESCS;
	xsk->rx.producer = xsk->rx.map + off.rx.producer;
	xsk->rx.consumer = xsk->rx.map + off.rx.consumer;
	xsk->rx.ring = xsk->rx.map + off.rx.desc;

	xsk->tx.mask = NUM_DESCS - 1;
	xsk->tx.size = NUM_DESCS;
	xsk->tx.producer = xsk->tx.map + off.tx.producer;
	xsk->tx.consumer = xsk->tx.map + off.tx.consumer;
	xsk->tx.ring = xsk->tx.map + off.tx.desc;
	xsk->tx.cached_cons = NUM_DESCS;

	sxdp.sxdp_family = PF_XDP;
	sxdp.sxdp_ifindex = opt_ifindex;
	sxdp.sxdp_queue_id = opt_queue;

	if (shared) {
		sxdp.sxdp_flags = XDP_SHARED_UMEM;
		sxdp.sxdp_shared_umem_fd = umem->fd;
	} else {
		sxdp.sxdp_flags = opt_xdp_bind_flags;
	}

	lassert(bind(sfd, (struct sockaddr *)&sxdp, sizeof(sxdp)) == 0);

	return xsk;
}

static void print_benchmark(bool running)
{
	const char *bench_str = "INVALID";

	if (opt_bench == BENCH_RXDROP)
		bench_str = "rxdrop";
	else if (opt_bench == BENCH_TXONLY)
		bench_str = "txonly";
	else if (opt_bench == BENCH_L2FWD)
		bench_str = "l2fwd";

	printf("%s:%d %s ", opt_if, opt_queue, bench_str);
	if (opt_xdp_flags & XDP_FLAGS_SKB_MODE)
		printf("xdp-skb ");
	else if (opt_xdp_flags & XDP_FLAGS_DRV_MODE)
		printf("xdp-drv ");
	else
		printf(" ");

	if (opt_poll)
		printf("poll() ");

	if (running) {
		printf("running...");
		fflush(stdout);
	}
}

static void dump_stats(void)
{
	unsigned long now = get_nsecs();
	long dt = now - prev_time;
	int i;

	prev_time = now;

	for (i = 0; i < num_socks; i++) {
		char *fmt = "%-15s %'-11.0f %'-11lu\n";
		double rx_pps, tx_pps;

		rx_pps = (xsks[i]->rx_npkts - xsks[i]->prev_rx_npkts) *
			 1000000000. / dt;
		tx_pps = (xsks[i]->tx_npkts - xsks[i]->prev_tx_npkts) *
			 1000000000. / dt;

		printf("\n sock%d@", i);
		print_benchmark(false);
		printf("\n");

		printf("%-15s %-11s %-11s %-11.2f\n", "", "pps", "pkts",
		       dt / 1000000000.);
		printf(fmt, "rx", rx_pps, xsks[i]->rx_npkts);
		printf(fmt, "tx", tx_pps, xsks[i]->tx_npkts);

		xsks[i]->prev_rx_npkts = xsks[i]->rx_npkts;
		xsks[i]->prev_tx_npkts = xsks[i]->tx_npkts;
	}
}

static void *poller(void *arg)
{
	(void)arg;
	for (;;) {
		sleep(opt_interval);
		dump_stats();
	}

	return NULL;
}

static void int_exit(int sig)
{
	(void)sig;
	dump_stats();
	bpf_set_link_xdp_fd(opt_ifindex, -1, opt_xdp_flags);
	exit(EXIT_SUCCESS);
}

static struct option long_options[] = {
	{"rxdrop", no_argument, 0, 'r'},
	{"txonly", no_argument, 0, 't'},
	{"l2fwd", no_argument, 0, 'l'},
	{"interface", required_argument, 0, 'i'},
	{"queue", required_argument, 0, 'q'},
	{"poll", no_argument, 0, 'p'},
	{"shared-buffer", no_argument, 0, 's'},
	{"xdp-skb", no_argument, 0, 'S'},
	{"xdp-native", no_argument, 0, 'N'},
	{"interval", required_argument, 0, 'n'},
	{"zero-copy", no_argument, 0, 'z'},
	{"copy", no_argument, 0, 'c'},
	{0, 0, 0, 0}
};

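/* Typical invocations (a sketch; the binary name comes from the build,
 * normally "xdpsock" for this sample):
 *
 *   xdpsock -i eth0 -q 0 -r -N   # drop all Rx on eth0 queue 0, native XDP
 *   xdpsock -i eth0 -t -S -p     # transmit-only in skb mode, using poll()
 */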
static void usage(const char *prog)
{
	const char *str =
		"  Usage: %s [OPTIONS]\n"
		"  Options:\n"
		"  -r, --rxdrop		Discard all incoming packets (default)\n"
		"  -t, --txonly		Only send packets\n"
		"  -l, --l2fwd		MAC swap L2 forwarding\n"
		"  -i, --interface=n	Run on interface n\n"
		"  -q, --queue=n	Use queue n (default 0)\n"
		"  -p, --poll		Use poll syscall\n"
		"  -s, --shared-buffer	Use shared packet buffer\n"
		"  -S, --xdp-skb	Use XDP skb-mode\n"
		"  -N, --xdp-native	Enforce XDP native mode\n"
		"  -n, --interval=n	Specify statistics update interval (default 1 sec).\n"
		"  -z, --zero-copy	Force zero-copy mode.\n"
		"  -c, --copy		Force copy mode.\n"
		"\n";
	fprintf(stderr, str, prog);
	exit(EXIT_FAILURE);
}

static void parse_command_line(int argc, char **argv)
{
	int option_index, c;

	opterr = 0;

	for (;;) {
		c = getopt_long(argc, argv, "rtli:q:psSNn:cz", long_options,
				&option_index);
		if (c == -1)
			break;

		switch (c) {
		case 'r':
			opt_bench = BENCH_RXDROP;
			break;
		case 't':
			opt_bench = BENCH_TXONLY;
			break;
		case 'l':
			opt_bench = BENCH_L2FWD;
			break;
		case 'i':
			opt_if = optarg;
			break;
		case 'q':
			opt_queue = atoi(optarg);
			break;
		case 's':
			opt_shared_packet_buffer = 1;
			break;
		case 'p':
			opt_poll = 1;
			break;
		case 'S':
			opt_xdp_flags |= XDP_FLAGS_SKB_MODE;
			opt_xdp_bind_flags |= XDP_COPY;
			break;
		case 'N':
			opt_xdp_flags |= XDP_FLAGS_DRV_MODE;
			break;
		case 'n':
			opt_interval = atoi(optarg);
			break;
		case 'z':
			opt_xdp_bind_flags |= XDP_ZEROCOPY;
			break;
		case 'c':
			opt_xdp_bind_flags |= XDP_COPY;
			break;
		default:
			usage(basename(argv[0]));
		}
	}

	opt_ifindex = if_nametoindex(opt_if);
	if (!opt_ifindex) {
		fprintf(stderr, "ERROR: interface \"%s\" does not exist\n",
			opt_if);
		usage(basename(argv[0]));
	}
}

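/* Nudge the kernel to start transmitting whatever is on the Tx ring.
 * A zero-length, non-blocking sendto() acts as the AF_XDP doorbell;
 * ENOBUFS/EAGAIN/EBUSY are transient and simply mean "try again later".
 */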
static void kick_tx(int fd)
{
	int ret;

	ret = sendto(fd, NULL, 0, MSG_DONTWAIT, NULL, 0);
	if (ret >= 0 || errno == ENOBUFS || errno == EAGAIN || errno == EBUSY)
		return;
	lassert(0);
}

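/* Reap Tx completions. In the l2fwd case every completed frame is
 * recycled straight back onto the fill ring so it can be received into
 * again; in the tx-only case the frame addresses are reused by the
 * caller, so completions only need to be counted.
 */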
static inline void complete_tx_l2fwd(struct xdpsock *xsk)
{
	u64 descs[BATCH_SIZE];
	unsigned int rcvd;
	size_t ndescs;

	if (!xsk->outstanding_tx)
		return;

	kick_tx(xsk->sfd);
	ndescs = (xsk->outstanding_tx > BATCH_SIZE) ? BATCH_SIZE :
		 xsk->outstanding_tx;

	/* re-add completed Tx buffers */
	rcvd = umem_complete_from_kernel(&xsk->umem->cq, descs, ndescs);
	if (rcvd > 0) {
		umem_fill_to_kernel(&xsk->umem->fq, descs, rcvd);
		xsk->outstanding_tx -= rcvd;
		xsk->tx_npkts += rcvd;
	}
}

static inline void complete_tx_only(struct xdpsock *xsk)
{
	u64 descs[BATCH_SIZE];
	unsigned int rcvd;

	if (!xsk->outstanding_tx)
		return;

	kick_tx(xsk->sfd);

	rcvd = umem_complete_from_kernel(&xsk->umem->cq, descs, BATCH_SIZE);
	if (rcvd > 0) {
		xsk->outstanding_tx -= rcvd;
		xsk->tx_npkts += rcvd;
	}
}

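/* rxdrop datapath: dequeue a batch from the Rx ring, optionally hex
 * dump the payloads, and hand the frames straight back to the kernel
 * via the fill ring.
 */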
static void rx_drop(struct xdpsock *xsk)
{
	struct xdp_desc descs[BATCH_SIZE];
	unsigned int rcvd, i;

	rcvd = xq_deq(&xsk->rx, descs, BATCH_SIZE);
	if (!rcvd)
		return;

	for (i = 0; i < rcvd; i++) {
		char *pkt = xq_get_data(xsk, descs[i].addr);

		hex_dump(pkt, descs[i].len, descs[i].addr);
	}

	xsk->rx_npkts += rcvd;

	umem_fill_to_kernel_ex(&xsk->umem->fq, descs, rcvd);
}

static void rx_drop_all(void)
{
	struct pollfd fds[MAX_SOCKS + 1];
	int i, ret, timeout, nfds = 1;

	memset(fds, 0, sizeof(fds));

	for (i = 0; i < num_socks; i++) {
		fds[i].fd = xsks[i]->sfd;
		fds[i].events = POLLIN;
		timeout = 1000; /* 1 second */
	}

	for (;;) {
		if (opt_poll) {
			ret = poll(fds, nfds, timeout);
			if (ret <= 0)
				continue;
		}

		for (i = 0; i < num_socks; i++)
			rx_drop(xsks[i]);
	}
}

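/* txonly datapath: keep the Tx ring topped up with BATCH_SIZE
 * pre-generated frames (walking through the UMEM frame by frame) and
 * reap completions as they come back.
 */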
static void tx_only(struct xdpsock *xsk)
{
	int timeout, ret, nfds = 1;
	struct pollfd fds[nfds + 1];
	unsigned int idx = 0;

	memset(fds, 0, sizeof(fds));
	fds[0].fd = xsk->sfd;
	fds[0].events = POLLOUT;
	timeout = 1000; /* 1 second */

	for (;;) {
		if (opt_poll) {
			ret = poll(fds, nfds, timeout);
			if (ret <= 0)
				continue;

			if (fds[0].fd != xsk->sfd ||
			    !(fds[0].revents & POLLOUT))
				continue;
		}

		if (xq_nb_free(&xsk->tx, BATCH_SIZE) >= BATCH_SIZE) {
			lassert(xq_enq_tx_only(&xsk->tx, idx, BATCH_SIZE) == 0);

			xsk->outstanding_tx += BATCH_SIZE;
			idx += BATCH_SIZE;
			idx %= NUM_FRAMES;
		}

		complete_tx_only(xsk);
	}
}

static void l2fwd(struct xdpsock *xsk)
{
	for (;;) {
		struct xdp_desc descs[BATCH_SIZE];
		unsigned int rcvd, i;
		int ret;

		for (;;) {
			complete_tx_l2fwd(xsk);

			rcvd = xq_deq(&xsk->rx, descs, BATCH_SIZE);
			if (rcvd > 0)
				break;
		}

		for (i = 0; i < rcvd; i++) {
			char *pkt = xq_get_data(xsk, descs[i].addr);

			swap_mac_addresses(pkt);

			hex_dump(pkt, descs[i].len, descs[i].addr);
		}

		xsk->rx_npkts += rcvd;

		ret = xq_enq(&xsk->tx, descs, rcvd);
		lassert(ret == 0);
		xsk->outstanding_tx += rcvd;
	}
}

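/* Setup flow: raise RLIMIT_MEMLOCK for the UMEM, load the companion
 * *_kern.o XDP program, attach it to the interface, point the qidconf
 * map at the chosen queue, create the socket(s), and publish their fds
 * in the xsks map so the XDP program can redirect into them.
 */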
int main(int argc, char **argv)
{
	struct rlimit r = {RLIM_INFINITY, RLIM_INFINITY};
	struct bpf_prog_load_attr prog_load_attr = {
		.prog_type	= BPF_PROG_TYPE_XDP,
	};
	int prog_fd, qidconf_map, xsks_map;
	struct bpf_object *obj;
	char xdp_filename[256];
	struct bpf_map *map;
	int i, ret, key = 0;
	pthread_t pt;

	parse_command_line(argc, argv);

	if (setrlimit(RLIMIT_MEMLOCK, &r)) {
		fprintf(stderr, "ERROR: setrlimit(RLIMIT_MEMLOCK) \"%s\"\n",
			strerror(errno));
		exit(EXIT_FAILURE);
	}

	snprintf(xdp_filename, sizeof(xdp_filename), "%s_kern.o", argv[0]);
	prog_load_attr.file = xdp_filename;

	if (bpf_prog_load_xattr(&prog_load_attr, &obj, &prog_fd))
		exit(EXIT_FAILURE);
	if (prog_fd < 0) {
		fprintf(stderr, "ERROR: no program found: %s\n",
			strerror(prog_fd));
		exit(EXIT_FAILURE);
	}

	map = bpf_object__find_map_by_name(obj, "qidconf_map");
	qidconf_map = bpf_map__fd(map);
	if (qidconf_map < 0) {
		fprintf(stderr, "ERROR: no qidconf map found: %s\n",
			strerror(qidconf_map));
		exit(EXIT_FAILURE);
	}

	map = bpf_object__find_map_by_name(obj, "xsks_map");
	xsks_map = bpf_map__fd(map);
	if (xsks_map < 0) {
		fprintf(stderr, "ERROR: no xsks map found: %s\n",
			strerror(xsks_map));
		exit(EXIT_FAILURE);
	}

	if (bpf_set_link_xdp_fd(opt_ifindex, prog_fd, opt_xdp_flags) < 0) {
		fprintf(stderr, "ERROR: link set xdp fd failed\n");
		exit(EXIT_FAILURE);
	}

	ret = bpf_map_update_elem(qidconf_map, &key, &opt_queue, 0);
	if (ret) {
		fprintf(stderr, "ERROR: bpf_map_update_elem qidconf\n");
		exit(EXIT_FAILURE);
	}

	/* Create sockets... */
	xsks[num_socks++] = xsk_configure(NULL);

#if RR_LB
	for (i = 0; i < MAX_SOCKS - 1; i++)
		xsks[num_socks++] = xsk_configure(xsks[0]->umem);
#endif

	/* ...and insert them into the map. */
	for (i = 0; i < num_socks; i++) {
		key = i;
		ret = bpf_map_update_elem(xsks_map, &key, &xsks[i]->sfd, 0);
		if (ret) {
			fprintf(stderr, "ERROR: bpf_map_update_elem %d\n", i);
			exit(EXIT_FAILURE);
		}
	}

	signal(SIGINT, int_exit);
	signal(SIGTERM, int_exit);
	signal(SIGABRT, int_exit);

	setlocale(LC_ALL, "");

	ret = pthread_create(&pt, NULL, poller, NULL);
	lassert(ret == 0);

	prev_time = get_nsecs();

	if (opt_bench == BENCH_RXDROP)
		rx_drop_all();
	else if (opt_bench == BENCH_TXONLY)
		tx_only(xsks[0]);
	else
		l2fwd(xsks[0]);

	return 0;
}