blob: 1d7d2149163aa0a26eb629e4e6529be66b404a01 [file] [log] [blame]
Alexei Starovoitov68828042017-03-30 21:45:41 -07001/* Copyright (c) 2017 Facebook
2 *
3 * This program is free software; you can redistribute it and/or
4 * modify it under the terms of version 2 of the GNU General Public
5 * License as published by the Free Software Foundation.
6 */
7#include <stdio.h>
8#include <unistd.h>
9#include <errno.h>
10#include <string.h>
11#include <assert.h>
12#include <stdlib.h>
Martin KaFai Lau3a8ad562017-09-27 14:37:56 -070013#include <time.h>
Alexei Starovoitov68828042017-03-30 21:45:41 -070014
15#include <linux/types.h>
16typedef __u16 __sum16;
17#include <arpa/inet.h>
18#include <linux/if_ether.h>
19#include <linux/if_packet.h>
20#include <linux/ip.h>
21#include <linux/ipv6.h>
22#include <linux/tcp.h>
Martin KaFai Lau3a8ad562017-09-27 14:37:56 -070023#include <linux/filter.h>
Yonghong Songd279f1f2017-12-11 11:39:03 -080024#include <linux/perf_event.h>
Martin KaFai Lau3a8ad562017-09-27 14:37:56 -070025#include <linux/unistd.h>
Alexei Starovoitov68828042017-03-30 21:45:41 -070026
Yonghong Songd279f1f2017-12-11 11:39:03 -080027#include <sys/ioctl.h>
Alexei Starovoitov68828042017-03-30 21:45:41 -070028#include <sys/wait.h>
29#include <sys/resource.h>
Martin KaFai Lau95b9afd2017-06-05 12:15:53 -070030#include <sys/types.h>
Martin KaFai Laufad07432017-06-08 22:30:16 -070031#include <fcntl.h>
Alexei Starovoitov68828042017-03-30 21:45:41 -070032
33#include <linux/bpf.h>
34#include <linux/err.h>
35#include <bpf/bpf.h>
36#include <bpf/libbpf.h>
Alexei Starovoitov8d48f5e2017-03-30 21:45:42 -070037#include "test_iptunnel_common.h"
Alexei Starovoitov37821612017-03-30 21:45:43 -070038#include "bpf_util.h"
David S. Millere06422c2017-05-01 12:58:21 -070039#include "bpf_endian.h"
Alexei Starovoitov68828042017-03-30 21:45:41 -070040
/* Global pass/fail tallies: bumped by CHECK() and by the individual
 * tests, summarized by main().  Zero-initialized by static storage.
 */
static int error_cnt;
static int pass_cnt;

/* Payload length advertised in the canned test packets' L3 headers. */
#define MAGIC_BYTES 123
44
Alexei Starovoitov68828042017-03-30 21:45:41 -070045/* ipv4 test vector */
46static struct {
47 struct ethhdr eth;
48 struct iphdr iph;
49 struct tcphdr tcp;
50} __packed pkt_v4 = {
Daniel Borkmann43bcf702017-04-27 01:39:34 +020051 .eth.h_proto = bpf_htons(ETH_P_IP),
Alexei Starovoitov68828042017-03-30 21:45:41 -070052 .iph.ihl = 5,
53 .iph.protocol = 6,
Daniel Borkmann43bcf702017-04-27 01:39:34 +020054 .iph.tot_len = bpf_htons(MAGIC_BYTES),
Alexei Starovoitov68828042017-03-30 21:45:41 -070055 .tcp.urg_ptr = 123,
56};
57
58/* ipv6 test vector */
59static struct {
60 struct ethhdr eth;
61 struct ipv6hdr iph;
62 struct tcphdr tcp;
63} __packed pkt_v6 = {
Daniel Borkmann43bcf702017-04-27 01:39:34 +020064 .eth.h_proto = bpf_htons(ETH_P_IPV6),
Alexei Starovoitov68828042017-03-30 21:45:41 -070065 .iph.nexthdr = 6,
Daniel Borkmann43bcf702017-04-27 01:39:34 +020066 .iph.payload_len = bpf_htons(MAGIC_BYTES),
Alexei Starovoitov68828042017-03-30 21:45:41 -070067 .tcp.urg_ptr = 123,
68};
69
/* Evaluate @condition; on truth, count a failure and print the FAIL tag
 * followed by the caller-supplied printf-style message, otherwise count
 * a pass.  Evaluates to !!(condition) so callers can branch on it.
 * NOTE: expands a reference to a `duration` variable that must exist in
 * the caller's scope (nanoseconds reported on PASS).
 */
#define CHECK(condition, tag, format...) ({			\
	int __ret = !!(condition);				\
	if (!__ret) {						\
		pass_cnt++;					\
		printf("%s:PASS:%s %d nsec\n", __func__, tag, duration);\
	} else {						\
		error_cnt++;					\
		printf("%s:FAIL:%s ", __func__, tag);		\
		printf(format);					\
	}							\
	__ret;							\
})
82
Alexei Starovoitov8d48f5e2017-03-30 21:45:42 -070083static int bpf_find_map(const char *test, struct bpf_object *obj,
84 const char *name)
85{
86 struct bpf_map *map;
87
88 map = bpf_object__find_map_by_name(obj, name);
89 if (!map) {
90 printf("%s:FAIL:map '%s' not found\n", test, name);
91 error_cnt++;
92 return -1;
93 }
94 return bpf_map__fd(map);
95}
96
Alexei Starovoitov68828042017-03-30 21:45:41 -070097static void test_pkt_access(void)
98{
99 const char *file = "./test_pkt_access.o";
100 struct bpf_object *obj;
101 __u32 duration, retval;
102 int err, prog_fd;
103
104 err = bpf_prog_load(file, BPF_PROG_TYPE_SCHED_CLS, &obj, &prog_fd);
John Fastabend6f6d33f2017-08-15 22:34:22 -0700105 if (err) {
106 error_cnt++;
Alexei Starovoitov68828042017-03-30 21:45:41 -0700107 return;
John Fastabend6f6d33f2017-08-15 22:34:22 -0700108 }
Alexei Starovoitov68828042017-03-30 21:45:41 -0700109
110 err = bpf_prog_test_run(prog_fd, 100000, &pkt_v4, sizeof(pkt_v4),
111 NULL, NULL, &retval, &duration);
112 CHECK(err || errno || retval, "ipv4",
113 "err %d errno %d retval %d duration %d\n",
114 err, errno, retval, duration);
115
116 err = bpf_prog_test_run(prog_fd, 100000, &pkt_v6, sizeof(pkt_v6),
117 NULL, NULL, &retval, &duration);
118 CHECK(err || errno || retval, "ipv6",
119 "err %d errno %d retval %d duration %d\n",
120 err, errno, retval, duration);
121 bpf_object__close(obj);
122}
123
Alexei Starovoitov8d48f5e2017-03-30 21:45:42 -0700124static void test_xdp(void)
125{
126 struct vip key4 = {.protocol = 6, .family = AF_INET};
127 struct vip key6 = {.protocol = 6, .family = AF_INET6};
128 struct iptnl_info value4 = {.family = AF_INET};
129 struct iptnl_info value6 = {.family = AF_INET6};
130 const char *file = "./test_xdp.o";
131 struct bpf_object *obj;
132 char buf[128];
133 struct ipv6hdr *iph6 = (void *)buf + sizeof(struct ethhdr);
134 struct iphdr *iph = (void *)buf + sizeof(struct ethhdr);
135 __u32 duration, retval, size;
136 int err, prog_fd, map_fd;
137
138 err = bpf_prog_load(file, BPF_PROG_TYPE_XDP, &obj, &prog_fd);
John Fastabend6f6d33f2017-08-15 22:34:22 -0700139 if (err) {
140 error_cnt++;
Alexei Starovoitov8d48f5e2017-03-30 21:45:42 -0700141 return;
John Fastabend6f6d33f2017-08-15 22:34:22 -0700142 }
Alexei Starovoitov8d48f5e2017-03-30 21:45:42 -0700143
144 map_fd = bpf_find_map(__func__, obj, "vip2tnl");
145 if (map_fd < 0)
146 goto out;
147 bpf_map_update_elem(map_fd, &key4, &value4, 0);
148 bpf_map_update_elem(map_fd, &key6, &value6, 0);
149
150 err = bpf_prog_test_run(prog_fd, 1, &pkt_v4, sizeof(pkt_v4),
151 buf, &size, &retval, &duration);
152
153 CHECK(err || errno || retval != XDP_TX || size != 74 ||
154 iph->protocol != IPPROTO_IPIP, "ipv4",
155 "err %d errno %d retval %d size %d\n",
156 err, errno, retval, size);
157
158 err = bpf_prog_test_run(prog_fd, 1, &pkt_v6, sizeof(pkt_v6),
159 buf, &size, &retval, &duration);
160 CHECK(err || errno || retval != XDP_TX || size != 114 ||
161 iph6->nexthdr != IPPROTO_IPV6, "ipv6",
162 "err %d errno %d retval %d size %d\n",
163 err, errno, retval, size);
164out:
165 bpf_object__close(obj);
166}
167
Alexei Starovoitov37821612017-03-30 21:45:43 -0700168#define MAGIC_VAL 0x1234
169#define NUM_ITER 100000
170#define VIP_NUM 5
171
172static void test_l4lb(void)
173{
174 unsigned int nr_cpus = bpf_num_possible_cpus();
175 const char *file = "./test_l4lb.o";
176 struct vip key = {.protocol = 6};
177 struct vip_meta {
178 __u32 flags;
179 __u32 vip_num;
180 } value = {.vip_num = VIP_NUM};
181 __u32 stats_key = VIP_NUM;
182 struct vip_stats {
183 __u64 bytes;
184 __u64 pkts;
185 } stats[nr_cpus];
186 struct real_definition {
187 union {
188 __be32 dst;
189 __be32 dstv6[4];
190 };
191 __u8 flags;
192 } real_def = {.dst = MAGIC_VAL};
193 __u32 ch_key = 11, real_num = 3;
194 __u32 duration, retval, size;
195 int err, i, prog_fd, map_fd;
196 __u64 bytes = 0, pkts = 0;
197 struct bpf_object *obj;
198 char buf[128];
199 u32 *magic = (u32 *)buf;
200
201 err = bpf_prog_load(file, BPF_PROG_TYPE_SCHED_CLS, &obj, &prog_fd);
John Fastabend6f6d33f2017-08-15 22:34:22 -0700202 if (err) {
203 error_cnt++;
Alexei Starovoitov37821612017-03-30 21:45:43 -0700204 return;
John Fastabend6f6d33f2017-08-15 22:34:22 -0700205 }
Alexei Starovoitov37821612017-03-30 21:45:43 -0700206
207 map_fd = bpf_find_map(__func__, obj, "vip_map");
208 if (map_fd < 0)
209 goto out;
210 bpf_map_update_elem(map_fd, &key, &value, 0);
211
212 map_fd = bpf_find_map(__func__, obj, "ch_rings");
213 if (map_fd < 0)
214 goto out;
215 bpf_map_update_elem(map_fd, &ch_key, &real_num, 0);
216
217 map_fd = bpf_find_map(__func__, obj, "reals");
218 if (map_fd < 0)
219 goto out;
220 bpf_map_update_elem(map_fd, &real_num, &real_def, 0);
221
222 err = bpf_prog_test_run(prog_fd, NUM_ITER, &pkt_v4, sizeof(pkt_v4),
223 buf, &size, &retval, &duration);
224 CHECK(err || errno || retval != 7/*TC_ACT_REDIRECT*/ || size != 54 ||
225 *magic != MAGIC_VAL, "ipv4",
226 "err %d errno %d retval %d size %d magic %x\n",
227 err, errno, retval, size, *magic);
228
229 err = bpf_prog_test_run(prog_fd, NUM_ITER, &pkt_v6, sizeof(pkt_v6),
230 buf, &size, &retval, &duration);
231 CHECK(err || errno || retval != 7/*TC_ACT_REDIRECT*/ || size != 74 ||
232 *magic != MAGIC_VAL, "ipv6",
233 "err %d errno %d retval %d size %d magic %x\n",
234 err, errno, retval, size, *magic);
235
236 map_fd = bpf_find_map(__func__, obj, "stats");
237 if (map_fd < 0)
238 goto out;
239 bpf_map_lookup_elem(map_fd, &stats_key, stats);
240 for (i = 0; i < nr_cpus; i++) {
241 bytes += stats[i].bytes;
242 pkts += stats[i].pkts;
243 }
244 if (bytes != MAGIC_BYTES * NUM_ITER * 2 || pkts != NUM_ITER * 2) {
245 error_cnt++;
246 printf("test_l4lb:FAIL:stats %lld %lld\n", bytes, pkts);
247 }
248out:
249 bpf_object__close(obj);
250}
251
Yonghong Song6ead18f2017-05-02 19:58:14 -0700252static void test_tcp_estats(void)
253{
254 const char *file = "./test_tcp_estats.o";
255 int err, prog_fd;
256 struct bpf_object *obj;
257 __u32 duration = 0;
258
259 err = bpf_prog_load(file, BPF_PROG_TYPE_TRACEPOINT, &obj, &prog_fd);
260 CHECK(err, "", "err %d errno %d\n", err, errno);
John Fastabend6f6d33f2017-08-15 22:34:22 -0700261 if (err) {
262 error_cnt++;
Yonghong Song6ead18f2017-05-02 19:58:14 -0700263 return;
John Fastabend6f6d33f2017-08-15 22:34:22 -0700264 }
Yonghong Song6ead18f2017-05-02 19:58:14 -0700265
266 bpf_object__close(obj);
267}
268
/* Convert a host pointer into the __u64 value expected by the bpf(2)
 * attribute union (pointer fields are carried as u64).
 */
static inline __u64 ptr_to_u64(const void *ptr)
{
	unsigned long uptr = (unsigned long)ptr;

	return (__u64)uptr;
}
273
274static void test_bpf_obj_id(void)
275{
276 const __u64 array_magic_value = 0xfaceb00c;
277 const __u32 array_key = 0;
278 const int nr_iters = 2;
279 const char *file = "./test_obj_id.o";
Martin KaFai Laufad07432017-06-08 22:30:16 -0700280 const char *jit_sysctl = "/proc/sys/net/core/bpf_jit_enable";
Martin KaFai Lau3a8ad562017-09-27 14:37:56 -0700281 const char *expected_prog_name = "test_obj_id";
282 const char *expected_map_name = "test_map_id";
283 const __u64 nsec_per_sec = 1000000000;
Martin KaFai Lau95b9afd2017-06-05 12:15:53 -0700284
285 struct bpf_object *objs[nr_iters];
286 int prog_fds[nr_iters], map_fds[nr_iters];
287 /* +1 to test for the info_len returned by kernel */
288 struct bpf_prog_info prog_infos[nr_iters + 1];
289 struct bpf_map_info map_infos[nr_iters + 1];
Martin KaFai Lau3a8ad562017-09-27 14:37:56 -0700290 /* Each prog only uses one map. +1 to test nr_map_ids
291 * returned by kernel.
292 */
293 __u32 map_ids[nr_iters + 1];
Jakub Kicinski7cadf2c2017-08-25 14:39:57 -0700294 char jited_insns[128], xlated_insns[128], zeros[128];
Martin KaFai Lau95b9afd2017-06-05 12:15:53 -0700295 __u32 i, next_id, info_len, nr_id_found, duration = 0;
Martin KaFai Lau3a8ad562017-09-27 14:37:56 -0700296 struct timespec real_time_ts, boot_time_ts;
Martin KaFai Laufad07432017-06-08 22:30:16 -0700297 int sysctl_fd, jit_enabled = 0, err = 0;
Martin KaFai Lau95b9afd2017-06-05 12:15:53 -0700298 __u64 array_value;
Martin KaFai Lau3a8ad562017-09-27 14:37:56 -0700299 uid_t my_uid = getuid();
300 time_t now, load_time;
Martin KaFai Lau95b9afd2017-06-05 12:15:53 -0700301
Martin KaFai Laufad07432017-06-08 22:30:16 -0700302 sysctl_fd = open(jit_sysctl, 0, O_RDONLY);
303 if (sysctl_fd != -1) {
304 char tmpc;
305
306 if (read(sysctl_fd, &tmpc, sizeof(tmpc)) == 1)
307 jit_enabled = (tmpc != '0');
308 close(sysctl_fd);
309 }
310
Martin KaFai Lau95b9afd2017-06-05 12:15:53 -0700311 err = bpf_prog_get_fd_by_id(0);
312 CHECK(err >= 0 || errno != ENOENT,
313 "get-fd-by-notexist-prog-id", "err %d errno %d\n", err, errno);
314
315 err = bpf_map_get_fd_by_id(0);
316 CHECK(err >= 0 || errno != ENOENT,
317 "get-fd-by-notexist-map-id", "err %d errno %d\n", err, errno);
318
319 for (i = 0; i < nr_iters; i++)
320 objs[i] = NULL;
321
322 /* Check bpf_obj_get_info_by_fd() */
Jakub Kicinski7cadf2c2017-08-25 14:39:57 -0700323 bzero(zeros, sizeof(zeros));
Martin KaFai Lau95b9afd2017-06-05 12:15:53 -0700324 for (i = 0; i < nr_iters; i++) {
Martin KaFai Lau3a8ad562017-09-27 14:37:56 -0700325 now = time(NULL);
Martin KaFai Lau95b9afd2017-06-05 12:15:53 -0700326 err = bpf_prog_load(file, BPF_PROG_TYPE_SOCKET_FILTER,
327 &objs[i], &prog_fds[i]);
328 /* test_obj_id.o is a dumb prog. It should never fail
329 * to load.
330 */
John Fastabend6f6d33f2017-08-15 22:34:22 -0700331 if (err)
332 error_cnt++;
Martin KaFai Lau95b9afd2017-06-05 12:15:53 -0700333 assert(!err);
334
Martin KaFai Lau6e525d02017-09-27 14:37:55 -0700335 /* Insert a magic value to the map */
336 map_fds[i] = bpf_find_map(__func__, objs[i], "test_map_id");
337 assert(map_fds[i] >= 0);
338 err = bpf_map_update_elem(map_fds[i], &array_key,
339 &array_magic_value, 0);
340 assert(!err);
341
342 /* Check getting map info */
343 info_len = sizeof(struct bpf_map_info) * 2;
344 bzero(&map_infos[i], info_len);
345 err = bpf_obj_get_info_by_fd(map_fds[i], &map_infos[i],
346 &info_len);
347 if (CHECK(err ||
348 map_infos[i].type != BPF_MAP_TYPE_ARRAY ||
349 map_infos[i].key_size != sizeof(__u32) ||
350 map_infos[i].value_size != sizeof(__u64) ||
351 map_infos[i].max_entries != 1 ||
352 map_infos[i].map_flags != 0 ||
Martin KaFai Lau3a8ad562017-09-27 14:37:56 -0700353 info_len != sizeof(struct bpf_map_info) ||
354 strcmp((char *)map_infos[i].name, expected_map_name),
Martin KaFai Lau6e525d02017-09-27 14:37:55 -0700355 "get-map-info(fd)",
Martin KaFai Lau3a8ad562017-09-27 14:37:56 -0700356 "err %d errno %d type %d(%d) info_len %u(%lu) key_size %u value_size %u max_entries %u map_flags %X name %s(%s)\n",
Martin KaFai Lau6e525d02017-09-27 14:37:55 -0700357 err, errno,
358 map_infos[i].type, BPF_MAP_TYPE_ARRAY,
359 info_len, sizeof(struct bpf_map_info),
360 map_infos[i].key_size,
361 map_infos[i].value_size,
362 map_infos[i].max_entries,
Martin KaFai Lau3a8ad562017-09-27 14:37:56 -0700363 map_infos[i].map_flags,
364 map_infos[i].name, expected_map_name))
Martin KaFai Lau6e525d02017-09-27 14:37:55 -0700365 goto done;
366
Martin KaFai Lau95b9afd2017-06-05 12:15:53 -0700367 /* Check getting prog info */
368 info_len = sizeof(struct bpf_prog_info) * 2;
Jakub Kicinskid777b2d2017-07-25 15:16:12 -0700369 bzero(&prog_infos[i], info_len);
Jakub Kicinski7cadf2c2017-08-25 14:39:57 -0700370 bzero(jited_insns, sizeof(jited_insns));
371 bzero(xlated_insns, sizeof(xlated_insns));
Martin KaFai Lau95b9afd2017-06-05 12:15:53 -0700372 prog_infos[i].jited_prog_insns = ptr_to_u64(jited_insns);
373 prog_infos[i].jited_prog_len = sizeof(jited_insns);
374 prog_infos[i].xlated_prog_insns = ptr_to_u64(xlated_insns);
375 prog_infos[i].xlated_prog_len = sizeof(xlated_insns);
Martin KaFai Lau3a8ad562017-09-27 14:37:56 -0700376 prog_infos[i].map_ids = ptr_to_u64(map_ids + i);
377 prog_infos[i].nr_map_ids = 2;
378 err = clock_gettime(CLOCK_REALTIME, &real_time_ts);
379 assert(!err);
380 err = clock_gettime(CLOCK_BOOTTIME, &boot_time_ts);
381 assert(!err);
Martin KaFai Lau95b9afd2017-06-05 12:15:53 -0700382 err = bpf_obj_get_info_by_fd(prog_fds[i], &prog_infos[i],
383 &info_len);
Martin KaFai Lau3a8ad562017-09-27 14:37:56 -0700384 load_time = (real_time_ts.tv_sec - boot_time_ts.tv_sec)
385 + (prog_infos[i].load_time / nsec_per_sec);
Martin KaFai Lau95b9afd2017-06-05 12:15:53 -0700386 if (CHECK(err ||
387 prog_infos[i].type != BPF_PROG_TYPE_SOCKET_FILTER ||
388 info_len != sizeof(struct bpf_prog_info) ||
Martin KaFai Laufad07432017-06-08 22:30:16 -0700389 (jit_enabled && !prog_infos[i].jited_prog_len) ||
Jakub Kicinski7cadf2c2017-08-25 14:39:57 -0700390 (jit_enabled &&
391 !memcmp(jited_insns, zeros, sizeof(zeros))) ||
392 !prog_infos[i].xlated_prog_len ||
Martin KaFai Lau3a8ad562017-09-27 14:37:56 -0700393 !memcmp(xlated_insns, zeros, sizeof(zeros)) ||
394 load_time < now - 60 || load_time > now + 60 ||
395 prog_infos[i].created_by_uid != my_uid ||
396 prog_infos[i].nr_map_ids != 1 ||
397 *(int *)prog_infos[i].map_ids != map_infos[i].id ||
398 strcmp((char *)prog_infos[i].name, expected_prog_name),
Martin KaFai Lau95b9afd2017-06-05 12:15:53 -0700399 "get-prog-info(fd)",
Martin KaFai Lau3a8ad562017-09-27 14:37:56 -0700400 "err %d errno %d i %d type %d(%d) info_len %u(%lu) jit_enabled %d jited_prog_len %u xlated_prog_len %u jited_prog %d xlated_prog %d load_time %lu(%lu) uid %u(%u) nr_map_ids %u(%u) map_id %u(%u) name %s(%s)\n",
Martin KaFai Lau95b9afd2017-06-05 12:15:53 -0700401 err, errno, i,
402 prog_infos[i].type, BPF_PROG_TYPE_SOCKET_FILTER,
403 info_len, sizeof(struct bpf_prog_info),
Martin KaFai Laufad07432017-06-08 22:30:16 -0700404 jit_enabled,
Martin KaFai Lau95b9afd2017-06-05 12:15:53 -0700405 prog_infos[i].jited_prog_len,
Jakub Kicinski7cadf2c2017-08-25 14:39:57 -0700406 prog_infos[i].xlated_prog_len,
407 !!memcmp(jited_insns, zeros, sizeof(zeros)),
Martin KaFai Lau3a8ad562017-09-27 14:37:56 -0700408 !!memcmp(xlated_insns, zeros, sizeof(zeros)),
409 load_time, now,
410 prog_infos[i].created_by_uid, my_uid,
411 prog_infos[i].nr_map_ids, 1,
412 *(int *)prog_infos[i].map_ids, map_infos[i].id,
413 prog_infos[i].name, expected_prog_name))
Martin KaFai Lau95b9afd2017-06-05 12:15:53 -0700414 goto done;
Martin KaFai Lau95b9afd2017-06-05 12:15:53 -0700415 }
416
417 /* Check bpf_prog_get_next_id() */
418 nr_id_found = 0;
419 next_id = 0;
420 while (!bpf_prog_get_next_id(next_id, &next_id)) {
Jakub Kicinskid777b2d2017-07-25 15:16:12 -0700421 struct bpf_prog_info prog_info = {};
Martin KaFai Lau3a8ad562017-09-27 14:37:56 -0700422 __u32 saved_map_id;
Martin KaFai Lau95b9afd2017-06-05 12:15:53 -0700423 int prog_fd;
424
425 info_len = sizeof(prog_info);
426
427 prog_fd = bpf_prog_get_fd_by_id(next_id);
428 if (prog_fd < 0 && errno == ENOENT)
429 /* The bpf_prog is in the dead row */
430 continue;
431 if (CHECK(prog_fd < 0, "get-prog-fd(next_id)",
432 "prog_fd %d next_id %d errno %d\n",
433 prog_fd, next_id, errno))
434 break;
435
436 for (i = 0; i < nr_iters; i++)
437 if (prog_infos[i].id == next_id)
438 break;
439
440 if (i == nr_iters)
441 continue;
442
443 nr_id_found++;
444
Martin KaFai Lau3a8ad562017-09-27 14:37:56 -0700445 /* Negative test:
446 * prog_info.nr_map_ids = 1
447 * prog_info.map_ids = NULL
448 */
449 prog_info.nr_map_ids = 1;
450 err = bpf_obj_get_info_by_fd(prog_fd, &prog_info, &info_len);
451 if (CHECK(!err || errno != EFAULT,
452 "get-prog-fd-bad-nr-map-ids", "err %d errno %d(%d)",
453 err, errno, EFAULT))
454 break;
455 bzero(&prog_info, sizeof(prog_info));
456 info_len = sizeof(prog_info);
457
458 saved_map_id = *(int *)(prog_infos[i].map_ids);
459 prog_info.map_ids = prog_infos[i].map_ids;
460 prog_info.nr_map_ids = 2;
Martin KaFai Lau95b9afd2017-06-05 12:15:53 -0700461 err = bpf_obj_get_info_by_fd(prog_fd, &prog_info, &info_len);
Jakub Kicinskid777b2d2017-07-25 15:16:12 -0700462 prog_infos[i].jited_prog_insns = 0;
463 prog_infos[i].xlated_prog_insns = 0;
Martin KaFai Lau95b9afd2017-06-05 12:15:53 -0700464 CHECK(err || info_len != sizeof(struct bpf_prog_info) ||
Martin KaFai Lau3a8ad562017-09-27 14:37:56 -0700465 memcmp(&prog_info, &prog_infos[i], info_len) ||
466 *(int *)prog_info.map_ids != saved_map_id,
Martin KaFai Lau95b9afd2017-06-05 12:15:53 -0700467 "get-prog-info(next_id->fd)",
Martin KaFai Lau3a8ad562017-09-27 14:37:56 -0700468 "err %d errno %d info_len %u(%lu) memcmp %d map_id %u(%u)\n",
Martin KaFai Lau95b9afd2017-06-05 12:15:53 -0700469 err, errno, info_len, sizeof(struct bpf_prog_info),
Martin KaFai Lau3a8ad562017-09-27 14:37:56 -0700470 memcmp(&prog_info, &prog_infos[i], info_len),
471 *(int *)prog_info.map_ids, saved_map_id);
Martin KaFai Lau95b9afd2017-06-05 12:15:53 -0700472 close(prog_fd);
473 }
474 CHECK(nr_id_found != nr_iters,
475 "check total prog id found by get_next_id",
476 "nr_id_found %u(%u)\n",
477 nr_id_found, nr_iters);
478
479 /* Check bpf_map_get_next_id() */
480 nr_id_found = 0;
481 next_id = 0;
482 while (!bpf_map_get_next_id(next_id, &next_id)) {
Jakub Kicinskid777b2d2017-07-25 15:16:12 -0700483 struct bpf_map_info map_info = {};
Martin KaFai Lau95b9afd2017-06-05 12:15:53 -0700484 int map_fd;
485
486 info_len = sizeof(map_info);
487
488 map_fd = bpf_map_get_fd_by_id(next_id);
489 if (map_fd < 0 && errno == ENOENT)
490 /* The bpf_map is in the dead row */
491 continue;
492 if (CHECK(map_fd < 0, "get-map-fd(next_id)",
493 "map_fd %d next_id %u errno %d\n",
494 map_fd, next_id, errno))
495 break;
496
497 for (i = 0; i < nr_iters; i++)
498 if (map_infos[i].id == next_id)
499 break;
500
501 if (i == nr_iters)
502 continue;
503
504 nr_id_found++;
505
506 err = bpf_map_lookup_elem(map_fd, &array_key, &array_value);
507 assert(!err);
508
509 err = bpf_obj_get_info_by_fd(map_fd, &map_info, &info_len);
510 CHECK(err || info_len != sizeof(struct bpf_map_info) ||
511 memcmp(&map_info, &map_infos[i], info_len) ||
512 array_value != array_magic_value,
513 "check get-map-info(next_id->fd)",
514 "err %d errno %d info_len %u(%lu) memcmp %d array_value %llu(%llu)\n",
515 err, errno, info_len, sizeof(struct bpf_map_info),
516 memcmp(&map_info, &map_infos[i], info_len),
517 array_value, array_magic_value);
518
519 close(map_fd);
520 }
521 CHECK(nr_id_found != nr_iters,
522 "check total map id found by get_next_id",
523 "nr_id_found %u(%u)\n",
524 nr_id_found, nr_iters);
525
526done:
527 for (i = 0; i < nr_iters; i++)
528 bpf_object__close(objs[i]);
529}
530
Yonghong Song18f3d6b2017-06-13 15:52:14 -0700531static void test_pkt_md_access(void)
532{
533 const char *file = "./test_pkt_md_access.o";
534 struct bpf_object *obj;
535 __u32 duration, retval;
536 int err, prog_fd;
537
538 err = bpf_prog_load(file, BPF_PROG_TYPE_SCHED_CLS, &obj, &prog_fd);
John Fastabend6f6d33f2017-08-15 22:34:22 -0700539 if (err) {
540 error_cnt++;
Yonghong Song18f3d6b2017-06-13 15:52:14 -0700541 return;
John Fastabend6f6d33f2017-08-15 22:34:22 -0700542 }
Yonghong Song18f3d6b2017-06-13 15:52:14 -0700543
544 err = bpf_prog_test_run(prog_fd, 10, &pkt_v4, sizeof(pkt_v4),
545 NULL, NULL, &retval, &duration);
546 CHECK(err || retval, "",
547 "err %d errno %d retval %d duration %d\n",
548 err, errno, retval, duration);
549
550 bpf_object__close(obj);
551}
552
Martin KaFai Lau3a8ad562017-09-27 14:37:56 -0700553static void test_obj_name(void)
554{
555 struct {
556 const char *name;
557 int success;
558 int expected_errno;
559 } tests[] = {
560 { "", 1, 0 },
561 { "_123456789ABCDE", 1, 0 },
562 { "_123456789ABCDEF", 0, EINVAL },
563 { "_123456789ABCD\n", 0, EINVAL },
564 };
565 struct bpf_insn prog[] = {
566 BPF_ALU64_IMM(BPF_MOV, BPF_REG_0, 0),
567 BPF_EXIT_INSN(),
568 };
569 __u32 duration = 0;
570 int i;
571
572 for (i = 0; i < sizeof(tests) / sizeof(tests[0]); i++) {
573 size_t name_len = strlen(tests[i].name) + 1;
574 union bpf_attr attr;
575 size_t ncopy;
576 int fd;
577
578 /* test different attr.prog_name during BPF_PROG_LOAD */
579 ncopy = name_len < sizeof(attr.prog_name) ?
580 name_len : sizeof(attr.prog_name);
581 bzero(&attr, sizeof(attr));
582 attr.prog_type = BPF_PROG_TYPE_SCHED_CLS;
583 attr.insn_cnt = 2;
584 attr.insns = ptr_to_u64(prog);
585 attr.license = ptr_to_u64("");
586 memcpy(attr.prog_name, tests[i].name, ncopy);
587
588 fd = syscall(__NR_bpf, BPF_PROG_LOAD, &attr, sizeof(attr));
589 CHECK((tests[i].success && fd < 0) ||
590 (!tests[i].success && fd != -1) ||
591 (!tests[i].success && errno != tests[i].expected_errno),
592 "check-bpf-prog-name",
593 "fd %d(%d) errno %d(%d)\n",
594 fd, tests[i].success, errno, tests[i].expected_errno);
595
596 if (fd != -1)
597 close(fd);
598
599 /* test different attr.map_name during BPF_MAP_CREATE */
600 ncopy = name_len < sizeof(attr.map_name) ?
601 name_len : sizeof(attr.map_name);
602 bzero(&attr, sizeof(attr));
603 attr.map_type = BPF_MAP_TYPE_ARRAY;
604 attr.key_size = 4;
605 attr.value_size = 4;
606 attr.max_entries = 1;
607 attr.map_flags = 0;
608 memcpy(attr.map_name, tests[i].name, ncopy);
609 fd = syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
610 CHECK((tests[i].success && fd < 0) ||
611 (!tests[i].success && fd != -1) ||
612 (!tests[i].success && errno != tests[i].expected_errno),
613 "check-bpf-map-name",
614 "fd %d(%d) errno %d(%d)\n",
615 fd, tests[i].success, errno, tests[i].expected_errno);
616
617 if (fd != -1)
618 close(fd);
619 }
620}
621
Yonghong Songd279f1f2017-12-11 11:39:03 -0800622static void test_tp_attach_query(void)
623{
624 const int num_progs = 3;
625 int i, j, bytes, efd, err, prog_fd[num_progs], pmu_fd[num_progs];
626 __u32 duration = 0, info_len, saved_prog_ids[num_progs];
627 const char *file = "./test_tracepoint.o";
628 struct perf_event_query_bpf *query;
629 struct perf_event_attr attr = {};
630 struct bpf_object *obj[num_progs];
631 struct bpf_prog_info prog_info;
632 char buf[256];
633
634 snprintf(buf, sizeof(buf),
635 "/sys/kernel/debug/tracing/events/sched/sched_switch/id");
636 efd = open(buf, O_RDONLY, 0);
637 if (CHECK(efd < 0, "open", "err %d errno %d\n", efd, errno))
638 return;
639 bytes = read(efd, buf, sizeof(buf));
640 close(efd);
641 if (CHECK(bytes <= 0 || bytes >= sizeof(buf),
642 "read", "bytes %d errno %d\n", bytes, errno))
643 return;
644
645 attr.config = strtol(buf, NULL, 0);
646 attr.type = PERF_TYPE_TRACEPOINT;
647 attr.sample_type = PERF_SAMPLE_RAW | PERF_SAMPLE_CALLCHAIN;
648 attr.sample_period = 1;
649 attr.wakeup_events = 1;
650
651 query = malloc(sizeof(*query) + sizeof(__u32) * num_progs);
652 for (i = 0; i < num_progs; i++) {
653 err = bpf_prog_load(file, BPF_PROG_TYPE_TRACEPOINT, &obj[i],
654 &prog_fd[i]);
655 if (CHECK(err, "prog_load", "err %d errno %d\n", err, errno))
656 goto cleanup1;
657
658 bzero(&prog_info, sizeof(prog_info));
659 prog_info.jited_prog_len = 0;
660 prog_info.xlated_prog_len = 0;
661 prog_info.nr_map_ids = 0;
662 info_len = sizeof(prog_info);
663 err = bpf_obj_get_info_by_fd(prog_fd[i], &prog_info, &info_len);
664 if (CHECK(err, "bpf_obj_get_info_by_fd", "err %d errno %d\n",
665 err, errno))
666 goto cleanup1;
667 saved_prog_ids[i] = prog_info.id;
668
669 pmu_fd[i] = syscall(__NR_perf_event_open, &attr, -1 /* pid */,
670 0 /* cpu 0 */, -1 /* group id */,
671 0 /* flags */);
672 if (CHECK(pmu_fd[i] < 0, "perf_event_open", "err %d errno %d\n",
673 pmu_fd[i], errno))
674 goto cleanup2;
675 err = ioctl(pmu_fd[i], PERF_EVENT_IOC_ENABLE, 0);
676 if (CHECK(err, "perf_event_ioc_enable", "err %d errno %d\n",
677 err, errno))
678 goto cleanup3;
679
680 if (i == 0) {
681 /* check NULL prog array query */
682 query->ids_len = num_progs;
683 err = ioctl(pmu_fd[i], PERF_EVENT_IOC_QUERY_BPF, query);
684 if (CHECK(err || query->prog_cnt != 0,
685 "perf_event_ioc_query_bpf",
686 "err %d errno %d query->prog_cnt %u\n",
687 err, errno, query->prog_cnt))
688 goto cleanup3;
689 }
690
691 err = ioctl(pmu_fd[i], PERF_EVENT_IOC_SET_BPF, prog_fd[i]);
692 if (CHECK(err, "perf_event_ioc_set_bpf", "err %d errno %d\n",
693 err, errno))
694 goto cleanup3;
695
696 if (i == 1) {
697 /* try to get # of programs only */
698 query->ids_len = 0;
699 err = ioctl(pmu_fd[i], PERF_EVENT_IOC_QUERY_BPF, query);
700 if (CHECK(err || query->prog_cnt != 2,
701 "perf_event_ioc_query_bpf",
702 "err %d errno %d query->prog_cnt %u\n",
703 err, errno, query->prog_cnt))
704 goto cleanup3;
705
706 /* try a few negative tests */
707 /* invalid query pointer */
708 err = ioctl(pmu_fd[i], PERF_EVENT_IOC_QUERY_BPF,
709 (struct perf_event_query_bpf *)0x1);
710 if (CHECK(!err || errno != EFAULT,
711 "perf_event_ioc_query_bpf",
712 "err %d errno %d\n", err, errno))
713 goto cleanup3;
714
715 /* no enough space */
716 query->ids_len = 1;
717 err = ioctl(pmu_fd[i], PERF_EVENT_IOC_QUERY_BPF, query);
718 if (CHECK(!err || errno != ENOSPC || query->prog_cnt != 2,
719 "perf_event_ioc_query_bpf",
720 "err %d errno %d query->prog_cnt %u\n",
721 err, errno, query->prog_cnt))
722 goto cleanup3;
723 }
724
725 query->ids_len = num_progs;
726 err = ioctl(pmu_fd[i], PERF_EVENT_IOC_QUERY_BPF, query);
727 if (CHECK(err || query->prog_cnt != (i + 1),
728 "perf_event_ioc_query_bpf",
729 "err %d errno %d query->prog_cnt %u\n",
730 err, errno, query->prog_cnt))
731 goto cleanup3;
732 for (j = 0; j < i + 1; j++)
733 if (CHECK(saved_prog_ids[j] != query->ids[j],
734 "perf_event_ioc_query_bpf",
735 "#%d saved_prog_id %x query prog_id %x\n",
736 j, saved_prog_ids[j], query->ids[j]))
737 goto cleanup3;
738 }
739
740 i = num_progs - 1;
741 for (; i >= 0; i--) {
742 cleanup3:
743 ioctl(pmu_fd[i], PERF_EVENT_IOC_DISABLE);
744 cleanup2:
745 close(pmu_fd[i]);
746 cleanup1:
747 bpf_object__close(obj[i]);
748 }
749 free(query);
750}
751
Alexei Starovoitov68828042017-03-30 21:45:41 -0700752int main(void)
753{
754 struct rlimit rinf = { RLIM_INFINITY, RLIM_INFINITY };
755
756 setrlimit(RLIMIT_MEMLOCK, &rinf);
757
758 test_pkt_access();
Alexei Starovoitov8d48f5e2017-03-30 21:45:42 -0700759 test_xdp();
Alexei Starovoitov37821612017-03-30 21:45:43 -0700760 test_l4lb();
Yonghong Song6ead18f2017-05-02 19:58:14 -0700761 test_tcp_estats();
Martin KaFai Lau95b9afd2017-06-05 12:15:53 -0700762 test_bpf_obj_id();
Yonghong Song18f3d6b2017-06-13 15:52:14 -0700763 test_pkt_md_access();
Martin KaFai Lau3a8ad562017-09-27 14:37:56 -0700764 test_obj_name();
Yonghong Songd279f1f2017-12-11 11:39:03 -0800765 test_tp_attach_query();
Alexei Starovoitov68828042017-03-30 21:45:41 -0700766
767 printf("Summary: %d PASSED, %d FAILED\n", pass_cnt, error_cnt);
Jesper Dangaard Brouerefe5f9c2017-06-13 15:17:19 +0200768 return error_cnt ? EXIT_FAILURE : EXIT_SUCCESS;
Alexei Starovoitov68828042017-03-30 21:45:41 -0700769}