blob: 6ebfdee3e2c6d67f509708789a9cc0a3ad1e9e7d [file] [log] [blame]
Wang Nanba1fae42015-11-06 13:49:43 +00001#include <stdio.h>
2#include <sys/epoll.h>
3#include <util/bpf-loader.h>
4#include <util/evlist.h>
5#include "tests.h"
6#include "llvm.h"
7#include "debug.h"
8#define NR_ITERS 111
9
10#ifdef HAVE_LIBBPF_SUPPORT
11
12static int epoll_pwait_loop(void)
13{
14 int i;
15
16 /* Should fail NR_ITERS times */
17 for (i = 0; i < NR_ITERS; i++)
18 epoll_pwait(-(i + 1), NULL, 0, 0, NULL);
19 return 0;
20}
21
/*
 * Table of BPF test cases.  Each entry picks an LLVM-built BPF object
 * (selected by prog_id through test_llvm__fetch_bpf_obj()), a workload
 * function to run while the object is loaded, and the number of samples
 * expected to pass the filter.
 */
static struct {
	enum test_llvm__testcase prog_id;	/* which 'perf test LLVM' object to fetch */
	const char *desc;			/* human-readable description of the case */
	const char *name;			/* name the BPF object is loaded under */
	const char *msg_compile_fail;		/* hint printed when fetching/compiling fails */
	const char *msg_load_fail;		/* hint for load failures (unused in this view — verify) */
	int (*target_func)(void);		/* workload traced while the filter is armed */
	int expect_result;			/* expected PERF_RECORD_SAMPLE count */
} bpf_testcase_table[] = {
	{
		LLVM_TESTCASE_BASE,
		"Test basic BPF filtering",
		"[basic_bpf_test]",
		"fix 'perf test LLVM' first",
		"load bpf object failed",
		&epoll_pwait_loop,
		/* half of the NR_ITERS calls, rounded up */
		(NR_ITERS + 1) / 2,
	},
};
41
42static int do_test(struct bpf_object *obj, int (*func)(void),
43 int expect)
44{
45 struct record_opts opts = {
46 .target = {
47 .uid = UINT_MAX,
48 .uses_mmap = true,
49 },
50 .freq = 0,
51 .mmap_pages = 256,
52 .default_interval = 1,
53 };
54
55 char pid[16];
56 char sbuf[STRERR_BUFSIZE];
57 struct perf_evlist *evlist;
58 int i, ret = TEST_FAIL, err = 0, count = 0;
59
60 struct parse_events_evlist parse_evlist;
61 struct parse_events_error parse_error;
62
63 bzero(&parse_error, sizeof(parse_error));
64 bzero(&parse_evlist, sizeof(parse_evlist));
65 parse_evlist.error = &parse_error;
66 INIT_LIST_HEAD(&parse_evlist.list);
67
68 err = parse_events_load_bpf_obj(&parse_evlist, &parse_evlist.list, obj);
69 if (err || list_empty(&parse_evlist.list)) {
70 pr_debug("Failed to add events selected by BPF\n");
71 if (!err)
72 return TEST_FAIL;
73 }
74
75 snprintf(pid, sizeof(pid), "%d", getpid());
76 pid[sizeof(pid) - 1] = '\0';
77 opts.target.tid = opts.target.pid = pid;
78
79 /* Instead of perf_evlist__new_default, don't add default events */
80 evlist = perf_evlist__new();
81 if (!evlist) {
82 pr_debug("No ehough memory to create evlist\n");
83 return TEST_FAIL;
84 }
85
86 err = perf_evlist__create_maps(evlist, &opts.target);
87 if (err < 0) {
88 pr_debug("Not enough memory to create thread/cpu maps\n");
89 goto out_delete_evlist;
90 }
91
92 perf_evlist__splice_list_tail(evlist, &parse_evlist.list);
93 evlist->nr_groups = parse_evlist.nr_groups;
94
95 perf_evlist__config(evlist, &opts);
96
97 err = perf_evlist__open(evlist);
98 if (err < 0) {
99 pr_debug("perf_evlist__open: %s\n",
100 strerror_r(errno, sbuf, sizeof(sbuf)));
101 goto out_delete_evlist;
102 }
103
104 err = perf_evlist__mmap(evlist, opts.mmap_pages, false);
105 if (err < 0) {
106 pr_debug("perf_evlist__mmap: %s\n",
107 strerror_r(errno, sbuf, sizeof(sbuf)));
108 goto out_delete_evlist;
109 }
110
111 perf_evlist__enable(evlist);
112 (*func)();
113 perf_evlist__disable(evlist);
114
115 for (i = 0; i < evlist->nr_mmaps; i++) {
116 union perf_event *event;
117
118 while ((event = perf_evlist__mmap_read(evlist, i)) != NULL) {
119 const u32 type = event->header.type;
120
121 if (type == PERF_RECORD_SAMPLE)
122 count ++;
123 }
124 }
125
126 if (count != expect)
127 pr_debug("BPF filter result incorrect\n");
128
129 ret = TEST_OK;
130
131out_delete_evlist:
132 perf_evlist__delete(evlist);
133 return ret;
134}
135
/*
 * Wrap bpf__prepare_load_buffer(): build a bpf_object named @name from the
 * in-memory object image @obj_buf of @obj_buf_sz bytes.  Returns NULL (and
 * logs) on failure instead of an ERR_PTR, so callers can test with '!obj'.
 */
static struct bpf_object *
prepare_bpf(void *obj_buf, size_t obj_buf_sz, const char *name)
{
	struct bpf_object *obj;

	obj = bpf__prepare_load_buffer(obj_buf, obj_buf_sz, name);
	if (!IS_ERR(obj))
		return obj;

	pr_debug("Compile BPF program failed.\n");
	return NULL;
}
148
Arnaldo Carvalho de Melo916d4092015-11-18 17:38:49 -0300149static int __test__bpf(int idx)
Wang Nanba1fae42015-11-06 13:49:43 +0000150{
151 int ret;
152 void *obj_buf;
153 size_t obj_buf_sz;
154 struct bpf_object *obj;
155
156 ret = test_llvm__fetch_bpf_obj(&obj_buf, &obj_buf_sz,
Arnaldo Carvalho de Melo916d4092015-11-18 17:38:49 -0300157 bpf_testcase_table[idx].prog_id,
Wang Nanba1fae42015-11-06 13:49:43 +0000158 true);
159 if (ret != TEST_OK || !obj_buf || !obj_buf_sz) {
160 pr_debug("Unable to get BPF object, %s\n",
Arnaldo Carvalho de Melo916d4092015-11-18 17:38:49 -0300161 bpf_testcase_table[idx].msg_compile_fail);
162 if (idx == 0)
Wang Nanba1fae42015-11-06 13:49:43 +0000163 return TEST_SKIP;
164 else
165 return TEST_FAIL;
166 }
167
168 obj = prepare_bpf(obj_buf, obj_buf_sz,
Arnaldo Carvalho de Melo916d4092015-11-18 17:38:49 -0300169 bpf_testcase_table[idx].name);
Wang Nanba1fae42015-11-06 13:49:43 +0000170 if (!obj) {
171 ret = TEST_FAIL;
172 goto out;
173 }
174
175 ret = do_test(obj,
Arnaldo Carvalho de Melo916d4092015-11-18 17:38:49 -0300176 bpf_testcase_table[idx].target_func,
177 bpf_testcase_table[idx].expect_result);
Wang Nanba1fae42015-11-06 13:49:43 +0000178out:
179 bpf__clear();
180 return ret;
181}
182
183int test__bpf(void)
184{
185 unsigned int i;
186 int err;
187
188 if (geteuid() != 0) {
189 pr_debug("Only root can run BPF test\n");
190 return TEST_SKIP;
191 }
192
193 for (i = 0; i < ARRAY_SIZE(bpf_testcase_table); i++) {
194 err = __test__bpf(i);
195
196 if (err != TEST_OK)
197 return err;
198 }
199
200 return TEST_OK;
201}
202
203#else
/* Stub used when perf is built without libbpf: report the test as skipped. */
int test__bpf(void)
{
	pr_debug("Skip BPF test because BPF support is not compiled\n");
	return TEST_SKIP;
}
209#endif