blob: 0da8e2c09426eceb34cdc7aeffcd15890124da57 [file] [log] [blame]
Brenden Blanco246b9422015-06-05 11:15:27 -07001/*
2 * Copyright (c) 2015 PLUMgrid, Inc.
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
Colin Ian Kinga12db192017-07-06 13:58:17 +010016#define _GNU_SOURCE
Brenden Blanco246b9422015-06-05 11:15:27 -070017
Brenden Blancocd5cb412015-04-26 09:41:58 -070018#include <arpa/inet.h>
Brenden Blancoa94bd932015-04-26 00:56:42 -070019#include <errno.h>
Brenden Blancocd5cb412015-04-26 09:41:58 -070020#include <fcntl.h>
Brenden Blanco3069caa2016-08-01 18:12:11 -070021#include <limits.h>
Brenden Blancocd5cb412015-04-26 09:41:58 -070022#include <linux/bpf.h>
Brenden Blancoa934c902017-05-31 08:44:22 -070023#include <linux/bpf_common.h>
Brenden Blancocd5cb412015-04-26 09:41:58 -070024#include <linux/if_packet.h>
Brenden Blancocd5cb412015-04-26 09:41:58 -070025#include <linux/perf_event.h>
Brenden Blancoa934c902017-05-31 08:44:22 -070026#include <linux/pkt_cls.h>
Brenden Blancocd5cb412015-04-26 09:41:58 -070027#include <linux/rtnetlink.h>
Brenden Blancoa934c902017-05-31 08:44:22 -070028#include <linux/sched.h>
Brenden Blancocd5cb412015-04-26 09:41:58 -070029#include <linux/unistd.h>
30#include <linux/version.h>
Brenden Blancoa94bd932015-04-26 00:56:42 -070031#include <net/ethernet.h>
32#include <net/if.h>
Brenden Blancofa073452017-05-30 17:35:53 -070033#include <sched.h>
Brenden Blancoa934c902017-05-31 08:44:22 -070034#include <stdbool.h>
Brenden Blancobb7200c2015-06-04 18:01:42 -070035#include <stdio.h>
Brenden Blancocd5cb412015-04-26 09:41:58 -070036#include <stdlib.h>
37#include <string.h>
38#include <sys/ioctl.h>
Brenden Blanco4b4bd272015-11-30 10:54:47 -080039#include <sys/resource.h>
Derek35c25012017-01-22 20:58:23 -080040#include <sys/stat.h>
41#include <sys/types.h>
Brenden Blancoa934c902017-05-31 08:44:22 -070042#include <unistd.h>
Brenden Blancocd5cb412015-04-26 09:41:58 -070043
Brenden Blancoa94bd932015-04-26 00:56:42 -070044#include "libbpf.h"
Brenden Blanco8207d102015-09-25 13:58:30 -070045#include "perf_reader.h"
Brenden Blancoa94bd932015-04-26 00:56:42 -070046
Brenden Blancof275d3d2015-07-06 23:41:23 -070047// TODO: remove these defines when linux-libc-dev exports them properly
48
49#ifndef __NR_bpf
Naveen N. Rao0006ad12016-04-29 16:42:58 +053050#if defined(__powerpc64__)
51#define __NR_bpf 361
Zvonko Kosic98121a32017-03-07 07:30:25 +010052#elif defined(__s390x__)
53#define __NR_bpf 351
Zhiyi Sun8e434b72016-12-06 16:21:37 +080054#elif defined(__aarch64__)
55#define __NR_bpf 280
Naveen N. Rao0006ad12016-04-29 16:42:58 +053056#else
Brenden Blancof275d3d2015-07-06 23:41:23 -070057#define __NR_bpf 321
58#endif
Naveen N. Rao0006ad12016-04-29 16:42:58 +053059#endif
Brenden Blancof275d3d2015-07-06 23:41:23 -070060
61#ifndef SO_ATTACH_BPF
62#define SO_ATTACH_BPF 50
63#endif
64
65#ifndef PERF_EVENT_IOC_SET_BPF
66#define PERF_EVENT_IOC_SET_BPF _IOW('$', 8, __u32)
67#endif
68
69#ifndef PERF_FLAG_FD_CLOEXEC
70#define PERF_FLAG_FD_CLOEXEC (1UL << 3)
71#endif
72
// Ring-buffer size, in mmap'd pages, for the perf readers created by the
// kprobe/uprobe/tracepoint attach paths below. bpf_open_perf_buffer() takes
// an explicit page_cnt from the caller instead of using this default.
static int probe_perf_reader_page_cnt = 8;
74
// Widen a user-space pointer to the u64 form used by union bpf_attr fields.
// The intermediate unsigned long cast avoids sign-extension of the pointer
// value on 32-bit builds.
static uint64_t ptr_to_u64(void *ptr)
{
  unsigned long addr = (unsigned long) ptr;
  return (uint64_t) addr;
}
79
// Create a BPF map via the bpf(2) syscall (BPF_MAP_CREATE).
// Returns the new map fd, or -1 with errno set on failure.
int bpf_create_map(enum bpf_map_type map_type, int key_size, int value_size, int max_entries, int map_flags)
{
  union bpf_attr attr;
  int ret;

  memset(&attr, 0, sizeof(attr));
  attr.map_type = map_type;
  attr.key_size = key_size;
  attr.value_size = value_size;
  attr.max_entries = max_entries;
  attr.map_flags = map_flags;

  ret = syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
  if (ret >= 0 || errno != EPERM)
    return ret;

  // EPERM can also mean the locked-memory rlimit is exhausted rather than a
  // real permission problem (map memory is charged against RLIMIT_MEMLOCK).
  // Bump the limit to unlimited and retry once; if it still fails, report
  // whatever error the retry produced.
  struct rlimit rl = {};
  if (getrlimit(RLIMIT_MEMLOCK, &rl) == 0) {
    rl.rlim_max = RLIM_INFINITY;
    rl.rlim_cur = RLIM_INFINITY;
    if (setrlimit(RLIMIT_MEMLOCK, &rl) == 0)
      ret = syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
  }
  return ret;
}
104
// Store `value` at `key` in map `fd` (BPF_MAP_UPDATE_ELEM).
// `flags` is BPF_ANY / BPF_NOEXIST / BPF_EXIST as defined by the kernel.
// Returns 0 on success, -1 with errno set on failure.
int bpf_update_elem(int fd, void *key, void *value, unsigned long long flags)
{
  union bpf_attr attr;

  memset(&attr, 0, sizeof(attr));
  attr.map_fd = fd;
  attr.key = (uint64_t) (unsigned long) key;
  attr.value = (uint64_t) (unsigned long) value;
  attr.flags = flags;

  return syscall(__NR_bpf, BPF_MAP_UPDATE_ELEM, &attr, sizeof(attr));
}
116
// Copy the value stored at `key` in map `fd` into `value`
// (BPF_MAP_LOOKUP_ELEM). Returns 0 on success, -1 with errno set otherwise
// (ENOENT when the key does not exist).
int bpf_lookup_elem(int fd, void *key, void *value)
{
  union bpf_attr attr;

  memset(&attr, 0, sizeof(attr));
  attr.map_fd = fd;
  attr.key = (uint64_t) (unsigned long) key;
  attr.value = (uint64_t) (unsigned long) value;

  return syscall(__NR_bpf, BPF_MAP_LOOKUP_ELEM, &attr, sizeof(attr));
}
127
// Remove the entry at `key` from map `fd` (BPF_MAP_DELETE_ELEM).
// Returns 0 on success, -1 with errno set otherwise.
int bpf_delete_elem(int fd, void *key)
{
  union bpf_attr attr;

  memset(&attr, 0, sizeof(attr));
  attr.map_fd = fd;
  attr.key = (uint64_t) (unsigned long) key;

  return syscall(__NR_bpf, BPF_MAP_DELETE_ELEM, &attr, sizeof(attr));
}
137
// Fetch the first key of map `fd` into the caller-supplied `key` buffer of
// `key_size` bytes. Returns 0 on success, -1 on failure.
int bpf_get_first_key(int fd, void *key, size_t key_size)
{
  union bpf_attr attr;
  int i, res;

  memset(&attr, 0, sizeof(attr));
  attr.map_fd = fd;
  attr.key = 0;
  attr.next_key = ptr_to_u64(key);

  // 4.12 and above kernel supports passing NULL to BPF_MAP_GET_NEXT_KEY
  // to get first key of the map. For older kernels, the call will fail.
  res = syscall(__NR_bpf, BPF_MAP_GET_NEXT_KEY, &attr, sizeof(attr));
  if (res < 0 && errno == EFAULT) {
    // Fall back to try to find a non-existing key: GET_NEXT_KEY on a key
    // that is absent from the map returns the map's first key.
    // NOTE: attr.key and attr.next_key now alias the same caller buffer;
    // the kernel reads the probe key before writing the result into it.
    static unsigned char try_values[3] = {0, 0xff, 0x55};
    attr.key = ptr_to_u64(key);
    for (i = 0; i < 3; i++) {
      memset(key, try_values[i], key_size);
      // We want to check the existence of the key but we don't know the size
      // of map's value. So we pass an invalid pointer for value, expect
      // the call to fail and check if the error is ENOENT indicating the
      // key doesn't exist. If we use NULL for the invalid pointer, it might
      // trigger a page fault in kernel and affect performence. Hence we use
      // ~0 which will fail and return fast.
      // This should fail since we pass an invalid pointer for value.
      if (bpf_lookup_elem(fd, key, (void *)~0) >= 0)
        return -1;
      // This means the key doesn't exist.
      if (errno == ENOENT)
        return syscall(__NR_bpf, BPF_MAP_GET_NEXT_KEY, &attr, sizeof(attr));
    }
    // All three probe patterns happened to exist in the map; give up.
    return -1;
  } else {
    return res;
  }
}
175
// Write into `next_key` the key that follows `key` in map `fd`
// (BPF_MAP_GET_NEXT_KEY). Returns 0 on success, -1 with errno set
// otherwise (ENOENT when `key` was the last one).
int bpf_get_next_key(int fd, void *key, void *next_key)
{
  union bpf_attr attr;

  memset(&attr, 0, sizeof(attr));
  attr.map_fd = fd;
  attr.key = (uint64_t) (unsigned long) key;
  attr.next_key = (uint64_t) (unsigned long) next_key;

  return syscall(__NR_bpf, BPF_MAP_GET_NEXT_KEY, &attr, sizeof(attr));
}
186
// Scan a BPF verifier log for known error signatures and print a
// human-friendly explanation to stderr for each one found.
// The following error strings will need maintenance to match LLVM.
static void bpf_print_hints(char *log)
{
  static const struct {
    const char *needle;
    const char *hint;
  } hints[] = {
    // stack busting
    { "invalid stack off=-",
      "HINT: Looks like you exceeded the BPF stack limit. "
      "This can happen if you allocate too much local variable storage. "
      "For example, if you allocated a 1 Kbyte struct (maybe for "
      "BPF_PERF_OUTPUT), busting a max stack of 512 bytes.\n\n" },
    // didn't check NULL on map lookup
    { "invalid mem access 'map_value_or_null'",
      "HINT: The 'map_value_or_null' error can happen if "
      "you dereference a pointer value from a map lookup without first "
      "checking if that pointer is NULL.\n\n" },
    // lacking a bpf_probe_read
    { "invalid mem access 'inv'",
      "HINT: The invalid mem access 'inv' error can happen "
      "if you try to dereference memory without first using "
      "bpf_probe_read() to copy it to the BPF stack. Sometimes the "
      "bpf_probe_read is automatic by the bcc rewriter, other times "
      "you'll need to be explicit.\n\n" },
  };
  size_t i;

  if (log == NULL)
    return;

  for (i = 0; i < sizeof(hints) / sizeof(hints[0]); i++) {
    if (strstr(log, hints[i].needle) != NULL)
      fprintf(stderr, "%s", hints[i].hint);
  }
}
Brenden Blancoa94bd932015-04-26 00:56:42 -0700218#define ROUND_UP(x, n) (((x) + (n) - 1u) & ~((n) - 1u))
219
// Load a BPF program via bpf(2) BPF_PROG_LOAD.
//
// insns/prog_len: instruction array and its size in bytes.
// license: license string (e.g. "GPL"); required for GPL-only helpers.
// kern_version: must match the running kernel for kprobe programs.
// log_buf/log_buf_size: optional caller-provided verifier log buffer; when
//   NULL, failures are reported to stderr using an internally grown buffer.
//
// Returns the program fd on success, -1 with errno set on failure.
int bpf_prog_load(enum bpf_prog_type prog_type,
                  const struct bpf_insn *insns, int prog_len,
                  const char *license, unsigned kern_version,
                  char *log_buf, unsigned log_buf_size)
{
  union bpf_attr attr;
  char *bpf_log_buffer = NULL;
  unsigned buffer_size = 0;
  int ret = 0;

  memset(&attr, 0, sizeof(attr));
  attr.prog_type = prog_type;
  attr.insns = ptr_to_u64((void *) insns);
  attr.insn_cnt = prog_len / sizeof(struct bpf_insn);
  attr.license = ptr_to_u64((void *) license);
  attr.log_buf = ptr_to_u64(log_buf);
  attr.log_size = log_buf_size;
  // Verifier logging is only requested when the caller supplied a buffer.
  attr.log_level = log_buf ? 1 : 0;

  attr.kern_version = kern_version;
  if (log_buf)
    log_buf[0] = 0;

  // Reject oversized programs up front with the same message format the
  // kernel would eventually produce, to give a clearer diagnostic.
  if (attr.insn_cnt > BPF_MAXINSNS) {
    ret = -1;
    errno = EINVAL;
    fprintf(stderr,
            "bpf: %s. Program too large (%d insns), at most %d insns\n\n",
            strerror(errno), attr.insn_cnt, BPF_MAXINSNS);
    return ret;
  }

  ret = syscall(__NR_bpf, BPF_PROG_LOAD, &attr, sizeof(attr));

  if (ret < 0 && errno == EPERM) {
    // When EPERM is returned, two reasons are possible:
    // 1. user has no permissions for bpf()
    // 2. user has insufficient rlimit for locked memory
    // Unfortunately, there is no api to inspect the current usage of locked
    // mem for the user, so an accurate calculation of how much memory to lock
    // for this new program is difficult to calculate. As a hack, bump the limit
    // to unlimited. If program load fails again, return the error.

    struct rlimit rl = {};
    if (getrlimit(RLIMIT_MEMLOCK, &rl) == 0) {
      rl.rlim_max = RLIM_INFINITY;
      rl.rlim_cur = rl.rlim_max;
      if (setrlimit(RLIMIT_MEMLOCK, &rl) == 0)
        ret = syscall(__NR_bpf, BPF_PROG_LOAD, &attr, sizeof(attr));
    }
  }

  if (ret < 0 && !log_buf) {

    // LOG_BUF_SIZE is presumably defined in libbpf.h — confirm there.
    buffer_size = LOG_BUF_SIZE;
    // caller did not specify log_buf but failure should be printed,
    // so repeat the syscall and print the result to stderr.
    // The buffer is doubled until the verifier log fits (ENOSPC).
    for (;;) {
      bpf_log_buffer = malloc(buffer_size);
      if (!bpf_log_buffer) {
        fprintf(stderr,
                "bpf: buffer log memory allocation failed for error %s\n\n",
                strerror(errno));
        return ret;
      }
      bpf_log_buffer[0] = 0;

      attr.log_buf = ptr_to_u64(bpf_log_buffer);
      attr.log_size = buffer_size;
      attr.log_level = bpf_log_buffer ? 1 : 0;

      ret = syscall(__NR_bpf, BPF_PROG_LOAD, &attr, sizeof(attr));
      if (ret < 0 && errno == ENOSPC) {
        free(bpf_log_buffer);
        bpf_log_buffer = NULL;
        buffer_size <<= 1;
      } else {
        break;
      }
    }

    fprintf(stderr, "bpf: %s\n%s\n", strerror(errno), bpf_log_buffer);
    bpf_print_hints(bpf_log_buffer);

    free(bpf_log_buffer);
  }
  return ret;
}
308
// Open a packet socket bound to interface `name`, suitable for attaching a
// socket-filter BPF program. Returns the socket fd, or -1 on failure.
//
// Fixes vs. previous version: error messages now go to stderr (consistent
// with the rest of this file), and an unresolvable interface name is
// reported instead of silently binding with ifindex 0 (all interfaces).
int bpf_open_raw_sock(const char *name)
{
  struct sockaddr_ll sll;
  int sock;

  sock = socket(PF_PACKET, SOCK_RAW | SOCK_NONBLOCK | SOCK_CLOEXEC, htons(ETH_P_ALL));
  if (sock < 0) {
    fprintf(stderr, "cannot create raw socket\n");
    return -1;
  }

  memset(&sll, 0, sizeof(sll));
  sll.sll_family = AF_PACKET;
  sll.sll_ifindex = if_nametoindex(name);
  if (sll.sll_ifindex == 0) {
    fprintf(stderr, "bpf: Resolving device name to index: %s\n", strerror(errno));
    close(sock);
    return -1;
  }
  sll.sll_protocol = htons(ETH_P_ALL);
  if (bind(sock, (struct sockaddr *)&sll, sizeof(sll)) < 0) {
    fprintf(stderr, "bind to %s: %s\n", name, strerror(errno));
    close(sock);
    return -1;
  }

  return sock;
}
332
333int bpf_attach_socket(int sock, int prog) {
Brenden Blancoaf956732015-06-09 13:58:42 -0700334 return setsockopt(sock, SOL_SOCKET, SO_ATTACH_BPF, &prog, sizeof(prog));
Brenden Blancoa94bd932015-04-26 00:56:42 -0700335}
336
// Open the tracepoint/probe at `event_path`, create a perf event for it,
// mmap it into `reader`, and attach BPF program `progfd` to it.
// pid/cpu/group_fd are passed through to perf_event_open(2).
// Returns 0 on success, -1 on failure.
static int bpf_attach_tracing_event(int progfd, const char *event_path,
    struct perf_reader *reader, int pid, int cpu, int group_fd) {
  int efd, pfd;
  ssize_t bytes;
  char buf[256];
  struct perf_event_attr attr = {};

  // The tracing event's numeric id lives in the "id" file under its
  // debugfs directory; it becomes attr.config below.
  snprintf(buf, sizeof(buf), "%s/id", event_path);
  efd = open(buf, O_RDONLY, 0);
  if (efd < 0) {
    fprintf(stderr, "open(%s): %s\n", buf, strerror(errno));
    return -1;
  }

  bytes = read(efd, buf, sizeof(buf));
  if (bytes <= 0 || bytes >= sizeof(buf)) {
    fprintf(stderr, "read(%s): %s\n", buf, strerror(errno));
    close(efd);
    return -1;
  }
  close(efd);
  buf[bytes] = '\0';
  attr.config = strtol(buf, NULL, 0);
  attr.type = PERF_TYPE_TRACEPOINT;
  attr.sample_type = PERF_SAMPLE_RAW | PERF_SAMPLE_CALLCHAIN;
  attr.sample_period = 1;
  attr.wakeup_events = 1;
  pfd = syscall(__NR_perf_event_open, &attr, pid, cpu, group_fd, PERF_FLAG_FD_CLOEXEC);
  if (pfd < 0) {
    fprintf(stderr, "perf_event_open(%s/id): %s\n", event_path, strerror(errno));
    return -1;
  }
  // Hand the perf fd over to the reader; from here on the reader owns pfd
  // (presumably closed by perf_reader_free — confirm in perf_reader.c).
  perf_reader_set_fd(reader, pfd);

  if (perf_reader_mmap(reader, attr.type, attr.sample_type) < 0)
    return -1;

  // Wire the BPF program to the event, then enable the event.
  if (ioctl(pfd, PERF_EVENT_IOC_SET_BPF, progfd) < 0) {
    perror("ioctl(PERF_EVENT_IOC_SET_BPF)");
    return -1;
  }
  if (ioctl(pfd, PERF_EVENT_IOC_ENABLE, 0) < 0) {
    perror("ioctl(PERF_EVENT_IOC_ENABLE)");
    return -1;
  }

  return 0;
}
385
// Create a kprobe (attach_type == BPF_PROBE_ENTRY) or kretprobe on kernel
// function `fn_name`, attach BPF program `progfd` to it, and return a
// perf_reader that delivers events to `cb`/`cb_cookie`. Returns NULL on
// failure. The probe is registered under the name "<ev_name>_bcc_<pid>" so
// concurrent processes do not collide.
void * bpf_attach_kprobe(int progfd, enum bpf_probe_attach_type attach_type, const char *ev_name,
                        const char *fn_name,
                        pid_t pid, int cpu, int group_fd,
                        perf_reader_cb cb, void *cb_cookie)
{
  int kfd;
  char buf[256];
  char new_name[128];
  struct perf_reader *reader = NULL;
  static char *event_type = "kprobe";
  int n;

  snprintf(new_name, sizeof(new_name), "%s_bcc_%d", ev_name, getpid());
  reader = perf_reader_new(cb, NULL, NULL, cb_cookie, probe_perf_reader_page_cnt);
  if (!reader)
    goto error;

  // Register the probe by appending "p:..." (entry) or "r:..." (return)
  // to the debugfs kprobe_events control file.
  snprintf(buf, sizeof(buf), "/sys/kernel/debug/tracing/%s_events", event_type);
  kfd = open(buf, O_WRONLY | O_APPEND, 0);
  if (kfd < 0) {
    fprintf(stderr, "open(%s): %s\n", buf, strerror(errno));
    goto error;
  }

  snprintf(buf, sizeof(buf), "%c:%ss/%s %s", attach_type==BPF_PROBE_ENTRY ? 'p' : 'r',
           event_type, new_name, fn_name);
  if (write(kfd, buf, strlen(buf)) < 0) {
    if (errno == EINVAL)
      fprintf(stderr, "check dmesg output for possible cause\n");
    close(kfd);
    goto error;
  }
  close(kfd);

  // Prefer attaching through a private tracing instance
  // (instances/bcc_<pid>) so this process's events are isolated; fall back
  // to the global events directory when instances are unsupported or any
  // step fails.
  if (access("/sys/kernel/debug/tracing/instances", F_OK) != -1) {
    snprintf(buf, sizeof(buf), "/sys/kernel/debug/tracing/instances/bcc_%d", getpid());
    if (access(buf, F_OK) == -1) {
      if (mkdir(buf, 0755) == -1)
        goto retry;
    }
    n = snprintf(buf, sizeof(buf), "/sys/kernel/debug/tracing/instances/bcc_%d/events/%ss/%s",
                 getpid(), event_type, new_name);
    // n < sizeof(buf) guards against a truncated path.
    if (n < sizeof(buf) && bpf_attach_tracing_event(progfd, buf, reader, pid, cpu, group_fd) == 0)
      goto out;
    // Instance attach failed: remove the instance dir before falling back.
    snprintf(buf, sizeof(buf), "/sys/kernel/debug/tracing/instances/bcc_%d", getpid());
    rmdir(buf);
  }
retry:
  snprintf(buf, sizeof(buf), "/sys/kernel/debug/tracing/events/%ss/%s", event_type, new_name);
  if (bpf_attach_tracing_event(progfd, buf, reader, pid, cpu, group_fd) < 0)
    goto error;
out:
  return reader;

error:
  // NOTE(review): the kprobe registered above is not removed on failure
  // here — callers appear to rely on bpf_detach_kprobe; confirm.
  perf_reader_free(reader);
  return NULL;

}
445
// Switch the calling thread into the mount namespace of process `pid`.
// Returns an fd referring to the previous (self) mount namespace, which the
// caller must later pass to exit_mount_ns() to switch back, or -1 when no
// switch happened (bad pid, errors, or pid already shares our namespace).
static int enter_mount_ns(int pid) {
  struct stat self_stat, target_stat;
  int self_fd = -1, target_fd = -1;
  char buf[64];

  if (pid < 0)
    return -1;

  // Guard against a truncated /proc path (cannot happen for real pids, but
  // snprintf's return value is checked anyway).
  if ((size_t)snprintf(buf, sizeof(buf), "/proc/%d/ns/mnt", pid) >= sizeof(buf))
    return -1;

  self_fd = open("/proc/self/ns/mnt", O_RDONLY);
  if (self_fd < 0) {
    perror("open(/proc/self/ns/mnt)");
    return -1;
  }

  target_fd = open(buf, O_RDONLY);
  if (target_fd < 0) {
    perror("open(/proc/<pid>/ns/mnt)");
    goto error;
  }

  if (fstat(self_fd, &self_stat)) {
    perror("fstat(self_fd)");
    goto error;
  }

  if (fstat(target_fd, &target_stat)) {
    perror("fstat(target_fd)");
    goto error;
  }

  // both target and current ns are same, avoid setns and close all fds
  // (namespace identity is compared via the inode number of the ns file;
  // the -1 return tells the caller no switch is needed).
  if (self_stat.st_ino == target_stat.st_ino)
    goto error;

  if (setns(target_fd, CLONE_NEWNS)) {
    perror("setns(target)");
    goto error;
  }

  close(target_fd);
  return self_fd;

error:
  if (self_fd >= 0)
    close(self_fd);
  if (target_fd >= 0)
    close(target_fd);
  return -1;
}
498
// Return to the mount namespace saved by enter_mount_ns(). `fd` is the fd
// enter_mount_ns() returned; -1 (no switch was made) is a no-op.
// NOTE(review): fd is not closed here — looks like the caller leaks it on
// the success path; confirm whether that is intentional.
static void exit_mount_ns(int fd) {
  if (fd < 0)
    return;

  if (setns(fd, CLONE_NEWNS))
    perror("setns");
}
506
// Create a uprobe (attach_type == BPF_PROBE_ENTRY) or uretprobe at
// `binary_path`+`offset`, attach BPF program `progfd`, and return a
// perf_reader delivering events to `cb`/`cb_cookie`; NULL on failure.
// The probe is registered as "<ev_name>_bcc_<pid>". When `pid` lives in a
// different mount namespace, the registration is performed from inside that
// namespace so `binary_path` resolves correctly.
void * bpf_attach_uprobe(int progfd, enum bpf_probe_attach_type attach_type, const char *ev_name,
                        const char *binary_path, uint64_t offset,
                        pid_t pid, int cpu, int group_fd,
                        perf_reader_cb cb, void *cb_cookie)
{
  int kfd;
  char buf[PATH_MAX];
  char new_name[128];
  struct perf_reader *reader = NULL;
  static char *event_type = "uprobe";
  int ns_fd = -1;
  int n;

  snprintf(new_name, sizeof(new_name), "%s_bcc_%d", ev_name, getpid());
  reader = perf_reader_new(cb, NULL, NULL, cb_cookie, probe_perf_reader_page_cnt);
  if (!reader)
    goto error;

  snprintf(buf, sizeof(buf), "/sys/kernel/debug/tracing/%s_events", event_type);
  kfd = open(buf, O_WRONLY | O_APPEND, 0);
  if (kfd < 0) {
    fprintf(stderr, "open(%s): %s\n", buf, strerror(errno));
    goto error;
  }

  // "p:uprobes/<name> <path>:0x<offset>" (or "r:" for a return probe).
  // NOTE(review): %lx with a uint64_t offset assumes a 64-bit long —
  // PRIx64 would be portable; confirm 32-bit builds are out of scope.
  n = snprintf(buf, sizeof(buf), "%c:%ss/%s %s:0x%lx", attach_type==BPF_PROBE_ENTRY ? 'p' : 'r',
               event_type, new_name, binary_path, offset);
  if (n >= sizeof(buf)) {
    close(kfd);
    goto error;
  }

  // Enter the target's mount namespace (no-op if it matches ours) only for
  // the write that registers the probe, then switch back immediately.
  ns_fd = enter_mount_ns(pid);
  if (write(kfd, buf, strlen(buf)) < 0) {
    if (errno == EINVAL)
      fprintf(stderr, "check dmesg output for possible cause\n");
    close(kfd);
    goto error;
  }
  close(kfd);
  exit_mount_ns(ns_fd);
  ns_fd = -1;

  snprintf(buf, sizeof(buf), "/sys/kernel/debug/tracing/events/%ss/%s", event_type, new_name);
  if (bpf_attach_tracing_event(progfd, buf, reader, pid, cpu, group_fd) < 0)
    goto error;

  return reader;

error:
  exit_mount_ns(ns_fd);
  perf_reader_free(reader);
  return NULL;
}
561
// Remove the dynamic probe named "<ev_name>_bcc_<pid>" by writing a "-:"
// line into the kprobe_events/uprobe_events control file selected by
// `event_type` ("kprobe" or "uprobe"). Returns 0 on success, -1 on failure.
static int bpf_detach_probe(const char *ev_name, const char *event_type)
{
  char buf[256];
  int kfd, rc = -1;

  snprintf(buf, sizeof(buf), "/sys/kernel/debug/tracing/%s_events", event_type);
  kfd = open(buf, O_WRONLY | O_APPEND, 0);
  if (kfd < 0) {
    fprintf(stderr, "open(%s): %s\n", buf, strerror(errno));
    return -1;
  }

  snprintf(buf, sizeof(buf), "-:%ss/%s_bcc_%d", event_type, ev_name, getpid());
  if (write(kfd, buf, strlen(buf)) >= 0)
    rc = 0;
  else
    fprintf(stderr, "write(%s): %s\n", buf, strerror(errno));

  close(kfd);
  return rc;
}
583
// Detach the kprobe registered by bpf_attach_kprobe() and remove this
// process's private tracing instance directory, if one was created.
int bpf_detach_kprobe(const char *ev_name)
{
  char inst_dir[256];
  int ret;

  ret = bpf_detach_probe(ev_name, "kprobe");

  snprintf(inst_dir, sizeof(inst_dir), "/sys/kernel/debug/tracing/instances/bcc_%d", getpid());
  if (access(inst_dir, F_OK) != -1)
    rmdir(inst_dir);

  return ret;
}
595
// Detach the uprobe registered by bpf_attach_uprobe(). Unlike kprobes,
// there is no per-process tracing instance directory to clean up.
int bpf_detach_uprobe(const char *ev_name)
{
  static const char *kind = "uprobe";
  return bpf_detach_probe(ev_name, kind);
}
600
Dereke4da6c22017-01-28 16:11:28 -0800601
Sasha Goldshtein1198c3c2016-06-30 06:26:28 -0700602void * bpf_attach_tracepoint(int progfd, const char *tp_category,
603 const char *tp_name, int pid, int cpu,
604 int group_fd, perf_reader_cb cb, void *cb_cookie) {
605 char buf[256];
606 struct perf_reader *reader = NULL;
607
Teng Qin4b764de2017-04-03 22:10:46 -0700608 reader = perf_reader_new(cb, NULL, NULL, cb_cookie, probe_perf_reader_page_cnt);
Sasha Goldshtein1198c3c2016-06-30 06:26:28 -0700609 if (!reader)
610 goto error;
611
612 snprintf(buf, sizeof(buf), "/sys/kernel/debug/tracing/events/%s/%s",
613 tp_category, tp_name);
614 if (bpf_attach_tracing_event(progfd, buf, reader, pid, cpu, group_fd) < 0)
615 goto error;
616
617 return reader;
618
619error:
620 perf_reader_free(reader);
621 return NULL;
622}
623
// Counterpart to bpf_attach_tracepoint(). Static tracepoints need no
// explicit teardown today, but callers should still pair attach/detach so
// future cleanup can be added here. Always returns 0.
int bpf_detach_tracepoint(const char *tp_category, const char *tp_name) {
  (void) tp_category;
  (void) tp_name;
  return 0;
}
629
// Create a software perf event used as a BPF_PERF_OUTPUT channel on one
// cpu (optionally filtered to `pid`), mmap `page_cnt` pages for it, and
// return a perf_reader that invokes raw_cb/lost_cb with `cb_cookie`.
// Returns NULL on failure.
void * bpf_open_perf_buffer(perf_reader_raw_cb raw_cb,
                            perf_reader_lost_cb lost_cb, void *cb_cookie,
                            int pid, int cpu, int page_cnt) {
  int pfd;
  struct perf_event_attr attr = {};
  struct perf_reader *reader = NULL;

  reader = perf_reader_new(NULL, raw_cb, lost_cb, cb_cookie, page_cnt);
  if (!reader)
    goto error;

  // 10 is PERF_COUNT_SW_BPF_OUTPUT, spelled numerically so this builds
  // against pre-4.4 kernel headers that lack the enum value.
  attr.config = 10;//PERF_COUNT_SW_BPF_OUTPUT;
  attr.type = PERF_TYPE_SOFTWARE;
  attr.sample_type = PERF_SAMPLE_RAW;
  attr.sample_period = 1;
  attr.wakeup_events = 1;
  pfd = syscall(__NR_perf_event_open, &attr, pid, cpu, -1, PERF_FLAG_FD_CLOEXEC);
  if (pfd < 0) {
    fprintf(stderr, "perf_event_open: %s\n", strerror(errno));
    fprintf(stderr, "   (check your kernel for PERF_COUNT_SW_BPF_OUTPUT support, 4.4 or newer)\n");
    goto error;
  }
  // The reader takes ownership of pfd from here on.
  perf_reader_set_fd(reader, pfd);

  if (perf_reader_mmap(reader, attr.type, attr.sample_type) < 0)
    goto error;

  if (ioctl(pfd, PERF_EVENT_IOC_ENABLE, 0) < 0) {
    perror("ioctl(PERF_EVENT_IOC_ENABLE)");
    goto error;
  }

  return reader;

error:
  if (reader)
    perf_reader_free(reader);

  return NULL;
}
Jan Rüthe0724d72016-07-28 22:32:46 +0200670
// Validate `config` against the value range allowed for the given perf
// event `type`. Returns nonzero when the combination must be rejected;
// raw events accept any config, unknown types are always rejected.
static int invalid_perf_config(uint32_t type, uint64_t config) {
  if (type == PERF_TYPE_HARDWARE)
    return config >= PERF_COUNT_HW_MAX;
  if (type == PERF_TYPE_SOFTWARE)
    return config >= PERF_COUNT_SW_MAX;
  if (type == PERF_TYPE_RAW)
    return 0;
  return 1;
}
683
// Open and enable a counting perf event (hardware or raw only) on `cpu`,
// optionally scoped to `pid`, for use with BPF helpers such as
// bpf_perf_event_read. Returns the event fd, or -1 on failure.
int bpf_open_perf_event(uint32_t type, uint64_t config, int pid, int cpu) {
  struct perf_event_attr attr = {};
  int pfd;

  if (type != PERF_TYPE_HARDWARE && type != PERF_TYPE_RAW) {
    fprintf(stderr, "Unsupported perf event type\n");
    return -1;
  }
  if (invalid_perf_config(type, config)) {
    fprintf(stderr, "Invalid perf event config\n");
    return -1;
  }

  attr.type = type;
  attr.config = config;
  // A huge sample period effectively disables sampling: the event only
  // counts, which is what bpf_perf_event_read expects.
  attr.sample_period = LONG_MAX;

  pfd = syscall(__NR_perf_event_open, &attr, pid, cpu, -1, PERF_FLAG_FD_CLOEXEC);
  if (pfd < 0) {
    fprintf(stderr, "perf_event_open: %s\n", strerror(errno));
    return -1;
  }

  if (ioctl(pfd, PERF_EVENT_IOC_ENABLE, 0) < 0) {
    perror("ioctl(PERF_EVENT_IOC_ENABLE)");
    close(pfd);
    return -1;
  }

  return pfd;
}
Jan Rüthe0724d72016-07-28 22:32:46 +0200715
Andy Gospodarek9f3cab72017-05-17 16:18:45 -0400716int bpf_attach_xdp(const char *dev_name, int progfd, uint32_t flags) {
Jan Rüthe0724d72016-07-28 22:32:46 +0200717 struct sockaddr_nl sa;
718 int sock, seq = 0, len, ret = -1;
719 char buf[4096];
720 struct nlattr *nla, *nla_xdp;
721 struct {
722 struct nlmsghdr nh;
723 struct ifinfomsg ifinfo;
724 char attrbuf[64];
725 } req;
726 struct nlmsghdr *nh;
727 struct nlmsgerr *err;
Toshiaki Makitabb9b92a2017-07-31 20:20:55 +0900728 socklen_t addrlen;
Jan Rüthe0724d72016-07-28 22:32:46 +0200729
730 memset(&sa, 0, sizeof(sa));
731 sa.nl_family = AF_NETLINK;
732
733 sock = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);
734 if (sock < 0) {
735 fprintf(stderr, "bpf: opening a netlink socket: %s\n", strerror(errno));
736 return -1;
737 }
738
739 if (bind(sock, (struct sockaddr *)&sa, sizeof(sa)) < 0) {
740 fprintf(stderr, "bpf: bind to netlink: %s\n", strerror(errno));
741 goto cleanup;
742 }
743
Toshiaki Makitabb9b92a2017-07-31 20:20:55 +0900744 addrlen = sizeof(sa);
745 if (getsockname(sock, (struct sockaddr *)&sa, &addrlen) < 0) {
746 fprintf(stderr, "bpf: get sock name of netlink: %s\n", strerror(errno));
747 goto cleanup;
748 }
749
750 if (addrlen != sizeof(sa)) {
751 fprintf(stderr, "bpf: wrong netlink address length: %d\n", addrlen);
752 goto cleanup;
753 }
754
Jan Rüthe0724d72016-07-28 22:32:46 +0200755 memset(&req, 0, sizeof(req));
756 req.nh.nlmsg_len = NLMSG_LENGTH(sizeof(struct ifinfomsg));
757 req.nh.nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK;
758 req.nh.nlmsg_type = RTM_SETLINK;
759 req.nh.nlmsg_pid = 0;
760 req.nh.nlmsg_seq = ++seq;
761 req.ifinfo.ifi_family = AF_UNSPEC;
762 req.ifinfo.ifi_index = if_nametoindex(dev_name);
763 if (req.ifinfo.ifi_index == 0) {
764 fprintf(stderr, "bpf: Resolving device name to index: %s\n", strerror(errno));
765 goto cleanup;
766 }
767
768 nla = (struct nlattr *)(((char *)&req)
769 + NLMSG_ALIGN(req.nh.nlmsg_len));
770 nla->nla_type = NLA_F_NESTED | 43/*IFLA_XDP*/;
771
772 nla_xdp = (struct nlattr *)((char *)nla + NLA_HDRLEN);
Andy Gospodarek9f3cab72017-05-17 16:18:45 -0400773 nla->nla_len = NLA_HDRLEN;
Jan Rüthe0724d72016-07-28 22:32:46 +0200774
775 // we specify the FD passed over by the user
776 nla_xdp->nla_type = 1/*IFLA_XDP_FD*/;
Arthur Gautierfbd91e22017-04-28 21:39:58 +0000777 nla_xdp->nla_len = NLA_HDRLEN + sizeof(progfd);
Jan Rüthe0724d72016-07-28 22:32:46 +0200778 memcpy((char *)nla_xdp + NLA_HDRLEN, &progfd, sizeof(progfd));
Andy Gospodarek9f3cab72017-05-17 16:18:45 -0400779 nla->nla_len += nla_xdp->nla_len;
780
781 // parse flags as passed by the user
782 if (flags) {
783 nla_xdp = (struct nlattr *)((char *)nla + nla->nla_len);
Gary Lindb8353b2017-08-18 18:10:43 +0800784 nla_xdp->nla_type = 3/*IFLA_XDP_FLAGS*/;
Andy Gospodarek9f3cab72017-05-17 16:18:45 -0400785 nla_xdp->nla_len = NLA_HDRLEN + sizeof(flags);
786 memcpy((char *)nla_xdp + NLA_HDRLEN, &flags, sizeof(flags));
787 nla->nla_len += nla_xdp->nla_len;
788 }
Jan Rüthe0724d72016-07-28 22:32:46 +0200789
790 req.nh.nlmsg_len += NLA_ALIGN(nla->nla_len);
791
792 if (send(sock, &req, req.nh.nlmsg_len, 0) < 0) {
793 fprintf(stderr, "bpf: send to netlink: %s\n", strerror(errno));
794 goto cleanup;
795 }
796
797 len = recv(sock, buf, sizeof(buf), 0);
798 if (len < 0) {
799 fprintf(stderr, "bpf: recv from netlink: %s\n", strerror(errno));
800 goto cleanup;
801 }
802
803 for (nh = (struct nlmsghdr *)buf; NLMSG_OK(nh, len);
804 nh = NLMSG_NEXT(nh, len)) {
Toshiaki Makitabb9b92a2017-07-31 20:20:55 +0900805 if (nh->nlmsg_pid != sa.nl_pid) {
Toshiaki Makita890c76a2017-07-31 20:20:55 +0900806 fprintf(stderr, "bpf: Wrong pid %u, expected %u\n",
Toshiaki Makitabb9b92a2017-07-31 20:20:55 +0900807 nh->nlmsg_pid, sa.nl_pid);
Jan Rüthe0724d72016-07-28 22:32:46 +0200808 errno = EBADMSG;
809 goto cleanup;
810 }
811 if (nh->nlmsg_seq != seq) {
812 fprintf(stderr, "bpf: Wrong seq %d, expected %d\n",
813 nh->nlmsg_seq, seq);
814 errno = EBADMSG;
815 goto cleanup;
816 }
817 switch (nh->nlmsg_type) {
818 case NLMSG_ERROR:
819 err = (struct nlmsgerr *)NLMSG_DATA(nh);
820 if (!err->error)
821 continue;
822 fprintf(stderr, "bpf: nlmsg error %s\n", strerror(-err->error));
823 errno = -err->error;
824 goto cleanup;
825 case NLMSG_DONE:
826 break;
827 }
828 }
829
830 ret = 0;
831
832cleanup:
833 close(sock);
834 return ret;
835}
Teng Qin206b0202016-10-18 16:06:57 -0700836
/*
 * Create a perf event and attach the BPF program referenced by progfd to
 * it. Exactly one of sample_period / sample_freq must be non-zero; pid,
 * cpu and group_fd are passed straight to perf_event_open(2). Returns the
 * enabled perf event fd on success, -1 on failure.
 */
int bpf_attach_perf_event(int progfd, uint32_t ev_type, uint32_t ev_config,
                          uint64_t sample_period, uint64_t sample_freq,
                          pid_t pid, int cpu, int group_fd) {
  struct perf_event_attr attr = {};
  int fd;

  if (ev_type != PERF_TYPE_HARDWARE && ev_type != PERF_TYPE_SOFTWARE) {
    fprintf(stderr, "Unsupported perf event type\n");
    return -1;
  }
  if (invalid_perf_config(ev_type, ev_config)) {
    fprintf(stderr, "Invalid perf event config\n");
    return -1;
  }
  // Period and frequency are mutually exclusive ways to set the sample
  // rate; both-set and both-zero are equally invalid.
  if ((sample_period > 0) == (sample_freq > 0)) {
    fprintf(
      stderr, "Exactly one of sample_period / sample_freq should be set\n"
    );
    return -1;
  }

  attr.type = ev_type;
  attr.config = ev_config;
  attr.inherit = 1;  // child tasks inherit the event
  if (sample_freq > 0) {
    attr.freq = 1;
    attr.sample_freq = sample_freq;
  } else {
    attr.sample_period = sample_period;
  }

  fd = syscall(
      __NR_perf_event_open, &attr, pid, cpu, group_fd, PERF_FLAG_FD_CLOEXEC
  );
  if (fd < 0) {
    perror("perf_event_open failed");
    return -1;
  }

  // Wire the BPF program to the event, then start the event.
  if (ioctl(fd, PERF_EVENT_IOC_SET_BPF, progfd) != 0) {
    perror("ioctl(PERF_EVENT_IOC_SET_BPF) failed");
    goto error;
  }
  if (ioctl(fd, PERF_EVENT_IOC_ENABLE, 0) != 0) {
    perror("ioctl(PERF_EVENT_IOC_ENABLE) failed");
    goto error;
  }

  return fd;

error:
  close(fd);
  return -1;
}
886
Teng Qind6827332017-05-23 16:35:11 -0700887int bpf_close_perf_event_fd(int fd) {
888 int res, error = 0;
889 if (fd >= 0) {
890 res = ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);
891 if (res != 0) {
892 perror("ioctl(PERF_EVENT_IOC_DISABLE) failed");
893 error = res;
894 }
895 res = close(fd);
896 if (res != 0) {
897 perror("close perf event FD failed");
898 error = (res && !error) ? res : error;
899 }
900 }
901 return error;
Teng Qin206b0202016-10-18 16:06:57 -0700902}
Huapeng Zhou37dcac02016-12-20 13:42:01 -0800903
/*
 * Pin the BPF object (map or program) referenced by fd at pathname on a
 * bpf filesystem. Returns the BPF_OBJ_PIN syscall result: 0 on success,
 * -1 on failure with errno set.
 */
int bpf_obj_pin(int fd, const char *pathname)
{
  union bpf_attr attr;

  // Zero the whole union: the kernel rejects attrs with stray bytes set.
  memset(&attr, 0, sizeof(attr));
  attr.bpf_fd = fd;
  attr.pathname = ptr_to_u64((void *)pathname);

  return syscall(__NR_bpf, BPF_OBJ_PIN, &attr, sizeof(attr));
}
914
/*
 * Retrieve a new fd for the BPF object previously pinned at pathname.
 * Returns the fd from the BPF_OBJ_GET syscall, or -1 on failure with
 * errno set.
 */
int bpf_obj_get(const char *pathname)
{
  union bpf_attr attr;

  // Zero the whole union: the kernel rejects attrs with stray bytes set.
  memset(&attr, 0, sizeof(attr));
  attr.pathname = ptr_to_u64((void *)pathname);

  return syscall(__NR_bpf, BPF_OBJ_GET, &attr, sizeof(attr));
}