/*
 * Copyright (c) 2015 PLUMgrid, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#define _GNU_SOURCE

#include <arpa/inet.h>
#include <errno.h>
#include <fcntl.h>
#include <limits.h>
#include <linux/bpf.h>
#include <linux/bpf_common.h>
#include <linux/if_packet.h>
#include <linux/perf_event.h>
#include <linux/pkt_cls.h>
#include <linux/rtnetlink.h>
#include <linux/sched.h>
#include <linux/unistd.h>
#include <linux/version.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <sched.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/resource.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <unistd.h>

#include "libbpf.h"
#include "perf_reader.h"

// TODO: remove these defines when linux-libc-dev exports them properly

#ifndef __NR_bpf
#if defined(__powerpc64__)
#define __NR_bpf 361
#elif defined(__s390x__)
#define __NR_bpf 351
#elif defined(__aarch64__)
#define __NR_bpf 280
#else
#define __NR_bpf 321
#endif
#endif

#ifndef SO_ATTACH_BPF
#define SO_ATTACH_BPF 50
#endif

#ifndef PERF_EVENT_IOC_SET_BPF
#define PERF_EVENT_IOC_SET_BPF _IOW('$', 8, __u32)
#endif

#ifndef PERF_FLAG_FD_CLOEXEC
#define PERF_FLAG_FD_CLOEXEC (1UL << 3)
#endif

static int probe_perf_reader_page_cnt = 8;

static uint64_t ptr_to_u64(void *ptr)
{
  return (uint64_t) (unsigned long) ptr;
}

int bpf_create_map(enum bpf_map_type map_type, int key_size, int value_size, int max_entries, int map_flags)
{
  union bpf_attr attr;
  memset(&attr, 0, sizeof(attr));
  attr.map_type = map_type;
  attr.key_size = key_size;
  attr.value_size = value_size;
  attr.max_entries = max_entries;
  attr.map_flags = map_flags;

  int ret = syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
  if (ret < 0 && errno == EPERM) {
    // see note below about the rationale for this retry

    struct rlimit rl = {};
    if (getrlimit(RLIMIT_MEMLOCK, &rl) == 0) {
      rl.rlim_max = RLIM_INFINITY;
      rl.rlim_cur = rl.rlim_max;
      if (setrlimit(RLIMIT_MEMLOCK, &rl) == 0)
        ret = syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
    }
  }
  return ret;
}
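
/*
 * Illustrative usage sketch (not part of the original file): creating a small
 * BPF_MAP_TYPE_HASH map with u32 keys and u64 values through the wrapper
 * above. Error handling is reduced to a bare minimum.
 *
 *   int map_fd = bpf_create_map(BPF_MAP_TYPE_HASH, sizeof(uint32_t),
 *                               sizeof(uint64_t), 1024, 0);
 *   if (map_fd < 0)
 *     fprintf(stderr, "bpf_create_map: %s\n", strerror(errno));
 */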

int bpf_update_elem(int fd, void *key, void *value, unsigned long long flags)
{
  union bpf_attr attr;
  memset(&attr, 0, sizeof(attr));
  attr.map_fd = fd;
  attr.key = ptr_to_u64(key);
  attr.value = ptr_to_u64(value);
  attr.flags = flags;

  return syscall(__NR_bpf, BPF_MAP_UPDATE_ELEM, &attr, sizeof(attr));
}

int bpf_lookup_elem(int fd, void *key, void *value)
{
  union bpf_attr attr;
  memset(&attr, 0, sizeof(attr));
  attr.map_fd = fd;
  attr.key = ptr_to_u64(key);
  attr.value = ptr_to_u64(value);

  return syscall(__NR_bpf, BPF_MAP_LOOKUP_ELEM, &attr, sizeof(attr));
}

int bpf_delete_elem(int fd, void *key)
{
  union bpf_attr attr;
  memset(&attr, 0, sizeof(attr));
  attr.map_fd = fd;
  attr.key = ptr_to_u64(key);

  return syscall(__NR_bpf, BPF_MAP_DELETE_ELEM, &attr, sizeof(attr));
}

int bpf_get_first_key(int fd, void *key, size_t key_size)
{
  union bpf_attr attr;
  int i, res;

  memset(&attr, 0, sizeof(attr));
  attr.map_fd = fd;
  attr.key = 0;
  attr.next_key = ptr_to_u64(key);

  // Kernels 4.12 and above support passing NULL to BPF_MAP_GET_NEXT_KEY
  // to get the first key of the map. On older kernels the call will fail.
  res = syscall(__NR_bpf, BPF_MAP_GET_NEXT_KEY, &attr, sizeof(attr));
  if (res < 0 && errno == EFAULT) {
    // Fall back to trying to find a non-existing key.
    static unsigned char try_values[3] = {0, 0xff, 0x55};
    attr.key = ptr_to_u64(key);
    for (i = 0; i < 3; i++) {
      memset(key, try_values[i], key_size);
      // We want to check the existence of the key but we don't know the size
      // of the map's value. So we pass an invalid pointer for value, expect
      // the call to fail, and check whether the error is ENOENT, indicating
      // that the key doesn't exist. If we used NULL for the invalid pointer,
      // it might trigger a page fault in the kernel and affect performance.
      // Hence we use ~0, which fails and returns fast.
      // This should fail since we pass an invalid pointer for value.
      if (bpf_lookup_elem(fd, key, (void *)~0) >= 0)
        return -1;
      // This means the key doesn't exist.
      if (errno == ENOENT)
        return syscall(__NR_bpf, BPF_MAP_GET_NEXT_KEY, &attr, sizeof(attr));
    }
    return -1;
  } else {
    return res;
  }
}

int bpf_get_next_key(int fd, void *key, void *next_key)
{
  union bpf_attr attr;
  memset(&attr, 0, sizeof(attr));
  attr.map_fd = fd;
  attr.key = ptr_to_u64(key);
  attr.next_key = ptr_to_u64(next_key);

  return syscall(__NR_bpf, BPF_MAP_GET_NEXT_KEY, &attr, sizeof(attr));
}
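
/*
 * Illustrative usage sketch (not part of the original file): walking every
 * key of a map using bpf_get_first_key() and bpf_get_next_key(), and reading
 * each value with bpf_lookup_elem(). map_fd is assumed to refer to a map with
 * u32 keys and u64 values.
 *
 *   uint32_t key, next_key;
 *   uint64_t value;
 *   int res = bpf_get_first_key(map_fd, &key, sizeof(key));
 *   while (res == 0) {
 *     if (bpf_lookup_elem(map_fd, &key, &value) == 0)
 *       printf("%u -> %llu\n", key, (unsigned long long)value);
 *     res = bpf_get_next_key(map_fd, &key, &next_key);
 *     key = next_key;
 *   }
 */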

static void bpf_print_hints(char *log)
{
  if (log == NULL)
    return;

  // The following error strings will need maintenance to match LLVM.

  // stack busting
  if (strstr(log, "invalid stack off=-") != NULL) {
    fprintf(stderr, "HINT: Looks like you exceeded the BPF stack limit. "
      "This can happen if you allocate too much local variable storage. "
      "For example, if you allocated a 1 Kbyte struct (maybe for "
      "BPF_PERF_OUTPUT), busting a max stack of 512 bytes.\n\n");
  }

  // didn't check NULL on map lookup
  if (strstr(log, "invalid mem access 'map_value_or_null'") != NULL) {
    fprintf(stderr, "HINT: The 'map_value_or_null' error can happen if "
      "you dereference a pointer value from a map lookup without first "
      "checking if that pointer is NULL.\n\n");
  }

  // lacking a bpf_probe_read
  if (strstr(log, "invalid mem access 'inv'") != NULL) {
    fprintf(stderr, "HINT: The invalid mem access 'inv' error can happen "
      "if you try to dereference memory without first using "
      "bpf_probe_read() to copy it to the BPF stack. Sometimes the "
      "bpf_probe_read is automatic by the bcc rewriter, other times "
      "you'll need to be explicit.\n\n");
  }
}
#define ROUND_UP(x, n) (((x) + (n) - 1u) & ~((n) - 1u))

int bpf_prog_load(enum bpf_prog_type prog_type,
                  const struct bpf_insn *insns, int prog_len,
                  const char *license, unsigned kern_version,
                  char *log_buf, unsigned log_buf_size)
{
  union bpf_attr attr;
  char *bpf_log_buffer = NULL;
  unsigned buffer_size = 0;
  int ret = 0;

  memset(&attr, 0, sizeof(attr));
  attr.prog_type = prog_type;
  attr.insns = ptr_to_u64((void *) insns);
  attr.insn_cnt = prog_len / sizeof(struct bpf_insn);
  attr.license = ptr_to_u64((void *) license);
  attr.log_buf = ptr_to_u64(log_buf);
  attr.log_size = log_buf_size;
  attr.log_level = log_buf ? 1 : 0;

  attr.kern_version = kern_version;
  if (log_buf)
    log_buf[0] = 0;

  if (attr.insn_cnt > BPF_MAXINSNS) {
    ret = -1;
    errno = EINVAL;
    fprintf(stderr,
      "bpf: %s. Program too large (%d insns), at most %d insns\n\n",
      strerror(errno), attr.insn_cnt, BPF_MAXINSNS);
    return ret;
  }

  ret = syscall(__NR_bpf, BPF_PROG_LOAD, &attr, sizeof(attr));

  if (ret < 0 && errno == EPERM) {
    // When EPERM is returned, two reasons are possible:
    // 1. user has no permissions for bpf()
    // 2. user has insufficient rlimit for locked memory
    // Unfortunately, there is no api to inspect the current usage of locked
    // mem for the user, so an accurate calculation of how much memory to lock
    // for this new program is difficult. As a hack, bump the limit to
    // unlimited. If program load fails again, return the error.

    struct rlimit rl = {};
    if (getrlimit(RLIMIT_MEMLOCK, &rl) == 0) {
      rl.rlim_max = RLIM_INFINITY;
      rl.rlim_cur = rl.rlim_max;
      if (setrlimit(RLIMIT_MEMLOCK, &rl) == 0)
        ret = syscall(__NR_bpf, BPF_PROG_LOAD, &attr, sizeof(attr));
    }
  }

  if (ret < 0 && !log_buf) {
    buffer_size = LOG_BUF_SIZE;
    // The caller did not specify log_buf but the failure should be printed,
    // so repeat the syscall and print the result to stderr.
    for (;;) {
      bpf_log_buffer = malloc(buffer_size);
      if (!bpf_log_buffer) {
        fprintf(stderr,
          "bpf: buffer log memory allocation failed for error %s\n\n",
          strerror(errno));
        return ret;
      }
      bpf_log_buffer[0] = 0;

      attr.log_buf = ptr_to_u64(bpf_log_buffer);
      attr.log_size = buffer_size;
      attr.log_level = bpf_log_buffer ? 1 : 0;

      ret = syscall(__NR_bpf, BPF_PROG_LOAD, &attr, sizeof(attr));
      if (ret < 0 && errno == ENOSPC) {
        free(bpf_log_buffer);
        bpf_log_buffer = NULL;
        buffer_size <<= 1;
      } else {
        break;
      }
    }

    fprintf(stderr, "bpf: %s\n%s\n", strerror(errno), bpf_log_buffer);
    bpf_print_hints(bpf_log_buffer);

    free(bpf_log_buffer);
  }
  return ret;
}
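
/*
 * Illustrative usage sketch (not part of the original file): loading a
 * trivial socket-filter program ("mov r0, 0; exit", i.e. drop every packet)
 * with a log buffer for verifier output. The raw instruction encodings are
 * written out by hand only to keep the sketch self-contained.
 *
 *   struct bpf_insn prog[] = {
 *     { .code = 0xb7, .dst_reg = 0, .src_reg = 0, .off = 0, .imm = 0 }, // r0 = 0
 *     { .code = 0x95, .dst_reg = 0, .src_reg = 0, .off = 0, .imm = 0 }, // exit
 *   };
 *   char log[4096];
 *   int prog_fd = bpf_prog_load(BPF_PROG_TYPE_SOCKET_FILTER, prog,
 *                               sizeof(prog), "GPL", LINUX_VERSION_CODE,
 *                               log, sizeof(log));
 *   if (prog_fd < 0)
 *     fprintf(stderr, "load failed: %s\n%s\n", strerror(errno), log);
 */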

int bpf_open_raw_sock(const char *name)
{
  struct sockaddr_ll sll;
  int sock;

  sock = socket(PF_PACKET, SOCK_RAW | SOCK_NONBLOCK | SOCK_CLOEXEC, htons(ETH_P_ALL));
  if (sock < 0) {
    printf("cannot create raw socket\n");
    return -1;
  }

  memset(&sll, 0, sizeof(sll));
  sll.sll_family = AF_PACKET;
  sll.sll_ifindex = if_nametoindex(name);
  sll.sll_protocol = htons(ETH_P_ALL);
  if (bind(sock, (struct sockaddr *)&sll, sizeof(sll)) < 0) {
    printf("bind to %s: %s\n", name, strerror(errno));
    close(sock);
    return -1;
  }

  return sock;
}

int bpf_attach_socket(int sock, int prog) {
  return setsockopt(sock, SOL_SOCKET, SO_ATTACH_BPF, &prog, sizeof(prog));
}
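
/*
 * Illustrative usage sketch (not part of the original file): binding a raw
 * packet socket to an interface (here "eth0", an assumed device name) and
 * attaching a previously loaded BPF_PROG_TYPE_SOCKET_FILTER program to it.
 *
 *   int sock = bpf_open_raw_sock("eth0");
 *   if (sock >= 0 && bpf_attach_socket(sock, prog_fd) < 0)
 *     fprintf(stderr, "SO_ATTACH_BPF: %s\n", strerror(errno));
 */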

static int bpf_attach_tracing_event(int progfd, const char *event_path,
    struct perf_reader *reader, int pid, int cpu, int group_fd) {
  int efd, pfd;
  ssize_t bytes;
  char buf[256];
  struct perf_event_attr attr = {};

  snprintf(buf, sizeof(buf), "%s/id", event_path);
  efd = open(buf, O_RDONLY, 0);
  if (efd < 0) {
    fprintf(stderr, "open(%s): %s\n", buf, strerror(errno));
    return -1;
  }

  bytes = read(efd, buf, sizeof(buf));
  if (bytes <= 0 || bytes >= sizeof(buf)) {
    fprintf(stderr, "read(%s): %s\n", buf, strerror(errno));
    close(efd);
    return -1;
  }
  close(efd);
  buf[bytes] = '\0';
  attr.config = strtol(buf, NULL, 0);
  attr.type = PERF_TYPE_TRACEPOINT;
  attr.sample_type = PERF_SAMPLE_RAW | PERF_SAMPLE_CALLCHAIN;
  attr.sample_period = 1;
  attr.wakeup_events = 1;
  pfd = syscall(__NR_perf_event_open, &attr, pid, cpu, group_fd, PERF_FLAG_FD_CLOEXEC);
  if (pfd < 0) {
    fprintf(stderr, "perf_event_open(%s/id): %s\n", event_path, strerror(errno));
    return -1;
  }
  perf_reader_set_fd(reader, pfd);

  if (perf_reader_mmap(reader, attr.type, attr.sample_type) < 0)
    return -1;

  if (ioctl(pfd, PERF_EVENT_IOC_SET_BPF, progfd) < 0) {
    perror("ioctl(PERF_EVENT_IOC_SET_BPF)");
    return -1;
  }
  if (ioctl(pfd, PERF_EVENT_IOC_ENABLE, 0) < 0) {
    perror("ioctl(PERF_EVENT_IOC_ENABLE)");
    return -1;
  }

  return 0;
}

void * bpf_attach_kprobe(int progfd, enum bpf_probe_attach_type attach_type, const char *ev_name,
                         const char *fn_name,
                         pid_t pid, int cpu, int group_fd,
                         perf_reader_cb cb, void *cb_cookie)
{
  int kfd;
  char buf[256];
  char new_name[128];
  struct perf_reader *reader = NULL;
  static char *event_type = "kprobe";
  int n;

  snprintf(new_name, sizeof(new_name), "%s_bcc_%d", ev_name, getpid());
  reader = perf_reader_new(cb, NULL, NULL, cb_cookie, probe_perf_reader_page_cnt);
  if (!reader)
    goto error;

  snprintf(buf, sizeof(buf), "/sys/kernel/debug/tracing/%s_events", event_type);
  kfd = open(buf, O_WRONLY | O_APPEND, 0);
  if (kfd < 0) {
    fprintf(stderr, "open(%s): %s\n", buf, strerror(errno));
    goto error;
  }

  snprintf(buf, sizeof(buf), "%c:%ss/%s %s", attach_type==BPF_PROBE_ENTRY ? 'p' : 'r',
           event_type, new_name, fn_name);
  if (write(kfd, buf, strlen(buf)) < 0) {
    if (errno == EINVAL)
      fprintf(stderr, "check dmesg output for possible cause\n");
    close(kfd);
    goto error;
  }
  close(kfd);

  if (access("/sys/kernel/debug/tracing/instances", F_OK) != -1) {
    snprintf(buf, sizeof(buf), "/sys/kernel/debug/tracing/instances/bcc_%d", getpid());
    if (access(buf, F_OK) == -1) {
      if (mkdir(buf, 0755) == -1)
        goto retry;
    }
    n = snprintf(buf, sizeof(buf), "/sys/kernel/debug/tracing/instances/bcc_%d/events/%ss/%s",
                 getpid(), event_type, new_name);
    if (n < sizeof(buf) && bpf_attach_tracing_event(progfd, buf, reader, pid, cpu, group_fd) == 0)
      goto out;
    snprintf(buf, sizeof(buf), "/sys/kernel/debug/tracing/instances/bcc_%d", getpid());
    rmdir(buf);
  }
retry:
  snprintf(buf, sizeof(buf), "/sys/kernel/debug/tracing/events/%ss/%s", event_type, new_name);
  if (bpf_attach_tracing_event(progfd, buf, reader, pid, cpu, group_fd) < 0)
    goto error;
out:
  return reader;

error:
  perf_reader_free(reader);
  return NULL;
}
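
/*
 * Illustrative usage sketch (not part of the original file): attaching a
 * loaded BPF_PROG_TYPE_KPROBE program to the entry of do_sys_open(). The
 * event name "p_do_sys_open" is an arbitrary label chosen for this example;
 * bpf_attach_uprobe() below follows the same pattern with a binary path and
 * offset instead of a kernel function name.
 *
 *   void *reader = bpf_attach_kprobe(prog_fd, BPF_PROBE_ENTRY,
 *                                    "p_do_sys_open", "do_sys_open",
 *                                    -1, 0, -1, NULL, NULL);
 *   if (!reader)
 *     fprintf(stderr, "failed to attach kprobe\n");
 *   ...
 *   bpf_detach_kprobe("p_do_sys_open");
 */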

static int enter_mount_ns(int pid) {
  struct stat self_stat, target_stat;
  int self_fd = -1, target_fd = -1;
  char buf[64];

  if (pid < 0)
    return -1;

  if ((size_t)snprintf(buf, sizeof(buf), "/proc/%d/ns/mnt", pid) >= sizeof(buf))
    return -1;

  self_fd = open("/proc/self/ns/mnt", O_RDONLY);
  if (self_fd < 0) {
    perror("open(/proc/self/ns/mnt)");
    return -1;
  }

  target_fd = open(buf, O_RDONLY);
  if (target_fd < 0) {
    perror("open(/proc/<pid>/ns/mnt)");
    goto error;
  }

  if (fstat(self_fd, &self_stat)) {
    perror("fstat(self_fd)");
    goto error;
  }

  if (fstat(target_fd, &target_stat)) {
    perror("fstat(target_fd)");
    goto error;
  }

  // target and current ns are the same, avoid setns and close all fds
  if (self_stat.st_ino == target_stat.st_ino)
    goto error;

  if (setns(target_fd, CLONE_NEWNS)) {
    perror("setns(target)");
    goto error;
  }

  close(target_fd);
  return self_fd;

error:
  if (self_fd >= 0)
    close(self_fd);
  if (target_fd >= 0)
    close(target_fd);
  return -1;
}

static void exit_mount_ns(int fd) {
  if (fd < 0)
    return;

  if (setns(fd, CLONE_NEWNS))
    perror("setns");
}

void * bpf_attach_uprobe(int progfd, enum bpf_probe_attach_type attach_type, const char *ev_name,
                         const char *binary_path, uint64_t offset,
                         pid_t pid, int cpu, int group_fd,
                         perf_reader_cb cb, void *cb_cookie)
{
  int kfd;
  char buf[PATH_MAX];
  char new_name[128];
  struct perf_reader *reader = NULL;
  static char *event_type = "uprobe";
  int ns_fd = -1;
  int n;

  snprintf(new_name, sizeof(new_name), "%s_bcc_%d", ev_name, getpid());
  reader = perf_reader_new(cb, NULL, NULL, cb_cookie, probe_perf_reader_page_cnt);
  if (!reader)
    goto error;

  snprintf(buf, sizeof(buf), "/sys/kernel/debug/tracing/%s_events", event_type);
  kfd = open(buf, O_WRONLY | O_APPEND, 0);
  if (kfd < 0) {
    fprintf(stderr, "open(%s): %s\n", buf, strerror(errno));
    goto error;
  }

  n = snprintf(buf, sizeof(buf), "%c:%ss/%s %s:0x%lx", attach_type==BPF_PROBE_ENTRY ? 'p' : 'r',
               event_type, new_name, binary_path, offset);
  if (n >= sizeof(buf)) {
    close(kfd);
    goto error;
  }

  ns_fd = enter_mount_ns(pid);
  if (write(kfd, buf, strlen(buf)) < 0) {
    if (errno == EINVAL)
      fprintf(stderr, "check dmesg output for possible cause\n");
    close(kfd);
    goto error;
  }
  close(kfd);
  exit_mount_ns(ns_fd);
  ns_fd = -1;

  snprintf(buf, sizeof(buf), "/sys/kernel/debug/tracing/events/%ss/%s", event_type, new_name);
  if (bpf_attach_tracing_event(progfd, buf, reader, pid, cpu, group_fd) < 0)
    goto error;

  return reader;

error:
  exit_mount_ns(ns_fd);
  perf_reader_free(reader);
  return NULL;
}

static int bpf_detach_probe(const char *ev_name, const char *event_type)
{
  int kfd;
  char buf[256];
  snprintf(buf, sizeof(buf), "/sys/kernel/debug/tracing/%s_events", event_type);
  kfd = open(buf, O_WRONLY | O_APPEND, 0);
  if (kfd < 0) {
    fprintf(stderr, "open(%s): %s\n", buf, strerror(errno));
    return -1;
  }

  snprintf(buf, sizeof(buf), "-:%ss/%s_bcc_%d", event_type, ev_name, getpid());
  if (write(kfd, buf, strlen(buf)) < 0) {
    fprintf(stderr, "write(%s): %s\n", buf, strerror(errno));
    close(kfd);
    return -1;
  }
  close(kfd);

  return 0;
}

int bpf_detach_kprobe(const char *ev_name)
{
  char buf[256];
  int ret = bpf_detach_probe(ev_name, "kprobe");
  snprintf(buf, sizeof(buf), "/sys/kernel/debug/tracing/instances/bcc_%d", getpid());
  if (access(buf, F_OK) != -1) {
    rmdir(buf);
  }

  return ret;
}

int bpf_detach_uprobe(const char *ev_name)
{
  return bpf_detach_probe(ev_name, "uprobe");
}

void * bpf_attach_tracepoint(int progfd, const char *tp_category,
                             const char *tp_name, int pid, int cpu,
                             int group_fd, perf_reader_cb cb, void *cb_cookie) {
  char buf[256];
  struct perf_reader *reader = NULL;

  reader = perf_reader_new(cb, NULL, NULL, cb_cookie, probe_perf_reader_page_cnt);
  if (!reader)
    goto error;

  snprintf(buf, sizeof(buf), "/sys/kernel/debug/tracing/events/%s/%s",
           tp_category, tp_name);
  if (bpf_attach_tracing_event(progfd, buf, reader, pid, cpu, group_fd) < 0)
    goto error;

  return reader;

error:
  perf_reader_free(reader);
  return NULL;
}

int bpf_detach_tracepoint(const char *tp_category, const char *tp_name) {
  // Right now, there is nothing to do, but it's a good idea to encourage
  // callers to detach anything they attach.
  return 0;
}
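
/*
 * Illustrative usage sketch (not part of the original file): attaching a
 * loaded BPF_PROG_TYPE_TRACEPOINT program to the sched:sched_switch
 * tracepoint on CPU 0, for all processes.
 *
 *   void *reader = bpf_attach_tracepoint(prog_fd, "sched", "sched_switch",
 *                                        -1, 0, -1, NULL, NULL);
 *   if (!reader)
 *     fprintf(stderr, "failed to attach tracepoint\n");
 *   ...
 *   bpf_detach_tracepoint("sched", "sched_switch");
 */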

void * bpf_open_perf_buffer(perf_reader_raw_cb raw_cb,
                            perf_reader_lost_cb lost_cb, void *cb_cookie,
                            int pid, int cpu, int page_cnt) {
  int pfd;
  struct perf_event_attr attr = {};
  struct perf_reader *reader = NULL;

  reader = perf_reader_new(NULL, raw_cb, lost_cb, cb_cookie, page_cnt);
  if (!reader)
    goto error;

  attr.config = 10; // PERF_COUNT_SW_BPF_OUTPUT
  attr.type = PERF_TYPE_SOFTWARE;
  attr.sample_type = PERF_SAMPLE_RAW;
  attr.sample_period = 1;
  attr.wakeup_events = 1;
  pfd = syscall(__NR_perf_event_open, &attr, pid, cpu, -1, PERF_FLAG_FD_CLOEXEC);
  if (pfd < 0) {
    fprintf(stderr, "perf_event_open: %s\n", strerror(errno));
    fprintf(stderr, "   (check your kernel for PERF_COUNT_SW_BPF_OUTPUT support, 4.4 or newer)\n");
    goto error;
  }
  perf_reader_set_fd(reader, pfd);

  if (perf_reader_mmap(reader, attr.type, attr.sample_type) < 0)
    goto error;

  if (ioctl(pfd, PERF_EVENT_IOC_ENABLE, 0) < 0) {
    perror("ioctl(PERF_EVENT_IOC_ENABLE)");
    goto error;
  }

  return reader;

error:
  if (reader)
    perf_reader_free(reader);

  return NULL;
}
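
/*
 * Illustrative usage sketch (not part of the original file): opening one
 * perf ring buffer per online CPU and wiring a raw-sample callback.
 * handle_event() is a hypothetical callback whose signature is assumed to
 * match perf_reader_raw_cb from perf_reader.h; each reader's fd would then be
 * stored in a BPF_MAP_TYPE_PERF_EVENT_ARRAY slot (one per CPU) so the BPF
 * program can submit samples to it.
 *
 *   static void handle_event(void *cb_cookie, void *raw, int raw_size) {
 *     // consume one sample submitted by the BPF program
 *   }
 *
 *   for (int cpu = 0; cpu < ncpu; cpu++) {
 *     void *reader = bpf_open_perf_buffer(handle_event, NULL, NULL, -1, cpu, 8);
 *     if (!reader)
 *       fprintf(stderr, "failed to open perf buffer on cpu %d\n", cpu);
 *   }
 */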

static int invalid_perf_config(uint32_t type, uint64_t config) {
  switch (type) {
  case PERF_TYPE_HARDWARE:
    return config >= PERF_COUNT_HW_MAX;
  case PERF_TYPE_SOFTWARE:
    return config >= PERF_COUNT_SW_MAX;
  case PERF_TYPE_RAW:
    return 0;
  default:
    return 1;
  }
}

int bpf_open_perf_event(uint32_t type, uint64_t config, int pid, int cpu) {
  int fd;
  struct perf_event_attr attr = {};

  if (type != PERF_TYPE_HARDWARE && type != PERF_TYPE_RAW) {
    fprintf(stderr, "Unsupported perf event type\n");
    return -1;
  }
  if (invalid_perf_config(type, config)) {
    fprintf(stderr, "Invalid perf event config\n");
    return -1;
  }

  attr.sample_period = LONG_MAX;
  attr.type = type;
  attr.config = config;

  fd = syscall(__NR_perf_event_open, &attr, pid, cpu, -1, PERF_FLAG_FD_CLOEXEC);
  if (fd < 0) {
    fprintf(stderr, "perf_event_open: %s\n", strerror(errno));
    return -1;
  }

  if (ioctl(fd, PERF_EVENT_IOC_ENABLE, 0) < 0) {
    perror("ioctl(PERF_EVENT_IOC_ENABLE)");
    close(fd);
    return -1;
  }

  return fd;
}
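
/*
 * Illustrative usage sketch (not part of the original file): opening a
 * hardware cycle counter on CPU 0 for all processes. The returned fd is
 * typically placed into a BPF_MAP_TYPE_PERF_EVENT_ARRAY so a program can
 * read the counter with the bpf_perf_event_read() helper.
 *
 *   int ev_fd = bpf_open_perf_event(PERF_TYPE_HARDWARE,
 *                                   PERF_COUNT_HW_CPU_CYCLES, -1, 0);
 *   if (ev_fd < 0)
 *     fprintf(stderr, "failed to open perf event\n");
 */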

int bpf_attach_xdp(const char *dev_name, int progfd, uint32_t flags) {
  struct sockaddr_nl sa;
  int sock, seq = 0, len, ret = -1;
  char buf[4096];
  struct nlattr *nla, *nla_xdp;
  struct {
    struct nlmsghdr nh;
    struct ifinfomsg ifinfo;
    char attrbuf[64];
  } req;
  struct nlmsghdr *nh;
  struct nlmsgerr *err;
  socklen_t addrlen;

  memset(&sa, 0, sizeof(sa));
  sa.nl_family = AF_NETLINK;

  sock = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);
  if (sock < 0) {
    fprintf(stderr, "bpf: opening a netlink socket: %s\n", strerror(errno));
    return -1;
  }

  if (bind(sock, (struct sockaddr *)&sa, sizeof(sa)) < 0) {
    fprintf(stderr, "bpf: bind to netlink: %s\n", strerror(errno));
    goto cleanup;
  }

  addrlen = sizeof(sa);
  if (getsockname(sock, (struct sockaddr *)&sa, &addrlen) < 0) {
    fprintf(stderr, "bpf: get sock name of netlink: %s\n", strerror(errno));
    goto cleanup;
  }

  if (addrlen != sizeof(sa)) {
    fprintf(stderr, "bpf: wrong netlink address length: %d\n", addrlen);
    goto cleanup;
  }

  memset(&req, 0, sizeof(req));
  req.nh.nlmsg_len = NLMSG_LENGTH(sizeof(struct ifinfomsg));
  req.nh.nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK;
  req.nh.nlmsg_type = RTM_SETLINK;
  req.nh.nlmsg_pid = 0;
  req.nh.nlmsg_seq = ++seq;
  req.ifinfo.ifi_family = AF_UNSPEC;
  req.ifinfo.ifi_index = if_nametoindex(dev_name);
  if (req.ifinfo.ifi_index == 0) {
    fprintf(stderr, "bpf: Resolving device name to index: %s\n", strerror(errno));
    goto cleanup;
  }

  nla = (struct nlattr *)(((char *)&req)
                          + NLMSG_ALIGN(req.nh.nlmsg_len));
  nla->nla_type = NLA_F_NESTED | 43/*IFLA_XDP*/;

  nla_xdp = (struct nlattr *)((char *)nla + NLA_HDRLEN);
  nla->nla_len = NLA_HDRLEN;

  // we specify the FD passed over by the user
  nla_xdp->nla_type = 1/*IFLA_XDP_FD*/;
  nla_xdp->nla_len = NLA_HDRLEN + sizeof(progfd);
  memcpy((char *)nla_xdp + NLA_HDRLEN, &progfd, sizeof(progfd));
  nla->nla_len += nla_xdp->nla_len;

  // pass along any flags specified by the user
  if (flags) {
    nla_xdp = (struct nlattr *)((char *)nla + nla->nla_len);
    nla_xdp->nla_type = 3/*IFLA_XDP_FLAGS*/;
    nla_xdp->nla_len = NLA_HDRLEN + sizeof(flags);
    memcpy((char *)nla_xdp + NLA_HDRLEN, &flags, sizeof(flags));
    nla->nla_len += nla_xdp->nla_len;
  }

  req.nh.nlmsg_len += NLA_ALIGN(nla->nla_len);

  if (send(sock, &req, req.nh.nlmsg_len, 0) < 0) {
    fprintf(stderr, "bpf: send to netlink: %s\n", strerror(errno));
    goto cleanup;
  }

  len = recv(sock, buf, sizeof(buf), 0);
  if (len < 0) {
    fprintf(stderr, "bpf: recv from netlink: %s\n", strerror(errno));
    goto cleanup;
  }

  for (nh = (struct nlmsghdr *)buf; NLMSG_OK(nh, len);
       nh = NLMSG_NEXT(nh, len)) {
    if (nh->nlmsg_pid != sa.nl_pid) {
      fprintf(stderr, "bpf: Wrong pid %u, expected %u\n",
              nh->nlmsg_pid, sa.nl_pid);
      errno = EBADMSG;
      goto cleanup;
    }
    if (nh->nlmsg_seq != seq) {
      fprintf(stderr, "bpf: Wrong seq %d, expected %d\n",
              nh->nlmsg_seq, seq);
      errno = EBADMSG;
      goto cleanup;
    }
    switch (nh->nlmsg_type) {
    case NLMSG_ERROR:
      err = (struct nlmsgerr *)NLMSG_DATA(nh);
      if (!err->error)
        continue;
      fprintf(stderr, "bpf: nlmsg error %s\n", strerror(-err->error));
      errno = -err->error;
      goto cleanup;
    case NLMSG_DONE:
      break;
    }
  }

  ret = 0;

cleanup:
  close(sock);
  return ret;
}
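
/*
 * Illustrative usage sketch (not part of the original file): attaching a
 * loaded BPF_PROG_TYPE_XDP program to "eth0" (an assumed device name) in
 * generic/skb mode (XDP_FLAGS_SKB_MODE, bit 1 of the flags word). Passing a
 * program fd of -1 asks the kernel to remove the currently attached program.
 *
 *   if (bpf_attach_xdp("eth0", prog_fd, 1U << 1) < 0)
 *     fprintf(stderr, "failed to attach xdp program\n");
 *   ...
 *   bpf_attach_xdp("eth0", -1, 0);   // detach
 */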

int bpf_attach_perf_event(int progfd, uint32_t ev_type, uint32_t ev_config,
                          uint64_t sample_period, uint64_t sample_freq,
                          pid_t pid, int cpu, int group_fd) {
  if (ev_type != PERF_TYPE_HARDWARE && ev_type != PERF_TYPE_SOFTWARE) {
    fprintf(stderr, "Unsupported perf event type\n");
    return -1;
  }
  if (invalid_perf_config(ev_type, ev_config)) {
    fprintf(stderr, "Invalid perf event config\n");
    return -1;
  }
  if (!((sample_period > 0) ^ (sample_freq > 0))) {
    fprintf(
      stderr, "Exactly one of sample_period / sample_freq should be set\n"
    );
    return -1;
  }

  struct perf_event_attr attr = {};
  attr.type = ev_type;
  attr.config = ev_config;
  attr.inherit = 1;
  if (sample_freq > 0) {
    attr.freq = 1;
    attr.sample_freq = sample_freq;
  } else {
    attr.sample_period = sample_period;
  }

  int fd = syscall(
    __NR_perf_event_open, &attr, pid, cpu, group_fd, PERF_FLAG_FD_CLOEXEC
  );
  if (fd < 0) {
    perror("perf_event_open failed");
    return -1;
  }
  if (ioctl(fd, PERF_EVENT_IOC_SET_BPF, progfd) != 0) {
    perror("ioctl(PERF_EVENT_IOC_SET_BPF) failed");
    close(fd);
    return -1;
  }
  if (ioctl(fd, PERF_EVENT_IOC_ENABLE, 0) != 0) {
    perror("ioctl(PERF_EVENT_IOC_ENABLE) failed");
    close(fd);
    return -1;
  }

  return fd;
}
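
/*
 * Illustrative usage sketch (not part of the original file): attaching a
 * loaded BPF_PROG_TYPE_PERF_EVENT program so it fires 99 times per second
 * (frequency mode: sample_period = 0, sample_freq = 99), here on CPU 0
 * across all processes, driven by the software CPU-clock event.
 *
 *   int ev_fd = bpf_attach_perf_event(prog_fd, PERF_TYPE_SOFTWARE,
 *                                     PERF_COUNT_SW_CPU_CLOCK, 0, 99,
 *                                     -1, 0, -1);
 *   ...
 *   bpf_close_perf_event_fd(ev_fd);
 */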

int bpf_close_perf_event_fd(int fd) {
  int res, error = 0;
  if (fd >= 0) {
    res = ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);
    if (res != 0) {
      perror("ioctl(PERF_EVENT_IOC_DISABLE) failed");
      error = res;
    }
    res = close(fd);
    if (res != 0) {
      perror("close perf event FD failed");
      error = (res && !error) ? res : error;
    }
  }
  return error;
}

int bpf_obj_pin(int fd, const char *pathname)
{
  union bpf_attr attr;

  memset(&attr, 0, sizeof(attr));
  attr.pathname = ptr_to_u64((void *)pathname);
  attr.bpf_fd = fd;

  return syscall(__NR_bpf, BPF_OBJ_PIN, &attr, sizeof(attr));
}

int bpf_obj_get(const char *pathname)
{
  union bpf_attr attr;

  memset(&attr, 0, sizeof(attr));
  attr.pathname = ptr_to_u64((void *)pathname);

  return syscall(__NR_bpf, BPF_OBJ_GET, &attr, sizeof(attr));
}
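
/*
 * Illustrative usage sketch (not part of the original file): pinning a map fd
 * under the bpf filesystem (assumed mounted at /sys/fs/bpf) so another
 * process can pick it up later with bpf_obj_get().
 *
 *   if (bpf_obj_pin(map_fd, "/sys/fs/bpf/my_map") < 0)
 *     fprintf(stderr, "bpf_obj_pin: %s\n", strerror(errno));
 *
 *   // elsewhere, possibly in another process:
 *   int fd = bpf_obj_get("/sys/fs/bpf/my_map");
 */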