blob: df54c4c9e48a2c698bc3c964a0877a9f54f64ebe [file] [log] [blame]
Eric Leblond6061a3d2018-01-30 21:55:03 +01001// SPDX-License-Identifier: LGPL-2.1
2
Wang Nan1b76c132015-07-01 02:13:51 +00003/*
4 * Common eBPF ELF object loading operations.
5 *
6 * Copyright (C) 2013-2015 Alexei Starovoitov <ast@kernel.org>
7 * Copyright (C) 2015 Wang Nan <wangnan0@huawei.com>
8 * Copyright (C) 2015 Huawei Inc.
Joe Stringerf3675402017-01-26 13:19:56 -08009 * Copyright (C) 2017 Nicira, Inc.
Wang Nan203d1ca2016-07-04 11:02:42 +000010 *
11 * This program is free software; you can redistribute it and/or
12 * modify it under the terms of the GNU Lesser General Public
13 * License as published by the Free Software Foundation;
14 * version 2.1 of the License (not later!)
15 *
16 * This program is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 * GNU Lesser General Public License for more details.
20 *
21 * You should have received a copy of the GNU Lesser General Public
22 * License along with this program; if not, see <http://www.gnu.org/licenses>
Wang Nan1b76c132015-07-01 02:13:51 +000023 */
24
25#include <stdlib.h>
Wang Nanb3f59d62015-07-01 02:13:52 +000026#include <stdio.h>
27#include <stdarg.h>
Joe Stringerf3675402017-01-26 13:19:56 -080028#include <libgen.h>
Wang Nan34090912015-07-01 02:14:02 +000029#include <inttypes.h>
Wang Nanb3f59d62015-07-01 02:13:52 +000030#include <string.h>
Wang Nan1b76c132015-07-01 02:13:51 +000031#include <unistd.h>
Wang Nan1a5e3fb2015-07-01 02:13:53 +000032#include <fcntl.h>
33#include <errno.h>
Jakub Kicinskid0cabbb2018-05-10 10:24:40 -070034#include <perf-sys.h>
Wang Nan1b76c132015-07-01 02:13:51 +000035#include <asm/unistd.h>
Joe Stringere28ff1a2017-01-22 17:11:25 -080036#include <linux/err.h>
Wang Nancb1e5e92015-07-01 02:13:57 +000037#include <linux/kernel.h>
Wang Nan1b76c132015-07-01 02:13:51 +000038#include <linux/bpf.h>
Wang Nan9a208ef2015-07-01 02:14:10 +000039#include <linux/list.h>
Joe Stringerf3675402017-01-26 13:19:56 -080040#include <linux/limits.h>
41#include <sys/stat.h>
42#include <sys/types.h>
43#include <sys/vfs.h>
Wang Nan1a5e3fb2015-07-01 02:13:53 +000044#include <libelf.h>
45#include <gelf.h>
Wang Nan1b76c132015-07-01 02:13:51 +000046
47#include "libbpf.h"
Wang Nan52d33522015-07-01 02:14:04 +000048#include "bpf.h"
Martin KaFai Lau8a138ae2018-04-18 15:56:05 -070049#include "btf.h"
Wang Nanb3f59d62015-07-01 02:13:52 +000050
Wang Nan9b161372016-07-18 06:01:08 +000051#ifndef EM_BPF
52#define EM_BPF 247
53#endif
54
Joe Stringerf3675402017-01-26 13:19:56 -080055#ifndef BPF_FS_MAGIC
56#define BPF_FS_MAGIC 0xcafe4a11
57#endif
58
/* printf-style format checking attribute for the print callbacks. */
#define __printf(a, b)	__attribute__((format(printf, a, b)))

/* Default print callback: forward the formatted message to stderr. */
__printf(1, 2)
static int __base_pr(const char *format, ...)
{
	va_list ap;
	int n;

	va_start(ap, format);
	n = vfprintf(stderr, format, ap);
	va_end(ap);

	return n;
}
72
/*
 * Active print callbacks.  Warning and info default to __base_pr()
 * (stderr); debug defaults to NULL, i.e. debug output is dropped
 * unless a handler is installed via libbpf_set_print().
 */
static __printf(1, 2) libbpf_print_fn_t __pr_warning = __base_pr;
static __printf(1, 2) libbpf_print_fn_t __pr_info = __base_pr;
static __printf(1, 2) libbpf_print_fn_t __pr_debug;

/* Invoke a print callback only if set, prefixing the message with "libbpf: ". */
#define __pr(func, fmt, ...)	\
do {				\
	if ((func))		\
		(func)("libbpf: " fmt, ##__VA_ARGS__); \
} while (0)

#define pr_warning(fmt, ...)	__pr(__pr_warning, fmt, ##__VA_ARGS__)
#define pr_info(fmt, ...)	__pr(__pr_info, fmt, ##__VA_ARGS__)
#define pr_debug(fmt, ...)	__pr(__pr_debug, fmt, ##__VA_ARGS__)
86
/*
 * Install user-supplied print callbacks for the three verbosity
 * levels.  Passing NULL for a level silences it (see __pr()).
 * Plain global assignments — callers are presumably expected to set
 * this up before any concurrent library use.
 */
void libbpf_set_print(libbpf_print_fn_t warn,
		      libbpf_print_fn_t info,
		      libbpf_print_fn_t debug)
{
	__pr_warning = warn;
	__pr_info = info;
	__pr_debug = debug;
}
Wang Nan1a5e3fb2015-07-01 02:13:53 +000095
#define STRERR_BUFSIZE  128

/* Map a libbpf-private error code to its index in libbpf_strerror_table. */
#define ERRNO_OFFSET(e)		((e) - __LIBBPF_ERRNO__START)
#define ERRCODE_OFFSET(c)	ERRNO_OFFSET(LIBBPF_ERRNO__##c)
#define NR_ERRNO	(__LIBBPF_ERRNO__END - __LIBBPF_ERRNO__START)

/* Human-readable messages for libbpf-specific error codes. */
static const char *libbpf_strerror_table[NR_ERRNO] = {
	[ERRCODE_OFFSET(LIBELF)]	= "Something wrong in libelf",
	[ERRCODE_OFFSET(FORMAT)]	= "BPF object format invalid",
	[ERRCODE_OFFSET(KVERSION)]	= "'version' section incorrect or lost",
	[ERRCODE_OFFSET(ENDIAN)]	= "Endian mismatch",
	[ERRCODE_OFFSET(INTERNAL)]	= "Internal error in libbpf",
	[ERRCODE_OFFSET(RELOC)]		= "Relocation failed",
	[ERRCODE_OFFSET(VERIFY)]	= "Kernel verifier blocks program loading",
	[ERRCODE_OFFSET(PROG2BIG)]	= "Program too big",
	[ERRCODE_OFFSET(KVER)]		= "Incorrect kernel version",
	[ERRCODE_OFFSET(PROGTYPE)]	= "Kernel doesn't support this program type",
	[ERRCODE_OFFSET(WRNGPID)]	= "Wrong pid in netlink message",
	[ERRCODE_OFFSET(INVSEQ)]	= "Invalid netlink sequence",
};
116
117int libbpf_strerror(int err, char *buf, size_t size)
118{
119 if (!buf || !size)
120 return -1;
121
122 err = err > 0 ? err : -err;
123
124 if (err < __LIBBPF_ERRNO__START) {
125 int ret;
126
127 ret = strerror_r(err, buf, size);
128 buf[size - 1] = '\0';
129 return ret;
130 }
131
132 if (err < __LIBBPF_ERRNO__END) {
133 const char *msg;
134
135 msg = libbpf_strerror_table[ERRNO_OFFSET(err)];
136 snprintf(buf, size, "%s", msg);
137 buf[size - 1] = '\0';
138 return 0;
139 }
140
141 snprintf(buf, size, "Unknown libbpf error %d", err);
142 buf[size - 1] = '\0';
143 return -1;
144}
145
/* Run 'action', stash its result in 'err', and jump to 'out' on failure. */
#define CHECK_ERR(action, err, out) do {	\
	err = action;			\
	if (err)			\
		goto out;		\
} while(0)


/* Copied from tools/perf/util/util.h */
#ifndef zfree
/* Free *ptr and clear the pointer, guarding against later double-free. */
# define zfree(ptr) ({ free(*ptr); *ptr = NULL; })
#endif

#ifndef zclose
/* Close fd only if valid (>= 0), reset it to -1, yield close()'s result. */
# define zclose(fd) ({			\
	int ___err = 0;			\
	if ((fd) >= 0)			\
		___err = close((fd));	\
	fd = -1;			\
	___err; })
#endif

#ifdef HAVE_LIBELF_MMAP_SUPPORT
# define LIBBPF_ELF_C_READ_MMAP ELF_C_READ_MMAP
#else
/* Fall back to plain read when libelf lacks mmap support. */
# define LIBBPF_ELF_C_READ_MMAP ELF_C_READ
#endif
172
/*
 * bpf_prog should be a better name but it has been used in
 * linux/filter.h.
 */
struct bpf_program {
	/* Index in elf obj file, for relocation use. */
	int idx;
	char *name;		/* symbol name (or ".text"), strdup'd */
	char *section_name;	/* ELF section this program came from */
	struct bpf_insn *insns;	/* heap copy of the section's instructions */
	size_t insns_cnt, main_prog_cnt;
	enum bpf_prog_type type;

	/* Relocations collected from this section's SHT_REL companion. */
	struct reloc_desc {
		enum {
			RELO_LD64,	/* uses map_idx below */
			RELO_CALL,	/* uses text_off below */
		} type;
		int insn_idx;		/* instruction to patch */
		union {
			int map_idx;
			int text_off;
		};
	} *reloc_desc;
	int nr_reloc;

	/*
	 * Loaded instances of this program; nr == -1 means never
	 * loaded (see bpf_program__unload()).
	 */
	struct {
		int nr;
		int *fds;
	} instances;
	bpf_program_prep_t preprocessor;

	struct bpf_object *obj;		/* owning object, set on add */
	void *priv;			/* user data, freed via clear_priv */
	bpf_program_clear_priv_t clear_priv;

	enum bpf_attach_type expected_attach_type;
};
211
/* In-memory representation of one entry of the "maps" ELF section. */
struct bpf_map {
	int fd;			/* -1 until the map is created */
	char *name;		/* symbol name, strdup'd */
	size_t offset;		/* symbol offset within the maps section */
	struct bpf_map_def def;
	uint32_t btf_key_id;
	uint32_t btf_value_id;
	void *priv;		/* user data, freed via clear_priv */
	bpf_map_clear_priv_t clear_priv;
};
222
/* Every live bpf_object is linked here (see bpf_object__new()). */
static LIST_HEAD(bpf_objects_list);

struct bpf_object {
	char license[64];	/* contents of the "license" section */
	u32 kern_version;	/* contents of the "version" section */

	struct bpf_program *programs;
	size_t nr_programs;
	struct bpf_map *maps;
	size_t nr_maps;

	bool loaded;

	/*
	 * Information when doing elf related work. Only valid if fd
	 * is valid.
	 */
	struct {
		int fd;
		void *obj_buf;		/* caller-owned in-memory ELF image */
		size_t obj_buf_sz;
		Elf *elf;
		GElf_Ehdr ehdr;
		Elf_Data *symbols;	/* SYMTAB section data */
		size_t strtabidx;	/* string table for symbol names */
		struct {
			GElf_Shdr shdr;
			Elf_Data *data;
		} *reloc;		/* collected SHT_REL sections */
		int nr_reloc;
		int maps_shndx;		/* "maps" section index, -1 if absent */
		int text_shndx;		/* ".text" section index */
	} efile;
	/*
	 * All loaded bpf_object is linked in a list, which is
	 * hidden to caller. bpf_objects__<func> handlers deal with
	 * all objects.
	 */
	struct list_head list;

	struct btf *btf;	/* parsed .BTF section, NULL if absent/bad */

	void *priv;		/* user data, freed via clear_priv */
	bpf_object_clear_priv_t clear_priv;

	char path[];		/* flexible member: path given at open time */
};
#define obj_elf_valid(o)	((o)->efile.elf)
271
/*
 * Close every loaded instance fd of @prog and mark it unloaded
 * (instances.nr = -1).  Safe on NULL and on never-loaded programs.
 */
static void bpf_program__unload(struct bpf_program *prog)
{
	int i;

	if (!prog)
		return;

	/*
	 * If the object is opened but the program was never loaded,
	 * it is possible that prog->instances.nr == -1.
	 */
	if (prog->instances.nr > 0) {
		for (i = 0; i < prog->instances.nr; i++)
			zclose(prog->instances.fds[i]);
	} else if (prog->instances.nr != -1) {
		pr_warning("Internal error: instances.nr is %d\n",
			   prog->instances.nr);
	}

	prog->instances.nr = -1;
	zfree(&prog->instances.fds);
}
294
/*
 * Release everything owned by @prog: user private data (via its
 * clear_priv callback), loaded instances, and heap-allocated fields.
 * Leaves the struct in an "empty" state (idx = -1).  Safe on NULL.
 */
static void bpf_program__exit(struct bpf_program *prog)
{
	if (!prog)
		return;

	if (prog->clear_priv)
		prog->clear_priv(prog, prog->priv);

	prog->priv = NULL;
	prog->clear_priv = NULL;

	bpf_program__unload(prog);
	zfree(&prog->name);
	zfree(&prog->section_name);
	zfree(&prog->insns);
	zfree(&prog->reloc_desc);

	prog->nr_reloc = 0;
	prog->insns_cnt = 0;
	prog->idx = -1;
}
316
317static int
Martin KaFai Lau88cda1c2017-09-27 14:37:54 -0700318bpf_program__init(void *data, size_t size, char *section_name, int idx,
319 struct bpf_program *prog)
Wang Nana5b8bd42015-07-01 02:14:00 +0000320{
321 if (size < sizeof(struct bpf_insn)) {
Martin KaFai Lau88cda1c2017-09-27 14:37:54 -0700322 pr_warning("corrupted section '%s'\n", section_name);
Wang Nana5b8bd42015-07-01 02:14:00 +0000323 return -EINVAL;
324 }
325
326 bzero(prog, sizeof(*prog));
327
Martin KaFai Lau88cda1c2017-09-27 14:37:54 -0700328 prog->section_name = strdup(section_name);
Wang Nana5b8bd42015-07-01 02:14:00 +0000329 if (!prog->section_name) {
Jesper Dangaard Brouer077c0662018-02-08 12:48:17 +0100330 pr_warning("failed to alloc name for prog under section(%d) %s\n",
331 idx, section_name);
Wang Nana5b8bd42015-07-01 02:14:00 +0000332 goto errout;
333 }
334
335 prog->insns = malloc(size);
336 if (!prog->insns) {
Martin KaFai Lau88cda1c2017-09-27 14:37:54 -0700337 pr_warning("failed to alloc insns for prog under section %s\n",
338 section_name);
Wang Nana5b8bd42015-07-01 02:14:00 +0000339 goto errout;
340 }
341 prog->insns_cnt = size / sizeof(struct bpf_insn);
342 memcpy(prog->insns, data,
343 prog->insns_cnt * sizeof(struct bpf_insn));
344 prog->idx = idx;
Wang Nanb5805632015-11-16 12:10:09 +0000345 prog->instances.fds = NULL;
346 prog->instances.nr = -1;
Wang Nan5f44e4c82016-07-13 10:44:01 +0000347 prog->type = BPF_PROG_TYPE_KPROBE;
Wang Nana5b8bd42015-07-01 02:14:00 +0000348
349 return 0;
350errout:
351 bpf_program__exit(prog);
352 return -ENOMEM;
353}
354
/*
 * Build a bpf_program from section contents and append it to
 * obj->programs (grown with realloc into a temporary, so the old
 * array stays valid on failure).  Returns 0 or a negative errno.
 */
static int
bpf_object__add_program(struct bpf_object *obj, void *data, size_t size,
			char *section_name, int idx)
{
	struct bpf_program prog, *progs;
	int nr_progs, err;

	err = bpf_program__init(data, size, section_name, idx, &prog);
	if (err)
		return err;

	progs = obj->programs;
	nr_progs = obj->nr_programs;

	progs = realloc(progs, sizeof(progs[0]) * (nr_progs + 1));
	if (!progs) {
		/*
		 * In this case the original obj->programs
		 * is still valid, so don't need special treat for
		 * bpf_close_object().
		 */
		pr_warning("failed to alloc a new program under section '%s'\n",
			   section_name);
		bpf_program__exit(&prog);
		return -ENOMEM;
	}

	pr_debug("found program %s\n", prog.section_name);
	obj->programs = progs;
	obj->nr_programs = nr_progs + 1;
	prog.obj = obj;
	progs[nr_progs] = prog;	/* struct copy; prog's heap fields move in */
	return 0;
}
389
/*
 * Give every program its name: the STB_GLOBAL symbol that lives in the
 * program's section, or ".text" for the sub-program code section.
 * Returns 0, or a negative libbpf/errno code when a name cannot be
 * found or memory runs out.
 */
static int
bpf_object__init_prog_names(struct bpf_object *obj)
{
	Elf_Data *symbols = obj->efile.symbols;
	struct bpf_program *prog;
	size_t pi, si;

	for (pi = 0; pi < obj->nr_programs; pi++) {
		const char *name = NULL;

		prog = &obj->programs[pi];
		if (prog->idx == obj->efile.text_shndx) {
			name = ".text";
			goto skip_search;
		}

		/* Scan the symbol table for a global symbol in this section. */
		for (si = 0; si < symbols->d_size / sizeof(GElf_Sym) && !name;
		     si++) {
			GElf_Sym sym;

			if (!gelf_getsym(symbols, si, &sym))
				continue;
			if (sym.st_shndx != prog->idx)
				continue;
			if (GELF_ST_BIND(sym.st_info) != STB_GLOBAL)
				continue;

			name = elf_strptr(obj->efile.elf,
					  obj->efile.strtabidx,
					  sym.st_name);
			if (!name) {
				pr_warning("failed to get sym name string for prog %s\n",
					   prog->section_name);
				return -LIBBPF_ERRNO__LIBELF;
			}
		}

		if (!name) {
			pr_warning("failed to find sym for prog %s\n",
				   prog->section_name);
			return -EINVAL;
		}
skip_search:
		prog->name = strdup(name);
		if (!prog->name) {
			pr_warning("failed to allocate memory for prog sym %s\n",
				   name);
			return -ENOMEM;
		}
	}

	return 0;
}
443
/*
 * Allocate a bpf_object for @path.  @obj_buf, when non-NULL, is an
 * in-memory ELF image that remains owned by the caller (see comment
 * below).  The new object is linked into the global bpf_objects_list.
 * Returns the object or ERR_PTR(-ENOMEM).
 */
static struct bpf_object *bpf_object__new(const char *path,
					  void *obj_buf,
					  size_t obj_buf_sz)
{
	struct bpf_object *obj;

	/* One zeroed allocation also covers the flexible path[] member. */
	obj = calloc(1, sizeof(struct bpf_object) + strlen(path) + 1);
	if (!obj) {
		pr_warning("alloc memory failed for %s\n", path);
		return ERR_PTR(-ENOMEM);
	}

	strcpy(obj->path, path);
	obj->efile.fd = -1;

	/*
	 * Caller of this function should also calls
	 * bpf_object__elf_finish() after data collection to return
	 * obj_buf to user. If not, we should duplicate the buffer to
	 * avoid user freeing them before elf finish.
	 */
	obj->efile.obj_buf = obj_buf;
	obj->efile.obj_buf_sz = obj_buf_sz;
	obj->efile.maps_shndx = -1;

	obj->loaded = false;

	INIT_LIST_HEAD(&obj->list);
	list_add(&obj->list, &bpf_objects_list);
	return obj;
}
475
/*
 * Tear down all ELF parsing state of @obj: end the libelf handle
 * first (it may reference the fd/buffer), drop cached section
 * pointers, then close the file and forget the caller's buffer.
 * A no-op when no ELF is currently open.
 */
static void bpf_object__elf_finish(struct bpf_object *obj)
{
	if (!obj_elf_valid(obj))
		return;

	if (obj->efile.elf) {
		elf_end(obj->efile.elf);
		obj->efile.elf = NULL;
	}
	obj->efile.symbols = NULL;

	zfree(&obj->efile.reloc);
	obj->efile.nr_reloc = 0;
	zclose(obj->efile.fd);
	obj->efile.obj_buf = NULL;
	obj->efile.obj_buf_sz = 0;
}
493
/*
 * Open the libelf handle for @obj — over the caller-provided memory
 * buffer if one was given, otherwise by opening obj->path — and
 * validate that the header describes a relocatable eBPF object.
 * Returns 0 or a negative libbpf/errno code; all ELF state is torn
 * down on failure.
 */
static int bpf_object__elf_init(struct bpf_object *obj)
{
	int err = 0;
	GElf_Ehdr *ep;

	if (obj_elf_valid(obj)) {
		pr_warning("elf init: internal error\n");
		return -LIBBPF_ERRNO__LIBELF;
	}

	if (obj->efile.obj_buf_sz > 0) {
		/*
		 * obj_buf should have been validated by
		 * bpf_object__open_buffer().
		 */
		obj->efile.elf = elf_memory(obj->efile.obj_buf,
					    obj->efile.obj_buf_sz);
	} else {
		obj->efile.fd = open(obj->path, O_RDONLY);
		if (obj->efile.fd < 0) {
			pr_warning("failed to open %s: %s\n", obj->path,
				   strerror(errno));
			return -errno;
		}

		obj->efile.elf = elf_begin(obj->efile.fd,
					   LIBBPF_ELF_C_READ_MMAP,
					   NULL);
	}

	if (!obj->efile.elf) {
		pr_warning("failed to open %s as ELF file\n",
			   obj->path);
		err = -LIBBPF_ERRNO__LIBELF;
		goto errout;
	}

	if (!gelf_getehdr(obj->efile.elf, &obj->efile.ehdr)) {
		pr_warning("failed to get EHDR from %s\n",
			   obj->path);
		err = -LIBBPF_ERRNO__FORMAT;
		goto errout;
	}
	ep = &obj->efile.ehdr;

	/* Old LLVM set e_machine to EM_NONE */
	if ((ep->e_type != ET_REL) || (ep->e_machine && (ep->e_machine != EM_BPF))) {
		pr_warning("%s is not an eBPF object file\n",
			   obj->path);
		err = -LIBBPF_ERRNO__FORMAT;
		goto errout;
	}

	return 0;
errout:
	bpf_object__elf_finish(obj);
	return err;
}
552
Wang Nancc4228d2015-07-01 02:13:55 +0000553static int
554bpf_object__check_endianness(struct bpf_object *obj)
555{
556 static unsigned int const endian = 1;
557
558 switch (obj->efile.ehdr.e_ident[EI_DATA]) {
559 case ELFDATA2LSB:
560 /* We are big endian, BPF obj is little endian. */
561 if (*(unsigned char const *)&endian != 1)
562 goto mismatch;
563 break;
564
565 case ELFDATA2MSB:
566 /* We are little endian, BPF obj is big endian. */
567 if (*(unsigned char const *)&endian != 0)
568 goto mismatch;
569 break;
570 default:
Wang Nan6371ca3b2015-11-06 13:49:37 +0000571 return -LIBBPF_ERRNO__ENDIAN;
Wang Nancc4228d2015-07-01 02:13:55 +0000572 }
573
574 return 0;
575
576mismatch:
577 pr_warning("Error: endianness mismatch.\n");
Wang Nan6371ca3b2015-11-06 13:49:37 +0000578 return -LIBBPF_ERRNO__ENDIAN;
Wang Nancc4228d2015-07-01 02:13:55 +0000579}
580
/*
 * Copy the "license" section contents into obj->license.  The result
 * is NUL-terminated because obj was calloc'd (bpf_object__new()) and
 * at most sizeof(license) - 1 bytes are copied.  Always returns 0.
 */
static int
bpf_object__init_license(struct bpf_object *obj,
			 void *data, size_t size)
{
	memcpy(obj->license, data,
	       min(size, sizeof(obj->license) - 1));
	pr_debug("license of %s is %s\n", obj->path, obj->license);
	return 0;
}
590
/*
 * Parse the "version" section: exactly one u32 kernel version code.
 * Returns 0, or -LIBBPF_ERRNO__FORMAT when the section size is wrong.
 */
static int
bpf_object__init_kversion(struct bpf_object *obj,
			  void *data, size_t size)
{
	u32 kver;

	if (size != sizeof(kver)) {
		pr_warning("invalid kver section in %s\n", obj->path);
		return -LIBBPF_ERRNO__FORMAT;
	}
	memcpy(&kver, data, sizeof(kver));
	obj->kern_version = kver;
	pr_debug("kernel version of %s is %x\n", obj->path,
		 obj->kern_version);
	return 0;
}
607
Eric Leblond4708bbd2016-11-15 04:05:47 +0000608static int compare_bpf_map(const void *_a, const void *_b)
609{
610 const struct bpf_map *a = _a;
611 const struct bpf_map *b = _b;
612
613 return a->offset - b->offset;
614}
615
616static int
617bpf_object__init_maps(struct bpf_object *obj)
618{
Craig Gallekb13c5c12017-10-05 10:41:57 -0400619 int i, map_idx, map_def_sz, nr_maps = 0;
Eric Leblond4708bbd2016-11-15 04:05:47 +0000620 Elf_Scn *scn;
621 Elf_Data *data;
622 Elf_Data *symbols = obj->efile.symbols;
623
624 if (obj->efile.maps_shndx < 0)
625 return -EINVAL;
626 if (!symbols)
627 return -EINVAL;
628
629 scn = elf_getscn(obj->efile.elf, obj->efile.maps_shndx);
630 if (scn)
631 data = elf_getdata(scn, NULL);
632 if (!scn || !data) {
633 pr_warning("failed to get Elf_Data from map section %d\n",
634 obj->efile.maps_shndx);
635 return -EINVAL;
Wang Nan0b3d1ef2015-07-01 02:13:58 +0000636 }
637
Eric Leblond4708bbd2016-11-15 04:05:47 +0000638 /*
639 * Count number of maps. Each map has a name.
640 * Array of maps is not supported: only the first element is
641 * considered.
642 *
643 * TODO: Detect array of map and report error.
644 */
645 for (i = 0; i < symbols->d_size / sizeof(GElf_Sym); i++) {
646 GElf_Sym sym;
647
648 if (!gelf_getsym(symbols, i, &sym))
649 continue;
650 if (sym.st_shndx != obj->efile.maps_shndx)
651 continue;
652 nr_maps++;
653 }
654
655 /* Alloc obj->maps and fill nr_maps. */
656 pr_debug("maps in %s: %d maps in %zd bytes\n", obj->path,
657 nr_maps, data->d_size);
658
659 if (!nr_maps)
660 return 0;
Wang Nan9d759a92015-11-27 08:47:35 +0000661
Craig Gallekb13c5c12017-10-05 10:41:57 -0400662 /* Assume equally sized map definitions */
663 map_def_sz = data->d_size / nr_maps;
664 if (!data->d_size || (data->d_size % nr_maps) != 0) {
665 pr_warning("unable to determine map definition size "
666 "section %s, %d maps in %zd bytes\n",
667 obj->path, nr_maps, data->d_size);
668 return -EINVAL;
669 }
670
Wang Nan9d759a92015-11-27 08:47:35 +0000671 obj->maps = calloc(nr_maps, sizeof(obj->maps[0]));
672 if (!obj->maps) {
673 pr_warning("alloc maps for object failed\n");
Wang Nan0b3d1ef2015-07-01 02:13:58 +0000674 return -ENOMEM;
675 }
Wang Nan9d759a92015-11-27 08:47:35 +0000676 obj->nr_maps = nr_maps;
Wang Nan0b3d1ef2015-07-01 02:13:58 +0000677
Eric Leblond4708bbd2016-11-15 04:05:47 +0000678 /*
679 * fill all fd with -1 so won't close incorrect
680 * fd (fd=0 is stdin) when failure (zclose won't close
681 * negative fd)).
682 */
683 for (i = 0; i < nr_maps; i++)
Wang Nan9d759a92015-11-27 08:47:35 +0000684 obj->maps[i].fd = -1;
685
Eric Leblond4708bbd2016-11-15 04:05:47 +0000686 /*
687 * Fill obj->maps using data in "maps" section.
688 */
689 for (i = 0, map_idx = 0; i < symbols->d_size / sizeof(GElf_Sym); i++) {
Wang Nan561bbcc2015-11-27 08:47:36 +0000690 GElf_Sym sym;
Wang Nan561bbcc2015-11-27 08:47:36 +0000691 const char *map_name;
Eric Leblond4708bbd2016-11-15 04:05:47 +0000692 struct bpf_map_def *def;
Wang Nan561bbcc2015-11-27 08:47:36 +0000693
694 if (!gelf_getsym(symbols, i, &sym))
695 continue;
Wang Nan666810e2016-01-25 09:55:49 +0000696 if (sym.st_shndx != obj->efile.maps_shndx)
Wang Nan561bbcc2015-11-27 08:47:36 +0000697 continue;
698
699 map_name = elf_strptr(obj->efile.elf,
Wang Nan77ba9a52015-12-08 02:25:30 +0000700 obj->efile.strtabidx,
Wang Nan561bbcc2015-11-27 08:47:36 +0000701 sym.st_name);
Eric Leblond4708bbd2016-11-15 04:05:47 +0000702 obj->maps[map_idx].offset = sym.st_value;
Craig Gallekb13c5c12017-10-05 10:41:57 -0400703 if (sym.st_value + map_def_sz > data->d_size) {
Eric Leblond4708bbd2016-11-15 04:05:47 +0000704 pr_warning("corrupted maps section in %s: last map \"%s\" too small\n",
705 obj->path, map_name);
706 return -EINVAL;
Wang Nan561bbcc2015-11-27 08:47:36 +0000707 }
Eric Leblond4708bbd2016-11-15 04:05:47 +0000708
Wang Nan561bbcc2015-11-27 08:47:36 +0000709 obj->maps[map_idx].name = strdup(map_name);
Wang Nan973170e2015-12-08 02:25:29 +0000710 if (!obj->maps[map_idx].name) {
711 pr_warning("failed to alloc map name\n");
712 return -ENOMEM;
713 }
Eric Leblond4708bbd2016-11-15 04:05:47 +0000714 pr_debug("map %d is \"%s\"\n", map_idx,
Wang Nan561bbcc2015-11-27 08:47:36 +0000715 obj->maps[map_idx].name);
Eric Leblond4708bbd2016-11-15 04:05:47 +0000716 def = (struct bpf_map_def *)(data->d_buf + sym.st_value);
Craig Gallekb13c5c12017-10-05 10:41:57 -0400717 /*
718 * If the definition of the map in the object file fits in
719 * bpf_map_def, copy it. Any extra fields in our version
720 * of bpf_map_def will default to zero as a result of the
721 * calloc above.
722 */
723 if (map_def_sz <= sizeof(struct bpf_map_def)) {
724 memcpy(&obj->maps[map_idx].def, def, map_def_sz);
725 } else {
726 /*
727 * Here the map structure being read is bigger than what
728 * we expect, truncate if the excess bits are all zero.
729 * If they are not zero, reject this map as
730 * incompatible.
731 */
732 char *b;
733 for (b = ((char *)def) + sizeof(struct bpf_map_def);
734 b < ((char *)def) + map_def_sz; b++) {
735 if (*b != 0) {
736 pr_warning("maps section in %s: \"%s\" "
737 "has unrecognized, non-zero "
738 "options\n",
739 obj->path, map_name);
740 return -EINVAL;
741 }
742 }
743 memcpy(&obj->maps[map_idx].def, def,
744 sizeof(struct bpf_map_def));
745 }
Eric Leblond4708bbd2016-11-15 04:05:47 +0000746 map_idx++;
Wang Nan561bbcc2015-11-27 08:47:36 +0000747 }
Eric Leblond4708bbd2016-11-15 04:05:47 +0000748
749 qsort(obj->maps, obj->nr_maps, sizeof(obj->maps[0]), compare_bpf_map);
Craig Gallekb13c5c12017-10-05 10:41:57 -0400750 return 0;
Wang Nan561bbcc2015-11-27 08:47:36 +0000751}
752
Jesper Dangaard Brouere3d91b02018-02-08 12:48:32 +0100753static bool section_have_execinstr(struct bpf_object *obj, int idx)
754{
755 Elf_Scn *scn;
756 GElf_Shdr sh;
757
758 scn = elf_getscn(obj->efile.elf, idx);
759 if (!scn)
760 return false;
761
762 if (gelf_getshdr(scn, &sh) != &sh)
763 return false;
764
765 if (sh.sh_flags & SHF_EXECINSTR)
766 return true;
767
768 return false;
769}
770
Wang Nan29603662015-07-01 02:13:56 +0000771static int bpf_object__elf_collect(struct bpf_object *obj)
772{
773 Elf *elf = obj->efile.elf;
774 GElf_Ehdr *ep = &obj->efile.ehdr;
775 Elf_Scn *scn = NULL;
Wang Nan666810e2016-01-25 09:55:49 +0000776 int idx = 0, err = 0;
Wang Nan29603662015-07-01 02:13:56 +0000777
778 /* Elf is corrupted/truncated, avoid calling elf_strptr. */
779 if (!elf_rawdata(elf_getscn(elf, ep->e_shstrndx), NULL)) {
780 pr_warning("failed to get e_shstrndx from %s\n",
781 obj->path);
Wang Nan6371ca3b2015-11-06 13:49:37 +0000782 return -LIBBPF_ERRNO__FORMAT;
Wang Nan29603662015-07-01 02:13:56 +0000783 }
784
785 while ((scn = elf_nextscn(elf, scn)) != NULL) {
786 char *name;
787 GElf_Shdr sh;
788 Elf_Data *data;
789
790 idx++;
791 if (gelf_getshdr(scn, &sh) != &sh) {
Jesper Dangaard Brouer077c0662018-02-08 12:48:17 +0100792 pr_warning("failed to get section(%d) header from %s\n",
793 idx, obj->path);
Wang Nan6371ca3b2015-11-06 13:49:37 +0000794 err = -LIBBPF_ERRNO__FORMAT;
Wang Nan29603662015-07-01 02:13:56 +0000795 goto out;
796 }
797
798 name = elf_strptr(elf, ep->e_shstrndx, sh.sh_name);
799 if (!name) {
Jesper Dangaard Brouer077c0662018-02-08 12:48:17 +0100800 pr_warning("failed to get section(%d) name from %s\n",
801 idx, obj->path);
Wang Nan6371ca3b2015-11-06 13:49:37 +0000802 err = -LIBBPF_ERRNO__FORMAT;
Wang Nan29603662015-07-01 02:13:56 +0000803 goto out;
804 }
805
806 data = elf_getdata(scn, 0);
807 if (!data) {
Jesper Dangaard Brouer077c0662018-02-08 12:48:17 +0100808 pr_warning("failed to get section(%d) data from %s(%s)\n",
809 idx, name, obj->path);
Wang Nan6371ca3b2015-11-06 13:49:37 +0000810 err = -LIBBPF_ERRNO__FORMAT;
Wang Nan29603662015-07-01 02:13:56 +0000811 goto out;
812 }
Jesper Dangaard Brouer077c0662018-02-08 12:48:17 +0100813 pr_debug("section(%d) %s, size %ld, link %d, flags %lx, type=%d\n",
814 idx, name, (unsigned long)data->d_size,
Wang Nan29603662015-07-01 02:13:56 +0000815 (int)sh.sh_link, (unsigned long)sh.sh_flags,
816 (int)sh.sh_type);
Wang Nancb1e5e92015-07-01 02:13:57 +0000817
818 if (strcmp(name, "license") == 0)
819 err = bpf_object__init_license(obj,
820 data->d_buf,
821 data->d_size);
822 else if (strcmp(name, "version") == 0)
823 err = bpf_object__init_kversion(obj,
824 data->d_buf,
825 data->d_size);
Eric Leblond4708bbd2016-11-15 04:05:47 +0000826 else if (strcmp(name, "maps") == 0)
Wang Nan666810e2016-01-25 09:55:49 +0000827 obj->efile.maps_shndx = idx;
Martin KaFai Lau8a138ae2018-04-18 15:56:05 -0700828 else if (strcmp(name, BTF_ELF_SEC) == 0) {
829 obj->btf = btf__new(data->d_buf, data->d_size,
830 __pr_debug);
831 if (IS_ERR(obj->btf)) {
832 pr_warning("Error loading ELF section %s: %ld. Ignored and continue.\n",
833 BTF_ELF_SEC, PTR_ERR(obj->btf));
834 obj->btf = NULL;
835 }
836 } else if (sh.sh_type == SHT_SYMTAB) {
Wang Nanbec7d682015-07-01 02:13:59 +0000837 if (obj->efile.symbols) {
838 pr_warning("bpf: multiple SYMTAB in %s\n",
839 obj->path);
Wang Nan6371ca3b2015-11-06 13:49:37 +0000840 err = -LIBBPF_ERRNO__FORMAT;
Wang Nan77ba9a52015-12-08 02:25:30 +0000841 } else {
Wang Nanbec7d682015-07-01 02:13:59 +0000842 obj->efile.symbols = data;
Wang Nan77ba9a52015-12-08 02:25:30 +0000843 obj->efile.strtabidx = sh.sh_link;
844 }
Wang Nana5b8bd42015-07-01 02:14:00 +0000845 } else if ((sh.sh_type == SHT_PROGBITS) &&
846 (sh.sh_flags & SHF_EXECINSTR) &&
847 (data->d_size > 0)) {
Alexei Starovoitov48cca7e2017-12-14 17:55:10 -0800848 if (strcmp(name, ".text") == 0)
849 obj->efile.text_shndx = idx;
Wang Nana5b8bd42015-07-01 02:14:00 +0000850 err = bpf_object__add_program(obj, data->d_buf,
851 data->d_size, name, idx);
852 if (err) {
Wang Nan6371ca3b2015-11-06 13:49:37 +0000853 char errmsg[STRERR_BUFSIZE];
854
Wang Nana5b8bd42015-07-01 02:14:00 +0000855 strerror_r(-err, errmsg, sizeof(errmsg));
856 pr_warning("failed to alloc program %s (%s): %s",
857 name, obj->path, errmsg);
858 }
Wang Nanb62f06e2015-07-01 02:14:01 +0000859 } else if (sh.sh_type == SHT_REL) {
860 void *reloc = obj->efile.reloc;
861 int nr_reloc = obj->efile.nr_reloc + 1;
Jesper Dangaard Brouere3d91b02018-02-08 12:48:32 +0100862 int sec = sh.sh_info; /* points to other section */
863
864 /* Only do relo for section with exec instructions */
865 if (!section_have_execinstr(obj, sec)) {
866 pr_debug("skip relo %s(%d) for section(%d)\n",
867 name, idx, sec);
868 continue;
869 }
Wang Nanb62f06e2015-07-01 02:14:01 +0000870
871 reloc = realloc(reloc,
872 sizeof(*obj->efile.reloc) * nr_reloc);
873 if (!reloc) {
874 pr_warning("realloc failed\n");
875 err = -ENOMEM;
876 } else {
877 int n = nr_reloc - 1;
878
879 obj->efile.reloc = reloc;
880 obj->efile.nr_reloc = nr_reloc;
881
882 obj->efile.reloc[n].shdr = sh;
883 obj->efile.reloc[n].data = data;
884 }
Jesper Dangaard Brouer077c0662018-02-08 12:48:17 +0100885 } else {
886 pr_debug("skip section(%d) %s\n", idx, name);
Wang Nanbec7d682015-07-01 02:13:59 +0000887 }
Wang Nancb1e5e92015-07-01 02:13:57 +0000888 if (err)
889 goto out;
Wang Nan29603662015-07-01 02:13:56 +0000890 }
Wang Nan561bbcc2015-11-27 08:47:36 +0000891
Wang Nan77ba9a52015-12-08 02:25:30 +0000892 if (!obj->efile.strtabidx || obj->efile.strtabidx >= idx) {
893 pr_warning("Corrupted ELF file: index of strtab invalid\n");
894 return LIBBPF_ERRNO__FORMAT;
895 }
Martin KaFai Lau88cda1c2017-09-27 14:37:54 -0700896 if (obj->efile.maps_shndx >= 0) {
Eric Leblond4708bbd2016-11-15 04:05:47 +0000897 err = bpf_object__init_maps(obj);
Martin KaFai Lau88cda1c2017-09-27 14:37:54 -0700898 if (err)
899 goto out;
900 }
901 err = bpf_object__init_prog_names(obj);
Wang Nan29603662015-07-01 02:13:56 +0000902out:
903 return err;
904}
905
Wang Nan34090912015-07-01 02:14:02 +0000906static struct bpf_program *
907bpf_object__find_prog_by_idx(struct bpf_object *obj, int idx)
908{
909 struct bpf_program *prog;
910 size_t i;
911
912 for (i = 0; i < obj->nr_programs; i++) {
913 prog = &obj->programs[i];
914 if (prog->idx == idx)
915 return prog;
916 }
917 return NULL;
918}
919
/*
 * Parse one SHT_REL section targeting @prog and record a reloc_desc per
 * relocation entry:
 *   - BPF_JMP|BPF_CALL insns become RELO_CALL (call into .text),
 *   - BPF_LD|BPF_IMM|BPF_DW insns become RELO_LD64 (map fd load).
 * The descriptors are consumed later by bpf_program__relocate().
 * Returns 0 on success, negative libbpf error code otherwise.
 */
static int
bpf_program__collect_reloc(struct bpf_program *prog, GElf_Shdr *shdr,
			   Elf_Data *data, struct bpf_object *obj)
{
	Elf_Data *symbols = obj->efile.symbols;
	int text_shndx = obj->efile.text_shndx;
	int maps_shndx = obj->efile.maps_shndx;
	struct bpf_map *maps = obj->maps;
	size_t nr_maps = obj->nr_maps;
	int i, nrels;

	pr_debug("collecting relocating info for: '%s'\n",
		 prog->section_name);
	nrels = shdr->sh_size / shdr->sh_entsize;

	prog->reloc_desc = malloc(sizeof(*prog->reloc_desc) * nrels);
	if (!prog->reloc_desc) {
		pr_warning("failed to alloc memory in relocation\n");
		return -ENOMEM;
	}
	prog->nr_reloc = nrels;

	for (i = 0; i < nrels; i++) {
		GElf_Sym sym;
		GElf_Rel rel;
		unsigned int insn_idx;
		struct bpf_insn *insns = prog->insns;
		size_t map_idx;

		if (!gelf_getrel(data, i, &rel)) {
			pr_warning("relocation: failed to get %d reloc\n", i);
			return -LIBBPF_ERRNO__FORMAT;
		}

		/* Resolve the symbol the relocation refers to. */
		if (!gelf_getsym(symbols,
				 GELF_R_SYM(rel.r_info),
				 &sym)) {
			pr_warning("relocation: symbol %"PRIx64" not found\n",
				   GELF_R_SYM(rel.r_info));
			return -LIBBPF_ERRNO__FORMAT;
		}
		pr_debug("relo for %lld value %lld name %d\n",
			 (long long) (rel.r_info >> 32),
			 (long long) sym.st_value, sym.st_name);

		/* Only relocations against the maps section or .text
		 * (bpf-to-bpf calls) are meaningful here. */
		if (sym.st_shndx != maps_shndx && sym.st_shndx != text_shndx) {
			pr_warning("Program '%s' contains non-map related relo data pointing to section %u\n",
				   prog->section_name, sym.st_shndx);
			return -LIBBPF_ERRNO__RELOC;
		}

		/* r_offset is a byte offset into the program; convert it
		 * to an instruction index. */
		insn_idx = rel.r_offset / sizeof(struct bpf_insn);
		pr_debug("relocation: insn_idx=%u\n", insn_idx);

		if (insns[insn_idx].code == (BPF_JMP | BPF_CALL)) {
			if (insns[insn_idx].src_reg != BPF_PSEUDO_CALL) {
				pr_warning("incorrect bpf_call opcode\n");
				return -LIBBPF_ERRNO__RELOC;
			}
			/* text_off records the callee's offset within
			 * .text (symbol value). */
			prog->reloc_desc[i].type = RELO_CALL;
			prog->reloc_desc[i].insn_idx = insn_idx;
			prog->reloc_desc[i].text_off = sym.st_value;
			continue;
		}

		/* A map relocation must target an ld_imm64 instruction. */
		if (insns[insn_idx].code != (BPF_LD | BPF_IMM | BPF_DW)) {
			pr_warning("bpf: relocation: invalid relo for insns[%d].code 0x%x\n",
				   insn_idx, insns[insn_idx].code);
			return -LIBBPF_ERRNO__RELOC;
		}

		/* The symbol value is the map's offset within the maps
		 * section; match it against the parsed map definitions. */
		/* TODO: 'maps' is sorted. We can use bsearch to make it faster. */
		for (map_idx = 0; map_idx < nr_maps; map_idx++) {
			if (maps[map_idx].offset == sym.st_value) {
				pr_debug("relocation: find map %zd (%s) for insn %u\n",
					 map_idx, maps[map_idx].name, insn_idx);
				break;
			}
		}

		if (map_idx >= nr_maps) {
			pr_warning("bpf relocation: map_idx %d large than %d\n",
				   (int)map_idx, (int)nr_maps - 1);
			return -LIBBPF_ERRNO__RELOC;
		}

		prog->reloc_desc[i].type = RELO_LD64;
		prog->reloc_desc[i].insn_idx = insn_idx;
		prog->reloc_desc[i].map_idx = map_idx;
	}
	return 0;
}
1012
Martin KaFai Lau8a138ae2018-04-18 15:56:05 -07001013static int bpf_map_find_btf_info(struct bpf_map *map, const struct btf *btf)
1014{
1015 struct bpf_map_def *def = &map->def;
1016 const size_t max_name = 256;
1017 int64_t key_size, value_size;
1018 int32_t key_id, value_id;
1019 char name[max_name];
1020
1021 /* Find key type by name from BTF */
1022 if (snprintf(name, max_name, "%s_key", map->name) == max_name) {
1023 pr_warning("map:%s length of BTF key_type:%s_key is too long\n",
1024 map->name, map->name);
1025 return -EINVAL;
1026 }
1027
1028 key_id = btf__find_by_name(btf, name);
1029 if (key_id < 0) {
1030 pr_debug("map:%s key_type:%s cannot be found in BTF\n",
1031 map->name, name);
1032 return key_id;
1033 }
1034
1035 key_size = btf__resolve_size(btf, key_id);
1036 if (key_size < 0) {
1037 pr_warning("map:%s key_type:%s cannot get the BTF type_size\n",
1038 map->name, name);
1039 return key_size;
1040 }
1041
1042 if (def->key_size != key_size) {
1043 pr_warning("map:%s key_type:%s has BTF type_size:%ld != key_size:%u\n",
1044 map->name, name, key_size, def->key_size);
1045 return -EINVAL;
1046 }
1047
1048 /* Find value type from BTF */
1049 if (snprintf(name, max_name, "%s_value", map->name) == max_name) {
1050 pr_warning("map:%s length of BTF value_type:%s_value is too long\n",
1051 map->name, map->name);
1052 return -EINVAL;
1053 }
1054
1055 value_id = btf__find_by_name(btf, name);
1056 if (value_id < 0) {
1057 pr_debug("map:%s value_type:%s cannot be found in BTF\n",
1058 map->name, name);
1059 return value_id;
1060 }
1061
1062 value_size = btf__resolve_size(btf, value_id);
1063 if (value_size < 0) {
1064 pr_warning("map:%s value_type:%s cannot get the BTF type_size\n",
1065 map->name, name);
1066 return value_size;
1067 }
1068
1069 if (def->value_size != value_size) {
1070 pr_warning("map:%s value_type:%s has BTF type_size:%ld != value_size:%u\n",
1071 map->name, name, value_size, def->value_size);
1072 return -EINVAL;
1073 }
1074
1075 map->btf_key_id = key_id;
1076 map->btf_value_id = value_id;
1077
1078 return 0;
1079}
1080
Wang Nan52d33522015-07-01 02:14:04 +00001081static int
1082bpf_object__create_maps(struct bpf_object *obj)
1083{
Martin KaFai Lau8a138ae2018-04-18 15:56:05 -07001084 struct bpf_create_map_attr create_attr = {};
Wang Nan52d33522015-07-01 02:14:04 +00001085 unsigned int i;
Martin KaFai Lau8a138ae2018-04-18 15:56:05 -07001086 int err;
Wang Nan52d33522015-07-01 02:14:04 +00001087
Wang Nan9d759a92015-11-27 08:47:35 +00001088 for (i = 0; i < obj->nr_maps; i++) {
Martin KaFai Lau8a138ae2018-04-18 15:56:05 -07001089 struct bpf_map *map = &obj->maps[i];
1090 struct bpf_map_def *def = &map->def;
1091 int *pfd = &map->fd;
Wang Nan52d33522015-07-01 02:14:04 +00001092
Martin KaFai Lau8a138ae2018-04-18 15:56:05 -07001093 create_attr.name = map->name;
1094 create_attr.map_type = def->type;
1095 create_attr.map_flags = def->map_flags;
1096 create_attr.key_size = def->key_size;
1097 create_attr.value_size = def->value_size;
1098 create_attr.max_entries = def->max_entries;
1099 create_attr.btf_fd = 0;
1100 create_attr.btf_key_id = 0;
1101 create_attr.btf_value_id = 0;
1102
1103 if (obj->btf && !bpf_map_find_btf_info(map, obj->btf)) {
1104 create_attr.btf_fd = btf__fd(obj->btf);
1105 create_attr.btf_key_id = map->btf_key_id;
1106 create_attr.btf_value_id = map->btf_value_id;
1107 }
1108
1109 *pfd = bpf_create_map_xattr(&create_attr);
1110 if (*pfd < 0 && create_attr.btf_key_id) {
1111 pr_warning("Error in bpf_create_map_xattr(%s):%s(%d). Retrying without BTF.\n",
1112 map->name, strerror(errno), errno);
1113 create_attr.btf_fd = 0;
1114 create_attr.btf_key_id = 0;
1115 create_attr.btf_value_id = 0;
1116 map->btf_key_id = 0;
1117 map->btf_value_id = 0;
1118 *pfd = bpf_create_map_xattr(&create_attr);
1119 }
1120
Wang Nan52d33522015-07-01 02:14:04 +00001121 if (*pfd < 0) {
1122 size_t j;
Wang Nan52d33522015-07-01 02:14:04 +00001123
Martin KaFai Lau8a138ae2018-04-18 15:56:05 -07001124 err = *pfd;
Eric Leblond49bf4b32017-08-20 21:48:14 +02001125 pr_warning("failed to create map (name: '%s'): %s\n",
Martin KaFai Lau8a138ae2018-04-18 15:56:05 -07001126 map->name,
Wang Nan52d33522015-07-01 02:14:04 +00001127 strerror(errno));
1128 for (j = 0; j < i; j++)
Wang Nan9d759a92015-11-27 08:47:35 +00001129 zclose(obj->maps[j].fd);
Wang Nan52d33522015-07-01 02:14:04 +00001130 return err;
1131 }
Martin KaFai Lau8a138ae2018-04-18 15:56:05 -07001132 pr_debug("create map %s: fd=%d\n", map->name, *pfd);
Wang Nan52d33522015-07-01 02:14:04 +00001133 }
1134
Wang Nan52d33522015-07-01 02:14:04 +00001135 return 0;
1136}
1137
/*
 * Apply a RELO_CALL relocation: on first use, append a copy of the whole
 * .text section (bpf-to-bpf subprograms) to @prog's instruction array,
 * then patch the call instruction's imm field to target the appended copy.
 * Returns 0 on success, negative libbpf error code otherwise.
 */
static int
bpf_program__reloc_text(struct bpf_program *prog, struct bpf_object *obj,
			struct reloc_desc *relo)
{
	struct bpf_insn *insn, *new_insn;
	struct bpf_program *text;
	size_t new_cnt;

	if (relo->type != RELO_CALL)
		return -LIBBPF_ERRNO__RELOC;

	/* .text itself must not contain call relocations into .text. */
	if (prog->idx == obj->efile.text_shndx) {
		pr_warning("relo in .text insn %d into off %d\n",
			   relo->insn_idx, relo->text_off);
		return -LIBBPF_ERRNO__RELOC;
	}

	/* main_prog_cnt == 0 means .text has not been appended yet;
	 * do it once, lazily, on the first call relocation. */
	if (prog->main_prog_cnt == 0) {
		text = bpf_object__find_prog_by_idx(obj, obj->efile.text_shndx);
		if (!text) {
			pr_warning("no .text section found yet relo into text exist\n");
			return -LIBBPF_ERRNO__RELOC;
		}
		new_cnt = prog->insns_cnt + text->insns_cnt;
		new_insn = realloc(prog->insns, new_cnt * sizeof(*insn));
		if (!new_insn) {
			pr_warning("oom in prog realloc\n");
			return -ENOMEM;
		}
		memcpy(new_insn + prog->insns_cnt, text->insns,
		       text->insns_cnt * sizeof(*insn));
		prog->insns = new_insn;
		/* Remember where the original program ends / .text begins. */
		prog->main_prog_cnt = prog->insns_cnt;
		prog->insns_cnt = new_cnt;
		pr_debug("added %zd insn from %s to prog %s\n",
			 text->insns_cnt, text->section_name,
			 prog->section_name);
	}
	/* Rewrite the call target relative to the appended .text copy
	 * (call imm offsets are instruction-relative). */
	insn = &prog->insns[relo->insn_idx];
	insn->imm += prog->main_prog_cnt - relo->insn_idx;
	return 0;
}
1180
1181static int
Wang Nan9d759a92015-11-27 08:47:35 +00001182bpf_program__relocate(struct bpf_program *prog, struct bpf_object *obj)
Wang Nan8a47a6c2015-07-01 02:14:05 +00001183{
Alexei Starovoitov48cca7e2017-12-14 17:55:10 -08001184 int i, err;
Wang Nan8a47a6c2015-07-01 02:14:05 +00001185
1186 if (!prog || !prog->reloc_desc)
1187 return 0;
1188
1189 for (i = 0; i < prog->nr_reloc; i++) {
Alexei Starovoitov48cca7e2017-12-14 17:55:10 -08001190 if (prog->reloc_desc[i].type == RELO_LD64) {
1191 struct bpf_insn *insns = prog->insns;
1192 int insn_idx, map_idx;
Wang Nan8a47a6c2015-07-01 02:14:05 +00001193
Alexei Starovoitov48cca7e2017-12-14 17:55:10 -08001194 insn_idx = prog->reloc_desc[i].insn_idx;
1195 map_idx = prog->reloc_desc[i].map_idx;
Wang Nan8a47a6c2015-07-01 02:14:05 +00001196
Alexei Starovoitov48cca7e2017-12-14 17:55:10 -08001197 if (insn_idx >= (int)prog->insns_cnt) {
1198 pr_warning("relocation out of range: '%s'\n",
1199 prog->section_name);
1200 return -LIBBPF_ERRNO__RELOC;
1201 }
1202 insns[insn_idx].src_reg = BPF_PSEUDO_MAP_FD;
1203 insns[insn_idx].imm = obj->maps[map_idx].fd;
1204 } else {
1205 err = bpf_program__reloc_text(prog, obj,
1206 &prog->reloc_desc[i]);
1207 if (err)
1208 return err;
Wang Nan8a47a6c2015-07-01 02:14:05 +00001209 }
Wang Nan8a47a6c2015-07-01 02:14:05 +00001210 }
1211
1212 zfree(&prog->reloc_desc);
1213 prog->nr_reloc = 0;
1214 return 0;
1215}
1216
1217
1218static int
1219bpf_object__relocate(struct bpf_object *obj)
1220{
1221 struct bpf_program *prog;
1222 size_t i;
1223 int err;
1224
1225 for (i = 0; i < obj->nr_programs; i++) {
1226 prog = &obj->programs[i];
1227
Wang Nan9d759a92015-11-27 08:47:35 +00001228 err = bpf_program__relocate(prog, obj);
Wang Nan8a47a6c2015-07-01 02:14:05 +00001229 if (err) {
1230 pr_warning("failed to relocate '%s'\n",
1231 prog->section_name);
1232 return err;
1233 }
1234 }
1235 return 0;
1236}
1237
Wang Nan34090912015-07-01 02:14:02 +00001238static int bpf_object__collect_reloc(struct bpf_object *obj)
1239{
1240 int i, err;
1241
1242 if (!obj_elf_valid(obj)) {
1243 pr_warning("Internal error: elf object is closed\n");
Wang Nan6371ca3b2015-11-06 13:49:37 +00001244 return -LIBBPF_ERRNO__INTERNAL;
Wang Nan34090912015-07-01 02:14:02 +00001245 }
1246
1247 for (i = 0; i < obj->efile.nr_reloc; i++) {
1248 GElf_Shdr *shdr = &obj->efile.reloc[i].shdr;
1249 Elf_Data *data = obj->efile.reloc[i].data;
1250 int idx = shdr->sh_info;
1251 struct bpf_program *prog;
Wang Nan34090912015-07-01 02:14:02 +00001252
1253 if (shdr->sh_type != SHT_REL) {
1254 pr_warning("internal error at %d\n", __LINE__);
Wang Nan6371ca3b2015-11-06 13:49:37 +00001255 return -LIBBPF_ERRNO__INTERNAL;
Wang Nan34090912015-07-01 02:14:02 +00001256 }
1257
1258 prog = bpf_object__find_prog_by_idx(obj, idx);
1259 if (!prog) {
Jesper Dangaard Brouer077c0662018-02-08 12:48:17 +01001260 pr_warning("relocation failed: no section(%d)\n", idx);
Wang Nan6371ca3b2015-11-06 13:49:37 +00001261 return -LIBBPF_ERRNO__RELOC;
Wang Nan34090912015-07-01 02:14:02 +00001262 }
1263
Alexei Starovoitov48cca7e2017-12-14 17:55:10 -08001264 err = bpf_program__collect_reloc(prog,
Wang Nan34090912015-07-01 02:14:02 +00001265 shdr, data,
Alexei Starovoitov48cca7e2017-12-14 17:55:10 -08001266 obj);
Wang Nan34090912015-07-01 02:14:02 +00001267 if (err)
Wang Nan6371ca3b2015-11-06 13:49:37 +00001268 return err;
Wang Nan34090912015-07-01 02:14:02 +00001269 }
1270 return 0;
1271}
1272
/*
 * Load one program into the kernel via bpf_load_program_xattr() and store
 * the new fd in *pfd. On failure, classify the error into a LIBBPF_ERRNO__*
 * code: verifier rejection (log buffer non-empty), program too large,
 * wrong program type (probed by retrying as a kprobe), or kernel-version
 * mismatch. Returns 0 on success, negative libbpf error code otherwise.
 */
static int
load_program(enum bpf_prog_type type, enum bpf_attach_type expected_attach_type,
	     const char *name, struct bpf_insn *insns, int insns_cnt,
	     char *license, u32 kern_version, int *pfd)
{
	struct bpf_load_program_attr load_attr;
	char *log_buf;
	int ret;

	memset(&load_attr, 0, sizeof(struct bpf_load_program_attr));
	load_attr.prog_type = type;
	load_attr.expected_attach_type = expected_attach_type;
	load_attr.name = name;
	load_attr.insns = insns;
	load_attr.insns_cnt = insns_cnt;
	load_attr.license = license;
	load_attr.kern_version = kern_version;

	if (!load_attr.insns || !load_attr.insns_cnt)
		return -EINVAL;

	/* Log buffer is best-effort: loading proceeds without it on OOM. */
	log_buf = malloc(BPF_LOG_BUF_SIZE);
	if (!log_buf)
		pr_warning("Alloc log buffer for bpf loader error, continue without log\n");

	ret = bpf_load_program_xattr(&load_attr, log_buf, BPF_LOG_BUF_SIZE);

	if (ret >= 0) {
		*pfd = ret;
		ret = 0;
		goto out;
	}

	/* Load failed: refine the generic error below if we can tell why. */
	ret = -LIBBPF_ERRNO__LOAD;
	pr_warning("load bpf program failed: %s\n", strerror(errno));

	if (log_buf && log_buf[0] != '\0') {
		/* Verifier produced output: the program was rejected. */
		ret = -LIBBPF_ERRNO__VERIFY;
		pr_warning("-- BEGIN DUMP LOG ---\n");
		pr_warning("\n%s\n", log_buf);
		pr_warning("-- END LOG --\n");
	} else if (load_attr.insns_cnt >= BPF_MAXINSNS) {
		pr_warning("Program too large (%zu insns), at most %d insns\n",
			   load_attr.insns_cnt, BPF_MAXINSNS);
		ret = -LIBBPF_ERRNO__PROG2BIG;
	} else {
		/* Wrong program type? Probe by retrying as a kprobe, which
		 * has the least restrictive load requirements. */
		if (load_attr.prog_type != BPF_PROG_TYPE_KPROBE) {
			int fd;

			load_attr.prog_type = BPF_PROG_TYPE_KPROBE;
			load_attr.expected_attach_type = 0;
			fd = bpf_load_program_xattr(&load_attr, NULL, 0);
			if (fd >= 0) {
				close(fd);
				ret = -LIBBPF_ERRNO__PROGTYPE;
				goto out;
			}
		}

		/* No verifier log at all: likely a kernel-version mismatch. */
		if (log_buf)
			ret = -LIBBPF_ERRNO__KVER;
	}

out:
	free(log_buf);
	return ret;
}
1341
/*
 * Load @prog into the kernel. Without a preprocessor the program is loaded
 * exactly once (instances.fds[0]). With a preprocessor, each of the
 * instances.nr instances is generated by the preprocessor callback and
 * loaded separately; an instance producing no instructions is skipped and
 * recorded with fd -1. The source instructions are freed afterwards either
 * way. Returns 0 on success, negative error code otherwise.
 */
static int
bpf_program__load(struct bpf_program *prog,
		  char *license, u32 kern_version)
{
	int err = 0, fd, i;

	/* Instance bookkeeping not set up yet: default to one instance. */
	if (prog->instances.nr < 0 || !prog->instances.fds) {
		if (prog->preprocessor) {
			pr_warning("Internal error: can't load program '%s'\n",
				   prog->section_name);
			return -LIBBPF_ERRNO__INTERNAL;
		}

		prog->instances.fds = malloc(sizeof(int));
		if (!prog->instances.fds) {
			pr_warning("Not enough memory for BPF fds\n");
			return -ENOMEM;
		}
		prog->instances.nr = 1;
		prog->instances.fds[0] = -1;
	}

	if (!prog->preprocessor) {
		/* Single-instance path: load the insns as-is. */
		if (prog->instances.nr != 1) {
			pr_warning("Program '%s' is inconsistent: nr(%d) != 1\n",
				   prog->section_name, prog->instances.nr);
		}
		err = load_program(prog->type, prog->expected_attach_type,
				   prog->name, prog->insns, prog->insns_cnt,
				   license, kern_version, &fd);
		if (!err)
			prog->instances.fds[0] = fd;
		goto out;
	}

	/* Preprocessor path: generate and load each instance. */
	for (i = 0; i < prog->instances.nr; i++) {
		struct bpf_prog_prep_result result;
		bpf_program_prep_t preprocessor = prog->preprocessor;

		bzero(&result, sizeof(result));
		err = preprocessor(prog, i, prog->insns,
				   prog->insns_cnt, &result);
		if (err) {
			pr_warning("Preprocessing the %dth instance of program '%s' failed\n",
				   i, prog->section_name);
			goto out;
		}

		/* Empty result means this instance should not be loaded. */
		if (!result.new_insn_ptr || !result.new_insn_cnt) {
			pr_debug("Skip loading the %dth instance of program '%s'\n",
				 i, prog->section_name);
			prog->instances.fds[i] = -1;
			if (result.pfd)
				*result.pfd = -1;
			continue;
		}

		err = load_program(prog->type, prog->expected_attach_type,
				   prog->name, result.new_insn_ptr,
				   result.new_insn_cnt,
				   license, kern_version, &fd);

		if (err) {
			pr_warning("Loading the %dth instance of program '%s' failed\n",
				   i, prog->section_name);
			goto out;
		}

		if (result.pfd)
			*result.pfd = fd;
		prog->instances.fds[i] = fd;
	}
out:
	if (err)
		pr_warning("failed to load program '%s'\n",
			   prog->section_name);
	/* Instructions are no longer needed once loaded (or failed). */
	zfree(&prog->insns);
	prog->insns_cnt = 0;
	return err;
}
1422
1423static int
1424bpf_object__load_progs(struct bpf_object *obj)
1425{
1426 size_t i;
1427 int err;
1428
1429 for (i = 0; i < obj->nr_programs; i++) {
Alexei Starovoitov48cca7e2017-12-14 17:55:10 -08001430 if (obj->programs[i].idx == obj->efile.text_shndx)
1431 continue;
Wang Nan55cffde2015-07-01 02:14:07 +00001432 err = bpf_program__load(&obj->programs[i],
1433 obj->license,
1434 obj->kern_version);
1435 if (err)
1436 return err;
1437 }
1438 return 0;
1439}
1440
Jakub Kicinski17387dd2018-05-10 10:24:42 -07001441static bool bpf_prog_type__needs_kver(enum bpf_prog_type type)
Wang Nancb1e5e92015-07-01 02:13:57 +00001442{
Jakub Kicinski17387dd2018-05-10 10:24:42 -07001443 switch (type) {
1444 case BPF_PROG_TYPE_SOCKET_FILTER:
1445 case BPF_PROG_TYPE_SCHED_CLS:
1446 case BPF_PROG_TYPE_SCHED_ACT:
1447 case BPF_PROG_TYPE_XDP:
1448 case BPF_PROG_TYPE_CGROUP_SKB:
1449 case BPF_PROG_TYPE_CGROUP_SOCK:
1450 case BPF_PROG_TYPE_LWT_IN:
1451 case BPF_PROG_TYPE_LWT_OUT:
1452 case BPF_PROG_TYPE_LWT_XMIT:
1453 case BPF_PROG_TYPE_SOCK_OPS:
1454 case BPF_PROG_TYPE_SK_SKB:
1455 case BPF_PROG_TYPE_CGROUP_DEVICE:
1456 case BPF_PROG_TYPE_SK_MSG:
1457 case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
1458 return false;
1459 case BPF_PROG_TYPE_UNSPEC:
1460 case BPF_PROG_TYPE_KPROBE:
1461 case BPF_PROG_TYPE_TRACEPOINT:
1462 case BPF_PROG_TYPE_PERF_EVENT:
1463 case BPF_PROG_TYPE_RAW_TRACEPOINT:
1464 default:
1465 return true;
1466 }
1467}
1468
1469static int bpf_object__validate(struct bpf_object *obj, bool needs_kver)
1470{
1471 if (needs_kver && obj->kern_version == 0) {
Wang Nancb1e5e92015-07-01 02:13:57 +00001472 pr_warning("%s doesn't provide kernel version\n",
1473 obj->path);
Wang Nan6371ca3b2015-11-06 13:49:37 +00001474 return -LIBBPF_ERRNO__KVERSION;
Wang Nancb1e5e92015-07-01 02:13:57 +00001475 }
1476 return 0;
1477}
1478
/*
 * Common open path for file- and buffer-backed objects: allocate the
 * object, parse the ELF (sections, maps, relocations) and validate it.
 * The libelf handles are released before returning; only the parsed state
 * is kept. CHECK_ERR() jumps to the 'out' cleanup label on failure.
 * Returns the object or an ERR_PTR()-encoded negative error.
 */
static struct bpf_object *
__bpf_object__open(const char *path, void *obj_buf, size_t obj_buf_sz,
		   bool needs_kver)
{
	struct bpf_object *obj;
	int err;

	if (elf_version(EV_CURRENT) == EV_NONE) {
		pr_warning("failed to init libelf for %s\n", path);
		return ERR_PTR(-LIBBPF_ERRNO__LIBELF);
	}

	obj = bpf_object__new(path, obj_buf, obj_buf_sz);
	if (IS_ERR(obj))
		return obj;

	CHECK_ERR(bpf_object__elf_init(obj), err, out);
	CHECK_ERR(bpf_object__check_endianness(obj), err, out);
	CHECK_ERR(bpf_object__elf_collect(obj), err, out);
	CHECK_ERR(bpf_object__collect_reloc(obj), err, out);
	CHECK_ERR(bpf_object__validate(obj, needs_kver), err, out);

	/* ELF data is fully digested; drop the libelf state. */
	bpf_object__elf_finish(obj);
	return obj;
out:
	bpf_object__close(obj);
	return ERR_PTR(err);
}
1507
1508struct bpf_object *bpf_object__open(const char *path)
1509{
1510 /* param validation */
1511 if (!path)
1512 return NULL;
1513
1514 pr_debug("loading %s\n", path);
1515
Jakub Kicinski17387dd2018-05-10 10:24:42 -07001516 return __bpf_object__open(path, NULL, 0, true);
Wang Nan6c956392015-07-01 02:13:54 +00001517}
1518
1519struct bpf_object *bpf_object__open_buffer(void *obj_buf,
Wang Nanacf860a2015-08-27 02:30:55 +00001520 size_t obj_buf_sz,
1521 const char *name)
Wang Nan6c956392015-07-01 02:13:54 +00001522{
Wang Nanacf860a2015-08-27 02:30:55 +00001523 char tmp_name[64];
1524
Wang Nan6c956392015-07-01 02:13:54 +00001525 /* param validation */
1526 if (!obj_buf || obj_buf_sz <= 0)
1527 return NULL;
1528
Wang Nanacf860a2015-08-27 02:30:55 +00001529 if (!name) {
1530 snprintf(tmp_name, sizeof(tmp_name), "%lx-%lx",
1531 (unsigned long)obj_buf,
1532 (unsigned long)obj_buf_sz);
1533 tmp_name[sizeof(tmp_name) - 1] = '\0';
1534 name = tmp_name;
1535 }
1536 pr_debug("loading object '%s' from buffer\n",
1537 name);
Wang Nan6c956392015-07-01 02:13:54 +00001538
Jakub Kicinski17387dd2018-05-10 10:24:42 -07001539 return __bpf_object__open(name, obj_buf, obj_buf_sz, true);
Wang Nan1a5e3fb2015-07-01 02:13:53 +00001540}
1541
Wang Nan52d33522015-07-01 02:14:04 +00001542int bpf_object__unload(struct bpf_object *obj)
1543{
1544 size_t i;
1545
1546 if (!obj)
1547 return -EINVAL;
1548
Wang Nan9d759a92015-11-27 08:47:35 +00001549 for (i = 0; i < obj->nr_maps; i++)
1550 zclose(obj->maps[i].fd);
Wang Nan52d33522015-07-01 02:14:04 +00001551
Wang Nan55cffde2015-07-01 02:14:07 +00001552 for (i = 0; i < obj->nr_programs; i++)
1553 bpf_program__unload(&obj->programs[i]);
1554
Wang Nan52d33522015-07-01 02:14:04 +00001555 return 0;
1556}
1557
/*
 * Load @obj into the kernel: create the maps, relocate programs against
 * the new map fds, then load the programs. An object may only be loaded
 * once. CHECK_ERR() jumps to 'out' on failure, where everything created
 * so far is torn down. Returns 0 on success, negative error otherwise.
 */
int bpf_object__load(struct bpf_object *obj)
{
	int err;

	if (!obj)
		return -EINVAL;

	if (obj->loaded) {
		pr_warning("object should not be loaded twice\n");
		return -EINVAL;
	}

	obj->loaded = true;

	CHECK_ERR(bpf_object__create_maps(obj), err, out);
	CHECK_ERR(bpf_object__relocate(obj), err, out);
	CHECK_ERR(bpf_object__load_progs(obj), err, out);

	return 0;
out:
	bpf_object__unload(obj);
	pr_warning("failed to load object '%s'\n", obj->path);
	return err;
}
1582
Joe Stringerf3675402017-01-26 13:19:56 -08001583static int check_path(const char *path)
1584{
1585 struct statfs st_fs;
1586 char *dname, *dir;
1587 int err = 0;
1588
1589 if (path == NULL)
1590 return -EINVAL;
1591
1592 dname = strdup(path);
1593 if (dname == NULL)
1594 return -ENOMEM;
1595
1596 dir = dirname(dname);
1597 if (statfs(dir, &st_fs)) {
1598 pr_warning("failed to statfs %s: %s\n", dir, strerror(errno));
1599 err = -errno;
1600 }
1601 free(dname);
1602
1603 if (!err && st_fs.f_type != BPF_FS_MAGIC) {
1604 pr_warning("specified path %s is not on BPF FS\n", path);
1605 err = -EINVAL;
1606 }
1607
1608 return err;
1609}
1610
1611int bpf_program__pin_instance(struct bpf_program *prog, const char *path,
1612 int instance)
1613{
1614 int err;
1615
1616 err = check_path(path);
1617 if (err)
1618 return err;
1619
1620 if (prog == NULL) {
1621 pr_warning("invalid program pointer\n");
1622 return -EINVAL;
1623 }
1624
1625 if (instance < 0 || instance >= prog->instances.nr) {
1626 pr_warning("invalid prog instance %d of prog %s (max %d)\n",
1627 instance, prog->section_name, prog->instances.nr);
1628 return -EINVAL;
1629 }
1630
1631 if (bpf_obj_pin(prog->instances.fds[instance], path)) {
1632 pr_warning("failed to pin program: %s\n", strerror(errno));
1633 return -errno;
1634 }
1635 pr_debug("pinned program '%s'\n", path);
1636
1637 return 0;
1638}
1639
/* Create @path (mode 0700); an already-existing directory is not an error. */
static int make_dir(const char *path)
{
	int err;

	if (!mkdir(path, 0700) || errno == EEXIST)
		return 0;

	err = -errno;
	pr_warning("failed to mkdir %s: %s\n", path, strerror(-err));
	return err;
}
1651
1652int bpf_program__pin(struct bpf_program *prog, const char *path)
1653{
1654 int i, err;
1655
1656 err = check_path(path);
1657 if (err)
1658 return err;
1659
1660 if (prog == NULL) {
1661 pr_warning("invalid program pointer\n");
1662 return -EINVAL;
1663 }
1664
1665 if (prog->instances.nr <= 0) {
1666 pr_warning("no instances of prog %s to pin\n",
1667 prog->section_name);
1668 return -EINVAL;
1669 }
1670
1671 err = make_dir(path);
1672 if (err)
1673 return err;
1674
1675 for (i = 0; i < prog->instances.nr; i++) {
1676 char buf[PATH_MAX];
1677 int len;
1678
1679 len = snprintf(buf, PATH_MAX, "%s/%d", path, i);
1680 if (len < 0)
1681 return -EINVAL;
1682 else if (len >= PATH_MAX)
1683 return -ENAMETOOLONG;
1684
1685 err = bpf_program__pin_instance(prog, buf, i);
1686 if (err)
1687 return err;
1688 }
1689
1690 return 0;
1691}
1692
Joe Stringerb6989f32017-01-26 13:19:57 -08001693int bpf_map__pin(struct bpf_map *map, const char *path)
1694{
1695 int err;
1696
1697 err = check_path(path);
1698 if (err)
1699 return err;
1700
1701 if (map == NULL) {
1702 pr_warning("invalid map pointer\n");
1703 return -EINVAL;
1704 }
1705
1706 if (bpf_obj_pin(map->fd, path)) {
1707 pr_warning("failed to pin map: %s\n", strerror(errno));
1708 return -errno;
1709 }
1710
1711 pr_debug("pinned map '%s'\n", path);
1712 return 0;
1713}
1714
Joe Stringerd5148d82017-01-26 13:19:58 -08001715int bpf_object__pin(struct bpf_object *obj, const char *path)
1716{
1717 struct bpf_program *prog;
1718 struct bpf_map *map;
1719 int err;
1720
1721 if (!obj)
1722 return -ENOENT;
1723
1724 if (!obj->loaded) {
1725 pr_warning("object not yet loaded; load it first\n");
1726 return -ENOENT;
1727 }
1728
1729 err = make_dir(path);
1730 if (err)
1731 return err;
1732
1733 bpf_map__for_each(map, obj) {
1734 char buf[PATH_MAX];
1735 int len;
1736
1737 len = snprintf(buf, PATH_MAX, "%s/%s", path,
1738 bpf_map__name(map));
1739 if (len < 0)
1740 return -EINVAL;
1741 else if (len >= PATH_MAX)
1742 return -ENAMETOOLONG;
1743
1744 err = bpf_map__pin(map, buf);
1745 if (err)
1746 return err;
1747 }
1748
1749 bpf_object__for_each_program(prog, obj) {
1750 char buf[PATH_MAX];
1751 int len;
1752
1753 len = snprintf(buf, PATH_MAX, "%s/%s", path,
1754 prog->section_name);
1755 if (len < 0)
1756 return -EINVAL;
1757 else if (len >= PATH_MAX)
1758 return -ENAMETOOLONG;
1759
1760 err = bpf_program__pin(prog, buf);
1761 if (err)
1762 return err;
1763 }
1764
1765 return 0;
1766}
1767
/*
 * Destroy @obj: run the user's private-data destructor, release libelf
 * state, unload kernel resources, free BTF, then free all map and program
 * bookkeeping, unlink the object from the global list and free it.
 */
void bpf_object__close(struct bpf_object *obj)
{
	size_t i;

	if (!obj)
		return;

	if (obj->clear_priv)
		obj->clear_priv(obj, obj->priv);

	bpf_object__elf_finish(obj);
	bpf_object__unload(obj);
	btf__free(obj->btf);

	for (i = 0; i < obj->nr_maps; i++) {
		zfree(&obj->maps[i].name);
		/* Per-map private data gets its own destructor callback. */
		if (obj->maps[i].clear_priv)
			obj->maps[i].clear_priv(&obj->maps[i],
						obj->maps[i].priv);
		obj->maps[i].priv = NULL;
		obj->maps[i].clear_priv = NULL;
	}
	zfree(&obj->maps);
	obj->nr_maps = 0;

	if (obj->programs && obj->nr_programs) {
		for (i = 0; i < obj->nr_programs; i++)
			bpf_program__exit(&obj->programs[i]);
	}
	zfree(&obj->programs);

	/* Remove from the global bpf_objects_list before freeing. */
	list_del(&obj->list);
	free(obj);
}
Wang Nanaa9b1ac2015-07-01 02:14:08 +00001802
Wang Nan9a208ef2015-07-01 02:14:10 +00001803struct bpf_object *
1804bpf_object__next(struct bpf_object *prev)
1805{
1806 struct bpf_object *next;
1807
1808 if (!prev)
1809 next = list_first_entry(&bpf_objects_list,
1810 struct bpf_object,
1811 list);
1812 else
1813 next = list_next_entry(prev, list);
1814
1815 /* Empty list is noticed here so don't need checking on entry. */
1816 if (&next->list == &bpf_objects_list)
1817 return NULL;
1818
1819 return next;
1820}
1821
Arnaldo Carvalho de Meloa7fe0452016-06-03 12:22:51 -03001822const char *bpf_object__name(struct bpf_object *obj)
Wang Nanacf860a2015-08-27 02:30:55 +00001823{
Arnaldo Carvalho de Meloa7fe0452016-06-03 12:22:51 -03001824 return obj ? obj->path : ERR_PTR(-EINVAL);
Wang Nanacf860a2015-08-27 02:30:55 +00001825}
1826
Arnaldo Carvalho de Meloa7fe0452016-06-03 12:22:51 -03001827unsigned int bpf_object__kversion(struct bpf_object *obj)
Wang Nan45825d82015-11-06 13:49:38 +00001828{
Arnaldo Carvalho de Meloa7fe0452016-06-03 12:22:51 -03001829 return obj ? obj->kern_version : 0;
Wang Nan45825d82015-11-06 13:49:38 +00001830}
1831
Martin KaFai Lau8a138ae2018-04-18 15:56:05 -07001832int bpf_object__btf_fd(const struct bpf_object *obj)
1833{
1834 return obj->btf ? btf__fd(obj->btf) : -1;
1835}
1836
Wang Nan10931d22016-11-26 07:03:26 +00001837int bpf_object__set_priv(struct bpf_object *obj, void *priv,
1838 bpf_object_clear_priv_t clear_priv)
1839{
1840 if (obj->priv && obj->clear_priv)
1841 obj->clear_priv(obj, obj->priv);
1842
1843 obj->priv = priv;
1844 obj->clear_priv = clear_priv;
1845 return 0;
1846}
1847
1848void *bpf_object__priv(struct bpf_object *obj)
1849{
1850 return obj ? obj->priv : ERR_PTR(-EINVAL);
1851}
1852
Wang Nanaa9b1ac2015-07-01 02:14:08 +00001853struct bpf_program *
1854bpf_program__next(struct bpf_program *prev, struct bpf_object *obj)
1855{
1856 size_t idx;
1857
1858 if (!obj->programs)
1859 return NULL;
1860 /* First handler */
1861 if (prev == NULL)
1862 return &obj->programs[0];
1863
1864 if (prev->obj != obj) {
1865 pr_warning("error: program handler doesn't match object\n");
1866 return NULL;
1867 }
1868
1869 idx = (prev - obj->programs) + 1;
1870 if (idx >= obj->nr_programs)
1871 return NULL;
1872 return &obj->programs[idx];
1873}
1874
Arnaldo Carvalho de Meloedb13ed2016-06-03 12:38:21 -03001875int bpf_program__set_priv(struct bpf_program *prog, void *priv,
1876 bpf_program_clear_priv_t clear_priv)
Wang Nanaa9b1ac2015-07-01 02:14:08 +00001877{
1878 if (prog->priv && prog->clear_priv)
1879 prog->clear_priv(prog, prog->priv);
1880
1881 prog->priv = priv;
1882 prog->clear_priv = clear_priv;
1883 return 0;
1884}
1885
Arnaldo Carvalho de Melobe834ff2016-06-03 12:36:39 -03001886void *bpf_program__priv(struct bpf_program *prog)
Wang Nanaa9b1ac2015-07-01 02:14:08 +00001887{
Arnaldo Carvalho de Melobe834ff2016-06-03 12:36:39 -03001888 return prog ? prog->priv : ERR_PTR(-EINVAL);
Wang Nanaa9b1ac2015-07-01 02:14:08 +00001889}
1890
Namhyung Kim715f8db2015-11-03 20:21:05 +09001891const char *bpf_program__title(struct bpf_program *prog, bool needs_copy)
Wang Nanaa9b1ac2015-07-01 02:14:08 +00001892{
1893 const char *title;
1894
1895 title = prog->section_name;
Namhyung Kim715f8db2015-11-03 20:21:05 +09001896 if (needs_copy) {
Wang Nanaa9b1ac2015-07-01 02:14:08 +00001897 title = strdup(title);
1898 if (!title) {
1899 pr_warning("failed to strdup program title\n");
Wang Nan6371ca3b2015-11-06 13:49:37 +00001900 return ERR_PTR(-ENOMEM);
Wang Nanaa9b1ac2015-07-01 02:14:08 +00001901 }
1902 }
1903
1904 return title;
1905}
1906
/* Kernel fd of the program's first (0th) instance; negative on error. */
int bpf_program__fd(struct bpf_program *prog)
{
	return bpf_program__nth_fd(prog, 0);
}
1911
1912int bpf_program__set_prep(struct bpf_program *prog, int nr_instances,
1913 bpf_program_prep_t prep)
1914{
1915 int *instances_fds;
1916
1917 if (nr_instances <= 0 || !prep)
1918 return -EINVAL;
1919
1920 if (prog->instances.nr > 0 || prog->instances.fds) {
1921 pr_warning("Can't set pre-processor after loading\n");
1922 return -EINVAL;
1923 }
1924
1925 instances_fds = malloc(sizeof(int) * nr_instances);
1926 if (!instances_fds) {
1927 pr_warning("alloc memory failed for fds\n");
1928 return -ENOMEM;
1929 }
1930
1931 /* fill all fd with -1 */
1932 memset(instances_fds, -1, sizeof(int) * nr_instances);
1933
1934 prog->instances.nr = nr_instances;
1935 prog->instances.fds = instances_fds;
1936 prog->preprocessor = prep;
1937 return 0;
1938}
1939
1940int bpf_program__nth_fd(struct bpf_program *prog, int n)
1941{
1942 int fd;
1943
1944 if (n >= prog->instances.nr || n < 0) {
1945 pr_warning("Can't get the %dth fd from program %s: only %d instances\n",
1946 n, prog->section_name, prog->instances.nr);
1947 return -EINVAL;
1948 }
1949
1950 fd = prog->instances.fds[n];
1951 if (fd < 0) {
1952 pr_warning("%dth instance of program '%s' is invalid\n",
1953 n, prog->section_name);
1954 return -ENOENT;
1955 }
1956
1957 return fd;
Wang Nanaa9b1ac2015-07-01 02:14:08 +00001958}
Wang Nan9d759a92015-11-27 08:47:35 +00001959
/* Force the program's BPF type, overriding any section-name guess. */
void bpf_program__set_type(struct bpf_program *prog, enum bpf_prog_type type)
{
	prog->type = type;
}
1964
Wang Nan5f44e4c82016-07-13 10:44:01 +00001965static bool bpf_program__is_type(struct bpf_program *prog,
1966 enum bpf_prog_type type)
1967{
1968 return prog ? (prog->type == type) : false;
1969}
1970
/*
 * Generate the public bpf_program__set_<NAME>() / bpf_program__is_<NAME>()
 * pair for one program type: the setter rejects a NULL program with
 * -EINVAL, the predicate forwards to bpf_program__is_type().
 */
#define BPF_PROG_TYPE_FNS(NAME, TYPE)			\
int bpf_program__set_##NAME(struct bpf_program *prog)	\
{							\
	if (!prog)					\
		return -EINVAL;				\
	bpf_program__set_type(prog, TYPE);		\
	return 0;					\
}							\
							\
bool bpf_program__is_##NAME(struct bpf_program *prog)	\
{							\
	return bpf_program__is_type(prog, TYPE);	\
}							\

/* One setter/predicate pair per supported program type. */
BPF_PROG_TYPE_FNS(socket_filter, BPF_PROG_TYPE_SOCKET_FILTER);
BPF_PROG_TYPE_FNS(kprobe, BPF_PROG_TYPE_KPROBE);
BPF_PROG_TYPE_FNS(sched_cls, BPF_PROG_TYPE_SCHED_CLS);
BPF_PROG_TYPE_FNS(sched_act, BPF_PROG_TYPE_SCHED_ACT);
BPF_PROG_TYPE_FNS(tracepoint, BPF_PROG_TYPE_TRACEPOINT);
BPF_PROG_TYPE_FNS(raw_tracepoint, BPF_PROG_TYPE_RAW_TRACEPOINT);
BPF_PROG_TYPE_FNS(xdp, BPF_PROG_TYPE_XDP);
BPF_PROG_TYPE_FNS(perf_event, BPF_PROG_TYPE_PERF_EVENT);
Wang Nan5f44e4c82016-07-13 10:44:01 +00001993
/* Record the expected attach type used when the program is loaded. */
void bpf_program__set_expected_attach_type(struct bpf_program *prog,
					   enum bpf_attach_type type)
{
	prog->expected_attach_type = type;
}
1999
/* Table entry: section-name prefix, its length, and the derived types. */
#define BPF_PROG_SEC_FULL(string, ptype, atype) \
	{ string, sizeof(string) - 1, ptype, atype }

/* Program type only; expected attach type left at 0. */
#define BPF_PROG_SEC(string, ptype) BPF_PROG_SEC_FULL(string, ptype, 0)

/* CGROUP_SOCK program with the given attach type. */
#define BPF_S_PROG_SEC(string, ptype) \
	BPF_PROG_SEC_FULL(string, BPF_PROG_TYPE_CGROUP_SOCK, ptype)

/* CGROUP_SOCK_ADDR program with the given attach type. */
#define BPF_SA_PROG_SEC(string, ptype) \
	BPF_PROG_SEC_FULL(string, BPF_PROG_TYPE_CGROUP_SOCK_ADDR, ptype)

/*
 * Mapping from ELF section-name prefixes to program/attach types; used
 * by bpf_program__identify_section() (prefix match, first hit wins).
 */
static const struct {
	const char *sec;
	size_t len;
	enum bpf_prog_type prog_type;
	enum bpf_attach_type expected_attach_type;
} section_names[] = {
	BPF_PROG_SEC("socket",		BPF_PROG_TYPE_SOCKET_FILTER),
	BPF_PROG_SEC("kprobe/",		BPF_PROG_TYPE_KPROBE),
	BPF_PROG_SEC("kretprobe/",	BPF_PROG_TYPE_KPROBE),
	BPF_PROG_SEC("classifier",	BPF_PROG_TYPE_SCHED_CLS),
	BPF_PROG_SEC("action",		BPF_PROG_TYPE_SCHED_ACT),
	BPF_PROG_SEC("tracepoint/",	BPF_PROG_TYPE_TRACEPOINT),
	BPF_PROG_SEC("raw_tracepoint/",	BPF_PROG_TYPE_RAW_TRACEPOINT),
	BPF_PROG_SEC("xdp",		BPF_PROG_TYPE_XDP),
	BPF_PROG_SEC("perf_event",	BPF_PROG_TYPE_PERF_EVENT),
	BPF_PROG_SEC("cgroup/skb",	BPF_PROG_TYPE_CGROUP_SKB),
	BPF_PROG_SEC("cgroup/sock",	BPF_PROG_TYPE_CGROUP_SOCK),
	BPF_PROG_SEC("cgroup/dev",	BPF_PROG_TYPE_CGROUP_DEVICE),
	BPF_PROG_SEC("lwt_in",		BPF_PROG_TYPE_LWT_IN),
	BPF_PROG_SEC("lwt_out",		BPF_PROG_TYPE_LWT_OUT),
	BPF_PROG_SEC("lwt_xmit",	BPF_PROG_TYPE_LWT_XMIT),
	BPF_PROG_SEC("sockops",		BPF_PROG_TYPE_SOCK_OPS),
	BPF_PROG_SEC("sk_skb",		BPF_PROG_TYPE_SK_SKB),
	BPF_PROG_SEC("sk_msg",		BPF_PROG_TYPE_SK_MSG),
	BPF_SA_PROG_SEC("cgroup/bind4",	BPF_CGROUP_INET4_BIND),
	BPF_SA_PROG_SEC("cgroup/bind6",	BPF_CGROUP_INET6_BIND),
	BPF_SA_PROG_SEC("cgroup/connect4", BPF_CGROUP_INET4_CONNECT),
	BPF_SA_PROG_SEC("cgroup/connect6", BPF_CGROUP_INET6_CONNECT),
	BPF_S_PROG_SEC("cgroup/post_bind4", BPF_CGROUP_INET4_POST_BIND),
	BPF_S_PROG_SEC("cgroup/post_bind6", BPF_CGROUP_INET6_POST_BIND),
};

/* Table is complete; the helper macros are not needed past this point. */
#undef BPF_PROG_SEC
#undef BPF_PROG_SEC_FULL
#undef BPF_S_PROG_SEC
#undef BPF_SA_PROG_SEC
Andrey Ignatovd7be1432018-03-30 15:08:01 -07002047
2048static int bpf_program__identify_section(struct bpf_program *prog)
Roman Gushchin583c9002017-12-13 15:18:51 +00002049{
2050 int i;
2051
2052 if (!prog->section_name)
2053 goto err;
2054
2055 for (i = 0; i < ARRAY_SIZE(section_names); i++)
2056 if (strncmp(prog->section_name, section_names[i].sec,
2057 section_names[i].len) == 0)
Andrey Ignatovd7be1432018-03-30 15:08:01 -07002058 return i;
Roman Gushchin583c9002017-12-13 15:18:51 +00002059
2060err:
2061 pr_warning("failed to guess program type based on section name %s\n",
2062 prog->section_name);
2063
Andrey Ignatovd7be1432018-03-30 15:08:01 -07002064 return -1;
Roman Gushchin583c9002017-12-13 15:18:51 +00002065}
2066
Arnaldo Carvalho de Melo6e009e62016-06-03 12:15:52 -03002067int bpf_map__fd(struct bpf_map *map)
Wang Nan9d759a92015-11-27 08:47:35 +00002068{
Arnaldo Carvalho de Melo6e009e62016-06-03 12:15:52 -03002069 return map ? map->fd : -EINVAL;
Wang Nan9d759a92015-11-27 08:47:35 +00002070}
2071
Arnaldo Carvalho de Melo53897a72016-06-02 14:21:06 -03002072const struct bpf_map_def *bpf_map__def(struct bpf_map *map)
Wang Nan9d759a92015-11-27 08:47:35 +00002073{
Arnaldo Carvalho de Melo53897a72016-06-02 14:21:06 -03002074 return map ? &map->def : ERR_PTR(-EINVAL);
Wang Nan9d759a92015-11-27 08:47:35 +00002075}
2076
Arnaldo Carvalho de Melo009ad5d2016-06-02 11:02:05 -03002077const char *bpf_map__name(struct bpf_map *map)
Wang Nan561bbcc2015-11-27 08:47:36 +00002078{
Arnaldo Carvalho de Melo009ad5d2016-06-02 11:02:05 -03002079 return map ? map->name : NULL;
Wang Nan561bbcc2015-11-27 08:47:36 +00002080}
2081
Martin KaFai Lau8a138ae2018-04-18 15:56:05 -07002082uint32_t bpf_map__btf_key_id(const struct bpf_map *map)
2083{
2084 return map ? map->btf_key_id : 0;
2085}
2086
2087uint32_t bpf_map__btf_value_id(const struct bpf_map *map)
2088{
2089 return map ? map->btf_value_id : 0;
2090}
2091
Arnaldo Carvalho de Meloedb13ed2016-06-03 12:38:21 -03002092int bpf_map__set_priv(struct bpf_map *map, void *priv,
2093 bpf_map_clear_priv_t clear_priv)
Wang Nan9d759a92015-11-27 08:47:35 +00002094{
2095 if (!map)
2096 return -EINVAL;
2097
2098 if (map->priv) {
2099 if (map->clear_priv)
2100 map->clear_priv(map, map->priv);
2101 }
2102
2103 map->priv = priv;
2104 map->clear_priv = clear_priv;
2105 return 0;
2106}
2107
Arnaldo Carvalho de Melob4cbfa52016-06-02 10:51:59 -03002108void *bpf_map__priv(struct bpf_map *map)
Wang Nan9d759a92015-11-27 08:47:35 +00002109{
Arnaldo Carvalho de Melob4cbfa52016-06-02 10:51:59 -03002110 return map ? map->priv : ERR_PTR(-EINVAL);
Wang Nan9d759a92015-11-27 08:47:35 +00002111}
2112
2113struct bpf_map *
2114bpf_map__next(struct bpf_map *prev, struct bpf_object *obj)
2115{
2116 size_t idx;
2117 struct bpf_map *s, *e;
2118
2119 if (!obj || !obj->maps)
2120 return NULL;
2121
2122 s = obj->maps;
2123 e = obj->maps + obj->nr_maps;
2124
2125 if (prev == NULL)
2126 return s;
2127
2128 if ((prev < s) || (prev >= e)) {
2129 pr_warning("error in %s: map handler doesn't belong to object\n",
2130 __func__);
2131 return NULL;
2132 }
2133
2134 idx = (prev - obj->maps) + 1;
2135 if (idx >= obj->nr_maps)
2136 return NULL;
2137 return &obj->maps[idx];
2138}
Wang Nan561bbcc2015-11-27 08:47:36 +00002139
2140struct bpf_map *
Arnaldo Carvalho de Meloa7fe0452016-06-03 12:22:51 -03002141bpf_object__find_map_by_name(struct bpf_object *obj, const char *name)
Wang Nan561bbcc2015-11-27 08:47:36 +00002142{
2143 struct bpf_map *pos;
2144
2145 bpf_map__for_each(pos, obj) {
Wang Nan973170e2015-12-08 02:25:29 +00002146 if (pos->name && !strcmp(pos->name, name))
Wang Nan561bbcc2015-11-27 08:47:36 +00002147 return pos;
2148 }
2149 return NULL;
2150}
Wang Nan5a6acad2016-11-26 07:03:27 +00002151
2152struct bpf_map *
2153bpf_object__find_map_by_offset(struct bpf_object *obj, size_t offset)
2154{
2155 int i;
2156
2157 for (i = 0; i < obj->nr_maps; i++) {
2158 if (obj->maps[i].offset == offset)
2159 return &obj->maps[i];
2160 }
2161 return ERR_PTR(-ENOENT);
2162}
Joe Stringere28ff1a2017-01-22 17:11:25 -08002163
/* Convert an ERR_PTR-style pointer into 0 or a negative errno value. */
long libbpf_get_error(const void *ptr)
{
	return IS_ERR(ptr) ? PTR_ERR(ptr) : 0;
}
John Fastabend6f6d33f2017-08-15 22:34:22 -07002170
2171int bpf_prog_load(const char *file, enum bpf_prog_type type,
2172 struct bpf_object **pobj, int *prog_fd)
2173{
Andrey Ignatovd7be1432018-03-30 15:08:01 -07002174 struct bpf_prog_load_attr attr;
2175
2176 memset(&attr, 0, sizeof(struct bpf_prog_load_attr));
2177 attr.file = file;
2178 attr.prog_type = type;
2179 attr.expected_attach_type = 0;
2180
2181 return bpf_prog_load_xattr(&attr, pobj, prog_fd);
2182}
2183
/*
 * Open @attr->file, assign every program a type and expected attach
 * type (from @attr, or guessed from its ELF section name when the type
 * is BPF_PROG_TYPE_UNSPEC), load the object into the kernel, and return
 * the object in *pobj plus the fd of the first non-.text program in
 * *prog_fd.  Returns 0 or a negative error; the object is closed on
 * every failure path after a successful open.
 */
int bpf_prog_load_xattr(const struct bpf_prog_load_attr *attr,
			struct bpf_object **pobj, int *prog_fd)
{
	struct bpf_program *prog, *first_prog = NULL;
	enum bpf_attach_type expected_attach_type;
	enum bpf_prog_type prog_type;
	struct bpf_object *obj;
	int section_idx;
	int err;

	if (!attr)
		return -EINVAL;
	if (!attr->file)
		return -EINVAL;

	/* Whether a kernel-version section is required depends on prog type. */
	obj = __bpf_object__open(attr->file, NULL, 0,
				 bpf_prog_type__needs_kver(attr->prog_type));
	if (IS_ERR(obj))
		return -ENOENT;

	bpf_object__for_each_program(prog, obj) {
		/*
		 * If type is not specified, try to guess it based on
		 * section name.
		 */
		prog_type = attr->prog_type;
		expected_attach_type = attr->expected_attach_type;
		if (prog_type == BPF_PROG_TYPE_UNSPEC) {
			section_idx = bpf_program__identify_section(prog);
			if (section_idx < 0) {
				bpf_object__close(obj);
				return -EINVAL;
			}
			prog_type = section_names[section_idx].prog_type;
			expected_attach_type =
				section_names[section_idx].expected_attach_type;
		}

		bpf_program__set_type(prog, prog_type);
		bpf_program__set_expected_attach_type(prog,
						      expected_attach_type);

		/*
		 * First program outside .text becomes the returned fd;
		 * NOTE(review): .text presumably holds only called
		 * sub-functions, not entry programs — confirm.
		 */
		if (prog->idx != obj->efile.text_shndx && !first_prog)
			first_prog = prog;
	}

	if (!first_prog) {
		pr_warning("object file doesn't contain bpf program\n");
		bpf_object__close(obj);
		return -ENOENT;
	}

	err = bpf_object__load(obj);
	if (err) {
		bpf_object__close(obj);
		return -EINVAL;
	}

	*pobj = obj;
	*prog_fd = bpf_program__fd(first_prog);
	return 0;
}
Jakub Kicinskid0cabbb2018-05-10 10:24:40 -07002246
/*
 * Drain one perf mmap ring buffer: walk the records between data_tail
 * and data_head, invoking fn() on each.  Records that wrap around the
 * end of the ring are linearized into *buf, which is grown with
 * malloc() as needed (*buf_len tracks its capacity; caller owns and
 * eventually frees it).  Stops early when fn() returns anything other
 * than LIBBPF_PERF_EVENT_CONT and returns that value.
 * @mem: base of the perf mmap area (header page followed by data pages)
 * @size: size of the data area; @page_size: size of the header page.
 */
enum bpf_perf_event_ret
bpf_perf_event_read_simple(void *mem, unsigned long size,
			   unsigned long page_size, void **buf, size_t *buf_len,
			   bpf_perf_event_print_t fn, void *priv)
{
	volatile struct perf_event_mmap_page *header = mem;
	__u64 data_tail = header->data_tail;
	__u64 data_head = header->data_head;
	void *base, *begin, *end;
	int ret;

	asm volatile("" ::: "memory"); /* in real code it should be smp_rmb() */
	if (data_head == data_tail)
		return LIBBPF_PERF_EVENT_CONT;	/* ring is empty */

	base = ((char *)header) + page_size;	/* start of the data area */

	begin = base + data_tail % size;
	end = base + data_head % size;

	/*
	 * NOTE(review): if head ever exceeds tail by an exact multiple of
	 * size, begin == end here and ret is returned uninitialized;
	 * presumably the kernel never lets the producer lap the consumer
	 * by a full ring — confirm.
	 */
	while (begin != end) {
		struct perf_event_header *ehdr;

		ehdr = begin;
		if (begin + ehdr->size > base + size) {
			/* Record wraps: copy both pieces into *buf. */
			long len = base + size - begin;

			if (*buf_len < ehdr->size) {
				free(*buf);
				*buf = malloc(ehdr->size);
				if (!*buf) {
					ret = LIBBPF_PERF_EVENT_ERROR;
					break;
				}
				*buf_len = ehdr->size;
			}

			memcpy(*buf, begin, len);
			memcpy(*buf + len, base, ehdr->size - len);
			ehdr = (void *)*buf;	/* hand fn() the linear copy */
			begin = base + ehdr->size - len;
		} else if (begin + ehdr->size == base + size) {
			begin = base;	/* record ends exactly at the edge */
		} else {
			begin += ehdr->size;
		}

		ret = fn(ehdr, priv);
		if (ret != LIBBPF_PERF_EVENT_CONT)
			break;

		data_tail += ehdr->size;
	}

	__sync_synchronize(); /* smp_mb() */
	header->data_tail = data_tail;	/* publish consumed position to kernel */

	return ret;
}