// SPDX-License-Identifier: LGPL-2.1

/*
 * Common eBPF ELF object loading operations.
 *
 * Copyright (C) 2013-2015 Alexei Starovoitov <ast@kernel.org>
 * Copyright (C) 2015 Wang Nan <wangnan0@huawei.com>
 * Copyright (C) 2015 Huawei Inc.
 * Copyright (C) 2017 Nicira, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation;
 * version 2.1 of the License (not later!)
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this program; if not, see <http://www.gnu.org/licenses>
 */

#include <stdlib.h>
#include <stdio.h>
#include <stdarg.h>
#include <libgen.h>
#include <inttypes.h>
#include <string.h>
#include <unistd.h>
#include <fcntl.h>
#include <errno.h>
#include <perf-sys.h>
#include <asm/unistd.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/bpf.h>
#include <linux/list.h>
#include <linux/limits.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <sys/vfs.h>
#include <libelf.h>
#include <gelf.h>

#include "libbpf.h"
#include "bpf.h"
#include "btf.h"

#ifndef EM_BPF
#define EM_BPF 247
#endif

#ifndef BPF_FS_MAGIC
#define BPF_FS_MAGIC		0xcafe4a11
#endif

#define __printf(a, b)	__attribute__((format(printf, a, b)))

__printf(1, 2)
static int __base_pr(const char *format, ...)
{
	va_list args;
	int err;

	va_start(args, format);
	err = vfprintf(stderr, format, args);
	va_end(args);
	return err;
}

static __printf(1, 2) libbpf_print_fn_t __pr_warning = __base_pr;
static __printf(1, 2) libbpf_print_fn_t __pr_info = __base_pr;
static __printf(1, 2) libbpf_print_fn_t __pr_debug;

#define __pr(func, fmt, ...)	\
do {				\
	if ((func))		\
		(func)("libbpf: " fmt, ##__VA_ARGS__); \
} while (0)

#define pr_warning(fmt, ...)	__pr(__pr_warning, fmt, ##__VA_ARGS__)
#define pr_info(fmt, ...)	__pr(__pr_info, fmt, ##__VA_ARGS__)
#define pr_debug(fmt, ...)	__pr(__pr_debug, fmt, ##__VA_ARGS__)

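/*
 * Note: pr_warning()/pr_info()/pr_debug() only emit output when the
 * corresponding callback is non-NULL. __pr_debug starts out NULL, so debug
 * messages stay silent until a caller installs a handler via
 * libbpf_set_print() below.
 */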
87void libbpf_set_print(libbpf_print_fn_t warn,
88 libbpf_print_fn_t info,
89 libbpf_print_fn_t debug)
90{
91 __pr_warning = warn;
92 __pr_info = info;
93 __pr_debug = debug;
94}
Wang Nan1a5e3fb2015-07-01 02:13:53 +000095
Wang Nan6371ca32015-11-06 13:49:37 +000096#define STRERR_BUFSIZE 128
97
98#define ERRNO_OFFSET(e) ((e) - __LIBBPF_ERRNO__START)
99#define ERRCODE_OFFSET(c) ERRNO_OFFSET(LIBBPF_ERRNO__##c)
100#define NR_ERRNO (__LIBBPF_ERRNO__END - __LIBBPF_ERRNO__START)
101
102static const char *libbpf_strerror_table[NR_ERRNO] = {
103 [ERRCODE_OFFSET(LIBELF)] = "Something wrong in libelf",
104 [ERRCODE_OFFSET(FORMAT)] = "BPF object format invalid",
105 [ERRCODE_OFFSET(KVERSION)] = "'version' section incorrect or lost",
Colin Ian Kingde8a63b2016-06-28 13:23:37 +0100106 [ERRCODE_OFFSET(ENDIAN)] = "Endian mismatch",
Wang Nan6371ca32015-11-06 13:49:37 +0000107 [ERRCODE_OFFSET(INTERNAL)] = "Internal error in libbpf",
108 [ERRCODE_OFFSET(RELOC)] = "Relocation failed",
109 [ERRCODE_OFFSET(VERIFY)] = "Kernel verifier blocks program loading",
110 [ERRCODE_OFFSET(PROG2BIG)] = "Program too big",
111 [ERRCODE_OFFSET(KVER)] = "Incorrect kernel version",
Wang Nan705fa212016-07-13 10:44:02 +0000112 [ERRCODE_OFFSET(PROGTYPE)] = "Kernel doesn't support this program type",
Eric Leblond949abbe2018-01-30 21:55:01 +0100113 [ERRCODE_OFFSET(WRNGPID)] = "Wrong pid in netlink message",
114 [ERRCODE_OFFSET(INVSEQ)] = "Invalid netlink sequence",
Wang Nan6371ca32015-11-06 13:49:37 +0000115};
116
117int libbpf_strerror(int err, char *buf, size_t size)
118{
119 if (!buf || !size)
120 return -1;
121
122 err = err > 0 ? err : -err;
123
124 if (err < __LIBBPF_ERRNO__START) {
125 int ret;
126
127 ret = strerror_r(err, buf, size);
128 buf[size - 1] = '\0';
129 return ret;
130 }
131
132 if (err < __LIBBPF_ERRNO__END) {
133 const char *msg;
134
135 msg = libbpf_strerror_table[ERRNO_OFFSET(err)];
136 snprintf(buf, size, "%s", msg);
137 buf[size - 1] = '\0';
138 return 0;
139 }
140
141 snprintf(buf, size, "Unknown libbpf error %d", err);
142 buf[size - 1] = '\0';
143 return -1;
144}
145
146#define CHECK_ERR(action, err, out) do { \
147 err = action; \
148 if (err) \
149 goto out; \
150} while(0)
151
152
Wang Nan1a5e3fb2015-07-01 02:13:53 +0000153/* Copied from tools/perf/util/util.h */
154#ifndef zfree
155# define zfree(ptr) ({ free(*ptr); *ptr = NULL; })
156#endif
157
158#ifndef zclose
159# define zclose(fd) ({ \
160 int ___err = 0; \
161 if ((fd) >= 0) \
162 ___err = close((fd)); \
163 fd = -1; \
164 ___err; })
165#endif
166
167#ifdef HAVE_LIBELF_MMAP_SUPPORT
168# define LIBBPF_ELF_C_READ_MMAP ELF_C_READ_MMAP
169#else
170# define LIBBPF_ELF_C_READ_MMAP ELF_C_READ
171#endif
172
Wang Nana5b8bd42015-07-01 02:14:00 +0000173/*
174 * bpf_prog should be a better name but it has been used in
175 * linux/filter.h.
176 */
177struct bpf_program {
178 /* Index in elf obj file, for relocation use. */
179 int idx;
Martin KaFai Lau88cda1c2017-09-27 14:37:54 -0700180 char *name;
David Beckettf0307a72018-05-16 14:02:49 -0700181 int prog_ifindex;
Wang Nana5b8bd42015-07-01 02:14:00 +0000182 char *section_name;
183 struct bpf_insn *insns;
Alexei Starovoitov48cca7e2017-12-14 17:55:10 -0800184 size_t insns_cnt, main_prog_cnt;
Wang Nan5f44e4c82016-07-13 10:44:01 +0000185 enum bpf_prog_type type;
Wang Nan34090912015-07-01 02:14:02 +0000186
Alexei Starovoitov48cca7e2017-12-14 17:55:10 -0800187 struct reloc_desc {
188 enum {
189 RELO_LD64,
190 RELO_CALL,
191 } type;
Wang Nan34090912015-07-01 02:14:02 +0000192 int insn_idx;
Alexei Starovoitov48cca7e2017-12-14 17:55:10 -0800193 union {
194 int map_idx;
195 int text_off;
196 };
Wang Nan34090912015-07-01 02:14:02 +0000197 } *reloc_desc;
198 int nr_reloc;
Wang Nan55cffde2015-07-01 02:14:07 +0000199
Wang Nanb5805632015-11-16 12:10:09 +0000200 struct {
201 int nr;
202 int *fds;
203 } instances;
204 bpf_program_prep_t preprocessor;
Wang Nanaa9b1ac2015-07-01 02:14:08 +0000205
206 struct bpf_object *obj;
207 void *priv;
208 bpf_program_clear_priv_t clear_priv;
Andrey Ignatovd7be1432018-03-30 15:08:01 -0700209
210 enum bpf_attach_type expected_attach_type;
Wang Nana5b8bd42015-07-01 02:14:00 +0000211};
212
Wang Nan9d759a92015-11-27 08:47:35 +0000213struct bpf_map {
214 int fd;
Wang Nan561bbcc2015-11-27 08:47:36 +0000215 char *name;
Eric Leblond4708bbd2016-11-15 04:05:47 +0000216 size_t offset;
David Beckettf0307a72018-05-16 14:02:49 -0700217 int map_ifindex;
Wang Nan9d759a92015-11-27 08:47:35 +0000218 struct bpf_map_def def;
Martin KaFai Lau61746db2018-05-22 15:04:24 -0700219 uint32_t btf_key_type_id;
220 uint32_t btf_value_type_id;
Wang Nan9d759a92015-11-27 08:47:35 +0000221 void *priv;
222 bpf_map_clear_priv_t clear_priv;
223};
224
Wang Nan9a208ef2015-07-01 02:14:10 +0000225static LIST_HEAD(bpf_objects_list);
226
Wang Nan1a5e3fb2015-07-01 02:13:53 +0000227struct bpf_object {
Wang Nancb1e5e92015-07-01 02:13:57 +0000228 char license[64];
229 u32 kern_version;
Wang Nan0b3d1ef2015-07-01 02:13:58 +0000230
Wang Nana5b8bd42015-07-01 02:14:00 +0000231 struct bpf_program *programs;
232 size_t nr_programs;
Wang Nan9d759a92015-11-27 08:47:35 +0000233 struct bpf_map *maps;
234 size_t nr_maps;
235
Wang Nan52d33522015-07-01 02:14:04 +0000236 bool loaded;
Wang Nana5b8bd42015-07-01 02:14:00 +0000237
Wang Nan1a5e3fb2015-07-01 02:13:53 +0000238 /*
239 * Information when doing elf related work. Only valid if fd
240 * is valid.
241 */
242 struct {
243 int fd;
Wang Nan6c956392015-07-01 02:13:54 +0000244 void *obj_buf;
245 size_t obj_buf_sz;
Wang Nan1a5e3fb2015-07-01 02:13:53 +0000246 Elf *elf;
247 GElf_Ehdr ehdr;
Wang Nanbec7d682015-07-01 02:13:59 +0000248 Elf_Data *symbols;
Wang Nan77ba9a52015-12-08 02:25:30 +0000249 size_t strtabidx;
Wang Nanb62f06e2015-07-01 02:14:01 +0000250 struct {
251 GElf_Shdr shdr;
252 Elf_Data *data;
253 } *reloc;
254 int nr_reloc;
Wang Nan666810e2016-01-25 09:55:49 +0000255 int maps_shndx;
Alexei Starovoitov48cca7e2017-12-14 17:55:10 -0800256 int text_shndx;
Wang Nan1a5e3fb2015-07-01 02:13:53 +0000257 } efile;
Wang Nan9a208ef2015-07-01 02:14:10 +0000258 /*
259 * All loaded bpf_object is linked in a list, which is
260 * hidden to caller. bpf_objects__<func> handlers deal with
261 * all objects.
262 */
263 struct list_head list;
Wang Nan10931d22016-11-26 07:03:26 +0000264
Martin KaFai Lau8a138ae2018-04-18 15:56:05 -0700265 struct btf *btf;
266
Wang Nan10931d22016-11-26 07:03:26 +0000267 void *priv;
268 bpf_object_clear_priv_t clear_priv;
269
Wang Nan1a5e3fb2015-07-01 02:13:53 +0000270 char path[];
271};
272#define obj_elf_valid(o) ((o)->efile.elf)
273
Wang Nan55cffde2015-07-01 02:14:07 +0000274static void bpf_program__unload(struct bpf_program *prog)
275{
Wang Nanb5805632015-11-16 12:10:09 +0000276 int i;
277
Wang Nan55cffde2015-07-01 02:14:07 +0000278 if (!prog)
279 return;
280
Wang Nanb5805632015-11-16 12:10:09 +0000281 /*
282 * If the object is opened but the program was never loaded,
283 * it is possible that prog->instances.nr == -1.
284 */
285 if (prog->instances.nr > 0) {
286 for (i = 0; i < prog->instances.nr; i++)
287 zclose(prog->instances.fds[i]);
288 } else if (prog->instances.nr != -1) {
289 pr_warning("Internal error: instances.nr is %d\n",
290 prog->instances.nr);
291 }
292
293 prog->instances.nr = -1;
294 zfree(&prog->instances.fds);
Wang Nan55cffde2015-07-01 02:14:07 +0000295}
296
Wang Nana5b8bd42015-07-01 02:14:00 +0000297static void bpf_program__exit(struct bpf_program *prog)
298{
299 if (!prog)
300 return;
301
Wang Nanaa9b1ac2015-07-01 02:14:08 +0000302 if (prog->clear_priv)
303 prog->clear_priv(prog, prog->priv);
304
305 prog->priv = NULL;
306 prog->clear_priv = NULL;
307
Wang Nan55cffde2015-07-01 02:14:07 +0000308 bpf_program__unload(prog);
Martin KaFai Lau88cda1c2017-09-27 14:37:54 -0700309 zfree(&prog->name);
Wang Nana5b8bd42015-07-01 02:14:00 +0000310 zfree(&prog->section_name);
311 zfree(&prog->insns);
Wang Nan34090912015-07-01 02:14:02 +0000312 zfree(&prog->reloc_desc);
313
314 prog->nr_reloc = 0;
Wang Nana5b8bd42015-07-01 02:14:00 +0000315 prog->insns_cnt = 0;
316 prog->idx = -1;
317}
318
319static int
Martin KaFai Lau88cda1c2017-09-27 14:37:54 -0700320bpf_program__init(void *data, size_t size, char *section_name, int idx,
321 struct bpf_program *prog)
Wang Nana5b8bd42015-07-01 02:14:00 +0000322{
323 if (size < sizeof(struct bpf_insn)) {
Martin KaFai Lau88cda1c2017-09-27 14:37:54 -0700324 pr_warning("corrupted section '%s'\n", section_name);
Wang Nana5b8bd42015-07-01 02:14:00 +0000325 return -EINVAL;
326 }
327
328 bzero(prog, sizeof(*prog));
329
Martin KaFai Lau88cda1c2017-09-27 14:37:54 -0700330 prog->section_name = strdup(section_name);
Wang Nana5b8bd42015-07-01 02:14:00 +0000331 if (!prog->section_name) {
Jesper Dangaard Brouer077c0662018-02-08 12:48:17 +0100332 pr_warning("failed to alloc name for prog under section(%d) %s\n",
333 idx, section_name);
Wang Nana5b8bd42015-07-01 02:14:00 +0000334 goto errout;
335 }
336
337 prog->insns = malloc(size);
338 if (!prog->insns) {
Martin KaFai Lau88cda1c2017-09-27 14:37:54 -0700339 pr_warning("failed to alloc insns for prog under section %s\n",
340 section_name);
Wang Nana5b8bd42015-07-01 02:14:00 +0000341 goto errout;
342 }
343 prog->insns_cnt = size / sizeof(struct bpf_insn);
344 memcpy(prog->insns, data,
345 prog->insns_cnt * sizeof(struct bpf_insn));
346 prog->idx = idx;
Wang Nanb5805632015-11-16 12:10:09 +0000347 prog->instances.fds = NULL;
348 prog->instances.nr = -1;
Wang Nan5f44e4c82016-07-13 10:44:01 +0000349 prog->type = BPF_PROG_TYPE_KPROBE;
Wang Nana5b8bd42015-07-01 02:14:00 +0000350
351 return 0;
352errout:
353 bpf_program__exit(prog);
354 return -ENOMEM;
355}
356
357static int
358bpf_object__add_program(struct bpf_object *obj, void *data, size_t size,
Martin KaFai Lau88cda1c2017-09-27 14:37:54 -0700359 char *section_name, int idx)
Wang Nana5b8bd42015-07-01 02:14:00 +0000360{
361 struct bpf_program prog, *progs;
362 int nr_progs, err;
363
Martin KaFai Lau88cda1c2017-09-27 14:37:54 -0700364 err = bpf_program__init(data, size, section_name, idx, &prog);
Wang Nana5b8bd42015-07-01 02:14:00 +0000365 if (err)
366 return err;
367
368 progs = obj->programs;
369 nr_progs = obj->nr_programs;
370
371 progs = realloc(progs, sizeof(progs[0]) * (nr_progs + 1));
372 if (!progs) {
373 /*
374 * In this case the original obj->programs
375 * is still valid, so don't need special treat for
376 * bpf_close_object().
377 */
Martin KaFai Lau88cda1c2017-09-27 14:37:54 -0700378 pr_warning("failed to alloc a new program under section '%s'\n",
379 section_name);
Wang Nana5b8bd42015-07-01 02:14:00 +0000380 bpf_program__exit(&prog);
381 return -ENOMEM;
382 }
383
384 pr_debug("found program %s\n", prog.section_name);
385 obj->programs = progs;
386 obj->nr_programs = nr_progs + 1;
Wang Nanaa9b1ac2015-07-01 02:14:08 +0000387 prog.obj = obj;
Wang Nana5b8bd42015-07-01 02:14:00 +0000388 progs[nr_progs] = prog;
389 return 0;
390}
391
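/*
 * Resolve a name for each collected program: programs built from .text keep
 * the name ".text", every other program is named after the first global
 * symbol defined in its section (typically the function name).
 */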
static int
bpf_object__init_prog_names(struct bpf_object *obj)
{
	Elf_Data *symbols = obj->efile.symbols;
	struct bpf_program *prog;
	size_t pi, si;

	for (pi = 0; pi < obj->nr_programs; pi++) {
		const char *name = NULL;

		prog = &obj->programs[pi];
		if (prog->idx == obj->efile.text_shndx) {
			name = ".text";
			goto skip_search;
		}

		for (si = 0; si < symbols->d_size / sizeof(GElf_Sym) && !name;
		     si++) {
			GElf_Sym sym;

			if (!gelf_getsym(symbols, si, &sym))
				continue;
			if (sym.st_shndx != prog->idx)
				continue;
			if (GELF_ST_BIND(sym.st_info) != STB_GLOBAL)
				continue;

			name = elf_strptr(obj->efile.elf,
					  obj->efile.strtabidx,
					  sym.st_name);
			if (!name) {
				pr_warning("failed to get sym name string for prog %s\n",
					   prog->section_name);
				return -LIBBPF_ERRNO__LIBELF;
			}
		}

		if (!name) {
			pr_warning("failed to find sym for prog %s\n",
				   prog->section_name);
			return -EINVAL;
		}
skip_search:
		prog->name = strdup(name);
		if (!prog->name) {
			pr_warning("failed to allocate memory for prog sym %s\n",
				   name);
			return -ENOMEM;
		}
	}

	return 0;
}

static struct bpf_object *bpf_object__new(const char *path,
					  void *obj_buf,
					  size_t obj_buf_sz)
{
	struct bpf_object *obj;

	obj = calloc(1, sizeof(struct bpf_object) + strlen(path) + 1);
	if (!obj) {
		pr_warning("alloc memory failed for %s\n", path);
		return ERR_PTR(-ENOMEM);
	}

	strcpy(obj->path, path);
	obj->efile.fd = -1;

	/*
	 * Caller of this function should also call
	 * bpf_object__elf_finish() after data collection to return
	 * obj_buf to user. If not, we should duplicate the buffer to
	 * avoid the user freeing it before elf finish.
	 */
	obj->efile.obj_buf = obj_buf;
	obj->efile.obj_buf_sz = obj_buf_sz;
	obj->efile.maps_shndx = -1;

	obj->loaded = false;

	INIT_LIST_HEAD(&obj->list);
	list_add(&obj->list, &bpf_objects_list);
	return obj;
}

static void bpf_object__elf_finish(struct bpf_object *obj)
{
	if (!obj_elf_valid(obj))
		return;

	if (obj->efile.elf) {
		elf_end(obj->efile.elf);
		obj->efile.elf = NULL;
	}
	obj->efile.symbols = NULL;

	zfree(&obj->efile.reloc);
	obj->efile.nr_reloc = 0;
	zclose(obj->efile.fd);
	obj->efile.obj_buf = NULL;
	obj->efile.obj_buf_sz = 0;
}

static int bpf_object__elf_init(struct bpf_object *obj)
{
	int err = 0;
	GElf_Ehdr *ep;

	if (obj_elf_valid(obj)) {
		pr_warning("elf init: internal error\n");
		return -LIBBPF_ERRNO__LIBELF;
	}

	if (obj->efile.obj_buf_sz > 0) {
		/*
		 * obj_buf should have been validated by
		 * bpf_object__open_buffer().
		 */
		obj->efile.elf = elf_memory(obj->efile.obj_buf,
					    obj->efile.obj_buf_sz);
	} else {
		obj->efile.fd = open(obj->path, O_RDONLY);
		if (obj->efile.fd < 0) {
			pr_warning("failed to open %s: %s\n", obj->path,
				   strerror(errno));
			return -errno;
		}

		obj->efile.elf = elf_begin(obj->efile.fd,
					   LIBBPF_ELF_C_READ_MMAP,
					   NULL);
	}

	if (!obj->efile.elf) {
		pr_warning("failed to open %s as ELF file\n",
			   obj->path);
		err = -LIBBPF_ERRNO__LIBELF;
		goto errout;
	}

	if (!gelf_getehdr(obj->efile.elf, &obj->efile.ehdr)) {
		pr_warning("failed to get EHDR from %s\n",
			   obj->path);
		err = -LIBBPF_ERRNO__FORMAT;
		goto errout;
	}
	ep = &obj->efile.ehdr;

	/* Old LLVM set e_machine to EM_NONE */
	if ((ep->e_type != ET_REL) || (ep->e_machine && (ep->e_machine != EM_BPF))) {
		pr_warning("%s is not an eBPF object file\n",
			   obj->path);
		err = -LIBBPF_ERRNO__FORMAT;
		goto errout;
	}

	return 0;
errout:
	bpf_object__elf_finish(obj);
	return err;
}

static int
bpf_object__check_endianness(struct bpf_object *obj)
{
	static unsigned int const endian = 1;

	switch (obj->efile.ehdr.e_ident[EI_DATA]) {
	case ELFDATA2LSB:
		/* We are big endian, BPF obj is little endian. */
		if (*(unsigned char const *)&endian != 1)
			goto mismatch;
		break;

	case ELFDATA2MSB:
		/* We are little endian, BPF obj is big endian. */
		if (*(unsigned char const *)&endian != 0)
			goto mismatch;
		break;
	default:
		return -LIBBPF_ERRNO__ENDIAN;
	}

	return 0;

mismatch:
	pr_warning("Error: endianness mismatch.\n");
	return -LIBBPF_ERRNO__ENDIAN;
}

static int
bpf_object__init_license(struct bpf_object *obj,
			 void *data, size_t size)
{
	memcpy(obj->license, data,
	       min(size, sizeof(obj->license) - 1));
	pr_debug("license of %s is %s\n", obj->path, obj->license);
	return 0;
}

static int
bpf_object__init_kversion(struct bpf_object *obj,
			  void *data, size_t size)
{
	u32 kver;

	if (size != sizeof(kver)) {
		pr_warning("invalid kver section in %s\n", obj->path);
		return -LIBBPF_ERRNO__FORMAT;
	}
	memcpy(&kver, data, sizeof(kver));
	obj->kern_version = kver;
	pr_debug("kernel version of %s is %x\n", obj->path,
		 obj->kern_version);
	return 0;
}

static int compare_bpf_map(const void *_a, const void *_b)
{
	const struct bpf_map *a = _a;
	const struct bpf_map *b = _b;

	return a->offset - b->offset;
}

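/*
 * Build obj->maps from the "maps" ELF section: each symbol in that section
 * names one map, all definitions are assumed to be equally sized, and a
 * definition larger than struct bpf_map_def is accepted only if the extra
 * bytes are all zero.
 */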
static int
bpf_object__init_maps(struct bpf_object *obj)
{
	int i, map_idx, map_def_sz, nr_maps = 0;
	Elf_Scn *scn;
	Elf_Data *data;
	Elf_Data *symbols = obj->efile.symbols;

	if (obj->efile.maps_shndx < 0)
		return -EINVAL;
	if (!symbols)
		return -EINVAL;

	scn = elf_getscn(obj->efile.elf, obj->efile.maps_shndx);
	if (scn)
		data = elf_getdata(scn, NULL);
	if (!scn || !data) {
		pr_warning("failed to get Elf_Data from map section %d\n",
			   obj->efile.maps_shndx);
		return -EINVAL;
	}

	/*
	 * Count number of maps. Each map has a name.
	 * Array of maps is not supported: only the first element is
	 * considered.
	 *
	 * TODO: Detect array of map and report error.
	 */
	for (i = 0; i < symbols->d_size / sizeof(GElf_Sym); i++) {
		GElf_Sym sym;

		if (!gelf_getsym(symbols, i, &sym))
			continue;
		if (sym.st_shndx != obj->efile.maps_shndx)
			continue;
		nr_maps++;
	}

	/* Alloc obj->maps and fill nr_maps. */
	pr_debug("maps in %s: %d maps in %zd bytes\n", obj->path,
		 nr_maps, data->d_size);

	if (!nr_maps)
		return 0;

	/* Assume equally sized map definitions */
	map_def_sz = data->d_size / nr_maps;
	if (!data->d_size || (data->d_size % nr_maps) != 0) {
		pr_warning("unable to determine map definition size "
			   "section %s, %d maps in %zd bytes\n",
			   obj->path, nr_maps, data->d_size);
		return -EINVAL;
	}

	obj->maps = calloc(nr_maps, sizeof(obj->maps[0]));
	if (!obj->maps) {
		pr_warning("alloc maps for object failed\n");
		return -ENOMEM;
	}
	obj->nr_maps = nr_maps;

	/*
	 * Fill all fds with -1 so an unset fd won't be closed by
	 * mistake on failure (fd=0 is stdin; zclose won't close a
	 * negative fd).
	 */
	for (i = 0; i < nr_maps; i++)
		obj->maps[i].fd = -1;

	/*
	 * Fill obj->maps using data in "maps" section.
	 */
	for (i = 0, map_idx = 0; i < symbols->d_size / sizeof(GElf_Sym); i++) {
		GElf_Sym sym;
		const char *map_name;
		struct bpf_map_def *def;

		if (!gelf_getsym(symbols, i, &sym))
			continue;
		if (sym.st_shndx != obj->efile.maps_shndx)
			continue;

		map_name = elf_strptr(obj->efile.elf,
				      obj->efile.strtabidx,
				      sym.st_name);
		obj->maps[map_idx].offset = sym.st_value;
		if (sym.st_value + map_def_sz > data->d_size) {
			pr_warning("corrupted maps section in %s: last map \"%s\" too small\n",
				   obj->path, map_name);
			return -EINVAL;
		}

		obj->maps[map_idx].name = strdup(map_name);
		if (!obj->maps[map_idx].name) {
			pr_warning("failed to alloc map name\n");
			return -ENOMEM;
		}
		pr_debug("map %d is \"%s\"\n", map_idx,
			 obj->maps[map_idx].name);
		def = (struct bpf_map_def *)(data->d_buf + sym.st_value);
		/*
		 * If the definition of the map in the object file fits in
		 * bpf_map_def, copy it. Any extra fields in our version
		 * of bpf_map_def will default to zero as a result of the
		 * calloc above.
		 */
		if (map_def_sz <= sizeof(struct bpf_map_def)) {
			memcpy(&obj->maps[map_idx].def, def, map_def_sz);
		} else {
			/*
			 * Here the map structure being read is bigger than what
			 * we expect, truncate if the excess bits are all zero.
			 * If they are not zero, reject this map as
			 * incompatible.
			 */
			char *b;
			for (b = ((char *)def) + sizeof(struct bpf_map_def);
			     b < ((char *)def) + map_def_sz; b++) {
				if (*b != 0) {
					pr_warning("maps section in %s: \"%s\" "
						   "has unrecognized, non-zero "
						   "options\n",
						   obj->path, map_name);
					return -EINVAL;
				}
			}
			memcpy(&obj->maps[map_idx].def, def,
			       sizeof(struct bpf_map_def));
		}
		map_idx++;
	}

	qsort(obj->maps, obj->nr_maps, sizeof(obj->maps[0]), compare_bpf_map);
	return 0;
}

static bool section_have_execinstr(struct bpf_object *obj, int idx)
{
	Elf_Scn *scn;
	GElf_Shdr sh;

	scn = elf_getscn(obj->efile.elf, idx);
	if (!scn)
		return false;

	if (gelf_getshdr(scn, &sh) != &sh)
		return false;

	if (sh.sh_flags & SHF_EXECINSTR)
		return true;

	return false;
}

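/*
 * Walk every ELF section once and dispatch on its name/type: "license",
 * "version", "maps" and the BTF section are recorded, SHT_SYMTAB provides
 * the symbol and string tables, executable SHT_PROGBITS sections become
 * bpf_programs, and SHT_REL sections against executable sections are queued
 * for later relocation processing.
 */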
static int bpf_object__elf_collect(struct bpf_object *obj)
{
	Elf *elf = obj->efile.elf;
	GElf_Ehdr *ep = &obj->efile.ehdr;
	Elf_Scn *scn = NULL;
	int idx = 0, err = 0;

	/* Elf is corrupted/truncated, avoid calling elf_strptr. */
	if (!elf_rawdata(elf_getscn(elf, ep->e_shstrndx), NULL)) {
		pr_warning("failed to get e_shstrndx from %s\n",
			   obj->path);
		return -LIBBPF_ERRNO__FORMAT;
	}

	while ((scn = elf_nextscn(elf, scn)) != NULL) {
		char *name;
		GElf_Shdr sh;
		Elf_Data *data;

		idx++;
		if (gelf_getshdr(scn, &sh) != &sh) {
			pr_warning("failed to get section(%d) header from %s\n",
				   idx, obj->path);
			err = -LIBBPF_ERRNO__FORMAT;
			goto out;
		}

		name = elf_strptr(elf, ep->e_shstrndx, sh.sh_name);
		if (!name) {
			pr_warning("failed to get section(%d) name from %s\n",
				   idx, obj->path);
			err = -LIBBPF_ERRNO__FORMAT;
			goto out;
		}

		data = elf_getdata(scn, 0);
		if (!data) {
			pr_warning("failed to get section(%d) data from %s(%s)\n",
				   idx, name, obj->path);
			err = -LIBBPF_ERRNO__FORMAT;
			goto out;
		}
		pr_debug("section(%d) %s, size %ld, link %d, flags %lx, type=%d\n",
			 idx, name, (unsigned long)data->d_size,
			 (int)sh.sh_link, (unsigned long)sh.sh_flags,
			 (int)sh.sh_type);

		if (strcmp(name, "license") == 0)
			err = bpf_object__init_license(obj,
						       data->d_buf,
						       data->d_size);
		else if (strcmp(name, "version") == 0)
			err = bpf_object__init_kversion(obj,
							data->d_buf,
							data->d_size);
		else if (strcmp(name, "maps") == 0)
			obj->efile.maps_shndx = idx;
		else if (strcmp(name, BTF_ELF_SEC) == 0) {
			obj->btf = btf__new(data->d_buf, data->d_size,
					    __pr_debug);
			if (IS_ERR(obj->btf)) {
				pr_warning("Error loading ELF section %s: %ld. Ignored and continue.\n",
					   BTF_ELF_SEC, PTR_ERR(obj->btf));
				obj->btf = NULL;
			}
		} else if (sh.sh_type == SHT_SYMTAB) {
			if (obj->efile.symbols) {
				pr_warning("bpf: multiple SYMTAB in %s\n",
					   obj->path);
				err = -LIBBPF_ERRNO__FORMAT;
			} else {
				obj->efile.symbols = data;
				obj->efile.strtabidx = sh.sh_link;
			}
		} else if ((sh.sh_type == SHT_PROGBITS) &&
			   (sh.sh_flags & SHF_EXECINSTR) &&
			   (data->d_size > 0)) {
			if (strcmp(name, ".text") == 0)
				obj->efile.text_shndx = idx;
			err = bpf_object__add_program(obj, data->d_buf,
						      data->d_size, name, idx);
			if (err) {
				char errmsg[STRERR_BUFSIZE];

				strerror_r(-err, errmsg, sizeof(errmsg));
				pr_warning("failed to alloc program %s (%s): %s",
					   name, obj->path, errmsg);
			}
		} else if (sh.sh_type == SHT_REL) {
			void *reloc = obj->efile.reloc;
			int nr_reloc = obj->efile.nr_reloc + 1;
			int sec = sh.sh_info; /* points to other section */

			/* Only do relo for section with exec instructions */
			if (!section_have_execinstr(obj, sec)) {
				pr_debug("skip relo %s(%d) for section(%d)\n",
					 name, idx, sec);
				continue;
			}

			reloc = realloc(reloc,
					sizeof(*obj->efile.reloc) * nr_reloc);
			if (!reloc) {
				pr_warning("realloc failed\n");
				err = -ENOMEM;
			} else {
				int n = nr_reloc - 1;

				obj->efile.reloc = reloc;
				obj->efile.nr_reloc = nr_reloc;

				obj->efile.reloc[n].shdr = sh;
				obj->efile.reloc[n].data = data;
			}
		} else {
			pr_debug("skip section(%d) %s\n", idx, name);
		}
		if (err)
			goto out;
	}

	if (!obj->efile.strtabidx || obj->efile.strtabidx >= idx) {
		pr_warning("Corrupted ELF file: index of strtab invalid\n");
		return LIBBPF_ERRNO__FORMAT;
	}
	if (obj->efile.maps_shndx >= 0) {
		err = bpf_object__init_maps(obj);
		if (err)
			goto out;
	}
	err = bpf_object__init_prog_names(obj);
out:
	return err;
}

static struct bpf_program *
bpf_object__find_prog_by_idx(struct bpf_object *obj, int idx)
{
	struct bpf_program *prog;
	size_t i;

	for (i = 0; i < obj->nr_programs; i++) {
		prog = &obj->programs[i];
		if (prog->idx == idx)
			return prog;
	}
	return NULL;
}

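/*
 * Record one reloc_desc per relocation entry in the program's SHT_REL
 * section. A BPF_JMP|BPF_CALL insn with src_reg == BPF_PSEUDO_CALL becomes a
 * RELO_CALL (bpf-to-bpf call into .text); a BPF_LD|BPF_IMM|BPF_DW insn whose
 * symbol lives in the maps section becomes a RELO_LD64 carrying the index of
 * the referenced map.
 */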
static int
bpf_program__collect_reloc(struct bpf_program *prog, GElf_Shdr *shdr,
			   Elf_Data *data, struct bpf_object *obj)
{
	Elf_Data *symbols = obj->efile.symbols;
	int text_shndx = obj->efile.text_shndx;
	int maps_shndx = obj->efile.maps_shndx;
	struct bpf_map *maps = obj->maps;
	size_t nr_maps = obj->nr_maps;
	int i, nrels;

	pr_debug("collecting relocating info for: '%s'\n",
		 prog->section_name);
	nrels = shdr->sh_size / shdr->sh_entsize;

	prog->reloc_desc = malloc(sizeof(*prog->reloc_desc) * nrels);
	if (!prog->reloc_desc) {
		pr_warning("failed to alloc memory in relocation\n");
		return -ENOMEM;
	}
	prog->nr_reloc = nrels;

	for (i = 0; i < nrels; i++) {
		GElf_Sym sym;
		GElf_Rel rel;
		unsigned int insn_idx;
		struct bpf_insn *insns = prog->insns;
		size_t map_idx;

		if (!gelf_getrel(data, i, &rel)) {
			pr_warning("relocation: failed to get %d reloc\n", i);
			return -LIBBPF_ERRNO__FORMAT;
		}

		if (!gelf_getsym(symbols,
				 GELF_R_SYM(rel.r_info),
				 &sym)) {
			pr_warning("relocation: symbol %"PRIx64" not found\n",
				   GELF_R_SYM(rel.r_info));
			return -LIBBPF_ERRNO__FORMAT;
		}
		pr_debug("relo for %lld value %lld name %d\n",
			 (long long) (rel.r_info >> 32),
			 (long long) sym.st_value, sym.st_name);

		if (sym.st_shndx != maps_shndx && sym.st_shndx != text_shndx) {
			pr_warning("Program '%s' contains non-map related relo data pointing to section %u\n",
				   prog->section_name, sym.st_shndx);
			return -LIBBPF_ERRNO__RELOC;
		}

		insn_idx = rel.r_offset / sizeof(struct bpf_insn);
		pr_debug("relocation: insn_idx=%u\n", insn_idx);

		if (insns[insn_idx].code == (BPF_JMP | BPF_CALL)) {
			if (insns[insn_idx].src_reg != BPF_PSEUDO_CALL) {
				pr_warning("incorrect bpf_call opcode\n");
				return -LIBBPF_ERRNO__RELOC;
			}
			prog->reloc_desc[i].type = RELO_CALL;
			prog->reloc_desc[i].insn_idx = insn_idx;
			prog->reloc_desc[i].text_off = sym.st_value;
			continue;
		}

		if (insns[insn_idx].code != (BPF_LD | BPF_IMM | BPF_DW)) {
			pr_warning("bpf: relocation: invalid relo for insns[%d].code 0x%x\n",
				   insn_idx, insns[insn_idx].code);
			return -LIBBPF_ERRNO__RELOC;
		}

		/* TODO: 'maps' is sorted. We can use bsearch to make it faster. */
		for (map_idx = 0; map_idx < nr_maps; map_idx++) {
			if (maps[map_idx].offset == sym.st_value) {
				pr_debug("relocation: find map %zd (%s) for insn %u\n",
					 map_idx, maps[map_idx].name, insn_idx);
				break;
			}
		}

		if (map_idx >= nr_maps) {
			pr_warning("bpf relocation: map_idx %d larger than %d\n",
				   (int)map_idx, (int)nr_maps - 1);
			return -LIBBPF_ERRNO__RELOC;
		}

		prog->reloc_desc[i].type = RELO_LD64;
		prog->reloc_desc[i].insn_idx = insn_idx;
		prog->reloc_desc[i].map_idx = map_idx;
	}
	return 0;
}

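/*
 * Look up BTF type ids for a map's key and value. The convention is that the
 * object file defines types named "<map_name>_key" and "<map_name>_value";
 * their BTF sizes must match key_size/value_size from the map definition.
 */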
static int bpf_map_find_btf_info(struct bpf_map *map, const struct btf *btf)
{
	struct bpf_map_def *def = &map->def;
	const size_t max_name = 256;
	int64_t key_size, value_size;
	int32_t key_id, value_id;
	char name[max_name];

	/* Find key type by name from BTF */
	if (snprintf(name, max_name, "%s_key", map->name) == max_name) {
		pr_warning("map:%s length of BTF key_type:%s_key is too long\n",
			   map->name, map->name);
		return -EINVAL;
	}

	key_id = btf__find_by_name(btf, name);
	if (key_id < 0) {
		pr_debug("map:%s key_type:%s cannot be found in BTF\n",
			 map->name, name);
		return key_id;
	}

	key_size = btf__resolve_size(btf, key_id);
	if (key_size < 0) {
		pr_warning("map:%s key_type:%s cannot get the BTF type_size\n",
			   map->name, name);
		return key_size;
	}

	if (def->key_size != key_size) {
		pr_warning("map:%s key_type:%s has BTF type_size:%u != key_size:%u\n",
			   map->name, name, (unsigned int)key_size, def->key_size);
		return -EINVAL;
	}

	/* Find value type from BTF */
	if (snprintf(name, max_name, "%s_value", map->name) == max_name) {
		pr_warning("map:%s length of BTF value_type:%s_value is too long\n",
			   map->name, map->name);
		return -EINVAL;
	}

	value_id = btf__find_by_name(btf, name);
	if (value_id < 0) {
		pr_debug("map:%s value_type:%s cannot be found in BTF\n",
			 map->name, name);
		return value_id;
	}

	value_size = btf__resolve_size(btf, value_id);
	if (value_size < 0) {
		pr_warning("map:%s value_type:%s cannot get the BTF type_size\n",
			   map->name, name);
		return value_size;
	}

	if (def->value_size != value_size) {
		pr_warning("map:%s value_type:%s has BTF type_size:%u != value_size:%u\n",
			   map->name, name, (unsigned int)value_size, def->value_size);
		return -EINVAL;
	}

	map->btf_key_type_id = key_id;
	map->btf_value_type_id = value_id;

	return 0;
}

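/*
 * Create kernel maps for every parsed definition. When the object carries a
 * BTF section and matching key/value types are found, the BTF fd and type
 * ids are passed along; if the kernel rejects that, the create is retried
 * once with the BTF information stripped.
 */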
static int
bpf_object__create_maps(struct bpf_object *obj)
{
	struct bpf_create_map_attr create_attr = {};
	unsigned int i;
	int err;

	for (i = 0; i < obj->nr_maps; i++) {
		struct bpf_map *map = &obj->maps[i];
		struct bpf_map_def *def = &map->def;
		int *pfd = &map->fd;

		create_attr.name = map->name;
		create_attr.map_ifindex = map->map_ifindex;
		create_attr.map_type = def->type;
		create_attr.map_flags = def->map_flags;
		create_attr.key_size = def->key_size;
		create_attr.value_size = def->value_size;
		create_attr.max_entries = def->max_entries;
		create_attr.btf_fd = 0;
		create_attr.btf_key_type_id = 0;
		create_attr.btf_value_type_id = 0;

		if (obj->btf && !bpf_map_find_btf_info(map, obj->btf)) {
			create_attr.btf_fd = btf__fd(obj->btf);
			create_attr.btf_key_type_id = map->btf_key_type_id;
			create_attr.btf_value_type_id = map->btf_value_type_id;
		}

		*pfd = bpf_create_map_xattr(&create_attr);
		if (*pfd < 0 && create_attr.btf_key_type_id) {
			pr_warning("Error in bpf_create_map_xattr(%s):%s(%d). Retrying without BTF.\n",
				   map->name, strerror(errno), errno);
			create_attr.btf_fd = 0;
			create_attr.btf_key_type_id = 0;
			create_attr.btf_value_type_id = 0;
			map->btf_key_type_id = 0;
			map->btf_value_type_id = 0;
			*pfd = bpf_create_map_xattr(&create_attr);
		}

		if (*pfd < 0) {
			size_t j;

			err = *pfd;
			pr_warning("failed to create map (name: '%s'): %s\n",
				   map->name,
				   strerror(errno));
			for (j = 0; j < i; j++)
				zclose(obj->maps[j].fd);
			return err;
		}
		pr_debug("create map %s: fd=%d\n", map->name, *pfd);
	}

	return 0;
}

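/*
 * Resolve a RELO_CALL: the first time a program references .text, the whole
 * .text section is appended once to the program's instruction array, and the
 * call insn's imm is then adjusted for where .text was copied relative to
 * the call site.
 */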
static int
bpf_program__reloc_text(struct bpf_program *prog, struct bpf_object *obj,
			struct reloc_desc *relo)
{
	struct bpf_insn *insn, *new_insn;
	struct bpf_program *text;
	size_t new_cnt;

	if (relo->type != RELO_CALL)
		return -LIBBPF_ERRNO__RELOC;

	if (prog->idx == obj->efile.text_shndx) {
		pr_warning("relo in .text insn %d into off %d\n",
			   relo->insn_idx, relo->text_off);
		return -LIBBPF_ERRNO__RELOC;
	}

	if (prog->main_prog_cnt == 0) {
		text = bpf_object__find_prog_by_idx(obj, obj->efile.text_shndx);
		if (!text) {
			pr_warning("no .text section found yet relo into text exist\n");
			return -LIBBPF_ERRNO__RELOC;
		}
		new_cnt = prog->insns_cnt + text->insns_cnt;
		new_insn = realloc(prog->insns, new_cnt * sizeof(*insn));
		if (!new_insn) {
			pr_warning("oom in prog realloc\n");
			return -ENOMEM;
		}
		memcpy(new_insn + prog->insns_cnt, text->insns,
		       text->insns_cnt * sizeof(*insn));
		prog->insns = new_insn;
		prog->main_prog_cnt = prog->insns_cnt;
		prog->insns_cnt = new_cnt;
		pr_debug("added %zd insn from %s to prog %s\n",
			 text->insns_cnt, text->section_name,
			 prog->section_name);
	}
	insn = &prog->insns[relo->insn_idx];
	insn->imm += prog->main_prog_cnt - relo->insn_idx;
	return 0;
}

static int
bpf_program__relocate(struct bpf_program *prog, struct bpf_object *obj)
{
	int i, err;

	if (!prog || !prog->reloc_desc)
		return 0;

	for (i = 0; i < prog->nr_reloc; i++) {
		if (prog->reloc_desc[i].type == RELO_LD64) {
			struct bpf_insn *insns = prog->insns;
			int insn_idx, map_idx;

			insn_idx = prog->reloc_desc[i].insn_idx;
			map_idx = prog->reloc_desc[i].map_idx;

			if (insn_idx >= (int)prog->insns_cnt) {
				pr_warning("relocation out of range: '%s'\n",
					   prog->section_name);
				return -LIBBPF_ERRNO__RELOC;
			}
			insns[insn_idx].src_reg = BPF_PSEUDO_MAP_FD;
			insns[insn_idx].imm = obj->maps[map_idx].fd;
		} else {
			err = bpf_program__reloc_text(prog, obj,
						      &prog->reloc_desc[i]);
			if (err)
				return err;
		}
	}

	zfree(&prog->reloc_desc);
	prog->nr_reloc = 0;
	return 0;
}


static int
bpf_object__relocate(struct bpf_object *obj)
{
	struct bpf_program *prog;
	size_t i;
	int err;

	for (i = 0; i < obj->nr_programs; i++) {
		prog = &obj->programs[i];

		err = bpf_program__relocate(prog, obj);
		if (err) {
			pr_warning("failed to relocate '%s'\n",
				   prog->section_name);
			return err;
		}
	}
	return 0;
}

static int bpf_object__collect_reloc(struct bpf_object *obj)
{
	int i, err;

	if (!obj_elf_valid(obj)) {
		pr_warning("Internal error: elf object is closed\n");
		return -LIBBPF_ERRNO__INTERNAL;
	}

	for (i = 0; i < obj->efile.nr_reloc; i++) {
		GElf_Shdr *shdr = &obj->efile.reloc[i].shdr;
		Elf_Data *data = obj->efile.reloc[i].data;
		int idx = shdr->sh_info;
		struct bpf_program *prog;

		if (shdr->sh_type != SHT_REL) {
			pr_warning("internal error at %d\n", __LINE__);
			return -LIBBPF_ERRNO__INTERNAL;
		}

		prog = bpf_object__find_prog_by_idx(obj, idx);
		if (!prog) {
			pr_warning("relocation failed: no section(%d)\n", idx);
			return -LIBBPF_ERRNO__RELOC;
		}

		err = bpf_program__collect_reloc(prog,
						 shdr, data,
						 obj);
		if (err)
			return err;
	}
	return 0;
}

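/*
 * Thin wrapper around bpf_load_program_xattr(). On failure, the verifier log
 * (if any) is dumped and the error is classified: verifier rejection,
 * program too large, wrong program type (detected by retrying the load as a
 * kprobe), or a kernel-version mismatch.
 */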
static int
load_program(enum bpf_prog_type type, enum bpf_attach_type expected_attach_type,
	     const char *name, struct bpf_insn *insns, int insns_cnt,
	     char *license, u32 kern_version, int *pfd, int prog_ifindex)
{
	struct bpf_load_program_attr load_attr;
	char *log_buf;
	int ret;

	memset(&load_attr, 0, sizeof(struct bpf_load_program_attr));
	load_attr.prog_type = type;
	load_attr.expected_attach_type = expected_attach_type;
	load_attr.name = name;
	load_attr.insns = insns;
	load_attr.insns_cnt = insns_cnt;
	load_attr.license = license;
	load_attr.kern_version = kern_version;
	load_attr.prog_ifindex = prog_ifindex;

	if (!load_attr.insns || !load_attr.insns_cnt)
		return -EINVAL;

	log_buf = malloc(BPF_LOG_BUF_SIZE);
	if (!log_buf)
		pr_warning("Alloc log buffer for bpf loader error, continue without log\n");

	ret = bpf_load_program_xattr(&load_attr, log_buf, BPF_LOG_BUF_SIZE);

	if (ret >= 0) {
		*pfd = ret;
		ret = 0;
		goto out;
	}

	ret = -LIBBPF_ERRNO__LOAD;
	pr_warning("load bpf program failed: %s\n", strerror(errno));

	if (log_buf && log_buf[0] != '\0') {
		ret = -LIBBPF_ERRNO__VERIFY;
		pr_warning("-- BEGIN DUMP LOG ---\n");
		pr_warning("\n%s\n", log_buf);
		pr_warning("-- END LOG --\n");
	} else if (load_attr.insns_cnt >= BPF_MAXINSNS) {
		pr_warning("Program too large (%zu insns), at most %d insns\n",
			   load_attr.insns_cnt, BPF_MAXINSNS);
		ret = -LIBBPF_ERRNO__PROG2BIG;
	} else {
		/* Wrong program type? */
		if (load_attr.prog_type != BPF_PROG_TYPE_KPROBE) {
			int fd;

			load_attr.prog_type = BPF_PROG_TYPE_KPROBE;
			load_attr.expected_attach_type = 0;
			fd = bpf_load_program_xattr(&load_attr, NULL, 0);
			if (fd >= 0) {
				close(fd);
				ret = -LIBBPF_ERRNO__PROGTYPE;
				goto out;
			}
		}

		if (log_buf)
			ret = -LIBBPF_ERRNO__KVER;
	}

out:
	free(log_buf);
	return ret;
}

static int
bpf_program__load(struct bpf_program *prog,
		  char *license, u32 kern_version)
{
	int err = 0, fd, i;

	if (prog->instances.nr < 0 || !prog->instances.fds) {
		if (prog->preprocessor) {
			pr_warning("Internal error: can't load program '%s'\n",
				   prog->section_name);
			return -LIBBPF_ERRNO__INTERNAL;
		}

		prog->instances.fds = malloc(sizeof(int));
		if (!prog->instances.fds) {
			pr_warning("Not enough memory for BPF fds\n");
			return -ENOMEM;
		}
		prog->instances.nr = 1;
		prog->instances.fds[0] = -1;
	}

	if (!prog->preprocessor) {
		if (prog->instances.nr != 1) {
			pr_warning("Program '%s' is inconsistent: nr(%d) != 1\n",
				   prog->section_name, prog->instances.nr);
		}
		err = load_program(prog->type, prog->expected_attach_type,
				   prog->name, prog->insns, prog->insns_cnt,
				   license, kern_version, &fd,
				   prog->prog_ifindex);
		if (!err)
			prog->instances.fds[0] = fd;
		goto out;
	}

	for (i = 0; i < prog->instances.nr; i++) {
		struct bpf_prog_prep_result result;
		bpf_program_prep_t preprocessor = prog->preprocessor;

		bzero(&result, sizeof(result));
		err = preprocessor(prog, i, prog->insns,
				   prog->insns_cnt, &result);
		if (err) {
			pr_warning("Preprocessing the %dth instance of program '%s' failed\n",
				   i, prog->section_name);
			goto out;
		}

		if (!result.new_insn_ptr || !result.new_insn_cnt) {
			pr_debug("Skip loading the %dth instance of program '%s'\n",
				 i, prog->section_name);
			prog->instances.fds[i] = -1;
			if (result.pfd)
				*result.pfd = -1;
			continue;
		}

		err = load_program(prog->type, prog->expected_attach_type,
				   prog->name, result.new_insn_ptr,
				   result.new_insn_cnt,
				   license, kern_version, &fd,
				   prog->prog_ifindex);

		if (err) {
			pr_warning("Loading the %dth instance of program '%s' failed\n",
				   i, prog->section_name);
			goto out;
		}

		if (result.pfd)
			*result.pfd = fd;
		prog->instances.fds[i] = fd;
	}
out:
	if (err)
		pr_warning("failed to load program '%s'\n",
			   prog->section_name);
	zfree(&prog->insns);
	prog->insns_cnt = 0;
	return err;
}

static int
bpf_object__load_progs(struct bpf_object *obj)
{
	size_t i;
	int err;

	for (i = 0; i < obj->nr_programs; i++) {
		if (obj->programs[i].idx == obj->efile.text_shndx)
			continue;
		err = bpf_program__load(&obj->programs[i],
					obj->license,
					obj->kern_version);
		if (err)
			return err;
	}
	return 0;
}

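/*
 * Only the tracing-style program types (kprobe, tracepoint, perf event, raw
 * tracepoint and anything unknown) require a valid "version" section;
 * networking and cgroup program types load regardless of kernel version.
 */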
Jakub Kicinski17387dd2018-05-10 10:24:42 -07001447static bool bpf_prog_type__needs_kver(enum bpf_prog_type type)
Wang Nancb1e5e92015-07-01 02:13:57 +00001448{
Jakub Kicinski17387dd2018-05-10 10:24:42 -07001449 switch (type) {
1450 case BPF_PROG_TYPE_SOCKET_FILTER:
1451 case BPF_PROG_TYPE_SCHED_CLS:
1452 case BPF_PROG_TYPE_SCHED_ACT:
1453 case BPF_PROG_TYPE_XDP:
1454 case BPF_PROG_TYPE_CGROUP_SKB:
1455 case BPF_PROG_TYPE_CGROUP_SOCK:
1456 case BPF_PROG_TYPE_LWT_IN:
1457 case BPF_PROG_TYPE_LWT_OUT:
1458 case BPF_PROG_TYPE_LWT_XMIT:
Mathieu Xhonneux004d4b22018-05-20 14:58:16 +01001459 case BPF_PROG_TYPE_LWT_SEG6LOCAL:
Jakub Kicinski17387dd2018-05-10 10:24:42 -07001460 case BPF_PROG_TYPE_SOCK_OPS:
1461 case BPF_PROG_TYPE_SK_SKB:
1462 case BPF_PROG_TYPE_CGROUP_DEVICE:
1463 case BPF_PROG_TYPE_SK_MSG:
1464 case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
Sean Young6bdd5332018-05-27 12:24:10 +01001465 case BPF_PROG_TYPE_LIRC_MODE2:
Jakub Kicinski17387dd2018-05-10 10:24:42 -07001466 return false;
1467 case BPF_PROG_TYPE_UNSPEC:
1468 case BPF_PROG_TYPE_KPROBE:
1469 case BPF_PROG_TYPE_TRACEPOINT:
1470 case BPF_PROG_TYPE_PERF_EVENT:
1471 case BPF_PROG_TYPE_RAW_TRACEPOINT:
1472 default:
1473 return true;
1474 }
1475}
1476
1477static int bpf_object__validate(struct bpf_object *obj, bool needs_kver)
1478{
1479 if (needs_kver && obj->kern_version == 0) {
Wang Nancb1e5e92015-07-01 02:13:57 +00001480 pr_warning("%s doesn't provide kernel version\n",
1481 obj->path);
Wang Nan6371ca32015-11-06 13:49:37 +00001482 return -LIBBPF_ERRNO__KVERSION;
Wang Nancb1e5e92015-07-01 02:13:57 +00001483 }
1484 return 0;
1485}
1486
Wang Nan1a5e3fb2015-07-01 02:13:53 +00001487static struct bpf_object *
Jakub Kicinski17387dd2018-05-10 10:24:42 -07001488__bpf_object__open(const char *path, void *obj_buf, size_t obj_buf_sz,
1489 bool needs_kver)
Wang Nan1a5e3fb2015-07-01 02:13:53 +00001490{
1491 struct bpf_object *obj;
Wang Nan6371ca32015-11-06 13:49:37 +00001492 int err;
Wang Nan1a5e3fb2015-07-01 02:13:53 +00001493
1494 if (elf_version(EV_CURRENT) == EV_NONE) {
1495 pr_warning("failed to init libelf for %s\n", path);
Wang Nan6371ca32015-11-06 13:49:37 +00001496 return ERR_PTR(-LIBBPF_ERRNO__LIBELF);
Wang Nan1a5e3fb2015-07-01 02:13:53 +00001497 }
1498
Wang Nan6c956392015-07-01 02:13:54 +00001499 obj = bpf_object__new(path, obj_buf, obj_buf_sz);
Wang Nan6371ca32015-11-06 13:49:37 +00001500 if (IS_ERR(obj))
1501 return obj;
Wang Nan1a5e3fb2015-07-01 02:13:53 +00001502
Wang Nan6371ca32015-11-06 13:49:37 +00001503 CHECK_ERR(bpf_object__elf_init(obj), err, out);
1504 CHECK_ERR(bpf_object__check_endianness(obj), err, out);
1505 CHECK_ERR(bpf_object__elf_collect(obj), err, out);
1506 CHECK_ERR(bpf_object__collect_reloc(obj), err, out);
Jakub Kicinski17387dd2018-05-10 10:24:42 -07001507 CHECK_ERR(bpf_object__validate(obj, needs_kver), err, out);
Wang Nan1a5e3fb2015-07-01 02:13:53 +00001508
1509 bpf_object__elf_finish(obj);
1510 return obj;
1511out:
1512 bpf_object__close(obj);
Wang Nan6371ca32015-11-06 13:49:37 +00001513 return ERR_PTR(err);
Wang Nan1a5e3fb2015-07-01 02:13:53 +00001514}
1515
1516struct bpf_object *bpf_object__open(const char *path)
1517{
1518 /* param validation */
1519 if (!path)
1520 return NULL;
1521
1522 pr_debug("loading %s\n", path);
1523
Jakub Kicinski17387dd2018-05-10 10:24:42 -07001524 return __bpf_object__open(path, NULL, 0, true);
Wang Nan6c956392015-07-01 02:13:54 +00001525}
1526
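/*
 * Open an object from an in-memory ELF image.  If no name is given, a
 * temporary one is synthesized from the buffer address and size so the object
 * can still be identified in log messages.
 */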
1527struct bpf_object *bpf_object__open_buffer(void *obj_buf,
Wang Nanacf860a2015-08-27 02:30:55 +00001528 size_t obj_buf_sz,
1529 const char *name)
Wang Nan6c956392015-07-01 02:13:54 +00001530{
Wang Nanacf860a2015-08-27 02:30:55 +00001531 char tmp_name[64];
1532
Wang Nan6c956392015-07-01 02:13:54 +00001533 /* param validation */
1534 if (!obj_buf || obj_buf_sz <= 0)
1535 return NULL;
1536
Wang Nanacf860a2015-08-27 02:30:55 +00001537 if (!name) {
1538 snprintf(tmp_name, sizeof(tmp_name), "%lx-%lx",
1539 (unsigned long)obj_buf,
1540 (unsigned long)obj_buf_sz);
1541 tmp_name[sizeof(tmp_name) - 1] = '\0';
1542 name = tmp_name;
1543 }
1544 pr_debug("loading object '%s' from buffer\n",
1545 name);
Wang Nan6c956392015-07-01 02:13:54 +00001546
Jakub Kicinski17387dd2018-05-10 10:24:42 -07001547 return __bpf_object__open(name, obj_buf, obj_buf_sz, true);
Wang Nan1a5e3fb2015-07-01 02:13:53 +00001548}
1549
Wang Nan52d33522015-07-01 02:14:04 +00001550int bpf_object__unload(struct bpf_object *obj)
1551{
1552 size_t i;
1553
1554 if (!obj)
1555 return -EINVAL;
1556
Wang Nan9d759a92015-11-27 08:47:35 +00001557 for (i = 0; i < obj->nr_maps; i++)
1558 zclose(obj->maps[i].fd);
Wang Nan52d33522015-07-01 02:14:04 +00001559
Wang Nan55cffde2015-07-01 02:14:07 +00001560 for (i = 0; i < obj->nr_programs; i++)
1561 bpf_program__unload(&obj->programs[i]);
1562
Wang Nan52d33522015-07-01 02:14:04 +00001563 return 0;
1564}
1565
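/*
 * Load everything into the kernel: create the maps, apply relocations against
 * the freshly created maps, then load each program.  Loading is a one-shot
 * operation; on failure everything loaded so far is torn down again via
 * bpf_object__unload().
 *
 * A minimal usage sketch (error handling elided, object file name is
 * hypothetical):
 *
 *	struct bpf_object *obj = bpf_object__open("prog.o");
 *
 *	if (!libbpf_get_error(obj) && !bpf_object__load(obj))
 *		;	... programs and maps are now live, query their fds ...
 */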
1566int bpf_object__load(struct bpf_object *obj)
1567{
Wang Nan6371ca32015-11-06 13:49:37 +00001568 int err;
1569
Wang Nan52d33522015-07-01 02:14:04 +00001570 if (!obj)
1571 return -EINVAL;
1572
1573 if (obj->loaded) {
1574 pr_warning("object should not be loaded twice\n");
1575 return -EINVAL;
1576 }
1577
1578 obj->loaded = true;
Wang Nan6371ca32015-11-06 13:49:37 +00001579
1580 CHECK_ERR(bpf_object__create_maps(obj), err, out);
1581 CHECK_ERR(bpf_object__relocate(obj), err, out);
1582 CHECK_ERR(bpf_object__load_progs(obj), err, out);
Wang Nan52d33522015-07-01 02:14:04 +00001583
1584 return 0;
1585out:
1586 bpf_object__unload(obj);
1587 pr_warning("failed to load object '%s'\n", obj->path);
Wang Nan6371ca32015-11-06 13:49:37 +00001588 return err;
Wang Nan52d33522015-07-01 02:14:04 +00001589}
1590
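/*
 * Pinning helpers.  check_path() verifies that the directory component of a
 * requested pin path lives on a BPF filesystem (f_type == BPF_FS_MAGIC),
 * since bpf_obj_pin() only works there.
 */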
Joe Stringerf3675402017-01-26 13:19:56 -08001591static int check_path(const char *path)
1592{
1593 struct statfs st_fs;
1594 char *dname, *dir;
1595 int err = 0;
1596
1597 if (path == NULL)
1598 return -EINVAL;
1599
1600 dname = strdup(path);
1601 if (dname == NULL)
1602 return -ENOMEM;
1603
1604 dir = dirname(dname);
1605 if (statfs(dir, &st_fs)) {
1606 pr_warning("failed to statfs %s: %s\n", dir, strerror(errno));
1607 err = -errno;
1608 }
1609 free(dname);
1610
1611 if (!err && st_fs.f_type != BPF_FS_MAGIC) {
1612 pr_warning("specified path %s is not on BPF FS\n", path);
1613 err = -EINVAL;
1614 }
1615
1616 return err;
1617}
1618
1619int bpf_program__pin_instance(struct bpf_program *prog, const char *path,
1620 int instance)
1621{
1622 int err;
1623
1624 err = check_path(path);
1625 if (err)
1626 return err;
1627
1628 if (prog == NULL) {
1629 pr_warning("invalid program pointer\n");
1630 return -EINVAL;
1631 }
1632
1633 if (instance < 0 || instance >= prog->instances.nr) {
1634 pr_warning("invalid prog instance %d of prog %s (max %d)\n",
1635 instance, prog->section_name, prog->instances.nr);
1636 return -EINVAL;
1637 }
1638
1639 if (bpf_obj_pin(prog->instances.fds[instance], path)) {
1640 pr_warning("failed to pin program: %s\n", strerror(errno));
1641 return -errno;
1642 }
1643 pr_debug("pinned program '%s'\n", path);
1644
1645 return 0;
1646}
1647
1648static int make_dir(const char *path)
1649{
1650 int err = 0;
1651
1652 if (mkdir(path, 0700) && errno != EEXIST)
1653 err = -errno;
1654
1655 if (err)
1656 pr_warning("failed to mkdir %s: %s\n", path, strerror(-err));
1657 return err;
1658}
1659
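/*
 * Pin every instance of the program: the directory at @path is created if
 * needed and instance i is pinned as "<path>/<i>".
 */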
1660int bpf_program__pin(struct bpf_program *prog, const char *path)
1661{
1662 int i, err;
1663
1664 err = check_path(path);
1665 if (err)
1666 return err;
1667
1668 if (prog == NULL) {
1669 pr_warning("invalid program pointer\n");
1670 return -EINVAL;
1671 }
1672
1673 if (prog->instances.nr <= 0) {
1674 pr_warning("no instances of prog %s to pin\n",
1675 prog->section_name);
1676 return -EINVAL;
1677 }
1678
1679 err = make_dir(path);
1680 if (err)
1681 return err;
1682
1683 for (i = 0; i < prog->instances.nr; i++) {
1684 char buf[PATH_MAX];
1685 int len;
1686
1687 len = snprintf(buf, PATH_MAX, "%s/%d", path, i);
1688 if (len < 0)
1689 return -EINVAL;
1690 else if (len >= PATH_MAX)
1691 return -ENAMETOOLONG;
1692
1693 err = bpf_program__pin_instance(prog, buf, i);
1694 if (err)
1695 return err;
1696 }
1697
1698 return 0;
1699}
1700
Joe Stringerb6989f32017-01-26 13:19:57 -08001701int bpf_map__pin(struct bpf_map *map, const char *path)
1702{
1703 int err;
1704
1705 err = check_path(path);
1706 if (err)
1707 return err;
1708
1709 if (map == NULL) {
1710 pr_warning("invalid map pointer\n");
1711 return -EINVAL;
1712 }
1713
1714 if (bpf_obj_pin(map->fd, path)) {
1715 pr_warning("failed to pin map: %s\n", strerror(errno));
1716 return -errno;
1717 }
1718
1719 pr_debug("pinned map '%s'\n", path);
1720 return 0;
1721}
1722
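/*
 * Pin a whole (already loaded) object: maps are pinned as "<path>/<map name>"
 * and programs as "<path>/<section name>", using the helpers above.
 */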
Joe Stringerd5148d82017-01-26 13:19:58 -08001723int bpf_object__pin(struct bpf_object *obj, const char *path)
1724{
1725 struct bpf_program *prog;
1726 struct bpf_map *map;
1727 int err;
1728
1729 if (!obj)
1730 return -ENOENT;
1731
1732 if (!obj->loaded) {
1733 pr_warning("object not yet loaded; load it first\n");
1734 return -ENOENT;
1735 }
1736
1737 err = make_dir(path);
1738 if (err)
1739 return err;
1740
1741 bpf_map__for_each(map, obj) {
1742 char buf[PATH_MAX];
1743 int len;
1744
1745 len = snprintf(buf, PATH_MAX, "%s/%s", path,
1746 bpf_map__name(map));
1747 if (len < 0)
1748 return -EINVAL;
1749 else if (len >= PATH_MAX)
1750 return -ENAMETOOLONG;
1751
1752 err = bpf_map__pin(map, buf);
1753 if (err)
1754 return err;
1755 }
1756
1757 bpf_object__for_each_program(prog, obj) {
1758 char buf[PATH_MAX];
1759 int len;
1760
1761 len = snprintf(buf, PATH_MAX, "%s/%s", path,
1762 prog->section_name);
1763 if (len < 0)
1764 return -EINVAL;
1765 else if (len >= PATH_MAX)
1766 return -ENAMETOOLONG;
1767
1768 err = bpf_program__pin(prog, buf);
1769 if (err)
1770 return err;
1771 }
1772
1773 return 0;
1774}
1775
Wang Nan1a5e3fb2015-07-01 02:13:53 +00001776void bpf_object__close(struct bpf_object *obj)
1777{
Wang Nana5b8bd42015-07-01 02:14:00 +00001778 size_t i;
1779
Wang Nan1a5e3fb2015-07-01 02:13:53 +00001780 if (!obj)
1781 return;
1782
Wang Nan10931d22016-11-26 07:03:26 +00001783 if (obj->clear_priv)
1784 obj->clear_priv(obj, obj->priv);
1785
Wang Nan1a5e3fb2015-07-01 02:13:53 +00001786 bpf_object__elf_finish(obj);
Wang Nan52d33522015-07-01 02:14:04 +00001787 bpf_object__unload(obj);
Martin KaFai Lau8a138ae2018-04-18 15:56:05 -07001788 btf__free(obj->btf);
Wang Nan1a5e3fb2015-07-01 02:13:53 +00001789
Wang Nan9d759a92015-11-27 08:47:35 +00001790 for (i = 0; i < obj->nr_maps; i++) {
Wang Nan561bbcc2015-11-27 08:47:36 +00001791 zfree(&obj->maps[i].name);
Wang Nan9d759a92015-11-27 08:47:35 +00001792 if (obj->maps[i].clear_priv)
1793 obj->maps[i].clear_priv(&obj->maps[i],
1794 obj->maps[i].priv);
1795 obj->maps[i].priv = NULL;
1796 obj->maps[i].clear_priv = NULL;
1797 }
1798 zfree(&obj->maps);
1799 obj->nr_maps = 0;
Wang Nana5b8bd42015-07-01 02:14:00 +00001800
1801 if (obj->programs && obj->nr_programs) {
1802 for (i = 0; i < obj->nr_programs; i++)
1803 bpf_program__exit(&obj->programs[i]);
1804 }
1805 zfree(&obj->programs);
1806
Wang Nan9a208ef2015-07-01 02:14:10 +00001807 list_del(&obj->list);
Wang Nan1a5e3fb2015-07-01 02:13:53 +00001808 free(obj);
1809}
Wang Nanaa9b1ac2015-07-01 02:14:08 +00001810
Wang Nan9a208ef2015-07-01 02:14:10 +00001811struct bpf_object *
1812bpf_object__next(struct bpf_object *prev)
1813{
1814 struct bpf_object *next;
1815
1816 if (!prev)
1817 next = list_first_entry(&bpf_objects_list,
1818 struct bpf_object,
1819 list);
1820 else
1821 next = list_next_entry(prev, list);
1822
	1823	/* An empty list is detected here, so no check is needed on entry. */
1824 if (&next->list == &bpf_objects_list)
1825 return NULL;
1826
1827 return next;
1828}
1829
Arnaldo Carvalho de Meloa7fe0452016-06-03 12:22:51 -03001830const char *bpf_object__name(struct bpf_object *obj)
Wang Nanacf860a2015-08-27 02:30:55 +00001831{
Arnaldo Carvalho de Meloa7fe0452016-06-03 12:22:51 -03001832 return obj ? obj->path : ERR_PTR(-EINVAL);
Wang Nanacf860a2015-08-27 02:30:55 +00001833}
1834
Arnaldo Carvalho de Meloa7fe0452016-06-03 12:22:51 -03001835unsigned int bpf_object__kversion(struct bpf_object *obj)
Wang Nan45825d82015-11-06 13:49:38 +00001836{
Arnaldo Carvalho de Meloa7fe0452016-06-03 12:22:51 -03001837 return obj ? obj->kern_version : 0;
Wang Nan45825d82015-11-06 13:49:38 +00001838}
1839
Martin KaFai Lau8a138ae2018-04-18 15:56:05 -07001840int bpf_object__btf_fd(const struct bpf_object *obj)
1841{
1842 return obj->btf ? btf__fd(obj->btf) : -1;
1843}
1844
Wang Nan10931d22016-11-26 07:03:26 +00001845int bpf_object__set_priv(struct bpf_object *obj, void *priv,
1846 bpf_object_clear_priv_t clear_priv)
1847{
1848 if (obj->priv && obj->clear_priv)
1849 obj->clear_priv(obj, obj->priv);
1850
1851 obj->priv = priv;
1852 obj->clear_priv = clear_priv;
1853 return 0;
1854}
1855
1856void *bpf_object__priv(struct bpf_object *obj)
1857{
1858 return obj ? obj->priv : ERR_PTR(-EINVAL);
1859}
1860
Wang Nanaa9b1ac2015-07-01 02:14:08 +00001861struct bpf_program *
1862bpf_program__next(struct bpf_program *prev, struct bpf_object *obj)
1863{
1864 size_t idx;
1865
1866 if (!obj->programs)
1867 return NULL;
1868 /* First handler */
1869 if (prev == NULL)
1870 return &obj->programs[0];
1871
1872 if (prev->obj != obj) {
1873 pr_warning("error: program handler doesn't match object\n");
1874 return NULL;
1875 }
1876
1877 idx = (prev - obj->programs) + 1;
1878 if (idx >= obj->nr_programs)
1879 return NULL;
1880 return &obj->programs[idx];
1881}
1882
Arnaldo Carvalho de Meloedb13ed2016-06-03 12:38:21 -03001883int bpf_program__set_priv(struct bpf_program *prog, void *priv,
1884 bpf_program_clear_priv_t clear_priv)
Wang Nanaa9b1ac2015-07-01 02:14:08 +00001885{
1886 if (prog->priv && prog->clear_priv)
1887 prog->clear_priv(prog, prog->priv);
1888
1889 prog->priv = priv;
1890 prog->clear_priv = clear_priv;
1891 return 0;
1892}
1893
Arnaldo Carvalho de Melobe834ff2016-06-03 12:36:39 -03001894void *bpf_program__priv(struct bpf_program *prog)
Wang Nanaa9b1ac2015-07-01 02:14:08 +00001895{
Arnaldo Carvalho de Melobe834ff2016-06-03 12:36:39 -03001896 return prog ? prog->priv : ERR_PTR(-EINVAL);
Wang Nanaa9b1ac2015-07-01 02:14:08 +00001897}
1898
Namhyung Kim715f8db2015-11-03 20:21:05 +09001899const char *bpf_program__title(struct bpf_program *prog, bool needs_copy)
Wang Nanaa9b1ac2015-07-01 02:14:08 +00001900{
1901 const char *title;
1902
1903 title = prog->section_name;
Namhyung Kim715f8db2015-11-03 20:21:05 +09001904 if (needs_copy) {
Wang Nanaa9b1ac2015-07-01 02:14:08 +00001905 title = strdup(title);
1906 if (!title) {
1907 pr_warning("failed to strdup program title\n");
Wang Nan6371ca32015-11-06 13:49:37 +00001908 return ERR_PTR(-ENOMEM);
Wang Nanaa9b1ac2015-07-01 02:14:08 +00001909 }
1910 }
1911
1912 return title;
1913}
1914
1915int bpf_program__fd(struct bpf_program *prog)
1916{
Wang Nanb5805632015-11-16 12:10:09 +00001917 return bpf_program__nth_fd(prog, 0);
1918}
1919
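/*
 * Install a pre-processor that lets one ELF program be loaded as several
 * kernel program instances.  At load time the prep callback is invoked once
 * per instance and may hand back a rewritten instruction buffer; the fd of
 * instance n ends up in prog->instances.fds[n] and can be fetched later with
 * bpf_program__nth_fd().
 */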
1920int bpf_program__set_prep(struct bpf_program *prog, int nr_instances,
1921 bpf_program_prep_t prep)
1922{
1923 int *instances_fds;
1924
1925 if (nr_instances <= 0 || !prep)
1926 return -EINVAL;
1927
1928 if (prog->instances.nr > 0 || prog->instances.fds) {
1929 pr_warning("Can't set pre-processor after loading\n");
1930 return -EINVAL;
1931 }
1932
1933 instances_fds = malloc(sizeof(int) * nr_instances);
1934 if (!instances_fds) {
1935 pr_warning("alloc memory failed for fds\n");
1936 return -ENOMEM;
1937 }
1938
	1939	/* fill all fds with -1 */
1940 memset(instances_fds, -1, sizeof(int) * nr_instances);
1941
1942 prog->instances.nr = nr_instances;
1943 prog->instances.fds = instances_fds;
1944 prog->preprocessor = prep;
1945 return 0;
1946}
1947
1948int bpf_program__nth_fd(struct bpf_program *prog, int n)
1949{
1950 int fd;
1951
1952 if (n >= prog->instances.nr || n < 0) {
1953 pr_warning("Can't get the %dth fd from program %s: only %d instances\n",
1954 n, prog->section_name, prog->instances.nr);
1955 return -EINVAL;
1956 }
1957
1958 fd = prog->instances.fds[n];
1959 if (fd < 0) {
1960 pr_warning("%dth instance of program '%s' is invalid\n",
1961 n, prog->section_name);
1962 return -ENOENT;
1963 }
1964
1965 return fd;
Wang Nanaa9b1ac2015-07-01 02:14:08 +00001966}
Wang Nan9d759a92015-11-27 08:47:35 +00001967
Alexei Starovoitovdd26b7f2017-03-30 21:45:40 -07001968void bpf_program__set_type(struct bpf_program *prog, enum bpf_prog_type type)
Wang Nan5f44e4c82016-07-13 10:44:01 +00001969{
1970 prog->type = type;
1971}
1972
Wang Nan5f44e4c82016-07-13 10:44:01 +00001973static bool bpf_program__is_type(struct bpf_program *prog,
1974 enum bpf_prog_type type)
1975{
1976 return prog ? (prog->type == type) : false;
1977}
1978
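/*
 * Generate the bpf_program__set_<type>() / bpf_program__is_<type>() accessor
 * pairs for each program type instantiated below.
 */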
Joe Stringered794072017-01-22 17:11:23 -08001979#define BPF_PROG_TYPE_FNS(NAME, TYPE) \
1980int bpf_program__set_##NAME(struct bpf_program *prog) \
1981{ \
1982 if (!prog) \
1983 return -EINVAL; \
1984 bpf_program__set_type(prog, TYPE); \
1985 return 0; \
1986} \
1987 \
1988bool bpf_program__is_##NAME(struct bpf_program *prog) \
1989{ \
1990 return bpf_program__is_type(prog, TYPE); \
1991} \
Wang Nan5f44e4c82016-07-13 10:44:01 +00001992
Joe Stringer7803ba72017-01-22 17:11:24 -08001993BPF_PROG_TYPE_FNS(socket_filter, BPF_PROG_TYPE_SOCKET_FILTER);
Joe Stringered794072017-01-22 17:11:23 -08001994BPF_PROG_TYPE_FNS(kprobe, BPF_PROG_TYPE_KPROBE);
Joe Stringer7803ba72017-01-22 17:11:24 -08001995BPF_PROG_TYPE_FNS(sched_cls, BPF_PROG_TYPE_SCHED_CLS);
1996BPF_PROG_TYPE_FNS(sched_act, BPF_PROG_TYPE_SCHED_ACT);
Joe Stringered794072017-01-22 17:11:23 -08001997BPF_PROG_TYPE_FNS(tracepoint, BPF_PROG_TYPE_TRACEPOINT);
Andrey Ignatove14c93f2018-04-17 10:28:46 -07001998BPF_PROG_TYPE_FNS(raw_tracepoint, BPF_PROG_TYPE_RAW_TRACEPOINT);
Joe Stringer7803ba72017-01-22 17:11:24 -08001999BPF_PROG_TYPE_FNS(xdp, BPF_PROG_TYPE_XDP);
2000BPF_PROG_TYPE_FNS(perf_event, BPF_PROG_TYPE_PERF_EVENT);
Wang Nan5f44e4c82016-07-13 10:44:01 +00002001
John Fastabend16962b22018-04-23 14:30:38 -07002002void bpf_program__set_expected_attach_type(struct bpf_program *prog,
2003 enum bpf_attach_type type)
Andrey Ignatovd7be1432018-03-30 15:08:01 -07002004{
2005 prog->expected_attach_type = type;
2006}
2007
2008#define BPF_PROG_SEC_FULL(string, ptype, atype) \
2009 { string, sizeof(string) - 1, ptype, atype }
2010
2011#define BPF_PROG_SEC(string, ptype) BPF_PROG_SEC_FULL(string, ptype, 0)
2012
Andrey Ignatov81efee72018-04-17 10:28:45 -07002013#define BPF_S_PROG_SEC(string, ptype) \
2014 BPF_PROG_SEC_FULL(string, BPF_PROG_TYPE_CGROUP_SOCK, ptype)
2015
Andrey Ignatove50b0a62018-03-30 15:08:03 -07002016#define BPF_SA_PROG_SEC(string, ptype) \
2017 BPF_PROG_SEC_FULL(string, BPF_PROG_TYPE_CGROUP_SOCK_ADDR, ptype)
2018
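/*
 * Map ELF section-name prefixes to a program type and, where relevant, an
 * expected attach type.  Matching is done with strncmp() over the stored
 * prefix length, so e.g. a program placed in a section named
 * "kprobe/sys_write" matches the "kprobe/" entry, while "cgroup/bind4"
 * selects BPF_PROG_TYPE_CGROUP_SOCK_ADDR with BPF_CGROUP_INET4_BIND.
 */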
Roman Gushchin583c9002017-12-13 15:18:51 +00002019static const struct {
2020 const char *sec;
2021 size_t len;
2022 enum bpf_prog_type prog_type;
Andrey Ignatovd7be1432018-03-30 15:08:01 -07002023 enum bpf_attach_type expected_attach_type;
Roman Gushchin583c9002017-12-13 15:18:51 +00002024} section_names[] = {
2025 BPF_PROG_SEC("socket", BPF_PROG_TYPE_SOCKET_FILTER),
2026 BPF_PROG_SEC("kprobe/", BPF_PROG_TYPE_KPROBE),
2027 BPF_PROG_SEC("kretprobe/", BPF_PROG_TYPE_KPROBE),
Quentin Monnet0badd332018-02-07 20:27:13 -08002028 BPF_PROG_SEC("classifier", BPF_PROG_TYPE_SCHED_CLS),
2029 BPF_PROG_SEC("action", BPF_PROG_TYPE_SCHED_ACT),
Roman Gushchin583c9002017-12-13 15:18:51 +00002030 BPF_PROG_SEC("tracepoint/", BPF_PROG_TYPE_TRACEPOINT),
Andrey Ignatove14c93f2018-04-17 10:28:46 -07002031 BPF_PROG_SEC("raw_tracepoint/", BPF_PROG_TYPE_RAW_TRACEPOINT),
Roman Gushchin583c9002017-12-13 15:18:51 +00002032 BPF_PROG_SEC("xdp", BPF_PROG_TYPE_XDP),
2033 BPF_PROG_SEC("perf_event", BPF_PROG_TYPE_PERF_EVENT),
2034 BPF_PROG_SEC("cgroup/skb", BPF_PROG_TYPE_CGROUP_SKB),
2035 BPF_PROG_SEC("cgroup/sock", BPF_PROG_TYPE_CGROUP_SOCK),
2036 BPF_PROG_SEC("cgroup/dev", BPF_PROG_TYPE_CGROUP_DEVICE),
Quentin Monnet0badd332018-02-07 20:27:13 -08002037 BPF_PROG_SEC("lwt_in", BPF_PROG_TYPE_LWT_IN),
2038 BPF_PROG_SEC("lwt_out", BPF_PROG_TYPE_LWT_OUT),
2039 BPF_PROG_SEC("lwt_xmit", BPF_PROG_TYPE_LWT_XMIT),
Jakub Kicinskid9b683d2018-06-28 14:41:36 -07002040 BPF_PROG_SEC("lwt_seg6local", BPF_PROG_TYPE_LWT_SEG6LOCAL),
Roman Gushchin583c9002017-12-13 15:18:51 +00002041 BPF_PROG_SEC("sockops", BPF_PROG_TYPE_SOCK_OPS),
2042 BPF_PROG_SEC("sk_skb", BPF_PROG_TYPE_SK_SKB),
John Fastabend4c4c3c22018-03-18 12:57:41 -07002043 BPF_PROG_SEC("sk_msg", BPF_PROG_TYPE_SK_MSG),
Jakub Kicinskid9b683d2018-06-28 14:41:36 -07002044 BPF_PROG_SEC("lirc_mode2", BPF_PROG_TYPE_LIRC_MODE2),
Andrey Ignatove50b0a62018-03-30 15:08:03 -07002045 BPF_SA_PROG_SEC("cgroup/bind4", BPF_CGROUP_INET4_BIND),
2046 BPF_SA_PROG_SEC("cgroup/bind6", BPF_CGROUP_INET6_BIND),
Andrey Ignatov622adaf2018-03-30 15:08:06 -07002047 BPF_SA_PROG_SEC("cgroup/connect4", BPF_CGROUP_INET4_CONNECT),
2048 BPF_SA_PROG_SEC("cgroup/connect6", BPF_CGROUP_INET6_CONNECT),
Andrey Ignatov72481f32018-05-25 08:55:25 -07002049 BPF_SA_PROG_SEC("cgroup/sendmsg4", BPF_CGROUP_UDP4_SENDMSG),
2050 BPF_SA_PROG_SEC("cgroup/sendmsg6", BPF_CGROUP_UDP6_SENDMSG),
Andrey Ignatov81efee72018-04-17 10:28:45 -07002051 BPF_S_PROG_SEC("cgroup/post_bind4", BPF_CGROUP_INET4_POST_BIND),
2052 BPF_S_PROG_SEC("cgroup/post_bind6", BPF_CGROUP_INET6_POST_BIND),
Roman Gushchin583c9002017-12-13 15:18:51 +00002053};
Roman Gushchin583c9002017-12-13 15:18:51 +00002054
Andrey Ignatovd7be1432018-03-30 15:08:01 -07002055#undef BPF_PROG_SEC
2056#undef BPF_PROG_SEC_FULL
Andrey Ignatov81efee72018-04-17 10:28:45 -07002057#undef BPF_S_PROG_SEC
Andrey Ignatove50b0a62018-03-30 15:08:03 -07002058#undef BPF_SA_PROG_SEC
Andrey Ignatovd7be1432018-03-30 15:08:01 -07002059
2060static int bpf_program__identify_section(struct bpf_program *prog)
Roman Gushchin583c9002017-12-13 15:18:51 +00002061{
2062 int i;
2063
2064 if (!prog->section_name)
2065 goto err;
2066
2067 for (i = 0; i < ARRAY_SIZE(section_names); i++)
2068 if (strncmp(prog->section_name, section_names[i].sec,
2069 section_names[i].len) == 0)
Andrey Ignatovd7be1432018-03-30 15:08:01 -07002070 return i;
Roman Gushchin583c9002017-12-13 15:18:51 +00002071
2072err:
2073 pr_warning("failed to guess program type based on section name %s\n",
2074 prog->section_name);
2075
Andrey Ignatovd7be1432018-03-30 15:08:01 -07002076 return -1;
Roman Gushchin583c9002017-12-13 15:18:51 +00002077}
2078
Arnaldo Carvalho de Melo6e009e62016-06-03 12:15:52 -03002079int bpf_map__fd(struct bpf_map *map)
Wang Nan9d759a92015-11-27 08:47:35 +00002080{
Arnaldo Carvalho de Melo6e009e62016-06-03 12:15:52 -03002081 return map ? map->fd : -EINVAL;
Wang Nan9d759a92015-11-27 08:47:35 +00002082}
2083
Arnaldo Carvalho de Melo53897a72016-06-02 14:21:06 -03002084const struct bpf_map_def *bpf_map__def(struct bpf_map *map)
Wang Nan9d759a92015-11-27 08:47:35 +00002085{
Arnaldo Carvalho de Melo53897a72016-06-02 14:21:06 -03002086 return map ? &map->def : ERR_PTR(-EINVAL);
Wang Nan9d759a92015-11-27 08:47:35 +00002087}
2088
Arnaldo Carvalho de Melo009ad5d2016-06-02 11:02:05 -03002089const char *bpf_map__name(struct bpf_map *map)
Wang Nan561bbcc2015-11-27 08:47:36 +00002090{
Arnaldo Carvalho de Melo009ad5d2016-06-02 11:02:05 -03002091 return map ? map->name : NULL;
Wang Nan561bbcc2015-11-27 08:47:36 +00002092}
2093
Martin KaFai Lau61746db2018-05-22 15:04:24 -07002094uint32_t bpf_map__btf_key_type_id(const struct bpf_map *map)
Martin KaFai Lau8a138ae2018-04-18 15:56:05 -07002095{
Martin KaFai Lau61746db2018-05-22 15:04:24 -07002096 return map ? map->btf_key_type_id : 0;
Martin KaFai Lau8a138ae2018-04-18 15:56:05 -07002097}
2098
Martin KaFai Lau61746db2018-05-22 15:04:24 -07002099uint32_t bpf_map__btf_value_type_id(const struct bpf_map *map)
Martin KaFai Lau8a138ae2018-04-18 15:56:05 -07002100{
Martin KaFai Lau61746db2018-05-22 15:04:24 -07002101 return map ? map->btf_value_type_id : 0;
Martin KaFai Lau8a138ae2018-04-18 15:56:05 -07002102}
2103
Arnaldo Carvalho de Meloedb13ed2016-06-03 12:38:21 -03002104int bpf_map__set_priv(struct bpf_map *map, void *priv,
2105 bpf_map_clear_priv_t clear_priv)
Wang Nan9d759a92015-11-27 08:47:35 +00002106{
2107 if (!map)
2108 return -EINVAL;
2109
2110 if (map->priv) {
2111 if (map->clear_priv)
2112 map->clear_priv(map, map->priv);
2113 }
2114
2115 map->priv = priv;
2116 map->clear_priv = clear_priv;
2117 return 0;
2118}
2119
Arnaldo Carvalho de Melob4cbfa52016-06-02 10:51:59 -03002120void *bpf_map__priv(struct bpf_map *map)
Wang Nan9d759a92015-11-27 08:47:35 +00002121{
Arnaldo Carvalho de Melob4cbfa52016-06-02 10:51:59 -03002122 return map ? map->priv : ERR_PTR(-EINVAL);
Wang Nan9d759a92015-11-27 08:47:35 +00002123}
2124
2125struct bpf_map *
2126bpf_map__next(struct bpf_map *prev, struct bpf_object *obj)
2127{
2128 size_t idx;
2129 struct bpf_map *s, *e;
2130
2131 if (!obj || !obj->maps)
2132 return NULL;
2133
2134 s = obj->maps;
2135 e = obj->maps + obj->nr_maps;
2136
2137 if (prev == NULL)
2138 return s;
2139
2140 if ((prev < s) || (prev >= e)) {
2141 pr_warning("error in %s: map handler doesn't belong to object\n",
2142 __func__);
2143 return NULL;
2144 }
2145
2146 idx = (prev - obj->maps) + 1;
2147 if (idx >= obj->nr_maps)
2148 return NULL;
2149 return &obj->maps[idx];
2150}
Wang Nan561bbcc2015-11-27 08:47:36 +00002151
2152struct bpf_map *
Arnaldo Carvalho de Meloa7fe0452016-06-03 12:22:51 -03002153bpf_object__find_map_by_name(struct bpf_object *obj, const char *name)
Wang Nan561bbcc2015-11-27 08:47:36 +00002154{
2155 struct bpf_map *pos;
2156
2157 bpf_map__for_each(pos, obj) {
Wang Nan973170e2015-12-08 02:25:29 +00002158 if (pos->name && !strcmp(pos->name, name))
Wang Nan561bbcc2015-11-27 08:47:36 +00002159 return pos;
2160 }
2161 return NULL;
2162}
Wang Nan5a6acad2016-11-26 07:03:27 +00002163
2164struct bpf_map *
2165bpf_object__find_map_by_offset(struct bpf_object *obj, size_t offset)
2166{
2167 int i;
2168
2169 for (i = 0; i < obj->nr_maps; i++) {
2170 if (obj->maps[i].offset == offset)
2171 return &obj->maps[i];
2172 }
2173 return ERR_PTR(-ENOENT);
2174}
Joe Stringere28ff1a2017-01-22 17:11:25 -08002175
2176long libbpf_get_error(const void *ptr)
2177{
2178 if (IS_ERR(ptr))
2179 return PTR_ERR(ptr);
2180 return 0;
2181}
John Fastabend6f6d33f2017-08-15 22:34:22 -07002182
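/*
 * Convenience wrapper around bpf_prog_load_xattr() for callers that only need
 * to pass a file and (optionally) a program type.  A small usage sketch (the
 * object file name is hypothetical):
 *
 *	struct bpf_object *obj;
 *	int prog_fd;
 *
 *	if (bpf_prog_load("xdp_prog.o", BPF_PROG_TYPE_XDP, &obj, &prog_fd))
 *		return -1;
 *	... prog_fd now refers to the first program in the object ...
 */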
2183int bpf_prog_load(const char *file, enum bpf_prog_type type,
2184 struct bpf_object **pobj, int *prog_fd)
2185{
Andrey Ignatovd7be1432018-03-30 15:08:01 -07002186 struct bpf_prog_load_attr attr;
2187
2188 memset(&attr, 0, sizeof(struct bpf_prog_load_attr));
2189 attr.file = file;
2190 attr.prog_type = type;
2191 attr.expected_attach_type = 0;
2192
2193 return bpf_prog_load_xattr(&attr, pobj, prog_fd);
2194}
2195
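/*
 * Open, type-annotate and load an object in one call.  If the caller passed
 * BPF_PROG_TYPE_UNSPEC, each program's type and expected attach type are
 * guessed from its section name via the table above; the supplied ifindex is
 * propagated to every program and map (used for hardware offload).  On
 * success *prog_fd is the fd of the first program outside the .text section.
 */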
2196int bpf_prog_load_xattr(const struct bpf_prog_load_attr *attr,
2197 struct bpf_object **pobj, int *prog_fd)
2198{
Alexei Starovoitov48cca7e2017-12-14 17:55:10 -08002199 struct bpf_program *prog, *first_prog = NULL;
Andrey Ignatovd7be1432018-03-30 15:08:01 -07002200 enum bpf_attach_type expected_attach_type;
2201 enum bpf_prog_type prog_type;
John Fastabend6f6d33f2017-08-15 22:34:22 -07002202 struct bpf_object *obj;
David Beckettf0307a72018-05-16 14:02:49 -07002203 struct bpf_map *map;
Andrey Ignatovd7be1432018-03-30 15:08:01 -07002204 int section_idx;
John Fastabend6f6d33f2017-08-15 22:34:22 -07002205 int err;
2206
Andrey Ignatovd7be1432018-03-30 15:08:01 -07002207 if (!attr)
2208 return -EINVAL;
Jakub Kicinski17387dd2018-05-10 10:24:42 -07002209 if (!attr->file)
2210 return -EINVAL;
Andrey Ignatovd7be1432018-03-30 15:08:01 -07002211
Jakub Kicinski17387dd2018-05-10 10:24:42 -07002212 obj = __bpf_object__open(attr->file, NULL, 0,
2213 bpf_prog_type__needs_kver(attr->prog_type));
Jakub Kicinski35976832018-05-10 10:09:34 -07002214 if (IS_ERR_OR_NULL(obj))
John Fastabend6f6d33f2017-08-15 22:34:22 -07002215 return -ENOENT;
2216
Alexei Starovoitov48cca7e2017-12-14 17:55:10 -08002217 bpf_object__for_each_program(prog, obj) {
2218 /*
2219 * If type is not specified, try to guess it based on
2220 * section name.
2221 */
Andrey Ignatovd7be1432018-03-30 15:08:01 -07002222 prog_type = attr->prog_type;
David Beckettf0307a72018-05-16 14:02:49 -07002223 prog->prog_ifindex = attr->ifindex;
Andrey Ignatovd7be1432018-03-30 15:08:01 -07002224 expected_attach_type = attr->expected_attach_type;
2225 if (prog_type == BPF_PROG_TYPE_UNSPEC) {
2226 section_idx = bpf_program__identify_section(prog);
2227 if (section_idx < 0) {
Alexei Starovoitov48cca7e2017-12-14 17:55:10 -08002228 bpf_object__close(obj);
2229 return -EINVAL;
2230 }
Andrey Ignatovd7be1432018-03-30 15:08:01 -07002231 prog_type = section_names[section_idx].prog_type;
2232 expected_attach_type =
2233 section_names[section_idx].expected_attach_type;
Alexei Starovoitov48cca7e2017-12-14 17:55:10 -08002234 }
2235
Andrey Ignatovd7be1432018-03-30 15:08:01 -07002236 bpf_program__set_type(prog, prog_type);
2237 bpf_program__set_expected_attach_type(prog,
2238 expected_attach_type);
2239
Alexei Starovoitov48cca7e2017-12-14 17:55:10 -08002240 if (prog->idx != obj->efile.text_shndx && !first_prog)
2241 first_prog = prog;
2242 }
2243
David Beckettf0307a72018-05-16 14:02:49 -07002244 bpf_map__for_each(map, obj) {
2245 map->map_ifindex = attr->ifindex;
2246 }
2247
Alexei Starovoitov48cca7e2017-12-14 17:55:10 -08002248 if (!first_prog) {
2249 pr_warning("object file doesn't contain bpf program\n");
John Fastabend6f6d33f2017-08-15 22:34:22 -07002250 bpf_object__close(obj);
2251 return -ENOENT;
2252 }
2253
John Fastabend6f6d33f2017-08-15 22:34:22 -07002254 err = bpf_object__load(obj);
2255 if (err) {
2256 bpf_object__close(obj);
2257 return -EINVAL;
2258 }
2259
2260 *pobj = obj;
Alexei Starovoitov48cca7e2017-12-14 17:55:10 -08002261 *prog_fd = bpf_program__fd(first_prog);
John Fastabend6f6d33f2017-08-15 22:34:22 -07002262 return 0;
2263}
Jakub Kicinskid0cabbb2018-05-10 10:24:40 -07002264
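/*
 * Walk a perf event ring buffer from data_tail to data_head, calling fn for
 * each record.  Records that wrap around the end of the ring are first copied
 * into *buf (grown on demand) so fn always sees a contiguous record.
 * data_tail is only written back once the walk stops, which is what tells the
 * kernel that the records have been consumed.
 */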
2265enum bpf_perf_event_ret
2266bpf_perf_event_read_simple(void *mem, unsigned long size,
2267 unsigned long page_size, void **buf, size_t *buf_len,
2268 bpf_perf_event_print_t fn, void *priv)
2269{
2270 volatile struct perf_event_mmap_page *header = mem;
2271 __u64 data_tail = header->data_tail;
2272 __u64 data_head = header->data_head;
2273 void *base, *begin, *end;
2274 int ret;
2275
2276 asm volatile("" ::: "memory"); /* in real code it should be smp_rmb() */
2277 if (data_head == data_tail)
2278 return LIBBPF_PERF_EVENT_CONT;
2279
2280 base = ((char *)header) + page_size;
2281
2282 begin = base + data_tail % size;
2283 end = base + data_head % size;
2284
2285 while (begin != end) {
2286 struct perf_event_header *ehdr;
2287
2288 ehdr = begin;
2289 if (begin + ehdr->size > base + size) {
2290 long len = base + size - begin;
2291
2292 if (*buf_len < ehdr->size) {
2293 free(*buf);
2294 *buf = malloc(ehdr->size);
2295 if (!*buf) {
2296 ret = LIBBPF_PERF_EVENT_ERROR;
2297 break;
2298 }
2299 *buf_len = ehdr->size;
2300 }
2301
2302 memcpy(*buf, begin, len);
2303 memcpy(*buf + len, base, ehdr->size - len);
2304 ehdr = (void *)*buf;
2305 begin = base + ehdr->size - len;
2306 } else if (begin + ehdr->size == base + size) {
2307 begin = base;
2308 } else {
2309 begin += ehdr->size;
2310 }
2311
2312 ret = fn(ehdr, priv);
2313 if (ret != LIBBPF_PERF_EVENT_CONT)
2314 break;
2315
2316 data_tail += ehdr->size;
2317 }
2318
2319 __sync_synchronize(); /* smp_mb() */
2320 header->data_tail = data_tail;
2321
2322 return ret;
2323}