blob: e0500055f1a66be661acce5367603843a16ef27a [file] [log] [blame]
Eric Leblond6061a3d2018-01-30 21:55:03 +01001// SPDX-License-Identifier: LGPL-2.1
2
Wang Nane3ed2fe2015-07-01 02:14:03 +00003/*
4 * common eBPF ELF operations.
5 *
6 * Copyright (C) 2013-2015 Alexei Starovoitov <ast@kernel.org>
7 * Copyright (C) 2015 Wang Nan <wangnan0@huawei.com>
8 * Copyright (C) 2015 Huawei Inc.
Wang Nan203d1ca2016-07-04 11:02:42 +00009 *
10 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU Lesser General Public
12 * License as published by the Free Software Foundation;
13 * version 2.1 of the License (not later!)
14 *
15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 * GNU Lesser General Public License for more details.
19 *
20 * You should have received a copy of the GNU Lesser General Public
21 * License along with this program; if not, see <http://www.gnu.org/licenses>
Wang Nane3ed2fe2015-07-01 02:14:03 +000022 */
23
24#include <stdlib.h>
25#include <memory.h>
26#include <unistd.h>
27#include <asm/unistd.h>
28#include <linux/bpf.h>
29#include "bpf.h"
Eric Leblond949abbe2018-01-30 21:55:01 +010030#include "libbpf.h"
31#include "nlattr.h"
32#include <linux/rtnetlink.h>
33#include <linux/if_link.h>
34#include <sys/socket.h>
35#include <errno.h>
Wang Nane3ed2fe2015-07-01 02:14:03 +000036
Eric Leblondbbf48c12018-01-30 21:55:02 +010037#ifndef SOL_NETLINK
38#define SOL_NETLINK 270
39#endif
40
Wang Nane3ed2fe2015-07-01 02:14:03 +000041/*
Masahiro Yamada03671052017-02-27 14:29:28 -080042 * When building perf, unistd.h is overridden. __NR_bpf is
Wang Nan8f9e05f2016-01-11 13:47:57 +000043 * required to be defined explicitly.
Wang Nane3ed2fe2015-07-01 02:14:03 +000044 */
45#ifndef __NR_bpf
46# if defined(__i386__)
47# define __NR_bpf 357
48# elif defined(__x86_64__)
49# define __NR_bpf 321
50# elif defined(__aarch64__)
51# define __NR_bpf 280
David S. Millerb0c47802017-04-22 12:31:05 -070052# elif defined(__sparc__)
53# define __NR_bpf 349
Daniel Borkmannbad19262017-08-04 14:20:55 +020054# elif defined(__s390__)
55# define __NR_bpf 351
Wang Nane3ed2fe2015-07-01 02:14:03 +000056# else
57# error __NR_bpf not defined. libbpf does not support your arch.
58# endif
59#endif
60
Eric Leblond949abbe2018-01-30 21:55:01 +010061#ifndef min
Martin KaFai Lau88cda1c2017-09-27 14:37:54 -070062#define min(x, y) ((x) < (y) ? (x) : (y))
Eric Leblond949abbe2018-01-30 21:55:01 +010063#endif
Martin KaFai Lau88cda1c2017-09-27 14:37:54 -070064
/* Widen a host pointer to the __u64 representation used by union bpf_attr. */
static inline __u64 ptr_to_u64(const void *ptr)
{
	unsigned long addr = (unsigned long) ptr;

	return (__u64) addr;
}
69
/*
 * Thin wrapper around the bpf(2) syscall: forwards @cmd, @attr and
 * @size unchanged and returns the raw syscall result.
 */
static inline int sys_bpf(enum bpf_cmd cmd, union bpf_attr *attr,
			  unsigned int size)
{
	long ret = syscall(__NR_bpf, cmd, attr, size);

	return (int) ret;
}
75
/*
 * Create a BPF map via BPF_MAP_CREATE.
 * @name may be NULL; when given, at most BPF_OBJ_NAME_LEN - 1 bytes are
 * copied into attr.map_name (the memset keeps it NUL-terminated).
 * @node >= 0 pins the map's memory to that NUMA node via BPF_F_NUMA_NODE.
 * Returns the new map fd, or a negative value on syscall failure.
 */
int bpf_create_map_node(enum bpf_map_type map_type, const char *name,
			int key_size, int value_size, int max_entries,
			__u32 map_flags, int node)
{
	__u32 name_len = name ? strlen(name) : 0;
	union bpf_attr attr;

	memset(&attr, '\0', sizeof(attr));

	attr.map_type = map_type;
	attr.key_size = key_size;
	attr.value_size = value_size;
	attr.max_entries = max_entries;
	attr.map_flags = map_flags;
	/* Guard: memcpy from a NULL source is UB even with length 0. */
	if (name)
		memcpy(attr.map_name, name, min(name_len, BPF_OBJ_NAME_LEN - 1));

	if (node >= 0) {
		attr.map_flags |= BPF_F_NUMA_NODE;
		attr.numa_node = node;
	}

	return sys_bpf(BPF_MAP_CREATE, &attr, sizeof(attr));
}
Wang Nan7bf98362015-07-01 02:14:06 +000099
Martin KaFai Lauad17d0e2017-08-18 11:28:01 -0700100int bpf_create_map(enum bpf_map_type map_type, int key_size,
101 int value_size, int max_entries, __u32 map_flags)
102{
Martin KaFai Lau88cda1c2017-09-27 14:37:54 -0700103 return bpf_create_map_node(map_type, NULL, key_size, value_size,
Martin KaFai Lauad17d0e2017-08-18 11:28:01 -0700104 max_entries, map_flags, -1);
105}
106
/* Convenience wrapper: named map, no NUMA node constraint (-1). */
int bpf_create_map_name(enum bpf_map_type map_type, const char *name,
			int key_size, int value_size, int max_entries,
			__u32 map_flags)
{
	return bpf_create_map_node(map_type, name, key_size, value_size,
				   max_entries, map_flags, -1);
}
114
/*
 * Create a map-in-map (e.g. array/hash of maps) via BPF_MAP_CREATE.
 * The value size is fixed at 4 bytes (an inner map fd/id slot) and
 * @inner_map_fd supplies the template for the inner maps.
 * @name may be NULL; @node >= 0 requests NUMA-node placement.
 * Returns the new map fd, or a negative value on syscall failure.
 */
int bpf_create_map_in_map_node(enum bpf_map_type map_type, const char *name,
			       int key_size, int inner_map_fd, int max_entries,
			       __u32 map_flags, int node)
{
	__u32 name_len = name ? strlen(name) : 0;
	union bpf_attr attr;

	memset(&attr, '\0', sizeof(attr));

	attr.map_type = map_type;
	attr.key_size = key_size;
	attr.value_size = 4;
	attr.inner_map_fd = inner_map_fd;
	attr.max_entries = max_entries;
	attr.map_flags = map_flags;
	/* Guard: memcpy from a NULL source is UB even with length 0. */
	if (name)
		memcpy(attr.map_name, name, min(name_len, BPF_OBJ_NAME_LEN - 1));

	if (node >= 0) {
		attr.map_flags |= BPF_F_NUMA_NODE;
		attr.numa_node = node;
	}

	return sys_bpf(BPF_MAP_CREATE, &attr, sizeof(attr));
}
139
/* Convenience wrapper: map-in-map with no NUMA node constraint (-1). */
int bpf_create_map_in_map(enum bpf_map_type map_type, const char *name,
			  int key_size, int inner_map_fd, int max_entries,
			  __u32 map_flags)
{
	return bpf_create_map_in_map_node(map_type, name, key_size,
					  inner_map_fd, max_entries, map_flags,
					  -1);
}
148
/*
 * Load a BPF program via BPF_PROG_LOAD.
 * First attempts the load without a verifier log (cheapest path); if the
 * kernel rejects the program and the caller supplied @log_buf, retries
 * once with log_level 1 so the verifier message lands in @log_buf.
 * @name may be NULL; at most BPF_OBJ_NAME_LEN - 1 bytes are copied.
 * Returns the program fd, or a negative value on failure.
 */
int bpf_load_program_name(enum bpf_prog_type type, const char *name,
			  const struct bpf_insn *insns,
			  size_t insns_cnt, const char *license,
			  __u32 kern_version, char *log_buf,
			  size_t log_buf_sz)
{
	int fd;
	union bpf_attr attr;
	__u32 name_len = name ? strlen(name) : 0;

	memset(&attr, 0, sizeof(attr));
	attr.prog_type = type;
	attr.insn_cnt = (__u32)insns_cnt;
	attr.insns = ptr_to_u64(insns);
	attr.license = ptr_to_u64(license);
	attr.log_buf = ptr_to_u64(NULL);
	attr.log_size = 0;
	attr.log_level = 0;
	attr.kern_version = kern_version;
	/* Guard: memcpy from a NULL source is UB even with length 0. */
	if (name)
		memcpy(attr.prog_name, name, min(name_len, BPF_OBJ_NAME_LEN - 1));

	fd = sys_bpf(BPF_PROG_LOAD, &attr, sizeof(attr));
	if (fd >= 0 || !log_buf || !log_buf_sz)
		return fd;

	/* Try again with log */
	attr.log_buf = ptr_to_u64(log_buf);
	attr.log_size = log_buf_sz;
	attr.log_level = 1;
	log_buf[0] = 0;
	return sys_bpf(BPF_PROG_LOAD, &attr, sizeof(attr));
}
He Kuang43798bf2015-11-24 13:36:08 +0000181
Martin KaFai Lau88cda1c2017-09-27 14:37:54 -0700182int bpf_load_program(enum bpf_prog_type type, const struct bpf_insn *insns,
183 size_t insns_cnt, const char *license,
184 __u32 kern_version, char *log_buf,
185 size_t log_buf_sz)
186{
187 return bpf_load_program_name(type, NULL, insns, insns_cnt, license,
188 kern_version, log_buf, log_buf_sz);
189}
190
/*
 * Load a program with an explicit verifier log level and optional
 * strict-alignment checking (BPF_F_STRICT_ALIGNMENT), for testing the
 * verifier itself.  The verifier log is written into @log_buf.
 * Returns the program fd, or a negative value on failure.
 */
int bpf_verify_program(enum bpf_prog_type type, const struct bpf_insn *insns,
		       size_t insns_cnt, int strict_alignment,
		       const char *license, __u32 kern_version,
		       char *log_buf, size_t log_buf_sz, int log_level)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.prog_type = type;
	attr.insn_cnt = (__u32)insns_cnt;
	attr.insns = ptr_to_u64(insns);
	attr.license = ptr_to_u64(license);
	attr.log_buf = ptr_to_u64(log_buf);
	attr.log_size = log_buf_sz;
	attr.log_level = log_level;
	/* Fix: don't dereference a NULL/empty log buffer unconditionally. */
	if (log_buf && log_buf_sz > 0)
		log_buf[0] = 0;
	attr.kern_version = kern_version;
	attr.prog_flags = strict_alignment ? BPF_F_STRICT_ALIGNMENT : 0;

	return sys_bpf(BPF_PROG_LOAD, &attr, sizeof(attr));
}
212
/*
 * Create or update the element at @key in map @fd with @value;
 * @flags is forwarded unchanged to the kernel.
 * Returns the BPF_MAP_UPDATE_ELEM syscall result.
 */
int bpf_map_update_elem(int fd, const void *key, const void *value,
			__u64 flags)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.map_fd = fd;
	attr.flags = flags;
	attr.key = ptr_to_u64(key);
	attr.value = ptr_to_u64(value);

	return sys_bpf(BPF_MAP_UPDATE_ELEM, &attr, sizeof(attr));
}
Wang Nan9742da02016-11-26 07:03:25 +0000226
/*
 * Look up @key in map @fd; on success the kernel copies the element
 * into @value.  Returns the BPF_MAP_LOOKUP_ELEM syscall result.
 */
int bpf_map_lookup_elem(int fd, const void *key, void *value)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.map_fd = fd;
	attr.key = ptr_to_u64(key);
	attr.value = ptr_to_u64(value);

	return sys_bpf(BPF_MAP_LOOKUP_ELEM, &attr, sizeof(attr));
}
238
/*
 * Delete the element at @key from map @fd.
 * Returns the BPF_MAP_DELETE_ELEM syscall result.
 */
int bpf_map_delete_elem(int fd, const void *key)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.map_fd = fd;
	attr.key = ptr_to_u64(key);

	return sys_bpf(BPF_MAP_DELETE_ELEM, &attr, sizeof(attr));
}
249
/*
 * Iterate map @fd: the kernel writes the key following @key into
 * @next_key.  Returns the BPF_MAP_GET_NEXT_KEY syscall result.
 */
int bpf_map_get_next_key(int fd, const void *key, void *next_key)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.map_fd = fd;
	attr.key = ptr_to_u64(key);
	attr.next_key = ptr_to_u64(next_key);

	return sys_bpf(BPF_MAP_GET_NEXT_KEY, &attr, sizeof(attr));
}
261
/*
 * Pin BPF object @fd at @pathname (typically under a bpffs mount).
 * Returns the BPF_OBJ_PIN syscall result.
 */
int bpf_obj_pin(int fd, const char *pathname)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.bpf_fd = fd;
	attr.pathname = ptr_to_u64((void *)pathname);

	return sys_bpf(BPF_OBJ_PIN, &attr, sizeof(attr));
}
272
/*
 * Open the BPF object previously pinned at @pathname.
 * Returns a new fd for the object, or the failing syscall result.
 */
int bpf_obj_get(const char *pathname)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.pathname = ptr_to_u64((void *)pathname);

	return sys_bpf(BPF_OBJ_GET, &attr, sizeof(attr));
}
Joe Stringer5dc880d2016-12-14 14:05:26 -0800282
/*
 * Attach program @prog_fd to @target_fd for attach point @type,
 * passing @flags through as attach_flags.
 * Returns the BPF_PROG_ATTACH syscall result.
 */
int bpf_prog_attach(int prog_fd, int target_fd, enum bpf_attach_type type,
		    unsigned int flags)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.attach_bpf_fd = prog_fd;
	attr.target_fd = target_fd;
	attr.attach_type = type;
	attr.attach_flags = flags;

	return sys_bpf(BPF_PROG_ATTACH, &attr, sizeof(attr));
}
296
/*
 * Detach whatever program is attached to @target_fd at attach point
 * @type.  Returns the BPF_PROG_DETACH syscall result.
 */
int bpf_prog_detach(int target_fd, enum bpf_attach_type type)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.attach_type = type;
	attr.target_fd = target_fd;

	return sys_bpf(BPF_PROG_DETACH, &attr, sizeof(attr));
}
Alexei Starovoitov30848872017-03-30 21:45:39 -0700307
/*
 * Like bpf_prog_detach(), but also names the specific program
 * (@prog_fd) to detach, via attach_bpf_fd.
 * Returns the BPF_PROG_DETACH syscall result.
 */
int bpf_prog_detach2(int prog_fd, int target_fd, enum bpf_attach_type type)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.attach_bpf_fd = prog_fd;
	attr.target_fd = target_fd;
	attr.attach_type = type;

	return sys_bpf(BPF_PROG_DETACH, &attr, sizeof(attr));
}
319
/*
 * Query the programs attached to @target_fd at attach point @type.
 * On input *prog_cnt is the capacity of @prog_ids; on return it holds
 * the number of entries the kernel reported.  @attach_flags may be
 * NULL if the caller doesn't need the attach flags.
 * Returns the BPF_PROG_QUERY syscall result.
 */
int bpf_prog_query(int target_fd, enum bpf_attach_type type, __u32 query_flags,
		   __u32 *attach_flags, __u32 *prog_ids, __u32 *prog_cnt)
{
	union bpf_attr attr;
	int ret;

	memset(&attr, 0, sizeof(attr));
	attr.query.target_fd = target_fd;
	attr.query.attach_type = type;
	attr.query.query_flags = query_flags;
	attr.query.prog_ids = ptr_to_u64(prog_ids);
	attr.query.prog_cnt = *prog_cnt;

	ret = sys_bpf(BPF_PROG_QUERY, &attr, sizeof(attr));
	if (attach_flags)
		*attach_flags = attr.query.attach_flags;
	*prog_cnt = attr.query.prog_cnt;
	return ret;
}
339
/*
 * Run program @prog_fd @repeat times on input @data/@size via
 * BPF_PROG_TEST_RUN.  Output data lands in @data_out; @size_out,
 * @retval and @duration are each filled in only when non-NULL.
 * Returns the syscall result.
 */
int bpf_prog_test_run(int prog_fd, int repeat, void *data, __u32 size,
		      void *data_out, __u32 *size_out, __u32 *retval,
		      __u32 *duration)
{
	union bpf_attr attr;
	int ret;

	memset(&attr, 0, sizeof(attr));
	attr.test.prog_fd = prog_fd;
	attr.test.repeat = repeat;
	attr.test.data_in = ptr_to_u64(data);
	attr.test.data_size_in = size;
	attr.test.data_out = ptr_to_u64(data_out);

	ret = sys_bpf(BPF_PROG_TEST_RUN, &attr, sizeof(attr));
	if (size_out)
		*size_out = attr.test.data_size_out;
	if (retval)
		*retval = attr.test.retval;
	if (duration)
		*duration = attr.test.duration;
	return ret;
}
Martin KaFai Lau95b9afd2017-06-05 12:15:53 -0700363
/*
 * Fetch the program id following @start_id into *next_id.
 * *next_id is written only on success (return value 0).
 */
int bpf_prog_get_next_id(__u32 start_id, __u32 *next_id)
{
	union bpf_attr attr;
	int err;

	memset(&attr, 0, sizeof(attr));
	attr.start_id = start_id;

	err = sys_bpf(BPF_PROG_GET_NEXT_ID, &attr, sizeof(attr));
	if (!err)
		*next_id = attr.next_id;

	return err;
}
378
/*
 * Fetch the map id following @start_id into *next_id.
 * *next_id is written only on success (return value 0).
 */
int bpf_map_get_next_id(__u32 start_id, __u32 *next_id)
{
	union bpf_attr attr;
	int err;

	memset(&attr, 0, sizeof(attr));
	attr.start_id = start_id;

	err = sys_bpf(BPF_MAP_GET_NEXT_ID, &attr, sizeof(attr));
	if (!err)
		*next_id = attr.next_id;

	return err;
}
393
/* Open an fd for the loaded program identified by @id. */
int bpf_prog_get_fd_by_id(__u32 id)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.prog_id = id;

	return sys_bpf(BPF_PROG_GET_FD_BY_ID, &attr, sizeof(attr));
}
403
/* Open an fd for the map identified by @id. */
int bpf_map_get_fd_by_id(__u32 id)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.map_id = id;

	return sys_bpf(BPF_MAP_GET_FD_BY_ID, &attr, sizeof(attr));
}
413
/*
 * Fetch kernel info for the BPF object behind @prog_fd into @info.
 * On input *info_len is the size of @info; on success it is updated
 * to the length the kernel actually filled in.
 */
int bpf_obj_get_info_by_fd(int prog_fd, void *info, __u32 *info_len)
{
	union bpf_attr attr;
	int err;

	memset(&attr, 0, sizeof(attr));
	attr.info.bpf_fd = prog_fd;
	attr.info.info = ptr_to_u64(info);
	attr.info.info_len = *info_len;

	err = sys_bpf(BPF_OBJ_GET_INFO_BY_FD, &attr, sizeof(attr));
	if (!err)
		*info_len = attr.info.info_len;

	return err;
}
Eric Leblond949abbe2018-01-30 21:55:01 +0100430
/*
 * Attach program @prog_fd to the raw tracepoint named @name via
 * BPF_RAW_TRACEPOINT_OPEN.  Returns the resulting fd or the failing
 * syscall result.
 */
int bpf_raw_tracepoint_open(const char *name, int prog_fd)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.raw_tracepoint.prog_fd = prog_fd;
	attr.raw_tracepoint.name = ptr_to_u64(name);

	return sys_bpf(BPF_RAW_TRACEPOINT_OPEN, &attr, sizeof(attr));
}
441
/*
 * Attach (or detach, with fd == -1 by rtnetlink convention — TODO confirm
 * against callers) an XDP program on interface @ifindex by sending an
 * RTM_SETLINK request with a nested IFLA_XDP attribute over a NETLINK_ROUTE
 * socket.  @flags, when non-zero, is forwarded as IFLA_XDP_FLAGS.
 * Returns 0 on success, a negative errno, or a negative LIBBPF_ERRNO__*
 * code on netlink protocol errors.
 */
int bpf_set_link_xdp_fd(int ifindex, int fd, __u32 flags)
{
	struct sockaddr_nl sa;
	int sock, seq = 0, len, ret = -1;
	char buf[4096];
	struct nlattr *nla, *nla_xdp;
	struct {
		struct nlmsghdr nh;
		struct ifinfomsg ifinfo;
		char attrbuf[64];	/* room for the hand-built nlattrs below */
	} req;
	struct nlmsghdr *nh;
	struct nlmsgerr *err;
	socklen_t addrlen;
	int one = 1;

	memset(&sa, 0, sizeof(sa));
	sa.nl_family = AF_NETLINK;

	sock = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);
	if (sock < 0) {
		return -errno;
	}

	/* Best-effort: ask for extended ack error strings; older kernels
	 * lack NETLINK_EXT_ACK, so failure is only reported, not fatal. */
	if (setsockopt(sock, SOL_NETLINK, NETLINK_EXT_ACK,
		       &one, sizeof(one)) < 0) {
		fprintf(stderr, "Netlink error reporting not supported\n");
	}

	if (bind(sock, (struct sockaddr *)&sa, sizeof(sa)) < 0) {
		ret = -errno;
		goto cleanup;
	}

	/* Read back the kernel-assigned nl_pid so replies can be matched. */
	addrlen = sizeof(sa);
	if (getsockname(sock, (struct sockaddr *)&sa, &addrlen) < 0) {
		ret = -errno;
		goto cleanup;
	}

	if (addrlen != sizeof(sa)) {
		ret = -LIBBPF_ERRNO__INTERNAL;
		goto cleanup;
	}

	/* Build the RTM_SETLINK request header for @ifindex. */
	memset(&req, 0, sizeof(req));
	req.nh.nlmsg_len = NLMSG_LENGTH(sizeof(struct ifinfomsg));
	req.nh.nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK;
	req.nh.nlmsg_type = RTM_SETLINK;
	req.nh.nlmsg_pid = 0;
	req.nh.nlmsg_seq = ++seq;
	req.ifinfo.ifi_family = AF_UNSPEC;
	req.ifinfo.ifi_index = ifindex;

	/* started nested attribute for XDP */
	nla = (struct nlattr *)(((char *)&req)
				+ NLMSG_ALIGN(req.nh.nlmsg_len));
	nla->nla_type = NLA_F_NESTED | IFLA_XDP;
	nla->nla_len = NLA_HDRLEN;

	/* add XDP fd */
	nla_xdp = (struct nlattr *)((char *)nla + nla->nla_len);
	nla_xdp->nla_type = IFLA_XDP_FD;
	nla_xdp->nla_len = NLA_HDRLEN + sizeof(int);
	memcpy((char *)nla_xdp + NLA_HDRLEN, &fd, sizeof(fd));
	nla->nla_len += nla_xdp->nla_len;

	/* if user passed in any flags, add those too */
	if (flags) {
		nla_xdp = (struct nlattr *)((char *)nla + nla->nla_len);
		nla_xdp->nla_type = IFLA_XDP_FLAGS;
		nla_xdp->nla_len = NLA_HDRLEN + sizeof(flags);
		memcpy((char *)nla_xdp + NLA_HDRLEN, &flags, sizeof(flags));
		nla->nla_len += nla_xdp->nla_len;
	}

	/* Account for the whole nested attribute in the message length. */
	req.nh.nlmsg_len += NLA_ALIGN(nla->nla_len);

	if (send(sock, &req, req.nh.nlmsg_len, 0) < 0) {
		ret = -errno;
		goto cleanup;
	}

	len = recv(sock, buf, sizeof(buf), 0);
	if (len < 0) {
		ret = -errno;
		goto cleanup;
	}

	/* Walk the reply; reject messages not addressed to us or out of
	 * sequence, and surface any NLMSG_ERROR payload as the result. */
	for (nh = (struct nlmsghdr *)buf; NLMSG_OK(nh, len);
	     nh = NLMSG_NEXT(nh, len)) {
		if (nh->nlmsg_pid != sa.nl_pid) {
			ret = -LIBBPF_ERRNO__WRNGPID;
			goto cleanup;
		}
		if (nh->nlmsg_seq != seq) {
			ret = -LIBBPF_ERRNO__INVSEQ;
			goto cleanup;
		}
		switch (nh->nlmsg_type) {
		case NLMSG_ERROR:
			err = (struct nlmsgerr *)NLMSG_DATA(nh);
			if (!err->error)
				continue;	/* error 0 == ACK: keep scanning */
			ret = err->error;
			nla_dump_errormsg(nh);	/* print extended ack, if any */
			goto cleanup;
		case NLMSG_DONE:
			break;
		default:
			break;
		}
	}

	ret = 0;

cleanup:
	close(sock);
	return ret;
}