blob: 9c88f6e4156d6262273b0dc27fdf88af42d922e7 [file] [log] [blame]
/*
 * common eBPF ELF operations.
 *
 * Copyright (C) 2013-2015 Alexei Starovoitov <ast@kernel.org>
 * Copyright (C) 2015 Wang Nan <wangnan0@huawei.com>
 * Copyright (C) 2015 Huawei Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation;
 * version 2.1 of the License (not later!)
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this program; if not, see <http://www.gnu.org/licenses>
 */
21
22#include <stdlib.h>
23#include <memory.h>
24#include <unistd.h>
25#include <asm/unistd.h>
26#include <linux/bpf.h>
27#include "bpf.h"
Eric Leblond949abbe2018-01-30 21:55:01 +010028#include "libbpf.h"
29#include "nlattr.h"
30#include <linux/rtnetlink.h>
31#include <linux/if_link.h>
32#include <sys/socket.h>
33#include <errno.h>
Wang Nane3ed2fe2015-07-01 02:14:03 +000034
Eric Leblondbbf48c12018-01-30 21:55:02 +010035#ifndef SOL_NETLINK
36#define SOL_NETLINK 270
37#endif
38
Wang Nane3ed2fe2015-07-01 02:14:03 +000039/*
Masahiro Yamada03671052017-02-27 14:29:28 -080040 * When building perf, unistd.h is overridden. __NR_bpf is
Wang Nan8f9e05f2016-01-11 13:47:57 +000041 * required to be defined explicitly.
Wang Nane3ed2fe2015-07-01 02:14:03 +000042 */
43#ifndef __NR_bpf
44# if defined(__i386__)
45# define __NR_bpf 357
46# elif defined(__x86_64__)
47# define __NR_bpf 321
48# elif defined(__aarch64__)
49# define __NR_bpf 280
David S. Millerb0c47802017-04-22 12:31:05 -070050# elif defined(__sparc__)
51# define __NR_bpf 349
Daniel Borkmannbad19262017-08-04 14:20:55 +020052# elif defined(__s390__)
53# define __NR_bpf 351
Wang Nane3ed2fe2015-07-01 02:14:03 +000054# else
55# error __NR_bpf not defined. libbpf does not support your arch.
56# endif
57#endif
58
Eric Leblond949abbe2018-01-30 21:55:01 +010059#ifndef min
Martin KaFai Lau88cda1c2017-09-27 14:37:54 -070060#define min(x, y) ((x) < (y) ? (x) : (y))
Eric Leblond949abbe2018-01-30 21:55:01 +010061#endif
Martin KaFai Lau88cda1c2017-09-27 14:37:54 -070062
/* Widen a userspace pointer to the __u64 representation used by
 * union bpf_attr fields (via unsigned long to avoid sign extension).
 */
static inline __u64 ptr_to_u64(const void *ptr)
{
	unsigned long addr = (unsigned long) ptr;

	return (__u64) addr;
}
67
/* Thin wrapper around the bpf(2) syscall; returns the raw syscall
 * result (fd or 0 on success, -1 with errno set on failure).
 */
static inline int sys_bpf(enum bpf_cmd cmd, union bpf_attr *attr,
			  unsigned int size)
{
	long ret = syscall(__NR_bpf, cmd, attr, size);

	return (int) ret;
}
73
/*
 * Create a BPF map, optionally named and optionally pinned to a NUMA
 * node (node < 0 means "no preference").  The name is truncated to
 * BPF_OBJ_NAME_LEN - 1 bytes; attr was zeroed so it stays terminated.
 * Returns a map fd, or -1 with errno set on failure.
 */
int bpf_create_map_node(enum bpf_map_type map_type, const char *name,
			int key_size, int value_size, int max_entries,
			__u32 map_flags, int node)
{
	__u32 name_len = name ? strlen(name) : 0;
	union bpf_attr attr;

	memset(&attr, '\0', sizeof(attr));

	attr.map_type = map_type;
	attr.key_size = key_size;
	attr.value_size = value_size;
	attr.max_entries = max_entries;
	attr.map_flags = map_flags;
	/* memcpy from a NULL source is undefined behavior even for
	 * zero bytes, so only copy when a name was actually given.
	 */
	if (name)
		memcpy(attr.map_name, name,
		       min(name_len, BPF_OBJ_NAME_LEN - 1));

	if (node >= 0) {
		attr.map_flags |= BPF_F_NUMA_NODE;
		attr.numa_node = node;
	}

	return sys_bpf(BPF_MAP_CREATE, &attr, sizeof(attr));
}
Wang Nan7bf98362015-07-01 02:14:06 +000097
Martin KaFai Lauad17d0e2017-08-18 11:28:01 -070098int bpf_create_map(enum bpf_map_type map_type, int key_size,
99 int value_size, int max_entries, __u32 map_flags)
100{
Martin KaFai Lau88cda1c2017-09-27 14:37:54 -0700101 return bpf_create_map_node(map_type, NULL, key_size, value_size,
Martin KaFai Lauad17d0e2017-08-18 11:28:01 -0700102 max_entries, map_flags, -1);
103}
104
/* Convenience wrapper: create a named map with no NUMA constraint. */
int bpf_create_map_name(enum bpf_map_type map_type, const char *name,
			int key_size, int value_size, int max_entries,
			__u32 map_flags)
{
	return bpf_create_map_node(map_type, name, key_size,
				   value_size, max_entries,
				   map_flags, -1);
}
112
/*
 * Create a map-in-map (e.g. array/hash of maps).  The value size is
 * fixed at 4 bytes because elements hold map fds/ids; @inner_map_fd
 * supplies the template for the inner maps.  node < 0 means no NUMA
 * preference.  Returns a map fd, or -1 with errno set on failure.
 */
int bpf_create_map_in_map_node(enum bpf_map_type map_type, const char *name,
			       int key_size, int inner_map_fd, int max_entries,
			       __u32 map_flags, int node)
{
	__u32 name_len = name ? strlen(name) : 0;
	union bpf_attr attr;

	memset(&attr, '\0', sizeof(attr));

	attr.map_type = map_type;
	attr.key_size = key_size;
	attr.value_size = 4;
	attr.inner_map_fd = inner_map_fd;
	attr.max_entries = max_entries;
	attr.map_flags = map_flags;
	/* memcpy from a NULL source is undefined behavior even for
	 * zero bytes, so only copy when a name was actually given.
	 */
	if (name)
		memcpy(attr.map_name, name,
		       min(name_len, BPF_OBJ_NAME_LEN - 1));

	if (node >= 0) {
		attr.map_flags |= BPF_F_NUMA_NODE;
		attr.numa_node = node;
	}

	return sys_bpf(BPF_MAP_CREATE, &attr, sizeof(attr));
}
137
/* Convenience wrapper: create a map-in-map with no NUMA constraint. */
int bpf_create_map_in_map(enum bpf_map_type map_type, const char *name,
			  int key_size, int inner_map_fd, int max_entries,
			  __u32 map_flags)
{
	return bpf_create_map_in_map_node(map_type, name, key_size,
					  inner_map_fd, max_entries,
					  map_flags, -1);
}
146
/*
 * Load an eBPF program with an optional object @name.
 *
 * The load is first attempted with the verifier log disabled (cheaper);
 * only if it fails and the caller supplied a log buffer is the load
 * retried with log_level 1 so the verifier's rejection reason lands in
 * @log_buf.  Returns a program fd, or -1 with errno set on failure.
 */
int bpf_load_program_name(enum bpf_prog_type type, const char *name,
			  const struct bpf_insn *insns,
			  size_t insns_cnt, const char *license,
			  __u32 kern_version, char *log_buf,
			  size_t log_buf_sz)
{
	int fd;
	union bpf_attr attr;
	__u32 name_len = name ? strlen(name) : 0;

	bzero(&attr, sizeof(attr));
	attr.prog_type = type;
	attr.insn_cnt = (__u32)insns_cnt;
	attr.insns = ptr_to_u64(insns);
	attr.license = ptr_to_u64(license);
	attr.log_buf = ptr_to_u64(NULL);
	attr.log_size = 0;
	attr.log_level = 0;
	attr.kern_version = kern_version;
	/* memcpy from a NULL source is undefined behavior even for
	 * zero bytes, so only copy when a name was actually given.
	 */
	if (name)
		memcpy(attr.prog_name, name,
		       min(name_len, BPF_OBJ_NAME_LEN - 1));

	fd = sys_bpf(BPF_PROG_LOAD, &attr, sizeof(attr));
	if (fd >= 0 || !log_buf || !log_buf_sz)
		return fd;

	/* Try again with log */
	attr.log_buf = ptr_to_u64(log_buf);
	attr.log_size = log_buf_sz;
	attr.log_level = 1;
	log_buf[0] = 0;
	return sys_bpf(BPF_PROG_LOAD, &attr, sizeof(attr));
}
He Kuang43798bf2015-11-24 13:36:08 +0000179
Martin KaFai Lau88cda1c2017-09-27 14:37:54 -0700180int bpf_load_program(enum bpf_prog_type type, const struct bpf_insn *insns,
181 size_t insns_cnt, const char *license,
182 __u32 kern_version, char *log_buf,
183 size_t log_buf_sz)
184{
185 return bpf_load_program_name(type, NULL, insns, insns_cnt, license,
186 kern_version, log_buf, log_buf_sz);
187}
188
/*
 * Load a program purely to exercise the verifier (used by alignment
 * tests): the verifier log is always requested at @log_level and
 * BPF_F_STRICT_ALIGNMENT can be set via @strict_alignment.
 *
 * @log_buf is mandatory here — it is unconditionally handed to the
 * kernel and written to below — so reject a missing/empty buffer up
 * front instead of dereferencing NULL.
 */
int bpf_verify_program(enum bpf_prog_type type, const struct bpf_insn *insns,
		       size_t insns_cnt, int strict_alignment,
		       const char *license, __u32 kern_version,
		       char *log_buf, size_t log_buf_sz, int log_level)
{
	union bpf_attr attr;

	if (!log_buf || !log_buf_sz)
		return -EINVAL;

	bzero(&attr, sizeof(attr));
	attr.prog_type = type;
	attr.insn_cnt = (__u32)insns_cnt;
	attr.insns = ptr_to_u64(insns);
	attr.license = ptr_to_u64(license);
	attr.log_buf = ptr_to_u64(log_buf);
	attr.log_size = log_buf_sz;
	attr.log_level = log_level;
	log_buf[0] = 0;
	attr.kern_version = kern_version;
	attr.prog_flags = strict_alignment ? BPF_F_STRICT_ALIGNMENT : 0;

	return sys_bpf(BPF_PROG_LOAD, &attr, sizeof(attr));
}
210
/*
 * Insert or update the element at @key in the map behind @fd with
 * @value, honoring @flags (e.g. BPF_ANY/BPF_NOEXIST/BPF_EXIST).
 * Returns 0 on success, -1 with errno set on failure.
 */
int bpf_map_update_elem(int fd, const void *key, const void *value,
			__u64 flags)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.map_fd = fd;
	attr.flags = flags;
	attr.key = ptr_to_u64(key);
	attr.value = ptr_to_u64(value);

	return sys_bpf(BPF_MAP_UPDATE_ELEM, &attr, sizeof(attr));
}
Wang Nan9742da02016-11-26 07:03:25 +0000224
/*
 * Copy the element stored at @key in the map behind @fd into @value.
 * Returns 0 on success, -1 with errno set (ENOENT if no such key).
 */
int bpf_map_lookup_elem(int fd, const void *key, void *value)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.map_fd = fd;
	attr.key = ptr_to_u64(key);
	attr.value = ptr_to_u64(value);

	return sys_bpf(BPF_MAP_LOOKUP_ELEM, &attr, sizeof(attr));
}
236
/*
 * Remove the element at @key from the map behind @fd.
 * Returns 0 on success, -1 with errno set on failure.
 */
int bpf_map_delete_elem(int fd, const void *key)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.map_fd = fd;
	attr.key = ptr_to_u64(key);

	return sys_bpf(BPF_MAP_DELETE_ELEM, &attr, sizeof(attr));
}
247
/*
 * Map iteration step: write into @next_key the key following @key in
 * the map behind @fd.  Returns 0 on success, -1 with errno set
 * (ENOENT once iteration is exhausted).
 */
int bpf_map_get_next_key(int fd, const void *key, void *next_key)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.map_fd = fd;
	attr.key = ptr_to_u64(key);
	attr.next_key = ptr_to_u64(next_key);

	return sys_bpf(BPF_MAP_GET_NEXT_KEY, &attr, sizeof(attr));
}
259
/*
 * Pin the BPF object behind @fd at @pathname (on a bpffs mount).
 * Returns 0 on success, -1 with errno set on failure.
 */
int bpf_obj_pin(int fd, const char *pathname)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.bpf_fd = fd;
	/* ptr_to_u64() takes const void *, no cast needed */
	attr.pathname = ptr_to_u64(pathname);

	return sys_bpf(BPF_OBJ_PIN, &attr, sizeof(attr));
}
270
/*
 * Open the BPF object pinned at @pathname.
 * Returns a new fd, or -1 with errno set on failure.
 */
int bpf_obj_get(const char *pathname)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.pathname = ptr_to_u64(pathname);

	return sys_bpf(BPF_OBJ_GET, &attr, sizeof(attr));
}
Joe Stringer5dc880d2016-12-14 14:05:26 -0800280
/*
 * Attach program @prog_fd to @target_fd (e.g. a cgroup fd or sockmap)
 * at attach point @type with the given attach @flags.
 * Returns 0 on success, -1 with errno set on failure.
 */
int bpf_prog_attach(int prog_fd, int target_fd, enum bpf_attach_type type,
		    unsigned int flags)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.attach_type = type;
	attr.attach_flags = flags;
	attr.target_fd = target_fd;
	attr.attach_bpf_fd = prog_fd;

	return sys_bpf(BPF_PROG_ATTACH, &attr, sizeof(attr));
}
294
/*
 * Detach whatever program is attached to @target_fd at attach point
 * @type.  Returns 0 on success, -1 with errno set on failure.
 */
int bpf_prog_detach(int target_fd, enum bpf_attach_type type)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.attach_type = type;
	attr.target_fd = target_fd;

	return sys_bpf(BPF_PROG_DETACH, &attr, sizeof(attr));
}
Alexei Starovoitov30848872017-03-30 21:45:39 -0700305
/*
 * Like bpf_prog_detach(), but names the specific program @prog_fd to
 * detach (needed when multiple programs may be attached).
 * Returns 0 on success, -1 with errno set on failure.
 */
int bpf_prog_detach2(int prog_fd, int target_fd, enum bpf_attach_type type)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.attach_type = type;
	attr.target_fd = target_fd;
	attr.attach_bpf_fd = prog_fd;

	return sys_bpf(BPF_PROG_DETACH, &attr, sizeof(attr));
}
317
/*
 * Query the programs attached to @target_fd at attach point @type.
 * On input *prog_cnt is the capacity of @prog_ids; on output it holds
 * the number of attached programs.  @attach_flags may be NULL.
 * Returns the syscall result (0 on success, -1 with errno set).
 */
int bpf_prog_query(int target_fd, enum bpf_attach_type type, __u32 query_flags,
		   __u32 *attach_flags, __u32 *prog_ids, __u32 *prog_cnt)
{
	union bpf_attr attr;
	int rc;

	memset(&attr, 0, sizeof(attr));
	attr.query.target_fd = target_fd;
	attr.query.attach_type = type;
	attr.query.query_flags = query_flags;
	attr.query.prog_ids = ptr_to_u64(prog_ids);
	attr.query.prog_cnt = *prog_cnt;

	rc = sys_bpf(BPF_PROG_QUERY, &attr, sizeof(attr));

	/* Report back what the kernel filled in, even on failure. */
	if (attach_flags)
		*attach_flags = attr.query.attach_flags;
	*prog_cnt = attr.query.prog_cnt;

	return rc;
}
337
/*
 * Run program @prog_fd @repeat times over input packet @data/@size via
 * BPF_PROG_TEST_RUN.  Output data, output size, program return value
 * and average duration are reported through the optional out-pointers.
 * Returns the syscall result (0 on success, -1 with errno set).
 */
int bpf_prog_test_run(int prog_fd, int repeat, void *data, __u32 size,
		      void *data_out, __u32 *size_out, __u32 *retval,
		      __u32 *duration)
{
	union bpf_attr attr;
	int rc;

	memset(&attr, 0, sizeof(attr));
	attr.test.prog_fd = prog_fd;
	attr.test.repeat = repeat;
	attr.test.data_size_in = size;
	attr.test.data_in = ptr_to_u64(data);
	attr.test.data_out = ptr_to_u64(data_out);

	rc = sys_bpf(BPF_PROG_TEST_RUN, &attr, sizeof(attr));

	if (size_out)
		*size_out = attr.test.data_size_out;
	if (retval)
		*retval = attr.test.retval;
	if (duration)
		*duration = attr.test.duration;

	return rc;
}
Martin KaFai Lau95b9afd2017-06-05 12:15:53 -0700361
/*
 * Program-id iteration step: store into *next_id the id of the loaded
 * program following @start_id.  Returns 0 on success, -1 with errno
 * set (ENOENT when there are no more programs).
 */
int bpf_prog_get_next_id(__u32 start_id, __u32 *next_id)
{
	union bpf_attr attr;
	int rc;

	memset(&attr, 0, sizeof(attr));
	attr.start_id = start_id;

	rc = sys_bpf(BPF_PROG_GET_NEXT_ID, &attr, sizeof(attr));
	if (!rc)
		*next_id = attr.next_id;

	return rc;
}
376
/*
 * Map-id iteration step: store into *next_id the id of the map
 * following @start_id.  Returns 0 on success, -1 with errno set
 * (ENOENT when there are no more maps).
 */
int bpf_map_get_next_id(__u32 start_id, __u32 *next_id)
{
	union bpf_attr attr;
	int rc;

	memset(&attr, 0, sizeof(attr));
	attr.start_id = start_id;

	rc = sys_bpf(BPF_MAP_GET_NEXT_ID, &attr, sizeof(attr));
	if (!rc)
		*next_id = attr.next_id;

	return rc;
}
391
/*
 * Open an fd for the loaded program with kernel id @id.
 * Returns a new fd, or -1 with errno set on failure.
 */
int bpf_prog_get_fd_by_id(__u32 id)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.prog_id = id;

	return sys_bpf(BPF_PROG_GET_FD_BY_ID, &attr, sizeof(attr));
}
401
/*
 * Open an fd for the map with kernel id @id.
 * Returns a new fd, or -1 with errno set on failure.
 */
int bpf_map_get_fd_by_id(__u32 id)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.map_id = id;

	return sys_bpf(BPF_MAP_GET_FD_BY_ID, &attr, sizeof(attr));
}
411
/*
 * Fetch kernel info for the BPF object behind @prog_fd into @info.
 * On input *info_len is the buffer size; on success it is updated to
 * the length the kernel actually wrote.  (The attr field is the
 * generic info.bpf_fd, so this presumably works for map fds as well —
 * callers here only pass program fds.)
 * Returns 0 on success, -1 with errno set on failure.
 */
int bpf_obj_get_info_by_fd(int prog_fd, void *info, __u32 *info_len)
{
	union bpf_attr attr;
	int rc;

	memset(&attr, 0, sizeof(attr));
	attr.info.bpf_fd = prog_fd;
	attr.info.info_len = *info_len;
	attr.info.info = ptr_to_u64(info);

	rc = sys_bpf(BPF_OBJ_GET_INFO_BY_FD, &attr, sizeof(attr));
	if (!rc)
		*info_len = attr.info.info_len;

	return rc;
}
Eric Leblond949abbe2018-01-30 21:55:01 +0100428
/*
 * Attach (or, with fd < 0 per the XDP netlink convention, detach) the
 * XDP program @fd on interface @ifindex by sending an RTM_SETLINK
 * rtnetlink message carrying a nested IFLA_XDP attribute.
 *
 * Returns 0 on success, a negative errno on syscall failure, a
 * negative LIBBPF_ERRNO__* code on protocol-level problems, or the
 * (negative) kernel error carried in the netlink ACK.
 */
int bpf_set_link_xdp_fd(int ifindex, int fd, __u32 flags)
{
	struct sockaddr_nl sa;
	int sock, seq = 0, len, ret = -1;
	char buf[4096];
	struct nlattr *nla, *nla_xdp;
	/* Request message: netlink header, ifinfo header, then room for
	 * the hand-built IFLA_XDP nested attribute.
	 */
	struct {
		struct nlmsghdr nh;
		struct ifinfomsg ifinfo;
		char attrbuf[64];
	} req;
	struct nlmsghdr *nh;
	struct nlmsgerr *err;
	socklen_t addrlen;
	int one = 1;

	memset(&sa, 0, sizeof(sa));
	sa.nl_family = AF_NETLINK;

	sock = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);
	if (sock < 0) {
		return -errno;
	}

	/* Best effort: ask the kernel for extended ACK error strings so
	 * nla_dump_errormsg() below can print them.  Failure is non-fatal.
	 */
	if (setsockopt(sock, SOL_NETLINK, NETLINK_EXT_ACK,
		       &one, sizeof(one)) < 0) {
		fprintf(stderr, "Netlink error reporting not supported\n");
	}

	if (bind(sock, (struct sockaddr *)&sa, sizeof(sa)) < 0) {
		ret = -errno;
		goto cleanup;
	}

	/* Learn the kernel-assigned port id so replies can be matched. */
	addrlen = sizeof(sa);
	if (getsockname(sock, (struct sockaddr *)&sa, &addrlen) < 0) {
		ret = -errno;
		goto cleanup;
	}

	if (addrlen != sizeof(sa)) {
		ret = -LIBBPF_ERRNO__INTERNAL;
		goto cleanup;
	}

	memset(&req, 0, sizeof(req));
	req.nh.nlmsg_len = NLMSG_LENGTH(sizeof(struct ifinfomsg));
	req.nh.nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK;
	req.nh.nlmsg_type = RTM_SETLINK;
	req.nh.nlmsg_pid = 0;
	req.nh.nlmsg_seq = ++seq;
	req.ifinfo.ifi_family = AF_UNSPEC;
	req.ifinfo.ifi_index = ifindex;

	/* started nested attribute for XDP */
	nla = (struct nlattr *)(((char *)&req)
				+ NLMSG_ALIGN(req.nh.nlmsg_len));
	nla->nla_type = NLA_F_NESTED | IFLA_XDP;
	nla->nla_len = NLA_HDRLEN;

	/* add XDP fd */
	nla_xdp = (struct nlattr *)((char *)nla + nla->nla_len);
	nla_xdp->nla_type = IFLA_XDP_FD;
	nla_xdp->nla_len = NLA_HDRLEN + sizeof(int);
	memcpy((char *)nla_xdp + NLA_HDRLEN, &fd, sizeof(fd));
	nla->nla_len += nla_xdp->nla_len;

	/* if user passed in any flags, add those too */
	if (flags) {
		nla_xdp = (struct nlattr *)((char *)nla + nla->nla_len);
		nla_xdp->nla_type = IFLA_XDP_FLAGS;
		nla_xdp->nla_len = NLA_HDRLEN + sizeof(flags);
		memcpy((char *)nla_xdp + NLA_HDRLEN, &flags, sizeof(flags));
		nla->nla_len += nla_xdp->nla_len;
	}

	req.nh.nlmsg_len += NLA_ALIGN(nla->nla_len);

	if (send(sock, &req, req.nh.nlmsg_len, 0) < 0) {
		ret = -errno;
		goto cleanup;
	}

	/* NOTE(review): a single recv() assumes the whole ACK arrives in
	 * one datagram and fits in 4096 bytes — appears sufficient for a
	 * plain ACK/error reply, but multi-part replies would be lost.
	 */
	len = recv(sock, buf, sizeof(buf), 0);
	if (len < 0) {
		ret = -errno;
		goto cleanup;
	}

	/* Walk every netlink message in the reply, verifying it is ours
	 * (matching port id and sequence number) before trusting it.
	 */
	for (nh = (struct nlmsghdr *)buf; NLMSG_OK(nh, len);
	     nh = NLMSG_NEXT(nh, len)) {
		if (nh->nlmsg_pid != sa.nl_pid) {
			ret = -LIBBPF_ERRNO__WRNGPID;
			goto cleanup;
		}
		if (nh->nlmsg_seq != seq) {
			ret = -LIBBPF_ERRNO__INVSEQ;
			goto cleanup;
		}
		switch (nh->nlmsg_type) {
		case NLMSG_ERROR:
			err = (struct nlmsgerr *)NLMSG_DATA(nh);
			/* error == 0 is the success ACK */
			if (!err->error)
				continue;
			ret = err->error;
			nla_dump_errormsg(nh);
			goto cleanup;
		case NLMSG_DONE:
			break;
		default:
			break;
		}
	}

	ret = 0;

cleanup:
	close(sock);
	return ret;
}