/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 */
#ifndef _UAPI__LINUX_BPF_H__
#define _UAPI__LINUX_BPF_H__

#include <linux/types.h>
#include <linux/bpf_common.h>

/* Extended instruction set based on top of classic BPF */

/* instruction classes */
#define BPF_ALU64	0x07	/* alu mode in double word width */

/* ld/ldx fields */
#define BPF_DW		0x18	/* double word (64-bit) */
#define BPF_XADD	0xc0	/* exclusive add */

/* alu/jmp fields */
#define BPF_MOV		0xb0	/* mov reg to reg */
#define BPF_ARSH	0xc0	/* sign extending arithmetic shift right */

/* change endianness of a register */
#define BPF_END		0xd0	/* flags for endianness conversion: */
#define BPF_TO_LE	0x00	/* convert to little-endian */
#define BPF_TO_BE	0x08	/* convert to big-endian */
#define BPF_FROM_LE	BPF_TO_LE
#define BPF_FROM_BE	BPF_TO_BE

/* jmp encodings */
#define BPF_JNE		0x50	/* jump != */
#define BPF_JLT		0xa0	/* LT is unsigned, '<' */
#define BPF_JLE		0xb0	/* LE is unsigned, '<=' */
#define BPF_JSGT	0x60	/* SGT is signed '>', GT in x86 */
#define BPF_JSGE	0x70	/* SGE is signed '>=', GE in x86 */
#define BPF_JSLT	0xc0	/* SLT is signed, '<' */
#define BPF_JSLE	0xd0	/* SLE is signed, '<=' */
#define BPF_CALL	0x80	/* function call */
#define BPF_EXIT	0x90	/* function return */

/* Register numbers */
enum {
	BPF_REG_0 = 0,
	BPF_REG_1,
	BPF_REG_2,
	BPF_REG_3,
	BPF_REG_4,
	BPF_REG_5,
	BPF_REG_6,
	BPF_REG_7,
	BPF_REG_8,
	BPF_REG_9,
	BPF_REG_10,
	__MAX_BPF_REG,
};

/* BPF has 10 general purpose 64-bit registers and a stack frame. */
#define MAX_BPF_REG	__MAX_BPF_REG

struct bpf_insn {
	__u8	code;		/* opcode */
	__u8	dst_reg:4;	/* dest register */
	__u8	src_reg:4;	/* source register */
	__s16	off;		/* signed offset */
	__s32	imm;		/* signed immediate constant */
};

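/* Illustrative sketch (not part of the ABI): a 64-bit register-to-register
 * move copying R1 into R0 could be encoded as below; the class, operation
 * and source-operand bits are OR'ed into the opcode byte (BPF_X comes from
 * linux/bpf_common.h):
 *
 *	struct bpf_insn mov_r0_r1 = {
 *		.code    = BPF_ALU64 | BPF_MOV | BPF_X,
 *		.dst_reg = BPF_REG_0,
 *		.src_reg = BPF_REG_1,
 *		.off     = 0,
 *		.imm     = 0,
 *	};
 */
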
/* Key of a BPF_MAP_TYPE_LPM_TRIE entry */
struct bpf_lpm_trie_key {
	__u32	prefixlen;	/* up to 32 for AF_INET, 128 for AF_INET6 */
	__u8	data[0];	/* Arbitrary size */
};

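/* Illustrative sketch (user space, AF_INET case): a key for a /24 IPv4
 * prefix could be built as follows; the flexible array carries the address
 * bytes in network byte order:
 *
 *	struct bpf_lpm_trie_key *key = malloc(sizeof(*key) + 4);
 *	key->prefixlen = 24;
 *	memcpy(key->data, &ipv4_addr, 4);	// ipv4_addr in network byte order
 */
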
/* BPF syscall commands, see bpf(2) man-page for details. */
enum bpf_cmd {
	BPF_MAP_CREATE,
	BPF_MAP_LOOKUP_ELEM,
	BPF_MAP_UPDATE_ELEM,
	BPF_MAP_DELETE_ELEM,
	BPF_MAP_GET_NEXT_KEY,
	BPF_PROG_LOAD,
	BPF_OBJ_PIN,
	BPF_OBJ_GET,
	BPF_PROG_ATTACH,
	BPF_PROG_DETACH,
	BPF_PROG_TEST_RUN,
	BPF_PROG_GET_NEXT_ID,
	BPF_MAP_GET_NEXT_ID,
	BPF_PROG_GET_FD_BY_ID,
	BPF_MAP_GET_FD_BY_ID,
	BPF_OBJ_GET_INFO_BY_FD,
	BPF_PROG_QUERY,
	BPF_RAW_TRACEPOINT_OPEN,
};

enum bpf_map_type {
	BPF_MAP_TYPE_UNSPEC,
	BPF_MAP_TYPE_HASH,
	BPF_MAP_TYPE_ARRAY,
	BPF_MAP_TYPE_PROG_ARRAY,
	BPF_MAP_TYPE_PERF_EVENT_ARRAY,
	BPF_MAP_TYPE_PERCPU_HASH,
	BPF_MAP_TYPE_PERCPU_ARRAY,
	BPF_MAP_TYPE_STACK_TRACE,
	BPF_MAP_TYPE_CGROUP_ARRAY,
	BPF_MAP_TYPE_LRU_HASH,
	BPF_MAP_TYPE_LRU_PERCPU_HASH,
	BPF_MAP_TYPE_LPM_TRIE,
	BPF_MAP_TYPE_ARRAY_OF_MAPS,
	BPF_MAP_TYPE_HASH_OF_MAPS,
	BPF_MAP_TYPE_DEVMAP,
	BPF_MAP_TYPE_SOCKMAP,
	BPF_MAP_TYPE_CPUMAP,
};

enum bpf_prog_type {
	BPF_PROG_TYPE_UNSPEC,
	BPF_PROG_TYPE_SOCKET_FILTER,
	BPF_PROG_TYPE_KPROBE,
	BPF_PROG_TYPE_SCHED_CLS,
	BPF_PROG_TYPE_SCHED_ACT,
	BPF_PROG_TYPE_TRACEPOINT,
	BPF_PROG_TYPE_XDP,
	BPF_PROG_TYPE_PERF_EVENT,
	BPF_PROG_TYPE_CGROUP_SKB,
	BPF_PROG_TYPE_CGROUP_SOCK,
	BPF_PROG_TYPE_LWT_IN,
	BPF_PROG_TYPE_LWT_OUT,
	BPF_PROG_TYPE_LWT_XMIT,
	BPF_PROG_TYPE_SOCK_OPS,
	BPF_PROG_TYPE_SK_SKB,
	BPF_PROG_TYPE_CGROUP_DEVICE,
	BPF_PROG_TYPE_SK_MSG,
	BPF_PROG_TYPE_RAW_TRACEPOINT,
};

enum bpf_attach_type {
	BPF_CGROUP_INET_INGRESS,
	BPF_CGROUP_INET_EGRESS,
	BPF_CGROUP_INET_SOCK_CREATE,
	BPF_CGROUP_SOCK_OPS,
	BPF_SK_SKB_STREAM_PARSER,
	BPF_SK_SKB_STREAM_VERDICT,
	BPF_CGROUP_DEVICE,
	BPF_SK_MSG_VERDICT,
	__MAX_BPF_ATTACH_TYPE
};

#define MAX_BPF_ATTACH_TYPE __MAX_BPF_ATTACH_TYPE

/* cgroup-bpf attach flags used in BPF_PROG_ATTACH command
 *
 * NONE(default): No further bpf programs allowed in the subtree.
 *
 * BPF_F_ALLOW_OVERRIDE: If a sub-cgroup installs some bpf program,
 * the program in this cgroup yields to sub-cgroup program.
 *
 * BPF_F_ALLOW_MULTI: If a sub-cgroup installs some bpf program,
 * that cgroup program gets run in addition to the program in this cgroup.
 *
 * Only one program is allowed to be attached to a cgroup with
 * NONE or BPF_F_ALLOW_OVERRIDE flag.
 * Attaching another program on top of NONE or BPF_F_ALLOW_OVERRIDE will
 * release the old program and attach the new one. Attach flags have to match.
 *
 * Multiple programs are allowed to be attached to a cgroup with
 * BPF_F_ALLOW_MULTI flag. They are executed in FIFO order
 * (those that were attached first, run first).
 * The programs of sub-cgroup are executed first, then programs of
 * this cgroup and then programs of parent cgroup.
 * When a child program makes a decision (like picking TCP CA or sock bind),
 * the parent program has a chance to override it.
 *
 * A cgroup with MULTI or OVERRIDE flag allows any attach flags in sub-cgroups.
 * A cgroup with NONE doesn't allow any programs in sub-cgroups.
 * Ex1:
 * cgrp1 (MULTI progs A, B) ->
 *    cgrp2 (OVERRIDE prog C) ->
 *      cgrp3 (MULTI prog D) ->
 *        cgrp4 (OVERRIDE prog E) ->
 *          cgrp5 (NONE prog F)
 * the event in cgrp5 triggers execution of F,D,A,B in that order.
 * if prog F is detached, the execution is E,D,A,B
 * if prog F and D are detached, the execution is E,A,B
 * if prog F, E and D are detached, the execution is C,A,B
 *
 * All eligible programs are executed regardless of return code from
 * earlier programs.
 */
#define BPF_F_ALLOW_OVERRIDE	(1U << 0)
#define BPF_F_ALLOW_MULTI	(1U << 1)

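/* Illustrative sketch (user space, cgroup_fd and prog_fd hypothetical):
 * attaching a program to a cgroup while permitting further programs in
 * sub-cgroups, using the BPF_PROG_ATTACH members of union bpf_attr below:
 *
 *	union bpf_attr attr = {};
 *	attr.target_fd     = cgroup_fd;
 *	attr.attach_bpf_fd = prog_fd;
 *	attr.attach_type   = BPF_CGROUP_INET_EGRESS;
 *	attr.attach_flags  = BPF_F_ALLOW_MULTI;
 *	syscall(__NR_bpf, BPF_PROG_ATTACH, &attr, sizeof(attr));
 */
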
/* If BPF_F_STRICT_ALIGNMENT is used in BPF_PROG_LOAD command, the
 * verifier will perform strict alignment checking as if the kernel
 * has been built with CONFIG_EFFICIENT_UNALIGNED_ACCESS not set,
 * and NET_IP_ALIGN defined to 2.
 */
#define BPF_F_STRICT_ALIGNMENT	(1U << 0)

/* when bpf_ldimm64->src_reg == BPF_PSEUDO_MAP_FD, bpf_ldimm64->imm == fd */
#define BPF_PSEUDO_MAP_FD	1

/* when bpf_call->src_reg == BPF_PSEUDO_CALL, bpf_call->imm == pc-relative
 * offset to another bpf function
 */
#define BPF_PSEUDO_CALL		1

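/* Illustrative sketch: a map fd is carried by a two-slot ld_imm64, with the
 * pseudo source register telling the verifier to interpret imm as an fd
 * (BPF_LD and BPF_IMM come from linux/bpf_common.h):
 *
 *	insn[0].code    = BPF_LD | BPF_DW | BPF_IMM;
 *	insn[0].src_reg = BPF_PSEUDO_MAP_FD;
 *	insn[0].imm     = map_fd;	// low 32 bits of the 64-bit immediate
 *	insn[1].imm     = 0;		// high 32 bits, unused for map fds
 */
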
/* flags for BPF_MAP_UPDATE_ELEM command */
#define BPF_ANY		0 /* create new element or update existing */
#define BPF_NOEXIST	1 /* create new element if it didn't exist */
#define BPF_EXIST	2 /* update existing element */

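/* Illustrative sketch (user space, key/value variables hypothetical):
 * creating an element only if the key is not yet present:
 *
 *	union bpf_attr attr = {};
 *	attr.map_fd = map_fd;
 *	attr.key    = (__u64)(unsigned long)&key;
 *	attr.value  = (__u64)(unsigned long)&value;
 *	attr.flags  = BPF_NOEXIST;
 *	syscall(__NR_bpf, BPF_MAP_UPDATE_ELEM, &attr, sizeof(attr));
 */
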
/* flags for BPF_MAP_CREATE command */
#define BPF_F_NO_PREALLOC	(1U << 0)
/* Instead of having one common LRU list in the
 * BPF_MAP_TYPE_LRU_[PERCPU_]HASH map, use a percpu LRU list
 * which can scale and perform better.
 * Note, the LRU nodes (including free nodes) cannot be moved
 * across different LRU lists.
 */
#define BPF_F_NO_COMMON_LRU	(1U << 1)
/* Specify numa node during map creation */
#define BPF_F_NUMA_NODE		(1U << 2)

/* flags for BPF_PROG_QUERY */
#define BPF_F_QUERY_EFFECTIVE	(1U << 0)

#define BPF_OBJ_NAME_LEN 16U

/* Flags for accessing BPF object */
#define BPF_F_RDONLY		(1U << 3)
#define BPF_F_WRONLY		(1U << 4)

/* Flag for stack_map, store build_id+offset instead of pointer */
#define BPF_F_STACK_BUILD_ID	(1U << 5)

enum bpf_stack_build_id_status {
	/* user space needs an empty entry to identify end of a trace */
	BPF_STACK_BUILD_ID_EMPTY = 0,
	/* with valid build_id and offset */
	BPF_STACK_BUILD_ID_VALID = 1,
	/* couldn't get build_id, fallback to ip */
	BPF_STACK_BUILD_ID_IP = 2,
};

#define BPF_BUILD_ID_SIZE 20
struct bpf_stack_build_id {
	__s32		status;
	unsigned char	build_id[BPF_BUILD_ID_SIZE];
	union {
		__u64	offset;
		__u64	ip;
	};
};

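/* Illustrative sketch (user space; symbolize()/symbolize_ip() hypothetical):
 * a stack_trace map entry created with BPF_F_STACK_BUILD_ID is an array of
 * the struct above and could be walked like:
 *
 *	for (i = 0; i < max_depth; i++) {
 *		if (entry[i].status == BPF_STACK_BUILD_ID_EMPTY)
 *			break;				// end of trace
 *		else if (entry[i].status == BPF_STACK_BUILD_ID_VALID)
 *			symbolize(entry[i].build_id, entry[i].offset);
 *		else					// BPF_STACK_BUILD_ID_IP
 *			symbolize_ip(entry[i].ip);
 *	}
 */
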
union bpf_attr {
	struct { /* anonymous struct used by BPF_MAP_CREATE command */
		__u32	map_type;	/* one of enum bpf_map_type */
		__u32	key_size;	/* size of key in bytes */
		__u32	value_size;	/* size of value in bytes */
		__u32	max_entries;	/* max number of entries in a map */
		__u32	map_flags;	/* BPF_MAP_CREATE related
					 * flags defined above.
					 */
		__u32	inner_map_fd;	/* fd pointing to the inner map */
		__u32	numa_node;	/* numa node (effective only if
					 * BPF_F_NUMA_NODE is set).
					 */
		char	map_name[BPF_OBJ_NAME_LEN];
		__u32	map_ifindex;	/* ifindex of netdev to create on */
	};

	struct { /* anonymous struct used by BPF_MAP_*_ELEM commands */
		__u32		map_fd;
		__aligned_u64	key;
		union {
			__aligned_u64 value;
			__aligned_u64 next_key;
		};
		__u64		flags;
	};

	struct { /* anonymous struct used by BPF_PROG_LOAD command */
		__u32		prog_type;	/* one of enum bpf_prog_type */
		__u32		insn_cnt;
		__aligned_u64	insns;
		__aligned_u64	license;
		__u32		log_level;	/* verbosity level of verifier */
		__u32		log_size;	/* size of user buffer */
		__aligned_u64	log_buf;	/* user supplied buffer */
		__u32		kern_version;	/* checked when prog_type=kprobe */
		__u32		prog_flags;
		char		prog_name[BPF_OBJ_NAME_LEN];
		__u32		prog_ifindex;	/* ifindex of netdev to prep for */
	};

	struct { /* anonymous struct used by BPF_OBJ_* commands */
		__aligned_u64	pathname;
		__u32		bpf_fd;
		__u32		file_flags;
	};

	struct { /* anonymous struct used by BPF_PROG_ATTACH/DETACH commands */
		__u32		target_fd;	/* container object to attach to */
		__u32		attach_bpf_fd;	/* eBPF program to attach */
		__u32		attach_type;
		__u32		attach_flags;
	};

	struct { /* anonymous struct used by BPF_PROG_TEST_RUN command */
		__u32		prog_fd;
		__u32		retval;
		__u32		data_size_in;
		__u32		data_size_out;
		__aligned_u64	data_in;
		__aligned_u64	data_out;
		__u32		repeat;
		__u32		duration;
	} test;

	struct { /* anonymous struct used by BPF_*_GET_*_ID */
		union {
			__u32		start_id;
			__u32		prog_id;
			__u32		map_id;
		};
		__u32		next_id;
		__u32		open_flags;
	};

	struct { /* anonymous struct used by BPF_OBJ_GET_INFO_BY_FD */
		__u32		bpf_fd;
		__u32		info_len;
		__aligned_u64	info;
	} info;

	struct { /* anonymous struct used by BPF_PROG_QUERY command */
		__u32		target_fd;	/* container object to query */
		__u32		attach_type;
		__u32		query_flags;
		__u32		attach_flags;
		__aligned_u64	prog_ids;
		__u32		prog_cnt;
	} query;

	struct {
		__u64 name;
		__u32 prog_fd;
	} raw_tracepoint;
} __attribute__((aligned(8)));

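/* Illustrative sketch (user space): creating a small hash map with the
 * BPF_MAP_CREATE members of the union above:
 *
 *	union bpf_attr attr = {};
 *	attr.map_type    = BPF_MAP_TYPE_HASH;
 *	attr.key_size    = sizeof(__u32);
 *	attr.value_size  = sizeof(__u64);
 *	attr.max_entries = 1024;
 *	int map_fd = syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
 */
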
/* BPF helper function descriptions:
 *
 * void *bpf_map_lookup_elem(&map, &key)
 *     Return: Map value or NULL
 *
 * int bpf_map_update_elem(&map, &key, &value, flags)
 *     Return: 0 on success or negative error
 *
 * int bpf_map_delete_elem(&map, &key)
 *     Return: 0 on success or negative error
 *
 * int bpf_probe_read(void *dst, int size, void *src)
 *     Return: 0 on success or negative error
 *
 * u64 bpf_ktime_get_ns(void)
 *     Return: current ktime
 *
 * int bpf_trace_printk(const char *fmt, int fmt_size, ...)
 *     Return: length of buffer written or negative error
 *
 * u32 bpf_prandom_u32(void)
 *     Return: random value
 *
 * u32 bpf_raw_smp_processor_id(void)
 *     Return: SMP processor ID
 *
 * int bpf_skb_store_bytes(skb, offset, from, len, flags)
 *     store bytes into packet
 *     @skb: pointer to skb
 *     @offset: offset within packet from skb->mac_header
 *     @from: pointer where to copy bytes from
 *     @len: number of bytes to store into packet
 *     @flags: bit 0 - if true, recompute skb->csum
 *             other bits - reserved
 *     Return: 0 on success or negative error
 *
 * int bpf_l3_csum_replace(skb, offset, from, to, flags)
 *     recompute IP checksum
 *     @skb: pointer to skb
 *     @offset: offset within packet where IP checksum is located
 *     @from: old value of header field
 *     @to: new value of header field
 *     @flags: bits 0-3 - size of header field
 *             other bits - reserved
 *     Return: 0 on success or negative error
 *
 * int bpf_l4_csum_replace(skb, offset, from, to, flags)
 *     recompute TCP/UDP checksum
 *     @skb: pointer to skb
 *     @offset: offset within packet where TCP/UDP checksum is located
 *     @from: old value of header field
 *     @to: new value of header field
 *     @flags: bits 0-3 - size of header field
 *             bit 4 - is pseudo header
 *             other bits - reserved
 *     Return: 0 on success or negative error
 *
 * int bpf_tail_call(ctx, prog_array_map, index)
 *     jump into another BPF program
 *     @ctx: context pointer passed to next program
 *     @prog_array_map: pointer to map whose type is BPF_MAP_TYPE_PROG_ARRAY
 *     @index: 32-bit index inside array that selects specific program to run
 *     Return: 0 on success or negative error
 *
 * int bpf_clone_redirect(skb, ifindex, flags)
 *     redirect to another netdev
 *     @skb: pointer to skb
 *     @ifindex: ifindex of the net device
 *     @flags: bit 0 - if set, redirect to ingress instead of egress
 *             other bits - reserved
 *     Return: 0 on success or negative error
 *
 * u64 bpf_get_current_pid_tgid(void)
 *     Return: current->tgid << 32 | current->pid
 *
 * u64 bpf_get_current_uid_gid(void)
 *     Return: current_gid << 32 | current_uid
 *
 * int bpf_get_current_comm(char *buf, int size_of_buf)
 *     stores current->comm into buf
 *     Return: 0 on success or negative error
 *
 * u32 bpf_get_cgroup_classid(skb)
 *     retrieve a proc's classid
 *     @skb: pointer to skb
 *     Return: classid if != 0
 *
 * int bpf_skb_vlan_push(skb, vlan_proto, vlan_tci)
 *     Return: 0 on success or negative error
 *
 * int bpf_skb_vlan_pop(skb)
 *     Return: 0 on success or negative error
 *
 * int bpf_skb_get_tunnel_key(skb, key, size, flags)
 * int bpf_skb_set_tunnel_key(skb, key, size, flags)
 *     retrieve or populate tunnel metadata
 *     @skb: pointer to skb
 *     @key: pointer to 'struct bpf_tunnel_key'
 *     @size: size of 'struct bpf_tunnel_key'
 *     @flags: room for future extensions
 *     Return: 0 on success or negative error
 *
 * u64 bpf_perf_event_read(map, flags)
 *     read perf event counter value
 *     @map: pointer to perf_event_array map
 *     @flags: index of event in the map or bitmask flags
 *     Return: value of perf event counter read or error code
 *
 * int bpf_redirect(ifindex, flags)
 *     redirect to another netdev
 *     @ifindex: ifindex of the net device
 *     @flags:
 *	  cls_bpf:
 *          bit 0 - if set, redirect to ingress instead of egress
 *          other bits - reserved
 *	  xdp_bpf:
 *	    all bits - reserved
 *     Return: cls_bpf: TC_ACT_REDIRECT on success or TC_ACT_SHOT on error
 *	       xdp_bpf: XDP_REDIRECT on success or XDP_ABORTED on error
 *
 * int bpf_redirect_map(map, key, flags)
 *     redirect to endpoint in map
 *     @map: pointer to dev map
 *     @key: index in map to lookup
 *     @flags: --
 *     Return: XDP_REDIRECT on success or XDP_ABORTED on error
 *
 * u32 bpf_get_route_realm(skb)
 *     retrieve a dst's tclassid
 *     @skb: pointer to skb
 *     Return: realm if != 0
 *
 * int bpf_perf_event_output(ctx, map, flags, data, size)
 *     output perf raw sample
 *     @ctx: struct pt_regs*
 *     @map: pointer to perf_event_array map
 *     @flags: index of event in the map or bitmask flags
 *     @data: data on stack to be output as raw data
 *     @size: size of data
 *     Return: 0 on success or negative error
 *
 * int bpf_get_stackid(ctx, map, flags)
 *     walk user or kernel stack and return id
 *     @ctx: struct pt_regs*
 *     @map: pointer to stack_trace map
 *     @flags: bits 0-7 - number of stack frames to skip
 *             bit 8 - collect user stack instead of kernel
 *             bit 9 - compare stacks by hash only
 *             bit 10 - if two different stacks hash into the same stackid
 *                      discard old
 *             other bits - reserved
 *     Return: >= 0 stackid on success or negative error
 *
 * s64 bpf_csum_diff(from, from_size, to, to_size, seed)
 *     calculate csum diff
 *     @from: raw from buffer
 *     @from_size: length of from buffer
 *     @to: raw to buffer
 *     @to_size: length of to buffer
 *     @seed: optional seed
 *     Return: csum result or negative error code
 *
 * int bpf_skb_get_tunnel_opt(skb, opt, size)
 *     retrieve tunnel options metadata
 *     @skb: pointer to skb
 *     @opt: pointer to raw tunnel option data
 *     @size: size of @opt
 *     Return: option size
 *
 * int bpf_skb_set_tunnel_opt(skb, opt, size)
 *     populate tunnel options metadata
 *     @skb: pointer to skb
 *     @opt: pointer to raw tunnel option data
 *     @size: size of @opt
 *     Return: 0 on success or negative error
 *
 * int bpf_skb_change_proto(skb, proto, flags)
 *     Change protocol of the skb. Currently supported are v4 -> v6 and
 *     v6 -> v4 transitions. The helper will also resize the skb. The eBPF
 *     program is expected to fill the new headers via skb_store_bytes
 *     and lX_csum_replace.
 *     @skb: pointer to skb
 *     @proto: new skb->protocol type
 *     @flags: reserved
 *     Return: 0 on success or negative error
 *
 * int bpf_skb_change_type(skb, type)
 *     Change packet type of skb.
 *     @skb: pointer to skb
 *     @type: new skb->pkt_type type
 *     Return: 0 on success or negative error
 *
 * int bpf_skb_under_cgroup(skb, map, index)
 *     Check cgroup2 membership of skb
 *     @skb: pointer to skb
 *     @map: pointer to bpf_map in BPF_MAP_TYPE_CGROUP_ARRAY type
 *     @index: index of the cgroup in the bpf_map
 *     Return:
 *       == 0 skb failed the cgroup2 descendant test
 *       == 1 skb succeeded the cgroup2 descendant test
 *        < 0 error
 *
 * u32 bpf_get_hash_recalc(skb)
 *     Retrieve and possibly recalculate skb->hash.
 *     @skb: pointer to skb
 *     Return: hash
 *
 * u64 bpf_get_current_task(void)
 *     Returns current task_struct
 *     Return: current
 *
 * int bpf_probe_write_user(void *dst, void *src, int len)
 *     safely attempt to write to a location
 *     @dst: destination address in userspace
 *     @src: source address on stack
 *     @len: number of bytes to copy
 *     Return: 0 on success or negative error
 *
 * int bpf_current_task_under_cgroup(map, index)
 *     Check cgroup2 membership of current task
 *     @map: pointer to bpf_map in BPF_MAP_TYPE_CGROUP_ARRAY type
 *     @index: index of the cgroup in the bpf_map
 *     Return:
 *       == 0 current failed the cgroup2 descendant test
 *       == 1 current succeeded the cgroup2 descendant test
 *        < 0 error
 *
 * int bpf_skb_change_tail(skb, len, flags)
 *     The helper will resize the skb to the given new size, to be used f.e.
 *     with control messages.
 *     @skb: pointer to skb
 *     @len: new skb length
 *     @flags: reserved
 *     Return: 0 on success or negative error
 *
 * int bpf_skb_pull_data(skb, len)
 *     The helper will pull in non-linear data in case the skb is non-linear
 *     and not all of len is part of the linear section. Only needed for
 *     read/write with direct packet access.
 *     @skb: pointer to skb
 *     @len: len to make read/writeable
 *     Return: 0 on success or negative error
 *
 * s64 bpf_csum_update(skb, csum)
 *     Adds csum into skb->csum in case of CHECKSUM_COMPLETE.
 *     @skb: pointer to skb
 *     @csum: csum to add
 *     Return: csum on success or negative error
 *
 * void bpf_set_hash_invalid(skb)
 *     Invalidate current skb->hash.
 *     @skb: pointer to skb
 *
 * int bpf_get_numa_node_id()
 *     Return: Id of current NUMA node.
 *
 * int bpf_skb_change_head(skb, len, flags)
 *     Grows headroom of skb and adjusts MAC header offset accordingly.
 *     Will extend/reallocate as required automatically.
 *     May change skb data pointer and will thus invalidate any check
 *     performed for direct packet access.
 *     @skb: pointer to skb
 *     @len: length of header to be pushed in front
 *     @flags: Flags (unused for now)
 *     Return: 0 on success or negative error
 *
 * int bpf_xdp_adjust_head(xdp_md, delta)
 *     Adjust the xdp_md.data by delta
 *     @xdp_md: pointer to xdp_md
 *     @delta: A positive/negative integer to be added to xdp_md.data
 *     Return: 0 on success or negative on error
 *
 * int bpf_probe_read_str(void *dst, int size, const void *unsafe_ptr)
 *     Copy a NUL terminated string from unsafe address. In case the string
 *     length is smaller than size, the target is not padded with further NUL
 *     bytes. In case the string length is larger than size, just count-1
 *     bytes are copied and the last byte is set to NUL.
 *     @dst: destination address
 *     @size: maximum number of bytes to copy, including the trailing NUL
 *     @unsafe_ptr: unsafe address
 *     Return:
 *       > 0 length of the string including the trailing NUL on success
 *       < 0 error
 *
 * u64 bpf_get_socket_cookie(skb)
 *     Get the cookie for the socket stored inside sk_buff.
 *     @skb: pointer to skb
 *     Return: 8 Bytes non-decreasing number on success or 0 if the socket
 *     field is missing inside sk_buff
 *
 * u32 bpf_get_socket_uid(skb)
 *     Get the owner uid of the socket stored inside sk_buff.
 *     @skb: pointer to skb
 *     Return: uid of the socket owner on success or overflowuid if failed.
 *
 * u32 bpf_set_hash(skb, hash)
 *     Set full skb->hash.
 *     @skb: pointer to skb
 *     @hash: hash to set
 *
 * int bpf_setsockopt(bpf_socket, level, optname, optval, optlen)
 *     Calls setsockopt. Not all opts are available, only those with
 *     integer optvals plus TCP_CONGESTION.
 *     Supported levels: SOL_SOCKET and IPPROTO_TCP
 *     @bpf_socket: pointer to bpf_socket
 *     @level: SOL_SOCKET or IPPROTO_TCP
 *     @optname: option name
 *     @optval: pointer to option value
 *     @optlen: length of optval in bytes
 *     Return: 0 or negative error
 *
 * int bpf_getsockopt(bpf_socket, level, optname, optval, optlen)
 *     Calls getsockopt. Not all opts are available.
 *     Supported levels: IPPROTO_TCP
 *     @bpf_socket: pointer to bpf_socket
 *     @level: IPPROTO_TCP
 *     @optname: option name
 *     @optval: pointer to option value
 *     @optlen: length of optval in bytes
 *     Return: 0 or negative error
 *
 * int bpf_sock_ops_cb_flags_set(bpf_sock_ops, flags)
 *     Set callback flags for sock_ops
 *     @bpf_sock_ops: pointer to bpf_sock_ops_kern struct
 *     @flags: flags value
 *     Return: 0 for no error
 *             -EINVAL if there is no full tcp socket
 *             bits in flags that are not supported by the current kernel
 *
 * int bpf_skb_adjust_room(skb, len_diff, mode, flags)
 *     Grow or shrink room in sk_buff.
 *     @skb: pointer to skb
 *     @len_diff: (signed) amount of room to grow/shrink
 *     @mode: operation mode (enum bpf_adj_room_mode)
 *     @flags: reserved for future use
 *     Return: 0 on success or negative error code
 *
 * int bpf_sk_redirect_map(map, key, flags)
 *     Redirect skb to a sock in map using key as a lookup key for the
 *     sock in map.
 *     @map: pointer to sockmap
 *     @key: key to lookup sock in map
 *     @flags: reserved for future use
 *     Return: SK_PASS
 *
 * int bpf_sock_map_update(skops, map, key, flags)
 *	@skops: pointer to bpf_sock_ops
 *	@map: pointer to sockmap to update
 *	@key: key to insert/update sock in map
 *	@flags: same flags as map update elem
 *
 * int bpf_xdp_adjust_meta(xdp_md, delta)
 *     Adjust the xdp_md.data_meta by delta
 *     @xdp_md: pointer to xdp_md
 *     @delta: A positive/negative integer to be added to xdp_md.data_meta
 *     Return: 0 on success or negative on error
 *
 * int bpf_perf_event_read_value(map, flags, buf, buf_size)
 *     read perf event counter value and perf event enabled/running time
 *     @map: pointer to perf_event_array map
 *     @flags: index of event in the map or bitmask flags
 *     @buf: buf to fill
 *     @buf_size: size of the buf
 *     Return: 0 on success or negative error code
 *
 * int bpf_perf_prog_read_value(ctx, buf, buf_size)
 *     read perf prog attached perf event counter and enabled/running time
 *     @ctx: pointer to ctx
 *     @buf: buf to fill
 *     @buf_size: size of the buf
 *     Return: 0 on success or negative error code
 *
 * int bpf_override_return(pt_regs, rc)
 *	@pt_regs: pointer to struct pt_regs
 *	@rc: the return value to set
 *
 * int bpf_msg_redirect_map(map, key, flags)
 *     Redirect msg to a sock in map using key as a lookup key for the
 *     sock in map.
 *     @map: pointer to sockmap
 *     @key: key to lookup sock in map
 *     @flags: reserved for future use
 *     Return: SK_PASS
 *
 */
#define __BPF_FUNC_MAPPER(FN)		\
	FN(unspec),			\
	FN(map_lookup_elem),		\
	FN(map_update_elem),		\
	FN(map_delete_elem),		\
	FN(probe_read),			\
	FN(ktime_get_ns),		\
	FN(trace_printk),		\
	FN(get_prandom_u32),		\
	FN(get_smp_processor_id),	\
	FN(skb_store_bytes),		\
	FN(l3_csum_replace),		\
	FN(l4_csum_replace),		\
	FN(tail_call),			\
	FN(clone_redirect),		\
	FN(get_current_pid_tgid),	\
	FN(get_current_uid_gid),	\
	FN(get_current_comm),		\
	FN(get_cgroup_classid),		\
	FN(skb_vlan_push),		\
	FN(skb_vlan_pop),		\
	FN(skb_get_tunnel_key),		\
	FN(skb_set_tunnel_key),		\
	FN(perf_event_read),		\
	FN(redirect),			\
	FN(get_route_realm),		\
	FN(perf_event_output),		\
	FN(skb_load_bytes),		\
	FN(get_stackid),		\
	FN(csum_diff),			\
	FN(skb_get_tunnel_opt),		\
	FN(skb_set_tunnel_opt),		\
	FN(skb_change_proto),		\
	FN(skb_change_type),		\
	FN(skb_under_cgroup),		\
	FN(get_hash_recalc),		\
	FN(get_current_task),		\
	FN(probe_write_user),		\
	FN(current_task_under_cgroup),	\
	FN(skb_change_tail),		\
	FN(skb_pull_data),		\
	FN(csum_update),		\
	FN(set_hash_invalid),		\
	FN(get_numa_node_id),		\
	FN(skb_change_head),		\
	FN(xdp_adjust_head),		\
	FN(probe_read_str),		\
	FN(get_socket_cookie),		\
	FN(get_socket_uid),		\
	FN(set_hash),			\
	FN(setsockopt),			\
	FN(skb_adjust_room),		\
	FN(redirect_map),		\
	FN(sk_redirect_map),		\
	FN(sock_map_update),		\
	FN(xdp_adjust_meta),		\
	FN(perf_event_read_value),	\
	FN(perf_prog_read_value),	\
	FN(getsockopt),			\
	FN(override_return),		\
	FN(sock_ops_cb_flags_set),	\
	FN(msg_redirect_map),		\
	FN(msg_apply_bytes),		\
	FN(msg_cork_bytes),		\
	FN(msg_pull_data),

/* integer value in 'imm' field of BPF_CALL instruction selects which helper
 * function eBPF program intends to call
 */
#define __BPF_ENUM_FN(x) BPF_FUNC_ ## x
enum bpf_func_id {
	__BPF_FUNC_MAPPER(__BPF_ENUM_FN)
	__BPF_FUNC_MAX_ID,
};
#undef __BPF_ENUM_FN

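/* Illustrative sketch: with __BPF_ENUM_FN plugged in, the mapper above
 * expands to enumerators numbered in list order, e.g.:
 *
 *	BPF_FUNC_unspec,		// = 0
 *	BPF_FUNC_map_lookup_elem,	// = 1
 *	BPF_FUNC_map_update_elem,	// = 2
 *	...
 */
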
/* All flags used by eBPF helper functions, placed here. */

/* BPF_FUNC_skb_store_bytes flags. */
#define BPF_F_RECOMPUTE_CSUM		(1ULL << 0)
#define BPF_F_INVALIDATE_HASH		(1ULL << 1)

/* BPF_FUNC_l3_csum_replace and BPF_FUNC_l4_csum_replace flags.
 * First 4 bits are for passing the header field size.
 */
#define BPF_F_HDR_FIELD_MASK		0xfULL

/* BPF_FUNC_l4_csum_replace flags. */
#define BPF_F_PSEUDO_HDR		(1ULL << 4)
#define BPF_F_MARK_MANGLED_0		(1ULL << 5)
#define BPF_F_MARK_ENFORCE		(1ULL << 6)

/* BPF_FUNC_clone_redirect and BPF_FUNC_redirect flags. */
#define BPF_F_INGRESS			(1ULL << 0)

/* BPF_FUNC_skb_set_tunnel_key and BPF_FUNC_skb_get_tunnel_key flags. */
#define BPF_F_TUNINFO_IPV6		(1ULL << 0)

/* BPF_FUNC_get_stackid flags. */
#define BPF_F_SKIP_FIELD_MASK		0xffULL
#define BPF_F_USER_STACK		(1ULL << 8)
#define BPF_F_FAST_STACK_CMP		(1ULL << 9)
#define BPF_F_REUSE_STACKID		(1ULL << 10)

/* BPF_FUNC_skb_set_tunnel_key flags. */
#define BPF_F_ZERO_CSUM_TX		(1ULL << 1)
#define BPF_F_DONT_FRAGMENT		(1ULL << 2)
#define BPF_F_SEQ_NUMBER		(1ULL << 3)

/* BPF_FUNC_perf_event_output, BPF_FUNC_perf_event_read and
 * BPF_FUNC_perf_event_read_value flags.
 */
#define BPF_F_INDEX_MASK		0xffffffffULL
#define BPF_F_CURRENT_CPU		BPF_F_INDEX_MASK
/* BPF_FUNC_perf_event_output for sk_buff input context. */
#define BPF_F_CTXLEN_MASK		(0xfffffULL << 32)

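/* Illustrative sketch (program side; events_map and sample hypothetical):
 * emitting a raw sample on the current CPU's perf ring using the flags
 * above:
 *
 *	bpf_perf_event_output(ctx, &events_map, BPF_F_CURRENT_CPU,
 *			      &sample, sizeof(sample));
 */
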
/* Mode for BPF_FUNC_skb_adjust_room helper. */
enum bpf_adj_room_mode {
	BPF_ADJ_ROOM_NET,
};

/* user accessible mirror of in-kernel sk_buff.
 * new fields can only be added to the end of this structure
 */
struct __sk_buff {
	__u32 len;
	__u32 pkt_type;
	__u32 mark;
	__u32 queue_mapping;
	__u32 protocol;
	__u32 vlan_present;
	__u32 vlan_tci;
	__u32 vlan_proto;
	__u32 priority;
	__u32 ingress_ifindex;
	__u32 ifindex;
	__u32 tc_index;
	__u32 cb[5];
	__u32 hash;
	__u32 tc_classid;
	__u32 data;
	__u32 data_end;
	__u32 napi_id;

	/* Accessed by BPF_PROG_TYPE_sk_skb types from here to ... */
	__u32 family;
	__u32 remote_ip4;	/* Stored in network byte order */
	__u32 local_ip4;	/* Stored in network byte order */
	__u32 remote_ip6[4];	/* Stored in network byte order */
	__u32 local_ip6[4];	/* Stored in network byte order */
	__u32 remote_port;	/* Stored in network byte order */
	__u32 local_port;	/* stored in host byte order */
	/* ... here. */

	__u32 data_meta;
};

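/* Illustrative sketch (SCHED_CLS-style program; struct ethhdr comes from
 * linux/if_ether.h): the verifier requires an explicit bounds check against
 * data_end before any packet byte is read through the data pointer:
 *
 *	void *data     = (void *)(long)skb->data;
 *	void *data_end = (void *)(long)skb->data_end;
 *	struct ethhdr *eth = data;
 *
 *	if ((void *)(eth + 1) > data_end)
 *		return BPF_DROP;	// out of bounds, see enum bpf_ret_code
 */
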
| Alexei Starovoitov | d3aa45c | 2015-07-30 15:36:57 -0700 | [diff] [blame] | 898 | struct bpf_tunnel_key { | 
|  | 899 | __u32 tunnel_id; | 
| Daniel Borkmann | c6c3345 | 2016-01-11 01:16:39 +0100 | [diff] [blame] | 900 | union { | 
|  | 901 | __u32 remote_ipv4; | 
|  | 902 | __u32 remote_ipv6[4]; | 
|  | 903 | }; | 
|  | 904 | __u8 tunnel_tos; | 
|  | 905 | __u8 tunnel_ttl; | 
| Daniel Borkmann | c0e760c | 2016-03-30 00:02:00 +0200 | [diff] [blame] | 906 | __u16 tunnel_ext; | 
| Daniel Borkmann | 4018ab1 | 2016-03-09 03:00:05 +0100 | [diff] [blame] | 907 | __u32 tunnel_label; | 
| Alexei Starovoitov | d3aa45c | 2015-07-30 15:36:57 -0700 | [diff] [blame] | 908 | }; | 
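/* Editor's sketch (illustrative): reading receive tunnel metadata on a
 * collect-metadata tunnel device. Without BPF_F_TUNINFO_IPV6 in flags
 * the IPv4 side of the union is filled in. Helper declaration assumed
 * from bpf_helpers.h; the VNI check is made up.
 */
#include <linux/bpf.h>
#include <linux/pkt_cls.h>
#include "bpf_helpers.h"

SEC("classifier")
int tunnel_in(struct __sk_buff *skb)
{
	struct bpf_tunnel_key key = {};

	if (bpf_skb_get_tunnel_key(skb, &key, sizeof(key), 0) < 0)
		return TC_ACT_SHOT;
	return key.tunnel_id == 42 ? TC_ACT_OK : TC_ACT_SHOT;
}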
|  | 909 |  | 
| Thomas Graf | 3a0af8f | 2016-11-30 17:10:10 +0100 | [diff] [blame] | 910 | /* Generic BPF return codes which all BPF program types may support. | 
|  | 911 | * The values are binary compatible with their TC_ACT_* counterpart to | 
|  | 912 | * provide backwards compatibility with existing SCHED_CLS and SCHED_ACT | 
|  | 913 | * programs. | 
|  | 914 | * | 
|  | 915 | * XDP is handled separately, see XDP_*. | 
|  | 916 | */ | 
|  | 917 | enum bpf_ret_code { | 
|  | 918 | BPF_OK = 0, | 
|  | 919 | /* 1 reserved */ | 
|  | 920 | BPF_DROP = 2, | 
|  | 921 | /* 3-6 reserved */ | 
|  | 922 | BPF_REDIRECT = 7, | 
|  | 923 | /* >127 are reserved for prog type specific return codes */ | 
|  | 924 | }; | 
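/* Editor's sketch (illustrative): a lightweight-tunnel input program
 * using the generic codes. BPF_OK continues normal processing and
 * BPF_DROP frees the packet; BPF_REDIRECT is only honoured by hooks
 * that support it. The section name depends on the loader in use.
 */
#include <linux/bpf.h>
#include "bpf_helpers.h"

SEC("lwt_in")
int drop_jumbo(struct __sk_buff *skb)
{
	return skb->len > 1500 ? BPF_DROP : BPF_OK;
}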
|  | 925 |  | 
| David Ahern | 61023658 | 2016-12-01 08:48:04 -0800 | [diff] [blame] | 926 | struct bpf_sock { | 
|  | 927 | __u32 bound_dev_if; | 
| David Ahern | aa4c103 | 2016-12-01 08:48:06 -0800 | [diff] [blame] | 928 | __u32 family; | 
|  | 929 | __u32 type; | 
|  | 930 | __u32 protocol; | 
| David Ahern | 482dca9 | 2017-08-31 15:05:44 -0700 | [diff] [blame] | 931 | __u32 mark; | 
|  | 932 | __u32 priority; | 
| David Ahern | 61023658 | 2016-12-01 08:48:04 -0800 | [diff] [blame] | 933 | }; | 
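/* Editor's sketch (illustrative): a BPF_CGROUP_INET_SOCK_CREATE program
 * sees the new socket as struct bpf_sock. Returning 0 makes socket(2)
 * fail with EPERM, 1 allows it; bound_dev_if, mark and priority are
 * writable at this hook. Numeric constants stand in for AF_INET6 and
 * SOCK_RAW to keep the sketch self-contained.
 */
#include <linux/bpf.h>
#include "bpf_helpers.h"

SEC("cgroup/sock")
int sock_create(struct bpf_sock *sk)
{
	if (sk->family == 10 /* AF_INET6 */ && sk->type == 3 /* SOCK_RAW */)
		return 0;	/* deny raw IPv6 sockets in this cgroup */
	sk->mark = 0x42;	/* hypothetical policy mark */
	return 1;
}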
|  | 934 |  | 
| Martin KaFai Lau | 17bedab | 2016-12-07 15:53:11 -0800 | [diff] [blame] | 935 | #define XDP_PACKET_HEADROOM 256 | 
|  | 936 |  | 
| Brenden Blanco | 6a773a1 | 2016-07-19 12:16:47 -0700 | [diff] [blame] | 937 | /* User return codes for XDP prog type. | 
|  | 938 | * A valid XDP program must return one of these defined values. All other | 
| Daniel Borkmann | 9beb8be | 2017-09-09 01:40:35 +0200 | [diff] [blame] | 939 | * return codes are reserved for future use. Unknown return codes will | 
|  | 940 | * result in packet drops and a warning via bpf_warn_invalid_xdp_action(). | 
| Brenden Blanco | 6a773a1 | 2016-07-19 12:16:47 -0700 | [diff] [blame] | 941 | */ | 
|  | 942 | enum xdp_action { | 
|  | 943 | XDP_ABORTED = 0, | 
|  | 944 | XDP_DROP, | 
|  | 945 | XDP_PASS, | 
| Brenden Blanco | 6ce96ca | 2016-07-19 12:16:53 -0700 | [diff] [blame] | 946 | XDP_TX, | 
| John Fastabend | 814abfa | 2017-07-17 09:27:07 -0700 | [diff] [blame] | 947 | XDP_REDIRECT, | 
| Brenden Blanco | 6a773a1 | 2016-07-19 12:16:47 -0700 | [diff] [blame] | 948 | }; | 
|  | 949 |  | 
|  | 950 | /* user accessible metadata for XDP packet hook | 
|  | 951 | * new fields must be added to the end of this structure | 
|  | 952 | */ | 
|  | 953 | struct xdp_md { | 
|  | 954 | __u32 data; | 
|  | 955 | __u32 data_end; | 
| Daniel Borkmann | de8f3a8 | 2017-09-25 02:25:51 +0200 | [diff] [blame] | 956 | __u32 data_meta; | 
| Jesper Dangaard Brouer | daaf24c | 2018-01-11 17:39:09 +0100 | [diff] [blame] | 957 | /* Accesses below go through struct xdp_rxq_info */ | 
| Jesper Dangaard Brouer | 02dd329 | 2018-01-03 11:26:14 +0100 | [diff] [blame] | 958 | __u32 ingress_ifindex; /* rxq->dev->ifindex */ | 
|  | 959 | __u32 rx_queue_index;  /* rxq->queue_index  */ | 
| Brenden Blanco | 6a773a1 | 2016-07-19 12:16:47 -0700 | [diff] [blame] | 960 | }; | 
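/* Editor's sketch (illustrative): a minimal XDP program over xdp_md.
 * data/data_end bound the frame and data_meta points at metadata ahead
 * of data; drivers reserve XDP_PACKET_HEADROOM bytes so
 * bpf_xdp_adjust_head()/bpf_xdp_adjust_meta() have room to work with.
 * bpf_htons() assumed from bpf_endian.h.
 */
#include <linux/bpf.h>
#include <linux/if_ether.h>
#include "bpf_helpers.h"
#include "bpf_endian.h"

SEC("xdp")
int xdp_filter(struct xdp_md *ctx)
{
	void *data = (void *)(long)ctx->data;
	void *data_end = (void *)(long)ctx->data_end;
	struct ethhdr *eth = data;

	if (data + sizeof(*eth) > data_end)
		return XDP_ABORTED;	/* short frame; drop, but traceable */
	if (eth->h_proto == bpf_htons(ETH_P_ARP))
		return XDP_DROP;	/* silently drop ARP, say */
	return XDP_PASS;
}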
|  | 961 |  | 
| John Fastabend | 174a79f | 2017-08-15 22:32:47 -0700 | [diff] [blame] | 962 | enum sk_action { | 
| John Fastabend | bfa64075 | 2017-10-27 09:45:53 -0700 | [diff] [blame] | 963 | SK_DROP = 0, | 
|  | 964 | SK_PASS, | 
| John Fastabend | 174a79f | 2017-08-15 22:32:47 -0700 | [diff] [blame] | 965 | }; | 
|  | 966 |  | 
| John Fastabend | 4f738ad | 2018-03-18 12:57:10 -0700 | [diff] [blame] | 967 | /* user accessible metadata for SK_MSG packet hook; new fields must | 
|  | 968 | * be added to the end of this structure | 
|  | 969 | */ | 
|  | 970 | struct sk_msg_md { | 
|  | 971 | void *data; | 
|  | 972 | void *data_end; | 
|  | 973 | }; | 
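/* Editor's sketch (illustrative): an SK_MSG program, run on sendmsg()
 * for sockets added to a sockmap. Unlike __sk_buff, data/data_end are
 * plain pointers here; the verdict comes from enum sk_action above.
 */
#include <linux/bpf.h>
#include "bpf_helpers.h"

SEC("sk_msg")
int msg_verdict(struct sk_msg_md *msg)
{
	void *data = msg->data;
	void *data_end = msg->data_end;

	if (data + 1 > data_end)
		return SK_PASS;		/* nothing linear to look at */
	if (*(__u8 *)data == 0xff)
		return SK_DROP;		/* hypothetical forbidden opcode */
	return SK_PASS;
}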
|  | 974 |  | 
| Martin KaFai Lau | 1e27097 | 2017-06-05 12:15:52 -0700 | [diff] [blame] | 975 | #define BPF_TAG_SIZE	8 | 
|  | 976 |  | 
|  | 977 | struct bpf_prog_info { | 
|  | 978 | __u32 type; | 
|  | 979 | __u32 id; | 
|  | 980 | __u8  tag[BPF_TAG_SIZE]; | 
|  | 981 | __u32 jited_prog_len; | 
|  | 982 | __u32 xlated_prog_len; | 
|  | 983 | __aligned_u64 jited_prog_insns; | 
|  | 984 | __aligned_u64 xlated_prog_insns; | 
| Martin KaFai Lau | cb4d2b3 | 2017-09-27 14:37:52 -0700 | [diff] [blame] | 985 | __u64 load_time;	/* ns since boottime */ | 
|  | 986 | __u32 created_by_uid; | 
|  | 987 | __u32 nr_map_ids; | 
|  | 988 | __aligned_u64 map_ids; | 
| Martin KaFai Lau | 067cae4 | 2017-10-05 21:52:12 -0700 | [diff] [blame] | 989 | char name[BPF_OBJ_NAME_LEN]; | 
| Jakub Kicinski | 675fc27 | 2017-12-27 18:39:09 -0800 | [diff] [blame] | 990 | __u32 ifindex; | 
|  | 991 | __u64 netns_dev; | 
|  | 992 | __u64 netns_ino; | 
| Martin KaFai Lau | 1e27097 | 2017-06-05 12:15:52 -0700 | [diff] [blame] | 993 | } __attribute__((aligned(8))); | 
|  | 994 |  | 
|  | 995 | struct bpf_map_info { | 
|  | 996 | __u32 type; | 
|  | 997 | __u32 id; | 
|  | 998 | __u32 key_size; | 
|  | 999 | __u32 value_size; | 
|  | 1000 | __u32 max_entries; | 
|  | 1001 | __u32 map_flags; | 
| Martin KaFai Lau | 067cae4 | 2017-10-05 21:52:12 -0700 | [diff] [blame] | 1002 | char  name[BPF_OBJ_NAME_LEN]; | 
| Jakub Kicinski | 52775b3 | 2018-01-17 19:13:28 -0800 | [diff] [blame] | 1003 | __u32 ifindex; | 
|  | 1004 | __u64 netns_dev; | 
|  | 1005 | __u64 netns_ino; | 
| Martin KaFai Lau | 1e27097 | 2017-06-05 12:15:52 -0700 | [diff] [blame] | 1006 | } __attribute__((aligned(8))); | 
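/* Editor's user-space sketch (illustrative): both info structs above are
 * filled via the BPF_OBJ_GET_INFO_BY_FD command. The caller passes
 * info_len; the kernel writes at most that much and updates info_len to
 * what it actually filled, which is how fields appended to these structs
 * stay backward compatible. libbpf's bpf_obj_get_info_by_fd() wraps this.
 */
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/bpf.h>

static long get_prog_info(int prog_fd, struct bpf_prog_info *info)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	memset(info, 0, sizeof(*info));	/* unused fields must be zero */
	attr.info.bpf_fd   = prog_fd;
	attr.info.info_len = sizeof(*info);
	attr.info.info     = (__u64)(unsigned long)info;

	return syscall(__NR_bpf, BPF_OBJ_GET_INFO_BY_FD, &attr, sizeof(attr));
}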
|  | 1007 |  | 
| Lawrence Brakmo | 40304b2 | 2017-06-30 20:02:40 -0700 | [diff] [blame] | 1008 | /* User bpf_sock_ops struct to access socket values and specify request ops | 
|  | 1009 | * and their replies. | 
|  | 1010 | * Some of these fields are in network (big-endian) byte order and may need | 
|  | 1011 | * to be converted before use (bpf_ntohl() defined in samples/bpf/bpf_endian.h). | 
|  | 1012 | * New fields can only be added at the end of this structure | 
|  | 1013 | */ | 
|  | 1014 | struct bpf_sock_ops { | 
|  | 1015 | __u32 op; | 
|  | 1016 | union { | 
| Lawrence Brakmo | de525be | 2018-01-25 16:14:09 -0800 | [diff] [blame] | 1017 | __u32 args[4];		/* Optionally passed to bpf program */ | 
|  | 1018 | __u32 reply;		/* Returned by bpf program	    */ | 
|  | 1019 | __u32 replylong[4];	/* Optionally returned by bpf prog  */ | 
| Lawrence Brakmo | 40304b2 | 2017-06-30 20:02:40 -0700 | [diff] [blame] | 1020 | }; | 
|  | 1021 | __u32 family; | 
|  | 1022 | __u32 remote_ip4;	/* Stored in network byte order */ | 
|  | 1023 | __u32 local_ip4;	/* Stored in network byte order */ | 
|  | 1024 | __u32 remote_ip6[4];	/* Stored in network byte order */ | 
|  | 1025 | __u32 local_ip6[4];	/* Stored in network byte order */ | 
|  | 1026 | __u32 remote_port;	/* Stored in network byte order */ | 
|  | 1027 | __u32 local_port;	/* Stored in host byte order */ | 
| Lawrence Brakmo | f19397a | 2017-12-01 10:15:04 -0800 | [diff] [blame] | 1028 | __u32 is_fullsock;	/* Some TCP fields are only valid if | 
|  | 1029 | * there is a full socket. If not, the | 
|  | 1030 | * fields read as zero. | 
|  | 1031 | */ | 
|  | 1032 | __u32 snd_cwnd; | 
|  | 1033 | __u32 srtt_us;		/* Averaged RTT << 3 in usecs */ | 
| Lawrence Brakmo | b13d880 | 2018-01-25 16:14:10 -0800 | [diff] [blame] | 1034 | __u32 bpf_sock_ops_cb_flags; /* flags defined in uapi/linux/tcp.h */ | 
| Lawrence Brakmo | 44f0e43 | 2018-01-25 16:14:12 -0800 | [diff] [blame] | 1035 | __u32 state; | 
|  | 1036 | __u32 rtt_min; | 
|  | 1037 | __u32 snd_ssthresh; | 
|  | 1038 | __u32 rcv_nxt; | 
|  | 1039 | __u32 snd_nxt; | 
|  | 1040 | __u32 snd_una; | 
|  | 1041 | __u32 mss_cache; | 
|  | 1042 | __u32 ecn_flags; | 
|  | 1043 | __u32 rate_delivered; | 
|  | 1044 | __u32 rate_interval_us; | 
|  | 1045 | __u32 packets_out; | 
|  | 1046 | __u32 retrans_out; | 
|  | 1047 | __u32 total_retrans; | 
|  | 1048 | __u32 segs_in; | 
|  | 1049 | __u32 data_segs_in; | 
|  | 1050 | __u32 segs_out; | 
|  | 1051 | __u32 data_segs_out; | 
|  | 1052 | __u32 lost_out; | 
|  | 1053 | __u32 sacked_out; | 
|  | 1054 | __u32 sk_txhash; | 
|  | 1055 | __u64 bytes_received; | 
|  | 1056 | __u64 bytes_acked; | 
| Lawrence Brakmo | 40304b2 | 2017-06-30 20:02:40 -0700 | [diff] [blame] | 1057 | }; | 
|  | 1058 |  | 
| Lawrence Brakmo | b13d880 | 2018-01-25 16:14:10 -0800 | [diff] [blame] | 1059 | /* Definitions for bpf_sock_ops_cb_flags */ | 
| Lawrence Brakmo | f89013f | 2018-01-25 16:14:11 -0800 | [diff] [blame] | 1060 | #define BPF_SOCK_OPS_RTO_CB_FLAG	(1<<0) | 
| Lawrence Brakmo | a31ad29 | 2018-01-25 16:14:14 -0800 | [diff] [blame] | 1061 | #define BPF_SOCK_OPS_RETRANS_CB_FLAG	(1<<1) | 
| Lawrence Brakmo | d448749 | 2018-01-25 16:14:15 -0800 | [diff] [blame] | 1062 | #define BPF_SOCK_OPS_STATE_CB_FLAG	(1<<2) | 
|  | 1063 | #define BPF_SOCK_OPS_ALL_CB_FLAGS       0x7		/* Mask of all currently | 
| Lawrence Brakmo | b13d880 | 2018-01-25 16:14:10 -0800 | [diff] [blame] | 1064 | * supported cb flags | 
|  | 1065 | */ | 
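/* Editor's sketch (illustrative): callbacks are opt-in per socket. A
 * sock_ops program calls the bpf_sock_ops_cb_flags_set() helper to
 * replace the socket's flags; unsupported bits are reported back in the
 * helper's return value. Declaration assumed from bpf_helpers.h.
 */
#include <linux/bpf.h>
#include "bpf_helpers.h"

SEC("sockops")
int enable_cbs(struct bpf_sock_ops *skops)
{
	if (skops->op == BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB)
		bpf_sock_ops_cb_flags_set(skops,
					  BPF_SOCK_OPS_RTO_CB_FLAG |
					  BPF_SOCK_OPS_STATE_CB_FLAG);
	return 1;
}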
|  | 1066 |  | 
| Lawrence Brakmo | 40304b2 | 2017-06-30 20:02:40 -0700 | [diff] [blame] | 1067 | /* List of known BPF sock_ops operators. | 
|  | 1068 | * New entries can only be added at the end | 
|  | 1069 | */ | 
|  | 1070 | enum { | 
|  | 1071 | BPF_SOCK_OPS_VOID, | 
| Lawrence Brakmo | 8550f32 | 2017-06-30 20:02:42 -0700 | [diff] [blame] | 1072 | BPF_SOCK_OPS_TIMEOUT_INIT,	/* Should return SYN-RTO value to use or | 
|  | 1073 | * -1 if default value should be used | 
|  | 1074 | */ | 
| Lawrence Brakmo | 13d3b1e | 2017-06-30 20:02:44 -0700 | [diff] [blame] | 1075 | BPF_SOCK_OPS_RWND_INIT,		/* Should return initial advertised | 
|  | 1076 | * window (in packets) or -1 if default | 
|  | 1077 | * value should be used | 
|  | 1078 | */ | 
| Lawrence Brakmo | 9872a4b | 2017-06-30 20:02:47 -0700 | [diff] [blame] | 1079 | BPF_SOCK_OPS_TCP_CONNECT_CB,	/* Calls BPF program right before an | 
|  | 1080 | * active connection is initialized | 
|  | 1081 | */ | 
|  | 1082 | BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB,	/* Calls BPF program when an | 
|  | 1083 | * active connection is | 
|  | 1084 | * established | 
|  | 1085 | */ | 
|  | 1086 | BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB,	/* Calls BPF program when a | 
|  | 1087 | * passive connection is | 
|  | 1088 | * established | 
|  | 1089 | */ | 
| Lawrence Brakmo | 91b5b21 | 2017-06-30 20:02:49 -0700 | [diff] [blame] | 1090 | BPF_SOCK_OPS_NEEDS_ECN,		/* If connection's congestion control | 
|  | 1091 | * needs ECN | 
|  | 1092 | */ | 
| Lawrence Brakmo | e6546ef | 2017-10-20 11:05:39 -0700 | [diff] [blame] | 1093 | BPF_SOCK_OPS_BASE_RTT,		/* Get base RTT. The correct value is | 
|  | 1094 | * based on the path and may be | 
|  | 1095 | * dependent on the congestion control | 
|  | 1096 | * algorithm. In general it indicates | 
|  | 1097 | * a congestion threshold. RTTs above | 
|  | 1098 | * this indicate congestion | 
|  | 1099 | */ | 
| Lawrence Brakmo | f89013f | 2018-01-25 16:14:11 -0800 | [diff] [blame] | 1100 | BPF_SOCK_OPS_RTO_CB,		/* Called when an RTO has triggered. | 
|  | 1101 | * Arg1: value of icsk_retransmits | 
|  | 1102 | * Arg2: value of icsk_rto | 
|  | 1103 | * Arg3: whether RTO has expired | 
|  | 1104 | */ | 
| Lawrence Brakmo | a31ad29 | 2018-01-25 16:14:14 -0800 | [diff] [blame] | 1105 | BPF_SOCK_OPS_RETRANS_CB,	/* Called when skb is retransmitted. | 
|  | 1106 | * Arg1: sequence number of 1st byte | 
|  | 1107 | * Arg2: # segments | 
|  | 1108 | * Arg3: return value of | 
|  | 1109 | *       tcp_transmit_skb (0 => success) | 
|  | 1110 | */ | 
| Lawrence Brakmo | d448749 | 2018-01-25 16:14:15 -0800 | [diff] [blame] | 1111 | BPF_SOCK_OPS_STATE_CB,		/* Called when TCP changes state. | 
|  | 1112 | * Arg1: old_state | 
|  | 1113 | * Arg2: new_state | 
|  | 1114 | */ | 
|  | 1115 | }; | 
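/* Editor's sketch (illustrative): dispatching on op in the style of
 * samples/bpf/tcp_*_kern.c. Request-style ops hand their answer back
 * through skops->reply, with -1 selecting the kernel default; the
 * program itself returns 1. The values below are made up.
 */
#include <linux/bpf.h>
#include "bpf_helpers.h"

SEC("sockops")
int tcp_tuner(struct bpf_sock_ops *skops)
{
	int rv = -1;

	switch (skops->op) {
	case BPF_SOCK_OPS_TIMEOUT_INIT:
		rv = 10;	/* hypothetical SYN-RTO; units per the kernel */
		break;
	case BPF_SOCK_OPS_RWND_INIT:
		rv = 40;	/* initial advertised window, in packets */
		break;
	case BPF_SOCK_OPS_STATE_CB:
		/* args[0] = old_state, args[1] = new_state (BPF_TCP_*) */
		break;
	}
	skops->reply = rv;
	return 1;
}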
|  | 1116 |  | 
|  | 1117 | /* List of TCP states. There is a build check in net/ipv4/tcp.c to detect | 
|  | 1118 | * changes between the TCP and BPF versions. Ideally this should never happen. | 
|  | 1119 | * If it does, we need to add code to convert them before calling | 
|  | 1120 | * the BPF sock_ops function. | 
|  | 1121 | */ | 
|  | 1122 | enum { | 
|  | 1123 | BPF_TCP_ESTABLISHED = 1, | 
|  | 1124 | BPF_TCP_SYN_SENT, | 
|  | 1125 | BPF_TCP_SYN_RECV, | 
|  | 1126 | BPF_TCP_FIN_WAIT1, | 
|  | 1127 | BPF_TCP_FIN_WAIT2, | 
|  | 1128 | BPF_TCP_TIME_WAIT, | 
|  | 1129 | BPF_TCP_CLOSE, | 
|  | 1130 | BPF_TCP_CLOSE_WAIT, | 
|  | 1131 | BPF_TCP_LAST_ACK, | 
|  | 1132 | BPF_TCP_LISTEN, | 
|  | 1133 | BPF_TCP_CLOSING,	/* Now a valid state */ | 
|  | 1134 | BPF_TCP_NEW_SYN_RECV, | 
|  | 1135 |  | 
|  | 1136 | BPF_TCP_MAX_STATES	/* Leave at the end! */ | 
| Lawrence Brakmo | 40304b2 | 2017-06-30 20:02:40 -0700 | [diff] [blame] | 1137 | }; | 
|  | 1138 |  | 
| Lawrence Brakmo | fc74781 | 2017-06-30 20:02:51 -0700 | [diff] [blame] | 1139 | #define TCP_BPF_IW		1001	/* Set TCP initial congestion window */ | 
| Lawrence Brakmo | 13bf964 | 2017-06-30 20:02:53 -0700 | [diff] [blame] | 1140 | #define TCP_BPF_SNDCWND_CLAMP	1002	/* Set sndcwnd_clamp */ | 
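/* Editor's sketch (illustrative): the TCP_BPF_* values are socket
 * options reachable only through the bpf_setsockopt() helper. Setting
 * the initial window right after the active handshake, before any data
 * is sent, is the intended spot. SOL_TCP is defined locally (it normally
 * comes from netinet/tcp.h); the numbers are made up.
 */
#include <linux/bpf.h>
#include "bpf_helpers.h"

#define SOL_TCP 6

SEC("sockops")
int set_iw(struct bpf_sock_ops *skops)
{
	int iw = 20, clamp = 100;

	if (skops->op == BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB) {
		bpf_setsockopt(skops, SOL_TCP, TCP_BPF_IW, &iw, sizeof(iw));
		bpf_setsockopt(skops, SOL_TCP, TCP_BPF_SNDCWND_CLAMP,
			       &clamp, sizeof(clamp));
	}
	return 1;
}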
| Lawrence Brakmo | fc74781 | 2017-06-30 20:02:51 -0700 | [diff] [blame] | 1141 |  | 
| Yonghong Song | 908432c | 2017-10-05 09:19:20 -0700 | [diff] [blame] | 1142 | struct bpf_perf_event_value { | 
|  | 1143 | __u64 counter; | 
|  | 1144 | __u64 enabled; | 
|  | 1145 | __u64 running; | 
|  | 1146 | }; | 
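/* Editor's sketch (illustrative): bpf_perf_event_read_value() fills
 * this struct from a BPF_MAP_TYPE_PERF_EVENT_ARRAY slot; enabled and
 * running expose the counter's scheduling times so a multiplexed
 * counter can be scaled as counter * enabled / running. Map, section
 * and kprobe target are made up.
 */
#include <linux/ptrace.h>
#include <linux/bpf.h>
#include "bpf_helpers.h"

struct bpf_map_def SEC("maps") counters = {
	.type		= BPF_MAP_TYPE_PERF_EVENT_ARRAY,
	.key_size	= sizeof(int),
	.value_size	= sizeof(__u32),
	.max_entries	= 128,
};

SEC("kprobe/sys_write")
int read_counter(struct pt_regs *ctx)
{
	struct bpf_perf_event_value v = {};

	if (bpf_perf_event_read_value(&counters, BPF_F_CURRENT_CPU,
				      &v, sizeof(v)) == 0) {
		/* scale: v.counter * v.enabled / v.running (running != 0) */
	}
	return 0;
}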
|  | 1147 |  | 
| Roman Gushchin | ebc614f | 2017-11-05 08:15:32 -0500 | [diff] [blame] | 1148 | #define BPF_DEVCG_ACC_MKNOD	(1ULL << 0) | 
|  | 1149 | #define BPF_DEVCG_ACC_READ	(1ULL << 1) | 
|  | 1150 | #define BPF_DEVCG_ACC_WRITE	(1ULL << 2) | 
|  | 1151 |  | 
|  | 1152 | #define BPF_DEVCG_DEV_BLOCK	(1ULL << 0) | 
|  | 1153 | #define BPF_DEVCG_DEV_CHAR	(1ULL << 1) | 
|  | 1154 |  | 
|  | 1155 | struct bpf_cgroup_dev_ctx { | 
| Yonghong Song | 06ef0cc | 2017-12-18 10:13:44 -0800 | [diff] [blame] | 1156 | /* access_type encoded as (BPF_DEVCG_ACC_* << 16) | BPF_DEVCG_DEV_* */ | 
|  | 1157 | __u32 access_type; | 
| Roman Gushchin | ebc614f | 2017-11-05 08:15:32 -0500 | [diff] [blame] | 1158 | __u32 major; | 
|  | 1159 | __u32 minor; | 
|  | 1160 | }; | 
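/* Editor's sketch (illustrative): a cgroup device program, after
 * tools/testing/selftests/bpf/dev_cgroup.c. The low 16 bits of
 * access_type carry the BPF_DEVCG_DEV_* type and the upper bits the
 * BPF_DEVCG_ACC_* mask; return 1 to allow the access, 0 to deny it.
 */
#include <linux/bpf.h>
#include "bpf_helpers.h"

SEC("cgroup/dev")
int dev_guard(struct bpf_cgroup_dev_ctx *ctx)
{
	short type = ctx->access_type & 0xFFFF;
	short access = ctx->access_type >> 16;

	/* allow only read/write (no mknod) of /dev/null, char 1:3 */
	if (type == BPF_DEVCG_DEV_CHAR && ctx->major == 1 && ctx->minor == 3)
		return !(access & BPF_DEVCG_ACC_MKNOD);
	return 0;
}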
|  | 1161 |  | 
| Alexei Starovoitov | c4f6699 | 2018-03-28 12:05:37 -0700 | [diff] [blame] | 1162 | struct bpf_raw_tracepoint_args { | 
|  | 1163 | __u64 args[0]; | 
|  | 1164 | }; | 
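/* Editor's sketch (illustrative): a BPF_PROG_TYPE_RAW_TRACEPOINT program
 * gets the tracepoint's arguments as this unformatted u64 array, so the
 * program has to know the tracepoint's prototype; for sched_switch that
 * is (preempt, prev task, next task).
 */
#include <linux/bpf.h>
#include "bpf_helpers.h"

SEC("raw_tracepoint/sched_switch")
int on_switch(struct bpf_raw_tracepoint_args *ctx)
{
	__u64 prev_task = ctx->args[1];	/* struct task_struct *, as u64 */

	/* a real program would bpf_probe_read() fields out of prev_task */
	(void)prev_task;
	return 0;
}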
|  | 1165 |  | 
| Alexei Starovoitov | daedfb2 | 2014-09-04 22:17:18 -0700 | [diff] [blame] | 1166 | #endif /* _UAPI__LINUX_BPF_H__ */ |