/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 */
#ifndef _UAPI__LINUX_BPF_H__
#define _UAPI__LINUX_BPF_H__

#include <linux/types.h>
#include <linux/bpf_common.h>

/* Extended instruction set based on top of classic BPF */

/* instruction classes */
#define BPF_ALU64	0x07	/* alu mode in double word width */

/* ld/ldx fields */
#define BPF_DW		0x18	/* double word */
#define BPF_XADD	0xc0	/* exclusive add */

/* alu/jmp fields */
#define BPF_MOV		0xb0	/* mov reg to reg */
#define BPF_ARSH	0xc0	/* sign extending arithmetic shift right */

/* change endianness of a register */
#define BPF_END		0xd0	/* flags for endianness conversion: */
#define BPF_TO_LE	0x00	/* convert to little-endian */
#define BPF_TO_BE	0x08	/* convert to big-endian */
#define BPF_FROM_LE	BPF_TO_LE
#define BPF_FROM_BE	BPF_TO_BE

#define BPF_JNE		0x50	/* jump != */
#define BPF_JSGT	0x60	/* SGT is signed '>', GT in x86 */
#define BPF_JSGE	0x70	/* SGE is signed '>=', GE in x86 */
#define BPF_CALL	0x80	/* function call */
#define BPF_EXIT	0x90	/* function return */

/* Register numbers */
enum {
	BPF_REG_0 = 0,
	BPF_REG_1,
	BPF_REG_2,
	BPF_REG_3,
	BPF_REG_4,
	BPF_REG_5,
	BPF_REG_6,
	BPF_REG_7,
	BPF_REG_8,
	BPF_REG_9,
	BPF_REG_10,
	__MAX_BPF_REG,
};

/* BPF has 10 general purpose 64-bit registers (R0-R9) plus R10, a
 * read-only frame pointer used to access the stack frame.
 */
#define MAX_BPF_REG	__MAX_BPF_REG

struct bpf_insn {
	__u8	code;		/* opcode */
	__u8	dst_reg:4;	/* dest register */
	__u8	src_reg:4;	/* source register */
	__s16	off;		/* signed offset */
	__s32	imm;		/* signed immediate constant */
};
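
/* Illustrative sketch (not part of the UAPI): every eBPF instruction is
 * one fixed-size struct bpf_insn, and a program is just an array of them.
 * A minimal hand-encoded program that returns 0 can be written with the
 * opcode fields above (BPF_JMP and BPF_K come from <linux/bpf_common.h>):
 *
 *	struct bpf_insn ret0_prog[] = {
 *		{ .code = BPF_ALU64 | BPF_MOV | BPF_K,	// r0 = 0
 *		  .dst_reg = BPF_REG_0, .src_reg = 0, .off = 0, .imm = 0 },
 *		{ .code = BPF_JMP | BPF_EXIT,		// return r0
 *		  .dst_reg = 0, .src_reg = 0, .off = 0, .imm = 0 },
 *	};
 *
 * This encoding is what the BPF_PROG_LOAD command expects in the 'insns'
 * field of union bpf_attr below.
 */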

/* BPF syscall commands, see bpf(2) man-page for details. */
enum bpf_cmd {
	BPF_MAP_CREATE,
	BPF_MAP_LOOKUP_ELEM,
	BPF_MAP_UPDATE_ELEM,
	BPF_MAP_DELETE_ELEM,
	BPF_MAP_GET_NEXT_KEY,
	BPF_PROG_LOAD,
	BPF_OBJ_PIN,
	BPF_OBJ_GET,
	BPF_PROG_ATTACH,
	BPF_PROG_DETACH,
};
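
/* Illustrative sketch (not part of the UAPI): there is no dedicated glibc
 * wrapper for bpf(2), so userspace typically issues the commands above
 * through syscall(2), e.g.:
 *
 *	#include <unistd.h>
 *	#include <sys/syscall.h>
 *	#include <linux/bpf.h>
 *
 *	static int sys_bpf(enum bpf_cmd cmd, union bpf_attr *attr,
 *			   unsigned int size)
 *	{
 *		return syscall(__NR_bpf, cmd, attr, size);
 *	}
 *
 * 'size' is normally sizeof(*attr); each command only looks at the
 * attribute fields it defines.
 */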

enum bpf_map_type {
	BPF_MAP_TYPE_UNSPEC,
	BPF_MAP_TYPE_HASH,
	BPF_MAP_TYPE_ARRAY,
	BPF_MAP_TYPE_PROG_ARRAY,
	BPF_MAP_TYPE_PERF_EVENT_ARRAY,
	BPF_MAP_TYPE_PERCPU_HASH,
	BPF_MAP_TYPE_PERCPU_ARRAY,
	BPF_MAP_TYPE_STACK_TRACE,
	BPF_MAP_TYPE_CGROUP_ARRAY,
};

enum bpf_prog_type {
	BPF_PROG_TYPE_UNSPEC,
	BPF_PROG_TYPE_SOCKET_FILTER,
	BPF_PROG_TYPE_KPROBE,
	BPF_PROG_TYPE_SCHED_CLS,
	BPF_PROG_TYPE_SCHED_ACT,
	BPF_PROG_TYPE_TRACEPOINT,
	BPF_PROG_TYPE_XDP,
	BPF_PROG_TYPE_PERF_EVENT,
	BPF_PROG_TYPE_CGROUP_SKB,
};

enum bpf_attach_type {
	BPF_CGROUP_INET_INGRESS,
	BPF_CGROUP_INET_EGRESS,
	__MAX_BPF_ATTACH_TYPE
};

#define MAX_BPF_ATTACH_TYPE __MAX_BPF_ATTACH_TYPE

/* If the BPF_F_ALLOW_OVERRIDE flag is used in the BPF_PROG_ATTACH command
 * for the given target_fd cgroup, descendant cgroups will be able to
 * override the effective bpf program that was inherited from this cgroup.
 */
#define BPF_F_ALLOW_OVERRIDE	(1U << 0)
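
/* Illustrative sketch (not part of the UAPI): attaching a loaded
 * BPF_PROG_TYPE_CGROUP_SKB program to a cgroup-v2 directory fd so that
 * descendants may later override it. 'cgroup_fd' and 'prog_fd' are
 * assumed to come from open(2) and BPF_PROG_LOAD respectively:
 *
 *	union bpf_attr attr = {};
 *
 *	attr.target_fd	   = cgroup_fd;
 *	attr.attach_bpf_fd = prog_fd;
 *	attr.attach_type   = BPF_CGROUP_INET_INGRESS;
 *	attr.attach_flags  = BPF_F_ALLOW_OVERRIDE;
 *
 *	err = syscall(__NR_bpf, BPF_PROG_ATTACH, &attr, sizeof(attr));
 *
 * Without BPF_F_ALLOW_OVERRIDE, attach attempts in descendant cgroups are
 * rejected while this program is in effect.
 */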

#define BPF_PSEUDO_MAP_FD	1

/* flags for BPF_MAP_UPDATE_ELEM command */
#define BPF_ANY		0 /* create new element or update existing */
#define BPF_NOEXIST	1 /* create new element if it didn't exist */
#define BPF_EXIST	2 /* update existing element */

#define BPF_F_NO_PREALLOC	(1U << 0)

union bpf_attr {
	struct { /* anonymous struct used by BPF_MAP_CREATE command */
		__u32	map_type;	/* one of enum bpf_map_type */
		__u32	key_size;	/* size of key in bytes */
		__u32	value_size;	/* size of value in bytes */
		__u32	max_entries;	/* max number of entries in a map */
		__u32	map_flags;	/* prealloc or not */
	};

	struct { /* anonymous struct used by BPF_MAP_*_ELEM commands */
		__u32		map_fd;
		__aligned_u64	key;
		union {
			__aligned_u64 value;
			__aligned_u64 next_key;
		};
		__u64		flags;
	};

	struct { /* anonymous struct used by BPF_PROG_LOAD command */
		__u32		prog_type;	/* one of enum bpf_prog_type */
		__u32		insn_cnt;
		__aligned_u64	insns;
		__aligned_u64	license;
		__u32		log_level;	/* verbosity level of verifier */
		__u32		log_size;	/* size of user buffer */
		__aligned_u64	log_buf;	/* user supplied buffer */
		__u32		kern_version;	/* checked when prog_type=kprobe */
	};

	struct { /* anonymous struct used by BPF_OBJ_* commands */
		__aligned_u64	pathname;
		__u32		bpf_fd;
	};

	struct { /* anonymous struct used by BPF_PROG_ATTACH/DETACH commands */
		__u32		target_fd;	/* container object to attach to */
		__u32		attach_bpf_fd;	/* eBPF program to attach */
		__u32		attach_type;
		__u32		attach_flags;
	};
} __attribute__((aligned(8)));
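
/* Illustrative sketch (not part of the UAPI): creating a small hash map
 * and inserting one element through the anonymous structs above. The
 * sys_bpf() wrapper is the hypothetical syscall helper sketched next to
 * enum bpf_cmd:
 *
 *	union bpf_attr create = {
 *		.map_type    = BPF_MAP_TYPE_HASH,
 *		.key_size    = sizeof(__u32),
 *		.value_size  = sizeof(__u64),
 *		.max_entries = 1024,
 *		.map_flags   = 0,
 *	};
 *	int map_fd = sys_bpf(BPF_MAP_CREATE, &create, sizeof(create));
 *
 *	__u32 key = 1;
 *	__u64 value = 42;
 *	union bpf_attr update = {
 *		.map_fd = map_fd,
 *		.key    = (__u64)(unsigned long)&key,
 *		.value  = (__u64)(unsigned long)&value,
 *		.flags  = BPF_NOEXIST,	// fail with EEXIST if key is present
 *	};
 *	sys_bpf(BPF_MAP_UPDATE_ELEM, &update, sizeof(update));
 */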

/* integer value in 'imm' field of BPF_CALL instruction selects which helper
 * function eBPF program intends to call
 */
enum bpf_func_id {
	BPF_FUNC_unspec,
	BPF_FUNC_map_lookup_elem, /* void *map_lookup_elem(&map, &key) */
	BPF_FUNC_map_update_elem, /* int map_update_elem(&map, &key, &value, flags) */
	BPF_FUNC_map_delete_elem, /* int map_delete_elem(&map, &key) */
	BPF_FUNC_probe_read, /* int bpf_probe_read(void *dst, int size, void *src) */
	BPF_FUNC_ktime_get_ns, /* u64 bpf_ktime_get_ns(void) */
	BPF_FUNC_trace_printk, /* int bpf_trace_printk(const char *fmt, int fmt_size, ...) */
	BPF_FUNC_get_prandom_u32, /* u32 prandom_u32(void) */
	BPF_FUNC_get_smp_processor_id, /* u32 raw_smp_processor_id(void) */

	/**
	 * skb_store_bytes(skb, offset, from, len, flags) - store bytes into packet
	 * @skb: pointer to skb
	 * @offset: offset within packet from skb->mac_header
	 * @from: pointer where to copy bytes from
	 * @len: number of bytes to store into packet
	 * @flags: bit 0 - if true, recompute skb->csum
	 *         other bits - reserved
	 * Return: 0 on success
	 */
	BPF_FUNC_skb_store_bytes,

	/**
	 * l3_csum_replace(skb, offset, from, to, flags) - recompute IP checksum
	 * @skb: pointer to skb
	 * @offset: offset within packet where IP checksum is located
	 * @from: old value of header field
	 * @to: new value of header field
	 * @flags: bits 0-3 - size of header field
	 *         other bits - reserved
	 * Return: 0 on success
	 */
	BPF_FUNC_l3_csum_replace,

	/**
	 * l4_csum_replace(skb, offset, from, to, flags) - recompute TCP/UDP checksum
	 * @skb: pointer to skb
	 * @offset: offset within packet where TCP/UDP checksum is located
	 * @from: old value of header field
	 * @to: new value of header field
	 * @flags: bits 0-3 - size of header field
	 *         bit 4 - is pseudo header
	 *         other bits - reserved
	 * Return: 0 on success
	 */
	BPF_FUNC_l4_csum_replace,

	/**
	 * bpf_tail_call(ctx, prog_array_map, index) - jump into another BPF program
	 * @ctx: context pointer passed to next program
	 * @prog_array_map: pointer to map of type BPF_MAP_TYPE_PROG_ARRAY
	 * @index: index inside array that selects specific program to run
	 * Return: 0 on success
	 */
	BPF_FUNC_tail_call,

	/**
	 * bpf_clone_redirect(skb, ifindex, flags) - redirect to another netdev
	 * @skb: pointer to skb
	 * @ifindex: ifindex of the net device
	 * @flags: bit 0 - if set, redirect to ingress instead of egress
	 *         other bits - reserved
	 * Return: 0 on success
	 */
	BPF_FUNC_clone_redirect,

	/**
	 * u64 bpf_get_current_pid_tgid(void)
	 * Return: current->tgid << 32 | current->pid
	 */
	BPF_FUNC_get_current_pid_tgid,

	/**
	 * u64 bpf_get_current_uid_gid(void)
	 * Return: current_gid << 32 | current_uid
	 */
	BPF_FUNC_get_current_uid_gid,

	/**
	 * bpf_get_current_comm(char *buf, int size_of_buf)
	 * stores current->comm into buf
	 * Return: 0 on success
	 */
	BPF_FUNC_get_current_comm,

	/**
	 * bpf_get_cgroup_classid(skb) - retrieve a proc's classid
	 * @skb: pointer to skb
	 * Return: classid if != 0
	 */
	BPF_FUNC_get_cgroup_classid,
	BPF_FUNC_skb_vlan_push, /* bpf_skb_vlan_push(skb, vlan_proto, vlan_tci) */
	BPF_FUNC_skb_vlan_pop, /* bpf_skb_vlan_pop(skb) */

	/**
	 * bpf_skb_[gs]et_tunnel_key(skb, key, size, flags)
	 * retrieve or populate tunnel metadata
	 * @skb: pointer to skb
	 * @key: pointer to 'struct bpf_tunnel_key'
	 * @size: size of 'struct bpf_tunnel_key'
	 * @flags: room for future extensions
	 * Return: 0 on success
	 */
	BPF_FUNC_skb_get_tunnel_key,
	BPF_FUNC_skb_set_tunnel_key,
	BPF_FUNC_perf_event_read, /* u64 bpf_perf_event_read(&map, index) */
	/**
	 * bpf_redirect(ifindex, flags) - redirect to another netdev
	 * @ifindex: ifindex of the net device
	 * @flags: bit 0 - if set, redirect to ingress instead of egress
	 *         other bits - reserved
	 * Return: TC_ACT_REDIRECT
	 */
	BPF_FUNC_redirect,

	/**
	 * bpf_get_route_realm(skb) - retrieve a dst's tclassid
	 * @skb: pointer to skb
	 * Return: realm if != 0
	 */
	BPF_FUNC_get_route_realm,

	/**
	 * bpf_perf_event_output(ctx, map, index, data, size) - output perf raw sample
	 * @ctx: struct pt_regs*
	 * @map: pointer to perf_event_array map
	 * @index: index of event in the map
	 * @data: data on stack to be output as raw data
	 * @size: size of data
	 * Return: 0 on success
	 */
	BPF_FUNC_perf_event_output,
	BPF_FUNC_skb_load_bytes,

	/**
	 * bpf_get_stackid(ctx, map, flags) - walk user or kernel stack and return id
	 * @ctx: struct pt_regs*
	 * @map: pointer to stack_trace map
	 * @flags: bits 0-7 - number of stack frames to skip
	 *         bit 8 - collect user stack instead of kernel
	 *         bit 9 - compare stacks by hash only
	 *         bit 10 - if two different stacks hash into the same stackid
	 *                  discard old
	 *         other bits - reserved
	 * Return: >= 0 stackid on success or negative error
	 */
	BPF_FUNC_get_stackid,

	/**
	 * bpf_csum_diff(from, from_size, to, to_size, seed) - calculate csum diff
	 * @from: raw from buffer
	 * @from_size: length of from buffer
	 * @to: raw to buffer
	 * @to_size: length of to buffer
	 * @seed: optional seed
	 * Return: csum result
	 */
	BPF_FUNC_csum_diff,

	/**
	 * bpf_skb_[gs]et_tunnel_opt(skb, opt, size)
	 * retrieve or populate tunnel options metadata
	 * @skb: pointer to skb
	 * @opt: pointer to raw tunnel option data
	 * @size: size of @opt
	 * Return: 0 on success for set, option size for get
	 */
	BPF_FUNC_skb_get_tunnel_opt,
	BPF_FUNC_skb_set_tunnel_opt,

	/**
	 * bpf_skb_change_proto(skb, proto, flags)
	 * Change protocol of the skb. Currently supported are
	 * v4 -> v6 and v6 -> v4 transitions. The helper will also
	 * resize the skb. The eBPF program is expected to fill the
	 * new headers via skb_store_bytes and lX_csum_replace.
	 * @skb: pointer to skb
	 * @proto: new skb->protocol type
	 * @flags: reserved
	 * Return: 0 on success or negative error
	 */
	BPF_FUNC_skb_change_proto,

	/**
	 * bpf_skb_change_type(skb, type)
	 * Change packet type of skb.
	 * @skb: pointer to skb
	 * @type: new skb->pkt_type type
	 * Return: 0 on success or negative error
	 */
	BPF_FUNC_skb_change_type,

	/**
	 * bpf_skb_under_cgroup(skb, map, index) - Check cgroup2 membership of skb
	 * @skb: pointer to skb
	 * @map: pointer to bpf_map in BPF_MAP_TYPE_CGROUP_ARRAY type
	 * @index: index of the cgroup in the bpf_map
	 * Return:
	 *   == 0 skb failed the cgroup2 descendant test
	 *   == 1 skb succeeded the cgroup2 descendant test
	 *    < 0 error
	 */
	BPF_FUNC_skb_under_cgroup,

	/**
	 * bpf_get_hash_recalc(skb)
	 * Retrieve and possibly recalculate skb->hash.
	 * @skb: pointer to skb
	 * Return: hash
	 */
	BPF_FUNC_get_hash_recalc,

	/**
	 * u64 bpf_get_current_task(void)
	 * Returns current task_struct
	 * Return: current
	 */
	BPF_FUNC_get_current_task,

	/**
	 * bpf_probe_write_user(void *dst, void *src, int len)
	 * safely attempt to write to a location
	 * @dst: destination address in userspace
	 * @src: source address on stack
	 * @len: number of bytes to copy
	 * Return: 0 on success or negative error
	 */
	BPF_FUNC_probe_write_user,

	/**
	 * bpf_current_task_under_cgroup(map, index) - Check cgroup2 membership of current task
	 * @map: pointer to bpf_map in BPF_MAP_TYPE_CGROUP_ARRAY type
	 * @index: index of the cgroup in the bpf_map
	 * Return:
	 *   == 0 current failed the cgroup2 descendant test
	 *   == 1 current succeeded the cgroup2 descendant test
	 *    < 0 error
	 */
	BPF_FUNC_current_task_under_cgroup,

	/**
	 * bpf_skb_change_tail(skb, len, flags)
	 * The helper will resize the skb to the given new size, to be
	 * used e.g. with control messages.
	 * @skb: pointer to skb
	 * @len: new skb length
	 * @flags: reserved
	 * Return: 0 on success or negative error
	 */
	BPF_FUNC_skb_change_tail,

	/**
	 * bpf_skb_pull_data(skb, len)
	 * The helper will pull in non-linear data in case the skb is
	 * non-linear and not all of len is part of the linear section.
	 * Only needed for read/write with direct packet access.
	 * @skb: pointer to skb
	 * @len: len to make read/writeable
	 * Return: 0 on success or negative error
	 */
	BPF_FUNC_skb_pull_data,

	/**
	 * bpf_csum_update(skb, csum)
	 * Adds csum into skb->csum in case of CHECKSUM_COMPLETE.
	 * @skb: pointer to skb
	 * @csum: csum to add
	 * Return: csum on success or negative error
	 */
	BPF_FUNC_csum_update,

	/**
	 * bpf_set_hash_invalid(skb)
	 * Invalidate the current skb->hash.
	 * @skb: pointer to skb
	 */
	BPF_FUNC_set_hash_invalid,

	/**
	 * int bpf_get_numa_node_id()
	 * Return: Id of current NUMA node.
	 */
	BPF_FUNC_get_numa_node_id,

	/**
	 * int bpf_skb_change_head(skb, len, flags)
	 * Grows headroom of skb and adjusts MAC header offset accordingly.
	 * Will extend/reallocate as required automatically.
	 * May change skb data pointer and will thus invalidate any check
	 * performed for direct packet access.
	 * @skb: pointer to skb
	 * @len: length of header to be pushed in front
	 * @flags: Flags (unused for now)
	 * Return: 0 on success or negative error
	 */
	BPF_FUNC_skb_change_head,

	/**
	 * int bpf_xdp_adjust_head(xdp_md, delta)
	 * Adjust the xdp_md.data by delta
	 * @xdp_md: pointer to xdp_md
	 * @delta: a positive/negative integer to be added to xdp_md.data
	 * Return: 0 on success or negative on error
	 */
	BPF_FUNC_xdp_adjust_head,

	/**
	 * int bpf_probe_read_str(void *dst, int size, const void *unsafe_ptr)
	 * Copy a NUL terminated string from unsafe address. In case the string
	 * length is smaller than size, the target is not padded with further NUL
	 * bytes. In case the string length is larger than size, just size-1
	 * bytes are copied and the last byte is set to NUL.
	 * @dst: destination address
	 * @size: maximum number of bytes to copy, including the trailing NUL
	 * @unsafe_ptr: unsafe address
	 * Return:
	 *   > 0 length of the string including the trailing NUL on success
	 *   < 0 error
	 */
	BPF_FUNC_probe_read_str,
| 495 | |
Chenbo Feng | 0931366 | 2017-03-22 17:27:34 -0700 | [diff] [blame] | 496 | /** |
| 497 | * u64 bpf_bpf_get_socket_cookie(skb) |
| 498 | * Get the cookie for the socket stored inside sk_buff. |
| 499 | * @skb: pointer to skb |
| 500 | * Return: 8 Bytes non-decreasing number on success or 0 if the socket |
| 501 | * field is missing inside sk_buff |
| 502 | */ |
| 503 | BPF_FUNC_get_socket_cookie, |

	/**
	 * u32 bpf_get_socket_uid(skb)
	 * Get the owner uid of the socket stored inside sk_buff.
	 * @skb: pointer to skb
	 * Return: uid of the socket owner on success or 0 if the socket pointer
	 * inside sk_buff is NULL
	 */
	BPF_FUNC_get_socket_uid,

	__BPF_FUNC_MAX_ID,
};
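
/* Illustrative sketch (not part of the UAPI): a helper is invoked with a
 * BPF_CALL instruction whose 'imm' field holds one of the enum bpf_func_id
 * values above; the verifier rewrites it into a call to the in-kernel
 * implementation. By the eBPF calling convention, arguments are passed in
 * R1-R5 and the result comes back in R0. For example, with the map
 * reference in R1 (loaded via a BPF_PSEUDO_MAP_FD immediate) and a key
 * pointer in R2, a lookup ends with:
 *
 *	{ .code = BPF_JMP | BPF_CALL,
 *	  .dst_reg = 0, .src_reg = 0, .off = 0,
 *	  .imm = BPF_FUNC_map_lookup_elem },
 *
 * In restricted C, compilers and loaders hide this encoding; sample code
 * commonly declares function pointers whose "address" is the helper id,
 * e.g. (assumed naming, as used in samples/bpf-style headers):
 *
 *	static void *(*bpf_map_lookup_elem)(void *map, void *key) =
 *		(void *) BPF_FUNC_map_lookup_elem;
 */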

/* All flags used by eBPF helper functions, placed here. */

/* BPF_FUNC_skb_store_bytes flags. */
#define BPF_F_RECOMPUTE_CSUM		(1ULL << 0)
#define BPF_F_INVALIDATE_HASH		(1ULL << 1)

/* BPF_FUNC_l3_csum_replace and BPF_FUNC_l4_csum_replace flags.
 * First 4 bits are for passing the header field size.
 */
#define BPF_F_HDR_FIELD_MASK		0xfULL

/* BPF_FUNC_l4_csum_replace flags. */
#define BPF_F_PSEUDO_HDR		(1ULL << 4)
#define BPF_F_MARK_MANGLED_0		(1ULL << 5)

/* BPF_FUNC_clone_redirect and BPF_FUNC_redirect flags. */
#define BPF_F_INGRESS			(1ULL << 0)

/* BPF_FUNC_skb_set_tunnel_key and BPF_FUNC_skb_get_tunnel_key flags. */
#define BPF_F_TUNINFO_IPV6		(1ULL << 0)

/* BPF_FUNC_get_stackid flags. */
#define BPF_F_SKIP_FIELD_MASK		0xffULL
#define BPF_F_USER_STACK		(1ULL << 8)
#define BPF_F_FAST_STACK_CMP		(1ULL << 9)
#define BPF_F_REUSE_STACKID		(1ULL << 10)
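
/* Illustrative sketch (not part of the UAPI): the get_stackid flags are
 * combined into one 64-bit value, with the low byte carrying the number
 * of frames to skip. E.g. to record a user-space stack, skipping the two
 * innermost frames and comparing stacks by hash only:
 *
 *	__u64 flags = (2 & BPF_F_SKIP_FIELD_MASK) |
 *		      BPF_F_USER_STACK |
 *		      BPF_F_FAST_STACK_CMP;
 *	int stackid = bpf_get_stackid(ctx, &stack_map, flags);
 *
 * 'bpf_get_stackid' stands for whatever program-side wrapper is used for
 * BPF_FUNC_get_stackid, and 'stack_map' is assumed to be a
 * BPF_MAP_TYPE_STACK_TRACE map.
 */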

/* BPF_FUNC_skb_set_tunnel_key flags. */
#define BPF_F_ZERO_CSUM_TX		(1ULL << 1)
#define BPF_F_DONT_FRAGMENT		(1ULL << 2)

/* BPF_FUNC_perf_event_output and BPF_FUNC_perf_event_read flags. */
#define BPF_F_INDEX_MASK		0xffffffffULL
#define BPF_F_CURRENT_CPU		BPF_F_INDEX_MASK
/* BPF_FUNC_perf_event_output for sk_buff input context. */
#define BPF_F_CTXLEN_MASK		(0xfffffULL << 32)
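
/* Illustrative sketch (not part of the UAPI): the low 32 bits of the
 * perf_event_output/perf_event_read flags select the perf event array
 * index (or BPF_F_CURRENT_CPU for the current CPU), and for sk_buff
 * contexts bits 32-51 may carry how many packet bytes to append:
 *
 *	// emit 'sample' plus the first 64 bytes of the packet
 *	// to the ring buffer of the current CPU
 *	__u64 flags = BPF_F_CURRENT_CPU | (64ULL << 32);
 *	bpf_perf_event_output(skb, &events, flags, &sample, sizeof(sample));
 *
 * 'bpf_perf_event_output' stands for the program-side wrapper of
 * BPF_FUNC_perf_event_output and 'events' for a
 * BPF_MAP_TYPE_PERF_EVENT_ARRAY map.
 */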
Daniel Borkmann | 1e33759 | 2016-04-18 21:01:23 +0200 | [diff] [blame] | 553 | |
Alexei Starovoitov | 9bac3d6 | 2015-03-13 11:57:42 -0700 | [diff] [blame] | 554 | /* user accessible mirror of in-kernel sk_buff. |
| 555 | * new fields can only be added to the end of this structure |
| 556 | */ |
| 557 | struct __sk_buff { |
| 558 | __u32 len; |
| 559 | __u32 pkt_type; |
| 560 | __u32 mark; |
| 561 | __u32 queue_mapping; |
Alexei Starovoitov | c249739 | 2015-03-16 18:06:02 -0700 | [diff] [blame] | 562 | __u32 protocol; |
| 563 | __u32 vlan_present; |
| 564 | __u32 vlan_tci; |
Michal Sekletar | 27cd545 | 2015-03-24 14:48:41 +0100 | [diff] [blame] | 565 | __u32 vlan_proto; |
Daniel Borkmann | bcad571 | 2015-04-03 20:52:24 +0200 | [diff] [blame] | 566 | __u32 priority; |
Alexei Starovoitov | 37e82c2 | 2015-05-27 15:30:39 -0700 | [diff] [blame] | 567 | __u32 ingress_ifindex; |
| 568 | __u32 ifindex; |
Alexei Starovoitov | d691f9e | 2015-06-04 10:11:54 -0700 | [diff] [blame] | 569 | __u32 tc_index; |
| 570 | __u32 cb[5]; |
Daniel Borkmann | ba7591d | 2015-08-01 00:46:29 +0200 | [diff] [blame] | 571 | __u32 hash; |
Daniel Borkmann | 045efa8 | 2015-09-15 23:05:42 -0700 | [diff] [blame] | 572 | __u32 tc_classid; |
Alexei Starovoitov | 969bf05 | 2016-05-05 19:49:10 -0700 | [diff] [blame] | 573 | __u32 data; |
| 574 | __u32 data_end; |
Alexei Starovoitov | 9bac3d6 | 2015-03-13 11:57:42 -0700 | [diff] [blame] | 575 | }; |
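
/* Illustrative sketch (not part of the UAPI): socket filter and tc
 * programs receive a struct __sk_buff * as their context; loads from it
 * are rewritten by the verifier into accesses to the real sk_buff. A
 * minimal restricted-C socket filter (compiled with clang -target bpf)
 * that keeps only packets longer than 100 bytes might look like:
 *
 *	int keep_large(struct __sk_buff *skb)
 *	{
 *		return skb->len > 100 ? -1 : 0;
 *	}
 *
 * For BPF_PROG_TYPE_SOCKET_FILTER the return value is the number of bytes
 * to keep (0 drops the packet); returning -1 follows the classic BPF
 * convention of "keep everything".
 */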

struct bpf_tunnel_key {
	__u32 tunnel_id;
	union {
		__u32 remote_ipv4;
		__u32 remote_ipv6[4];
	};
	__u8 tunnel_tos;
	__u8 tunnel_ttl;
	__u16 tunnel_ext;
	__u32 tunnel_label;
};
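
/* Illustrative sketch (not part of the UAPI): a tc egress program can
 * populate tunnel metadata for a metadata-mode tunnel device through
 * BPF_FUNC_skb_set_tunnel_key. 'bpf_skb_set_tunnel_key' stands for the
 * program-side wrapper of that helper:
 *
 *	struct bpf_tunnel_key key = {};
 *
 *	key.tunnel_id   = 42;			// VNI / tunnel id
 *	key.remote_ipv4 = 0x0a000001;		// 10.0.0.1, host byte order
 *	key.tunnel_ttl  = 64;
 *	bpf_skb_set_tunnel_key(skb, &key, sizeof(key), BPF_F_ZERO_CSUM_TX);
 */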

/* User return codes for XDP prog type.
 * A valid XDP program must return one of these defined values. All other
 * return codes are reserved for future use. Unknown return codes will result
 * in packet drop.
 */
enum xdp_action {
	XDP_ABORTED = 0,
	XDP_DROP,
	XDP_PASS,
	XDP_TX,
};

/* user accessible metadata for XDP packet hook
 * new fields must be added to the end of this structure
 */
struct xdp_md {
	__u32 data;
	__u32 data_end;
};
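
/* Illustrative sketch (not part of the UAPI): an XDP program receives a
 * struct xdp_md * and must return one of the xdp_action codes above.
 * data and data_end are turned into packet pointers for direct access,
 * and every access has to be bounds-checked against data_end first. A
 * minimal pass-through that drops frames too short to hold an Ethernet
 * header (assuming <linux/if_ether.h> for struct ethhdr):
 *
 *	int xdp_prog(struct xdp_md *ctx)
 *	{
 *		void *data     = (void *)(long)ctx->data;
 *		void *data_end = (void *)(long)ctx->data_end;
 *
 *		if (data + sizeof(struct ethhdr) > data_end)
 *			return XDP_DROP;
 *		return XDP_PASS;
 *	}
 */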

#endif /* _UAPI__LINUX_BPF_H__ */