/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 */
#ifndef _UAPI__LINUX_BPF_H__
#define _UAPI__LINUX_BPF_H__

#include <linux/types.h>
#include <linux/bpf_common.h>

/* Extended instruction set based on top of classic BPF */

/* instruction classes */
#define BPF_ALU64	0x07	/* alu mode in double word width */

/* ld/ldx fields */
#define BPF_DW		0x18	/* double word */
#define BPF_XADD	0xc0	/* exclusive add */

/* alu/jmp fields */
#define BPF_MOV		0xb0	/* mov reg to reg */
#define BPF_ARSH	0xc0	/* sign extending arithmetic shift right */

/* change endianness of a register */
#define BPF_END		0xd0	/* flags for endianness conversion: */
#define BPF_TO_LE	0x00	/* convert to little-endian */
#define BPF_TO_BE	0x08	/* convert to big-endian */
#define BPF_FROM_LE	BPF_TO_LE
#define BPF_FROM_BE	BPF_TO_BE

#define BPF_JNE		0x50	/* jump != */
#define BPF_JSGT	0x60	/* SGT is signed '>', GT in x86 */
#define BPF_JSGE	0x70	/* SGE is signed '>=', GE in x86 */
#define BPF_CALL	0x80	/* function call */
#define BPF_EXIT	0x90	/* function return */

/* Register numbers */
enum {
	BPF_REG_0 = 0,
	BPF_REG_1,
	BPF_REG_2,
	BPF_REG_3,
	BPF_REG_4,
	BPF_REG_5,
	BPF_REG_6,
	BPF_REG_7,
	BPF_REG_8,
	BPF_REG_9,
	BPF_REG_10,
	__MAX_BPF_REG,
};

/* BPF has 10 general purpose 64-bit registers and a stack frame. */
#define MAX_BPF_REG	__MAX_BPF_REG

struct bpf_insn {
	__u8	code;		/* opcode */
	__u8	dst_reg:4;	/* dest register */
	__u8	src_reg:4;	/* source register */
	__s16	off;		/* signed offset */
	__s32	imm;		/* signed immediate constant */
};
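
/* Illustrative sketch, not part of the UAPI: a minimal two-instruction
 * program, "r0 = 0; exit", encoded directly in struct bpf_insn. BPF_JMP
 * and BPF_K come from linux/bpf_common.h; designated initializers leave
 * the remaining fields zeroed:
 *
 *	struct bpf_insn prog[] = {
 *		{ .code = BPF_ALU64 | BPF_MOV | BPF_K,
 *		  .dst_reg = BPF_REG_0, .imm = 0 },
 *		{ .code = BPF_JMP | BPF_EXIT },
 *	};
 */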

/* Key of a BPF_MAP_TYPE_LPM_TRIE entry */
struct bpf_lpm_trie_key {
	__u32	prefixlen;	/* up to 32 for AF_INET, 128 for AF_INET6 */
	__u8	data[0];	/* Arbitrary size */
};
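
/* Illustrative sketch, not part of the UAPI: a lookup key matching the
 * IPv4 prefix 192.168.0.0/24. A common pattern is to append the address
 * bytes, in network byte order, directly after the header:
 *
 *	struct {
 *		struct bpf_lpm_trie_key hdr;
 *		__u8 ipv4[4];
 *	} key = {
 *		.hdr.prefixlen = 24,
 *		.ipv4 = { 192, 168, 0, 0 },
 *	};
 */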

/* BPF syscall commands, see bpf(2) man-page for details. */
enum bpf_cmd {
	BPF_MAP_CREATE,
	BPF_MAP_LOOKUP_ELEM,
	BPF_MAP_UPDATE_ELEM,
	BPF_MAP_DELETE_ELEM,
	BPF_MAP_GET_NEXT_KEY,
	BPF_PROG_LOAD,
	BPF_OBJ_PIN,
	BPF_OBJ_GET,
	BPF_PROG_ATTACH,
	BPF_PROG_DETACH,
	BPF_PROG_TEST_RUN,
	BPF_PROG_GET_NEXT_ID,
	BPF_MAP_GET_NEXT_ID,
	BPF_PROG_GET_FD_BY_ID,
	BPF_MAP_GET_FD_BY_ID,
	BPF_OBJ_GET_INFO_BY_FD,
};
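
/* Illustrative sketch, not part of this header: glibc provides no bpf()
 * wrapper, so userspace usually issues the syscall directly (assuming
 * <unistd.h> and <sys/syscall.h>). The usage examples in comments further
 * down rely on this hypothetical sys_bpf() helper:
 *
 *	static inline int sys_bpf(enum bpf_cmd cmd, union bpf_attr *attr,
 *				  unsigned int size)
 *	{
 *		return syscall(__NR_bpf, cmd, attr, size);
 *	}
 */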

enum bpf_map_type {
	BPF_MAP_TYPE_UNSPEC,
	BPF_MAP_TYPE_HASH,
	BPF_MAP_TYPE_ARRAY,
	BPF_MAP_TYPE_PROG_ARRAY,
	BPF_MAP_TYPE_PERF_EVENT_ARRAY,
	BPF_MAP_TYPE_PERCPU_HASH,
	BPF_MAP_TYPE_PERCPU_ARRAY,
	BPF_MAP_TYPE_STACK_TRACE,
	BPF_MAP_TYPE_CGROUP_ARRAY,
	BPF_MAP_TYPE_LRU_HASH,
	BPF_MAP_TYPE_LRU_PERCPU_HASH,
	BPF_MAP_TYPE_LPM_TRIE,
	BPF_MAP_TYPE_ARRAY_OF_MAPS,
	BPF_MAP_TYPE_HASH_OF_MAPS,
};

enum bpf_prog_type {
	BPF_PROG_TYPE_UNSPEC,
	BPF_PROG_TYPE_SOCKET_FILTER,
	BPF_PROG_TYPE_KPROBE,
	BPF_PROG_TYPE_SCHED_CLS,
	BPF_PROG_TYPE_SCHED_ACT,
	BPF_PROG_TYPE_TRACEPOINT,
	BPF_PROG_TYPE_XDP,
	BPF_PROG_TYPE_PERF_EVENT,
	BPF_PROG_TYPE_CGROUP_SKB,
	BPF_PROG_TYPE_CGROUP_SOCK,
	BPF_PROG_TYPE_LWT_IN,
	BPF_PROG_TYPE_LWT_OUT,
	BPF_PROG_TYPE_LWT_XMIT,
	BPF_PROG_TYPE_SOCK_OPS,
};

enum bpf_attach_type {
	BPF_CGROUP_INET_INGRESS,
	BPF_CGROUP_INET_EGRESS,
	BPF_CGROUP_INET_SOCK_CREATE,
	BPF_CGROUP_SOCK_OPS,
	__MAX_BPF_ATTACH_TYPE
};

#define MAX_BPF_ATTACH_TYPE __MAX_BPF_ATTACH_TYPE

/* If the BPF_F_ALLOW_OVERRIDE flag is used in the BPF_PROG_ATTACH command
 * to the given target_fd cgroup, descendant cgroups will be able to
 * override the effective bpf program that was inherited from this cgroup.
 */
#define BPF_F_ALLOW_OVERRIDE	(1U << 0)

/* If BPF_F_STRICT_ALIGNMENT is used in BPF_PROG_LOAD command, the
 * verifier will perform strict alignment checking as if the kernel
 * has been built with CONFIG_EFFICIENT_UNALIGNED_ACCESS not set,
 * and NET_IP_ALIGN defined to 2.
 */
#define BPF_F_STRICT_ALIGNMENT	(1U << 0)

#define BPF_PSEUDO_MAP_FD	1

/* flags for BPF_MAP_UPDATE_ELEM command */
#define BPF_ANY		0 /* create new element or update existing */
#define BPF_NOEXIST	1 /* create new element if it didn't exist */
#define BPF_EXIST	2 /* update existing element */

#define BPF_F_NO_PREALLOC	(1U << 0)
/* Instead of having one common LRU list in the
 * BPF_MAP_TYPE_LRU_[PERCPU_]HASH map, use a percpu LRU list
 * which can scale and perform better.
 * Note, the LRU nodes (including free nodes) cannot be moved
 * across different LRU lists.
 */
#define BPF_F_NO_COMMON_LRU	(1U << 1)

union bpf_attr {
	struct { /* anonymous struct used by BPF_MAP_CREATE command */
		__u32	map_type;	/* one of enum bpf_map_type */
		__u32	key_size;	/* size of key in bytes */
		__u32	value_size;	/* size of value in bytes */
		__u32	max_entries;	/* max number of entries in a map */
		__u32	map_flags;	/* prealloc or not */
		__u32	inner_map_fd;	/* fd pointing to the inner map */
	};
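
	/* Illustrative sketch, not part of the UAPI: creating a small array
	 * map with the struct above and the hypothetical sys_bpf() helper
	 * sketched after enum bpf_cmd; unnamed fields stay zeroed:
	 *
	 *	union bpf_attr attr = {
	 *		.map_type    = BPF_MAP_TYPE_ARRAY,
	 *		.key_size    = sizeof(__u32),
	 *		.value_size  = sizeof(__u64),
	 *		.max_entries = 256,
	 *	};
	 *	int map_fd = sys_bpf(BPF_MAP_CREATE, &attr, sizeof(attr));
	 */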

	struct { /* anonymous struct used by BPF_MAP_*_ELEM commands */
		__u32		map_fd;
		__aligned_u64	key;
		union {
			__aligned_u64 value;
			__aligned_u64 next_key;
		};
		__u64		flags;
	};
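
	/* Illustrative sketch, not part of the UAPI: inserting an element
	 * only if the key is not yet present, using the struct above and
	 * the hypothetical sys_bpf(); userspace pointers travel as u64:
	 *
	 *	__u32 key = 1;
	 *	__u64 value = 42;
	 *	union bpf_attr attr = {
	 *		.map_fd = map_fd,
	 *		.key    = (__u64)(unsigned long)&key,
	 *		.value  = (__u64)(unsigned long)&value,
	 *		.flags  = BPF_NOEXIST,
	 *	};
	 *	int err = sys_bpf(BPF_MAP_UPDATE_ELEM, &attr, sizeof(attr));
	 */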

	struct { /* anonymous struct used by BPF_PROG_LOAD command */
		__u32		prog_type;	/* one of enum bpf_prog_type */
		__u32		insn_cnt;
		__aligned_u64	insns;
		__aligned_u64	license;
		__u32		log_level;	/* verbosity level of verifier */
		__u32		log_size;	/* size of user buffer */
		__aligned_u64	log_buf;	/* user supplied buffer */
		__u32		kern_version;	/* checked when prog_type=kprobe */
		__u32		prog_flags;
	};
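
	/* Illustrative sketch, not part of the UAPI: loading a socket
	 * filter from a hypothetical insns[] array of cnt instructions,
	 * with a log buffer so the verifier can report rejections:
	 *
	 *	char log[4096];
	 *	union bpf_attr attr = {
	 *		.prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
	 *		.insn_cnt  = cnt,
	 *		.insns     = (__u64)(unsigned long)insns,
	 *		.license   = (__u64)(unsigned long)"GPL",
	 *		.log_level = 1,
	 *		.log_size  = sizeof(log),
	 *		.log_buf   = (__u64)(unsigned long)log,
	 *	};
	 *	int prog_fd = sys_bpf(BPF_PROG_LOAD, &attr, sizeof(attr));
	 */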

	struct { /* anonymous struct used by BPF_OBJ_* commands */
		__aligned_u64	pathname;
		__u32		bpf_fd;
	};

	struct { /* anonymous struct used by BPF_PROG_ATTACH/DETACH commands */
		__u32		target_fd;	/* container object to attach to */
		__u32		attach_bpf_fd;	/* eBPF program to attach */
		__u32		attach_type;
		__u32		attach_flags;
	};
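
	/* Illustrative sketch, not part of the UAPI: attaching a loaded
	 * program to a cgroup's ingress hook, letting descendant cgroups
	 * override it; cgroup_fd is assumed to be an open cgroup2 directory:
	 *
	 *	union bpf_attr attr = {
	 *		.target_fd     = cgroup_fd,
	 *		.attach_bpf_fd = prog_fd,
	 *		.attach_type   = BPF_CGROUP_INET_INGRESS,
	 *		.attach_flags  = BPF_F_ALLOW_OVERRIDE,
	 *	};
	 *	int err = sys_bpf(BPF_PROG_ATTACH, &attr, sizeof(attr));
	 */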

	struct { /* anonymous struct used by BPF_PROG_TEST_RUN command */
		__u32		prog_fd;
		__u32		retval;
		__u32		data_size_in;
		__u32		data_size_out;
		__aligned_u64	data_in;
		__aligned_u64	data_out;
		__u32		repeat;
		__u32		duration;
	} test;
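
	/* Illustrative sketch, not part of the UAPI: running a program over
	 * a packet buffer pkt of pkt_len bytes (both hypothetical); the
	 * kernel fills in retval and duration:
	 *
	 *	union bpf_attr attr = {
	 *		.test.prog_fd      = prog_fd,
	 *		.test.data_in      = (__u64)(unsigned long)pkt,
	 *		.test.data_size_in = pkt_len,
	 *		.test.repeat       = 100,
	 *	};
	 *	int err = sys_bpf(BPF_PROG_TEST_RUN, &attr, sizeof(attr));
	 */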

	struct { /* anonymous struct used by BPF_*_GET_*_ID */
		union {
			__u32		start_id;
			__u32		prog_id;
			__u32		map_id;
		};
		__u32		next_id;
	};

	struct { /* anonymous struct used by BPF_OBJ_GET_INFO_BY_FD */
		__u32		bpf_fd;
		__u32		info_len;
		__aligned_u64	info;
	} info;
} __attribute__((aligned(8)));

/* BPF helper function descriptions:
 *
 * void *bpf_map_lookup_elem(&map, &key)
 *     Return: Map value or NULL
 *
 * int bpf_map_update_elem(&map, &key, &value, flags)
 *     Return: 0 on success or negative error
 *
 * int bpf_map_delete_elem(&map, &key)
 *     Return: 0 on success or negative error
 *
 * int bpf_probe_read(void *dst, int size, void *src)
 *     Return: 0 on success or negative error
 *
 * u64 bpf_ktime_get_ns(void)
 *     Return: current ktime
 *
 * int bpf_trace_printk(const char *fmt, int fmt_size, ...)
 *     Return: length of buffer written or negative error
 *
 * u32 bpf_prandom_u32(void)
 *     Return: random value
 *
 * u32 bpf_raw_smp_processor_id(void)
 *     Return: SMP processor ID
 *
 * int bpf_skb_store_bytes(skb, offset, from, len, flags)
 *     store bytes into packet
 *     @skb: pointer to skb
 *     @offset: offset within packet from skb->mac_header
 *     @from: pointer where to copy bytes from
 *     @len: number of bytes to store into packet
 *     @flags: bit 0 - if true, recompute skb->csum
 *             other bits - reserved
 *     Return: 0 on success or negative error
 *
 * int bpf_l3_csum_replace(skb, offset, from, to, flags)
 *     recompute IP checksum
 *     @skb: pointer to skb
 *     @offset: offset within packet where IP checksum is located
 *     @from: old value of header field
 *     @to: new value of header field
 *     @flags: bits 0-3 - size of header field
 *             other bits - reserved
 *     Return: 0 on success or negative error
 *
 * int bpf_l4_csum_replace(skb, offset, from, to, flags)
 *     recompute TCP/UDP checksum
 *     @skb: pointer to skb
 *     @offset: offset within packet where TCP/UDP checksum is located
 *     @from: old value of header field
 *     @to: new value of header field
 *     @flags: bits 0-3 - size of header field
 *             bit 4 - is pseudo header
 *             other bits - reserved
 *     Return: 0 on success or negative error
 *
 * int bpf_tail_call(ctx, prog_array_map, index)
 *     jump into another BPF program
 *     @ctx: context pointer passed to next program
 *     @prog_array_map: pointer to map which type is BPF_MAP_TYPE_PROG_ARRAY
 *     @index: index inside array that selects specific program to run
 *     Return: 0 on success or negative error
 *
 * int bpf_clone_redirect(skb, ifindex, flags)
 *     redirect to another netdev
 *     @skb: pointer to skb
 *     @ifindex: ifindex of the net device
 *     @flags: bit 0 - if set, redirect to ingress instead of egress
 *             other bits - reserved
 *     Return: 0 on success or negative error
 *
 * u64 bpf_get_current_pid_tgid(void)
 *     Return: current->tgid << 32 | current->pid
 *
 * u64 bpf_get_current_uid_gid(void)
 *     Return: current_gid << 32 | current_uid
 *
 * int bpf_get_current_comm(char *buf, int size_of_buf)
 *     stores current->comm into buf
 *     Return: 0 on success or negative error
 *
 * u32 bpf_get_cgroup_classid(skb)
 *     retrieve a proc's classid
 *     @skb: pointer to skb
 *     Return: classid if != 0
 *
 * int bpf_skb_vlan_push(skb, vlan_proto, vlan_tci)
 *     Return: 0 on success or negative error
 *
 * int bpf_skb_vlan_pop(skb)
 *     Return: 0 on success or negative error
 *
 * int bpf_skb_get_tunnel_key(skb, key, size, flags)
 * int bpf_skb_set_tunnel_key(skb, key, size, flags)
 *     retrieve or populate tunnel metadata
 *     @skb: pointer to skb
 *     @key: pointer to 'struct bpf_tunnel_key'
 *     @size: size of 'struct bpf_tunnel_key'
 *     @flags: room for future extensions
 *     Return: 0 on success or negative error
 *
 * u64 bpf_perf_event_read(map, flags)
 *     read perf event counter value
 *     @map: pointer to perf_event_array map
 *     @flags: index of event in the map or bitmask flags
 *     Return: value of perf event counter read or error code
 *
 * int bpf_redirect(ifindex, flags)
 *     redirect to another netdev
 *     @ifindex: ifindex of the net device
 *     @flags: bit 0 - if set, redirect to ingress instead of egress
 *             other bits - reserved
 *     Return: TC_ACT_REDIRECT
 *
 * u32 bpf_get_route_realm(skb)
 *     retrieve a dst's tclassid
 *     @skb: pointer to skb
 *     Return: realm if != 0
 *
 * int bpf_perf_event_output(ctx, map, flags, data, size)
 *     output perf raw sample
 *     @ctx: struct pt_regs*
 *     @map: pointer to perf_event_array map
 *     @flags: index of event in the map or bitmask flags
 *     @data: data on stack to be output as raw data
 *     @size: size of data
 *     Return: 0 on success or negative error
 *
 * int bpf_get_stackid(ctx, map, flags)
 *     walk user or kernel stack and return id
 *     @ctx: struct pt_regs*
 *     @map: pointer to stack_trace map
 *     @flags: bits 0-7 - number of stack frames to skip
 *             bit 8 - collect user stack instead of kernel
 *             bit 9 - compare stacks by hash only
 *             bit 10 - if two different stacks hash into the same stackid
 *                      discard old
 *             other bits - reserved
 *     Return: >= 0 stackid on success or negative error
 *
 * s64 bpf_csum_diff(from, from_size, to, to_size, seed)
 *     calculate csum diff
 *     @from: raw from buffer
 *     @from_size: length of from buffer
 *     @to: raw to buffer
 *     @to_size: length of to buffer
 *     @seed: optional seed
 *     Return: csum result or negative error code
 *
 * int bpf_skb_get_tunnel_opt(skb, opt, size)
 *     retrieve tunnel options metadata
 *     @skb: pointer to skb
 *     @opt: pointer to raw tunnel option data
 *     @size: size of @opt
 *     Return: option size
 *
 * int bpf_skb_set_tunnel_opt(skb, opt, size)
 *     populate tunnel options metadata
 *     @skb: pointer to skb
 *     @opt: pointer to raw tunnel option data
 *     @size: size of @opt
 *     Return: 0 on success or negative error
 *
 * int bpf_skb_change_proto(skb, proto, flags)
 *     Change protocol of the skb. Currently supported are v4 -> v6 and
 *     v6 -> v4 transitions. The helper will also resize the skb. The eBPF
 *     program is expected to fill the new headers via skb_store_bytes
 *     and lX_csum_replace.
 *     @skb: pointer to skb
 *     @proto: new skb->protocol type
 *     @flags: reserved
 *     Return: 0 on success or negative error
 *
 * int bpf_skb_change_type(skb, type)
 *     Change packet type of skb.
 *     @skb: pointer to skb
 *     @type: new skb->pkt_type type
 *     Return: 0 on success or negative error
 *
 * int bpf_skb_under_cgroup(skb, map, index)
 *     Check cgroup2 membership of skb
 *     @skb: pointer to skb
 *     @map: pointer to bpf_map in BPF_MAP_TYPE_CGROUP_ARRAY type
 *     @index: index of the cgroup in the bpf_map
 *     Return:
 *       == 0 skb failed the cgroup2 descendant test
 *       == 1 skb succeeded the cgroup2 descendant test
 *        < 0 error
 *
 * u32 bpf_get_hash_recalc(skb)
 *     Retrieve and possibly recalculate skb->hash.
 *     @skb: pointer to skb
 *     Return: hash
 *
 * u64 bpf_get_current_task(void)
 *     Returns current task_struct
 *     Return: current
 *
 * int bpf_probe_write_user(void *dst, void *src, int len)
 *     safely attempt to write to a location
 *     @dst: destination address in userspace
 *     @src: source address on stack
 *     @len: number of bytes to copy
 *     Return: 0 on success or negative error
 *
 * int bpf_current_task_under_cgroup(map, index)
 *     Check cgroup2 membership of current task
 *     @map: pointer to bpf_map in BPF_MAP_TYPE_CGROUP_ARRAY type
 *     @index: index of the cgroup in the bpf_map
 *     Return:
 *       == 0 current failed the cgroup2 descendant test
 *       == 1 current succeeded the cgroup2 descendant test
 *        < 0 error
 *
 * int bpf_skb_change_tail(skb, len, flags)
 *     The helper will resize the skb to the given new size, to be used
 *     e.g. with control messages.
 *     @skb: pointer to skb
 *     @len: new skb length
 *     @flags: reserved
 *     Return: 0 on success or negative error
 *
 * int bpf_skb_pull_data(skb, len)
 *     The helper will pull in non-linear data in case the skb is non-linear
 *     and not all of len is part of the linear section. Only needed for
 *     read/write with direct packet access.
 *     @skb: pointer to skb
 *     @len: len to make read/writeable
 *     Return: 0 on success or negative error
 *
 * s64 bpf_csum_update(skb, csum)
 *     Adds csum into skb->csum in case of CHECKSUM_COMPLETE.
 *     @skb: pointer to skb
 *     @csum: csum to add
 *     Return: csum on success or negative error
 *
 * void bpf_set_hash_invalid(skb)
 *     Invalidate current skb->hash.
 *     @skb: pointer to skb
 *
 * int bpf_get_numa_node_id()
 *     Return: Id of current NUMA node.
 *
 * int bpf_skb_change_head()
 *     Grows headroom of skb and adjusts MAC header offset accordingly.
 *     Will extend/reallocate as required automatically.
 *     May change skb data pointer and will thus invalidate any check
 *     performed for direct packet access.
 *     @skb: pointer to skb
 *     @len: length of header to be pushed in front
 *     @flags: Flags (unused for now)
 *     Return: 0 on success or negative error
 *
 * int bpf_xdp_adjust_head(xdp_md, delta)
 *     Adjust the xdp_md.data by delta
 *     @xdp_md: pointer to xdp_md
 *     @delta: A positive/negative integer to be added to xdp_md.data
 *     Return: 0 on success or negative on error
 *
 * int bpf_probe_read_str(void *dst, int size, const void *unsafe_ptr)
 *     Copy a NUL terminated string from unsafe address. In case the string
 *     length is smaller than size, the target is not padded with further NUL
 *     bytes. In case the string length is larger than size, just size-1
 *     bytes are copied and the last byte is set to NUL.
 *     @dst: destination address
 *     @size: maximum number of bytes to copy, including the trailing NUL
 *     @unsafe_ptr: unsafe address
 *     Return:
 *       > 0 length of the string including the trailing NUL on success
 *       < 0 error
 *
 * u64 bpf_get_socket_cookie(skb)
 *     Get the cookie for the socket stored inside sk_buff.
 *     @skb: pointer to skb
 *     Return: an 8-byte non-decreasing number on success or 0 if the socket
 *     field is missing inside sk_buff
 *
 * u32 bpf_get_socket_uid(skb)
 *     Get the owner uid of the socket stored inside sk_buff.
 *     @skb: pointer to skb
 *     Return: uid of the socket owner on success or overflowuid if failed.
 *
 * u32 bpf_set_hash(skb, hash)
 *     Set full skb->hash.
 *     @skb: pointer to skb
 *     @hash: hash to set
 */
#define __BPF_FUNC_MAPPER(FN)		\
	FN(unspec),			\
	FN(map_lookup_elem),		\
	FN(map_update_elem),		\
	FN(map_delete_elem),		\
	FN(probe_read),			\
	FN(ktime_get_ns),		\
	FN(trace_printk),		\
	FN(get_prandom_u32),		\
	FN(get_smp_processor_id),	\
	FN(skb_store_bytes),		\
	FN(l3_csum_replace),		\
	FN(l4_csum_replace),		\
	FN(tail_call),			\
	FN(clone_redirect),		\
	FN(get_current_pid_tgid),	\
	FN(get_current_uid_gid),	\
	FN(get_current_comm),		\
	FN(get_cgroup_classid),		\
	FN(skb_vlan_push),		\
	FN(skb_vlan_pop),		\
	FN(skb_get_tunnel_key),		\
	FN(skb_set_tunnel_key),		\
	FN(perf_event_read),		\
	FN(redirect),			\
	FN(get_route_realm),		\
	FN(perf_event_output),		\
	FN(skb_load_bytes),		\
	FN(get_stackid),		\
	FN(csum_diff),			\
	FN(skb_get_tunnel_opt),		\
	FN(skb_set_tunnel_opt),		\
	FN(skb_change_proto),		\
	FN(skb_change_type),		\
	FN(skb_under_cgroup),		\
	FN(get_hash_recalc),		\
	FN(get_current_task),		\
	FN(probe_write_user),		\
	FN(current_task_under_cgroup),	\
	FN(skb_change_tail),		\
	FN(skb_pull_data),		\
	FN(csum_update),		\
	FN(set_hash_invalid),		\
	FN(get_numa_node_id),		\
	FN(skb_change_head),		\
	FN(xdp_adjust_head),		\
	FN(probe_read_str),		\
	FN(get_socket_cookie),		\
	FN(get_socket_uid),		\
	FN(set_hash),

/* integer value in 'imm' field of BPF_CALL instruction selects which helper
 * function eBPF program intends to call
 */
#define __BPF_ENUM_FN(x) BPF_FUNC_ ## x
enum bpf_func_id {
	__BPF_FUNC_MAPPER(__BPF_ENUM_FN)
	__BPF_FUNC_MAX_ID,
};
#undef __BPF_ENUM_FN
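
/* Illustrative sketch, not part of the UAPI: the X-macro above can also
 * generate other per-helper tables. For instance, a tracing tool could
 * build a helper-name lookup table with a hypothetical __BPF_STR_FN:
 *
 *	#define __BPF_STR_FN(x) [BPF_FUNC_ ## x] = "bpf_" #x
 *	static const char *bpf_func_names[] = {
 *		__BPF_FUNC_MAPPER(__BPF_STR_FN)
 *	};
 *	#undef __BPF_STR_FN
 *
 * bpf_func_names[BPF_FUNC_map_lookup_elem] then yields "bpf_map_lookup_elem".
 */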

/* All flags used by eBPF helper functions, placed here. */

/* BPF_FUNC_skb_store_bytes flags. */
#define BPF_F_RECOMPUTE_CSUM		(1ULL << 0)
#define BPF_F_INVALIDATE_HASH		(1ULL << 1)

/* BPF_FUNC_l3_csum_replace and BPF_FUNC_l4_csum_replace flags.
 * First 4 bits are for passing the header field size.
 */
#define BPF_F_HDR_FIELD_MASK		0xfULL

/* BPF_FUNC_l4_csum_replace flags. */
#define BPF_F_PSEUDO_HDR		(1ULL << 4)
#define BPF_F_MARK_MANGLED_0		(1ULL << 5)
#define BPF_F_MARK_ENFORCE		(1ULL << 6)

/* BPF_FUNC_clone_redirect and BPF_FUNC_redirect flags. */
#define BPF_F_INGRESS			(1ULL << 0)

/* BPF_FUNC_skb_set_tunnel_key and BPF_FUNC_skb_get_tunnel_key flags. */
#define BPF_F_TUNINFO_IPV6		(1ULL << 0)

/* BPF_FUNC_get_stackid flags. */
#define BPF_F_SKIP_FIELD_MASK		0xffULL
#define BPF_F_USER_STACK		(1ULL << 8)
#define BPF_F_FAST_STACK_CMP		(1ULL << 9)
#define BPF_F_REUSE_STACKID		(1ULL << 10)

/* BPF_FUNC_skb_set_tunnel_key flags. */
#define BPF_F_ZERO_CSUM_TX		(1ULL << 1)
#define BPF_F_DONT_FRAGMENT		(1ULL << 2)

/* BPF_FUNC_perf_event_output and BPF_FUNC_perf_event_read flags. */
#define BPF_F_INDEX_MASK		0xffffffffULL
#define BPF_F_CURRENT_CPU		BPF_F_INDEX_MASK
/* BPF_FUNC_perf_event_output for sk_buff input context. */
#define BPF_F_CTXLEN_MASK		(0xfffffULL << 32)

/* user accessible mirror of in-kernel sk_buff.
 * new fields can only be added to the end of this structure
 */
struct __sk_buff {
	__u32 len;
	__u32 pkt_type;
	__u32 mark;
	__u32 queue_mapping;
	__u32 protocol;
	__u32 vlan_present;
	__u32 vlan_tci;
	__u32 vlan_proto;
	__u32 priority;
	__u32 ingress_ifindex;
	__u32 ifindex;
	__u32 tc_index;
	__u32 cb[5];
	__u32 hash;
	__u32 tc_classid;
	__u32 data;
	__u32 data_end;
	__u32 napi_id;
};
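
/* Illustrative sketch, not part of the UAPI: a BPF_PROG_TYPE_SOCKET_FILTER
 * program in restricted C as compiled with LLVM. A socket filter returns
 * the number of bytes to keep (0 drops the packet); skb->protocol is in
 * network byte order. The SEC() macro, bpf_htons() and ETH_P_IP are
 * assumptions of the build environment, not defined in this header:
 *
 *	SEC("socket")
 *	int accept_ipv4(struct __sk_buff *skb)
 *	{
 *		// keep IPv4 packets in full, drop everything else
 *		if (skb->protocol == bpf_htons(ETH_P_IP))
 *			return skb->len;
 *		return 0;
 *	}
 */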

struct bpf_tunnel_key {
	__u32 tunnel_id;
	union {
		__u32 remote_ipv4;
		__u32 remote_ipv6[4];
	};
	__u8 tunnel_tos;
	__u8 tunnel_ttl;
	__u16 tunnel_ext;
	__u32 tunnel_label;
};

/* Generic BPF return codes which all BPF program types may support.
 * The values are binary compatible with their TC_ACT_* counter-part to
 * provide backwards compatibility with existing SCHED_CLS and SCHED_ACT
 * programs.
 *
 * XDP is handled separately, see XDP_*.
 */
enum bpf_ret_code {
	BPF_OK = 0,
	/* 1 reserved */
	BPF_DROP = 2,
	/* 3-6 reserved */
	BPF_REDIRECT = 7,
	/* >127 are reserved for prog type specific return codes */
};

struct bpf_sock {
	__u32 bound_dev_if;
	__u32 family;
	__u32 type;
	__u32 protocol;
};

#define XDP_PACKET_HEADROOM 256

/* User return codes for XDP prog type.
 * A valid XDP program must return one of these defined values. All other
 * return codes are reserved for future use. Unknown return codes will result
 * in packet drop.
 */
enum xdp_action {
	XDP_ABORTED = 0,
	XDP_DROP,
	XDP_PASS,
	XDP_TX,
};

/* user accessible metadata for XDP packet hook
 * new fields must be added to the end of this structure
 */
struct xdp_md {
	__u32 data;
	__u32 data_end;
};
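
/* Illustrative sketch, not part of the UAPI: an XDP program dropping
 * frames too short to hold an Ethernet header. The data/data_end fields
 * are dereferenced via casts, and the verifier requires an explicit
 * bounds check before any packet access. SEC() and ETH_HLEN (from
 * linux/if_ether.h) are assumptions of the build environment:
 *
 *	SEC("xdp")
 *	int xdp_min_len(struct xdp_md *ctx)
 *	{
 *		void *data     = (void *)(long)ctx->data;
 *		void *data_end = (void *)(long)ctx->data_end;
 *
 *		// bounds check keeps the verifier happy
 *		if (data + ETH_HLEN > data_end)
 *			return XDP_DROP;
 *		return XDP_PASS;
 *	}
 */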

#define BPF_TAG_SIZE 8

struct bpf_prog_info {
	__u32 type;
	__u32 id;
	__u8  tag[BPF_TAG_SIZE];
	__u32 jited_prog_len;
	__u32 xlated_prog_len;
	__aligned_u64 jited_prog_insns;
	__aligned_u64 xlated_prog_insns;
} __attribute__((aligned(8)));

struct bpf_map_info {
	__u32 type;
	__u32 id;
	__u32 key_size;
	__u32 value_size;
	__u32 max_entries;
	__u32 map_flags;
} __attribute__((aligned(8)));

/* User bpf_sock_ops struct to access socket values and specify request ops
 * and their replies.
 * Some of these fields are in network (big-endian) byte order and may need
 * to be converted before use (bpf_ntohl() defined in samples/bpf/bpf_endian.h).
 * New fields can only be added at the end of this structure
 */
struct bpf_sock_ops {
	__u32 op;
	union {
		__u32 reply;
		__u32 replylong[4];
	};
	__u32 family;
	__u32 remote_ip4;	/* Stored in network byte order */
	__u32 local_ip4;	/* Stored in network byte order */
	__u32 remote_ip6[4];	/* Stored in network byte order */
	__u32 local_ip6[4];	/* Stored in network byte order */
	__u32 remote_port;	/* Stored in network byte order */
	__u32 local_port;	/* stored in host byte order */
};

/* List of known BPF sock_ops operators.
 * New entries can only be added at the end
 */
enum {
	BPF_SOCK_OPS_VOID,
};

#endif /* _UAPI__LINUX_BPF_H__ */