/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 */
#ifndef _UAPI__LINUX_BPF_H__
#define _UAPI__LINUX_BPF_H__

#include <linux/types.h>
#include <linux/bpf_common.h>

/* Extended instruction set based on top of classic BPF */

/* instruction classes */
#define BPF_ALU64	0x07	/* alu mode in double word width */

/* ld/ldx fields */
#define BPF_DW		0x18	/* double word (64-bit) */
#define BPF_XADD	0xc0	/* exclusive add */

/* alu/jmp fields */
#define BPF_MOV		0xb0	/* mov reg to reg */
#define BPF_ARSH	0xc0	/* sign extending arithmetic shift right */

/* change endianness of a register */
#define BPF_END		0xd0	/* flags for endianness conversion: */
#define BPF_TO_LE	0x00	/* convert to little-endian */
#define BPF_TO_BE	0x08	/* convert to big-endian */
#define BPF_FROM_LE	BPF_TO_LE
#define BPF_FROM_BE	BPF_TO_BE

/* jmp encodings */
#define BPF_JNE		0x50	/* jump != */
#define BPF_JLT		0xa0	/* LT is unsigned, '<' */
#define BPF_JLE		0xb0	/* LE is unsigned, '<=' */
#define BPF_JSGT	0x60	/* SGT is signed '>', GT in x86 */
#define BPF_JSGE	0x70	/* SGE is signed '>=', GE in x86 */
#define BPF_JSLT	0xc0	/* SLT is signed, '<' */
#define BPF_JSLE	0xd0	/* SLE is signed, '<=' */
#define BPF_CALL	0x80	/* function call */
#define BPF_EXIT	0x90	/* function return */

/* Register numbers */
enum {
	BPF_REG_0 = 0,
	BPF_REG_1,
	BPF_REG_2,
	BPF_REG_3,
	BPF_REG_4,
	BPF_REG_5,
	BPF_REG_6,
	BPF_REG_7,
	BPF_REG_8,
	BPF_REG_9,
	BPF_REG_10,
	__MAX_BPF_REG,
};

/* BPF has 10 general purpose 64-bit registers and stack frame. */
#define MAX_BPF_REG	__MAX_BPF_REG

struct bpf_insn {
	__u8	code;		/* opcode */
	__u8	dst_reg:4;	/* dest register */
	__u8	src_reg:4;	/* source register */
	__s16	off;		/* signed offset */
	__s32	imm;		/* signed immediate constant */
};
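
/* Illustrative sketch (editor's addition, not part of the ABI): hand-encoding
 * instructions with the constants above. BPF_X and BPF_JMP come from
 * <linux/bpf_common.h>. This encodes "r0 = r1" followed by "exit":
 *
 *	struct bpf_insn prog[] = {
 *		{ .code = BPF_ALU64 | BPF_MOV | BPF_X,
 *		  .dst_reg = BPF_REG_0, .src_reg = BPF_REG_1 },
 *		{ .code = BPF_JMP | BPF_EXIT },
 *	};
 */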

/* Key of a BPF_MAP_TYPE_LPM_TRIE entry */
struct bpf_lpm_trie_key {
	__u32	prefixlen;	/* up to 32 for AF_INET, 128 for AF_INET6 */
	__u8	data[0];	/* Arbitrary size */
};
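
/* Illustrative sketch (editor's addition): building the key for an AF_INET
 * LPM trie entry covering 192.168.0.0/24. The zero-length array carries the
 * address bytes in network byte order; the wrapper struct shown here is an
 * assumption for demonstration only:
 *
 *	struct {
 *		struct bpf_lpm_trie_key key;
 *		__u8 ipv4[4];
 *	} lpm = { .key.prefixlen = 24, .ipv4 = { 192, 168, 0, 0 } };
 */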

/* BPF syscall commands, see bpf(2) man-page for details. */
enum bpf_cmd {
	BPF_MAP_CREATE,
	BPF_MAP_LOOKUP_ELEM,
	BPF_MAP_UPDATE_ELEM,
	BPF_MAP_DELETE_ELEM,
	BPF_MAP_GET_NEXT_KEY,
	BPF_PROG_LOAD,
	BPF_OBJ_PIN,
	BPF_OBJ_GET,
	BPF_PROG_ATTACH,
	BPF_PROG_DETACH,
	BPF_PROG_TEST_RUN,
	BPF_PROG_GET_NEXT_ID,
	BPF_MAP_GET_NEXT_ID,
	BPF_PROG_GET_FD_BY_ID,
	BPF_MAP_GET_FD_BY_ID,
	BPF_OBJ_GET_INFO_BY_FD,
	BPF_PROG_QUERY,
	BPF_RAW_TRACEPOINT_OPEN,
	BPF_BTF_LOAD,
};
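
/* Illustrative sketch (editor's addition): there is no libc wrapper for
 * bpf(2), so userspace typically issues these commands through syscall(2).
 * Assumes <unistd.h> and <sys/syscall.h>; creating an array map as an
 * example:
 *
 *	union bpf_attr attr = {
 *		.map_type    = BPF_MAP_TYPE_ARRAY,
 *		.key_size    = sizeof(__u32),
 *		.value_size  = sizeof(__u64),
 *		.max_entries = 256,
 *	};
 *	int map_fd = syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
 */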

enum bpf_map_type {
	BPF_MAP_TYPE_UNSPEC,
	BPF_MAP_TYPE_HASH,
	BPF_MAP_TYPE_ARRAY,
	BPF_MAP_TYPE_PROG_ARRAY,
	BPF_MAP_TYPE_PERF_EVENT_ARRAY,
	BPF_MAP_TYPE_PERCPU_HASH,
	BPF_MAP_TYPE_PERCPU_ARRAY,
	BPF_MAP_TYPE_STACK_TRACE,
	BPF_MAP_TYPE_CGROUP_ARRAY,
	BPF_MAP_TYPE_LRU_HASH,
	BPF_MAP_TYPE_LRU_PERCPU_HASH,
	BPF_MAP_TYPE_LPM_TRIE,
	BPF_MAP_TYPE_ARRAY_OF_MAPS,
	BPF_MAP_TYPE_HASH_OF_MAPS,
	BPF_MAP_TYPE_DEVMAP,
	BPF_MAP_TYPE_SOCKMAP,
	BPF_MAP_TYPE_CPUMAP,
};

enum bpf_prog_type {
	BPF_PROG_TYPE_UNSPEC,
	BPF_PROG_TYPE_SOCKET_FILTER,
	BPF_PROG_TYPE_KPROBE,
	BPF_PROG_TYPE_SCHED_CLS,
	BPF_PROG_TYPE_SCHED_ACT,
	BPF_PROG_TYPE_TRACEPOINT,
	BPF_PROG_TYPE_XDP,
	BPF_PROG_TYPE_PERF_EVENT,
	BPF_PROG_TYPE_CGROUP_SKB,
	BPF_PROG_TYPE_CGROUP_SOCK,
	BPF_PROG_TYPE_LWT_IN,
	BPF_PROG_TYPE_LWT_OUT,
	BPF_PROG_TYPE_LWT_XMIT,
	BPF_PROG_TYPE_SOCK_OPS,
	BPF_PROG_TYPE_SK_SKB,
	BPF_PROG_TYPE_CGROUP_DEVICE,
	BPF_PROG_TYPE_SK_MSG,
	BPF_PROG_TYPE_RAW_TRACEPOINT,
	BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
};

enum bpf_attach_type {
	BPF_CGROUP_INET_INGRESS,
	BPF_CGROUP_INET_EGRESS,
	BPF_CGROUP_INET_SOCK_CREATE,
	BPF_CGROUP_SOCK_OPS,
	BPF_SK_SKB_STREAM_PARSER,
	BPF_SK_SKB_STREAM_VERDICT,
	BPF_CGROUP_DEVICE,
	BPF_SK_MSG_VERDICT,
	BPF_CGROUP_INET4_BIND,
	BPF_CGROUP_INET6_BIND,
	BPF_CGROUP_INET4_CONNECT,
	BPF_CGROUP_INET6_CONNECT,
	BPF_CGROUP_INET4_POST_BIND,
	BPF_CGROUP_INET6_POST_BIND,
	__MAX_BPF_ATTACH_TYPE
};

#define MAX_BPF_ATTACH_TYPE __MAX_BPF_ATTACH_TYPE

/* cgroup-bpf attach flags used in BPF_PROG_ATTACH command
 *
 * NONE(default): No further bpf programs allowed in the subtree.
 *
 * BPF_F_ALLOW_OVERRIDE: If a sub-cgroup installs some bpf program,
 * the program in this cgroup yields to the sub-cgroup program.
 *
 * BPF_F_ALLOW_MULTI: If a sub-cgroup installs some bpf program,
 * that cgroup program gets run in addition to the program in this cgroup.
 *
 * Only one program is allowed to be attached to a cgroup with
 * NONE or BPF_F_ALLOW_OVERRIDE flag.
 * Attaching another program on top of NONE or BPF_F_ALLOW_OVERRIDE will
 * release the old program and attach the new one. Attach flags have to match.
 *
 * Multiple programs are allowed to be attached to a cgroup with
 * BPF_F_ALLOW_MULTI flag. They are executed in FIFO order
 * (those that were attached first, run first).
 * The programs of the sub-cgroup are executed first, then the programs of
 * this cgroup and then the programs of the parent cgroup.
 * When a child program makes a decision (like picking a TCP CA or sock bind),
 * the parent program has a chance to override it.
 *
 * A cgroup with MULTI or OVERRIDE flag allows any attach flags in sub-cgroups.
 * A cgroup with NONE doesn't allow any programs in sub-cgroups.
 * Ex1:
 * cgrp1 (MULTI progs A, B) ->
 *    cgrp2 (OVERRIDE prog C) ->
 *      cgrp3 (MULTI prog D) ->
 *        cgrp4 (OVERRIDE prog E) ->
 *          cgrp5 (NONE prog F)
 * the event in cgrp5 triggers execution of F,D,A,B in that order.
 * if prog F is detached, the execution is E,D,A,B
 * if prog F and D are detached, the execution is E,A,B
 * if prog F, E and D are detached, the execution is C,A,B
 *
 * All eligible programs are executed regardless of return code from
 * earlier programs.
 */
#define BPF_F_ALLOW_OVERRIDE	(1U << 0)
#define BPF_F_ALLOW_MULTI	(1U << 1)
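
/* Illustrative sketch (editor's addition): attaching a program to a cgroup
 * in multi-program mode. cgroup_fd (an open fd of the cgroup directory) and
 * prog_fd (a loaded BPF_PROG_TYPE_CGROUP_SKB program) are assumptions:
 *
 *	union bpf_attr attr = {
 *		.target_fd     = cgroup_fd,
 *		.attach_bpf_fd = prog_fd,
 *		.attach_type   = BPF_CGROUP_INET_INGRESS,
 *		.attach_flags  = BPF_F_ALLOW_MULTI,
 *	};
 *	syscall(__NR_bpf, BPF_PROG_ATTACH, &attr, sizeof(attr));
 */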

/* If BPF_F_STRICT_ALIGNMENT is used in BPF_PROG_LOAD command, the
 * verifier will perform strict alignment checking as if the kernel
 * has been built with CONFIG_EFFICIENT_UNALIGNED_ACCESS not set,
 * and NET_IP_ALIGN defined to 2.
 */
#define BPF_F_STRICT_ALIGNMENT	(1U << 0)

/* when bpf_ldimm64->src_reg == BPF_PSEUDO_MAP_FD, bpf_ldimm64->imm == fd */
#define BPF_PSEUDO_MAP_FD	1

/* when bpf_call->src_reg == BPF_PSEUDO_CALL, bpf_call->imm == pc-relative
 * offset to another bpf function
 */
#define BPF_PSEUDO_CALL		1

/* flags for BPF_MAP_UPDATE_ELEM command */
#define BPF_ANY		0 /* create new element or update existing */
#define BPF_NOEXIST	1 /* create new element if it didn't exist */
#define BPF_EXIST	2 /* update existing element */
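
/* Illustrative sketch (editor's addition): inserting an element only if the
 * key is not already present. map_fd, key and value are assumptions; the
 * pointer casts follow the usual ptr-to-u64 idiom for __aligned_u64 fields.
 * With BPF_NOEXIST, a duplicate key fails with errno == EEXIST:
 *
 *	union bpf_attr attr = {
 *		.map_fd = map_fd,
 *		.key    = (__u64)(unsigned long)&key,
 *		.value  = (__u64)(unsigned long)&value,
 *		.flags  = BPF_NOEXIST,
 *	};
 *	int err = syscall(__NR_bpf, BPF_MAP_UPDATE_ELEM, &attr, sizeof(attr));
 */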

/* flags for BPF_MAP_CREATE command */
#define BPF_F_NO_PREALLOC	(1U << 0)
/* Instead of having one common LRU list in the
 * BPF_MAP_TYPE_LRU_[PERCPU_]HASH map, use a percpu LRU list
 * which can scale and perform better.
 * Note, the LRU nodes (including free nodes) cannot be moved
 * across different LRU lists.
 */
#define BPF_F_NO_COMMON_LRU	(1U << 1)
/* Specify numa node during map creation */
#define BPF_F_NUMA_NODE		(1U << 2)

/* flags for BPF_PROG_QUERY */
#define BPF_F_QUERY_EFFECTIVE	(1U << 0)

#define BPF_OBJ_NAME_LEN 16U

/* Flags for accessing BPF object */
#define BPF_F_RDONLY		(1U << 3)
#define BPF_F_WRONLY		(1U << 4)

/* Flag for stack_map, store build_id+offset instead of pointer */
#define BPF_F_STACK_BUILD_ID	(1U << 5)

enum bpf_stack_build_id_status {
	/* user space needs an empty entry to identify the end of a trace */
	BPF_STACK_BUILD_ID_EMPTY = 0,
	/* with valid build_id and offset */
	BPF_STACK_BUILD_ID_VALID = 1,
	/* couldn't get build_id, fallback to ip */
	BPF_STACK_BUILD_ID_IP = 2,
};

#define BPF_BUILD_ID_SIZE 20
struct bpf_stack_build_id {
	__s32		status;
	unsigned char	build_id[BPF_BUILD_ID_SIZE];
	union {
		__u64	offset;
		__u64	ip;
	};
};

union bpf_attr {
	struct { /* anonymous struct used by BPF_MAP_CREATE command */
		__u32	map_type;	/* one of enum bpf_map_type */
		__u32	key_size;	/* size of key in bytes */
		__u32	value_size;	/* size of value in bytes */
		__u32	max_entries;	/* max number of entries in a map */
		__u32	map_flags;	/* BPF_MAP_CREATE related
					 * flags defined above.
					 */
		__u32	inner_map_fd;	/* fd pointing to the inner map */
		__u32	numa_node;	/* numa node (effective only if
					 * BPF_F_NUMA_NODE is set).
					 */
		char	map_name[BPF_OBJ_NAME_LEN];
		__u32	map_ifindex;	/* ifindex of netdev to create on */
		__u32	btf_fd;		/* fd pointing to a BTF type data */
		__u32	btf_key_id;	/* BTF type_id of the key */
		__u32	btf_value_id;	/* BTF type_id of the value */
	};

	struct { /* anonymous struct used by BPF_MAP_*_ELEM commands */
		__u32		map_fd;
		__aligned_u64	key;
		union {
			__aligned_u64 value;
			__aligned_u64 next_key;
		};
		__u64		flags;
	};

	struct { /* anonymous struct used by BPF_PROG_LOAD command */
		__u32		prog_type;	/* one of enum bpf_prog_type */
		__u32		insn_cnt;
		__aligned_u64	insns;
		__aligned_u64	license;
		__u32		log_level;	/* verbosity level of verifier */
		__u32		log_size;	/* size of user buffer */
		__aligned_u64	log_buf;	/* user supplied buffer */
		__u32		kern_version;	/* checked when prog_type=kprobe */
		__u32		prog_flags;
		char		prog_name[BPF_OBJ_NAME_LEN];
		__u32		prog_ifindex;	/* ifindex of netdev to prep for */
		/* For some prog types expected attach type must be known at
		 * load time to verify attach type specific parts of prog
		 * (context accesses, allowed helpers, etc).
		 */
		__u32		expected_attach_type;
	};

	struct { /* anonymous struct used by BPF_OBJ_* commands */
		__aligned_u64	pathname;
		__u32		bpf_fd;
		__u32		file_flags;
	};

	struct { /* anonymous struct used by BPF_PROG_ATTACH/DETACH commands */
		__u32		target_fd;	/* container object to attach to */
		__u32		attach_bpf_fd;	/* eBPF program to attach */
		__u32		attach_type;
		__u32		attach_flags;
	};

	struct { /* anonymous struct used by BPF_PROG_TEST_RUN command */
		__u32		prog_fd;
		__u32		retval;
		__u32		data_size_in;
		__u32		data_size_out;
		__aligned_u64	data_in;
		__aligned_u64	data_out;
		__u32		repeat;
		__u32		duration;
	} test;

	struct { /* anonymous struct used by BPF_*_GET_*_ID */
		union {
			__u32	start_id;
			__u32	prog_id;
			__u32	map_id;
		};
		__u32		next_id;
		__u32		open_flags;
	};

	struct { /* anonymous struct used by BPF_OBJ_GET_INFO_BY_FD */
		__u32		bpf_fd;
		__u32		info_len;
		__aligned_u64	info;
	} info;

	struct { /* anonymous struct used by BPF_PROG_QUERY command */
		__u32		target_fd;	/* container object to query */
		__u32		attach_type;
		__u32		query_flags;
		__u32		attach_flags;
		__aligned_u64	prog_ids;
		__u32		prog_cnt;
	} query;

	struct {
		__u64		name;
		__u32		prog_fd;
	} raw_tracepoint;

	struct { /* anonymous struct for BPF_BTF_LOAD */
		__aligned_u64	btf;
		__aligned_u64	btf_log_buf;
		__u32		btf_size;
		__u32		btf_log_size;
		__u32		btf_log_level;
	};
} __attribute__((aligned(8)));
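
/* Illustrative sketch (editor's addition): loading a socket filter through
 * BPF_PROG_LOAD. "prog" would be an array of struct bpf_insn such as the one
 * sketched earlier; log_buf is optional and only filled when log_level > 0.
 * All identifiers here are assumptions:
 *
 *	char log[4096];
 *	union bpf_attr attr = {
 *		.prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
 *		.insns     = (__u64)(unsigned long)prog,
 *		.insn_cnt  = sizeof(prog) / sizeof(prog[0]),
 *		.license   = (__u64)(unsigned long)"GPL",
 *		.log_buf   = (__u64)(unsigned long)log,
 *		.log_size  = sizeof(log),
 *		.log_level = 1,
 *	};
 *	int prog_fd = syscall(__NR_bpf, BPF_PROG_LOAD, &attr, sizeof(attr));
 */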

/* BPF helper function descriptions:
 *
 * void *bpf_map_lookup_elem(&map, &key)
 *	Return: Map value or NULL
 *
 * int bpf_map_update_elem(&map, &key, &value, flags)
 *	Return: 0 on success or negative error
 *
 * int bpf_map_delete_elem(&map, &key)
 *	Return: 0 on success or negative error
 *
 * int bpf_probe_read(void *dst, int size, void *src)
 *	Return: 0 on success or negative error
 *
 * u64 bpf_ktime_get_ns(void)
 *	Return: current ktime
 *
 * int bpf_trace_printk(const char *fmt, int fmt_size, ...)
 *	Return: length of buffer written or negative error
 *
 * u32 bpf_prandom_u32(void)
 *	Return: random value
 *
 * u32 bpf_raw_smp_processor_id(void)
 *	Return: SMP processor ID
 *
 * int bpf_skb_store_bytes(skb, offset, from, len, flags)
 *	store bytes into packet
 *	@skb: pointer to skb
 *	@offset: offset within packet from skb->mac_header
 *	@from: pointer where to copy bytes from
 *	@len: number of bytes to store into packet
 *	@flags: bit 0 - if true, recompute skb->csum
 *		other bits - reserved
 *	Return: 0 on success or negative error
 *
 * int bpf_l3_csum_replace(skb, offset, from, to, flags)
 *	recompute IP checksum
 *	@skb: pointer to skb
 *	@offset: offset within packet where IP checksum is located
 *	@from: old value of header field
 *	@to: new value of header field
 *	@flags: bits 0-3 - size of header field
 *		other bits - reserved
 *	Return: 0 on success or negative error
 *
 * int bpf_l4_csum_replace(skb, offset, from, to, flags)
 *	recompute TCP/UDP checksum
 *	@skb: pointer to skb
 *	@offset: offset within packet where TCP/UDP checksum is located
 *	@from: old value of header field
 *	@to: new value of header field
 *	@flags: bits 0-3 - size of header field
 *		bit 4 - is pseudo header
 *		other bits - reserved
 *	Return: 0 on success or negative error
 *
 * int bpf_tail_call(ctx, prog_array_map, index)
 *	jump into another BPF program
 *	@ctx: context pointer passed to next program
 *	@prog_array_map: pointer to map which type is BPF_MAP_TYPE_PROG_ARRAY
 *	@index: 32-bit index inside array that selects specific program to run
 *	Return: 0 on success or negative error
 *
 * int bpf_clone_redirect(skb, ifindex, flags)
 *	redirect to another netdev
 *	@skb: pointer to skb
 *	@ifindex: ifindex of the net device
 *	@flags: bit 0 - if set, redirect to ingress instead of egress
 *		other bits - reserved
 *	Return: 0 on success or negative error
 *
 * u64 bpf_get_current_pid_tgid(void)
 *	Return: current->tgid << 32 | current->pid
 *
 * u64 bpf_get_current_uid_gid(void)
 *	Return: current_gid << 32 | current_uid
 *
 * int bpf_get_current_comm(char *buf, int size_of_buf)
 *	stores current->comm into buf
 *	Return: 0 on success or negative error
 *
 * u32 bpf_get_cgroup_classid(skb)
 *	retrieve a proc's classid
 *	@skb: pointer to skb
 *	Return: classid if != 0
 *
 * int bpf_skb_vlan_push(skb, vlan_proto, vlan_tci)
 *	Return: 0 on success or negative error
 *
 * int bpf_skb_vlan_pop(skb)
 *	Return: 0 on success or negative error
 *
 * int bpf_skb_get_tunnel_key(skb, key, size, flags)
 * int bpf_skb_set_tunnel_key(skb, key, size, flags)
 *	retrieve or populate tunnel metadata
 *	@skb: pointer to skb
 *	@key: pointer to 'struct bpf_tunnel_key'
 *	@size: size of 'struct bpf_tunnel_key'
 *	@flags: room for future extensions
 *	Return: 0 on success or negative error
 *
 * u64 bpf_perf_event_read(map, flags)
 *	read perf event counter value
 *	@map: pointer to perf_event_array map
 *	@flags: index of event in the map or bitmask flags
 *	Return: value of perf event counter read or error code
 *
 * int bpf_redirect(ifindex, flags)
 *	redirect to another netdev
 *	@ifindex: ifindex of the net device
 *	@flags:
 *	  cls_bpf:
 *	    bit 0 - if set, redirect to ingress instead of egress
 *	    other bits - reserved
 *	  xdp_bpf:
 *	    all bits - reserved
 *	Return: cls_bpf: TC_ACT_REDIRECT on success or TC_ACT_SHOT on error
 *		xdp_bpf: XDP_REDIRECT on success or XDP_ABORTED on error
 *
 * int bpf_redirect_map(map, key, flags)
 *	redirect to endpoint in map
 *	@map: pointer to dev map
 *	@key: index in map to lookup
 *	@flags: --
 *	Return: XDP_REDIRECT on success or XDP_ABORTED on error
 *
 * u32 bpf_get_route_realm(skb)
 *	retrieve a dst's tclassid
 *	@skb: pointer to skb
 *	Return: realm if != 0
 *
 * int bpf_perf_event_output(ctx, map, flags, data, size)
 *	output perf raw sample
 *	@ctx: struct pt_regs*
 *	@map: pointer to perf_event_array map
 *	@flags: index of event in the map or bitmask flags
 *	@data: data on stack to be output as raw data
 *	@size: size of data
 *	Return: 0 on success or negative error
 *
 * int bpf_get_stackid(ctx, map, flags)
 *	walk user or kernel stack and return id
 *	@ctx: struct pt_regs*
 *	@map: pointer to stack_trace map
 *	@flags: bits 0-7 - number of stack frames to skip
 *		bit 8 - collect user stack instead of kernel
 *		bit 9 - compare stacks by hash only
 *		bit 10 - if two different stacks hash into the same stackid
 *			 discard old
 *		other bits - reserved
 *	Return: >= 0 stackid on success or negative error
 *
 * s64 bpf_csum_diff(from, from_size, to, to_size, seed)
 *	calculate csum diff
 *	@from: raw from buffer
 *	@from_size: length of from buffer
 *	@to: raw to buffer
 *	@to_size: length of to buffer
 *	@seed: optional seed
 *	Return: csum result or negative error code
 *
 * int bpf_skb_get_tunnel_opt(skb, opt, size)
 *	retrieve tunnel options metadata
 *	@skb: pointer to skb
 *	@opt: pointer to raw tunnel option data
 *	@size: size of @opt
 *	Return: option size
 *
 * int bpf_skb_set_tunnel_opt(skb, opt, size)
 *	populate tunnel options metadata
 *	@skb: pointer to skb
 *	@opt: pointer to raw tunnel option data
 *	@size: size of @opt
 *	Return: 0 on success or negative error
 *
 * int bpf_skb_change_proto(skb, proto, flags)
 *	Change protocol of the skb. Currently supported are v4 -> v6 and
 *	v6 -> v4 transitions. The helper will also resize the skb. The eBPF
 *	program is expected to fill the new headers via skb_store_bytes
 *	and lX_csum_replace.
 *	@skb: pointer to skb
 *	@proto: new skb->protocol type
 *	@flags: reserved
 *	Return: 0 on success or negative error
 *
 * int bpf_skb_change_type(skb, type)
 *	Change packet type of skb.
 *	@skb: pointer to skb
 *	@type: new skb->pkt_type type
 *	Return: 0 on success or negative error
 *
 * int bpf_skb_under_cgroup(skb, map, index)
 *	Check cgroup2 membership of skb
 *	@skb: pointer to skb
 *	@map: pointer to bpf_map in BPF_MAP_TYPE_CGROUP_ARRAY type
 *	@index: index of the cgroup in the bpf_map
 *	Return:
 *	  == 0 skb failed the cgroup2 descendant test
 *	  == 1 skb succeeded the cgroup2 descendant test
 *	   < 0 error
 *
 * u32 bpf_get_hash_recalc(skb)
 *	Retrieve and possibly recalculate skb->hash.
 *	@skb: pointer to skb
 *	Return: hash
 *
 * u64 bpf_get_current_task(void)
 *	Returns current task_struct
 *	Return: current
 *
 * int bpf_probe_write_user(void *dst, void *src, int len)
 *	safely attempt to write to a location
 *	@dst: destination address in userspace
 *	@src: source address on stack
 *	@len: number of bytes to copy
 *	Return: 0 on success or negative error
 *
 * int bpf_current_task_under_cgroup(map, index)
 *	Check cgroup2 membership of current task
 *	@map: pointer to bpf_map in BPF_MAP_TYPE_CGROUP_ARRAY type
 *	@index: index of the cgroup in the bpf_map
 *	Return:
 *	  == 0 current failed the cgroup2 descendant test
 *	  == 1 current succeeded the cgroup2 descendant test
 *	   < 0 error
 *
 * int bpf_skb_change_tail(skb, len, flags)
 *	The helper will resize the skb to the given new size, to be used
 *	e.g. with control messages.
 *	@skb: pointer to skb
 *	@len: new skb length
 *	@flags: reserved
 *	Return: 0 on success or negative error
 *
 * int bpf_skb_pull_data(skb, len)
 *	The helper will pull in non-linear data in case the skb is non-linear
 *	and not all of len is part of the linear section. Only needed for
 *	read/write with direct packet access.
 *	@skb: pointer to skb
 *	@len: len to make read/writeable
 *	Return: 0 on success or negative error
 *
 * s64 bpf_csum_update(skb, csum)
 *	Adds csum into skb->csum in case of CHECKSUM_COMPLETE.
 *	@skb: pointer to skb
 *	@csum: csum to add
 *	Return: csum on success or negative error
 *
 * void bpf_set_hash_invalid(skb)
 *	Invalidate current skb->hash.
 *	@skb: pointer to skb
 *
 * int bpf_get_numa_node_id()
 *	Return: Id of current NUMA node.
 *
 * int bpf_skb_change_head(skb, len, flags)
 *	Grows headroom of skb and adjusts MAC header offset accordingly.
 *	Will extend/reallocate as required automatically.
 *	May change skb data pointer and will thus invalidate any check
 *	performed for direct packet access.
 *	@skb: pointer to skb
 *	@len: length of header to be pushed in front
 *	@flags: Flags (unused for now)
 *	Return: 0 on success or negative error
 *
 * int bpf_xdp_adjust_head(xdp_md, delta)
 *	Adjust the xdp_md.data by delta
 *	@xdp_md: pointer to xdp_md
 *	@delta: A positive/negative integer to be added to xdp_md.data
 *	Return: 0 on success or negative on error
 *
 * int bpf_probe_read_str(void *dst, int size, const void *unsafe_ptr)
 *	Copy a NUL terminated string from unsafe address. In case the string
 *	length is smaller than size, the target is not padded with further NUL
 *	bytes. In case the string length is larger than size, just count-1
 *	bytes are copied and the last byte is set to NUL.
 *	@dst: destination address
 *	@size: maximum number of bytes to copy, including the trailing NUL
 *	@unsafe_ptr: unsafe address
 *	Return:
 *	  > 0 length of the string including the trailing NUL on success
 *	  < 0 error
 *
 * u64 bpf_get_socket_cookie(skb)
 *	Get the cookie for the socket stored inside sk_buff.
 *	@skb: pointer to skb
 *	Return: 8 Bytes non-decreasing number on success or 0 if the socket
 *	field is missing inside sk_buff
 *
 * u32 bpf_get_socket_uid(skb)
 *	Get the owner uid of the socket stored inside sk_buff.
 *	@skb: pointer to skb
 *	Return: uid of the socket owner on success or overflowuid if failed.
 *
 * u32 bpf_set_hash(skb, hash)
 *	Set full skb->hash.
 *	@skb: pointer to skb
 *	@hash: hash to set
 *
 * int bpf_setsockopt(bpf_socket, level, optname, optval, optlen)
 *	Calls setsockopt. Not all opts are available, only those with
 *	integer optvals plus TCP_CONGESTION.
 *	Supported levels: SOL_SOCKET and IPPROTO_TCP
 *	@bpf_socket: pointer to bpf_socket
 *	@level: SOL_SOCKET or IPPROTO_TCP
 *	@optname: option name
 *	@optval: pointer to option value
 *	@optlen: length of optval in bytes
 *	Return: 0 or negative error
 *
 * int bpf_getsockopt(bpf_socket, level, optname, optval, optlen)
 *	Calls getsockopt. Not all opts are available.
 *	Supported levels: IPPROTO_TCP
 *	@bpf_socket: pointer to bpf_socket
 *	@level: IPPROTO_TCP
 *	@optname: option name
 *	@optval: pointer to option value
 *	@optlen: length of optval in bytes
 *	Return: 0 or negative error
 *
 * int bpf_sock_ops_cb_flags_set(bpf_sock_ops, flags)
 *	Set callback flags for sock_ops
 *	@bpf_sock_ops: pointer to bpf_sock_ops_kern struct
 *	@flags: flags value
 *	Return: 0 for no error
 *		-EINVAL if there is no full tcp socket
 *		bits in flags that are not supported by the current kernel
 *
 * int bpf_skb_adjust_room(skb, len_diff, mode, flags)
 *	Grow or shrink room in sk_buff.
 *	@skb: pointer to skb
 *	@len_diff: (signed) amount of room to grow/shrink
 *	@mode: operation mode (enum bpf_adj_room_mode)
 *	@flags: reserved for future use
 *	Return: 0 on success or negative error code
 *
 * int bpf_sk_redirect_map(map, key, flags)
 *	Redirect skb to a sock in map using key as a lookup key for the
 *	sock in map.
 *	@map: pointer to sockmap
 *	@key: key to lookup sock in map
 *	@flags: reserved for future use
 *	Return: SK_PASS
 *
 * int bpf_sock_map_update(skops, map, key, flags)
 *	@skops: pointer to bpf_sock_ops
 *	@map: pointer to sockmap to update
 *	@key: key to insert/update sock in map
 *	@flags: same flags as map update elem
 *
 * int bpf_xdp_adjust_meta(xdp_md, delta)
 *	Adjust the xdp_md.data_meta by delta
 *	@xdp_md: pointer to xdp_md
 *	@delta: A positive/negative integer to be added to xdp_md.data_meta
 *	Return: 0 on success or negative on error
 *
 * int bpf_perf_event_read_value(map, flags, buf, buf_size)
 *	read perf event counter value and perf event enabled/running time
 *	@map: pointer to perf_event_array map
 *	@flags: index of event in the map or bitmask flags
 *	@buf: buf to fill
 *	@buf_size: size of the buf
 *	Return: 0 on success or negative error code
 *
 * int bpf_perf_prog_read_value(ctx, buf, buf_size)
 *	read perf prog attached perf event counter and enabled/running time
 *	@ctx: pointer to ctx
 *	@buf: buf to fill
 *	@buf_size: size of the buf
 *	Return: 0 on success or negative error code
 *
 * int bpf_override_return(pt_regs, rc)
 *	@pt_regs: pointer to struct pt_regs
 *	@rc: the return value to set
 *
 * int bpf_msg_redirect_map(map, key, flags)
 *	Redirect msg to a sock in map using key as a lookup key for the
 *	sock in map.
 *	@map: pointer to sockmap
 *	@key: key to lookup sock in map
 *	@flags: reserved for future use
 *	Return: SK_PASS
 *
 * int bpf_bind(ctx, addr, addr_len)
 *	Bind socket to address. Only binding to IP is supported, no port can
 *	be set in addr.
 *	@ctx: pointer to context of type bpf_sock_addr
 *	@addr: pointer to struct sockaddr to bind socket to
 *	@addr_len: length of sockaddr structure
 *	Return: 0 on success or negative error code
 *
 * int bpf_xdp_adjust_tail(xdp_md, delta)
 *	Adjust the xdp_md.data_end by delta. Only shrinking of packet's
 *	size is supported.
 *	@xdp_md: pointer to xdp_md
 *	@delta: A negative integer to be added to xdp_md.data_end
 *	Return: 0 on success or negative on error
 *
 * int bpf_skb_get_xfrm_state(skb, index, xfrm_state, size, flags)
 *	retrieve XFRM state
 *	@skb: pointer to skb
 *	@index: index of the xfrm state in the secpath
 *	@xfrm_state: pointer to 'struct bpf_xfrm_state'
 *	@size: size of 'struct bpf_xfrm_state'
 *	@flags: room for future extensions
 *	Return: 0 on success or negative error
 */
#define __BPF_FUNC_MAPPER(FN)		\
	FN(unspec),			\
	FN(map_lookup_elem),		\
	FN(map_update_elem),		\
	FN(map_delete_elem),		\
	FN(probe_read),			\
	FN(ktime_get_ns),		\
	FN(trace_printk),		\
	FN(get_prandom_u32),		\
	FN(get_smp_processor_id),	\
	FN(skb_store_bytes),		\
	FN(l3_csum_replace),		\
	FN(l4_csum_replace),		\
	FN(tail_call),			\
	FN(clone_redirect),		\
	FN(get_current_pid_tgid),	\
	FN(get_current_uid_gid),	\
	FN(get_current_comm),		\
	FN(get_cgroup_classid),		\
	FN(skb_vlan_push),		\
	FN(skb_vlan_pop),		\
	FN(skb_get_tunnel_key),		\
	FN(skb_set_tunnel_key),		\
	FN(perf_event_read),		\
	FN(redirect),			\
	FN(get_route_realm),		\
	FN(perf_event_output),		\
	FN(skb_load_bytes),		\
	FN(get_stackid),		\
	FN(csum_diff),			\
	FN(skb_get_tunnel_opt),		\
	FN(skb_set_tunnel_opt),		\
	FN(skb_change_proto),		\
	FN(skb_change_type),		\
	FN(skb_under_cgroup),		\
	FN(get_hash_recalc),		\
	FN(get_current_task),		\
	FN(probe_write_user),		\
	FN(current_task_under_cgroup),	\
	FN(skb_change_tail),		\
	FN(skb_pull_data),		\
	FN(csum_update),		\
	FN(set_hash_invalid),		\
	FN(get_numa_node_id),		\
	FN(skb_change_head),		\
	FN(xdp_adjust_head),		\
	FN(probe_read_str),		\
	FN(get_socket_cookie),		\
	FN(get_socket_uid),		\
	FN(set_hash),			\
	FN(setsockopt),			\
	FN(skb_adjust_room),		\
	FN(redirect_map),		\
	FN(sk_redirect_map),		\
	FN(sock_map_update),		\
	FN(xdp_adjust_meta),		\
	FN(perf_event_read_value),	\
	FN(perf_prog_read_value),	\
	FN(getsockopt),			\
	FN(override_return),		\
	FN(sock_ops_cb_flags_set),	\
	FN(msg_redirect_map),		\
	FN(msg_apply_bytes),		\
	FN(msg_cork_bytes),		\
	FN(msg_pull_data),		\
	FN(bind),			\
	FN(xdp_adjust_tail),		\
	FN(skb_get_xfrm_state),

/* integer value in 'imm' field of BPF_CALL instruction selects which helper
 * function eBPF program intends to call
 */
#define __BPF_ENUM_FN(x) BPF_FUNC_ ## x
enum bpf_func_id {
	__BPF_FUNC_MAPPER(__BPF_ENUM_FN)
	__BPF_FUNC_MAX_ID,
};
#undef __BPF_ENUM_FN
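
/* Illustrative sketch (editor's addition): BPF programs reach these helpers
 * by calling through a function pointer whose value is the integer helper
 * id; this is the idiom used by samples/bpf/bpf_helpers.h, reproduced here
 * as an assumption:
 *
 *	static void *(*bpf_map_lookup_elem)(void *map, void *key) =
 *		(void *)BPF_FUNC_map_lookup_elem;
 */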

/* All flags used by eBPF helper functions, placed here. */

/* BPF_FUNC_skb_store_bytes flags. */
#define BPF_F_RECOMPUTE_CSUM		(1ULL << 0)
#define BPF_F_INVALIDATE_HASH		(1ULL << 1)

/* BPF_FUNC_l3_csum_replace and BPF_FUNC_l4_csum_replace flags.
 * First 4 bits are for passing the header field size.
 */
#define BPF_F_HDR_FIELD_MASK		0xfULL

/* BPF_FUNC_l4_csum_replace flags. */
#define BPF_F_PSEUDO_HDR		(1ULL << 4)
#define BPF_F_MARK_MANGLED_0		(1ULL << 5)
#define BPF_F_MARK_ENFORCE		(1ULL << 6)

/* BPF_FUNC_clone_redirect and BPF_FUNC_redirect flags. */
#define BPF_F_INGRESS			(1ULL << 0)

/* BPF_FUNC_skb_set_tunnel_key and BPF_FUNC_skb_get_tunnel_key flags. */
#define BPF_F_TUNINFO_IPV6		(1ULL << 0)

/* BPF_FUNC_get_stackid flags. */
#define BPF_F_SKIP_FIELD_MASK		0xffULL
#define BPF_F_USER_STACK		(1ULL << 8)
#define BPF_F_FAST_STACK_CMP		(1ULL << 9)
#define BPF_F_REUSE_STACKID		(1ULL << 10)

/* BPF_FUNC_skb_set_tunnel_key flags. */
#define BPF_F_ZERO_CSUM_TX		(1ULL << 1)
#define BPF_F_DONT_FRAGMENT		(1ULL << 2)
#define BPF_F_SEQ_NUMBER		(1ULL << 3)

/* BPF_FUNC_perf_event_output, BPF_FUNC_perf_event_read and
 * BPF_FUNC_perf_event_read_value flags.
 */
#define BPF_F_INDEX_MASK		0xffffffffULL
#define BPF_F_CURRENT_CPU		BPF_F_INDEX_MASK
/* BPF_FUNC_perf_event_output for sk_buff input context. */
#define BPF_F_CTXLEN_MASK		(0xfffffULL << 32)
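
/* Illustrative sketch (editor's addition): emitting a sample to the perf
 * ring buffer of the current CPU from a BPF program. "events" standing for
 * a BPF_MAP_TYPE_PERF_EVENT_ARRAY map is an assumption:
 *
 *	struct event { __u32 pid; } e = { .pid = bpf_get_current_pid_tgid() };
 *	bpf_perf_event_output(ctx, &events, BPF_F_CURRENT_CPU, &e, sizeof(e));
 */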

/* Mode for BPF_FUNC_skb_adjust_room helper. */
enum bpf_adj_room_mode {
	BPF_ADJ_ROOM_NET,
};

/* user accessible mirror of in-kernel sk_buff.
 * new fields can only be added to the end of this structure
 */
struct __sk_buff {
	__u32 len;
	__u32 pkt_type;
	__u32 mark;
	__u32 queue_mapping;
	__u32 protocol;
	__u32 vlan_present;
	__u32 vlan_tci;
	__u32 vlan_proto;
	__u32 priority;
	__u32 ingress_ifindex;
	__u32 ifindex;
	__u32 tc_index;
	__u32 cb[5];
	__u32 hash;
	__u32 tc_classid;
	__u32 data;
	__u32 data_end;
	__u32 napi_id;

	/* Accessed by BPF_PROG_TYPE_SK_SKB types from here to ... */
	__u32 family;
	__u32 remote_ip4;	/* Stored in network byte order */
	__u32 local_ip4;	/* Stored in network byte order */
	__u32 remote_ip6[4];	/* Stored in network byte order */
	__u32 local_ip6[4];	/* Stored in network byte order */
	__u32 remote_port;	/* Stored in network byte order */
	__u32 local_port;	/* stored in host byte order */
	/* ... here. */

	__u32 data_meta;
};

struct bpf_tunnel_key {
	__u32 tunnel_id;
	union {
		__u32 remote_ipv4;
		__u32 remote_ipv6[4];
	};
	__u8 tunnel_tos;
	__u8 tunnel_ttl;
	__u16 tunnel_ext;
	__u32 tunnel_label;
};

/* user accessible mirror of in-kernel xfrm_state.
 * new fields can only be added to the end of this structure
 */
struct bpf_xfrm_state {
	__u32 reqid;
	__u32 spi;	/* Stored in network byte order */
	__u16 family;
	union {
		__u32 remote_ipv4;	/* Stored in network byte order */
		__u32 remote_ipv6[4];	/* Stored in network byte order */
	};
};

/* Generic BPF return codes which all BPF program types may support.
 * The values are binary compatible with their TC_ACT_* counter-part to
 * provide backwards compatibility with existing SCHED_CLS and SCHED_ACT
 * programs.
 *
 * XDP is handled separately, see XDP_*.
 */
enum bpf_ret_code {
	BPF_OK = 0,
	/* 1 reserved */
	BPF_DROP = 2,
	/* 3-6 reserved */
	BPF_REDIRECT = 7,
	/* >127 are reserved for prog type specific return codes */
};

struct bpf_sock {
	__u32 bound_dev_if;
	__u32 family;
	__u32 type;
	__u32 protocol;
	__u32 mark;
	__u32 priority;
	__u32 src_ip4;		/* Allows 1,2,4-byte read.
				 * Stored in network byte order.
				 */
	__u32 src_ip6[4];	/* Allows 1,2,4-byte read.
				 * Stored in network byte order.
				 */
	__u32 src_port;		/* Allows 4-byte read.
				 * Stored in host byte order
				 */
};

#define XDP_PACKET_HEADROOM 256

/* User return codes for XDP prog type.
 * A valid XDP program must return one of these defined values. All other
 * return codes are reserved for future use. Unknown return codes will
 * result in packet drops and a warning via bpf_warn_invalid_xdp_action().
 */
enum xdp_action {
	XDP_ABORTED = 0,
	XDP_DROP,
	XDP_PASS,
	XDP_TX,
	XDP_REDIRECT,
};

/* user accessible metadata for XDP packet hook
 * new fields must be added to the end of this structure
 */
struct xdp_md {
	__u32 data;
	__u32 data_end;
	__u32 data_meta;
	/* Accesses below go through struct xdp_rxq_info */
	__u32 ingress_ifindex;	/* rxq->dev->ifindex */
	__u32 rx_queue_index;	/* rxq->queue_index  */
};
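
/* Illustrative sketch (editor's addition): a minimal XDP program over this
 * metadata, dropping packets shorter than an Ethernet header. struct ethhdr
 * is assumed to come from <linux/if_ether.h>:
 *
 *	int xdp_prog(struct xdp_md *ctx)
 *	{
 *		void *data = (void *)(long)ctx->data;
 *		void *data_end = (void *)(long)ctx->data_end;
 *
 *		if (data + sizeof(struct ethhdr) > data_end)
 *			return XDP_DROP;
 *		return XDP_PASS;
 *	}
 */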

enum sk_action {
	SK_DROP = 0,
	SK_PASS,
};

/* user accessible metadata for SK_MSG packet hook, new fields must
 * be added to the end of this structure
 */
struct sk_msg_md {
	void *data;
	void *data_end;
};

#define BPF_TAG_SIZE 8

struct bpf_prog_info {
	__u32 type;
	__u32 id;
	__u8  tag[BPF_TAG_SIZE];
	__u32 jited_prog_len;
	__u32 xlated_prog_len;
	__aligned_u64 jited_prog_insns;
	__aligned_u64 xlated_prog_insns;
	__u64 load_time;	/* ns since boottime */
	__u32 created_by_uid;
	__u32 nr_map_ids;
	__aligned_u64 map_ids;
	char  name[BPF_OBJ_NAME_LEN];
	__u32 ifindex;
	__u64 netns_dev;
	__u64 netns_ino;
} __attribute__((aligned(8)));

struct bpf_map_info {
	__u32 type;
	__u32 id;
	__u32 key_size;
	__u32 value_size;
	__u32 max_entries;
	__u32 map_flags;
	char  name[BPF_OBJ_NAME_LEN];
	__u32 ifindex;
	__u64 netns_dev;
	__u64 netns_ino;
} __attribute__((aligned(8)));
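
/* Illustrative sketch (editor's addition): retrieving one of the info
 * structures above for an open fd via BPF_OBJ_GET_INFO_BY_FD; the kernel
 * writes at most info_len bytes and updates info_len on return. prog_fd is
 * an assumption:
 *
 *	struct bpf_prog_info pinfo = {};
 *	union bpf_attr attr = {
 *		.info.bpf_fd   = prog_fd,
 *		.info.info_len = sizeof(pinfo),
 *		.info.info     = (__u64)(unsigned long)&pinfo,
 *	};
 *	syscall(__NR_bpf, BPF_OBJ_GET_INFO_BY_FD, &attr, sizeof(attr));
 */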

/* User bpf_sock_addr struct to access socket fields and sockaddr struct passed
 * by user and intended to be used by socket (e.g. to bind to, depends on
 * attach type).
 */
struct bpf_sock_addr {
	__u32 user_family;	/* Allows 4-byte read, but no write. */
	__u32 user_ip4;		/* Allows 1,2,4-byte read and 4-byte write.
				 * Stored in network byte order.
				 */
	__u32 user_ip6[4];	/* Allows 1,2,4-byte read and 4-byte write.
				 * Stored in network byte order.
				 */
	__u32 user_port;	/* Allows 4-byte read and write.
				 * Stored in network byte order
				 */
	__u32 family;		/* Allows 4-byte read, but no write */
	__u32 type;		/* Allows 4-byte read, but no write */
	__u32 protocol;		/* Allows 4-byte read, but no write */
};

/* User bpf_sock_ops struct to access socket values and specify request ops
 * and their replies.
 * Some of these fields are in network (big-endian) byte order and may need
 * to be converted before use (bpf_ntohl() defined in samples/bpf/bpf_endian.h).
 * New fields can only be added at the end of this structure
 */
struct bpf_sock_ops {
	__u32 op;
	union {
		__u32 args[4];		/* Optionally passed to bpf program */
		__u32 reply;		/* Returned by bpf program */
		__u32 replylong[4];	/* Optionally returned by bpf prog */
	};
	__u32 family;
	__u32 remote_ip4;	/* Stored in network byte order */
	__u32 local_ip4;	/* Stored in network byte order */
	__u32 remote_ip6[4];	/* Stored in network byte order */
	__u32 local_ip6[4];	/* Stored in network byte order */
	__u32 remote_port;	/* Stored in network byte order */
	__u32 local_port;	/* stored in host byte order */
	__u32 is_fullsock;	/* Some TCP fields are only valid if
				 * there is a full socket. If not, the
				 * fields read as zero.
				 */
	__u32 snd_cwnd;
	__u32 srtt_us;		/* Averaged RTT << 3 in usecs */
	__u32 bpf_sock_ops_cb_flags; /* flags defined in uapi/linux/tcp.h */
	__u32 state;
	__u32 rtt_min;
	__u32 snd_ssthresh;
	__u32 rcv_nxt;
	__u32 snd_nxt;
	__u32 snd_una;
	__u32 mss_cache;
	__u32 ecn_flags;
	__u32 rate_delivered;
	__u32 rate_interval_us;
	__u32 packets_out;
	__u32 retrans_out;
	__u32 total_retrans;
	__u32 segs_in;
	__u32 data_segs_in;
	__u32 segs_out;
	__u32 data_segs_out;
	__u32 lost_out;
	__u32 sacked_out;
	__u32 sk_txhash;
	__u64 bytes_received;
	__u64 bytes_acked;
};

/* Definitions for bpf_sock_ops_cb_flags */
#define BPF_SOCK_OPS_RTO_CB_FLAG	(1<<0)
#define BPF_SOCK_OPS_RETRANS_CB_FLAG	(1<<1)
#define BPF_SOCK_OPS_STATE_CB_FLAG	(1<<2)
#define BPF_SOCK_OPS_ALL_CB_FLAGS	0x7	/* Mask of all currently
						 * supported cb flags
						 */
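
/* Illustrative sketch (editor's addition): a fragment of a sock_ops program
 * opting in to RTO and retransmit callbacks once a connection is
 * established; "skops" and the enclosing switch on skops->op are
 * assumptions:
 *
 *	case BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB:
 *		bpf_sock_ops_cb_flags_set(skops,
 *					  BPF_SOCK_OPS_RTO_CB_FLAG |
 *					  BPF_SOCK_OPS_RETRANS_CB_FLAG);
 *		break;
 */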

/* List of known BPF sock_ops operators.
 * New entries can only be added at the end
 */
enum {
	BPF_SOCK_OPS_VOID,
	BPF_SOCK_OPS_TIMEOUT_INIT,	/* Should return SYN-RTO value to use
					 * or -1 if default value should be
					 * used
					 */
	BPF_SOCK_OPS_RWND_INIT,		/* Should return initial advertised
					 * window (in packets) or -1 if default
					 * value should be used
					 */
	BPF_SOCK_OPS_TCP_CONNECT_CB,	/* Calls BPF program right before an
					 * active connection is initialized
					 */
	BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB,	/* Calls BPF program when an
						 * active connection is
						 * established
						 */
	BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB,	/* Calls BPF program when a
						 * passive connection is
						 * established
						 */
	BPF_SOCK_OPS_NEEDS_ECN,		/* If connection's congestion control
					 * needs ECN
					 */
	BPF_SOCK_OPS_BASE_RTT,		/* Get base RTT. The correct value is
					 * based on the path and may be
					 * dependent on the congestion control
					 * algorithm. In general it indicates
					 * a congestion threshold. RTTs above
					 * this indicate congestion
					 */
	BPF_SOCK_OPS_RTO_CB,		/* Called when an RTO has triggered.
					 * Arg1: value of icsk_retransmits
					 * Arg2: value of icsk_rto
					 * Arg3: whether RTO has expired
					 */
	BPF_SOCK_OPS_RETRANS_CB,	/* Called when skb is retransmitted.
					 * Arg1: sequence number of 1st byte
					 * Arg2: # segments
					 * Arg3: return value of
					 *       tcp_transmit_skb (0 => success)
					 */
	BPF_SOCK_OPS_STATE_CB,		/* Called when TCP changes state.
					 * Arg1: old_state
					 * Arg2: new_state
					 */
};

/* List of TCP states. There is a build check in net/ipv4/tcp.c to detect
 * changes between the TCP and BPF versions. Ideally this should never happen.
 * If it does, we need to add code to convert them before calling
 * the BPF sock_ops function.
 */
enum {
	BPF_TCP_ESTABLISHED = 1,
	BPF_TCP_SYN_SENT,
	BPF_TCP_SYN_RECV,
	BPF_TCP_FIN_WAIT1,
	BPF_TCP_FIN_WAIT2,
	BPF_TCP_TIME_WAIT,
	BPF_TCP_CLOSE,
	BPF_TCP_CLOSE_WAIT,
	BPF_TCP_LAST_ACK,
	BPF_TCP_LISTEN,
	BPF_TCP_CLOSING,	/* Now a valid state */
	BPF_TCP_NEW_SYN_RECV,

	BPF_TCP_MAX_STATES	/* Leave at the end! */
};

#define TCP_BPF_IW		1001	/* Set TCP initial congestion window */
#define TCP_BPF_SNDCWND_CLAMP	1002	/* Set sndcwnd_clamp */
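
/* Illustrative sketch (editor's addition): these two option names extend
 * bpf_setsockopt() beyond the regular TCP options, e.g. setting a 40-packet
 * initial congestion window from a sock_ops program; "skops" is an
 * assumption:
 *
 *	int iw = 40;
 *	bpf_setsockopt(skops, IPPROTO_TCP, TCP_BPF_IW, &iw, sizeof(iw));
 */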

struct bpf_perf_event_value {
	__u64 counter;
	__u64 enabled;
	__u64 running;
};

#define BPF_DEVCG_ACC_MKNOD	(1ULL << 0)
#define BPF_DEVCG_ACC_READ	(1ULL << 1)
#define BPF_DEVCG_ACC_WRITE	(1ULL << 2)

#define BPF_DEVCG_DEV_BLOCK	(1ULL << 0)
#define BPF_DEVCG_DEV_CHAR	(1ULL << 1)

struct bpf_cgroup_dev_ctx {
	/* access_type encoded as (BPF_DEVCG_ACC_* << 16) | BPF_DEVCG_DEV_* */
	__u32 access_type;
	__u32 major;
	__u32 minor;
};

struct bpf_raw_tracepoint_args {
	__u64 args[0];
};

#endif /* _UAPI__LINUX_BPF_H__ */