/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 */
#ifndef _UAPI__LINUX_BPF_H__
#define _UAPI__LINUX_BPF_H__

#include <linux/types.h>
#include <linux/bpf_common.h>

/* Extended instruction set based on top of classic BPF */

/* instruction classes */
#define BPF_ALU64	0x07	/* alu mode in double word width */

/* ld/ldx fields */
#define BPF_DW		0x18	/* double word */
#define BPF_XADD	0xc0	/* exclusive add */

/* alu/jmp fields */
#define BPF_MOV		0xb0	/* mov reg to reg */
#define BPF_ARSH	0xc0	/* sign extending arithmetic shift right */

/* change endianness of a register */
#define BPF_END		0xd0	/* flags for endianness conversion: */
#define BPF_TO_LE	0x00	/* convert to little-endian */
#define BPF_TO_BE	0x08	/* convert to big-endian */
#define BPF_FROM_LE	BPF_TO_LE
#define BPF_FROM_BE	BPF_TO_BE

/* jmp encodings */
#define BPF_JNE		0x50	/* jump != */
#define BPF_JLT		0xa0	/* LT is unsigned, '<' */
#define BPF_JLE		0xb0	/* LE is unsigned, '<=' */
#define BPF_JSGT	0x60	/* SGT is signed '>', GT in x86 */
#define BPF_JSGE	0x70	/* SGE is signed '>=', GE in x86 */
#define BPF_JSLT	0xc0	/* SLT is signed, '<' */
#define BPF_JSLE	0xd0	/* SLE is signed, '<=' */
#define BPF_CALL	0x80	/* function call */
#define BPF_EXIT	0x90	/* function return */

/* Register numbers */
enum {
	BPF_REG_0 = 0,
	BPF_REG_1,
	BPF_REG_2,
	BPF_REG_3,
	BPF_REG_4,
	BPF_REG_5,
	BPF_REG_6,
	BPF_REG_7,
	BPF_REG_8,
	BPF_REG_9,
	BPF_REG_10,
	__MAX_BPF_REG,
};

/* BPF has 10 general purpose 64-bit registers and a stack frame. */
#define MAX_BPF_REG	__MAX_BPF_REG

struct bpf_insn {
	__u8	code;		/* opcode */
	__u8	dst_reg:4;	/* dest register */
	__u8	src_reg:4;	/* source register */
	__s16	off;		/* signed offset */
	__s32	imm;		/* signed immediate constant */
};

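/* A minimal illustrative sketch (not part of the UAPI itself): the
 * 64-bit register move "r0 = r1" is encoded as BPF_ALU64 | BPF_MOV |
 * BPF_X, with BPF_X taken from <linux/bpf_common.h>:
 *
 *	struct bpf_insn mov64 = {
 *		.code    = BPF_ALU64 | BPF_MOV | BPF_X,
 *		.dst_reg = BPF_REG_0,
 *		.src_reg = BPF_REG_1,
 *		.off     = 0,
 *		.imm     = 0,
 *	};
 */
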
/* Key of a BPF_MAP_TYPE_LPM_TRIE entry */
struct bpf_lpm_trie_key {
	__u32	prefixlen;	/* up to 32 for AF_INET, 128 for AF_INET6 */
	__u8	data[0];	/* Arbitrary size */
};

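/* Illustrative sketch (not part of the UAPI): a lookup key for
 * 192.168.0.0/16 in an IPv4-keyed LPM trie. The wrapper struct is
 * hypothetical; the address bytes are in network byte order:
 *
 *	struct {
 *		struct bpf_lpm_trie_key trie_key;
 *		__u8 ipv4[4];
 *	} key = {
 *		.trie_key = { .prefixlen = 16 },
 *		.ipv4 = { 192, 168, 0, 0 },
 *	};
 */
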
/* BPF syscall commands, see bpf(2) man-page for details. */
enum bpf_cmd {
	BPF_MAP_CREATE,
	BPF_MAP_LOOKUP_ELEM,
	BPF_MAP_UPDATE_ELEM,
	BPF_MAP_DELETE_ELEM,
	BPF_MAP_GET_NEXT_KEY,
	BPF_PROG_LOAD,
	BPF_OBJ_PIN,
	BPF_OBJ_GET,
	BPF_PROG_ATTACH,
	BPF_PROG_DETACH,
	BPF_PROG_TEST_RUN,
	BPF_PROG_GET_NEXT_ID,
	BPF_MAP_GET_NEXT_ID,
	BPF_PROG_GET_FD_BY_ID,
	BPF_MAP_GET_FD_BY_ID,
	BPF_OBJ_GET_INFO_BY_FD,
};

enum bpf_map_type {
	BPF_MAP_TYPE_UNSPEC,
	BPF_MAP_TYPE_HASH,
	BPF_MAP_TYPE_ARRAY,
	BPF_MAP_TYPE_PROG_ARRAY,
	BPF_MAP_TYPE_PERF_EVENT_ARRAY,
	BPF_MAP_TYPE_PERCPU_HASH,
	BPF_MAP_TYPE_PERCPU_ARRAY,
	BPF_MAP_TYPE_STACK_TRACE,
	BPF_MAP_TYPE_CGROUP_ARRAY,
	BPF_MAP_TYPE_LRU_HASH,
	BPF_MAP_TYPE_LRU_PERCPU_HASH,
	BPF_MAP_TYPE_LPM_TRIE,
	BPF_MAP_TYPE_ARRAY_OF_MAPS,
	BPF_MAP_TYPE_HASH_OF_MAPS,
	BPF_MAP_TYPE_DEVMAP,
	BPF_MAP_TYPE_SOCKMAP,
};

enum bpf_prog_type {
	BPF_PROG_TYPE_UNSPEC,
	BPF_PROG_TYPE_SOCKET_FILTER,
	BPF_PROG_TYPE_KPROBE,
	BPF_PROG_TYPE_SCHED_CLS,
	BPF_PROG_TYPE_SCHED_ACT,
	BPF_PROG_TYPE_TRACEPOINT,
	BPF_PROG_TYPE_XDP,
	BPF_PROG_TYPE_PERF_EVENT,
	BPF_PROG_TYPE_CGROUP_SKB,
	BPF_PROG_TYPE_CGROUP_SOCK,
	BPF_PROG_TYPE_LWT_IN,
	BPF_PROG_TYPE_LWT_OUT,
	BPF_PROG_TYPE_LWT_XMIT,
	BPF_PROG_TYPE_SOCK_OPS,
	BPF_PROG_TYPE_SK_SKB,
};

enum bpf_attach_type {
	BPF_CGROUP_INET_INGRESS,
	BPF_CGROUP_INET_EGRESS,
	BPF_CGROUP_INET_SOCK_CREATE,
	BPF_CGROUP_SOCK_OPS,
	BPF_SK_SKB_STREAM_PARSER,
	BPF_SK_SKB_STREAM_VERDICT,
	__MAX_BPF_ATTACH_TYPE
};

#define MAX_BPF_ATTACH_TYPE __MAX_BPF_ATTACH_TYPE

/* cgroup-bpf attach flags used in BPF_PROG_ATTACH command
 *
 * NONE(default): No further bpf programs allowed in the subtree.
 *
 * BPF_F_ALLOW_OVERRIDE: If a sub-cgroup installs some bpf program,
 * the program in this cgroup yields to the sub-cgroup program.
 *
 * BPF_F_ALLOW_MULTI: If a sub-cgroup installs some bpf program,
 * that cgroup program gets run in addition to the program in this cgroup.
 *
 * Only one program is allowed to be attached to a cgroup with
 * NONE or BPF_F_ALLOW_OVERRIDE flag.
 * Attaching another program on top of NONE or BPF_F_ALLOW_OVERRIDE will
 * release the old program and attach the new one. Attach flags have to match.
 *
 * Multiple programs are allowed to be attached to a cgroup with
 * BPF_F_ALLOW_MULTI flag. They are executed in FIFO order
 * (those that were attached first, run first).
 * The programs of the sub-cgroup are executed first, then programs of
 * this cgroup and then programs of the parent cgroup.
 * When a child program makes a decision (like picking TCP CA or sock bind)
 * the parent program has a chance to override it.
 *
 * A cgroup with MULTI or OVERRIDE flag allows any attach flags in sub-cgroups.
 * A cgroup with NONE doesn't allow any programs in sub-cgroups.
 * Ex1:
 * cgrp1 (MULTI progs A, B) ->
 *    cgrp2 (OVERRIDE prog C) ->
 *      cgrp3 (MULTI prog D) ->
 *        cgrp4 (OVERRIDE prog E) ->
 *          cgrp5 (NONE prog F)
 * the event in cgrp5 triggers execution of F,D,A,B in that order.
 * if prog F is detached, the execution is E,D,A,B
 * if prog F and D are detached, the execution is E,A,B
 * if prog F, E and D are detached, the execution is C,A,B
 *
 * All eligible programs are executed regardless of return code from
 * earlier programs.
 */
#define BPF_F_ALLOW_OVERRIDE	(1U << 0)
#define BPF_F_ALLOW_MULTI	(1U << 1)

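/* Illustrative sketch (not part of the UAPI): attaching a program to a
 * cgroup with BPF_F_ALLOW_MULTI through the bpf(2) syscall, assuming
 * cgroup_fd and prog_fd are descriptors obtained earlier:
 *
 *	union bpf_attr attr = {};
 *
 *	attr.target_fd     = cgroup_fd;
 *	attr.attach_bpf_fd = prog_fd;
 *	attr.attach_type   = BPF_CGROUP_INET_INGRESS;
 *	attr.attach_flags  = BPF_F_ALLOW_MULTI;
 *	err = syscall(__NR_bpf, BPF_PROG_ATTACH, &attr, sizeof(attr));
 */
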
/* If BPF_F_STRICT_ALIGNMENT is used in BPF_PROG_LOAD command, the
 * verifier will perform strict alignment checking as if the kernel
 * has been built with CONFIG_EFFICIENT_UNALIGNED_ACCESS not set,
 * and NET_IP_ALIGN defined to 2.
 */
#define BPF_F_STRICT_ALIGNMENT	(1U << 0)

#define BPF_PSEUDO_MAP_FD	1

/* flags for BPF_MAP_UPDATE_ELEM command */
#define BPF_ANY		0 /* create new element or update existing */
#define BPF_NOEXIST	1 /* create new element if it didn't exist */
#define BPF_EXIST	2 /* update existing element */

/* flags for BPF_MAP_CREATE command */
#define BPF_F_NO_PREALLOC	(1U << 0)
/* Instead of having one common LRU list in the
 * BPF_MAP_TYPE_LRU_[PERCPU_]HASH map, use a percpu LRU list
 * which can scale and perform better.
 * Note, the LRU nodes (including free nodes) cannot be moved
 * across different LRU lists.
 */
#define BPF_F_NO_COMMON_LRU	(1U << 1)
/* Specify numa node during map creation */
#define BPF_F_NUMA_NODE		(1U << 2)

#define BPF_OBJ_NAME_LEN 16U

union bpf_attr {
	struct { /* anonymous struct used by BPF_MAP_CREATE command */
		__u32	map_type;	/* one of enum bpf_map_type */
		__u32	key_size;	/* size of key in bytes */
		__u32	value_size;	/* size of value in bytes */
		__u32	max_entries;	/* max number of entries in a map */
		__u32	map_flags;	/* BPF_MAP_CREATE related
					 * flags defined above.
					 */
		__u32	inner_map_fd;	/* fd pointing to the inner map */
		__u32	numa_node;	/* numa node (effective only if
					 * BPF_F_NUMA_NODE is set).
					 */
		__u8	map_name[BPF_OBJ_NAME_LEN];
	};

	struct { /* anonymous struct used by BPF_MAP_*_ELEM commands */
		__u32		map_fd;
		__aligned_u64	key;
		union {
			__aligned_u64 value;
			__aligned_u64 next_key;
		};
		__u64		flags;
	};

	struct { /* anonymous struct used by BPF_PROG_LOAD command */
		__u32		prog_type;	/* one of enum bpf_prog_type */
		__u32		insn_cnt;
		__aligned_u64	insns;
		__aligned_u64	license;
		__u32		log_level;	/* verbosity level of verifier */
		__u32		log_size;	/* size of user buffer */
		__aligned_u64	log_buf;	/* user supplied buffer */
		__u32		kern_version;	/* checked when prog_type=kprobe */
		__u32		prog_flags;
		__u8		prog_name[BPF_OBJ_NAME_LEN];
	};

	struct { /* anonymous struct used by BPF_OBJ_* commands */
		__aligned_u64	pathname;
		__u32		bpf_fd;
	};

	struct { /* anonymous struct used by BPF_PROG_ATTACH/DETACH commands */
		__u32		target_fd;	/* container object to attach to */
		__u32		attach_bpf_fd;	/* eBPF program to attach */
		__u32		attach_type;
		__u32		attach_flags;
	};

	struct { /* anonymous struct used by BPF_PROG_TEST_RUN command */
		__u32		prog_fd;
		__u32		retval;
		__u32		data_size_in;
		__u32		data_size_out;
		__aligned_u64	data_in;
		__aligned_u64	data_out;
		__u32		repeat;
		__u32		duration;
	} test;

	struct { /* anonymous struct used by BPF_*_GET_*_ID */
		union {
			__u32		start_id;
			__u32		prog_id;
			__u32		map_id;
		};
		__u32		next_id;
	};

	struct { /* anonymous struct used by BPF_OBJ_GET_INFO_BY_FD */
		__u32		bpf_fd;
		__u32		info_len;
		__aligned_u64	info;
	} info;
} __attribute__((aligned(8)));

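/* Illustrative sketch (not part of the UAPI): creating a hash map via
 * the bpf(2) syscall from userspace; assumes <unistd.h>,
 * <sys/syscall.h> and <string.h>:
 *
 *	union bpf_attr attr;
 *	int map_fd;
 *
 *	memset(&attr, 0, sizeof(attr));
 *	attr.map_type    = BPF_MAP_TYPE_HASH;
 *	attr.key_size    = sizeof(__u32);
 *	attr.value_size  = sizeof(__u64);
 *	attr.max_entries = 1024;
 *	map_fd = syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
 */
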
/* BPF helper function descriptions:
 *
 * void *bpf_map_lookup_elem(&map, &key)
 *     Return: Map value or NULL
 *
 * int bpf_map_update_elem(&map, &key, &value, flags)
 *     Return: 0 on success or negative error
 *
 * int bpf_map_delete_elem(&map, &key)
 *     Return: 0 on success or negative error
 *
 * int bpf_probe_read(void *dst, int size, void *src)
 *     Return: 0 on success or negative error
 *
 * u64 bpf_ktime_get_ns(void)
 *     Return: current ktime
 *
 * int bpf_trace_printk(const char *fmt, int fmt_size, ...)
 *     Return: length of buffer written or negative error
 *
 * u32 bpf_prandom_u32(void)
 *     Return: random value
 *
 * u32 bpf_raw_smp_processor_id(void)
 *     Return: SMP processor ID
 *
 * int bpf_skb_store_bytes(skb, offset, from, len, flags)
 *     store bytes into packet
 *     @skb: pointer to skb
 *     @offset: offset within packet from skb->mac_header
 *     @from: pointer where to copy bytes from
 *     @len: number of bytes to store into packet
 *     @flags: bit 0 - if true, recompute skb->csum
 *             other bits - reserved
 *     Return: 0 on success or negative error
 *
 * int bpf_l3_csum_replace(skb, offset, from, to, flags)
 *     recompute IP checksum
 *     @skb: pointer to skb
 *     @offset: offset within packet where IP checksum is located
 *     @from: old value of header field
 *     @to: new value of header field
 *     @flags: bits 0-3 - size of header field
 *             other bits - reserved
 *     Return: 0 on success or negative error
 *
 * int bpf_l4_csum_replace(skb, offset, from, to, flags)
 *     recompute TCP/UDP checksum
 *     @skb: pointer to skb
 *     @offset: offset within packet where TCP/UDP checksum is located
 *     @from: old value of header field
 *     @to: new value of header field
 *     @flags: bits 0-3 - size of header field
 *             bit 4 - is pseudo header
 *             other bits - reserved
 *     Return: 0 on success or negative error
 *
 * int bpf_tail_call(ctx, prog_array_map, index)
 *     jump into another BPF program
 *     @ctx: context pointer passed to next program
 *     @prog_array_map: pointer to map of type BPF_MAP_TYPE_PROG_ARRAY
 *     @index: index inside array that selects specific program to run
 *     Return: 0 on success or negative error
 *
 * int bpf_clone_redirect(skb, ifindex, flags)
 *     redirect to another netdev
 *     @skb: pointer to skb
 *     @ifindex: ifindex of the net device
 *     @flags: bit 0 - if set, redirect to ingress instead of egress
 *             other bits - reserved
 *     Return: 0 on success or negative error
 *
 * u64 bpf_get_current_pid_tgid(void)
 *     Return: current->tgid << 32 | current->pid
 *
 * u64 bpf_get_current_uid_gid(void)
 *     Return: current_gid << 32 | current_uid
 *
 * int bpf_get_current_comm(char *buf, int size_of_buf)
 *     stores current->comm into buf
 *     Return: 0 on success or negative error
 *
 * u32 bpf_get_cgroup_classid(skb)
 *     retrieve a proc's classid
 *     @skb: pointer to skb
 *     Return: classid if != 0
 *
 * int bpf_skb_vlan_push(skb, vlan_proto, vlan_tci)
 *     Return: 0 on success or negative error
 *
 * int bpf_skb_vlan_pop(skb)
 *     Return: 0 on success or negative error
 *
 * int bpf_skb_get_tunnel_key(skb, key, size, flags)
 * int bpf_skb_set_tunnel_key(skb, key, size, flags)
 *     retrieve or populate tunnel metadata
 *     @skb: pointer to skb
 *     @key: pointer to 'struct bpf_tunnel_key'
 *     @size: size of 'struct bpf_tunnel_key'
 *     @flags: room for future extensions
 *     Return: 0 on success or negative error
 *
 * u64 bpf_perf_event_read(map, flags)
 *     read perf event counter value
 *     @map: pointer to perf_event_array map
 *     @flags: index of event in the map or bitmask flags
 *     Return: value of perf event counter read or error code
 *
 * int bpf_redirect(ifindex, flags)
 *     redirect to another netdev
 *     @ifindex: ifindex of the net device
 *     @flags:
 *       cls_bpf:
 *         bit 0 - if set, redirect to ingress instead of egress
 *         other bits - reserved
 *       xdp_bpf:
 *         all bits - reserved
 *     Return: cls_bpf: TC_ACT_REDIRECT on success or TC_ACT_SHOT on error
 *             xdp_bpf: XDP_REDIRECT on success or XDP_ABORTED on error
 *
 * int bpf_redirect_map(map, key, flags)
 *     redirect to endpoint in map
 *     @map: pointer to dev map
 *     @key: index in map to lookup
 *     @flags: reserved for future use
 *     Return: XDP_REDIRECT on success or XDP_ABORTED on error
 *
 * u32 bpf_get_route_realm(skb)
 *     retrieve a dst's tclassid
 *     @skb: pointer to skb
 *     Return: realm if != 0
 *
 * int bpf_perf_event_output(ctx, map, flags, data, size)
 *     output perf raw sample
 *     @ctx: struct pt_regs*
 *     @map: pointer to perf_event_array map
 *     @flags: index of event in the map or bitmask flags
 *     @data: data on stack to be output as raw data
 *     @size: size of data
 *     Return: 0 on success or negative error
 *
 * int bpf_get_stackid(ctx, map, flags)
 *     walk user or kernel stack and return id
 *     @ctx: struct pt_regs*
 *     @map: pointer to stack_trace map
 *     @flags: bits 0-7 - number of stack frames to skip
 *             bit 8 - collect user stack instead of kernel
 *             bit 9 - compare stacks by hash only
 *             bit 10 - if two different stacks hash into the same stackid
 *                      discard old
 *             other bits - reserved
 *     Return: >= 0 stackid on success or negative error
 *
 * s64 bpf_csum_diff(from, from_size, to, to_size, seed)
 *     calculate csum diff
 *     @from: raw from buffer
 *     @from_size: length of from buffer
 *     @to: raw to buffer
 *     @to_size: length of to buffer
 *     @seed: optional seed
 *     Return: csum result or negative error code
 *
 * int bpf_skb_get_tunnel_opt(skb, opt, size)
 *     retrieve tunnel options metadata
 *     @skb: pointer to skb
 *     @opt: pointer to raw tunnel option data
 *     @size: size of @opt
 *     Return: option size
 *
 * int bpf_skb_set_tunnel_opt(skb, opt, size)
 *     populate tunnel options metadata
 *     @skb: pointer to skb
 *     @opt: pointer to raw tunnel option data
 *     @size: size of @opt
 *     Return: 0 on success or negative error
 *
 * int bpf_skb_change_proto(skb, proto, flags)
 *     Change protocol of the skb. Currently supported are v4 -> v6 and
 *     v6 -> v4 transitions. The helper will also resize the skb. The eBPF
 *     program is expected to fill the new headers via skb_store_bytes
 *     and lX_csum_replace.
 *     @skb: pointer to skb
 *     @proto: new skb->protocol type
 *     @flags: reserved
 *     Return: 0 on success or negative error
 *
 * int bpf_skb_change_type(skb, type)
 *     Change packet type of skb.
 *     @skb: pointer to skb
 *     @type: new skb->pkt_type type
 *     Return: 0 on success or negative error
 *
 * int bpf_skb_under_cgroup(skb, map, index)
 *     Check cgroup2 membership of skb
 *     @skb: pointer to skb
 *     @map: pointer to bpf_map of BPF_MAP_TYPE_CGROUP_ARRAY type
 *     @index: index of the cgroup in the bpf_map
 *     Return:
 *       == 0 skb failed the cgroup2 descendant test
 *       == 1 skb succeeded the cgroup2 descendant test
 *        < 0 error
 *
 * u32 bpf_get_hash_recalc(skb)
 *     Retrieve and possibly recalculate skb->hash.
 *     @skb: pointer to skb
 *     Return: hash
 *
 * u64 bpf_get_current_task(void)
 *     Returns the current task_struct
 *     Return: current
 *
 * int bpf_probe_write_user(void *dst, void *src, int len)
 *     safely attempt to write to a location
 *     @dst: destination address in userspace
 *     @src: source address on stack
 *     @len: number of bytes to copy
 *     Return: 0 on success or negative error
 *
 * int bpf_current_task_under_cgroup(map, index)
 *     Check cgroup2 membership of current task
 *     @map: pointer to bpf_map of BPF_MAP_TYPE_CGROUP_ARRAY type
 *     @index: index of the cgroup in the bpf_map
 *     Return:
 *       == 0 current failed the cgroup2 descendant test
 *       == 1 current succeeded the cgroup2 descendant test
 *        < 0 error
 *
 * int bpf_skb_change_tail(skb, len, flags)
 *     The helper will resize the skb to the given new size, to be used
 *     e.g. with control messages.
 *     @skb: pointer to skb
 *     @len: new skb length
 *     @flags: reserved
 *     Return: 0 on success or negative error
 *
 * int bpf_skb_pull_data(skb, len)
 *     The helper will pull in non-linear data in case the skb is non-linear
 *     and not all of len is part of the linear section. Only needed for
 *     read/write with direct packet access.
 *     @skb: pointer to skb
 *     @len: len to make read/writable
 *     Return: 0 on success or negative error
 *
 * s64 bpf_csum_update(skb, csum)
 *     Adds csum into skb->csum in case of CHECKSUM_COMPLETE.
 *     @skb: pointer to skb
 *     @csum: csum to add
 *     Return: csum on success or negative error
 *
 * void bpf_set_hash_invalid(skb)
 *     Invalidate current skb->hash.
 *     @skb: pointer to skb
 *
 * int bpf_get_numa_node_id()
 *     Return: Id of current NUMA node.
 *
 * int bpf_skb_change_head(skb, len, flags)
 *     Grows headroom of skb and adjusts MAC header offset accordingly.
 *     Will extend/reallocate as required automatically.
 *     May change skb data pointer and will thus invalidate any check
 *     performed for direct packet access.
 *     @skb: pointer to skb
 *     @len: length of header to be pushed in front
 *     @flags: Flags (unused for now)
 *     Return: 0 on success or negative error
 *
 * int bpf_xdp_adjust_head(xdp_md, delta)
 *     Adjust the xdp_md.data by delta
 *     @xdp_md: pointer to xdp_md
 *     @delta: A positive/negative integer to be added to xdp_md.data
 *     Return: 0 on success or negative on error
 *
 * int bpf_probe_read_str(void *dst, int size, const void *unsafe_ptr)
 *     Copy a NUL terminated string from unsafe address. In case the string
 *     length is smaller than size, the target is not padded with further NUL
 *     bytes. In case the string length is larger than size, just size-1
 *     bytes are copied and the last byte is set to NUL.
 *     @dst: destination address
 *     @size: maximum number of bytes to copy, including the trailing NUL
 *     @unsafe_ptr: unsafe address
 *     Return:
 *       > 0 length of the string including the trailing NUL on success
 *       < 0 error
 *
 * u64 bpf_get_socket_cookie(skb)
 *     Get the cookie for the socket stored inside sk_buff.
 *     @skb: pointer to skb
 *     Return: 8-byte non-decreasing number on success or 0 if the socket
 *             field is missing inside sk_buff
 *
 * u32 bpf_get_socket_uid(skb)
 *     Get the owner uid of the socket stored inside sk_buff.
 *     @skb: pointer to skb
 *     Return: uid of the socket owner on success or overflowuid if failed.
 *
 * u32 bpf_set_hash(skb, hash)
 *     Set full skb->hash.
 *     @skb: pointer to skb
 *     @hash: hash to set
 *
 * int bpf_setsockopt(bpf_socket, level, optname, optval, optlen)
 *     Calls setsockopt. Not all opts are available, only those with
 *     integer optvals plus TCP_CONGESTION.
 *     Supported levels: SOL_SOCKET and IPPROTO_TCP
 *     @bpf_socket: pointer to bpf_socket
 *     @level: SOL_SOCKET or IPPROTO_TCP
 *     @optname: option name
 *     @optval: pointer to option value
 *     @optlen: length of optval in bytes
 *     Return: 0 or negative error
 *
 * int bpf_skb_adjust_room(skb, len_diff, mode, flags)
 *     Grow or shrink room in sk_buff.
 *     @skb: pointer to skb
 *     @len_diff: (signed) amount of room to grow/shrink
 *     @mode: operation mode (enum bpf_adj_room_mode)
 *     @flags: reserved for future use
 *     Return: 0 on success or negative error code
 *
 * int bpf_sk_redirect_map(map, key, flags)
 *     Redirect skb to a sock in map using key as a lookup key for the
 *     sock in map.
 *     @map: pointer to sockmap
 *     @key: key to lookup sock in map
 *     @flags: reserved for future use
 *     Return: SK_REDIRECT
 *
 * int bpf_sock_map_update(skops, map, key, flags)
 *     @skops: pointer to bpf_sock_ops
 *     @map: pointer to sockmap to update
 *     @key: key to insert/update sock in map
 *     @flags: same flags as map update elem
 *
 * int bpf_xdp_adjust_meta(xdp_md, delta)
 *     Adjust the xdp_md.data_meta by delta
 *     @xdp_md: pointer to xdp_md
 *     @delta: A positive/negative integer to be added to xdp_md.data_meta
 *     Return: 0 on success or negative on error
 */
#define __BPF_FUNC_MAPPER(FN)		\
	FN(unspec),			\
	FN(map_lookup_elem),		\
	FN(map_update_elem),		\
	FN(map_delete_elem),		\
	FN(probe_read),			\
	FN(ktime_get_ns),		\
	FN(trace_printk),		\
	FN(get_prandom_u32),		\
	FN(get_smp_processor_id),	\
	FN(skb_store_bytes),		\
	FN(l3_csum_replace),		\
	FN(l4_csum_replace),		\
	FN(tail_call),			\
	FN(clone_redirect),		\
	FN(get_current_pid_tgid),	\
	FN(get_current_uid_gid),	\
	FN(get_current_comm),		\
	FN(get_cgroup_classid),		\
	FN(skb_vlan_push),		\
	FN(skb_vlan_pop),		\
	FN(skb_get_tunnel_key),		\
	FN(skb_set_tunnel_key),		\
	FN(perf_event_read),		\
	FN(redirect),			\
	FN(get_route_realm),		\
	FN(perf_event_output),		\
	FN(skb_load_bytes),		\
	FN(get_stackid),		\
	FN(csum_diff),			\
	FN(skb_get_tunnel_opt),		\
	FN(skb_set_tunnel_opt),		\
	FN(skb_change_proto),		\
	FN(skb_change_type),		\
	FN(skb_under_cgroup),		\
	FN(get_hash_recalc),		\
	FN(get_current_task),		\
	FN(probe_write_user),		\
	FN(current_task_under_cgroup),	\
	FN(skb_change_tail),		\
	FN(skb_pull_data),		\
	FN(csum_update),		\
	FN(set_hash_invalid),		\
	FN(get_numa_node_id),		\
	FN(skb_change_head),		\
	FN(xdp_adjust_head),		\
	FN(probe_read_str),		\
	FN(get_socket_cookie),		\
	FN(get_socket_uid),		\
	FN(set_hash),			\
	FN(setsockopt),			\
	FN(skb_adjust_room),		\
	FN(redirect_map),		\
	FN(sk_redirect_map),		\
	FN(sock_map_update),		\
	FN(xdp_adjust_meta),

/* integer value in 'imm' field of BPF_CALL instruction selects which helper
 * function eBPF program intends to call
 */
#define __BPF_ENUM_FN(x) BPF_FUNC_ ## x
enum bpf_func_id {
	__BPF_FUNC_MAPPER(__BPF_ENUM_FN)
	__BPF_FUNC_MAX_ID,
};
#undef __BPF_ENUM_FN

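/* Illustrative sketch (not part of the UAPI): the mapper above expands
 * to BPF_FUNC_unspec = 0, BPF_FUNC_map_lookup_elem = 1, and so on. A
 * hand-assembled call to bpf_map_lookup_elem() places that enum value
 * in the 'imm' field (BPF_JMP comes from <linux/bpf_common.h>):
 *
 *	struct bpf_insn call = {
 *		.code = BPF_JMP | BPF_CALL,
 *		.imm  = BPF_FUNC_map_lookup_elem,
 *	};
 */
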
/* All flags used by eBPF helper functions, placed here. */

/* BPF_FUNC_skb_store_bytes flags. */
#define BPF_F_RECOMPUTE_CSUM		(1ULL << 0)
#define BPF_F_INVALIDATE_HASH		(1ULL << 1)

/* BPF_FUNC_l3_csum_replace and BPF_FUNC_l4_csum_replace flags.
 * First 4 bits are for passing the header field size.
 */
#define BPF_F_HDR_FIELD_MASK		0xfULL

/* BPF_FUNC_l4_csum_replace flags. */
#define BPF_F_PSEUDO_HDR		(1ULL << 4)
#define BPF_F_MARK_MANGLED_0		(1ULL << 5)
#define BPF_F_MARK_ENFORCE		(1ULL << 6)

/* BPF_FUNC_clone_redirect and BPF_FUNC_redirect flags. */
#define BPF_F_INGRESS			(1ULL << 0)

/* BPF_FUNC_skb_set_tunnel_key and BPF_FUNC_skb_get_tunnel_key flags. */
#define BPF_F_TUNINFO_IPV6		(1ULL << 0)

/* BPF_FUNC_get_stackid flags. */
#define BPF_F_SKIP_FIELD_MASK		0xffULL
#define BPF_F_USER_STACK		(1ULL << 8)
#define BPF_F_FAST_STACK_CMP		(1ULL << 9)
#define BPF_F_REUSE_STACKID		(1ULL << 10)

/* BPF_FUNC_skb_set_tunnel_key flags. */
#define BPF_F_ZERO_CSUM_TX		(1ULL << 1)
#define BPF_F_DONT_FRAGMENT		(1ULL << 2)

/* BPF_FUNC_perf_event_output and BPF_FUNC_perf_event_read flags. */
#define BPF_F_INDEX_MASK		0xffffffffULL
#define BPF_F_CURRENT_CPU		BPF_F_INDEX_MASK
/* BPF_FUNC_perf_event_output for sk_buff input context. */
#define BPF_F_CTXLEN_MASK		(0xfffffULL << 32)

/* Mode for BPF_FUNC_skb_adjust_room helper. */
enum bpf_adj_room_mode {
	BPF_ADJ_ROOM_NET,
};

/* user accessible mirror of in-kernel sk_buff.
 * new fields can only be added to the end of this structure
 */
struct __sk_buff {
	__u32 len;
	__u32 pkt_type;
	__u32 mark;
	__u32 queue_mapping;
	__u32 protocol;
	__u32 vlan_present;
	__u32 vlan_tci;
	__u32 vlan_proto;
	__u32 priority;
	__u32 ingress_ifindex;
	__u32 ifindex;
	__u32 tc_index;
	__u32 cb[5];
	__u32 hash;
	__u32 tc_classid;
	__u32 data;
	__u32 data_end;
	__u32 napi_id;

	/* Accessed by BPF_PROG_TYPE_SK_SKB programs from here to ... */
	__u32 family;
	__u32 remote_ip4;	/* Stored in network byte order */
	__u32 local_ip4;	/* Stored in network byte order */
	__u32 remote_ip6[4];	/* Stored in network byte order */
	__u32 local_ip6[4];	/* Stored in network byte order */
	__u32 remote_port;	/* Stored in network byte order */
	__u32 local_port;	/* Stored in host byte order */
	/* ... here. */

	__u32 data_meta;
};

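/* Illustrative sketch (not part of the UAPI): a BPF_PROG_TYPE_SOCKET_FILTER
 * program sees packets through this mirror. A minimal filter in
 * restricted C (compiled with clang -target bpf) that truncates every
 * packet to at most 64 bytes could look like:
 *
 *	int sample_filter(struct __sk_buff *skb)
 *	{
 *		return skb->len < 64 ? skb->len : 64;
 *	}
 */
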
struct bpf_tunnel_key {
	__u32 tunnel_id;
	union {
		__u32 remote_ipv4;
		__u32 remote_ipv6[4];
	};
	__u8 tunnel_tos;
	__u8 tunnel_ttl;
	__u16 tunnel_ext;
	__u32 tunnel_label;
};

/* Generic BPF return codes which all BPF program types may support.
 * The values are binary compatible with their TC_ACT_* counterparts to
 * provide backwards compatibility with existing SCHED_CLS and SCHED_ACT
 * programs.
 *
 * XDP is handled separately, see XDP_*.
 */
enum bpf_ret_code {
	BPF_OK = 0,
	/* 1 reserved */
	BPF_DROP = 2,
	/* 3-6 reserved */
	BPF_REDIRECT = 7,
	/* >127 are reserved for prog type specific return codes */
};

struct bpf_sock {
	__u32 bound_dev_if;
	__u32 family;
	__u32 type;
	__u32 protocol;
	__u32 mark;
	__u32 priority;
};

#define XDP_PACKET_HEADROOM 256

/* User return codes for XDP prog type.
 * A valid XDP program must return one of these defined values. All other
 * return codes are reserved for future use. Unknown return codes will
 * result in packet drops and a warning via bpf_warn_invalid_xdp_action().
 */
enum xdp_action {
	XDP_ABORTED = 0,
	XDP_DROP,
	XDP_PASS,
	XDP_TX,
	XDP_REDIRECT,
};

/* user accessible metadata for XDP packet hook
 * new fields must be added to the end of this structure
 */
struct xdp_md {
	__u32 data;
	__u32 data_end;
	__u32 data_meta;
};

enum sk_action {
	SK_ABORTED = 0,
	SK_DROP,
	SK_REDIRECT,
};

#define BPF_TAG_SIZE 8

struct bpf_prog_info {
	__u32 type;
	__u32 id;
	__u8  tag[BPF_TAG_SIZE];
	__u32 jited_prog_len;
	__u32 xlated_prog_len;
	__aligned_u64 jited_prog_insns;
	__aligned_u64 xlated_prog_insns;
	__u64 load_time;	/* ns since boottime */
	__u32 created_by_uid;
	__u32 nr_map_ids;
	__aligned_u64 map_ids;
	__u8  name[BPF_OBJ_NAME_LEN];
} __attribute__((aligned(8)));

struct bpf_map_info {
	__u32 type;
	__u32 id;
	__u32 key_size;
	__u32 value_size;
	__u32 max_entries;
	__u32 map_flags;
	__u8  name[BPF_OBJ_NAME_LEN];
} __attribute__((aligned(8)));

/* User bpf_sock_ops struct to access socket values and specify request ops
 * and their replies.
 * Some of these fields are in network (big-endian) byte order and may need
 * to be converted before use (bpf_ntohl() defined in samples/bpf/bpf_endian.h).
 * New fields can only be added at the end of this structure
 */
struct bpf_sock_ops {
	__u32 op;
	union {
		__u32 reply;
		__u32 replylong[4];
	};
	__u32 family;
	__u32 remote_ip4;	/* Stored in network byte order */
	__u32 local_ip4;	/* Stored in network byte order */
	__u32 remote_ip6[4];	/* Stored in network byte order */
	__u32 local_ip6[4];	/* Stored in network byte order */
	__u32 remote_port;	/* Stored in network byte order */
	__u32 local_port;	/* Stored in host byte order */
};

/* List of known BPF sock_ops operators.
 * New entries can only be added at the end
 */
enum {
	BPF_SOCK_OPS_VOID,
	BPF_SOCK_OPS_TIMEOUT_INIT,	/* Should return SYN-RTO value to use or
					 * -1 if default value should be used
					 */
	BPF_SOCK_OPS_RWND_INIT,		/* Should return initial advertised
					 * window (in packets) or -1 if default
					 * value should be used
					 */
	BPF_SOCK_OPS_TCP_CONNECT_CB,	/* Calls BPF program right before an
					 * active connection is initialized
					 */
	BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB,	/* Calls BPF program when an
						 * active connection is
						 * established
						 */
	BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB,	/* Calls BPF program when a
						 * passive connection is
						 * established
						 */
	BPF_SOCK_OPS_NEEDS_ECN,		/* If connection's congestion control
					 * needs ECN
					 */
};

#define TCP_BPF_IW		1001	/* Set TCP initial congestion window */
#define TCP_BPF_SNDCWND_CLAMP	1002	/* Set sndcwnd_clamp */

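/* Illustrative sketch (not part of the UAPI): a BPF_PROG_TYPE_SOCK_OPS
 * program, modeled on samples/bpf, that answers BPF_SOCK_OPS_TIMEOUT_INIT
 * through the reply field and leaves other ops at their defaults (-1);
 * the value 10 is arbitrary for illustration:
 *
 *	int sockops_prog(struct bpf_sock_ops *skops)
 *	{
 *		int rv = -1;
 *
 *		if (skops->op == BPF_SOCK_OPS_TIMEOUT_INIT)
 *			rv = 10;
 *		skops->reply = rv;
 *		return 1;
 *	}
 */
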
#endif /* _UAPI__LINUX_BPF_H__ */