/* SPDX-License-Identifier: GPL-2.0 */
/* eBPF mini library */
#ifndef __LIBBPF_H
#define __LIBBPF_H

#include <bpf/bpf.h>

struct bpf_insn;

/* ALU ops on registers, bpf_add|sub|...: dst_reg += src_reg */

#define BPF_ALU64_REG(OP, DST, SRC) \
	((struct bpf_insn) { \
		.code = BPF_ALU64 | BPF_OP(OP) | BPF_X, \
		.dst_reg = DST, \
		.src_reg = SRC, \
		.off = 0, \
		.imm = 0 })

#define BPF_ALU32_REG(OP, DST, SRC) \
	((struct bpf_insn) { \
		.code = BPF_ALU | BPF_OP(OP) | BPF_X, \
		.dst_reg = DST, \
		.src_reg = SRC, \
		.off = 0, \
		.imm = 0 })
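
/*
 * Usage sketch (illustrative, not part of the original header): a 64-bit
 * "r0 += r1" and a 32-bit "w2 -= w3":
 *
 *	BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
 *	BPF_ALU32_REG(BPF_SUB, BPF_REG_2, BPF_REG_3),
 */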

/* ALU ops on immediates, bpf_add|sub|...: dst_reg += imm32 */

#define BPF_ALU64_IMM(OP, DST, IMM) \
	((struct bpf_insn) { \
		.code = BPF_ALU64 | BPF_OP(OP) | BPF_K, \
		.dst_reg = DST, \
		.src_reg = 0, \
		.off = 0, \
		.imm = IMM })

#define BPF_ALU32_IMM(OP, DST, IMM) \
	((struct bpf_insn) { \
		.code = BPF_ALU | BPF_OP(OP) | BPF_K, \
		.dst_reg = DST, \
		.src_reg = 0, \
		.off = 0, \
		.imm = IMM })
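
/*
 * Example (illustrative): the immediate forms, e.g. "r2 += -8" to step
 * a pointer down the stack and a 32-bit "w0 &= 0xff":
 *
 *	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
 *	BPF_ALU32_IMM(BPF_AND, BPF_REG_0, 0xff),
 */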

/* Short form of mov, dst_reg = src_reg */

#define BPF_MOV64_REG(DST, SRC) \
	((struct bpf_insn) { \
		.code = BPF_ALU64 | BPF_MOV | BPF_X, \
		.dst_reg = DST, \
		.src_reg = SRC, \
		.off = 0, \
		.imm = 0 })

#define BPF_MOV32_REG(DST, SRC) \
	((struct bpf_insn) { \
		.code = BPF_ALU | BPF_MOV | BPF_X, \
		.dst_reg = DST, \
		.src_reg = SRC, \
		.off = 0, \
		.imm = 0 })

/* Short form of mov, dst_reg = imm32 */

#define BPF_MOV64_IMM(DST, IMM) \
	((struct bpf_insn) { \
		.code = BPF_ALU64 | BPF_MOV | BPF_K, \
		.dst_reg = DST, \
		.src_reg = 0, \
		.off = 0, \
		.imm = IMM })

#define BPF_MOV32_IMM(DST, IMM) \
	((struct bpf_insn) { \
		.code = BPF_ALU | BPF_MOV | BPF_K, \
		.dst_reg = DST, \
		.src_reg = 0, \
		.off = 0, \
		.imm = IMM })
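
/*
 * Example (illustrative): two common uses of the mov forms, saving the
 * context pointer across helper calls and setting the return value:
 *
 *	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),	r6 = r1 (ctx)
 *	BPF_MOV64_IMM(BPF_REG_0, 0),		r0 = 0
 */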

/* BPF_LD_IMM64 macro encodes a single 'load 64-bit immediate' insn */
#define BPF_LD_IMM64(DST, IMM) \
	BPF_LD_IMM64_RAW(DST, 0, IMM)

#define BPF_LD_IMM64_RAW(DST, SRC, IMM) \
	((struct bpf_insn) { \
		.code = BPF_LD | BPF_DW | BPF_IMM, \
		.dst_reg = DST, \
		.src_reg = SRC, \
		.off = 0, \
		.imm = (__u32) (IMM) }), \
	((struct bpf_insn) { \
		.code = 0, /* zero is reserved opcode */ \
		.dst_reg = 0, \
		.src_reg = 0, \
		.off = 0, \
		.imm = ((__u64) (IMM)) >> 32 })
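
/*
 * Note (added): the macro expands to two struct bpf_insn initializers,
 * so it occupies two instruction slots in a program array; account for
 * that when computing jump offsets, e.g.:
 *
 *	BPF_LD_IMM64(BPF_REG_1, 0x123456789abcULL),
 */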

#ifndef BPF_PSEUDO_MAP_FD
# define BPF_PSEUDO_MAP_FD	1
#endif

/* pseudo BPF_LD_IMM64 insn used to refer to process-local map_fd */
#define BPF_LD_MAP_FD(DST, MAP_FD) \
	BPF_LD_IMM64_RAW(DST, BPF_PSEUDO_MAP_FD, MAP_FD)
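
/*
 * Example (illustrative): given a map fd, e.g. from bpf_create_map(),
 * load it into r1 ahead of a map helper call; the kernel rewrites the
 * fd into a real map pointer at program load time:
 *
 *	BPF_LD_MAP_FD(BPF_REG_1, map_fd),
 */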

/* Direct packet access, R0 = *(uint *) (skb->data + imm32) */

#define BPF_LD_ABS(SIZE, IMM) \
	((struct bpf_insn) { \
		.code = BPF_LD | BPF_SIZE(SIZE) | BPF_ABS, \
		.dst_reg = 0, \
		.src_reg = 0, \
		.off = 0, \
		.imm = IMM })
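
/*
 * Example (illustrative): fetch the IP protocol byte of a packet into
 * r0; LD_ABS takes the skb pointer as an implicit input in r6 (assumes
 * <linux/if_ether.h> and <linux/ip.h> for the offsets):
 *
 *	BPF_LD_ABS(BPF_B, ETH_HLEN + offsetof(struct iphdr, protocol)),
 */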

/* Memory load, dst_reg = *(uint *) (src_reg + off16) */

#define BPF_LDX_MEM(SIZE, DST, SRC, OFF) \
	((struct bpf_insn) { \
		.code = BPF_LDX | BPF_SIZE(SIZE) | BPF_MEM, \
		.dst_reg = DST, \
		.src_reg = SRC, \
		.off = OFF, \
		.imm = 0 })
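
/*
 * Example (illustrative): r0 = *(u32 *)(r7 + 0), e.g. reading a counter
 * out of a map value that r7 points at:
 *
 *	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_7, 0),
 */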

/* Memory store, *(uint *) (dst_reg + off16) = src_reg */

#define BPF_STX_MEM(SIZE, DST, SRC, OFF) \
	((struct bpf_insn) { \
		.code = BPF_STX | BPF_SIZE(SIZE) | BPF_MEM, \
		.dst_reg = DST, \
		.src_reg = SRC, \
		.off = OFF, \
		.imm = 0 })
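
/*
 * Example (illustrative): spill r0 to the stack,
 * *(u64 *)(fp - 8) = r0:
 *
 *	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
 */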

/* Atomic memory add, *(uint *)(dst_reg + off16) += src_reg */

#define BPF_STX_XADD(SIZE, DST, SRC, OFF) \
	((struct bpf_insn) { \
		.code = BPF_STX | BPF_SIZE(SIZE) | BPF_XADD, \
		.dst_reg = DST, \
		.src_reg = SRC, \
		.off = OFF, \
		.imm = 0 })
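
/*
 * Example (illustrative): atomically add r1 to a u64 counter that r0
 * points at; the verifier accepts only BPF_W and BPF_DW sizes for XADD:
 *
 *	BPF_STX_XADD(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
 */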

/* Memory store, *(uint *) (dst_reg + off16) = imm32 */

#define BPF_ST_MEM(SIZE, DST, OFF, IMM) \
	((struct bpf_insn) { \
		.code = BPF_ST | BPF_SIZE(SIZE) | BPF_MEM, \
		.dst_reg = DST, \
		.src_reg = 0, \
		.off = OFF, \
		.imm = IMM })
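
/*
 * Example (illustrative): zero a 4-byte stack slot to use as a map key,
 * *(u32 *)(fp - 4) = 0:
 *
 *	BPF_ST_MEM(BPF_W, BPF_REG_10, -4, 0),
 */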

/* Conditional jumps against registers, if (dst_reg 'op' src_reg) goto pc + off16 */

#define BPF_JMP_REG(OP, DST, SRC, OFF) \
	((struct bpf_insn) { \
		.code = BPF_JMP | BPF_OP(OP) | BPF_X, \
		.dst_reg = DST, \
		.src_reg = SRC, \
		.off = OFF, \
		.imm = 0 })
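
/*
 * Example (illustrative): skip the next two insns when r2 > r3; the
 * offset counts insns from the one following the jump:
 *
 *	BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_3, 2),
 */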

/* Conditional jumps against immediates, if (dst_reg 'op' imm32) goto pc + off16 */

#define BPF_JMP_IMM(OP, DST, IMM, OFF) \
	((struct bpf_insn) { \
		.code = BPF_JMP | BPF_OP(OP) | BPF_K, \
		.dst_reg = DST, \
		.src_reg = 0, \
		.off = OFF, \
		.imm = IMM })
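
/*
 * Example (illustrative): the classic NULL check on a helper's return
 * value, if (r0 == 0) goto pc + 2:
 *
 *	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
 */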

/* Raw code statement block */

#define BPF_RAW_INSN(CODE, DST, SRC, OFF, IMM) \
	((struct bpf_insn) { \
		.code = CODE, \
		.dst_reg = DST, \
		.src_reg = SRC, \
		.off = OFF, \
		.imm = IMM })
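
/*
 * Example (illustrative): encode a helper call, for which this header
 * has no dedicated macro:
 *
 *	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
 */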

/* Program exit */

#define BPF_EXIT_INSN() \
	((struct bpf_insn) { \
		.code = BPF_JMP | BPF_EXIT, \
		.dst_reg = 0, \
		.src_reg = 0, \
		.off = 0, \
		.imm = 0 })
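
/*
 * Putting it together (illustrative): a minimal "return 0" program as a
 * struct bpf_insn array, ready for the bpf(BPF_PROG_LOAD, ...) syscall:
 *
 *	struct bpf_insn prog[] = {
 *		BPF_MOV64_IMM(BPF_REG_0, 0),
 *		BPF_EXIT_INSN(),
 *	};
 */
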
#endif