/* eBPF mini library */
#ifndef __LIBBPF_H
#define __LIBBPF_H

#include <bpf/bpf.h>

struct bpf_insn;

/* ALU ops on registers, bpf_add|sub|...: dst_reg += src_reg */

#define BPF_ALU64_REG(OP, DST, SRC) \
	((struct bpf_insn) { \
		.code = BPF_ALU64 | BPF_OP(OP) | BPF_X, \
		.dst_reg = DST, \
		.src_reg = SRC, \
		.off = 0, \
		.imm = 0 })

#define BPF_ALU32_REG(OP, DST, SRC) \
	((struct bpf_insn) { \
		.code = BPF_ALU | BPF_OP(OP) | BPF_X, \
		.dst_reg = DST, \
		.src_reg = SRC, \
		.off = 0, \
		.imm = 0 })
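
/* Usage sketch (illustrative; BPF_REG_* and the BPF_ADD opcode come from
 * the kernel uapi headers):
 *
 *	BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1)	r0 += r1
 *	BPF_ALU32_REG(BPF_ADD, BPF_REG_0, BPF_REG_1)	same, on the lower
 *	32 bits; the upper 32 bits of r0 are zeroed
 */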

/* ALU ops on immediates, bpf_add|sub|...: dst_reg += imm32 */

#define BPF_ALU64_IMM(OP, DST, IMM) \
	((struct bpf_insn) { \
		.code = BPF_ALU64 | BPF_OP(OP) | BPF_K, \
		.dst_reg = DST, \
		.src_reg = 0, \
		.off = 0, \
		.imm = IMM })

#define BPF_ALU32_IMM(OP, DST, IMM) \
	((struct bpf_insn) { \
		.code = BPF_ALU | BPF_OP(OP) | BPF_K, \
		.dst_reg = DST, \
		.src_reg = 0, \
		.off = 0, \
		.imm = IMM })
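
/* Usage sketch (illustrative):
 *
 *	BPF_ALU64_IMM(BPF_LSH, BPF_REG_2, 32)		r2 <<= 32
 *	BPF_ALU32_IMM(BPF_AND, BPF_REG_2, 0xff)		keeps the low byte,
 *	zeroing the upper 32 bits of r2
 */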

/* Short form of mov, dst_reg = src_reg */

#define BPF_MOV64_REG(DST, SRC) \
	((struct bpf_insn) { \
		.code = BPF_ALU64 | BPF_MOV | BPF_X, \
		.dst_reg = DST, \
		.src_reg = SRC, \
		.off = 0, \
		.imm = 0 })

#define BPF_MOV32_REG(DST, SRC) \
	((struct bpf_insn) { \
		.code = BPF_ALU | BPF_MOV | BPF_X, \
		.dst_reg = DST, \
		.src_reg = SRC, \
		.off = 0, \
		.imm = 0 })

/* Short form of mov, dst_reg = imm32 */

#define BPF_MOV64_IMM(DST, IMM) \
	((struct bpf_insn) { \
		.code = BPF_ALU64 | BPF_MOV | BPF_K, \
		.dst_reg = DST, \
		.src_reg = 0, \
		.off = 0, \
		.imm = IMM })

#define BPF_MOV32_IMM(DST, IMM) \
	((struct bpf_insn) { \
		.code = BPF_ALU | BPF_MOV | BPF_K, \
		.dst_reg = DST, \
		.src_reg = 0, \
		.off = 0, \
		.imm = IMM })
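
/* Usage sketch (illustrative): the common prologue idiom of saving the
 * context and setting a default return value:
 *
 *	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1)	r6 = r1 (ctx)
 *	BPF_MOV64_IMM(BPF_REG_0, 0)		r0 = 0
 */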

/* BPF_LD_IMM64 macro encodes single 'load 64-bit immediate' insn */
#define BPF_LD_IMM64(DST, IMM) \
	BPF_LD_IMM64_RAW(DST, 0, IMM)

#define BPF_LD_IMM64_RAW(DST, SRC, IMM) \
	((struct bpf_insn) { \
		.code = BPF_LD | BPF_DW | BPF_IMM, \
		.dst_reg = DST, \
		.src_reg = SRC, \
		.off = 0, \
		.imm = (__u32) (IMM) }), \
	((struct bpf_insn) { \
		.code = 0, /* zero is reserved opcode */ \
		.dst_reg = 0, \
		.src_reg = 0, \
		.off = 0, \
		.imm = ((__u64) (IMM)) >> 32 })
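
/* Note that BPF_LD_IMM64() expands to *two* struct bpf_insn initializers,
 * so it occupies two slots in an instruction array; the second half
 * (reserved opcode 0) carries the upper 32 bits of the immediate.
 * Illustrative usage, with an arbitrary constant:
 *
 *	BPF_LD_IMM64(BPF_REG_1, 0x12345678deadbeefULL)
 */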

#ifndef BPF_PSEUDO_MAP_FD
# define BPF_PSEUDO_MAP_FD 1
#endif

/* pseudo BPF_LD_IMM64 insn used to refer to process-local map_fd */
#define BPF_LD_MAP_FD(DST, MAP_FD) \
	BPF_LD_IMM64_RAW(DST, BPF_PSEUDO_MAP_FD, MAP_FD)
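
/* Usage sketch (illustrative; map_fd is assumed to be a file descriptor
 * obtained earlier, e.g. from bpf_create_map()):
 *
 *	BPF_LD_MAP_FD(BPF_REG_1, map_fd)
 *
 * At program load time the kernel rewrites this pseudo insn, replacing
 * the fd with the actual map pointer.
 */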

/* Direct packet access, R0 = *(uint *) (skb->data + imm32) */

#define BPF_LD_ABS(SIZE, IMM) \
	((struct bpf_insn) { \
		.code = BPF_LD | BPF_SIZE(SIZE) | BPF_ABS, \
		.dst_reg = 0, \
		.src_reg = 0, \
		.off = 0, \
		.imm = IMM })
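
/* Usage sketch (illustrative; assumes ETH_HLEN from <linux/if_ether.h>
 * and struct iphdr from <linux/ip.h>). The skb is taken implicitly from
 * r6, and the loaded value lands in r0:
 *
 *	BPF_LD_ABS(BPF_B, ETH_HLEN + offsetof(struct iphdr, protocol))
 */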

/* Memory load, dst_reg = *(uint *) (src_reg + off16) */

#define BPF_LDX_MEM(SIZE, DST, SRC, OFF) \
	((struct bpf_insn) { \
		.code = BPF_LDX | BPF_SIZE(SIZE) | BPF_MEM, \
		.dst_reg = DST, \
		.src_reg = SRC, \
		.off = OFF, \
		.imm = 0 })
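
/* Usage sketch (illustrative): for a socket filter, where r1 points at
 * struct __sk_buff, read the packet length into r0:
 *
 *	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, offsetof(struct __sk_buff, len))
 */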

/* Memory store, *(uint *) (dst_reg + off16) = src_reg */

#define BPF_STX_MEM(SIZE, DST, SRC, OFF) \
	((struct bpf_insn) { \
		.code = BPF_STX | BPF_SIZE(SIZE) | BPF_MEM, \
		.dst_reg = DST, \
		.src_reg = SRC, \
		.off = OFF, \
		.imm = 0 })
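
/* Usage sketch (illustrative): spill r0 to the stack through the
 * read-only frame pointer r10, *(u64 *) (r10 - 8) = r0:
 *
 *	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8)
 */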
137
Chenbo Feng51570a52017-03-22 17:27:36 -0700138/* Atomic memory add, *(uint *)(dst_reg + off16) += src_reg */
139
140#define BPF_STX_XADD(SIZE, DST, SRC, OFF) \
141 ((struct bpf_insn) { \
142 .code = BPF_STX | BPF_SIZE(SIZE) | BPF_XADD, \
143 .dst_reg = DST, \
144 .src_reg = SRC, \
145 .off = OFF, \
146 .imm = 0 })
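
/* Usage sketch (illustrative): atomically add r1 to a 64-bit counter
 * pointed to by r0, e.g. a value returned by bpf_map_lookup_elem()
 * after a NULL check:
 *
 *	BPF_STX_XADD(BPF_DW, BPF_REG_0, BPF_REG_1, 0)
 */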

/* Memory store, *(uint *) (dst_reg + off16) = imm32 */

#define BPF_ST_MEM(SIZE, DST, OFF, IMM) \
	((struct bpf_insn) { \
		.code = BPF_ST | BPF_SIZE(SIZE) | BPF_MEM, \
		.dst_reg = DST, \
		.src_reg = 0, \
		.off = OFF, \
		.imm = IMM })
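
/* Usage sketch (illustrative): zero a stack slot, for instance before
 * using it as a map key, *(u32 *) (r10 - 4) = 0:
 *
 *	BPF_ST_MEM(BPF_W, BPF_REG_10, -4, 0)
 */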

/* Conditional jumps against registers, if (dst_reg 'op' src_reg) goto pc + off16 */

#define BPF_JMP_REG(OP, DST, SRC, OFF) \
	((struct bpf_insn) { \
		.code = BPF_JMP | BPF_OP(OP) | BPF_X, \
		.dst_reg = DST, \
		.src_reg = SRC, \
		.off = OFF, \
		.imm = 0 })

/* Conditional jumps against immediates, if (dst_reg 'op' imm32) goto pc + off16 */

#define BPF_JMP_IMM(OP, DST, IMM, OFF) \
	((struct bpf_insn) { \
		.code = BPF_JMP | BPF_OP(OP) | BPF_K, \
		.dst_reg = DST, \
		.src_reg = 0, \
		.off = OFF, \
		.imm = IMM })
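
/* Usage sketch (illustrative; the offset counts insns to skip after
 * this one):
 *
 *	BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_3, 5)	if r2 > r3 goto pc+5
 *	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2)		if r0 == 0 goto pc+2
 */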

/* Raw code statement block */

#define BPF_RAW_INSN(CODE, DST, SRC, OFF, IMM) \
	((struct bpf_insn) { \
		.code = CODE, \
		.dst_reg = DST, \
		.src_reg = SRC, \
		.off = OFF, \
		.imm = IMM })
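
/* Usage sketch (illustrative): BPF_RAW_INSN covers encodings the other
 * macros don't, such as a helper call (helper ids are in <linux/bpf.h>):
 *
 *	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem)
 */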

/* Program exit */

#define BPF_EXIT_INSN() \
	((struct bpf_insn) { \
		.code = BPF_JMP | BPF_EXIT, \
		.dst_reg = 0, \
		.src_reg = 0, \
		.off = 0, \
		.imm = 0 })
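
/* Putting it together (illustrative): a minimal "return 0" program
 * built from these macros:
 *
 *	struct bpf_insn prog[] = {
 *		BPF_MOV64_IMM(BPF_REG_0, 0),
 *		BPF_EXIT_INSN(),
 *	};
 */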

#endif /* __LIBBPF_H */