/* Copyright (c) 2012 The Chromium OS Authors. All rights reserved.
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include "bpf.h"

/* Basic BPF instruction setter. */
inline size_t set_bpf_instr(struct sock_filter *instr,
			    unsigned short code, unsigned int k,
			    unsigned char jt, unsigned char jf)
{
	instr->code = code;
	instr->k = k;
	instr->jt = jt;
	instr->jf = jf;
	return 1U;
}

/* Architecture validation. */
size_t bpf_validate_arch(struct sock_filter *filter)
{
	struct sock_filter *curr_block = filter;
	set_bpf_stmt(curr_block++, BPF_LD+BPF_W+BPF_ABS, arch_nr);
	set_bpf_jump(curr_block++,
			BPF_JMP+BPF_JEQ+BPF_K, ARCH_NR, SKIP, NEXT);
	set_bpf_ret_kill(curr_block++);
	return curr_block - filter;
}

/* Syscall number eval functions. */
size_t bpf_allow_syscall(struct sock_filter *filter, int nr)
{
	struct sock_filter *curr_block = filter;
	set_bpf_jump(curr_block++, BPF_JMP+BPF_JEQ+BPF_K, nr, NEXT, SKIP);
	set_bpf_stmt(curr_block++, BPF_RET+BPF_K, SECCOMP_RET_ALLOW);
	return curr_block - filter;
}

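/*
 * When the syscall number matches |nr|, jump to label |id|, where the
 * syscall's argument filter lives; otherwise fall through to the next block.
 */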
size_t bpf_allow_syscall_args(struct sock_filter *filter,
			      int nr, unsigned int id)
{
	struct sock_filter *curr_block = filter;
	set_bpf_jump(curr_block++, BPF_JMP+BPF_JEQ+BPF_K, nr, NEXT, SKIP);
	set_bpf_jump_lbl(curr_block++, id);
	return curr_block - filter;
}

/* Size-aware arg loaders. */
#if defined(BITS32)
size_t bpf_load_arg(struct sock_filter *filter, int argidx)
{
	set_bpf_stmt(filter, BPF_LD+BPF_W+BPF_ABS, LO_ARG(argidx));
	return 1U;
}
#elif defined(BITS64)
size_t bpf_load_arg(struct sock_filter *filter, int argidx)
{
	struct sock_filter *curr_block = filter;
	set_bpf_stmt(curr_block++, BPF_LD+BPF_W+BPF_ABS, LO_ARG(argidx));
	set_bpf_stmt(curr_block++, BPF_ST, 0); /* lo -> M[0] */
	set_bpf_stmt(curr_block++, BPF_LD+BPF_W+BPF_ABS, HI_ARG(argidx));
	set_bpf_stmt(curr_block++, BPF_ST, 1); /* hi -> M[1] */
	return curr_block - filter;
}
#endif
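
/*
 * Note: on 64-bit builds bpf_load_arg() leaves the argument's low word in
 * M[0], its high word in M[1], and (having loaded it last) the high word in
 * the accumulator; the 64-bit comparators below expect the high word in A
 * and reload the low word from M[0].
 */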

/* Size-aware equality comparison. */
size_t bpf_comp_jeq32(struct sock_filter *filter, unsigned long c,
		      unsigned char jt, unsigned char jf)
{
	unsigned int lo = (unsigned int)(c & 0xFFFFFFFF);
	set_bpf_jump(filter, BPF_JMP+BPF_JEQ+BPF_K, lo, jt, jf);
	return 1U;
}

/*
 * On 64 bits, we have to do two 32-bit comparisons.
 * We jump true when *both* comparisons are true.
 */
size_t bpf_comp_jeq64(struct sock_filter *filter, uint64_t c,
		      unsigned char jt, unsigned char jf)
{
	unsigned int lo = (unsigned int)(c & 0xFFFFFFFF);
	unsigned int hi = (unsigned int)(c >> 32);

	struct sock_filter *curr_block = filter;

	/* bpf_load_arg leaves |hi| in A */
	curr_block += bpf_comp_jeq32(curr_block, hi, NEXT, SKIPN(2) + jf);
	set_bpf_stmt(curr_block++, BPF_LD+BPF_MEM, 0); /* swap in |lo| */
	curr_block += bpf_comp_jeq32(curr_block, lo, jt, jf);

	return curr_block - filter;
}

/* Size-aware bitwise AND. */
size_t bpf_comp_jset32(struct sock_filter *filter, unsigned long mask,
		       unsigned char jt, unsigned char jf)
{
	unsigned int mask_lo = (unsigned int)(mask & 0xFFFFFFFF);
	set_bpf_jump(filter, BPF_JMP+BPF_JSET+BPF_K, mask_lo, jt, jf);
	return 1U;
}

/*
 * On 64 bits, we have to do two 32-bit bitwise ANDs.
 * We jump true when *either* bitwise AND is true (non-zero).
 */
size_t bpf_comp_jset64(struct sock_filter *filter, uint64_t mask,
		       unsigned char jt, unsigned char jf)
{
	unsigned int mask_lo = (unsigned int)(mask & 0xFFFFFFFF);
	unsigned int mask_hi = (unsigned int)(mask >> 32);

	struct sock_filter *curr_block = filter;

	/* bpf_load_arg leaves |hi| in A */
	curr_block += bpf_comp_jset32(curr_block, mask_hi, SKIPN(2) + jt, NEXT);
	set_bpf_stmt(curr_block++, BPF_LD+BPF_MEM, 0); /* swap in |lo| */
	curr_block += bpf_comp_jset32(curr_block, mask_lo, jt, jf);

	return curr_block - filter;
}

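/*
 * Builds a self-contained comparison block for one syscall argument:
 * loads argument |argidx|, compares it against |c| with |op| (EQ, NE,
 * or SET), and jumps to label |label_id| when the comparison does
 * *not* hold; when it holds, execution skips the label jump and falls
 * through past the block.  The block is calloc()ed and returned via
 * |pfilter|; the caller must free() it.  On allocation failure or an
 * unknown |op|, *pfilter is set to NULL and 0 is returned.
 */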
size_t bpf_arg_comp(struct sock_filter **pfilter,
		    int op, int argidx, unsigned long c, unsigned int label_id)
{
	struct sock_filter *filter = calloc(BPF_ARG_COMP_LEN + 1,
			sizeof(struct sock_filter));
	struct sock_filter *curr_block = filter;
	size_t (*comp_function)(struct sock_filter *filter, unsigned long k,
				unsigned char jt, unsigned char jf);
	int flip = 0;

	/* Don't dereference a failed allocation. */
	if (!filter) {
		*pfilter = NULL;
		return 0;
	}

	/* Load arg */
	curr_block += bpf_load_arg(curr_block, argidx);

	/* Jump type */
	switch (op) {
	case EQ:
		comp_function = bpf_comp_jeq;
		flip = 0;
		break;
	case NE:
		comp_function = bpf_comp_jeq;
		flip = 1;
		break;
	case SET:
		comp_function = bpf_comp_jset;
		flip = 0;
		break;
	default:
		free(filter);	/* Don't leak the block on an unknown op. */
		*pfilter = NULL;
		return 0;
	}

	/*
	 * It's easier for the rest of the code to have the true branch
	 * skip and the false branch fall through.
	 */
	unsigned char jt = flip ? NEXT : SKIP;
	unsigned char jf = flip ? SKIP : NEXT;
	curr_block += comp_function(curr_block, c, jt, jf);
	curr_block += set_bpf_jump_lbl(curr_block, label_id);

	*pfilter = filter;
	return curr_block - filter;
}

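/* Debugging helpers: dump a filter, or a whole program, one instruction per line. */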
void dump_bpf_filter(struct sock_filter *filter, unsigned short len)
{
	int i = 0;

	printf("len == %d\n", len);
	printf("filter:\n");
	for (i = 0; i < len; i++) {
		printf("%d: \t{ code=%#x, jt=%u, jf=%u, k=%#x \t}\n",
			i, filter[i].code, filter[i].jt, filter[i].jf, filter[i].k);
	}
}

void dump_bpf_prog(struct sock_fprog *fprog)
{
	struct sock_filter *filter = fprog->filter;
	unsigned short len = fprog->len;
	dump_bpf_filter(filter, len);
}

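/*
 * Resolves symbolic jumps: for each JUMP pseudo-instruction, replaces the
 * label id in |k| with the relative offset to the matching LABEL
 * pseudo-instruction, and records each label's location.  Returns 0 on
 * success, 1 on an unresolved or duplicated label, and -1 on an empty
 * filter.
 */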
int bpf_resolve_jumps(struct bpf_labels *labels,
		      struct sock_filter *filter, size_t count)
{
	struct sock_filter *begin = filter;
	size_t insn = count - 1;	/* size_t, so filters longer than 255 instructions don't truncate */

	if (count < 1)
		return -1;
	/*
	 * Walk it once, backwards, to build the label table and do fixups.
	 * Since backward jumps are disallowed by BPF, this is easy.
	 */
	for (filter += insn; filter >= begin; --insn, --filter) {
		if (filter->code != (BPF_JMP+BPF_JA))
			continue;
		switch ((filter->jt<<8)|filter->jf) {
		case (JUMP_JT<<8)|JUMP_JF:
			if (labels->labels[filter->k].location == 0xffffffff) {
				fprintf(stderr, "Unresolved label: '%s'\n",
					labels->labels[filter->k].label);
				return 1;
			}
			filter->k = labels->labels[filter->k].location -
				    (insn + 1);
			filter->jt = 0;
			filter->jf = 0;
			continue;
		case (LABEL_JT<<8)|LABEL_JF:
			if (labels->labels[filter->k].location != 0xffffffff) {
				fprintf(stderr, "Duplicate label use: '%s'\n",
					labels->labels[filter->k].label);
				return 1;
			}
			labels->labels[filter->k].location = insn;
			filter->k = 0; /* fall through */
			filter->jt = 0;
			filter->jf = 0;
			continue;
		}
	}
	return 0;
}

/* Simple lookup table for labels. */
int bpf_label_id(struct bpf_labels *labels, const char *label)
{
	struct __bpf_label *begin = labels->labels, *end;
	int id;
	if (labels->count == 0) {
		begin->label = strndup(label, MAX_BPF_LABEL_LEN);
		if (!begin->label) {
			return -1;
		}
		begin->location = 0xffffffff;
		labels->count++;
		return 0;
	}
	end = begin + labels->count;
	for (id = 0; begin < end; ++begin, ++id) {
		if (!strcmp(label, begin->label))
			return id;
	}
	begin->label = strndup(label, MAX_BPF_LABEL_LEN);
	if (!begin->label) {
		return -1;
	}
	begin->location = 0xffffffff;
	labels->count++;
	return id;
}

/* Free label strings. */
void free_label_strings(struct bpf_labels *labels)
{
	if (labels->count == 0)
		return;

	struct __bpf_label *begin = labels->labels, *end;

	end = begin + labels->count;
	for (; begin < end; ++begin) {
		if (begin->label)
			free((void*)(begin->label));
	}
}
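
/*
 * Illustrative sketch (not part of this file): one way these helpers might
 * compose into a complete filter.  |my_syscall_nr| and the buffer size are
 * hypothetical placeholders, and loading the syscall number assumes bpf.h
 * defines |syscall_nr| as the offset of the nr field of struct seccomp_data,
 * analogous to the |arch_nr| offset used by bpf_validate_arch() above.
 * The final set_bpf_ret_kill() makes any unmatched syscall kill the process.
 *
 *	struct sock_filter filter[16];
 *	struct sock_filter *curr = filter;
 *
 *	curr += bpf_validate_arch(curr);
 *	set_bpf_stmt(curr++, BPF_LD+BPF_W+BPF_ABS, syscall_nr);
 *	curr += bpf_allow_syscall(curr, my_syscall_nr);
 *	set_bpf_ret_kill(curr++);
 *
 *	struct sock_fprog prog = {
 *		.len = (unsigned short)(curr - filter),
 *		.filter = filter,
 *	};
 *	prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
 *	prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog);
 */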