blob: a0b6d3f31aaeb42cf460321885ca3b7d4bc53745 [file] [log] [blame]
Jorge Lucangeli Obesfc8ab532012-03-20 10:14:31 -07001/* Copyright (c) 2012 The Chromium OS Authors. All rights reserved.
2 * Use of this source code is governed by a BSD-style license that can be
3 * found in the LICENSE file.
4 */
5
6#include <stdint.h>
7#include <stdio.h>
8#include <stdlib.h>
9#include <string.h>
10
11#include "bpf.h"
12
/* Basic BPF instruction setter. */
/*
 * Fills in one sock_filter instruction and returns the number of
 * instructions written (always 1), so callers can accumulate offsets.
 *
 * Note: this was previously declared plain `inline`.  In C99/C11, an
 * `inline` definition without a matching `extern` declaration provides
 * no external definition, so out-of-line calls from other translation
 * units fail to link.  A regular external function keeps every caller
 * working and lets the optimizer inline it anyway.
 */
size_t set_bpf_instr(struct sock_filter *instr,
		     unsigned short code, unsigned int k,
		     unsigned char jt, unsigned char jf)
{
	instr->code = code;
	instr->k = k;
	instr->jt = jt;
	instr->jf = jf;
	return 1U;
}
24
Jorge Lucangeli Obesd4467262012-03-23 16:19:59 -070025/* Architecture validation. */
26size_t bpf_validate_arch(struct sock_filter *filter)
27{
28 struct sock_filter *curr_block = filter;
29 set_bpf_stmt(curr_block++, BPF_LD+BPF_W+BPF_ABS, arch_nr);
30 set_bpf_jump(curr_block++,
31 BPF_JMP+BPF_JEQ+BPF_K, ARCH_NR, SKIP, NEXT);
32 set_bpf_ret_kill(curr_block++);
33 return curr_block - filter;
34}
35
36/* Syscall number eval functions. */
37size_t bpf_allow_syscall(struct sock_filter *filter, int nr)
38{
39 struct sock_filter *curr_block = filter;
40 set_bpf_jump(curr_block++, BPF_JMP+BPF_JEQ+BPF_K, nr, NEXT, SKIP);
41 set_bpf_stmt(curr_block++, BPF_RET+BPF_K, SECCOMP_RET_ALLOW);
42 return curr_block - filter;
43}
44
45size_t bpf_allow_syscall_args(struct sock_filter *filter,
46 int nr, unsigned int id)
47{
48 struct sock_filter *curr_block = filter;
49 set_bpf_jump(curr_block++, BPF_JMP+BPF_JEQ+BPF_K, nr, NEXT, SKIP);
50 set_bpf_jump_lbl(curr_block++, id);
51 return curr_block - filter;
52}
53
/* Size-aware arg loaders. */
#if defined(BITS32)
/*
 * 32-bit: a syscall argument is a single word; load it straight into A.
 * Returns the number of instructions written.
 */
size_t bpf_load_arg(struct sock_filter *filter, int argidx)
{
	set_bpf_stmt(filter, BPF_LD+BPF_W+BPF_ABS, LO_ARG(argidx));
	return 1U;
}
#elif defined(BITS64)
/*
 * 64-bit: stash both halves of the argument in scratch memory — low
 * word in M[0], high word in M[1].  The high word is loaded last, so
 * it is left in A for the comparison helpers (see bpf_comp_jeq64).
 * Returns the number of instructions written.
 */
size_t bpf_load_arg(struct sock_filter *filter, int argidx)
{
	struct sock_filter *pc = filter;

	set_bpf_stmt(pc, BPF_LD+BPF_W+BPF_ABS, LO_ARG(argidx));
	pc++;
	set_bpf_stmt(pc, BPF_ST, 0);	/* M[0] <- lo */
	pc++;
	set_bpf_stmt(pc, BPF_LD+BPF_W+BPF_ABS, HI_ARG(argidx));
	pc++;
	set_bpf_stmt(pc, BPF_ST, 1);	/* M[1] <- hi */
	pc++;
	return (size_t)(pc - filter);
}
#endif
72
73/* Size-aware comparisons. */
Jorge Lucangeli Obesedb1d8e2012-04-26 10:05:09 -070074size_t bpf_comp_jeq32(struct sock_filter *filter, unsigned long c,
Jorge Lucangeli Obesfc8ab532012-03-20 10:14:31 -070075 unsigned char jt, unsigned char jf)
76{
Jorge Lucangeli Obesedb1d8e2012-04-26 10:05:09 -070077 unsigned int lo = (unsigned int)(c & 0xFFFFFFFF);
78 set_bpf_jump(filter, BPF_JMP+BPF_JEQ+BPF_K, lo, jt, jf);
Jorge Lucangeli Obesfc8ab532012-03-20 10:14:31 -070079 return 1U;
80}
81
/*
 * Emits a 64-bit equality test as two chained 32-bit tests.  Expects
 * bpf_load_arg() to have run first: high word in A (and M[1]), low
 * word in M[0].  |jt|/|jf| are the relative branch offsets for the
 * overall 64-bit result.  Returns the number of instructions written.
 */
size_t bpf_comp_jeq64(struct sock_filter *filter, uint64_t c,
		unsigned char jt, unsigned char jf)
{
	unsigned int lo = (unsigned int)(c & 0xFFFFFFFF);
	unsigned int hi = (unsigned int)(c >> 32);

	struct sock_filter *curr_block = filter;

	/* bpf_load_arg leaves |hi| in A */
	/*
	 * High words equal: fall through to the low-word test.  High
	 * words differ: whole compare is false — jump past the two
	 * remaining instructions (lo load + lo test) plus |jf|.
	 */
	curr_block += bpf_comp_jeq32(curr_block, hi, NEXT, SKIPN(2) + jf);
	set_bpf_stmt(curr_block++, BPF_LD+BPF_MEM, 0); /* swap in lo */
	curr_block += bpf_comp_jeq32(curr_block, lo, jt, jf);

	return curr_block - filter;
}
97
/* Pick the comparison width matching the platform's argument size. */
#if defined(BITS32)
#define bpf_comp_jeq bpf_comp_jeq32
#elif defined(BITS64)
#define bpf_comp_jeq bpf_comp_jeq64
#endif
103
104size_t bpf_arg_comp(struct sock_filter **pfilter,
105 int op, int argidx, unsigned long c, unsigned int label_id)
106{
107 struct sock_filter *filter = calloc(BPF_ARG_COMP_LEN + 1,
108 sizeof(struct sock_filter));
109 struct sock_filter *curr_block = filter;
110 int flip = 0;
111
112 /* Load arg */
113 curr_block += bpf_load_arg(curr_block, argidx);
114
115 /* Jump type */
116 switch (op) {
117 case EQ:
118 flip = 0;
119 break;
120 case NE:
121 flip = 1;
122 break;
123 default:
124 *pfilter = NULL;
125 return 0;
126 }
127
128 /*
129 * It's easier for the rest of the code to have the true branch
130 * skip and the false branch fall through.
131 */
132 unsigned char jt = flip ? NEXT : SKIP;
133 unsigned char jf = flip ? SKIP : NEXT;
134 curr_block += bpf_comp_jeq(curr_block, c, jt, jf);
135 curr_block += set_bpf_jump_lbl(curr_block, label_id);
136
137 *pfilter = filter;
138 return curr_block - filter;
139}
140
/*
 * Prints a human-readable dump of |filter| (|len| instructions) to
 * stdout, one instruction per line.
 */
void dump_bpf_filter(struct sock_filter *filter, unsigned short len)
{
	int idx;

	printf("len == %d\n", len);
	printf("filter:\n");
	for (idx = 0; idx < len; idx++) {
		struct sock_filter *insn = &filter[idx];
		printf("%d: \t{ code=%#x, jt=%u, jf=%u, k=%#x \t}\n",
			idx, insn->code, insn->jt, insn->jf, insn->k);
	}
}
152
/* Convenience wrapper: dump an entire sock_fprog. */
void dump_bpf_prog(struct sock_fprog *fprog)
{
	dump_bpf_filter(fprog->filter, fprog->len);
}
159
160int bpf_resolve_jumps(struct bpf_labels *labels,
161 struct sock_filter *filter, size_t count)
162{
163 struct sock_filter *begin = filter;
164 __u8 insn = count - 1;
165
166 if (count < 1)
167 return -1;
168 /*
169 * Walk it once, backwards, to build the label table and do fixups.
170 * Since backward jumps are disallowed by BPF, this is easy.
171 */
172 for (filter += insn; filter >= begin; --insn, --filter) {
173 if (filter->code != (BPF_JMP+BPF_JA))
174 continue;
175 switch ((filter->jt<<8)|filter->jf) {
176 case (JUMP_JT<<8)|JUMP_JF:
177 if (labels->labels[filter->k].location == 0xffffffff) {
178 fprintf(stderr, "Unresolved label: '%s'\n",
179 labels->labels[filter->k].label);
180 return 1;
181 }
182 filter->k = labels->labels[filter->k].location -
183 (insn + 1);
184 filter->jt = 0;
185 filter->jf = 0;
186 continue;
187 case (LABEL_JT<<8)|LABEL_JF:
188 if (labels->labels[filter->k].location != 0xffffffff) {
189 fprintf(stderr, "Duplicate label use: '%s'\n",
190 labels->labels[filter->k].label);
191 return 1;
192 }
193 labels->labels[filter->k].location = insn;
194 filter->k = 0; /* fall through */
195 filter->jt = 0;
196 filter->jf = 0;
197 continue;
198 }
199 }
200 return 0;
201}
202
203/* Simple lookup table for labels. */
204int bpf_label_id(struct bpf_labels *labels, const char *label)
205{
206 struct __bpf_label *begin = labels->labels, *end;
207 int id;
208 if (labels->count == 0) {
209 begin->label = strndup(label, MAX_BPF_LABEL_LEN);
210 if (!begin->label) {
211 return -1;
212 }
213 begin->location = 0xffffffff;
214 labels->count++;
215 return 0;
216 }
217 end = begin + labels->count;
218 for (id = 0; begin < end; ++begin, ++id) {
219 if (!strcmp(label, begin->label))
220 return id;
221 }
222 begin->label = strndup(label, MAX_BPF_LABEL_LEN);
223 if (!begin->label) {
224 return -1;
225 }
226 begin->location = 0xffffffff;
227 labels->count++;
228 return id;
229}
230
231/* Free label strings. */
232void free_label_strings(struct bpf_labels *labels)
233{
Jorge Lucangeli Obesd4467262012-03-23 16:19:59 -0700234 if (labels->count == 0)
235 return;
236
Jorge Lucangeli Obesfc8ab532012-03-20 10:14:31 -0700237 struct __bpf_label *begin = labels->labels, *end;
238
239 end = begin + labels->count;
240 for (; begin < end; ++begin) {
241 if (begin->label)
242 free((void*)(begin->label));
243 }
244}