blob: 780f4e0d76de280a68726c5af622334052610fed [file] [log] [blame]
/* Copyright (c) 2012 The Chromium OS Authors. All rights reserved.
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */
5
6#include <stdint.h>
7#include <stdio.h>
8#include <stdlib.h>
9#include <string.h>
10
11#include "bpf.h"
12
/*
 * Common jump targets, expressed as "instructions to skip" for a BPF
 * conditional jump (jt/jf fields).
 */
#define NEXT 0		/* Fall through to the next instruction. */
#define SKIP 1		/* Skip exactly one instruction. */
#define SKIPN(_n) (_n)	/* Skip |_n| instructions. */
17
18inline size_t set_bpf_instr(struct sock_filter *instr,
19 unsigned short code, unsigned int k,
20 unsigned char jt, unsigned char jf)
21{
22 instr->code = code;
23 instr->k = k;
24 instr->jt = jt;
25 instr->jf = jf;
26 return 1U;
27}
28
/* Size-aware arg loaders. */
#if defined(BITS32)
/* 32-bit: the whole syscall argument fits in one word; load it into A. */
size_t bpf_load_arg(struct sock_filter *filter, int argidx)
{
	set_bpf_stmt(filter, BPF_LD+BPF_W+BPF_ABS, LO_ARG(argidx));
	return 1U;
}
#elif defined(BITS64)
/*
 * 64-bit: load both halves of argument |argidx| into scratch memory
 * (lo -> M[0], hi -> M[1]).  The second load leaves the high word in A,
 * which the 64-bit comparison helpers rely on.  Returns the number of
 * instructions emitted (4).
 */
size_t bpf_load_arg(struct sock_filter *filter, int argidx)
{
	struct sock_filter *curr_block = filter;
	set_bpf_stmt(curr_block++, BPF_LD+BPF_W+BPF_ABS, LO_ARG(argidx));
	set_bpf_stmt(curr_block++, BPF_ST, 0); /* lo -> M[0] */
	set_bpf_stmt(curr_block++, BPF_LD+BPF_W+BPF_ABS, HI_ARG(argidx));
	set_bpf_stmt(curr_block++, BPF_ST, 1); /* hi -> M[1] */
	return curr_block - filter;
}
#endif
47
48/* Size-aware comparisons. */
Jorge Lucangeli Obesedb1d8e2012-04-26 10:05:09 -070049size_t bpf_comp_jeq32(struct sock_filter *filter, unsigned long c,
Jorge Lucangeli Obesfc8ab532012-03-20 10:14:31 -070050 unsigned char jt, unsigned char jf)
51{
Jorge Lucangeli Obesedb1d8e2012-04-26 10:05:09 -070052 unsigned int lo = (unsigned int)(c & 0xFFFFFFFF);
53 set_bpf_jump(filter, BPF_JMP+BPF_JEQ+BPF_K, lo, jt, jf);
Jorge Lucangeli Obesfc8ab532012-03-20 10:14:31 -070054 return 1U;
55}
56
/*
 * Emits a 64-bit equality test of the loaded argument against |c|.
 * Assumes bpf_load_arg() ran just before, leaving the argument's high
 * word in A and its low word in scratch M[0].  |jt|/|jf| count extra
 * instructions to skip, measured from the end of this block.  Returns
 * the number of instructions emitted (3).
 */
size_t bpf_comp_jeq64(struct sock_filter *filter, uint64_t c,
		      unsigned char jt, unsigned char jf)
{
	unsigned int lo = (unsigned int)(c & 0xFFFFFFFF);
	unsigned int hi = (unsigned int)(c >> 32);

	struct sock_filter *curr_block = filter;

	/* bpf_load_arg leaves |hi| in A */
	/*
	 * If the high words differ the whole compare fails: skip the two
	 * remaining instructions of this block plus the caller's |jf|.
	 */
	curr_block += bpf_comp_jeq32(curr_block, hi, NEXT, SKIPN(2) + jf);
	set_bpf_stmt(curr_block++, BPF_LD+BPF_MEM, 0); /* swap in lo */
	curr_block += bpf_comp_jeq32(curr_block, lo, jt, jf);

	return curr_block - filter;
}
72
/* bpf_comp_jeq: equality compare sized to match bpf_load_arg() above. */
#if defined(BITS32)
#define bpf_comp_jeq bpf_comp_jeq32

#elif defined(BITS64)
#define bpf_comp_jeq bpf_comp_jeq64
#endif
79
80size_t bpf_arg_comp(struct sock_filter **pfilter,
81 int op, int argidx, unsigned long c, unsigned int label_id)
82{
83 struct sock_filter *filter = calloc(BPF_ARG_COMP_LEN + 1,
84 sizeof(struct sock_filter));
85 struct sock_filter *curr_block = filter;
86 int flip = 0;
87
88 /* Load arg */
89 curr_block += bpf_load_arg(curr_block, argidx);
90
91 /* Jump type */
92 switch (op) {
93 case EQ:
94 flip = 0;
95 break;
96 case NE:
97 flip = 1;
98 break;
99 default:
100 *pfilter = NULL;
101 return 0;
102 }
103
104 /*
105 * It's easier for the rest of the code to have the true branch
106 * skip and the false branch fall through.
107 */
108 unsigned char jt = flip ? NEXT : SKIP;
109 unsigned char jf = flip ? SKIP : NEXT;
110 curr_block += bpf_comp_jeq(curr_block, c, jt, jf);
111 curr_block += set_bpf_jump_lbl(curr_block, label_id);
112
113 *pfilter = filter;
114 return curr_block - filter;
115}
116
117void dump_bpf_filter(struct sock_filter *filter, unsigned short len)
118{
119 int i = 0;
120
121 printf("len == %d\n", len);
122 printf("filter:\n");
123 for (i = 0; i < len; i++) {
124 printf("%d: \t{ code=%#x, jt=%u, jf=%u, k=%#x \t}\n",
125 i, filter[i].code, filter[i].jt, filter[i].jf, filter[i].k);
126 }
127}
128
129void dump_bpf_prog(struct sock_fprog *fprog)
130{
131 struct sock_filter *filter = fprog->filter;
132 unsigned short len = fprog->len;
133 dump_bpf_filter(filter, len);
134}
135
136int bpf_resolve_jumps(struct bpf_labels *labels,
137 struct sock_filter *filter, size_t count)
138{
139 struct sock_filter *begin = filter;
140 __u8 insn = count - 1;
141
142 if (count < 1)
143 return -1;
144 /*
145 * Walk it once, backwards, to build the label table and do fixups.
146 * Since backward jumps are disallowed by BPF, this is easy.
147 */
148 for (filter += insn; filter >= begin; --insn, --filter) {
149 if (filter->code != (BPF_JMP+BPF_JA))
150 continue;
151 switch ((filter->jt<<8)|filter->jf) {
152 case (JUMP_JT<<8)|JUMP_JF:
153 if (labels->labels[filter->k].location == 0xffffffff) {
154 fprintf(stderr, "Unresolved label: '%s'\n",
155 labels->labels[filter->k].label);
156 return 1;
157 }
158 filter->k = labels->labels[filter->k].location -
159 (insn + 1);
160 filter->jt = 0;
161 filter->jf = 0;
162 continue;
163 case (LABEL_JT<<8)|LABEL_JF:
164 if (labels->labels[filter->k].location != 0xffffffff) {
165 fprintf(stderr, "Duplicate label use: '%s'\n",
166 labels->labels[filter->k].label);
167 return 1;
168 }
169 labels->labels[filter->k].location = insn;
170 filter->k = 0; /* fall through */
171 filter->jt = 0;
172 filter->jf = 0;
173 continue;
174 }
175 }
176 return 0;
177}
178
179/* Simple lookup table for labels. */
180int bpf_label_id(struct bpf_labels *labels, const char *label)
181{
182 struct __bpf_label *begin = labels->labels, *end;
183 int id;
184 if (labels->count == 0) {
185 begin->label = strndup(label, MAX_BPF_LABEL_LEN);
186 if (!begin->label) {
187 return -1;
188 }
189 begin->location = 0xffffffff;
190 labels->count++;
191 return 0;
192 }
193 end = begin + labels->count;
194 for (id = 0; begin < end; ++begin, ++id) {
195 if (!strcmp(label, begin->label))
196 return id;
197 }
198 begin->label = strndup(label, MAX_BPF_LABEL_LEN);
199 if (!begin->label) {
200 return -1;
201 }
202 begin->location = 0xffffffff;
203 labels->count++;
204 return id;
205}
206
207/* Free label strings. */
208void free_label_strings(struct bpf_labels *labels)
209{
210 struct __bpf_label *begin = labels->labels, *end;
211
212 end = begin + labels->count;
213 for (; begin < end; ++begin) {
214 if (begin->label)
215 free((void*)(begin->label));
216 }
217}