/*
 * Berkeley Packet Filter based traffic classifier
 *
 * Might be used to classify traffic through flexible, user-defined and
 * possibly JIT-ed BPF filters for traffic control as an alternative to
 * ematches.
 *
 * (C) 2013 Daniel Borkmann <dborkman@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/skbuff.h>
#include <linux/filter.h>
#include <net/rtnetlink.h>
#include <net/pkt_cls.h>
#include <net/sock.h>

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Daniel Borkmann <dborkman@redhat.com>");
MODULE_DESCRIPTION("TC BPF based classifier");

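/* A classifier instance keeps its programs on an RCU-protected list
 * (cls_bpf_head); each entry (cls_bpf_prog) pairs the executable
 * bpf_prog with the raw sock_filter opcodes, which are kept around
 * only so they can be dumped back to userspace.
 *
 * Userspace attaches classic BPF bytecode via the TCA_BPF_* netlink
 * attributes; with iproute2 this is roughly (exact syntax may vary
 * between versions):
 *
 *   tc filter add dev em1 parent 1: bpf bytecode '...' flowid 1:1
 *
 * where the opcodes arrive in TCA_BPF_OPS/TCA_BPF_OPS_LEN and the
 * default classid (1:1) in TCA_BPF_CLASSID.
 */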
struct cls_bpf_head {
	struct list_head plist;
	u32 hgen;
	struct rcu_head rcu;
};

struct cls_bpf_prog {
	struct bpf_prog *filter;
	struct sock_filter *bpf_ops;
	struct tcf_exts exts;
	struct tcf_result res;
	struct list_head link;
	u32 handle;
	u16 bpf_len;
	struct tcf_proto *tp;
	struct rcu_head rcu;
};

static const struct nla_policy bpf_policy[TCA_BPF_MAX + 1] = {
	[TCA_BPF_CLASSID]	= { .type = NLA_U32 },
	[TCA_BPF_OPS_LEN]	= { .type = NLA_U16 },
	[TCA_BPF_OPS]		= { .type = NLA_BINARY,
				    .len = sizeof(struct sock_filter) * BPF_MAXINSNS },
};

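/* The classifier hook, called with the RCU-bh read lock held (hence
 * rcu_dereference_bh). Programs run in list order until one claims the
 * packet. Return code convention of the BPF programs: 0 means no match
 * (fall through to the next program), -1 means match using the default
 * classid from TCA_BPF_CLASSID, any other value is taken as the classid
 * itself.
 */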
static int cls_bpf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
			    struct tcf_result *res)
{
	struct cls_bpf_head *head = rcu_dereference_bh(tp->root);
	struct cls_bpf_prog *prog;
	int ret;

	list_for_each_entry_rcu(prog, &head->plist, link) {
		int filter_res = BPF_PROG_RUN(prog->filter, skb);

		if (filter_res == 0)
			continue;

		*res = prog->res;
		if (filter_res != -1)
			res->classid = filter_res;

		ret = tcf_exts_exec(skb, &prog->exts, res);
		if (ret < 0)
			continue;

		return ret;
	}

	return -1;
}

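/* Set up a fresh classifier instance: allocate the head and publish it
 * to readers with rcu_assign_pointer().
 */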
static int cls_bpf_init(struct tcf_proto *tp)
{
	struct cls_bpf_head *head;

	head = kzalloc(sizeof(*head), GFP_KERNEL);
	if (head == NULL)
		return -ENOBUFS;

	INIT_LIST_HEAD_RCU(&head->plist);
	rcu_assign_pointer(tp->root, head);

	return 0;
}

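/* Freeing happens in two steps: cls_bpf_delete_prog() releases the
 * actions, the bpf_prog and the opcode copy; __cls_bpf_delete_prog() is
 * the call_rcu() callback that defers this until all readers of the
 * RCU-protected list have finished.
 */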
static void cls_bpf_delete_prog(struct tcf_proto *tp, struct cls_bpf_prog *prog)
{
	tcf_exts_destroy(&prog->exts);

	bpf_prog_destroy(prog->filter);

	kfree(prog->bpf_ops);
	kfree(prog);
}

static void __cls_bpf_delete_prog(struct rcu_head *rcu)
{
	struct cls_bpf_prog *prog = container_of(rcu, struct cls_bpf_prog, rcu);

	cls_bpf_delete_prog(prog->tp, prog);
}

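/* Unlink one program under RTNL; the actual free is deferred past the
 * RCU grace period so concurrent classify() calls stay safe.
 */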
static int cls_bpf_delete(struct tcf_proto *tp, unsigned long arg)
{
	struct cls_bpf_head *head = rtnl_dereference(tp->root);
	struct cls_bpf_prog *prog, *todel = (struct cls_bpf_prog *) arg;

	list_for_each_entry(prog, &head->plist, link) {
		if (prog == todel) {
			list_del_rcu(&prog->link);
			tcf_unbind_filter(tp, &prog->res);
			call_rcu(&prog->rcu, __cls_bpf_delete_prog);
			return 0;
		}
	}

	return -ENOENT;
}

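/* Destroy the whole instance: unlink and defer-free every program, then
 * free the head itself after the grace period via kfree_rcu().
 */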
static void cls_bpf_destroy(struct tcf_proto *tp)
{
	struct cls_bpf_head *head = rtnl_dereference(tp->root);
	struct cls_bpf_prog *prog, *tmp;

	list_for_each_entry_safe(prog, tmp, &head->plist, link) {
		list_del_rcu(&prog->link);
		tcf_unbind_filter(tp, &prog->res);
		call_rcu(&prog->rcu, __cls_bpf_delete_prog);
	}

	RCU_INIT_POINTER(tp->root, NULL);
	kfree_rcu(head, rcu);
}

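/* Look up a program by handle, returning it as the opaque cookie the
 * tcf_proto_ops API expects, or 0 if the handle is unknown.
 */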
static unsigned long cls_bpf_get(struct tcf_proto *tp, u32 handle)
{
	struct cls_bpf_head *head = rtnl_dereference(tp->root);
	struct cls_bpf_prog *prog;
	unsigned long ret = 0UL;

	if (head == NULL)
		return 0UL;

	list_for_each_entry_rcu(prog, &head->plist, link) {
		if (prog->handle == handle) {
			ret = (unsigned long) prog;
			break;
		}
	}

	return ret;
}

static void cls_bpf_put(struct tcf_proto *tp, unsigned long f)
{
}

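/* Parse and validate the attributes for a new or changed program: copy
 * the classic BPF opcodes out of TCA_BPF_OPS and hand them to
 * bpf_prog_create(), which checks the filter and may JIT-compile it.
 * The raw opcodes stay in prog->bpf_ops for later dumps to userspace.
 */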
static int cls_bpf_modify_existing(struct net *net, struct tcf_proto *tp,
				   struct cls_bpf_prog *prog,
				   unsigned long base, struct nlattr **tb,
				   struct nlattr *est, bool ovr)
{
	struct sock_filter *bpf_ops;
	struct tcf_exts exts;
	struct sock_fprog_kern tmp;
	struct bpf_prog *fp;
	u16 bpf_size, bpf_len;
	u32 classid;
	int ret;

	if (!tb[TCA_BPF_OPS_LEN] || !tb[TCA_BPF_OPS] || !tb[TCA_BPF_CLASSID])
		return -EINVAL;

	tcf_exts_init(&exts, TCA_BPF_ACT, TCA_BPF_POLICE);
	ret = tcf_exts_validate(net, tp, tb, est, &exts, ovr);
	if (ret < 0)
		return ret;

	classid = nla_get_u32(tb[TCA_BPF_CLASSID]);
	bpf_len = nla_get_u16(tb[TCA_BPF_OPS_LEN]);
	if (bpf_len > BPF_MAXINSNS || bpf_len == 0) {
		ret = -EINVAL;
		goto errout;
	}

	bpf_size = bpf_len * sizeof(*bpf_ops);
	bpf_ops = kzalloc(bpf_size, GFP_KERNEL);
	if (bpf_ops == NULL) {
		ret = -ENOMEM;
		goto errout;
	}

	memcpy(bpf_ops, nla_data(tb[TCA_BPF_OPS]), bpf_size);

	tmp.len = bpf_len;
	tmp.filter = bpf_ops;

	ret = bpf_prog_create(&fp, &tmp);
	if (ret)
		goto errout_free;

	prog->bpf_len = bpf_len;
	prog->bpf_ops = bpf_ops;
	prog->filter = fp;
	prog->res.classid = classid;

	tcf_bind_filter(tp, &prog->res, base);
	tcf_exts_change(tp, &prog->exts, &exts);

	return 0;
errout_free:
	kfree(bpf_ops);
errout:
	tcf_exts_destroy(&exts);
	return ret;
}

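/* Auto-generate a handle when userspace passed 0: advance the 31-bit
 * generator until an unused value is found, or give up and return 0
 * after 0x80000000 attempts.
 */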
static u32 cls_bpf_grab_new_handle(struct tcf_proto *tp,
				   struct cls_bpf_head *head)
{
	unsigned int i = 0x80000000;
	u32 handle;

	do {
		if (++head->hgen == 0x7FFFFFFF)
			head->hgen = 1;
	} while (--i > 0 && cls_bpf_get(tp, head->hgen));

	if (unlikely(i == 0)) {
		pr_err("Insufficient number of handles\n");
		handle = 0;
	} else {
		handle = head->hgen;
	}

	return handle;
}

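/* Add a new program or replace an existing one. A replacement is built
 * completely before being swapped in via list_replace_rcu(), so readers
 * observe either the old or the new program, never a mix.
 */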
static int cls_bpf_change(struct net *net, struct sk_buff *in_skb,
			  struct tcf_proto *tp, unsigned long base,
			  u32 handle, struct nlattr **tca,
			  unsigned long *arg, bool ovr)
{
	struct cls_bpf_head *head = rtnl_dereference(tp->root);
	struct cls_bpf_prog *oldprog = (struct cls_bpf_prog *) *arg;
	struct nlattr *tb[TCA_BPF_MAX + 1];
	struct cls_bpf_prog *prog;
	int ret;

	if (tca[TCA_OPTIONS] == NULL)
		return -EINVAL;

	ret = nla_parse_nested(tb, TCA_BPF_MAX, tca[TCA_OPTIONS], bpf_policy);
	if (ret < 0)
		return ret;

	prog = kzalloc(sizeof(*prog), GFP_KERNEL);
	if (!prog)
		return -ENOBUFS;

	tcf_exts_init(&prog->exts, TCA_BPF_ACT, TCA_BPF_POLICE);

	if (oldprog) {
		if (handle && oldprog->handle != handle) {
			ret = -EINVAL;
			goto errout;
		}
	}

	if (handle == 0)
		prog->handle = cls_bpf_grab_new_handle(tp, head);
	else
		prog->handle = handle;
	if (prog->handle == 0) {
		ret = -EINVAL;
		goto errout;
	}

	ret = cls_bpf_modify_existing(net, tp, prog, base, tb, tca[TCA_RATE], ovr);
	if (ret < 0)
		goto errout;

	if (oldprog) {
		list_replace_rcu(&oldprog->link, &prog->link);
		tcf_unbind_filter(tp, &oldprog->res);
		call_rcu(&oldprog->rcu, __cls_bpf_delete_prog);
	} else {
		list_add_rcu(&prog->link, &head->plist);
	}

	*arg = (unsigned long) prog;
	return 0;
errout:
	kfree(prog);

	return ret;
}

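/* Dump a program as netlink attributes: classid, opcode count, the raw
 * opcodes, and any attached actions plus their statistics.
 */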
static int cls_bpf_dump(struct net *net, struct tcf_proto *tp, unsigned long fh,
			struct sk_buff *skb, struct tcmsg *tm)
{
	struct cls_bpf_prog *prog = (struct cls_bpf_prog *) fh;
	struct nlattr *nest, *nla;

	if (prog == NULL)
		return skb->len;

	tm->tcm_handle = prog->handle;

	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;

	if (nla_put_u32(skb, TCA_BPF_CLASSID, prog->res.classid))
		goto nla_put_failure;
	if (nla_put_u16(skb, TCA_BPF_OPS_LEN, prog->bpf_len))
		goto nla_put_failure;

	nla = nla_reserve(skb, TCA_BPF_OPS, prog->bpf_len *
			  sizeof(struct sock_filter));
	if (nla == NULL)
		goto nla_put_failure;

	memcpy(nla_data(nla), prog->bpf_ops, nla_len(nla));

	if (tcf_exts_dump(skb, &prog->exts) < 0)
		goto nla_put_failure;

	nla_nest_end(skb, nest);

	if (tcf_exts_dump_stats(skb, &prog->exts) < 0)
		goto nla_put_failure;

	return skb->len;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

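/* Walk all programs for dump requests, honouring the skip count and
 * stopping when the callback reports an error.
 */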
static void cls_bpf_walk(struct tcf_proto *tp, struct tcf_walker *arg)
{
	struct cls_bpf_head *head = rtnl_dereference(tp->root);
	struct cls_bpf_prog *prog;

	list_for_each_entry_rcu(prog, &head->plist, link) {
		if (arg->count < arg->skip)
			goto skip;
		if (arg->fn(tp, (unsigned long) prog, arg) < 0) {
			arg->stop = 1;
			break;
		}
skip:
		arg->count++;
	}
}

static struct tcf_proto_ops cls_bpf_ops __read_mostly = {
	.kind		= "bpf",
	.owner		= THIS_MODULE,
	.classify	= cls_bpf_classify,
	.init		= cls_bpf_init,
	.destroy	= cls_bpf_destroy,
	.get		= cls_bpf_get,
	.put		= cls_bpf_put,
	.change		= cls_bpf_change,
	.delete		= cls_bpf_delete,
	.walk		= cls_bpf_walk,
	.dump		= cls_bpf_dump,
};

static int __init cls_bpf_init_mod(void)
{
	return register_tcf_proto_ops(&cls_bpf_ops);
}

static void __exit cls_bpf_exit_mod(void)
{
	unregister_tcf_proto_ops(&cls_bpf_ops);
}

module_init(cls_bpf_init_mod);
module_exit(cls_bpf_exit_mod);