/*
 * Berkeley Packet Filter based traffic classifier
 *
 * Might be used to classify traffic through flexible, user-defined and
 * possibly JIT-ed BPF filters for traffic control as an alternative to
 * ematches.
 *
 * (C) 2013 Daniel Borkmann <dborkman@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/skbuff.h>
#include <linux/filter.h>
#include <linux/bpf.h>

#include <net/rtnetlink.h>
#include <net/pkt_cls.h>
#include <net/sock.h>

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Daniel Borkmann <dborkman@redhat.com>");
MODULE_DESCRIPTION("TC BPF based classifier");

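/* Usage sketch (illustrative, not part of the original sources): this
 * classifier is typically attached via iproute2's tc. Assuming a kernel
 * with cls_bpf and an iproute2 built with BPF support, a classic BPF
 * filter matching IPv4 frames might be set up along the lines of:
 *
 *   tc qdisc add dev eth0 ingress
 *   tc filter add dev eth0 parent ffff: bpf bytecode \
 *      '4,40 0 0 12,21 0 1 2048,6 0 0 65535,6 0 0 0' classid 1:1
 *
 * while an eBPF object built e.g. with LLVM might be attached via:
 *
 *   tc filter add dev eth0 parent ffff: bpf obj prog.o classid 1:1
 *
 * The exact tc syntax depends on the iproute2 version in use; treat the
 * commands above as a hedged example only.
 */
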
#define CLS_BPF_NAME_LEN	256

struct cls_bpf_head {
	struct list_head plist;
	u32 hgen;
	struct rcu_head rcu;
};

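/* Per-filter state. The anonymous union reflects the two flavours a
 * filter can take: an eBPF program referenced by file descriptor
 * (bpf_fd), or a classic BPF program given as an opcode array
 * (bpf_num_ops/bpf_ops). bpf_ops doubles as the flavour discriminator,
 * see cls_bpf_is_ebpf() below.
 */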
struct cls_bpf_prog {
	struct bpf_prog *filter;
	struct list_head link;
	struct tcf_result res;
	struct tcf_exts exts;
	u32 handle;
	union {
		u32 bpf_fd;
		u16 bpf_num_ops;
	};
	struct sock_filter *bpf_ops;
	const char *bpf_name;
	struct tcf_proto *tp;
	struct rcu_head rcu;
};

static const struct nla_policy bpf_policy[TCA_BPF_MAX + 1] = {
	[TCA_BPF_CLASSID]	= { .type = NLA_U32 },
	[TCA_BPF_FD]		= { .type = NLA_U32 },
	[TCA_BPF_NAME]		= { .type = NLA_NUL_STRING, .len = CLS_BPF_NAME_LEN },
	[TCA_BPF_OPS_LEN]	= { .type = NLA_U16 },
	[TCA_BPF_OPS]		= { .type = NLA_BINARY,
				    .len = sizeof(struct sock_filter) * BPF_MAXINSNS },
};

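/* Main classification path. Return value convention of the BPF run: 0
 * means no match (continue with the next filter on the list), -1 means
 * match with the default classid configured from userspace, and any
 * other value is taken as the classid directly.
 */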
static int cls_bpf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
			    struct tcf_result *res)
{
	struct cls_bpf_head *head = rcu_dereference_bh(tp->root);
	struct cls_bpf_prog *prog;
	int ret;

	list_for_each_entry_rcu(prog, &head->plist, link) {
		int filter_res = BPF_PROG_RUN(prog->filter, skb);

		if (filter_res == 0)
			continue;

		*res = prog->res;
		if (filter_res != -1)
			res->classid = filter_res;

		ret = tcf_exts_exec(skb, &prog->exts, res);
		if (ret < 0)
			continue;

		return ret;
	}

	return -1;
}

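/* Programs taken over via bpf_prog_get() carry no classic opcode image,
 * so a NULL bpf_ops identifies the eBPF flavour.
 */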
static bool cls_bpf_is_ebpf(const struct cls_bpf_prog *prog)
{
	return !prog->bpf_ops;
}

static int cls_bpf_init(struct tcf_proto *tp)
{
	struct cls_bpf_head *head;

	head = kzalloc(sizeof(*head), GFP_KERNEL);
	if (head == NULL)
		return -ENOBUFS;

	INIT_LIST_HEAD_RCU(&head->plist);
	rcu_assign_pointer(tp->root, head);

	return 0;
}

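/* Filter teardown. cls_bpf_delete_prog() releases everything a filter
 * owns and must only run once readers can no longer see the filter;
 * __cls_bpf_delete_prog() is the call_rcu() callback deferring it past
 * a grace period.
 */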
static void cls_bpf_delete_prog(struct tcf_proto *tp, struct cls_bpf_prog *prog)
{
	tcf_exts_destroy(&prog->exts);

	if (cls_bpf_is_ebpf(prog))
		bpf_prog_put(prog->filter);
	else
		bpf_prog_destroy(prog->filter);

	kfree(prog->bpf_name);
	kfree(prog->bpf_ops);
	kfree(prog);
}

static void __cls_bpf_delete_prog(struct rcu_head *rcu)
{
	struct cls_bpf_prog *prog = container_of(rcu, struct cls_bpf_prog, rcu);

	cls_bpf_delete_prog(prog->tp, prog);
}

static int cls_bpf_delete(struct tcf_proto *tp, unsigned long arg)
{
	struct cls_bpf_prog *prog = (struct cls_bpf_prog *) arg;

	list_del_rcu(&prog->link);
	tcf_unbind_filter(tp, &prog->res);
	call_rcu(&prog->rcu, __cls_bpf_delete_prog);

	return 0;
}

static void cls_bpf_destroy(struct tcf_proto *tp)
{
	struct cls_bpf_head *head = rtnl_dereference(tp->root);
	struct cls_bpf_prog *prog, *tmp;

	list_for_each_entry_safe(prog, tmp, &head->plist, link) {
		list_del_rcu(&prog->link);
		tcf_unbind_filter(tp, &prog->res);
		call_rcu(&prog->rcu, __cls_bpf_delete_prog);
	}

	RCU_INIT_POINTER(tp->root, NULL);
	kfree_rcu(head, rcu);
}

static unsigned long cls_bpf_get(struct tcf_proto *tp, u32 handle)
{
	struct cls_bpf_head *head = rtnl_dereference(tp->root);
	struct cls_bpf_prog *prog;
	unsigned long ret = 0UL;

	if (head == NULL)
		return 0UL;

	list_for_each_entry(prog, &head->plist, link) {
		if (prog->handle == handle) {
			ret = (unsigned long) prog;
			break;
		}
	}

	return ret;
}

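/* Build the classic BPF flavour: validate TCA_BPF_OPS_LEN against the
 * length of the TCA_BPF_OPS blob, copy the opcodes and hand them to
 * bpf_prog_create(), which checks the filter and may JIT it.
 */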
static int cls_bpf_prog_from_ops(struct nlattr **tb,
				 struct cls_bpf_prog *prog, u32 classid)
{
	struct sock_filter *bpf_ops;
	struct sock_fprog_kern fprog_tmp;
	struct bpf_prog *fp;
	u16 bpf_size, bpf_num_ops;
	int ret;

	bpf_num_ops = nla_get_u16(tb[TCA_BPF_OPS_LEN]);
	if (bpf_num_ops > BPF_MAXINSNS || bpf_num_ops == 0)
		return -EINVAL;

	bpf_size = bpf_num_ops * sizeof(*bpf_ops);
	if (bpf_size != nla_len(tb[TCA_BPF_OPS]))
		return -EINVAL;

	bpf_ops = kzalloc(bpf_size, GFP_KERNEL);
	if (bpf_ops == NULL)
		return -ENOMEM;

	memcpy(bpf_ops, nla_data(tb[TCA_BPF_OPS]), bpf_size);

	fprog_tmp.len = bpf_num_ops;
	fprog_tmp.filter = bpf_ops;

	ret = bpf_prog_create(&fp, &fprog_tmp);
	if (ret < 0) {
		kfree(bpf_ops);
		return ret;
	}

	prog->bpf_ops = bpf_ops;
	prog->bpf_num_ops = bpf_num_ops;
	prog->bpf_name = NULL;

	prog->filter = fp;
	prog->res.classid = classid;

	return 0;
}

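/* Build the eBPF flavour: look up an already loaded program by the file
 * descriptor passed in TCA_BPF_FD and make sure it was loaded as
 * BPF_PROG_TYPE_SCHED_CLS. The optional TCA_BPF_NAME is only kept for
 * dumping it back to userspace.
 */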
static int cls_bpf_prog_from_efd(struct nlattr **tb,
				 struct cls_bpf_prog *prog, u32 classid)
{
	struct bpf_prog *fp;
	char *name = NULL;
	u32 bpf_fd;

	bpf_fd = nla_get_u32(tb[TCA_BPF_FD]);

	fp = bpf_prog_get(bpf_fd);
	if (IS_ERR(fp))
		return PTR_ERR(fp);

	if (fp->type != BPF_PROG_TYPE_SCHED_CLS) {
		bpf_prog_put(fp);
		return -EINVAL;
	}

	if (tb[TCA_BPF_NAME]) {
		name = kmemdup(nla_data(tb[TCA_BPF_NAME]),
			       nla_len(tb[TCA_BPF_NAME]),
			       GFP_KERNEL);
		if (!name) {
			bpf_prog_put(fp);
			return -ENOMEM;
		}
	}

	prog->bpf_ops = NULL;
	prog->bpf_fd = bpf_fd;
	prog->bpf_name = name;

	prog->filter = fp;
	prog->res.classid = classid;

	return 0;
}

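/* Common configuration step: the TCA_BPF_OPS* and TCA_BPF_FD attributes
 * are mutually exclusive, exactly one flavour must be given, and a
 * classid is mandatory. Extended actions are validated first so that a
 * failure in program setup can still unwind them.
 */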
static int cls_bpf_modify_existing(struct net *net, struct tcf_proto *tp,
				   struct cls_bpf_prog *prog,
				   unsigned long base, struct nlattr **tb,
				   struct nlattr *est, bool ovr)
{
	struct tcf_exts exts;
	bool is_bpf, is_ebpf;
	u32 classid;
	int ret;

	is_bpf = tb[TCA_BPF_OPS_LEN] && tb[TCA_BPF_OPS];
	is_ebpf = tb[TCA_BPF_FD];

	if ((!is_bpf && !is_ebpf) || (is_bpf && is_ebpf) ||
	    !tb[TCA_BPF_CLASSID])
		return -EINVAL;

	tcf_exts_init(&exts, TCA_BPF_ACT, TCA_BPF_POLICE);
	ret = tcf_exts_validate(net, tp, tb, est, &exts, ovr);
	if (ret < 0)
		return ret;

	classid = nla_get_u32(tb[TCA_BPF_CLASSID]);

	ret = is_bpf ? cls_bpf_prog_from_ops(tb, prog, classid) :
		       cls_bpf_prog_from_efd(tb, prog, classid);
	if (ret < 0) {
		tcf_exts_destroy(&exts);
		return ret;
	}

	tcf_bind_filter(tp, &prog->res, base);
	tcf_exts_change(tp, &prog->exts, &exts);

	return 0;
}

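/* Handle allocation: hgen is a simple wrapping counter; up to
 * 0x80000000 candidate handles are probed via cls_bpf_get() before
 * giving up and signalling exhaustion with a zero handle.
 */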
static u32 cls_bpf_grab_new_handle(struct tcf_proto *tp,
				   struct cls_bpf_head *head)
{
	unsigned int i = 0x80000000;
	u32 handle;

	do {
		if (++head->hgen == 0x7FFFFFFF)
			head->hgen = 1;
	} while (--i > 0 && cls_bpf_get(tp, head->hgen));

	if (unlikely(i == 0)) {
		pr_err("Insufficient number of handles\n");
		handle = 0;
	} else {
		handle = head->hgen;
	}

	return handle;
}

static int cls_bpf_change(struct net *net, struct sk_buff *in_skb,
			  struct tcf_proto *tp, unsigned long base,
			  u32 handle, struct nlattr **tca,
			  unsigned long *arg, bool ovr)
{
	struct cls_bpf_head *head = rtnl_dereference(tp->root);
	struct cls_bpf_prog *oldprog = (struct cls_bpf_prog *) *arg;
	struct nlattr *tb[TCA_BPF_MAX + 1];
	struct cls_bpf_prog *prog;
	int ret;

	if (tca[TCA_OPTIONS] == NULL)
		return -EINVAL;

	ret = nla_parse_nested(tb, TCA_BPF_MAX, tca[TCA_OPTIONS], bpf_policy);
	if (ret < 0)
		return ret;

	prog = kzalloc(sizeof(*prog), GFP_KERNEL);
	if (!prog)
		return -ENOBUFS;

	tcf_exts_init(&prog->exts, TCA_BPF_ACT, TCA_BPF_POLICE);

	if (oldprog) {
		if (handle && oldprog->handle != handle) {
			ret = -EINVAL;
			goto errout;
		}
	}

	if (handle == 0)
		prog->handle = cls_bpf_grab_new_handle(tp, head);
	else
		prog->handle = handle;
	if (prog->handle == 0) {
		ret = -EINVAL;
		goto errout;
	}

	ret = cls_bpf_modify_existing(net, tp, prog, base, tb, tca[TCA_RATE], ovr);
	if (ret < 0)
		goto errout;

	if (oldprog) {
		/* list_replace_rcu() expects (old, new): swap the new
		 * program in at the position of the one it replaces.
		 */
		list_replace_rcu(&oldprog->link, &prog->link);
		tcf_unbind_filter(tp, &oldprog->res);
		call_rcu(&oldprog->rcu, __cls_bpf_delete_prog);
	} else {
		list_add_rcu(&prog->link, &head->plist);
	}

	*arg = (unsigned long) prog;
	return 0;
errout:
	kfree(prog);

	return ret;
}

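/* Dump helpers: mirror the two setup paths above, emitting either the
 * raw opcode array (classic BPF) or the fd plus optional name (eBPF)
 * back to userspace.
 */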
static int cls_bpf_dump_bpf_info(const struct cls_bpf_prog *prog,
				 struct sk_buff *skb)
{
	struct nlattr *nla;

	if (nla_put_u16(skb, TCA_BPF_OPS_LEN, prog->bpf_num_ops))
		return -EMSGSIZE;

	nla = nla_reserve(skb, TCA_BPF_OPS, prog->bpf_num_ops *
			  sizeof(struct sock_filter));
	if (nla == NULL)
		return -EMSGSIZE;

	memcpy(nla_data(nla), prog->bpf_ops, nla_len(nla));

	return 0;
}

static int cls_bpf_dump_ebpf_info(const struct cls_bpf_prog *prog,
				  struct sk_buff *skb)
{
	if (nla_put_u32(skb, TCA_BPF_FD, prog->bpf_fd))
		return -EMSGSIZE;

	if (prog->bpf_name &&
	    nla_put_string(skb, TCA_BPF_NAME, prog->bpf_name))
		return -EMSGSIZE;

	return 0;
}

static int cls_bpf_dump(struct net *net, struct tcf_proto *tp, unsigned long fh,
			struct sk_buff *skb, struct tcmsg *tm)
{
	struct cls_bpf_prog *prog = (struct cls_bpf_prog *) fh;
	struct nlattr *nest;
	int ret;

	if (prog == NULL)
		return skb->len;

	tm->tcm_handle = prog->handle;

	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;

	if (nla_put_u32(skb, TCA_BPF_CLASSID, prog->res.classid))
		goto nla_put_failure;

	if (cls_bpf_is_ebpf(prog))
		ret = cls_bpf_dump_ebpf_info(prog, skb);
	else
		ret = cls_bpf_dump_bpf_info(prog, skb);
	if (ret)
		goto nla_put_failure;

	if (tcf_exts_dump(skb, &prog->exts) < 0)
		goto nla_put_failure;

	nla_nest_end(skb, nest);

	if (tcf_exts_dump_stats(skb, &prog->exts) < 0)
		goto nla_put_failure;

	return skb->len;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

static void cls_bpf_walk(struct tcf_proto *tp, struct tcf_walker *arg)
{
	struct cls_bpf_head *head = rtnl_dereference(tp->root);
	struct cls_bpf_prog *prog;

	list_for_each_entry(prog, &head->plist, link) {
		if (arg->count < arg->skip)
			goto skip;
		if (arg->fn(tp, (unsigned long) prog, arg) < 0) {
			arg->stop = 1;
			break;
		}
skip:
		arg->count++;
	}
}

static struct tcf_proto_ops cls_bpf_ops __read_mostly = {
	.kind		=	"bpf",
	.owner		=	THIS_MODULE,
	.classify	=	cls_bpf_classify,
	.init		=	cls_bpf_init,
	.destroy	=	cls_bpf_destroy,
	.get		=	cls_bpf_get,
	.change		=	cls_bpf_change,
	.delete		=	cls_bpf_delete,
	.walk		=	cls_bpf_walk,
	.dump		=	cls_bpf_dump,
};

static int __init cls_bpf_init_mod(void)
{
	return register_tcf_proto_ops(&cls_bpf_ops);
}

static void __exit cls_bpf_exit_mod(void)
{
	unregister_tcf_proto_ops(&cls_bpf_ops);
}

module_init(cls_bpf_init_mod);
module_exit(cls_bpf_exit_mod);