/*
 * Berkeley Packet Filter based traffic classifier
 *
 * Might be used to classify traffic through flexible, user-defined and
 * possibly JIT-ed BPF filters for traffic control as an alternative to
 * ematches.
 *
 * (C) 2013 Daniel Borkmann <dborkman@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

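/*
 * Example usage from user space, a sketch only: the exact iproute2
 * syntax may differ by version, and "em1" as well as the bytecode file
 * are placeholders:
 *
 *   bpf_asm foo.ops > foo.bytecode
 *   tc filter add dev em1 parent 1: bpf run bytecode "`cat foo.bytecode`" \
 *      flowid 1:1
 */
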
#include <linux/module.h>
#include <linux/types.h>
#include <linux/skbuff.h>
#include <linux/filter.h>
#include <net/rtnetlink.h>
#include <net/pkt_cls.h>
#include <net/sock.h>

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Daniel Borkmann <dborkman@redhat.com>");
MODULE_DESCRIPTION("TC BPF based classifier");

struct cls_bpf_head {
	struct list_head plist;
	u32 hgen;
};

struct cls_bpf_prog {
	struct sk_filter *filter;
	struct sock_filter *bpf_ops;
	struct tcf_exts exts;
	struct tcf_result res;
	struct list_head link;
	u32 handle;
	u16 bpf_len;
};

static const struct nla_policy bpf_policy[TCA_BPF_MAX + 1] = {
	[TCA_BPF_CLASSID]	= { .type = NLA_U32 },
	[TCA_BPF_OPS_LEN]	= { .type = NLA_U16 },
	[TCA_BPF_OPS]		= { .type = NLA_BINARY,
				    .len = sizeof(struct sock_filter) * BPF_MAXINSNS },
};

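/* Main classification path: run each attached BPF program over the skb
 * in list order. A program result of 0 means no match, -1 selects the
 * default classid configured for the filter, and any other value is
 * taken as the classid directly.
 */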
static int cls_bpf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
			    struct tcf_result *res)
{
	struct cls_bpf_head *head = tp->root;
	struct cls_bpf_prog *prog;
	int ret;

	list_for_each_entry(prog, &head->plist, link) {
		int filter_res = SK_RUN_FILTER(prog->filter, skb);

		if (filter_res == 0)
			continue;

		*res = prog->res;
		if (filter_res != -1)
			res->classid = filter_res;

		ret = tcf_exts_exec(skb, &prog->exts, res);
		if (ret < 0)
			continue;

		return ret;
	}

	return -1;
}

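/* Allocate the per-classifier head that holds the list of programs. */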
static int cls_bpf_init(struct tcf_proto *tp)
{
	struct cls_bpf_head *head;

	head = kzalloc(sizeof(*head), GFP_KERNEL);
	if (head == NULL)
		return -ENOBUFS;

	INIT_LIST_HEAD(&head->plist);
	tp->root = head;

	return 0;
}

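/* Release everything a single program owns: the filter binding, its
 * extended actions, the attached (possibly JIT-ed) filter and the raw
 * BPF instruction array.
 */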
static void cls_bpf_delete_prog(struct tcf_proto *tp, struct cls_bpf_prog *prog)
{
	tcf_unbind_filter(tp, &prog->res);
	tcf_exts_destroy(tp, &prog->exts);

	sk_unattached_filter_destroy(prog->filter);

	kfree(prog->bpf_ops);
	kfree(prog);
}

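/* Unlink and free the single program handed in by the core as an
 * opaque argument.
 */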
static int cls_bpf_delete(struct tcf_proto *tp, unsigned long arg)
{
	struct cls_bpf_head *head = tp->root;
	struct cls_bpf_prog *prog, *todel = (struct cls_bpf_prog *) arg;

	list_for_each_entry(prog, &head->plist, link) {
		if (prog == todel) {
			tcf_tree_lock(tp);
			list_del(&prog->link);
			tcf_tree_unlock(tp);

			cls_bpf_delete_prog(tp, prog);
			return 0;
		}
	}

	return -ENOENT;
}

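/* Tear down the whole classifier instance, freeing all programs. */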
static void cls_bpf_destroy(struct tcf_proto *tp)
{
	struct cls_bpf_head *head = tp->root;
	struct cls_bpf_prog *prog, *tmp;

	list_for_each_entry_safe(prog, tmp, &head->plist, link) {
		list_del(&prog->link);
		cls_bpf_delete_prog(tp, prog);
	}

	kfree(head);
}

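/* Look up a program by handle; returns it as an opaque cookie, or 0 if
 * not found.
 */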
static unsigned long cls_bpf_get(struct tcf_proto *tp, u32 handle)
{
	struct cls_bpf_head *head = tp->root;
	struct cls_bpf_prog *prog;
	unsigned long ret = 0UL;

	if (head == NULL)
		return 0UL;

	list_for_each_entry(prog, &head->plist, link) {
		if (prog->handle == handle) {
			ret = (unsigned long) prog;
			break;
		}
	}

	return ret;
}

static void cls_bpf_put(struct tcf_proto *tp, unsigned long f)
{
}

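/* Validate the netlink attributes, build a new unattached filter from
 * the user-supplied BPF instructions and swap it into the program under
 * the tree lock; the old filter and instruction array are freed after
 * the swap.
 */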
static int cls_bpf_modify_existing(struct net *net, struct tcf_proto *tp,
				   struct cls_bpf_prog *prog,
				   unsigned long base, struct nlattr **tb,
				   struct nlattr *est, bool ovr)
{
	struct sock_filter *bpf_ops, *bpf_old;
	struct tcf_exts exts;
	struct sock_fprog_kern tmp;
	struct sk_filter *fp, *fp_old;
	u16 bpf_size, bpf_len;
	u32 classid;
	int ret;

	if (!tb[TCA_BPF_OPS_LEN] || !tb[TCA_BPF_OPS] || !tb[TCA_BPF_CLASSID])
		return -EINVAL;

	tcf_exts_init(&exts, TCA_BPF_ACT, TCA_BPF_POLICE);
	ret = tcf_exts_validate(net, tp, tb, est, &exts, ovr);
	if (ret < 0)
		return ret;

	classid = nla_get_u32(tb[TCA_BPF_CLASSID]);
	bpf_len = nla_get_u16(tb[TCA_BPF_OPS_LEN]);
	if (bpf_len > BPF_MAXINSNS || bpf_len == 0) {
		ret = -EINVAL;
		goto errout;
	}

	bpf_size = bpf_len * sizeof(*bpf_ops);
	bpf_ops = kzalloc(bpf_size, GFP_KERNEL);
	if (bpf_ops == NULL) {
		ret = -ENOMEM;
		goto errout;
	}

	memcpy(bpf_ops, nla_data(tb[TCA_BPF_OPS]), bpf_size);

	tmp.len = bpf_len;
	tmp.filter = bpf_ops;

	ret = sk_unattached_filter_create(&fp, &tmp);
	if (ret)
		goto errout_free;

	tcf_tree_lock(tp);
	fp_old = prog->filter;
	bpf_old = prog->bpf_ops;

	prog->bpf_len = bpf_len;
	prog->bpf_ops = bpf_ops;
	prog->filter = fp;
	prog->res.classid = classid;
	tcf_tree_unlock(tp);

	tcf_bind_filter(tp, &prog->res, base);
	tcf_exts_change(tp, &prog->exts, &exts);

	if (fp_old)
		sk_unattached_filter_destroy(fp_old);
	if (bpf_old)
		kfree(bpf_old);

	return 0;

errout_free:
	kfree(bpf_ops);
errout:
	tcf_exts_destroy(tp, &exts);
	return ret;
}

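/* Auto-generate a handle when the user did not supply one. Advances the
 * generator until an unused handle is found, giving up after 2^31
 * attempts; returns 0 on exhaustion so the caller can report an error.
 */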
static u32 cls_bpf_grab_new_handle(struct tcf_proto *tp,
				   struct cls_bpf_head *head)
{
	unsigned int i = 0x80000000;

	do {
		if (++head->hgen == 0x7FFFFFFF)
			head->hgen = 1;
	} while (--i > 0 && cls_bpf_get(tp, head->hgen));

	if (i == 0) {
		pr_err("Insufficient number of handles\n");
		return 0;
	}

	/* Return the generated handle itself, not the loop counter. */
	return head->hgen;
}

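/* Create a new program or modify an existing one. The core passes a
 * non-NULL *arg when a program with the given handle already exists;
 * otherwise a fresh program is allocated, configured and linked in.
 */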
static int cls_bpf_change(struct net *net, struct sk_buff *in_skb,
			  struct tcf_proto *tp, unsigned long base,
			  u32 handle, struct nlattr **tca,
			  unsigned long *arg, bool ovr)
{
	struct cls_bpf_head *head = tp->root;
	struct cls_bpf_prog *prog = (struct cls_bpf_prog *) *arg;
	struct nlattr *tb[TCA_BPF_MAX + 1];
	int ret;

	if (tca[TCA_OPTIONS] == NULL)
		return -EINVAL;

	ret = nla_parse_nested(tb, TCA_BPF_MAX, tca[TCA_OPTIONS], bpf_policy);
	if (ret < 0)
		return ret;

	if (prog != NULL) {
		if (handle && prog->handle != handle)
			return -EINVAL;
		return cls_bpf_modify_existing(net, tp, prog, base, tb,
					       tca[TCA_RATE], ovr);
	}

	prog = kzalloc(sizeof(*prog), GFP_KERNEL);
	if (prog == NULL)
		return -ENOBUFS;

	tcf_exts_init(&prog->exts, TCA_BPF_ACT, TCA_BPF_POLICE);
	if (handle == 0)
		prog->handle = cls_bpf_grab_new_handle(tp, head);
	else
		prog->handle = handle;
	if (prog->handle == 0) {
		ret = -EINVAL;
		goto errout;
	}

	ret = cls_bpf_modify_existing(net, tp, prog, base, tb, tca[TCA_RATE], ovr);
	if (ret < 0)
		goto errout;

	tcf_tree_lock(tp);
	list_add(&prog->link, &head->plist);
	tcf_tree_unlock(tp);

	*arg = (unsigned long) prog;

	return 0;
errout:
	if (*arg == 0UL && prog)
		kfree(prog);

	return ret;
}

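/* Dump a single program back to user space as netlink attributes:
 * classid, instruction count and the raw BPF instructions, followed by
 * any extended actions and their statistics.
 */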
static int cls_bpf_dump(struct net *net, struct tcf_proto *tp, unsigned long fh,
			struct sk_buff *skb, struct tcmsg *tm)
{
	struct cls_bpf_prog *prog = (struct cls_bpf_prog *) fh;
	struct nlattr *nest, *nla;

	if (prog == NULL)
		return skb->len;

	tm->tcm_handle = prog->handle;

	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;

	if (nla_put_u32(skb, TCA_BPF_CLASSID, prog->res.classid))
		goto nla_put_failure;
	if (nla_put_u16(skb, TCA_BPF_OPS_LEN, prog->bpf_len))
		goto nla_put_failure;

	nla = nla_reserve(skb, TCA_BPF_OPS, prog->bpf_len *
			  sizeof(struct sock_filter));
	if (nla == NULL)
		goto nla_put_failure;

	memcpy(nla_data(nla), prog->bpf_ops, nla_len(nla));

	if (tcf_exts_dump(skb, &prog->exts) < 0)
		goto nla_put_failure;

	nla_nest_end(skb, nest);

	if (tcf_exts_dump_stats(skb, &prog->exts) < 0)
		goto nla_put_failure;

	return skb->len;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

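/* Iterate over all programs for the core's walker callback, honouring
 * its skip count and stop flag.
 */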
static void cls_bpf_walk(struct tcf_proto *tp, struct tcf_walker *arg)
{
	struct cls_bpf_head *head = tp->root;
	struct cls_bpf_prog *prog;

	list_for_each_entry(prog, &head->plist, link) {
		if (arg->count < arg->skip)
			goto skip;
		if (arg->fn(tp, (unsigned long) prog, arg) < 0) {
			arg->stop = 1;
			break;
		}
skip:
		arg->count++;
	}
}

static struct tcf_proto_ops cls_bpf_ops __read_mostly = {
	.kind		= "bpf",
	.owner		= THIS_MODULE,
	.classify	= cls_bpf_classify,
	.init		= cls_bpf_init,
	.destroy	= cls_bpf_destroy,
	.get		= cls_bpf_get,
	.put		= cls_bpf_put,
	.change		= cls_bpf_change,
	.delete		= cls_bpf_delete,
	.walk		= cls_bpf_walk,
	.dump		= cls_bpf_dump,
};

static int __init cls_bpf_init_mod(void)
{
	return register_tcf_proto_ops(&cls_bpf_ops);
}

static void __exit cls_bpf_exit_mod(void)
{
	unregister_tcf_proto_ops(&cls_bpf_ops);
}

module_init(cls_bpf_init_mod);
module_exit(cls_bpf_exit_mod);