/*
 * Berkeley Packet Filter based traffic classifier
 *
 * Might be used to classify traffic through flexible, user-defined and
 * possibly JIT-ed BPF filters for traffic control as an alternative to
 * ematches.
 *
 * (C) 2013 Daniel Borkmann <dborkman@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/skbuff.h>
#include <linux/filter.h>
#include <linux/bpf.h>

#include <net/rtnetlink.h>
#include <net/pkt_cls.h>
#include <net/sock.h>

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Daniel Borkmann <dborkman@redhat.com>");
MODULE_DESCRIPTION("TC BPF based classifier");

#define CLS_BPF_NAME_LEN	256
#define CLS_BPF_SUPPORTED_GEN_FLAGS		\
	(TCA_CLS_FLAGS_SKIP_HW | TCA_CLS_FLAGS_SKIP_SW)

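/* Per-classifier instance state: the RCU-protected list of attached
 * programs and the last auto-generated filter handle (hgen).
 */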
struct cls_bpf_head {
	struct list_head plist;
	u32 hgen;
	struct rcu_head rcu;
};

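/* A single attached filter program. bpf_ops is only kept for classic
 * BPF filters; for eBPF programs loaded by fd it stays NULL, which is
 * what cls_bpf_is_ebpf() keys off.
 */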
39struct cls_bpf_prog {
Alexei Starovoitov7ae457c2014-07-30 20:34:16 -070040 struct bpf_prog *filter;
Daniel Borkmann7d1d65c2013-10-28 16:43:02 +010041 struct list_head link;
Daniel Borkmanne2e9b652015-03-01 12:31:48 +010042 struct tcf_result res;
Daniel Borkmann045efa82015-09-15 23:05:42 -070043 bool exts_integrated;
Jakub Kicinski332ae8e2016-09-21 11:43:53 +010044 bool offloaded;
Jakub Kicinski0d01d452016-09-21 11:43:54 +010045 u32 gen_flags;
Daniel Borkmanne2e9b652015-03-01 12:31:48 +010046 struct tcf_exts exts;
Daniel Borkmann7d1d65c2013-10-28 16:43:02 +010047 u32 handle;
Daniel Borkmann55556dd2016-11-26 01:28:05 +010048 u16 bpf_num_ops;
Daniel Borkmanne2e9b652015-03-01 12:31:48 +010049 struct sock_filter *bpf_ops;
50 const char *bpf_name;
John Fastabend1f947bf2014-09-12 20:10:24 -070051 struct tcf_proto *tp;
52 struct rcu_head rcu;
Daniel Borkmann7d1d65c2013-10-28 16:43:02 +010053};
54
static const struct nla_policy bpf_policy[TCA_BPF_MAX + 1] = {
	[TCA_BPF_CLASSID]	= { .type = NLA_U32 },
	[TCA_BPF_FLAGS]		= { .type = NLA_U32 },
	[TCA_BPF_FLAGS_GEN]	= { .type = NLA_U32 },
	[TCA_BPF_FD]		= { .type = NLA_U32 },
	[TCA_BPF_NAME]		= { .type = NLA_NUL_STRING,
				    .len = CLS_BPF_NAME_LEN },
	[TCA_BPF_OPS_LEN]	= { .type = NLA_U16 },
	[TCA_BPF_OPS]		= { .type = NLA_BINARY,
				    .len = sizeof(struct sock_filter) * BPF_MAXINSNS },
};

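/* Clamp the return code of a direct-action program to the known TC_ACT_*
 * opcodes; anything unrecognized becomes TC_ACT_UNSPEC, which makes the
 * classify loop below continue with the next program in the list.
 */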
static int cls_bpf_exec_opcode(int code)
{
	switch (code) {
	case TC_ACT_OK:
	case TC_ACT_SHOT:
	case TC_ACT_STOLEN:
	case TC_ACT_TRAP:
	case TC_ACT_REDIRECT:
	case TC_ACT_UNSPEC:
		return code;
	default:
		return TC_ACT_UNSPEC;
	}
}

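/* Main classification path: run each attached program on the skb under
 * RCU until one delivers a verdict. At ingress the MAC header is pushed
 * back temporarily so programs see the packet from the link layer on.
 */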
static int cls_bpf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
			    struct tcf_result *res)
{
	struct cls_bpf_head *head = rcu_dereference_bh(tp->root);
	bool at_ingress = skb_at_tc_ingress(skb);
	struct cls_bpf_prog *prog;
	int ret = -1;

	/* Needed here for accessing maps. */
	rcu_read_lock();
	list_for_each_entry_rcu(prog, &head->plist, link) {
		int filter_res;

		qdisc_skb_cb(skb)->tc_classid = prog->res.classid;

		if (tc_skip_sw(prog->gen_flags)) {
			filter_res = prog->exts_integrated ? TC_ACT_UNSPEC : 0;
		} else if (at_ingress) {
			/* It is safe to push/pull even if skb_shared() */
			__skb_push(skb, skb->mac_len);
			bpf_compute_data_end(skb);
			filter_res = BPF_PROG_RUN(prog->filter, skb);
			__skb_pull(skb, skb->mac_len);
		} else {
			bpf_compute_data_end(skb);
			filter_res = BPF_PROG_RUN(prog->filter, skb);
		}

		if (prog->exts_integrated) {
			res->class   = 0;
			res->classid = TC_H_MAJ(prog->res.classid) |
				       qdisc_skb_cb(skb)->tc_classid;

			ret = cls_bpf_exec_opcode(filter_res);
			if (ret == TC_ACT_UNSPEC)
				continue;
			break;
		}

		if (filter_res == 0)
			continue;
		if (filter_res != -1) {
			res->class   = 0;
			res->classid = filter_res;
		} else {
			*res = prog->res;
		}

		ret = tcf_exts_exec(skb, &prog->exts, res);
		if (ret < 0)
			continue;

		break;
	}
	rcu_read_unlock();

	return ret;
}

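/* Classic BPF filters keep their original sock_filter ops around for
 * dumping; eBPF programs do not, so a NULL bpf_ops identifies eBPF.
 */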
static bool cls_bpf_is_ebpf(const struct cls_bpf_prog *prog)
{
	return !prog->bpf_ops;
}

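/* Issue a single offload command (add/replace/destroy/stats) to the
 * underlying device driver via ndo_setup_tc().
 */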
static int cls_bpf_offload_cmd(struct tcf_proto *tp, struct cls_bpf_prog *prog,
			       enum tc_clsbpf_command cmd)
{
	struct net_device *dev = tp->q->dev_queue->dev;
	struct tc_cls_bpf_offload cls_bpf = {};
	int err;

	tc_cls_common_offload_init(&cls_bpf.common, tp);
	cls_bpf.command = cmd;
	cls_bpf.exts = &prog->exts;
	cls_bpf.prog = prog->filter;
	cls_bpf.name = prog->bpf_name;
	cls_bpf.exts_integrated = prog->exts_integrated;
	cls_bpf.gen_flags = prog->gen_flags;

	err = dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_CLSBPF, &cls_bpf);
	if (!err && (cmd == TC_CLSBPF_ADD || cmd == TC_CLSBPF_REPLACE))
		prog->gen_flags |= TCA_CLS_FLAGS_IN_HW;

	return err;
}

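/* Decide how to (re)program the hardware when a filter is added or
 * replaced, honouring the skip_sw/skip_hw flags: replace an already
 * offloaded program, destroy it if offloading is no longer possible,
 * or add a fresh one.
 */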
static int cls_bpf_offload(struct tcf_proto *tp, struct cls_bpf_prog *prog,
			   struct cls_bpf_prog *oldprog)
{
	struct net_device *dev = tp->q->dev_queue->dev;
	struct cls_bpf_prog *obj = prog;
	enum tc_clsbpf_command cmd;
	bool skip_sw;
	int ret;

	skip_sw = tc_skip_sw(prog->gen_flags) ||
		(oldprog && tc_skip_sw(oldprog->gen_flags));

	if (oldprog && oldprog->offloaded) {
		if (tc_should_offload(dev, prog->gen_flags)) {
			cmd = TC_CLSBPF_REPLACE;
		} else if (!tc_skip_sw(prog->gen_flags)) {
			obj = oldprog;
			cmd = TC_CLSBPF_DESTROY;
		} else {
			return -EINVAL;
		}
	} else {
		if (!tc_should_offload(dev, prog->gen_flags))
			return skip_sw ? -EINVAL : 0;
		cmd = TC_CLSBPF_ADD;
	}

	ret = cls_bpf_offload_cmd(tp, obj, cmd);
	if (ret)
		return skip_sw ? ret : 0;

	obj->offloaded = true;
	if (oldprog)
		oldprog->offloaded = false;

	return 0;
}

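/* Remove an offloaded program from the hardware, e.g. on filter deletion. */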
static void cls_bpf_stop_offload(struct tcf_proto *tp,
				 struct cls_bpf_prog *prog)
{
	int err;

	if (!prog->offloaded)
		return;

	err = cls_bpf_offload_cmd(tp, prog, TC_CLSBPF_DESTROY);
	if (err) {
		pr_err("Stopping hardware offload failed: %d\n", err);
		return;
	}

	prog->offloaded = false;
}

static void cls_bpf_offload_update_stats(struct tcf_proto *tp,
					 struct cls_bpf_prog *prog)
{
	if (!prog->offloaded)
		return;

	cls_bpf_offload_cmd(tp, prog, TC_CLSBPF_STATS);
}

static int cls_bpf_init(struct tcf_proto *tp)
{
	struct cls_bpf_head *head;

	head = kzalloc(sizeof(*head), GFP_KERNEL);
	if (head == NULL)
		return -ENOBUFS;

	INIT_LIST_HEAD_RCU(&head->plist);
	rcu_assign_pointer(tp->root, head);

	return 0;
}

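/* Release everything a program owns: its actions, the BPF program itself
 * (put for eBPF, destroy for classic BPF) and all associated memory.
 */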
static void __cls_bpf_delete_prog(struct cls_bpf_prog *prog)
{
	tcf_exts_destroy(&prog->exts);

	if (cls_bpf_is_ebpf(prog))
		bpf_prog_put(prog->filter);
	else
		bpf_prog_destroy(prog->filter);

	kfree(prog->bpf_name);
	kfree(prog->bpf_ops);
	kfree(prog);
}

static void cls_bpf_delete_prog_rcu(struct rcu_head *rcu)
{
	__cls_bpf_delete_prog(container_of(rcu, struct cls_bpf_prog, rcu));
}

static void __cls_bpf_delete(struct tcf_proto *tp, struct cls_bpf_prog *prog)
{
	cls_bpf_stop_offload(tp, prog);
	list_del_rcu(&prog->link);
	tcf_unbind_filter(tp, &prog->res);
	call_rcu(&prog->rcu, cls_bpf_delete_prog_rcu);
}

static int cls_bpf_delete(struct tcf_proto *tp, void *arg, bool *last)
{
	struct cls_bpf_head *head = rtnl_dereference(tp->root);

	__cls_bpf_delete(tp, arg);
	*last = list_empty(&head->plist);
	return 0;
}

static void cls_bpf_destroy(struct tcf_proto *tp)
{
	struct cls_bpf_head *head = rtnl_dereference(tp->root);
	struct cls_bpf_prog *prog, *tmp;

	list_for_each_entry_safe(prog, tmp, &head->plist, link)
		__cls_bpf_delete(tp, prog);

	kfree_rcu(head, rcu);
}

static void *cls_bpf_get(struct tcf_proto *tp, u32 handle)
{
	struct cls_bpf_head *head = rtnl_dereference(tp->root);
	struct cls_bpf_prog *prog;

	list_for_each_entry(prog, &head->plist, link) {
		if (prog->handle == handle)
			return prog;
	}

	return NULL;
}

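/* Build a filter from classic BPF instructions passed as netlink
 * attributes: validate the instruction count, copy the ops and let
 * bpf_prog_create() translate them into an executable program.
 */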
static int cls_bpf_prog_from_ops(struct nlattr **tb, struct cls_bpf_prog *prog)
{
	struct sock_filter *bpf_ops;
	struct sock_fprog_kern fprog_tmp;
	struct bpf_prog *fp;
	u16 bpf_size, bpf_num_ops;
	int ret;

	bpf_num_ops = nla_get_u16(tb[TCA_BPF_OPS_LEN]);
	if (bpf_num_ops > BPF_MAXINSNS || bpf_num_ops == 0)
		return -EINVAL;

	bpf_size = bpf_num_ops * sizeof(*bpf_ops);
	if (bpf_size != nla_len(tb[TCA_BPF_OPS]))
		return -EINVAL;

	bpf_ops = kzalloc(bpf_size, GFP_KERNEL);
	if (bpf_ops == NULL)
		return -ENOMEM;

	memcpy(bpf_ops, nla_data(tb[TCA_BPF_OPS]), bpf_size);

	fprog_tmp.len = bpf_num_ops;
	fprog_tmp.filter = bpf_ops;

	ret = bpf_prog_create(&fp, &fprog_tmp);
	if (ret < 0) {
		kfree(bpf_ops);
		return ret;
	}

	prog->bpf_ops = bpf_ops;
	prog->bpf_num_ops = bpf_num_ops;
	prog->bpf_name = NULL;
	prog->filter = fp;

	return 0;
}

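/* Take a reference on an already loaded eBPF program by fd; it must be
 * of type BPF_PROG_TYPE_SCHED_CLS. If the program needs the skb dst and
 * we are not at ingress, make the device keep dst entries around.
 */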
static int cls_bpf_prog_from_efd(struct nlattr **tb, struct cls_bpf_prog *prog,
				 const struct tcf_proto *tp)
{
	struct bpf_prog *fp;
	char *name = NULL;
	u32 bpf_fd;

	bpf_fd = nla_get_u32(tb[TCA_BPF_FD]);

	fp = bpf_prog_get_type(bpf_fd, BPF_PROG_TYPE_SCHED_CLS);
	if (IS_ERR(fp))
		return PTR_ERR(fp);

	if (tb[TCA_BPF_NAME]) {
		name = nla_memdup(tb[TCA_BPF_NAME], GFP_KERNEL);
		if (!name) {
			bpf_prog_put(fp);
			return -ENOMEM;
		}
	}

	prog->bpf_ops = NULL;
	prog->bpf_name = name;
	prog->filter = fp;

	if (fp->dst_needed && !(tp->q->flags & TCQ_F_INGRESS))
		netif_keep_dst(qdisc_dev(tp->q));

	return 0;
}

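/* Parse and validate the netlink configuration common to both filter
 * flavours, then hand off to the classic BPF or eBPF setup path.
 */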
static int cls_bpf_set_parms(struct net *net, struct tcf_proto *tp,
			     struct cls_bpf_prog *prog, unsigned long base,
			     struct nlattr **tb, struct nlattr *est, bool ovr)
{
	bool is_bpf, is_ebpf, have_exts = false;
	u32 gen_flags = 0;
	int ret;

	is_bpf = tb[TCA_BPF_OPS_LEN] && tb[TCA_BPF_OPS];
	is_ebpf = tb[TCA_BPF_FD];
	if ((!is_bpf && !is_ebpf) || (is_bpf && is_ebpf))
		return -EINVAL;

	ret = tcf_exts_validate(net, tp, tb, est, &prog->exts, ovr);
	if (ret < 0)
		return ret;

	if (tb[TCA_BPF_FLAGS]) {
		u32 bpf_flags = nla_get_u32(tb[TCA_BPF_FLAGS]);

		if (bpf_flags & ~TCA_BPF_FLAG_ACT_DIRECT)
			return -EINVAL;

		have_exts = bpf_flags & TCA_BPF_FLAG_ACT_DIRECT;
	}
	if (tb[TCA_BPF_FLAGS_GEN]) {
		gen_flags = nla_get_u32(tb[TCA_BPF_FLAGS_GEN]);
		if (gen_flags & ~CLS_BPF_SUPPORTED_GEN_FLAGS ||
		    !tc_flags_valid(gen_flags))
			return -EINVAL;
	}

	prog->exts_integrated = have_exts;
	prog->gen_flags = gen_flags;

	ret = is_bpf ? cls_bpf_prog_from_ops(tb, prog) :
		       cls_bpf_prog_from_efd(tb, prog, tp);
	if (ret < 0)
		return ret;

	if (tb[TCA_BPF_CLASSID]) {
		prog->res.classid = nla_get_u32(tb[TCA_BPF_CLASSID]);
		tcf_bind_filter(tp, &prog->res, base);
	}

	return 0;
}

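/* Pick the next free filter handle, wrapping around within 1..0x7FFFFFFE
 * and giving up (returning 0) once all candidates have been tried.
 */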
static u32 cls_bpf_grab_new_handle(struct tcf_proto *tp,
				   struct cls_bpf_head *head)
{
	unsigned int i = 0x80000000;
	u32 handle;

	do {
		if (++head->hgen == 0x7FFFFFFF)
			head->hgen = 1;
	} while (--i > 0 && cls_bpf_get(tp, head->hgen));

	if (unlikely(i == 0)) {
		pr_err("Insufficient number of handles\n");
		handle = 0;
	} else {
		handle = head->hgen;
	}

	return handle;
}

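/* Create or replace a filter: parse the request, set up the new program,
 * offer it to the hardware and finally splice it into the RCU-protected
 * list, retiring the old program if one is being replaced.
 */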
static int cls_bpf_change(struct net *net, struct sk_buff *in_skb,
			  struct tcf_proto *tp, unsigned long base,
			  u32 handle, struct nlattr **tca,
			  void **arg, bool ovr)
{
	struct cls_bpf_head *head = rtnl_dereference(tp->root);
	struct cls_bpf_prog *oldprog = *arg;
	struct nlattr *tb[TCA_BPF_MAX + 1];
	struct cls_bpf_prog *prog;
	int ret;

	if (tca[TCA_OPTIONS] == NULL)
		return -EINVAL;

	ret = nla_parse_nested(tb, TCA_BPF_MAX, tca[TCA_OPTIONS], bpf_policy,
			       NULL);
	if (ret < 0)
		return ret;

	prog = kzalloc(sizeof(*prog), GFP_KERNEL);
	if (!prog)
		return -ENOBUFS;

	ret = tcf_exts_init(&prog->exts, TCA_BPF_ACT, TCA_BPF_POLICE);
	if (ret < 0)
		goto errout;

	if (oldprog) {
		if (handle && oldprog->handle != handle) {
			ret = -EINVAL;
			goto errout;
		}
	}

	if (handle == 0)
		prog->handle = cls_bpf_grab_new_handle(tp, head);
	else
		prog->handle = handle;
	if (prog->handle == 0) {
		ret = -EINVAL;
		goto errout;
	}

	ret = cls_bpf_set_parms(net, tp, prog, base, tb, tca[TCA_RATE], ovr);
	if (ret < 0)
		goto errout;

	ret = cls_bpf_offload(tp, prog, oldprog);
	if (ret) {
		__cls_bpf_delete_prog(prog);
		return ret;
	}

	if (!tc_in_hw(prog->gen_flags))
		prog->gen_flags |= TCA_CLS_FLAGS_NOT_IN_HW;

	if (oldprog) {
		list_replace_rcu(&oldprog->link, &prog->link);
		tcf_unbind_filter(tp, &oldprog->res);
		call_rcu(&oldprog->rcu, cls_bpf_delete_prog_rcu);
	} else {
		list_add_rcu(&prog->link, &head->plist);
	}

	*arg = prog;
	return 0;

errout:
	tcf_exts_destroy(&prog->exts);
	kfree(prog);
	return ret;
}

static int cls_bpf_dump_bpf_info(const struct cls_bpf_prog *prog,
				 struct sk_buff *skb)
{
	struct nlattr *nla;

	if (nla_put_u16(skb, TCA_BPF_OPS_LEN, prog->bpf_num_ops))
		return -EMSGSIZE;

	nla = nla_reserve(skb, TCA_BPF_OPS, prog->bpf_num_ops *
			  sizeof(struct sock_filter));
	if (nla == NULL)
		return -EMSGSIZE;

	memcpy(nla_data(nla), prog->bpf_ops, nla_len(nla));

	return 0;
}

static int cls_bpf_dump_ebpf_info(const struct cls_bpf_prog *prog,
				  struct sk_buff *skb)
{
	struct nlattr *nla;

	if (prog->bpf_name &&
	    nla_put_string(skb, TCA_BPF_NAME, prog->bpf_name))
		return -EMSGSIZE;

	if (nla_put_u32(skb, TCA_BPF_ID, prog->filter->aux->id))
		return -EMSGSIZE;

	nla = nla_reserve(skb, TCA_BPF_TAG, sizeof(prog->filter->tag));
	if (nla == NULL)
		return -EMSGSIZE;

	memcpy(nla_data(nla), prog->filter->tag, nla_len(nla));

	return 0;
}

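/* Dump a filter back to user space, including refreshed hardware stats
 * and the flavour-specific attributes (ops, or name/id/tag for eBPF).
 */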
static int cls_bpf_dump(struct net *net, struct tcf_proto *tp, void *fh,
			struct sk_buff *skb, struct tcmsg *tm)
{
	struct cls_bpf_prog *prog = fh;
	struct nlattr *nest;
	u32 bpf_flags = 0;
	int ret;

	if (prog == NULL)
		return skb->len;

	tm->tcm_handle = prog->handle;

	cls_bpf_offload_update_stats(tp, prog);

	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;

	if (prog->res.classid &&
	    nla_put_u32(skb, TCA_BPF_CLASSID, prog->res.classid))
		goto nla_put_failure;

	if (cls_bpf_is_ebpf(prog))
		ret = cls_bpf_dump_ebpf_info(prog, skb);
	else
		ret = cls_bpf_dump_bpf_info(prog, skb);
	if (ret)
		goto nla_put_failure;

	if (tcf_exts_dump(skb, &prog->exts) < 0)
		goto nla_put_failure;

	if (prog->exts_integrated)
		bpf_flags |= TCA_BPF_FLAG_ACT_DIRECT;
	if (bpf_flags && nla_put_u32(skb, TCA_BPF_FLAGS, bpf_flags))
		goto nla_put_failure;
	if (prog->gen_flags &&
	    nla_put_u32(skb, TCA_BPF_FLAGS_GEN, prog->gen_flags))
		goto nla_put_failure;

	nla_nest_end(skb, nest);

	if (tcf_exts_dump_stats(skb, &prog->exts) < 0)
		goto nla_put_failure;

	return skb->len;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

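/* Iterate over all installed programs on behalf of a netlink dump. */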
static void cls_bpf_walk(struct tcf_proto *tp, struct tcf_walker *arg)
{
	struct cls_bpf_head *head = rtnl_dereference(tp->root);
	struct cls_bpf_prog *prog;

	list_for_each_entry(prog, &head->plist, link) {
		if (arg->count < arg->skip)
			goto skip;
		if (arg->fn(tp, prog, arg) < 0) {
			arg->stop = 1;
			break;
		}
skip:
		arg->count++;
	}
}

static struct tcf_proto_ops cls_bpf_ops __read_mostly = {
	.kind		=	"bpf",
	.owner		=	THIS_MODULE,
	.classify	=	cls_bpf_classify,
	.init		=	cls_bpf_init,
	.destroy	=	cls_bpf_destroy,
	.get		=	cls_bpf_get,
	.change		=	cls_bpf_change,
	.delete		=	cls_bpf_delete,
	.walk		=	cls_bpf_walk,
	.dump		=	cls_bpf_dump,
};

static int __init cls_bpf_init_mod(void)
{
	return register_tcf_proto_ops(&cls_bpf_ops);
}

static void __exit cls_bpf_exit_mod(void)
{
	unregister_tcf_proto_ops(&cls_bpf_ops);
}

module_init(cls_bpf_init_mod);
module_exit(cls_bpf_exit_mod);