/*
 * Berkeley Packet Filter based traffic classifier
 *
 * Might be used to classify traffic through flexible, user-defined and
 * possibly JIT-ed BPF filters for traffic control as an alternative to
 * ematches.
 *
 * (C) 2013 Daniel Borkmann <dborkman@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
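
/* Example usage from the tc(8) command line (illustrative only; the object
 * file and section names below are assumptions, not part of this module):
 *
 *   tc qdisc add dev eth0 clsact
 *   tc filter add dev eth0 ingress bpf da obj prog.o sec classifier
 *
 * "da" requests direct-action mode (TCA_BPF_FLAG_ACT_DIRECT below), where
 * the program's return code is used directly as the TC verdict.
 */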

#include <linux/module.h>
#include <linux/types.h>
#include <linux/skbuff.h>
#include <linux/filter.h>
#include <linux/bpf.h>

#include <net/rtnetlink.h>
#include <net/pkt_cls.h>
#include <net/sock.h>

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Daniel Borkmann <dborkman@redhat.com>");
MODULE_DESCRIPTION("TC BPF based classifier");

#define CLS_BPF_NAME_LEN	256
#define CLS_BPF_SUPPORTED_GEN_FLAGS \
	(TCA_CLS_FLAGS_SKIP_HW | TCA_CLS_FLAGS_SKIP_SW)

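/* Per-classifier-instance state: an RCU-managed list of attached programs
 * and a generator for auto-assigned handles, see cls_bpf_grab_new_handle().
 */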
struct cls_bpf_head {
	struct list_head plist;
	u32 hgen;
	struct rcu_head rcu;
};

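/* One attached filter. bpf_ops is only set for classic BPF; eBPF programs
 * are identified by bpf_fd instead, hence the union and cls_bpf_is_ebpf().
 */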
39struct cls_bpf_prog {
Alexei Starovoitov7ae457c2014-07-30 20:34:16 -070040 struct bpf_prog *filter;
Daniel Borkmann7d1d65c2013-10-28 16:43:02 +010041 struct list_head link;
Daniel Borkmanne2e9b652015-03-01 12:31:48 +010042 struct tcf_result res;
Daniel Borkmann045efa82015-09-15 23:05:42 -070043 bool exts_integrated;
Jakub Kicinski332ae8e2016-09-21 11:43:53 +010044 bool offloaded;
Jakub Kicinski0d01d452016-09-21 11:43:54 +010045 u32 gen_flags;
Daniel Borkmanne2e9b652015-03-01 12:31:48 +010046 struct tcf_exts exts;
Daniel Borkmann7d1d65c2013-10-28 16:43:02 +010047 u32 handle;
Daniel Borkmanne2e9b652015-03-01 12:31:48 +010048 union {
49 u32 bpf_fd;
50 u16 bpf_num_ops;
51 };
52 struct sock_filter *bpf_ops;
53 const char *bpf_name;
John Fastabend1f947bf2014-09-12 20:10:24 -070054 struct tcf_proto *tp;
55 struct rcu_head rcu;
Daniel Borkmann7d1d65c2013-10-28 16:43:02 +010056};
57
static const struct nla_policy bpf_policy[TCA_BPF_MAX + 1] = {
	[TCA_BPF_CLASSID]	= { .type = NLA_U32 },
	[TCA_BPF_FLAGS]		= { .type = NLA_U32 },
	[TCA_BPF_FLAGS_GEN]	= { .type = NLA_U32 },
	[TCA_BPF_FD]		= { .type = NLA_U32 },
	[TCA_BPF_NAME]		= { .type = NLA_NUL_STRING,
				    .len = CLS_BPF_NAME_LEN },
	[TCA_BPF_OPS_LEN]	= { .type = NLA_U16 },
	[TCA_BPF_OPS]		= { .type = NLA_BINARY,
				    .len = sizeof(struct sock_filter) * BPF_MAXINSNS },
};

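/* In direct-action mode the return code of the BPF program is the TC
 * verdict itself; anything this classifier does not understand is mapped
 * to TC_ACT_UNSPEC, which makes cls_bpf_classify() try the next program.
 */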
static int cls_bpf_exec_opcode(int code)
{
	switch (code) {
	case TC_ACT_OK:
	case TC_ACT_SHOT:
	case TC_ACT_STOLEN:
	case TC_ACT_REDIRECT:
	case TC_ACT_UNSPEC:
		return code;
	default:
		return TC_ACT_UNSPEC;
	}
}

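/* The classification fast path. At ingress the skb's MAC header is pushed
 * back before the program runs so programs see the same packet layout as
 * at egress; filters flagged skip_sw are never executed in software.
 */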
static int cls_bpf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
			    struct tcf_result *res)
{
	struct cls_bpf_head *head = rcu_dereference_bh(tp->root);
	bool at_ingress = skb_at_tc_ingress(skb);
	struct cls_bpf_prog *prog;
	int ret = -1;

	/* Needed here for accessing maps. */
	rcu_read_lock();
	list_for_each_entry_rcu(prog, &head->plist, link) {
		int filter_res;

		qdisc_skb_cb(skb)->tc_classid = prog->res.classid;

		if (tc_skip_sw(prog->gen_flags)) {
			filter_res = prog->exts_integrated ? TC_ACT_UNSPEC : 0;
		} else if (at_ingress) {
			/* It is safe to push/pull even if skb_shared() */
			__skb_push(skb, skb->mac_len);
			bpf_compute_data_end(skb);
			filter_res = BPF_PROG_RUN(prog->filter, skb);
			__skb_pull(skb, skb->mac_len);
		} else {
			bpf_compute_data_end(skb);
			filter_res = BPF_PROG_RUN(prog->filter, skb);
		}

		if (prog->exts_integrated) {
			res->class   = 0;
			res->classid = TC_H_MAJ(prog->res.classid) |
				       qdisc_skb_cb(skb)->tc_classid;

			ret = cls_bpf_exec_opcode(filter_res);
			if (ret == TC_ACT_UNSPEC)
				continue;
			break;
		}

		if (filter_res == 0)
			continue;
		if (filter_res != -1) {
			res->class   = 0;
			res->classid = filter_res;
		} else {
			*res = prog->res;
		}

		ret = tcf_exts_exec(skb, &prog->exts, res);
		if (ret < 0)
			continue;

		break;
	}
	rcu_read_unlock();

	return ret;
}

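/* eBPF programs come in by fd and never carry a classic opcode array, so a
 * NULL bpf_ops is what tells the two flavours apart.
 */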
static bool cls_bpf_is_ebpf(const struct cls_bpf_prog *prog)
{
	return !prog->bpf_ops;
}

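/* Package up @prog into a tc_cls_bpf_offload command and pass it down to
 * the device through the ndo_setup_tc() hook.
 */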
static int cls_bpf_offload_cmd(struct tcf_proto *tp, struct cls_bpf_prog *prog,
			       enum tc_clsbpf_command cmd)
{
	struct net_device *dev = tp->q->dev_queue->dev;
	struct tc_cls_bpf_offload bpf_offload = {};
	struct tc_to_netdev offload;

	offload.type = TC_SETUP_CLSBPF;
	offload.cls_bpf = &bpf_offload;

	bpf_offload.command = cmd;
	bpf_offload.exts = &prog->exts;
	bpf_offload.prog = prog->filter;
	bpf_offload.name = prog->bpf_name;
	bpf_offload.exts_integrated = prog->exts_integrated;
	bpf_offload.gen_flags = prog->gen_flags;

	return dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle,
					     tp->protocol, &offload);
}

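/* Decide whether the hardware offload needs to be added, replaced or torn
 * down when (re)attaching @prog. An offload failure is only fatal when
 * skip_sw was requested, i.e. hardware is the sole execution engine.
 */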
static int cls_bpf_offload(struct tcf_proto *tp, struct cls_bpf_prog *prog,
			   struct cls_bpf_prog *oldprog)
{
	struct net_device *dev = tp->q->dev_queue->dev;
	struct cls_bpf_prog *obj = prog;
	enum tc_clsbpf_command cmd;
	bool skip_sw;
	int ret;

	skip_sw = tc_skip_sw(prog->gen_flags) ||
		(oldprog && tc_skip_sw(oldprog->gen_flags));

	if (oldprog && oldprog->offloaded) {
		if (tc_should_offload(dev, tp, prog->gen_flags)) {
			cmd = TC_CLSBPF_REPLACE;
		} else if (!tc_skip_sw(prog->gen_flags)) {
			obj = oldprog;
			cmd = TC_CLSBPF_DESTROY;
		} else {
			return -EINVAL;
		}
	} else {
		if (!tc_should_offload(dev, tp, prog->gen_flags))
			return skip_sw ? -EINVAL : 0;
		cmd = TC_CLSBPF_ADD;
	}

	ret = cls_bpf_offload_cmd(tp, obj, cmd);
	if (ret)
		return skip_sw ? ret : 0;

	obj->offloaded = true;
	if (oldprog)
		oldprog->offloaded = false;

	return 0;
}

static void cls_bpf_stop_offload(struct tcf_proto *tp,
				 struct cls_bpf_prog *prog)
{
	int err;

	if (!prog->offloaded)
		return;

	err = cls_bpf_offload_cmd(tp, prog, TC_CLSBPF_DESTROY);
	if (err) {
		pr_err("Stopping hardware offload failed: %d\n", err);
		return;
	}

	prog->offloaded = false;
}

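/* Let the driver fold its hardware counters into the filter's stats; used
 * by the dump path below so reported counters include offloaded traffic.
 */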
static void cls_bpf_offload_update_stats(struct tcf_proto *tp,
					 struct cls_bpf_prog *prog)
{
	if (!prog->offloaded)
		return;

	cls_bpf_offload_cmd(tp, prog, TC_CLSBPF_STATS);
}

static int cls_bpf_init(struct tcf_proto *tp)
{
	struct cls_bpf_head *head;

	head = kzalloc(sizeof(*head), GFP_KERNEL);
	if (head == NULL)
		return -ENOBUFS;

	INIT_LIST_HEAD_RCU(&head->plist);
	rcu_assign_pointer(tp->root, head);

	return 0;
}

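/* Free a filter and everything it owns. Callers must ensure no RCU reader
 * can still hold a reference, typically by going through call_rcu().
 */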
static void cls_bpf_delete_prog(struct tcf_proto *tp, struct cls_bpf_prog *prog)
{
	tcf_exts_destroy(&prog->exts);

	if (cls_bpf_is_ebpf(prog))
		bpf_prog_put(prog->filter);
	else
		bpf_prog_destroy(prog->filter);

	kfree(prog->bpf_name);
	kfree(prog->bpf_ops);
	kfree(prog);
}

static void __cls_bpf_delete_prog(struct rcu_head *rcu)
{
	struct cls_bpf_prog *prog = container_of(rcu, struct cls_bpf_prog, rcu);

	cls_bpf_delete_prog(prog->tp, prog);
}

static int cls_bpf_delete(struct tcf_proto *tp, unsigned long arg)
{
	struct cls_bpf_prog *prog = (struct cls_bpf_prog *) arg;

	cls_bpf_stop_offload(tp, prog);
	list_del_rcu(&prog->link);
	tcf_unbind_filter(tp, &prog->res);
	call_rcu(&prog->rcu, __cls_bpf_delete_prog);

	return 0;
}

static bool cls_bpf_destroy(struct tcf_proto *tp, bool force)
{
	struct cls_bpf_head *head = rtnl_dereference(tp->root);
	struct cls_bpf_prog *prog, *tmp;

	if (!force && !list_empty(&head->plist))
		return false;

	list_for_each_entry_safe(prog, tmp, &head->plist, link) {
		cls_bpf_stop_offload(tp, prog);
		list_del_rcu(&prog->link);
		tcf_unbind_filter(tp, &prog->res);
		call_rcu(&prog->rcu, __cls_bpf_delete_prog);
	}

	RCU_INIT_POINTER(tp->root, NULL);
	kfree_rcu(head, rcu);
	return true;
}

static unsigned long cls_bpf_get(struct tcf_proto *tp, u32 handle)
{
	struct cls_bpf_head *head = rtnl_dereference(tp->root);
	struct cls_bpf_prog *prog;
	unsigned long ret = 0UL;

	if (head == NULL)
		return 0UL;

	list_for_each_entry(prog, &head->plist, link) {
		if (prog->handle == handle) {
			ret = (unsigned long) prog;
			break;
		}
	}

	return ret;
}

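/* Classic BPF setup: copy the opcode array out of the netlink attributes
 * and have bpf_prog_create() validate and, where supported, JIT it.
 */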
static int cls_bpf_prog_from_ops(struct nlattr **tb, struct cls_bpf_prog *prog)
{
	struct sock_filter *bpf_ops;
	struct sock_fprog_kern fprog_tmp;
	struct bpf_prog *fp;
	u16 bpf_size, bpf_num_ops;
	int ret;

	bpf_num_ops = nla_get_u16(tb[TCA_BPF_OPS_LEN]);
	if (bpf_num_ops > BPF_MAXINSNS || bpf_num_ops == 0)
		return -EINVAL;

	bpf_size = bpf_num_ops * sizeof(*bpf_ops);
	if (bpf_size != nla_len(tb[TCA_BPF_OPS]))
		return -EINVAL;

	bpf_ops = kzalloc(bpf_size, GFP_KERNEL);
	if (bpf_ops == NULL)
		return -ENOMEM;

	memcpy(bpf_ops, nla_data(tb[TCA_BPF_OPS]), bpf_size);

	fprog_tmp.len = bpf_num_ops;
	fprog_tmp.filter = bpf_ops;

	ret = bpf_prog_create(&fp, &fprog_tmp);
	if (ret < 0) {
		kfree(bpf_ops);
		return ret;
	}

	prog->bpf_ops = bpf_ops;
	prog->bpf_num_ops = bpf_num_ops;
	prog->bpf_name = NULL;
	prog->filter = fp;

	return 0;
}

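/* eBPF setup: look up an already-loaded BPF_PROG_TYPE_SCHED_CLS program by
 * fd, taking a reference, and remember an optional name for dumps.
 */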
static int cls_bpf_prog_from_efd(struct nlattr **tb, struct cls_bpf_prog *prog,
				 const struct tcf_proto *tp)
{
	struct bpf_prog *fp;
	char *name = NULL;
	u32 bpf_fd;

	bpf_fd = nla_get_u32(tb[TCA_BPF_FD]);

	fp = bpf_prog_get_type(bpf_fd, BPF_PROG_TYPE_SCHED_CLS);
	if (IS_ERR(fp))
		return PTR_ERR(fp);

	if (tb[TCA_BPF_NAME]) {
		name = kmemdup(nla_data(tb[TCA_BPF_NAME]),
			       nla_len(tb[TCA_BPF_NAME]),
			       GFP_KERNEL);
		if (!name) {
			bpf_prog_put(fp);
			return -ENOMEM;
		}
	}

	prog->bpf_ops = NULL;
	prog->bpf_fd = bpf_fd;
	prog->bpf_name = name;
	prog->filter = fp;

	if (fp->dst_needed && !(tp->q->flags & TCQ_F_INGRESS))
		netif_keep_dst(qdisc_dev(tp->q));

	return 0;
}

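/* Shared configuration step: exactly one of classic opcodes or an eBPF fd
 * must be given; flags are validated, then actions, classid binding and
 * the program itself are installed into @prog.
 */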
static int cls_bpf_modify_existing(struct net *net, struct tcf_proto *tp,
				   struct cls_bpf_prog *prog,
				   unsigned long base, struct nlattr **tb,
				   struct nlattr *est, bool ovr)
{
	bool is_bpf, is_ebpf, have_exts = false;
	struct tcf_exts exts;
	u32 gen_flags = 0;
	int ret;

	is_bpf = tb[TCA_BPF_OPS_LEN] && tb[TCA_BPF_OPS];
	is_ebpf = tb[TCA_BPF_FD];
	if ((!is_bpf && !is_ebpf) || (is_bpf && is_ebpf))
		return -EINVAL;

	ret = tcf_exts_init(&exts, TCA_BPF_ACT, TCA_BPF_POLICE);
	if (ret < 0)
		return ret;
	ret = tcf_exts_validate(net, tp, tb, est, &exts, ovr);
	if (ret < 0)
		goto errout;

	if (tb[TCA_BPF_FLAGS]) {
		u32 bpf_flags = nla_get_u32(tb[TCA_BPF_FLAGS]);

		if (bpf_flags & ~TCA_BPF_FLAG_ACT_DIRECT) {
			ret = -EINVAL;
			goto errout;
		}

		have_exts = bpf_flags & TCA_BPF_FLAG_ACT_DIRECT;
	}
	if (tb[TCA_BPF_FLAGS_GEN]) {
		gen_flags = nla_get_u32(tb[TCA_BPF_FLAGS_GEN]);
		if (gen_flags & ~CLS_BPF_SUPPORTED_GEN_FLAGS ||
		    !tc_flags_valid(gen_flags)) {
			ret = -EINVAL;
			goto errout;
		}
	}

	prog->exts_integrated = have_exts;
	prog->gen_flags = gen_flags;

	ret = is_bpf ? cls_bpf_prog_from_ops(tb, prog) :
		       cls_bpf_prog_from_efd(tb, prog, tp);
	if (ret < 0)
		goto errout;

	if (tb[TCA_BPF_CLASSID]) {
		prog->res.classid = nla_get_u32(tb[TCA_BPF_CLASSID]);
		tcf_bind_filter(tp, &prog->res, base);
	}

	tcf_exts_change(tp, &prog->exts, &exts);
	return 0;

errout:
	tcf_exts_destroy(&exts);
	return ret;
}

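/* Generate the next free auto handle, wrapping around before 0x7FFFFFFF
 * and returning 0 if no free handle could be found.
 */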
static u32 cls_bpf_grab_new_handle(struct tcf_proto *tp,
				   struct cls_bpf_head *head)
{
	unsigned int i = 0x80000000;
	u32 handle;

	do {
		if (++head->hgen == 0x7FFFFFFF)
			head->hgen = 1;
	} while (--i > 0 && cls_bpf_get(tp, head->hgen));

	if (unlikely(i == 0)) {
		pr_err("Insufficient number of handles\n");
		handle = 0;
	} else {
		handle = head->hgen;
	}

	return handle;
}

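/* Netlink add/replace entry point. A replacement is built as an entirely
 * new cls_bpf_prog and swapped into the list via RCU; the old program is
 * released only after a grace period.
 */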
static int cls_bpf_change(struct net *net, struct sk_buff *in_skb,
			  struct tcf_proto *tp, unsigned long base,
			  u32 handle, struct nlattr **tca,
			  unsigned long *arg, bool ovr)
{
	struct cls_bpf_head *head = rtnl_dereference(tp->root);
	struct cls_bpf_prog *oldprog = (struct cls_bpf_prog *) *arg;
	struct nlattr *tb[TCA_BPF_MAX + 1];
	struct cls_bpf_prog *prog;
	int ret;

	if (tca[TCA_OPTIONS] == NULL)
		return -EINVAL;

	ret = nla_parse_nested(tb, TCA_BPF_MAX, tca[TCA_OPTIONS], bpf_policy);
	if (ret < 0)
		return ret;

	prog = kzalloc(sizeof(*prog), GFP_KERNEL);
	if (!prog)
		return -ENOBUFS;

	ret = tcf_exts_init(&prog->exts, TCA_BPF_ACT, TCA_BPF_POLICE);
	if (ret < 0)
		goto errout;

	if (oldprog) {
		if (handle && oldprog->handle != handle) {
			ret = -EINVAL;
			goto errout;
		}
	}

	if (handle == 0)
		prog->handle = cls_bpf_grab_new_handle(tp, head);
	else
		prog->handle = handle;
	if (prog->handle == 0) {
		ret = -EINVAL;
		goto errout;
	}

	ret = cls_bpf_modify_existing(net, tp, prog, base, tb, tca[TCA_RATE],
				      ovr);
	if (ret < 0)
		goto errout;

	ret = cls_bpf_offload(tp, prog, oldprog);
	if (ret) {
		cls_bpf_delete_prog(tp, prog);
		return ret;
	}

	if (oldprog) {
		list_replace_rcu(&oldprog->link, &prog->link);
		tcf_unbind_filter(tp, &oldprog->res);
		call_rcu(&oldprog->rcu, __cls_bpf_delete_prog);
	} else {
		list_add_rcu(&prog->link, &head->plist);
	}

	*arg = (unsigned long) prog;
	return 0;

errout:
	tcf_exts_destroy(&prog->exts);
	kfree(prog);
	return ret;
}

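/* Dump helpers: classic filters report their opcode array, eBPF filters
 * the fd and optional name they were loaded with.
 */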
static int cls_bpf_dump_bpf_info(const struct cls_bpf_prog *prog,
				 struct sk_buff *skb)
{
	struct nlattr *nla;

	if (nla_put_u16(skb, TCA_BPF_OPS_LEN, prog->bpf_num_ops))
		return -EMSGSIZE;

	nla = nla_reserve(skb, TCA_BPF_OPS, prog->bpf_num_ops *
			  sizeof(struct sock_filter));
	if (nla == NULL)
		return -EMSGSIZE;

	memcpy(nla_data(nla), prog->bpf_ops, nla_len(nla));

	return 0;
}

static int cls_bpf_dump_ebpf_info(const struct cls_bpf_prog *prog,
				  struct sk_buff *skb)
{
	if (nla_put_u32(skb, TCA_BPF_FD, prog->bpf_fd))
		return -EMSGSIZE;

	if (prog->bpf_name &&
	    nla_put_string(skb, TCA_BPF_NAME, prog->bpf_name))
		return -EMSGSIZE;

	return 0;
}

static int cls_bpf_dump(struct net *net, struct tcf_proto *tp, unsigned long fh,
			struct sk_buff *skb, struct tcmsg *tm)
{
	struct cls_bpf_prog *prog = (struct cls_bpf_prog *) fh;
	struct nlattr *nest;
	u32 bpf_flags = 0;
	int ret;

	if (prog == NULL)
		return skb->len;

	tm->tcm_handle = prog->handle;

	cls_bpf_offload_update_stats(tp, prog);

	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;

	if (prog->res.classid &&
	    nla_put_u32(skb, TCA_BPF_CLASSID, prog->res.classid))
		goto nla_put_failure;

	if (cls_bpf_is_ebpf(prog))
		ret = cls_bpf_dump_ebpf_info(prog, skb);
	else
		ret = cls_bpf_dump_bpf_info(prog, skb);
	if (ret)
		goto nla_put_failure;

	if (tcf_exts_dump(skb, &prog->exts) < 0)
		goto nla_put_failure;

	if (prog->exts_integrated)
		bpf_flags |= TCA_BPF_FLAG_ACT_DIRECT;
	if (bpf_flags && nla_put_u32(skb, TCA_BPF_FLAGS, bpf_flags))
		goto nla_put_failure;
	if (prog->gen_flags &&
	    nla_put_u32(skb, TCA_BPF_FLAGS_GEN, prog->gen_flags))
		goto nla_put_failure;

	nla_nest_end(skb, nest);

	if (tcf_exts_dump_stats(skb, &prog->exts) < 0)
		goto nla_put_failure;

	return skb->len;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

static void cls_bpf_walk(struct tcf_proto *tp, struct tcf_walker *arg)
{
	struct cls_bpf_head *head = rtnl_dereference(tp->root);
	struct cls_bpf_prog *prog;

	list_for_each_entry(prog, &head->plist, link) {
		if (arg->count < arg->skip)
			goto skip;
		if (arg->fn(tp, (unsigned long) prog, arg) < 0) {
			arg->stop = 1;
			break;
		}
skip:
		arg->count++;
	}
}

static struct tcf_proto_ops cls_bpf_ops __read_mostly = {
	.kind		=	"bpf",
	.owner		=	THIS_MODULE,
	.classify	=	cls_bpf_classify,
	.init		=	cls_bpf_init,
	.destroy	=	cls_bpf_destroy,
	.get		=	cls_bpf_get,
	.change		=	cls_bpf_change,
	.delete		=	cls_bpf_delete,
	.walk		=	cls_bpf_walk,
	.dump		=	cls_bpf_dump,
};

static int __init cls_bpf_init_mod(void)
{
	return register_tcf_proto_ops(&cls_bpf_ops);
}

static void __exit cls_bpf_exit_mod(void)
{
	unregister_tcf_proto_ops(&cls_bpf_ops);
}

module_init(cls_bpf_init_mod);
module_exit(cls_bpf_exit_mod);