/*
 * Berkeley Packet Filter based traffic classifier
 *
 * Might be used to classify traffic through flexible, user-defined and
 * possibly JIT-ed BPF filters for traffic control as an alternative to
 * ematches.
 *
 * (C) 2013 Daniel Borkmann <dborkman@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

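/* Example usage from the command line via tc(8); illustrative only, the
 * exact syntax depends on the iproute2 version, and the device, object
 * file and section name below are placeholders:
 *
 *   # attach an eBPF object as a direct-action classifier on ingress
 *   tc qdisc add dev em1 clsact
 *   tc filter add dev em1 ingress bpf da obj prog.o sec classifier
 *
 *   # attach a classic BPF filter given as raw opcodes
 *   tc filter add dev em1 parent ffff: bpf bytecode \
 *      '1,6 0 0 4294967295,' flowid 1:1
 */
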
#include <linux/module.h>
#include <linux/types.h>
#include <linux/skbuff.h>
#include <linux/filter.h>
#include <linux/bpf.h>
#include <linux/idr.h>

#include <net/rtnetlink.h>
#include <net/pkt_cls.h>
#include <net/sock.h>

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Daniel Borkmann <dborkman@redhat.com>");
MODULE_DESCRIPTION("TC BPF based classifier");

#define CLS_BPF_NAME_LEN	256
#define CLS_BPF_SUPPORTED_GEN_FLAGS		\
	(TCA_CLS_FLAGS_SKIP_HW | TCA_CLS_FLAGS_SKIP_SW)

struct cls_bpf_head {
	struct list_head plist;
	struct idr handle_idr;
	struct rcu_head rcu;
};

struct cls_bpf_prog {
	struct bpf_prog *filter;
	struct list_head link;
	struct tcf_result res;
	bool exts_integrated;
	u32 gen_flags;
	unsigned int in_hw_count;
	struct tcf_exts exts;
	u32 handle;
	u16 bpf_num_ops;
	struct sock_filter *bpf_ops;
	const char *bpf_name;
	struct tcf_proto *tp;
	struct rcu_work rwork;
};

static const struct nla_policy bpf_policy[TCA_BPF_MAX + 1] = {
	[TCA_BPF_CLASSID]	= { .type = NLA_U32 },
	[TCA_BPF_FLAGS]		= { .type = NLA_U32 },
	[TCA_BPF_FLAGS_GEN]	= { .type = NLA_U32 },
	[TCA_BPF_FD]		= { .type = NLA_U32 },
	[TCA_BPF_NAME]		= { .type = NLA_NUL_STRING,
				    .len = CLS_BPF_NAME_LEN },
	[TCA_BPF_OPS_LEN]	= { .type = NLA_U16 },
	[TCA_BPF_OPS]		= { .type = NLA_BINARY,
				    .len = sizeof(struct sock_filter) * BPF_MAXINSNS },
};

static int cls_bpf_exec_opcode(int code)
{
	switch (code) {
	case TC_ACT_OK:
	case TC_ACT_SHOT:
	case TC_ACT_STOLEN:
	case TC_ACT_TRAP:
	case TC_ACT_REDIRECT:
	case TC_ACT_UNSPEC:
		return code;
	default:
		return TC_ACT_UNSPEC;
	}
}

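/* cls_bpf_classify - run the attached BPF programs against an skb
 *
 * Walks the filter list under rcu_read_lock() (also needed for map
 * access from the programs) and runs each program on the skb. On
 * ingress the MAC header is pushed back first, since tc programs expect
 * skb->data to point to it. With TCA_BPF_FLAG_ACT_DIRECT the program's
 * return code is the TC verdict and the minor classid is taken from
 * skb->tc_classid; otherwise a result of 0 means mismatch, -1 selects
 * the filter's default classid and anything else is used as the classid
 * directly, after which the filter's actions are executed.
 *
 * A minimal direct-action program could look as follows (sketch only,
 * assuming a clang -target bpf build; names are placeholders):
 *
 *	int cls_main(struct __sk_buff *skb)
 *	{
 *		skb->tc_classid = 0x10;
 *		return TC_ACT_OK;
 *	}
 */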
static int cls_bpf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
			    struct tcf_result *res)
{
	struct cls_bpf_head *head = rcu_dereference_bh(tp->root);
	bool at_ingress = skb_at_tc_ingress(skb);
	struct cls_bpf_prog *prog;
	int ret = -1;

	/* Needed here for accessing maps. */
	rcu_read_lock();
	list_for_each_entry_rcu(prog, &head->plist, link) {
		int filter_res;

		qdisc_skb_cb(skb)->tc_classid = prog->res.classid;

		if (tc_skip_sw(prog->gen_flags)) {
			filter_res = prog->exts_integrated ? TC_ACT_UNSPEC : 0;
		} else if (at_ingress) {
			/* It is safe to push/pull even if skb_shared() */
			__skb_push(skb, skb->mac_len);
			bpf_compute_data_pointers(skb);
			filter_res = BPF_PROG_RUN(prog->filter, skb);
			__skb_pull(skb, skb->mac_len);
		} else {
			bpf_compute_data_pointers(skb);
			filter_res = BPF_PROG_RUN(prog->filter, skb);
		}

		if (prog->exts_integrated) {
			res->class   = 0;
			res->classid = TC_H_MAJ(prog->res.classid) |
				       qdisc_skb_cb(skb)->tc_classid;

			ret = cls_bpf_exec_opcode(filter_res);
			if (ret == TC_ACT_UNSPEC)
				continue;
			break;
		}

		if (filter_res == 0)
			continue;
		if (filter_res != -1) {
			res->class   = 0;
			res->classid = filter_res;
		} else {
			*res = prog->res;
		}

		ret = tcf_exts_exec(skb, &prog->exts, res);
		if (ret < 0)
			continue;

		break;
	}
	rcu_read_unlock();

	return ret;
}

static bool cls_bpf_is_ebpf(const struct cls_bpf_prog *prog)
{
	return !prog->bpf_ops;
}

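/* Send one offload request to the drivers bound to this block. @prog
 * and @oldprog encode the operation: both non-NULL means replace, only
 * @prog means add, only @oldprog means destroy. If an add/replace
 * fails, the call is repeated with the arguments swapped to roll back
 * to the previous state before the error is returned.
 */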
static int cls_bpf_offload_cmd(struct tcf_proto *tp, struct cls_bpf_prog *prog,
			       struct cls_bpf_prog *oldprog,
			       struct netlink_ext_ack *extack)
{
	struct tcf_block *block = tp->chain->block;
	struct tc_cls_bpf_offload cls_bpf = {};
	struct cls_bpf_prog *obj;
	bool skip_sw;
	int err;

	skip_sw = prog && tc_skip_sw(prog->gen_flags);
	obj = prog ?: oldprog;

	tc_cls_common_offload_init(&cls_bpf.common, tp, obj->gen_flags,
				   extack);
	cls_bpf.command = TC_CLSBPF_OFFLOAD;
	cls_bpf.exts = &obj->exts;
	cls_bpf.prog = prog ? prog->filter : NULL;
	cls_bpf.oldprog = oldprog ? oldprog->filter : NULL;
	cls_bpf.name = obj->bpf_name;
	cls_bpf.exts_integrated = obj->exts_integrated;

	if (oldprog)
		tcf_block_offload_dec(block, &oldprog->gen_flags);

	err = tc_setup_cb_call(block, NULL, TC_SETUP_CLSBPF, &cls_bpf, skip_sw);
	if (prog) {
		if (err < 0) {
			cls_bpf_offload_cmd(tp, oldprog, prog, extack);
			return err;
		} else if (err > 0) {
			prog->in_hw_count = err;
			tcf_block_offload_inc(block, &prog->gen_flags);
		}
	}

	if (prog && skip_sw && !(prog->gen_flags & TCA_CLS_FLAGS_IN_HW))
		return -EINVAL;

	return 0;
}

static u32 cls_bpf_flags(u32 flags)
{
	return flags & CLS_BPF_SUPPORTED_GEN_FLAGS;
}

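/* Decide whether the hardware needs to see this change at all: filters
 * marked skip_hw never reach the driver, and a replace may not alter
 * the skip_sw/skip_hw flags of an existing filter.
 */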
static int cls_bpf_offload(struct tcf_proto *tp, struct cls_bpf_prog *prog,
			   struct cls_bpf_prog *oldprog,
			   struct netlink_ext_ack *extack)
{
	if (prog && oldprog &&
	    cls_bpf_flags(prog->gen_flags) !=
	    cls_bpf_flags(oldprog->gen_flags))
		return -EINVAL;

	if (prog && tc_skip_hw(prog->gen_flags))
		prog = NULL;
	if (oldprog && tc_skip_hw(oldprog->gen_flags))
		oldprog = NULL;
	if (!prog && !oldprog)
		return 0;

	return cls_bpf_offload_cmd(tp, prog, oldprog, extack);
}

static void cls_bpf_stop_offload(struct tcf_proto *tp,
				 struct cls_bpf_prog *prog,
				 struct netlink_ext_ack *extack)
{
	int err;

	err = cls_bpf_offload_cmd(tp, NULL, prog, extack);
	if (err)
		pr_err("Stopping hardware offload failed: %d\n", err);
}

static void cls_bpf_offload_update_stats(struct tcf_proto *tp,
					 struct cls_bpf_prog *prog)
{
	struct tcf_block *block = tp->chain->block;
	struct tc_cls_bpf_offload cls_bpf = {};

	tc_cls_common_offload_init(&cls_bpf.common, tp, prog->gen_flags, NULL);
	cls_bpf.command = TC_CLSBPF_STATS;
	cls_bpf.exts = &prog->exts;
	cls_bpf.prog = prog->filter;
	cls_bpf.name = prog->bpf_name;
	cls_bpf.exts_integrated = prog->exts_integrated;

	tc_setup_cb_call(block, NULL, TC_SETUP_CLSBPF, &cls_bpf, false);
}

static int cls_bpf_init(struct tcf_proto *tp)
{
	struct cls_bpf_head *head;

	head = kzalloc(sizeof(*head), GFP_KERNEL);
	if (head == NULL)
		return -ENOBUFS;

	INIT_LIST_HEAD_RCU(&head->plist);
	idr_init(&head->handle_idr);
	rcu_assign_pointer(tp->root, head);

	return 0;
}

static void cls_bpf_free_parms(struct cls_bpf_prog *prog)
{
	if (cls_bpf_is_ebpf(prog))
		bpf_prog_put(prog->filter);
	else
		bpf_prog_destroy(prog->filter);

	kfree(prog->bpf_name);
	kfree(prog->bpf_ops);
}

static void __cls_bpf_delete_prog(struct cls_bpf_prog *prog)
{
	tcf_exts_destroy(&prog->exts);
	tcf_exts_put_net(&prog->exts);

	cls_bpf_free_parms(prog);
	kfree(prog);
}

static void cls_bpf_delete_prog_work(struct work_struct *work)
{
	struct cls_bpf_prog *prog = container_of(to_rcu_work(work),
						 struct cls_bpf_prog,
						 rwork);
	rtnl_lock();
	__cls_bpf_delete_prog(prog);
	rtnl_unlock();
}

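/* Unlink a filter and release it. Freeing must wait for an RCU grace
 * period since cls_bpf_classify() walks the list under rcu_read_lock();
 * tcf_queue_work() defers __cls_bpf_delete_prog() accordingly, and the
 * work item retakes the rtnl lock for the extension teardown.
 */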
static void __cls_bpf_delete(struct tcf_proto *tp, struct cls_bpf_prog *prog,
			     struct netlink_ext_ack *extack)
{
	struct cls_bpf_head *head = rtnl_dereference(tp->root);

	idr_remove(&head->handle_idr, prog->handle);
	cls_bpf_stop_offload(tp, prog, extack);
	list_del_rcu(&prog->link);
	tcf_unbind_filter(tp, &prog->res);
	if (tcf_exts_get_net(&prog->exts))
		tcf_queue_work(&prog->rwork, cls_bpf_delete_prog_work);
	else
		__cls_bpf_delete_prog(prog);
}

static int cls_bpf_delete(struct tcf_proto *tp, void *arg, bool *last,
			  struct netlink_ext_ack *extack)
{
	struct cls_bpf_head *head = rtnl_dereference(tp->root);

	__cls_bpf_delete(tp, arg, extack);
	*last = list_empty(&head->plist);
	return 0;
}

static void cls_bpf_destroy(struct tcf_proto *tp,
			    struct netlink_ext_ack *extack)
{
	struct cls_bpf_head *head = rtnl_dereference(tp->root);
	struct cls_bpf_prog *prog, *tmp;

	list_for_each_entry_safe(prog, tmp, &head->plist, link)
		__cls_bpf_delete(tp, prog, extack);

	idr_destroy(&head->handle_idr);
	kfree_rcu(head, rcu);
}

static void *cls_bpf_get(struct tcf_proto *tp, u32 handle)
{
	struct cls_bpf_head *head = rtnl_dereference(tp->root);
	struct cls_bpf_prog *prog;

	list_for_each_entry(prog, &head->plist, link) {
		if (prog->handle == handle)
			return prog;
	}

	return NULL;
}

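/* Classic BPF case: userspace passed raw sock_filter opcodes via
 * TCA_BPF_OPS/TCA_BPF_OPS_LEN. Validate the length, copy the opcodes
 * and let bpf_prog_create() check the program and set up the (possibly
 * JITed) image.
 */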
static int cls_bpf_prog_from_ops(struct nlattr **tb, struct cls_bpf_prog *prog)
{
	struct sock_filter *bpf_ops;
	struct sock_fprog_kern fprog_tmp;
	struct bpf_prog *fp;
	u16 bpf_size, bpf_num_ops;
	int ret;

	bpf_num_ops = nla_get_u16(tb[TCA_BPF_OPS_LEN]);
	if (bpf_num_ops > BPF_MAXINSNS || bpf_num_ops == 0)
		return -EINVAL;

	bpf_size = bpf_num_ops * sizeof(*bpf_ops);
	if (bpf_size != nla_len(tb[TCA_BPF_OPS]))
		return -EINVAL;

	bpf_ops = kmemdup(nla_data(tb[TCA_BPF_OPS]), bpf_size, GFP_KERNEL);
	if (bpf_ops == NULL)
		return -ENOMEM;

	fprog_tmp.len = bpf_num_ops;
	fprog_tmp.filter = bpf_ops;

	ret = bpf_prog_create(&fp, &fprog_tmp);
	if (ret < 0) {
		kfree(bpf_ops);
		return ret;
	}

	prog->bpf_ops = bpf_ops;
	prog->bpf_num_ops = bpf_num_ops;
	prog->bpf_name = NULL;
	prog->filter = fp;

	return 0;
}

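/* eBPF case: userspace passed the fd of an already loaded
 * BPF_PROG_TYPE_SCHED_CLS program via TCA_BPF_FD, plus an optional name
 * for dumping. skip_sw is forwarded so that device-bound programs are
 * only accepted for filters that will live in hardware.
 */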
static int cls_bpf_prog_from_efd(struct nlattr **tb, struct cls_bpf_prog *prog,
				 u32 gen_flags, const struct tcf_proto *tp)
{
	struct bpf_prog *fp;
	char *name = NULL;
	bool skip_sw;
	u32 bpf_fd;

	bpf_fd = nla_get_u32(tb[TCA_BPF_FD]);
	skip_sw = gen_flags & TCA_CLS_FLAGS_SKIP_SW;

	fp = bpf_prog_get_type_dev(bpf_fd, BPF_PROG_TYPE_SCHED_CLS, skip_sw);
	if (IS_ERR(fp))
		return PTR_ERR(fp);

	if (tb[TCA_BPF_NAME]) {
		name = nla_memdup(tb[TCA_BPF_NAME], GFP_KERNEL);
		if (!name) {
			bpf_prog_put(fp);
			return -ENOMEM;
		}
	}

	prog->bpf_ops = NULL;
	prog->bpf_name = name;
	prog->filter = fp;

	if (fp->dst_needed)
		tcf_block_netif_keep_dst(tp->chain->block);

	return 0;
}

static int cls_bpf_set_parms(struct net *net, struct tcf_proto *tp,
			     struct cls_bpf_prog *prog, unsigned long base,
			     struct nlattr **tb, struct nlattr *est, bool ovr,
			     struct netlink_ext_ack *extack)
{
	bool is_bpf, is_ebpf, have_exts = false;
	u32 gen_flags = 0;
	int ret;

	is_bpf = tb[TCA_BPF_OPS_LEN] && tb[TCA_BPF_OPS];
	is_ebpf = tb[TCA_BPF_FD];
	if ((!is_bpf && !is_ebpf) || (is_bpf && is_ebpf))
		return -EINVAL;

	ret = tcf_exts_validate(net, tp, tb, est, &prog->exts, ovr, extack);
	if (ret < 0)
		return ret;

	if (tb[TCA_BPF_FLAGS]) {
		u32 bpf_flags = nla_get_u32(tb[TCA_BPF_FLAGS]);

		if (bpf_flags & ~TCA_BPF_FLAG_ACT_DIRECT)
			return -EINVAL;

		have_exts = bpf_flags & TCA_BPF_FLAG_ACT_DIRECT;
	}
	if (tb[TCA_BPF_FLAGS_GEN]) {
		gen_flags = nla_get_u32(tb[TCA_BPF_FLAGS_GEN]);
		if (gen_flags & ~CLS_BPF_SUPPORTED_GEN_FLAGS ||
		    !tc_flags_valid(gen_flags))
			return -EINVAL;
	}

	prog->exts_integrated = have_exts;
	prog->gen_flags = gen_flags;

	ret = is_bpf ? cls_bpf_prog_from_ops(tb, prog) :
		       cls_bpf_prog_from_efd(tb, prog, gen_flags, tp);
	if (ret < 0)
		return ret;

	if (tb[TCA_BPF_CLASSID]) {
		prog->res.classid = nla_get_u32(tb[TCA_BPF_CLASSID]);
		tcf_bind_filter(tp, &prog->res, base);
	}

	return 0;
}

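/* Create or replace a filter. A handle of zero lets the IDR pick the
 * next free one, otherwise the requested handle is claimed unless it
 * already belongs to the filter being replaced. The program is offered
 * to the hardware before it is spliced into the RCU list; the old
 * filter, if any, is released via deferred work as in deletion.
 */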
static int cls_bpf_change(struct net *net, struct sk_buff *in_skb,
			  struct tcf_proto *tp, unsigned long base,
			  u32 handle, struct nlattr **tca,
			  void **arg, bool ovr, struct netlink_ext_ack *extack)
{
	struct cls_bpf_head *head = rtnl_dereference(tp->root);
	struct cls_bpf_prog *oldprog = *arg;
	struct nlattr *tb[TCA_BPF_MAX + 1];
	struct cls_bpf_prog *prog;
	int ret;

	if (tca[TCA_OPTIONS] == NULL)
		return -EINVAL;

	ret = nla_parse_nested(tb, TCA_BPF_MAX, tca[TCA_OPTIONS], bpf_policy,
			       NULL);
	if (ret < 0)
		return ret;

	prog = kzalloc(sizeof(*prog), GFP_KERNEL);
	if (!prog)
		return -ENOBUFS;

	ret = tcf_exts_init(&prog->exts, TCA_BPF_ACT, TCA_BPF_POLICE);
	if (ret < 0)
		goto errout;

	if (oldprog) {
		if (handle && oldprog->handle != handle) {
			ret = -EINVAL;
			goto errout;
		}
	}

	if (handle == 0) {
		handle = 1;
		ret = idr_alloc_u32(&head->handle_idr, prog, &handle,
				    INT_MAX, GFP_KERNEL);
	} else if (!oldprog) {
		ret = idr_alloc_u32(&head->handle_idr, prog, &handle,
				    handle, GFP_KERNEL);
	}

	if (ret)
		goto errout;
	prog->handle = handle;

	ret = cls_bpf_set_parms(net, tp, prog, base, tb, tca[TCA_RATE], ovr,
				extack);
	if (ret < 0)
		goto errout_idr;

	ret = cls_bpf_offload(tp, prog, oldprog, extack);
	if (ret)
		goto errout_parms;

	if (!tc_in_hw(prog->gen_flags))
		prog->gen_flags |= TCA_CLS_FLAGS_NOT_IN_HW;

	if (oldprog) {
		idr_replace(&head->handle_idr, prog, handle);
		list_replace_rcu(&oldprog->link, &prog->link);
		tcf_unbind_filter(tp, &oldprog->res);
		tcf_exts_get_net(&oldprog->exts);
		tcf_queue_work(&oldprog->rwork, cls_bpf_delete_prog_work);
	} else {
		list_add_rcu(&prog->link, &head->plist);
	}

	*arg = prog;
	return 0;

errout_parms:
	cls_bpf_free_parms(prog);
errout_idr:
	if (!oldprog)
		idr_remove(&head->handle_idr, prog->handle);
errout:
	tcf_exts_destroy(&prog->exts);
	kfree(prog);
	return ret;
}

static int cls_bpf_dump_bpf_info(const struct cls_bpf_prog *prog,
				 struct sk_buff *skb)
{
	struct nlattr *nla;

	if (nla_put_u16(skb, TCA_BPF_OPS_LEN, prog->bpf_num_ops))
		return -EMSGSIZE;

	nla = nla_reserve(skb, TCA_BPF_OPS, prog->bpf_num_ops *
			  sizeof(struct sock_filter));
	if (nla == NULL)
		return -EMSGSIZE;

	memcpy(nla_data(nla), prog->bpf_ops, nla_len(nla));

	return 0;
}

static int cls_bpf_dump_ebpf_info(const struct cls_bpf_prog *prog,
				  struct sk_buff *skb)
{
	struct nlattr *nla;

	if (prog->bpf_name &&
	    nla_put_string(skb, TCA_BPF_NAME, prog->bpf_name))
		return -EMSGSIZE;

	if (nla_put_u32(skb, TCA_BPF_ID, prog->filter->aux->id))
		return -EMSGSIZE;

	nla = nla_reserve(skb, TCA_BPF_TAG, sizeof(prog->filter->tag));
	if (nla == NULL)
		return -EMSGSIZE;

	memcpy(nla_data(nla), prog->filter->tag, nla_len(nla));

	return 0;
}

static int cls_bpf_dump(struct net *net, struct tcf_proto *tp, void *fh,
			struct sk_buff *skb, struct tcmsg *tm)
{
	struct cls_bpf_prog *prog = fh;
	struct nlattr *nest;
	u32 bpf_flags = 0;
	int ret;

	if (prog == NULL)
		return skb->len;

	tm->tcm_handle = prog->handle;

	cls_bpf_offload_update_stats(tp, prog);

	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;

	if (prog->res.classid &&
	    nla_put_u32(skb, TCA_BPF_CLASSID, prog->res.classid))
		goto nla_put_failure;

	if (cls_bpf_is_ebpf(prog))
		ret = cls_bpf_dump_ebpf_info(prog, skb);
	else
		ret = cls_bpf_dump_bpf_info(prog, skb);
	if (ret)
		goto nla_put_failure;

	if (tcf_exts_dump(skb, &prog->exts) < 0)
		goto nla_put_failure;

	if (prog->exts_integrated)
		bpf_flags |= TCA_BPF_FLAG_ACT_DIRECT;
	if (bpf_flags && nla_put_u32(skb, TCA_BPF_FLAGS, bpf_flags))
		goto nla_put_failure;
	if (prog->gen_flags &&
	    nla_put_u32(skb, TCA_BPF_FLAGS_GEN, prog->gen_flags))
		goto nla_put_failure;

	nla_nest_end(skb, nest);

	if (tcf_exts_dump_stats(skb, &prog->exts) < 0)
		goto nla_put_failure;

	return skb->len;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

static void cls_bpf_bind_class(void *fh, u32 classid, unsigned long cl,
			       void *q, unsigned long base)
{
	struct cls_bpf_prog *prog = fh;

	if (prog && prog->res.classid == classid) {
		if (cl)
			__tcf_bind_filter(q, &prog->res, base);
		else
			__tcf_unbind_filter(q, &prog->res);
	}
}

static void cls_bpf_walk(struct tcf_proto *tp, struct tcf_walker *arg)
{
	struct cls_bpf_head *head = rtnl_dereference(tp->root);
	struct cls_bpf_prog *prog;

	list_for_each_entry(prog, &head->plist, link) {
		if (arg->count < arg->skip)
			goto skip;
		if (arg->fn(tp, prog, arg) < 0) {
			arg->stop = 1;
			break;
		}
skip:
		arg->count++;
	}
}

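/* Replay all hardware-capable filters towards a single block callback,
 * used when a driver binds to or unbinds from an existing block (@add
 * selects between adding and removing the filters). An error is only
 * fatal when adding a filter that must run in hardware (skip_sw).
 */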
static int cls_bpf_reoffload(struct tcf_proto *tp, bool add, tc_setup_cb_t *cb,
			     void *cb_priv, struct netlink_ext_ack *extack)
{
	struct cls_bpf_head *head = rtnl_dereference(tp->root);
	struct tcf_block *block = tp->chain->block;
	struct tc_cls_bpf_offload cls_bpf = {};
	struct cls_bpf_prog *prog;
	int err;

	list_for_each_entry(prog, &head->plist, link) {
		if (tc_skip_hw(prog->gen_flags))
			continue;

		tc_cls_common_offload_init(&cls_bpf.common, tp, prog->gen_flags,
					   extack);
		cls_bpf.command = TC_CLSBPF_OFFLOAD;
		cls_bpf.exts = &prog->exts;
		cls_bpf.prog = add ? prog->filter : NULL;
		cls_bpf.oldprog = add ? NULL : prog->filter;
		cls_bpf.name = prog->bpf_name;
		cls_bpf.exts_integrated = prog->exts_integrated;

		err = cb(TC_SETUP_CLSBPF, &cls_bpf, cb_priv);
		if (err) {
			if (add && tc_skip_sw(prog->gen_flags))
				return err;
			continue;
		}

		tc_cls_offload_cnt_update(block, &prog->in_hw_count,
					  &prog->gen_flags, add);
	}

	return 0;
}

static struct tcf_proto_ops cls_bpf_ops __read_mostly = {
	.kind		=	"bpf",
	.owner		=	THIS_MODULE,
	.classify	=	cls_bpf_classify,
	.init		=	cls_bpf_init,
	.destroy	=	cls_bpf_destroy,
	.get		=	cls_bpf_get,
	.change		=	cls_bpf_change,
	.delete		=	cls_bpf_delete,
	.walk		=	cls_bpf_walk,
	.reoffload	=	cls_bpf_reoffload,
	.dump		=	cls_bpf_dump,
	.bind_class	=	cls_bpf_bind_class,
};

static int __init cls_bpf_init_mod(void)
{
	return register_tcf_proto_ops(&cls_bpf_ops);
}

static void __exit cls_bpf_exit_mod(void)
{
	unregister_tcf_proto_ops(&cls_bpf_ops);
}

module_init(cls_bpf_init_mod);
module_exit(cls_bpf_exit_mod);