/*
 * net/sched/cls_cgroup.c	Control Group Classifier
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Authors:	Thomas Graf <tgraf@suug.ch>
 */
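
/*
 * Example usage (a sketch; mount point, device name and handles are
 * illustrative): tasks are placed in a net_cls cgroup, the cgroup's
 * classid is set, and a "cgroup" filter maps that classid to a tc class.
 *
 *	mkdir /sys/fs/cgroup/net_cls/foo
 *	echo 0x100001 > /sys/fs/cgroup/net_cls/foo/net_cls.classid	(10:1)
 *	tc qdisc add dev eth0 root handle 10: htb
 *	tc class add dev eth0 parent 10: classid 10:1 htb rate 10mbit
 *	tc filter add dev eth0 parent 10: protocol ip prio 10 handle 1: cgroup
 */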

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/cgroup.h>
#include <linux/rcupdate.h>
#include <linux/fdtable.h>
#include <net/rtnetlink.h>
#include <net/pkt_cls.h>
#include <net/sock.h>
#include <net/cls_cgroup.h>

static inline struct cgroup_cls_state *css_cls_state(struct cgroup_subsys_state *css)
{
	return css ? container_of(css, struct cgroup_cls_state, css) : NULL;
}

static inline struct cgroup_cls_state *task_cls_state(struct task_struct *p)
{
	return css_cls_state(task_css(p, net_cls_subsys_id));
}

static struct cgroup_subsys_state *
cgrp_css_alloc(struct cgroup_subsys_state *parent_css)
{
	struct cgroup_cls_state *cs;

	cs = kzalloc(sizeof(*cs), GFP_KERNEL);
	if (!cs)
		return ERR_PTR(-ENOMEM);
	return &cs->css;
}

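/*
 * css_online() runs once the parent linkage is established: a new cgroup
 * inherits its classid from its parent (the root cgroup has no parent and
 * keeps the classid of 0 set by kzalloc()).
 */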
static int cgrp_css_online(struct cgroup_subsys_state *css)
{
	struct cgroup_cls_state *cs = css_cls_state(css);
	struct cgroup_cls_state *parent = css_cls_state(css_parent(css));

	if (parent)
		cs->classid = parent->classid;
	return 0;
}

static void cgrp_css_free(struct cgroup_subsys_state *css)
{
	kfree(css_cls_state(css));
}

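/*
 * A socket caches its owner's classid in sk->sk_classid, so when a task is
 * moved into a different net_cls cgroup the classid of its already-open
 * sockets must be rewritten. cgrp_attach() does that by walking every file
 * descriptor of every task being attached.
 */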
static int update_classid(const void *v, struct file *file, unsigned n)
{
	int err;
	struct socket *sock = sock_from_file(file, &err);

	if (sock)
		sock->sk->sk_classid = (u32)(unsigned long)v;
	return 0;
}

static void cgrp_attach(struct cgroup_subsys_state *css,
			struct cgroup_taskset *tset)
{
	struct task_struct *p;
	void *v;

	cgroup_taskset_for_each(p, css, tset) {
		task_lock(p);
		v = (void *)(unsigned long)task_cls_classid(p);
		iterate_fd(p->files, 0, update_classid, v);
		task_unlock(p);
	}
}

static u64 read_classid(struct cgroup_subsys_state *css, struct cftype *cft)
{
	return css_cls_state(css)->classid;
}

static int write_classid(struct cgroup_subsys_state *css, struct cftype *cft,
			 u64 value)
{
	css_cls_state(css)->classid = (u32) value;
	return 0;
}

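/*
 * The single control file; the cgroup core prefixes it with the subsystem
 * name, so it shows up to userspace as "net_cls.classid".
 */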
static struct cftype ss_files[] = {
	{
		.name = "classid",
		.read_u64 = read_classid,
		.write_u64 = write_classid,
	},
	{ }	/* terminate */
};

struct cgroup_subsys net_cls_subsys = {
	.name		= "net_cls",
	.css_alloc	= cgrp_css_alloc,
	.css_online	= cgrp_css_online,
	.css_free	= cgrp_css_free,
	.attach		= cgrp_attach,
	.subsys_id	= net_cls_subsys_id,
	.base_cftypes	= ss_files,
	.module		= THIS_MODULE,
};

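/*
 * Per-tcf_proto state of the "cgroup" classifier: a single optional filter
 * identified by @handle, plus extended matches and actions that further
 * restrict or act on matching packets.
 */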
struct cls_cgroup_head {
	u32			handle;
	struct tcf_exts		exts;
	struct tcf_ematch_tree	ematches;
};

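/*
 * Classify a packet by the net_cls classid of the task currently
 * transmitting it (or, in softirq context, by the classid cached in the
 * socket), then apply the configured ematches and actions.
 */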
static int cls_cgroup_classify(struct sk_buff *skb, const struct tcf_proto *tp,
			       struct tcf_result *res)
{
	struct cls_cgroup_head *head = tp->root;
	u32 classid;

	rcu_read_lock();
	classid = task_cls_state(current)->classid;
	rcu_read_unlock();

	/*
	 * Due to the nature of the classifier it is required to ignore all
	 * packets originating from softirq context as accessing `current'
	 * would lead to false results.
	 *
	 * This test assumes that all callers of dev_queue_xmit() explicitly
	 * disable bh. Knowing this, it is possible to detect softirq based
	 * calls by looking at the number of nested bh disable calls because
	 * softirqs always disable bh.
	 */
	if (in_serving_softirq()) {
		/* If there is an sk_classid we'll use that. */
		if (!skb->sk)
			return -1;
		classid = skb->sk->sk_classid;
	}

	if (!classid)
		return -1;

	if (!tcf_em_tree_match(skb, &head->ematches, NULL))
		return -1;

	res->classid = classid;
	res->class = 0;
	return tcf_exts_exec(skb, &head->exts, res);
}

static unsigned long cls_cgroup_get(struct tcf_proto *tp, u32 handle)
{
	return 0UL;
}

static void cls_cgroup_put(struct tcf_proto *tp, unsigned long f)
{
}

static int cls_cgroup_init(struct tcf_proto *tp)
{
	return 0;
}

static const struct tcf_ext_map cgroup_ext_map = {
	.action = TCA_CGROUP_ACT,
	.police = TCA_CGROUP_POLICE,
};

static const struct nla_policy cgroup_policy[TCA_CGROUP_MAX + 1] = {
	[TCA_CGROUP_EMATCHES]	= { .type = NLA_NESTED },
};

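/*
 * Create or update the single filter instance: the head is allocated on
 * first use and published under the tcf tree lock; later changes must
 * reference the same handle.
 */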
static int cls_cgroup_change(struct net *net, struct sk_buff *in_skb,
			     struct tcf_proto *tp, unsigned long base,
			     u32 handle, struct nlattr **tca,
			     unsigned long *arg)
{
	struct nlattr *tb[TCA_CGROUP_MAX + 1];
	struct cls_cgroup_head *head = tp->root;
	struct tcf_ematch_tree t;
	struct tcf_exts e;
	int err;

	if (!tca[TCA_OPTIONS])
		return -EINVAL;

	if (head == NULL) {
		if (!handle)
			return -EINVAL;

		head = kzalloc(sizeof(*head), GFP_KERNEL);
		if (head == NULL)
			return -ENOBUFS;

		head->handle = handle;

		tcf_tree_lock(tp);
		tp->root = head;
		tcf_tree_unlock(tp);
	}

	if (handle != head->handle)
		return -ENOENT;

	err = nla_parse_nested(tb, TCA_CGROUP_MAX, tca[TCA_OPTIONS],
			       cgroup_policy);
	if (err < 0)
		return err;

	err = tcf_exts_validate(net, tp, tb, tca[TCA_RATE], &e,
				&cgroup_ext_map);
	if (err < 0)
		return err;

	err = tcf_em_tree_validate(tp, tb[TCA_CGROUP_EMATCHES], &t);
	if (err < 0)
		return err;

	tcf_exts_change(tp, &head->exts, &e);
	tcf_em_tree_change(tp, &head->ematches, &t);

	return 0;
}

static void cls_cgroup_destroy(struct tcf_proto *tp)
{
	struct cls_cgroup_head *head = tp->root;

	if (head) {
		tcf_exts_destroy(tp, &head->exts);
		tcf_em_tree_destroy(tp, &head->ematches);
		kfree(head);
	}
}

static int cls_cgroup_delete(struct tcf_proto *tp, unsigned long arg)
{
	return -EOPNOTSUPP;
}

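/*
 * The classifier holds a single element (the head), so the walk visits at
 * most one item.
 */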
static void cls_cgroup_walk(struct tcf_proto *tp, struct tcf_walker *arg)
{
	struct cls_cgroup_head *head = tp->root;

	if (arg->count < arg->skip)
		goto skip;

	if (arg->fn(tp, (unsigned long) head, arg) < 0) {
		arg->stop = 1;
		return;
	}
skip:
	arg->count++;
}

static int cls_cgroup_dump(struct tcf_proto *tp, unsigned long fh,
			   struct sk_buff *skb, struct tcmsg *t)
{
	struct cls_cgroup_head *head = tp->root;
	unsigned char *b = skb_tail_pointer(skb);
	struct nlattr *nest;

	t->tcm_handle = head->handle;

	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;

	if (tcf_exts_dump(skb, &head->exts, &cgroup_ext_map) < 0 ||
	    tcf_em_tree_dump(skb, &head->ematches, TCA_CGROUP_EMATCHES) < 0)
		goto nla_put_failure;

	nla_nest_end(skb, nest);

	if (tcf_exts_dump_stats(skb, &head->exts, &cgroup_ext_map) < 0)
		goto nla_put_failure;

	return skb->len;

nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}

static struct tcf_proto_ops cls_cgroup_ops __read_mostly = {
	.kind		= "cgroup",
	.init		= cls_cgroup_init,
	.change		= cls_cgroup_change,
	.classify	= cls_cgroup_classify,
	.destroy	= cls_cgroup_destroy,
	.get		= cls_cgroup_get,
	.put		= cls_cgroup_put,
	.delete		= cls_cgroup_delete,
	.walk		= cls_cgroup_walk,
	.dump		= cls_cgroup_dump,
	.owner		= THIS_MODULE,
};

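/*
 * The net_cls cgroup subsystem is loaded before the tcf ops are registered;
 * if registration fails the subsystem is unloaded again so the module can
 * also be built and loaded standalone.
 */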
static int __init init_cgroup_cls(void)
{
	int ret;

	ret = cgroup_load_subsys(&net_cls_subsys);
	if (ret)
		goto out;

	ret = register_tcf_proto_ops(&cls_cgroup_ops);
	if (ret)
		cgroup_unload_subsys(&net_cls_subsys);

out:
	return ret;
}

static void __exit exit_cgroup_cls(void)
{
	unregister_tcf_proto_ops(&cls_cgroup_ops);

	cgroup_unload_subsys(&net_cls_subsys);
}

module_init(init_cgroup_cls);
module_exit(exit_cgroup_cls);
MODULE_LICENSE("GPL");