/*
 * net/sched/cls_cgroup.c	Control Group Classifier
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Authors:	Thomas Graf <tgraf@suug.ch>
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/cgroup.h>
#include <net/rtnetlink.h>
#include <net/pkt_cls.h>

struct cgroup_cls_state
{
	struct cgroup_subsys_state css;
	u32 classid;
};

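/*
 * Map a cgroup or a task to its net_cls state by resolving the generic
 * subsystem state and casting back to the containing cgroup_cls_state.
 */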
static inline struct cgroup_cls_state *cgrp_cls_state(struct cgroup *cgrp)
{
	return container_of(cgroup_subsys_state(cgrp, net_cls_subsys_id),
			    struct cgroup_cls_state, css);
}

static inline struct cgroup_cls_state *task_cls_state(struct task_struct *p)
{
	return container_of(task_subsys_state(p, net_cls_subsys_id),
			    struct cgroup_cls_state, css);
}

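/*
 * Allocate the per-cgroup state. A new child starts out with its
 * parent's classid, so tasks keep their classification until the
 * child's classid file is written.
 */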
static struct cgroup_subsys_state *cgrp_create(struct cgroup_subsys *ss,
					       struct cgroup *cgrp)
{
	struct cgroup_cls_state *cs;

	cs = kzalloc(sizeof(*cs), GFP_KERNEL);
	if (!cs)
		return ERR_PTR(-ENOMEM);

	if (cgrp->parent)
		cs->classid = cgrp_cls_state(cgrp->parent)->classid;

	return &cs->css;
}

static void cgrp_destroy(struct cgroup_subsys *ss, struct cgroup *cgrp)
{
	kfree(cgrp_cls_state(cgrp));
}

static u64 read_classid(struct cgroup *cgrp, struct cftype *cft)
{
	return cgrp_cls_state(cgrp)->classid;
}

static int write_classid(struct cgroup *cgrp, struct cftype *cft, u64 value)
{
	if (!cgroup_lock_live_group(cgrp))
		return -ENODEV;

	cgrp_cls_state(cgrp)->classid = (u32) value;

	cgroup_unlock();

	return 0;
}

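/*
 * The only control file; it appears in the filesystem as
 * "net_cls.classid" since cgroup prefixes it with the subsystem name.
 */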
static struct cftype ss_files[] = {
	{
		.name = "classid",
		.read_u64 = read_classid,
		.write_u64 = write_classid,
	},
};

static int cgrp_populate(struct cgroup_subsys *ss, struct cgroup *cgrp)
{
	return cgroup_add_files(cgrp, ss, ss_files, ARRAY_SIZE(ss_files));
}

struct cgroup_subsys net_cls_subsys = {
	.name		= "net_cls",
	.create		= cgrp_create,
	.destroy	= cgrp_destroy,
	.populate	= cgrp_populate,
	.subsys_id	= net_cls_subsys_id,
};

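/*
 * A typical setup sketch (illustrative only: mount point, device, qdisc
 * handle and classid value are assumptions, not part of this file):
 *
 *	mount -t cgroup -o net_cls none /cgroup
 *	mkdir /cgroup/foo
 *	echo 0x10001 > /cgroup/foo/net_cls.classid
 *	tc filter add dev eth0 parent 1: protocol ip prio 10 handle 1: cgroup
 *
 * The classid is encoded as 0xAAAABBBB (major:minor), so 0x10001 maps
 * traffic from tasks in /cgroup/foo to class 1:1.
 */
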
struct cls_cgroup_head
{
	u32 handle;
	struct tcf_exts exts;
	struct tcf_ematch_tree ematches;
};

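/*
 * Classify a packet by the classid of the task that sent it. This is
 * only meaningful in process context, where `current' is the sender;
 * the softirq check below filters out everything else.
 */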
static int cls_cgroup_classify(struct sk_buff *skb, struct tcf_proto *tp,
			       struct tcf_result *res)
{
	struct cls_cgroup_head *head = tp->root;
	struct cgroup_cls_state *cs;
	int ret = 0;

	/*
	 * Due to the nature of the classifier it is required to ignore all
	 * packets originating from softirq context as accessing `current'
	 * would lead to false results.
	 *
	 * This test assumes that all callers of dev_queue_xmit() explicitly
	 * disable bh. Knowing this, it is possible to detect softirq based
	 * calls by looking at the number of nested bh disable calls because
	 * softirqs always disable bh.
	 */
	if (softirq_count() != SOFTIRQ_OFFSET)
		return -1;

	rcu_read_lock();
	cs = task_cls_state(current);
	if (cs->classid && tcf_em_tree_match(skb, &head->ematches, NULL)) {
		res->classid = cs->classid;
		res->class = 0;
		ret = tcf_exts_exec(skb, &head->exts, res);
	} else
		ret = -1;

	rcu_read_unlock();

	return ret;
}

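/*
 * There is exactly one implicit filter per tcf_proto, so the
 * handle-based get/put hooks and init have nothing to do.
 */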
static unsigned long cls_cgroup_get(struct tcf_proto *tp, u32 handle)
{
	return 0UL;
}

static void cls_cgroup_put(struct tcf_proto *tp, unsigned long f)
{
}

static int cls_cgroup_init(struct tcf_proto *tp)
{
	return 0;
}

static const struct tcf_ext_map cgroup_ext_map = {
	.action = TCA_CGROUP_ACT,
	.police = TCA_CGROUP_POLICE,
};

static const struct nla_policy cgroup_policy[TCA_CGROUP_MAX + 1] = {
	[TCA_CGROUP_EMATCHES]	= { .type = NLA_NESTED },
};

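/*
 * Create or update the single filter. The head is allocated lazily on
 * the first change request; later requests must carry the same handle.
 */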
static int cls_cgroup_change(struct tcf_proto *tp, unsigned long base,
			     u32 handle, struct nlattr **tca,
			     unsigned long *arg)
{
	struct nlattr *tb[TCA_CGROUP_MAX + 1];
	struct cls_cgroup_head *head = tp->root;
	struct tcf_ematch_tree t;
	struct tcf_exts e;
	int err;

	if (head == NULL) {
		if (!handle)
			return -EINVAL;

		head = kzalloc(sizeof(*head), GFP_KERNEL);
		if (head == NULL)
			return -ENOBUFS;

		head->handle = handle;

		tcf_tree_lock(tp);
		tp->root = head;
		tcf_tree_unlock(tp);
	}

	if (handle != head->handle)
		return -ENOENT;

	err = nla_parse_nested(tb, TCA_CGROUP_MAX, tca[TCA_OPTIONS],
			       cgroup_policy);
	if (err < 0)
		return err;

	err = tcf_exts_validate(tp, tb, tca[TCA_RATE], &e, &cgroup_ext_map);
	if (err < 0)
		return err;

	err = tcf_em_tree_validate(tp, tb[TCA_CGROUP_EMATCHES], &t);
	if (err < 0) {
		/* release the actions acquired by tcf_exts_validate() */
		tcf_exts_destroy(tp, &e);
		return err;
	}

	tcf_exts_change(tp, &head->exts, &e);
	tcf_em_tree_change(tp, &head->ematches, &t);

	return 0;
}

static void cls_cgroup_destroy(struct tcf_proto *tp)
{
	struct cls_cgroup_head *head = tp->root;

	if (head) {
		tcf_exts_destroy(tp, &head->exts);
		tcf_em_tree_destroy(tp, &head->ematches);
		kfree(head);
	}
}

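/*
 * The single filter only goes away together with the whole tcf_proto,
 * so individual delete requests are rejected.
 */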
static int cls_cgroup_delete(struct tcf_proto *tp, unsigned long arg)
{
	return -EOPNOTSUPP;
}

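/* Report the one and only filter instance to the walker. */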
static void cls_cgroup_walk(struct tcf_proto *tp, struct tcf_walker *arg)
{
	struct cls_cgroup_head *head = tp->root;

	if (arg->count < arg->skip)
		goto skip;

	if (arg->fn(tp, (unsigned long) head, arg) < 0) {
		arg->stop = 1;
		return;
	}
skip:
	arg->count++;
}

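/*
 * Dump the handle, extensions and ematches as nested netlink
 * attributes, followed by the extension statistics.
 */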
static int cls_cgroup_dump(struct tcf_proto *tp, unsigned long fh,
			   struct sk_buff *skb, struct tcmsg *t)
{
	struct cls_cgroup_head *head = tp->root;
	unsigned char *b = skb_tail_pointer(skb);
	struct nlattr *nest;

	t->tcm_handle = head->handle;

	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;

	if (tcf_exts_dump(skb, &head->exts, &cgroup_ext_map) < 0 ||
	    tcf_em_tree_dump(skb, &head->ematches, TCA_CGROUP_EMATCHES) < 0)
		goto nla_put_failure;

	nla_nest_end(skb, nest);

	if (tcf_exts_dump_stats(skb, &head->exts, &cgroup_ext_map) < 0)
		goto nla_put_failure;

	return skb->len;

nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}

static struct tcf_proto_ops cls_cgroup_ops __read_mostly = {
	.kind		= "cgroup",
	.init		= cls_cgroup_init,
	.change		= cls_cgroup_change,
	.classify	= cls_cgroup_classify,
	.destroy	= cls_cgroup_destroy,
	.get		= cls_cgroup_get,
	.put		= cls_cgroup_put,
	.delete		= cls_cgroup_delete,
	.walk		= cls_cgroup_walk,
	.dump		= cls_cgroup_dump,
	.owner		= THIS_MODULE,
};

static int __init init_cgroup_cls(void)
{
	return register_tcf_proto_ops(&cls_cgroup_ops);
}

static void __exit exit_cgroup_cls(void)
{
	unregister_tcf_proto_ops(&cls_cgroup_ops);
}

module_init(init_cgroup_cls);
module_exit(exit_cgroup_cls);
MODULE_LICENSE("GPL");