/*
 * device_cgroup.c - device cgroup subsystem
 *
 * Copyright 2007 IBM Corp
 */

#include <linux/device_cgroup.h>
#include <linux/cgroup.h>
#include <linux/ctype.h>
#include <linux/list.h>
#include <linux/uaccess.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/rcupdate.h>
#include <linux/mutex.h>

#define ACC_MKNOD 1
#define ACC_READ  2
#define ACC_WRITE 4
#define ACC_MASK (ACC_MKNOD | ACC_READ | ACC_WRITE)

#define DEV_BLOCK 1
#define DEV_CHAR  2
#define DEV_ALL   4  /* this represents all devices */

static DEFINE_MUTEX(devcgroup_mutex);

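/*
 * DEVCG_DEFAULT_NONE marks a css that is not online: devcgroup_online()
 * switches it to DEVCG_DEFAULT_ALLOW for the root cgroup or to the parent's
 * behavior otherwise, and devcgroup_offline() resets it back to NONE.
 */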
enum devcg_behavior {
	DEVCG_DEFAULT_NONE,
	DEVCG_DEFAULT_ALLOW,
	DEVCG_DEFAULT_DENY,
};

/*
 * exception list locking rules:
 * hold devcgroup_mutex for update/read.
 * hold rcu_read_lock() for read.
 */

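/*
 * Each exception corresponds to one rule written to devices.allow or
 * devices.deny: e.g. "c 1:3 rwm" is stored as type = DEV_CHAR, major = 1,
 * minor = 3, access = ACC_READ | ACC_WRITE | ACC_MKNOD.  A '*' major or
 * minor is stored as ~0 (see devcgroup_update_access() below).
 */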
struct dev_exception_item {
	u32 major, minor;
	short type;
	short access;
	struct list_head list;
	struct rcu_head rcu;
};

struct dev_cgroup {
	struct cgroup_subsys_state css;
	struct list_head exceptions;
	enum devcg_behavior behavior;
};

static inline struct dev_cgroup *css_to_devcgroup(struct cgroup_subsys_state *s)
{
	return s ? container_of(s, struct dev_cgroup, css) : NULL;
}

static inline struct dev_cgroup *task_devcgroup(struct task_struct *task)
{
	return css_to_devcgroup(task_css(task, devices_subsys_id));
}

struct cgroup_subsys devices_subsys;

/*
 * called under devcgroup_mutex
 */
static int dev_exceptions_copy(struct list_head *dest, struct list_head *orig)
{
	struct dev_exception_item *ex, *tmp, *new;

	lockdep_assert_held(&devcgroup_mutex);

	list_for_each_entry(ex, orig, list) {
		new = kmemdup(ex, sizeof(*ex), GFP_KERNEL);
		if (!new)
			goto free_and_exit;
		list_add_tail(&new->list, dest);
	}

	return 0;

free_and_exit:
	list_for_each_entry_safe(ex, tmp, dest, list) {
		list_del(&ex->list);
		kfree(ex);
	}
	return -ENOMEM;
}

/*
 * called under devcgroup_mutex
 */
static int dev_exception_add(struct dev_cgroup *dev_cgroup,
			     struct dev_exception_item *ex)
{
	struct dev_exception_item *excopy, *walk;

	lockdep_assert_held(&devcgroup_mutex);

	excopy = kmemdup(ex, sizeof(*ex), GFP_KERNEL);
	if (!excopy)
		return -ENOMEM;

	list_for_each_entry(walk, &dev_cgroup->exceptions, list) {
		if (walk->type != ex->type)
			continue;
		if (walk->major != ex->major)
			continue;
		if (walk->minor != ex->minor)
			continue;

		walk->access |= ex->access;
		kfree(excopy);
		excopy = NULL;
	}

	if (excopy != NULL)
		list_add_tail_rcu(&excopy->list, &dev_cgroup->exceptions);
	return 0;
}

/*
 * called under devcgroup_mutex
 */
static void dev_exception_rm(struct dev_cgroup *dev_cgroup,
			     struct dev_exception_item *ex)
{
	struct dev_exception_item *walk, *tmp;

	lockdep_assert_held(&devcgroup_mutex);

	list_for_each_entry_safe(walk, tmp, &dev_cgroup->exceptions, list) {
		if (walk->type != ex->type)
			continue;
		if (walk->major != ex->major)
			continue;
		if (walk->minor != ex->minor)
			continue;

		walk->access &= ~ex->access;
		if (!walk->access) {
			list_del_rcu(&walk->list);
			kfree_rcu(walk, rcu);
		}
	}
}

static void __dev_exception_clean(struct dev_cgroup *dev_cgroup)
{
	struct dev_exception_item *ex, *tmp;

	list_for_each_entry_safe(ex, tmp, &dev_cgroup->exceptions, list) {
		list_del_rcu(&ex->list);
		kfree_rcu(ex, rcu);
	}
}

/**
 * dev_exception_clean - frees all entries of the exception list
 * @dev_cgroup: dev_cgroup with the exception list to be cleaned
 *
 * called under devcgroup_mutex
 */
static void dev_exception_clean(struct dev_cgroup *dev_cgroup)
{
	lockdep_assert_held(&devcgroup_mutex);

	__dev_exception_clean(dev_cgroup);
}

static inline bool is_devcg_online(const struct dev_cgroup *devcg)
{
	return (devcg->behavior != DEVCG_DEFAULT_NONE);
}

/**
 * devcgroup_online - initializes devcgroup's behavior and exceptions based on
 *		      parent's
 * @css: css getting online
 * returns 0 in case of success, error code otherwise
 */
static int devcgroup_online(struct cgroup_subsys_state *css)
{
	struct dev_cgroup *dev_cgroup = css_to_devcgroup(css);
	struct dev_cgroup *parent_dev_cgroup = css_to_devcgroup(css_parent(css));
	int ret = 0;

	mutex_lock(&devcgroup_mutex);

	if (parent_dev_cgroup == NULL)
		dev_cgroup->behavior = DEVCG_DEFAULT_ALLOW;
	else {
		ret = dev_exceptions_copy(&dev_cgroup->exceptions,
					  &parent_dev_cgroup->exceptions);
		if (!ret)
			dev_cgroup->behavior = parent_dev_cgroup->behavior;
	}
	mutex_unlock(&devcgroup_mutex);

	return ret;
}

static void devcgroup_offline(struct cgroup_subsys_state *css)
{
	struct dev_cgroup *dev_cgroup = css_to_devcgroup(css);

	mutex_lock(&devcgroup_mutex);
	dev_cgroup->behavior = DEVCG_DEFAULT_NONE;
	mutex_unlock(&devcgroup_mutex);
}

/*
 * called from kernel/cgroup.c with cgroup_lock() held.
 */
static struct cgroup_subsys_state *
devcgroup_css_alloc(struct cgroup_subsys_state *parent_css)
{
	struct dev_cgroup *dev_cgroup;

	dev_cgroup = kzalloc(sizeof(*dev_cgroup), GFP_KERNEL);
	if (!dev_cgroup)
		return ERR_PTR(-ENOMEM);
	INIT_LIST_HEAD(&dev_cgroup->exceptions);
	dev_cgroup->behavior = DEVCG_DEFAULT_NONE;

	return &dev_cgroup->css;
}

static void devcgroup_css_free(struct cgroup_subsys_state *css)
{
	struct dev_cgroup *dev_cgroup = css_to_devcgroup(css);

	__dev_exception_clean(dev_cgroup);
	kfree(dev_cgroup);
}

#define DEVCG_ALLOW 1
#define DEVCG_DENY 2
#define DEVCG_LIST 3

#define MAJMINLEN 13
#define ACCLEN 4

static void set_access(char *acc, short access)
{
	int idx = 0;
	memset(acc, 0, ACCLEN);
	if (access & ACC_READ)
		acc[idx++] = 'r';
	if (access & ACC_WRITE)
		acc[idx++] = 'w';
	if (access & ACC_MKNOD)
		acc[idx++] = 'm';
}

static char type_to_char(short type)
{
	if (type == DEV_ALL)
		return 'a';
	if (type == DEV_CHAR)
		return 'c';
	if (type == DEV_BLOCK)
		return 'b';
	return 'X';
}

static void set_majmin(char *str, unsigned m)
{
	if (m == ~0)
		strcpy(str, "*");
	else
		sprintf(str, "%u", m);
}

static int devcgroup_seq_show(struct seq_file *m, void *v)
{
	struct dev_cgroup *devcgroup = css_to_devcgroup(seq_css(m));
	struct dev_exception_item *ex;
	char maj[MAJMINLEN], min[MAJMINLEN], acc[ACCLEN];

	rcu_read_lock();
	/*
	 * To preserve the compatibility:
	 * - Only show the "all devices" when the default policy is to allow
	 * - List the exceptions in case the default policy is to deny
	 * This way, the file remains as a "whitelist of devices"
	 */
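	/*
	 * Illustrative sample output, matching the seq_printf() calls below:
	 *   default allow:  "a *:* rwm"  (a single "all devices" entry)
	 *   default deny:   "c 1:3 rwm"  (one line per exception)
	 */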
	if (devcgroup->behavior == DEVCG_DEFAULT_ALLOW) {
		set_access(acc, ACC_MASK);
		set_majmin(maj, ~0);
		set_majmin(min, ~0);
		seq_printf(m, "%c %s:%s %s\n", type_to_char(DEV_ALL),
			   maj, min, acc);
	} else {
		list_for_each_entry_rcu(ex, &devcgroup->exceptions, list) {
			set_access(acc, ex->access);
			set_majmin(maj, ex->major);
			set_majmin(min, ex->minor);
			seq_printf(m, "%c %s:%s %s\n", type_to_char(ex->type),
				   maj, min, acc);
		}
	}
	rcu_read_unlock();

	return 0;
}

/**
 * may_access - verifies if a new exception is part of what is allowed
 *		by a dev cgroup based on the default policy +
 *		exceptions. This is used to make sure a child cgroup
 *		won't have more privileges than its parent or to
 *		verify if a certain access is allowed.
 * @dev_cgroup: dev cgroup to be tested against
 * @refex: new exception
 * @behavior: behavior of the exception
 */
static bool may_access(struct dev_cgroup *dev_cgroup,
		       struct dev_exception_item *refex,
		       enum devcg_behavior behavior)
{
	struct dev_exception_item *ex;
	bool match = false;

	rcu_lockdep_assert(rcu_read_lock_held() ||
			   lockdep_is_held(&devcgroup_mutex),
			   "device_cgroup::may_access() called without proper synchronization");

	list_for_each_entry_rcu(ex, &dev_cgroup->exceptions, list) {
		if ((refex->type & DEV_BLOCK) && !(ex->type & DEV_BLOCK))
			continue;
		if ((refex->type & DEV_CHAR) && !(ex->type & DEV_CHAR))
			continue;
		if (ex->major != ~0 && ex->major != refex->major)
			continue;
		if (ex->minor != ~0 && ex->minor != refex->minor)
			continue;
		if (refex->access & (~ex->access))
			continue;
		match = true;
		break;
	}

	if (dev_cgroup->behavior == DEVCG_DEFAULT_ALLOW) {
		if (behavior == DEVCG_DEFAULT_ALLOW) {
			/* the exception will deny access to certain devices */
			return true;
		} else {
			/* the exception will allow access to certain devices */
			if (match)
				/*
				 * a new exception allowing access shouldn't
				 * match a parent's exception
				 */
				return false;
			return true;
		}
	} else {
		/* only behavior == DEVCG_DEFAULT_DENY allowed here */
		if (match)
			/* parent has an exception that matches the proposed */
			return true;
		else
			return false;
	}
	return false;
}

/*
 * parent_has_perm:
 * when adding a new allow rule to a device exception list, the rule
 * must be allowed in the parent device
 */
static int parent_has_perm(struct dev_cgroup *childcg,
			   struct dev_exception_item *ex)
{
	struct dev_cgroup *parent = css_to_devcgroup(css_parent(&childcg->css));

	if (!parent)
		return 1;
	return may_access(parent, ex, childcg->behavior);
}

/**
 * may_allow_all - checks if it's possible to change the behavior to
 *		   allow based on parent's rules.
 * @parent: device cgroup's parent
 * returns: != 0 in case it's allowed, 0 otherwise
 */
static inline int may_allow_all(struct dev_cgroup *parent)
{
	if (!parent)
		return 1;
	return parent->behavior == DEVCG_DEFAULT_ALLOW;
}

/**
 * revalidate_active_exceptions - walks through the active exception list and
 *				  revalidates the exceptions based on parent's
 *				  behavior and exceptions. The exceptions that
 *				  are no longer valid will be removed.
 *				  Called with devcgroup_mutex held.
 * @devcg: cgroup whose exceptions will be checked
 *
 * This is one of the three key functions for hierarchy implementation.
 * This function is responsible for re-evaluating all the cgroup's active
 * exceptions due to a parent's exception change.
 * Refer to Documentation/cgroups/devices.txt for more details.
 */
static void revalidate_active_exceptions(struct dev_cgroup *devcg)
{
	struct dev_exception_item *ex;
	struct list_head *this, *tmp;

	list_for_each_safe(this, tmp, &devcg->exceptions) {
		ex = container_of(this, struct dev_exception_item, list);
		if (!parent_has_perm(devcg, ex))
			dev_exception_rm(devcg, ex);
	}
}

/**
 * propagate_exception - propagates a new exception to the children
 * @devcg_root: device cgroup that added a new exception
 * @ex: new exception to be propagated
 *
 * returns: 0 in case of success, != 0 in case of error
 */
static int propagate_exception(struct dev_cgroup *devcg_root,
			       struct dev_exception_item *ex)
{
	struct cgroup_subsys_state *pos;
	int rc = 0;

	rcu_read_lock();

	css_for_each_descendant_pre(pos, &devcg_root->css) {
		struct dev_cgroup *devcg = css_to_devcgroup(pos);

		/*
		 * Because devcgroup_mutex is held, no devcg will become
		 * online or offline during the tree walk (see on/offline
		 * methods), and online ones are safe to access outside RCU
		 * read lock without bumping refcnt.
		 */
		if (pos == &devcg_root->css || !is_devcg_online(devcg))
			continue;

		rcu_read_unlock();

		/*
		 * in case both root's and devcg's behavior is allow, a new
		 * restriction means adding to the exception list
		 */
		if (devcg_root->behavior == DEVCG_DEFAULT_ALLOW &&
		    devcg->behavior == DEVCG_DEFAULT_ALLOW) {
			rc = dev_exception_add(devcg, ex);
			if (rc)
				break;
		} else {
			/*
			 * in the other possible cases:
			 * root's behavior: allow, devcg's: deny
			 * root's behavior: deny, devcg's: deny
			 * the exception will be removed
			 */
			dev_exception_rm(devcg, ex);
		}
		revalidate_active_exceptions(devcg);

		rcu_read_lock();
	}

	rcu_read_unlock();
	return rc;
}

static inline bool has_children(struct dev_cgroup *devcgroup)
{
	struct cgroup *cgrp = devcgroup->css.cgroup;

	return !list_empty(&cgrp->children);
}

/*
 * Modify the exception list using allow/deny rules.
 * CAP_SYS_ADMIN is needed for this.  It's at least separate from CAP_MKNOD
 * so we can give a container CAP_MKNOD to let it create devices but not
 * modify the exception list.
 * It seems likely we'll want to add a CAP_CONTAINER capability to allow
 * us to also grant CAP_SYS_ADMIN to containers without giving away the
 * device exception list controls, but for now we'll stick with CAP_SYS_ADMIN
 *
 * Taking rules away is always allowed (given CAP_SYS_ADMIN).  Granting
 * new access is only allowed if you're in the top-level cgroup, or your
 * parent cgroup has the access you're asking for.
 */
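/*
 * Rules are written to the devices.allow/devices.deny control files as
 * "<type> <major>:<minor> <access>".  Assuming the devices controller is
 * mounted at /sys/fs/cgroup/devices (the mount point varies by distribution)
 * and "foo" is an example child cgroup:
 *
 *   echo 'c 1:3 mr' > /sys/fs/cgroup/devices/foo/devices.allow
 *   echo a > /sys/fs/cgroup/devices/foo/devices.deny
 *
 * See Documentation/cgroups/devices.txt for the full syntax.
 */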
static int devcgroup_update_access(struct dev_cgroup *devcgroup,
				   int filetype, const char *buffer)
{
	const char *b;
	char temp[12];		/* 11 + 1 characters needed for a u32 */
	int count, rc = 0;
	struct dev_exception_item ex;
	struct dev_cgroup *parent = css_to_devcgroup(css_parent(&devcgroup->css));

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	memset(&ex, 0, sizeof(ex));
	b = buffer;

	switch (*b) {
	case 'a':
		switch (filetype) {
		case DEVCG_ALLOW:
			if (has_children(devcgroup))
				return -EINVAL;

			if (!may_allow_all(parent))
				return -EPERM;
			dev_exception_clean(devcgroup);
			devcgroup->behavior = DEVCG_DEFAULT_ALLOW;
			if (!parent)
				break;

			rc = dev_exceptions_copy(&devcgroup->exceptions,
						 &parent->exceptions);
			if (rc)
				return rc;
			break;
		case DEVCG_DENY:
			if (has_children(devcgroup))
				return -EINVAL;

			dev_exception_clean(devcgroup);
			devcgroup->behavior = DEVCG_DEFAULT_DENY;
			break;
		default:
			return -EINVAL;
		}
		return 0;
	case 'b':
		ex.type = DEV_BLOCK;
		break;
	case 'c':
		ex.type = DEV_CHAR;
		break;
	default:
		return -EINVAL;
	}
	b++;
	if (!isspace(*b))
		return -EINVAL;
	b++;
	if (*b == '*') {
		ex.major = ~0;
		b++;
	} else if (isdigit(*b)) {
		memset(temp, 0, sizeof(temp));
		for (count = 0; count < sizeof(temp) - 1; count++) {
			temp[count] = *b;
			b++;
			if (!isdigit(*b))
				break;
		}
		rc = kstrtou32(temp, 10, &ex.major);
		if (rc)
			return -EINVAL;
	} else {
		return -EINVAL;
	}
	if (*b != ':')
		return -EINVAL;
	b++;

	/* read minor */
	if (*b == '*') {
		ex.minor = ~0;
		b++;
	} else if (isdigit(*b)) {
		memset(temp, 0, sizeof(temp));
		for (count = 0; count < sizeof(temp) - 1; count++) {
			temp[count] = *b;
			b++;
			if (!isdigit(*b))
				break;
		}
		rc = kstrtou32(temp, 10, &ex.minor);
		if (rc)
			return -EINVAL;
	} else {
		return -EINVAL;
	}
	if (!isspace(*b))
		return -EINVAL;
	for (b++, count = 0; count < 3; count++, b++) {
		switch (*b) {
		case 'r':
			ex.access |= ACC_READ;
			break;
		case 'w':
			ex.access |= ACC_WRITE;
			break;
		case 'm':
			ex.access |= ACC_MKNOD;
			break;
		case '\n':
		case '\0':
			count = 3;
			break;
		default:
			return -EINVAL;
		}
	}

	switch (filetype) {
	case DEVCG_ALLOW:
		if (!parent_has_perm(devcgroup, &ex))
			return -EPERM;
		/*
		 * If the default policy is to allow by default, try to remove
		 * a matching exception instead. And be silent about it: we
		 * don't want to break compatibility
		 */
		if (devcgroup->behavior == DEVCG_DEFAULT_ALLOW) {
			dev_exception_rm(devcgroup, &ex);
			return 0;
		}
		rc = dev_exception_add(devcgroup, &ex);
		break;
	case DEVCG_DENY:
		/*
		 * If the default policy is to deny by default, try to remove
		 * a matching exception instead. And be silent about it: we
		 * don't want to break compatibility
		 */
		if (devcgroup->behavior == DEVCG_DEFAULT_DENY)
			dev_exception_rm(devcgroup, &ex);
		else
			rc = dev_exception_add(devcgroup, &ex);

		if (rc)
			break;
		/* we only propagate new restrictions */
		rc = propagate_exception(devcgroup, &ex);
		break;
	default:
		rc = -EINVAL;
	}
	return rc;
}

static int devcgroup_access_write(struct cgroup_subsys_state *css,
				  struct cftype *cft, const char *buffer)
{
	int retval;

	mutex_lock(&devcgroup_mutex);
	retval = devcgroup_update_access(css_to_devcgroup(css),
					 cft->private, buffer);
	mutex_unlock(&devcgroup_mutex);
	return retval;
}

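/*
 * The cftypes below appear in each cgroup directory as devices.allow,
 * devices.deny and devices.list (the cgroup core prefixes the subsystem
 * name to each cftype name).
 */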
static struct cftype dev_cgroup_files[] = {
	{
		.name = "allow",
		.write_string = devcgroup_access_write,
		.private = DEVCG_ALLOW,
	},
	{
		.name = "deny",
		.write_string = devcgroup_access_write,
		.private = DEVCG_DENY,
	},
	{
		.name = "list",
		.seq_show = devcgroup_seq_show,
		.private = DEVCG_LIST,
	},
	{ }	/* terminate */
};

struct cgroup_subsys devices_subsys = {
	.name = "devices",
	.css_alloc = devcgroup_css_alloc,
	.css_free = devcgroup_css_free,
	.css_online = devcgroup_online,
	.css_offline = devcgroup_offline,
	.subsys_id = devices_subsys_id,
	.base_cftypes = dev_cgroup_files,
};

/**
 * __devcgroup_check_permission - checks if an inode operation is permitted
 * @type: device type
 * @major: device major number
 * @minor: device minor number
 * @access: combination of ACC_WRITE, ACC_READ and ACC_MKNOD
 *
 * returns 0 on success, -EPERM in case the operation is not permitted
 */
static int __devcgroup_check_permission(short type, u32 major, u32 minor,
					short access)
{
	struct dev_cgroup *dev_cgroup;
	struct dev_exception_item ex;
	int rc;

	memset(&ex, 0, sizeof(ex));
	ex.type = type;
	ex.major = major;
	ex.minor = minor;
	ex.access = access;

	rcu_read_lock();
	dev_cgroup = task_devcgroup(current);
	rc = may_access(dev_cgroup, &ex, dev_cgroup->behavior);
	rcu_read_unlock();

	if (!rc)
		return -EPERM;

	return 0;
}

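/*
 * Reached through the devcgroup_inode_permission() wrapper in
 * include/linux/device_cgroup.h when the VFS checks access to a device node.
 * For illustration: opening /dev/null (char 1:3) for writing leads to a
 * DEV_CHAR, major 1, minor 3, ACC_WRITE check against the caller's cgroup.
 */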
int __devcgroup_inode_permission(struct inode *inode, int mask)
{
	short type, access = 0;

	if (S_ISBLK(inode->i_mode))
		type = DEV_BLOCK;
	if (S_ISCHR(inode->i_mode))
		type = DEV_CHAR;
	if (mask & MAY_WRITE)
		access |= ACC_WRITE;
	if (mask & MAY_READ)
		access |= ACC_READ;

	return __devcgroup_check_permission(type, imajor(inode), iminor(inode),
			access);
}

int devcgroup_inode_mknod(int mode, dev_t dev)
{
	short type;

	if (!S_ISBLK(mode) && !S_ISCHR(mode))
		return 0;

	if (S_ISBLK(mode))
		type = DEV_BLOCK;
	else
		type = DEV_CHAR;

	return __devcgroup_check_permission(type, MAJOR(dev), MINOR(dev),
			ACC_MKNOD);
}