/*
 * device_cgroup.c - device cgroup subsystem
 *
 * Copyright 2007 IBM Corp
 */

#include <linux/device_cgroup.h>
#include <linux/cgroup.h>
#include <linux/ctype.h>
#include <linux/list.h>
#include <linux/uaccess.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/rcupdate.h>
#include <linux/mutex.h>

#define ACC_MKNOD 1
#define ACC_READ  2
#define ACC_WRITE 4
#define ACC_MASK (ACC_MKNOD | ACC_READ | ACC_WRITE)

#define DEV_BLOCK 1
#define DEV_CHAR  2
#define DEV_ALL   4  /* this represents all devices */

static DEFINE_MUTEX(devcgroup_mutex);

enum devcg_behavior {
	DEVCG_DEFAULT_NONE,
	DEVCG_DEFAULT_ALLOW,
	DEVCG_DEFAULT_DENY,
};

/*
 * exception list locking rules:
 * hold devcgroup_mutex for update/read.
 * hold rcu_read_lock() for read.
 */

struct dev_exception_item {
	u32 major, minor;
	short type;
	short access;
	struct list_head list;
	struct rcu_head rcu;
};

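/*
 * Illustrative note (not part of the original source): a rule such as
 * "b 8:* rw", written through the devices cgroup control files, is parsed
 * by devcgroup_update_access() into roughly the following exception:
 *
 *	struct dev_exception_item ex = {
 *		.type	= DEV_BLOCK,
 *		.major	= 8,
 *		.minor	= ~0,	(~0 stands for the '*' wildcard)
 *		.access	= ACC_READ | ACC_WRITE,
 *	};
 */
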
struct dev_cgroup {
	struct cgroup_subsys_state css;
	struct list_head exceptions;
	enum devcg_behavior behavior;
};

static inline struct dev_cgroup *css_to_devcgroup(struct cgroup_subsys_state *s)
{
	return container_of(s, struct dev_cgroup, css);
}

static inline struct dev_cgroup *cgroup_to_devcgroup(struct cgroup *cgroup)
{
	return css_to_devcgroup(cgroup_subsys_state(cgroup, devices_subsys_id));
}

static inline struct dev_cgroup *task_devcgroup(struct task_struct *task)
{
	return css_to_devcgroup(task_subsys_state(task, devices_subsys_id));
}

struct cgroup_subsys devices_subsys;

static int devcgroup_can_attach(struct cgroup *new_cgrp,
				struct cgroup_taskset *set)
{
	struct task_struct *task = cgroup_taskset_first(set);

	if (current != task && !capable(CAP_SYS_ADMIN))
		return -EPERM;
	return 0;
}

/*
 * called under devcgroup_mutex
 */
static int dev_exceptions_copy(struct list_head *dest, struct list_head *orig)
{
	struct dev_exception_item *ex, *tmp, *new;

	lockdep_assert_held(&devcgroup_mutex);

	list_for_each_entry(ex, orig, list) {
		new = kmemdup(ex, sizeof(*ex), GFP_KERNEL);
		if (!new)
			goto free_and_exit;
		list_add_tail(&new->list, dest);
	}

	return 0;

free_and_exit:
	list_for_each_entry_safe(ex, tmp, dest, list) {
		list_del(&ex->list);
		kfree(ex);
	}
	return -ENOMEM;
}

/*
 * called under devcgroup_mutex
 */
static int dev_exception_add(struct dev_cgroup *dev_cgroup,
			     struct dev_exception_item *ex)
{
	struct dev_exception_item *excopy, *walk;

	lockdep_assert_held(&devcgroup_mutex);

	excopy = kmemdup(ex, sizeof(*ex), GFP_KERNEL);
	if (!excopy)
		return -ENOMEM;

	list_for_each_entry(walk, &dev_cgroup->exceptions, list) {
		if (walk->type != ex->type)
			continue;
		if (walk->major != ex->major)
			continue;
		if (walk->minor != ex->minor)
			continue;

		walk->access |= ex->access;
		kfree(excopy);
		excopy = NULL;
	}

	if (excopy != NULL)
		list_add_tail_rcu(&excopy->list, &dev_cgroup->exceptions);
	return 0;
}

/*
 * called under devcgroup_mutex
 */
static void dev_exception_rm(struct dev_cgroup *dev_cgroup,
			     struct dev_exception_item *ex)
{
	struct dev_exception_item *walk, *tmp;

	lockdep_assert_held(&devcgroup_mutex);

	list_for_each_entry_safe(walk, tmp, &dev_cgroup->exceptions, list) {
		if (walk->type != ex->type)
			continue;
		if (walk->major != ex->major)
			continue;
		if (walk->minor != ex->minor)
			continue;

		walk->access &= ~ex->access;
		if (!walk->access) {
			list_del_rcu(&walk->list);
			kfree_rcu(walk, rcu);
		}
	}
}

static void __dev_exception_clean(struct dev_cgroup *dev_cgroup)
{
	struct dev_exception_item *ex, *tmp;

	list_for_each_entry_safe(ex, tmp, &dev_cgroup->exceptions, list) {
		list_del_rcu(&ex->list);
		kfree_rcu(ex, rcu);
	}
}

/**
 * dev_exception_clean - frees all entries of the exception list
 * @dev_cgroup: dev_cgroup with the exception list to be cleaned
 *
 * called under devcgroup_mutex
 */
static void dev_exception_clean(struct dev_cgroup *dev_cgroup)
{
	lockdep_assert_held(&devcgroup_mutex);

	__dev_exception_clean(dev_cgroup);
}

/*
 * called from kernel/cgroup.c with cgroup_lock() held.
 */
static struct cgroup_subsys_state *devcgroup_css_alloc(struct cgroup *cgroup)
{
	struct dev_cgroup *dev_cgroup, *parent_dev_cgroup;
	struct cgroup *parent_cgroup;
	int ret;

	dev_cgroup = kzalloc(sizeof(*dev_cgroup), GFP_KERNEL);
	if (!dev_cgroup)
		return ERR_PTR(-ENOMEM);
	INIT_LIST_HEAD(&dev_cgroup->exceptions);
	parent_cgroup = cgroup->parent;

	if (parent_cgroup == NULL)
		dev_cgroup->behavior = DEVCG_DEFAULT_ALLOW;
	else {
		parent_dev_cgroup = cgroup_to_devcgroup(parent_cgroup);
		mutex_lock(&devcgroup_mutex);
		ret = dev_exceptions_copy(&dev_cgroup->exceptions,
					  &parent_dev_cgroup->exceptions);
		dev_cgroup->behavior = parent_dev_cgroup->behavior;
		mutex_unlock(&devcgroup_mutex);
		if (ret) {
			kfree(dev_cgroup);
			return ERR_PTR(ret);
		}
	}

	return &dev_cgroup->css;
}

static void devcgroup_css_free(struct cgroup *cgroup)
{
	struct dev_cgroup *dev_cgroup;

	dev_cgroup = cgroup_to_devcgroup(cgroup);
	__dev_exception_clean(dev_cgroup);
	kfree(dev_cgroup);
}

#define DEVCG_ALLOW 1
#define DEVCG_DENY 2
#define DEVCG_LIST 3

#define MAJMINLEN 13
#define ACCLEN 4

static void set_access(char *acc, short access)
{
	int idx = 0;

	memset(acc, 0, ACCLEN);
	if (access & ACC_READ)
		acc[idx++] = 'r';
	if (access & ACC_WRITE)
		acc[idx++] = 'w';
	if (access & ACC_MKNOD)
		acc[idx++] = 'm';
}

static char type_to_char(short type)
{
	if (type == DEV_ALL)
		return 'a';
	if (type == DEV_CHAR)
		return 'c';
	if (type == DEV_BLOCK)
		return 'b';
	return 'X';
}

static void set_majmin(char *str, unsigned m)
{
	if (m == ~0)
		strcpy(str, "*");
	else
		sprintf(str, "%u", m);
}

static int devcgroup_seq_read(struct cgroup *cgroup, struct cftype *cft,
			      struct seq_file *m)
{
	struct dev_cgroup *devcgroup = cgroup_to_devcgroup(cgroup);
	struct dev_exception_item *ex;
	char maj[MAJMINLEN], min[MAJMINLEN], acc[ACCLEN];

	rcu_read_lock();
	/*
	 * To preserve compatibility:
	 * - only show the "all devices" entry when the default policy is
	 *   to allow
	 * - list the exceptions when the default policy is to deny
	 * This way, the file remains a "whitelist of devices"
	 */
	if (devcgroup->behavior == DEVCG_DEFAULT_ALLOW) {
		set_access(acc, ACC_MASK);
		set_majmin(maj, ~0);
		set_majmin(min, ~0);
		seq_printf(m, "%c %s:%s %s\n", type_to_char(DEV_ALL),
			   maj, min, acc);
	} else {
		list_for_each_entry_rcu(ex, &devcgroup->exceptions, list) {
			set_access(acc, ex->access);
			set_majmin(maj, ex->major);
			set_majmin(min, ex->minor);
			seq_printf(m, "%c %s:%s %s\n", type_to_char(ex->type),
				   maj, min, acc);
		}
	}
	rcu_read_unlock();

	return 0;
}
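
/*
 * Illustrative example (not part of the original source): reading the
 * "devices.list" file of a cgroup in DEVCG_DEFAULT_ALLOW mode yields the
 * single catch-all line "a *:* rwm", while a cgroup in DEVCG_DEFAULT_DENY
 * mode lists its exceptions, e.g. "c 1:3 rm".
 */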

/**
 * may_access - verifies if a new exception is part of what is allowed
 *		by a dev cgroup based on the default policy +
 *		exceptions. This is used to make sure a child cgroup
 *		won't have more privileges than its parent or to
 *		verify if a certain access is allowed.
 * @dev_cgroup: dev cgroup to be tested against
 * @refex: new exception
 * @behavior: behavior of the exception
 */
static bool may_access(struct dev_cgroup *dev_cgroup,
		       struct dev_exception_item *refex,
		       enum devcg_behavior behavior)
{
	struct dev_exception_item *ex;
	bool match = false;

	rcu_lockdep_assert(rcu_read_lock_held() ||
			   lockdep_is_held(&devcgroup_mutex),
			   "device_cgroup::may_access() called without proper synchronization");

	list_for_each_entry_rcu(ex, &dev_cgroup->exceptions, list) {
		if ((refex->type & DEV_BLOCK) && !(ex->type & DEV_BLOCK))
			continue;
		if ((refex->type & DEV_CHAR) && !(ex->type & DEV_CHAR))
			continue;
		if (ex->major != ~0 && ex->major != refex->major)
			continue;
		if (ex->minor != ~0 && ex->minor != refex->minor)
			continue;
		if (refex->access & (~ex->access))
			continue;
		match = true;
		break;
	}

	if (dev_cgroup->behavior == DEVCG_DEFAULT_ALLOW) {
		if (behavior == DEVCG_DEFAULT_ALLOW) {
			/* the exception will deny access to certain devices */
			return true;
		} else {
			/* the exception will allow access to certain devices */
			if (match)
				/*
				 * a new exception allowing access shouldn't
				 * match a parent's exception
				 */
				return false;
			return true;
		}
	} else {
		/* only behavior == DEVCG_DEFAULT_DENY allowed here */
		if (match)
			/* parent has an exception that matches the proposed */
			return true;
		else
			return false;
	}
	return false;
}
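
/*
 * Worked example (illustrative, not part of the original source): with
 * dev_cgroup->behavior == DEVCG_DEFAULT_DENY and an exception "c 1:3 rw"
 * on the list, a refex of "c 1:3 r" matches (the requested access is a
 * subset of the exception) and may_access() returns true, while a refex
 * of "c 1:3 rwm" additionally requests ACC_MKNOD, matches nothing and is
 * refused.
 */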

/*
 * parent_has_perm:
 * when adding a new allow rule to a device exception list, the rule
 * must be allowed in the parent device cgroup
 */
static int parent_has_perm(struct dev_cgroup *childcg,
			   struct dev_exception_item *ex)
{
	struct cgroup *pcg = childcg->css.cgroup->parent;
	struct dev_cgroup *parent;

	if (!pcg)
		return 1;
	parent = cgroup_to_devcgroup(pcg);
	return may_access(parent, ex, childcg->behavior);
}

/**
 * may_allow_all - checks if it's possible to change the behavior to
 *		   allow based on parent's rules.
 * @parent: device cgroup's parent
 * returns: != 0 in case it's allowed, 0 otherwise
 */
static inline int may_allow_all(struct dev_cgroup *parent)
{
	if (!parent)
		return 1;
	return parent->behavior == DEVCG_DEFAULT_ALLOW;
}

/*
 * Modify the exception list using allow/deny rules.
 * CAP_SYS_ADMIN is needed for this.  It's at least separate from CAP_MKNOD
 * so we can give a container CAP_MKNOD to let it create devices but not
 * modify the exception list.
 * It seems likely we'll want to add a CAP_CONTAINER capability to allow
 * us to also grant CAP_SYS_ADMIN to containers without giving away the
 * device exception list controls, but for now we'll stick with CAP_SYS_ADMIN
 *
 * Taking rules away is always allowed (given CAP_SYS_ADMIN).  Granting
 * new access is only allowed if you're in the top-level cgroup, or your
 * parent cgroup has the access you're asking for.
 */
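
/*
 * Example usage from userspace (illustrative; the mount point of the
 * devices cgroup hierarchy below is an assumption):
 *
 *	# allow read and mknod on the null character device (1:3)
 *	echo "c 1:3 rm" > /sys/fs/cgroup/devices/mygroup/devices.allow
 *
 *	# deny all access to block devices with major number 8
 *	echo "b 8:* rwm" > /sys/fs/cgroup/devices/mygroup/devices.deny
 */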
static int devcgroup_update_access(struct dev_cgroup *devcgroup,
				   int filetype, const char *buffer)
{
	const char *b;
	char temp[12];		/* 11 + 1 characters needed for a u32 */
	int count, rc = 0;
	struct dev_exception_item ex;
	struct cgroup *p = devcgroup->css.cgroup;
	struct dev_cgroup *parent = NULL;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (p->parent)
		parent = cgroup_to_devcgroup(p->parent);

	memset(&ex, 0, sizeof(ex));
	b = buffer;

	switch (*b) {
	case 'a':
		switch (filetype) {
		case DEVCG_ALLOW:
			if (!may_allow_all(parent))
				return -EPERM;
			dev_exception_clean(devcgroup);
			devcgroup->behavior = DEVCG_DEFAULT_ALLOW;
			if (!parent)
				break;

			rc = dev_exceptions_copy(&devcgroup->exceptions,
						 &parent->exceptions);
			if (rc)
				return rc;
			break;
		case DEVCG_DENY:
			dev_exception_clean(devcgroup);
			devcgroup->behavior = DEVCG_DEFAULT_DENY;
			break;
		default:
			return -EINVAL;
		}
		return 0;
	case 'b':
		ex.type = DEV_BLOCK;
		break;
	case 'c':
		ex.type = DEV_CHAR;
		break;
	default:
		return -EINVAL;
	}
	b++;
	if (!isspace(*b))
		return -EINVAL;
	b++;
	if (*b == '*') {
		ex.major = ~0;
		b++;
	} else if (isdigit(*b)) {
		memset(temp, 0, sizeof(temp));
		for (count = 0; count < sizeof(temp) - 1; count++) {
			temp[count] = *b;
			b++;
			if (!isdigit(*b))
				break;
		}
		rc = kstrtou32(temp, 10, &ex.major);
		if (rc)
			return -EINVAL;
	} else {
		return -EINVAL;
	}
	if (*b != ':')
		return -EINVAL;
	b++;

	/* read minor */
	if (*b == '*') {
		ex.minor = ~0;
		b++;
	} else if (isdigit(*b)) {
		memset(temp, 0, sizeof(temp));
		for (count = 0; count < sizeof(temp) - 1; count++) {
			temp[count] = *b;
			b++;
			if (!isdigit(*b))
				break;
		}
		rc = kstrtou32(temp, 10, &ex.minor);
		if (rc)
			return -EINVAL;
	} else {
		return -EINVAL;
	}
	if (!isspace(*b))
		return -EINVAL;
	for (b++, count = 0; count < 3; count++, b++) {
		switch (*b) {
		case 'r':
			ex.access |= ACC_READ;
			break;
		case 'w':
			ex.access |= ACC_WRITE;
			break;
		case 'm':
			ex.access |= ACC_MKNOD;
			break;
		case '\n':
		case '\0':
			count = 3;
			break;
		default:
			return -EINVAL;
		}
	}

	switch (filetype) {
	case DEVCG_ALLOW:
		if (!parent_has_perm(devcgroup, &ex))
			return -EPERM;
		/*
		 * If the default policy is to allow, try to remove
		 * a matching exception instead. And be silent about it: we
		 * don't want to break compatibility
		 */
		if (devcgroup->behavior == DEVCG_DEFAULT_ALLOW) {
			dev_exception_rm(devcgroup, &ex);
			return 0;
		}
		return dev_exception_add(devcgroup, &ex);
	case DEVCG_DENY:
		/*
		 * If the default policy is to deny, try to remove
		 * a matching exception instead. And be silent about it: we
		 * don't want to break compatibility
		 */
		if (devcgroup->behavior == DEVCG_DEFAULT_DENY) {
			dev_exception_rm(devcgroup, &ex);
			return 0;
		}
		return dev_exception_add(devcgroup, &ex);
	default:
		return -EINVAL;
	}
	return 0;
}

static int devcgroup_access_write(struct cgroup *cgrp, struct cftype *cft,
				  const char *buffer)
{
	int retval;

	mutex_lock(&devcgroup_mutex);
	retval = devcgroup_update_access(cgroup_to_devcgroup(cgrp),
					 cft->private, buffer);
	mutex_unlock(&devcgroup_mutex);
	return retval;
}

static struct cftype dev_cgroup_files[] = {
	{
		.name = "allow",
		.write_string  = devcgroup_access_write,
		.private = DEVCG_ALLOW,
	},
	{
		.name = "deny",
		.write_string = devcgroup_access_write,
		.private = DEVCG_DENY,
	},
	{
		.name = "list",
		.read_seq_string = devcgroup_seq_read,
		.private = DEVCG_LIST,
	},
	{ }	/* terminate */
};

struct cgroup_subsys devices_subsys = {
	.name = "devices",
	.can_attach = devcgroup_can_attach,
	.css_alloc = devcgroup_css_alloc,
	.css_free = devcgroup_css_free,
	.subsys_id = devices_subsys_id,
	.base_cftypes = dev_cgroup_files,

	/*
	 * While devices cgroup has the rudimentary hierarchy support which
	 * checks the parent's restriction, it doesn't properly propagate
	 * config changes in ancestors to their descendants. A child
	 * should only be allowed to add more restrictions to the parent's
	 * configuration. Fix it and remove the following.
	 */
	.broken_hierarchy = true,
};

/**
 * __devcgroup_check_permission - checks if an inode operation is permitted
 * @type: device type
 * @major: device major number
 * @minor: device minor number
 * @access: combination of ACC_WRITE, ACC_READ and ACC_MKNOD
 *
 * The device cgroup checked against is the one of the current task.
 *
 * returns 0 on success, -EPERM in case the operation is not permitted
 */
static int __devcgroup_check_permission(short type, u32 major, u32 minor,
					short access)
{
	struct dev_cgroup *dev_cgroup;
	struct dev_exception_item ex;
	int rc;

	memset(&ex, 0, sizeof(ex));
	ex.type = type;
	ex.major = major;
	ex.minor = minor;
	ex.access = access;

	rcu_read_lock();
	dev_cgroup = task_devcgroup(current);
	rc = may_access(dev_cgroup, &ex, dev_cgroup->behavior);
	rcu_read_unlock();

	if (!rc)
		return -EPERM;

	return 0;
}

int __devcgroup_inode_permission(struct inode *inode, int mask)
{
	short type, access = 0;

	if (S_ISBLK(inode->i_mode))
		type = DEV_BLOCK;
	if (S_ISCHR(inode->i_mode))
		type = DEV_CHAR;
	if (mask & MAY_WRITE)
		access |= ACC_WRITE;
	if (mask & MAY_READ)
		access |= ACC_READ;

	return __devcgroup_check_permission(type, imajor(inode), iminor(inode),
					    access);
}

int devcgroup_inode_mknod(int mode, dev_t dev)
{
	short type;

	if (!S_ISBLK(mode) && !S_ISCHR(mode))
		return 0;

	if (S_ISBLK(mode))
		type = DEV_BLOCK;
	else
		type = DEV_CHAR;

	return __devcgroup_check_permission(type, MAJOR(dev), MINOR(dev),
					    ACC_MKNOD);

}