/*
 * device_cgroup.c - device cgroup subsystem
 *
 * Copyright 2007 IBM Corp
 */

#include <linux/device_cgroup.h>
#include <linux/cgroup.h>
#include <linux/ctype.h>
#include <linux/list.h>
#include <linux/uaccess.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/rcupdate.h>
#include <linux/mutex.h>

#define ACC_MKNOD 1
#define ACC_READ  2
#define ACC_WRITE 4
#define ACC_MASK (ACC_MKNOD | ACC_READ | ACC_WRITE)

#define DEV_BLOCK 1
#define DEV_CHAR  2
#define DEV_ALL   4  /* this represents all devices */

static DEFINE_MUTEX(devcgroup_mutex);

/*
 * exception list locking rules:
 * hold devcgroup_mutex for update/read.
 * hold rcu_read_lock() for read.
 */

struct dev_exception_item {
	u32 major, minor;
	short type;
	short access;
	struct list_head list;
	struct rcu_head rcu;
};

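/*
 * A device cgroup is described by a default behavior (allow or deny
 * everything) plus a list of dev_exception_item entries that carve
 * holes in that default.
 */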
struct dev_cgroup {
	struct cgroup_subsys_state css;
	struct list_head exceptions;
	enum {
		DEVCG_DEFAULT_ALLOW,
		DEVCG_DEFAULT_DENY,
	} behavior;
};

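/*
 * Helpers to get from the generic cgroup objects (css, cgroup, task)
 * to the dev_cgroup state embedded in them.
 */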
static inline struct dev_cgroup *css_to_devcgroup(struct cgroup_subsys_state *s)
{
	return container_of(s, struct dev_cgroup, css);
}

static inline struct dev_cgroup *cgroup_to_devcgroup(struct cgroup *cgroup)
{
	return css_to_devcgroup(cgroup_subsys_state(cgroup, devices_subsys_id));
}

static inline struct dev_cgroup *task_devcgroup(struct task_struct *task)
{
	return css_to_devcgroup(task_subsys_state(task, devices_subsys_id));
}

struct cgroup_subsys devices_subsys;

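/*
 * A task may move itself into a device cgroup; moving any other task
 * requires CAP_SYS_ADMIN.
 */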
static int devcgroup_can_attach(struct cgroup *new_cgrp,
				struct cgroup_taskset *set)
{
	struct task_struct *task = cgroup_taskset_first(set);

	if (current != task && !capable(CAP_SYS_ADMIN))
		return -EPERM;
	return 0;
}

/*
 * called under devcgroup_mutex
 */
static int dev_exceptions_copy(struct list_head *dest, struct list_head *orig)
{
	struct dev_exception_item *ex, *tmp, *new;

	lockdep_assert_held(&devcgroup_mutex);

	list_for_each_entry(ex, orig, list) {
		new = kmemdup(ex, sizeof(*ex), GFP_KERNEL);
		if (!new)
			goto free_and_exit;
		list_add_tail(&new->list, dest);
	}

	return 0;

free_and_exit:
	list_for_each_entry_safe(ex, tmp, dest, list) {
		list_del(&ex->list);
		kfree(ex);
	}
	return -ENOMEM;
}

/*
 * called under devcgroup_mutex
 */
static int dev_exception_add(struct dev_cgroup *dev_cgroup,
			     struct dev_exception_item *ex)
{
	struct dev_exception_item *excopy, *walk;

	lockdep_assert_held(&devcgroup_mutex);

	excopy = kmemdup(ex, sizeof(*ex), GFP_KERNEL);
	if (!excopy)
		return -ENOMEM;

	list_for_each_entry(walk, &dev_cgroup->exceptions, list) {
		if (walk->type != ex->type)
			continue;
		if (walk->major != ex->major)
			continue;
		if (walk->minor != ex->minor)
			continue;

		walk->access |= ex->access;
		kfree(excopy);
		excopy = NULL;
	}

	if (excopy != NULL)
		list_add_tail_rcu(&excopy->list, &dev_cgroup->exceptions);
	return 0;
}

/*
 * called under devcgroup_mutex
 */
static void dev_exception_rm(struct dev_cgroup *dev_cgroup,
			     struct dev_exception_item *ex)
{
	struct dev_exception_item *walk, *tmp;

	lockdep_assert_held(&devcgroup_mutex);

	list_for_each_entry_safe(walk, tmp, &dev_cgroup->exceptions, list) {
		if (walk->type != ex->type)
			continue;
		if (walk->major != ex->major)
			continue;
		if (walk->minor != ex->minor)
			continue;

		walk->access &= ~ex->access;
		if (!walk->access) {
			list_del_rcu(&walk->list);
			kfree_rcu(walk, rcu);
		}
	}
}

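/*
 * Removes and frees every exception.  Unlike dev_exception_clean()
 * below, this does not assert devcgroup_mutex, so it can also be used
 * from css_free teardown where no concurrent access is possible.
 */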
static void __dev_exception_clean(struct dev_cgroup *dev_cgroup)
{
	struct dev_exception_item *ex, *tmp;

	list_for_each_entry_safe(ex, tmp, &dev_cgroup->exceptions, list) {
		list_del_rcu(&ex->list);
		kfree_rcu(ex, rcu);
	}
}

/**
 * dev_exception_clean - frees all entries of the exception list
 * @dev_cgroup: dev_cgroup with the exception list to be cleaned
 *
 * called under devcgroup_mutex
 */
static void dev_exception_clean(struct dev_cgroup *dev_cgroup)
{
	lockdep_assert_held(&devcgroup_mutex);

	__dev_exception_clean(dev_cgroup);
}

/*
 * called from kernel/cgroup.c with cgroup_lock() held.
 */
static struct cgroup_subsys_state *devcgroup_css_alloc(struct cgroup *cgroup)
{
	struct dev_cgroup *dev_cgroup, *parent_dev_cgroup;
	struct cgroup *parent_cgroup;
	int ret;

	dev_cgroup = kzalloc(sizeof(*dev_cgroup), GFP_KERNEL);
	if (!dev_cgroup)
		return ERR_PTR(-ENOMEM);
	INIT_LIST_HEAD(&dev_cgroup->exceptions);
	parent_cgroup = cgroup->parent;

	if (parent_cgroup == NULL)
		dev_cgroup->behavior = DEVCG_DEFAULT_ALLOW;
	else {
		parent_dev_cgroup = cgroup_to_devcgroup(parent_cgroup);
		mutex_lock(&devcgroup_mutex);
		ret = dev_exceptions_copy(&dev_cgroup->exceptions,
					  &parent_dev_cgroup->exceptions);
		dev_cgroup->behavior = parent_dev_cgroup->behavior;
		mutex_unlock(&devcgroup_mutex);
		if (ret) {
			kfree(dev_cgroup);
			return ERR_PTR(ret);
		}
	}

	return &dev_cgroup->css;
}

static void devcgroup_css_free(struct cgroup *cgroup)
{
	struct dev_cgroup *dev_cgroup;

	dev_cgroup = cgroup_to_devcgroup(cgroup);
	__dev_exception_clean(dev_cgroup);
	kfree(dev_cgroup);
}

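/*
 * Identifiers for the control files (stored in cftype->private), plus
 * buffer sizes used when formatting devices.list: MAJMINLEN fits the
 * decimal form of a u32 major/minor (or "*") plus the trailing NUL,
 * ACCLEN fits "rwm" plus the trailing NUL.
 */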
#define DEVCG_ALLOW 1
#define DEVCG_DENY 2
#define DEVCG_LIST 3

#define MAJMINLEN 13
#define ACCLEN 4

static void set_access(char *acc, short access)
{
	int idx = 0;
	memset(acc, 0, ACCLEN);
	if (access & ACC_READ)
		acc[idx++] = 'r';
	if (access & ACC_WRITE)
		acc[idx++] = 'w';
	if (access & ACC_MKNOD)
		acc[idx++] = 'm';
}

static char type_to_char(short type)
{
	if (type == DEV_ALL)
		return 'a';
	if (type == DEV_CHAR)
		return 'c';
	if (type == DEV_BLOCK)
		return 'b';
	return 'X';
}

static void set_majmin(char *str, unsigned m)
{
	if (m == ~0)
		strcpy(str, "*");
	else
		sprintf(str, "%u", m);
}

static int devcgroup_seq_read(struct cgroup *cgroup, struct cftype *cft,
				struct seq_file *m)
{
	struct dev_cgroup *devcgroup = cgroup_to_devcgroup(cgroup);
	struct dev_exception_item *ex;
	char maj[MAJMINLEN], min[MAJMINLEN], acc[ACCLEN];

	rcu_read_lock();
	/*
	 * To preserve compatibility:
	 * - only show the "all devices" entry when the default policy is
	 *   to allow
	 * - list the exceptions when the default policy is to deny
	 * This way, the file remains a "whitelist of devices"
	 */
	if (devcgroup->behavior == DEVCG_DEFAULT_ALLOW) {
		set_access(acc, ACC_MASK);
		set_majmin(maj, ~0);
		set_majmin(min, ~0);
		seq_printf(m, "%c %s:%s %s\n", type_to_char(DEV_ALL),
			   maj, min, acc);
	} else {
		list_for_each_entry_rcu(ex, &devcgroup->exceptions, list) {
			set_access(acc, ex->access);
			set_majmin(maj, ex->major);
			set_majmin(min, ex->minor);
			seq_printf(m, "%c %s:%s %s\n", type_to_char(ex->type),
				   maj, min, acc);
		}
	}
	rcu_read_unlock();

	return 0;
}

/**
 * may_access - verifies if a new exception is part of what is allowed
 *		by a dev cgroup based on the default policy +
 *		exceptions. This is used to make sure a child cgroup
 *		won't have more privileges than its parent or to
 *		verify if a certain access is allowed.
 * @dev_cgroup: dev cgroup to be tested against
 * @refex: new exception
 */
static bool may_access(struct dev_cgroup *dev_cgroup,
		       struct dev_exception_item *refex)
{
	struct dev_exception_item *ex;
	bool match = false;

	rcu_lockdep_assert(rcu_read_lock_held() ||
			   lockdep_is_held(&devcgroup_mutex),
			   "device_cgroup::may_access() called without proper synchronization");

	list_for_each_entry_rcu(ex, &dev_cgroup->exceptions, list) {
		if ((refex->type & DEV_BLOCK) && !(ex->type & DEV_BLOCK))
			continue;
		if ((refex->type & DEV_CHAR) && !(ex->type & DEV_CHAR))
			continue;
		if (ex->major != ~0 && ex->major != refex->major)
			continue;
		if (ex->minor != ~0 && ex->minor != refex->minor)
			continue;
		if (refex->access & (~ex->access))
			continue;
		match = true;
		break;
	}

	/*
	 * In two cases we'll consider this new exception valid:
	 * - the dev cgroup has its default policy to deny + exception list:
	 *   the new exception *should* match the exceptions
	 * - the dev cgroup has its default policy to allow + exception list:
	 *   the new exception should *not* match any of the exceptions
	 */
	if (dev_cgroup->behavior == DEVCG_DEFAULT_DENY) {
		if (match)
			return true;
	} else {
		if (!match)
			return true;
	}
	return false;
}

/*
 * parent_has_perm:
 * when adding a new allow rule to a device exception list, the rule
 * must be allowed in the parent device cgroup
 */
static int parent_has_perm(struct dev_cgroup *childcg,
			   struct dev_exception_item *ex)
{
	struct cgroup *pcg = childcg->css.cgroup->parent;
	struct dev_cgroup *parent;

	if (!pcg)
		return 1;
	parent = cgroup_to_devcgroup(pcg);
	return may_access(parent, ex);
}

/**
 * may_allow_all - checks if it's possible to change the behavior to
 *		   allow based on parent's rules.
 * @parent: device cgroup's parent
 * returns: != 0 in case it's allowed, 0 otherwise
 */
static inline int may_allow_all(struct dev_cgroup *parent)
{
	if (!parent)
		return 1;
	return parent->behavior == DEVCG_DEFAULT_ALLOW;
}

/*
 * Modify the exception list using allow/deny rules.
 * CAP_SYS_ADMIN is needed for this.  It's at least separate from CAP_MKNOD
 * so we can give a container CAP_MKNOD to let it create devices but not
 * modify the exception list.
 * It seems likely we'll want to add a CAP_CONTAINER capability to allow
 * us to also grant CAP_SYS_ADMIN to containers without giving away the
 * device exception list controls, but for now we'll stick with CAP_SYS_ADMIN
 *
 * Taking rules away is always allowed (given CAP_SYS_ADMIN).  Granting
 * new access is only allowed if you're in the top-level cgroup, or your
 * parent cgroup has the access you're asking for.
 */
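/*
 * Rules written to devices.allow / devices.deny have the form
 * "<type> <major>:<minor> <access>": <type> is 'b' or 'c', major and
 * minor are decimal numbers or '*', and <access> is any combination of
 * 'r', 'w' and 'm'.  A bare 'a' switches the whole group's default
 * behavior instead.  For example (illustrative), writing "c 1:3 rwm"
 * to devices.allow adds an exception for the 1:3 character device
 * (typically /dev/null).
 */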
static int devcgroup_update_access(struct dev_cgroup *devcgroup,
				   int filetype, const char *buffer)
{
	const char *b;
	char temp[12];		/* 11 + 1 characters needed for a u32 */
	int count, rc;
	struct dev_exception_item ex;
	struct cgroup *p = devcgroup->css.cgroup;
	struct dev_cgroup *parent = NULL;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (p->parent)
		parent = cgroup_to_devcgroup(p->parent);

	memset(&ex, 0, sizeof(ex));
	b = buffer;

	switch (*b) {
	case 'a':
		switch (filetype) {
		case DEVCG_ALLOW:
			if (!may_allow_all(parent))
				return -EPERM;
			dev_exception_clean(devcgroup);
			devcgroup->behavior = DEVCG_DEFAULT_ALLOW;
			if (!parent)
				break;

			rc = dev_exceptions_copy(&devcgroup->exceptions,
						 &parent->exceptions);
			if (rc)
				return rc;
			break;
		case DEVCG_DENY:
			dev_exception_clean(devcgroup);
			devcgroup->behavior = DEVCG_DEFAULT_DENY;
			break;
		default:
			return -EINVAL;
		}
		return 0;
	case 'b':
		ex.type = DEV_BLOCK;
		break;
	case 'c':
		ex.type = DEV_CHAR;
		break;
	default:
		return -EINVAL;
	}
	b++;
	if (!isspace(*b))
		return -EINVAL;
	b++;
	if (*b == '*') {
		ex.major = ~0;
		b++;
	} else if (isdigit(*b)) {
		memset(temp, 0, sizeof(temp));
		for (count = 0; count < sizeof(temp) - 1; count++) {
			temp[count] = *b;
			b++;
			if (!isdigit(*b))
				break;
		}
		rc = kstrtou32(temp, 10, &ex.major);
		if (rc)
			return -EINVAL;
	} else {
		return -EINVAL;
	}
	if (*b != ':')
		return -EINVAL;
	b++;

	/* read minor */
	if (*b == '*') {
		ex.minor = ~0;
		b++;
	} else if (isdigit(*b)) {
		memset(temp, 0, sizeof(temp));
		for (count = 0; count < sizeof(temp) - 1; count++) {
			temp[count] = *b;
			b++;
			if (!isdigit(*b))
				break;
		}
		rc = kstrtou32(temp, 10, &ex.minor);
		if (rc)
			return -EINVAL;
	} else {
		return -EINVAL;
	}
	if (!isspace(*b))
		return -EINVAL;
	for (b++, count = 0; count < 3; count++, b++) {
		switch (*b) {
		case 'r':
			ex.access |= ACC_READ;
			break;
		case 'w':
			ex.access |= ACC_WRITE;
			break;
		case 'm':
			ex.access |= ACC_MKNOD;
			break;
		case '\n':
		case '\0':
			count = 3;
			break;
		default:
			return -EINVAL;
		}
	}

	switch (filetype) {
	case DEVCG_ALLOW:
		if (!parent_has_perm(devcgroup, &ex))
			return -EPERM;
		/*
		 * If the default policy is to allow, try to remove a
		 * matching exception instead.  And be silent about it:
		 * we don't want to break compatibility.
		 */
		if (devcgroup->behavior == DEVCG_DEFAULT_ALLOW) {
			dev_exception_rm(devcgroup, &ex);
			return 0;
		}
		return dev_exception_add(devcgroup, &ex);
	case DEVCG_DENY:
		/*
		 * If the default policy is to deny, try to remove a
		 * matching exception instead.  And be silent about it:
		 * we don't want to break compatibility.
		 */
		if (devcgroup->behavior == DEVCG_DEFAULT_DENY) {
			dev_exception_rm(devcgroup, &ex);
			return 0;
		}
		return dev_exception_add(devcgroup, &ex);
	default:
		return -EINVAL;
	}
	return 0;
}

static int devcgroup_access_write(struct cgroup *cgrp, struct cftype *cft,
				  const char *buffer)
{
	int retval;

	mutex_lock(&devcgroup_mutex);
	retval = devcgroup_update_access(cgroup_to_devcgroup(cgrp),
					 cft->private, buffer);
	mutex_unlock(&devcgroup_mutex);
	return retval;
}

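/*
 * Control files exposed in each cgroup directory as devices.allow,
 * devices.deny and devices.list.  For example (illustrative), a group
 * with the default "allow" behavior and no exceptions reports a single
 * "a *:* rwm" line in devices.list.
 */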
static struct cftype dev_cgroup_files[] = {
	{
		.name = "allow",
		.write_string = devcgroup_access_write,
		.private = DEVCG_ALLOW,
	},
	{
		.name = "deny",
		.write_string = devcgroup_access_write,
		.private = DEVCG_DENY,
	},
	{
		.name = "list",
		.read_seq_string = devcgroup_seq_read,
		.private = DEVCG_LIST,
	},
	{ }	/* terminate */
};

struct cgroup_subsys devices_subsys = {
	.name = "devices",
	.can_attach = devcgroup_can_attach,
	.css_alloc = devcgroup_css_alloc,
	.css_free = devcgroup_css_free,
	.subsys_id = devices_subsys_id,
	.base_cftypes = dev_cgroup_files,

	/*
	 * While devices cgroup has the rudimentary hierarchy support which
	 * checks the parent's restriction, it doesn't properly propagate
	 * config changes in ancestors to their descendants.  A child
	 * should only be allowed to add more restrictions to the parent's
	 * configuration.  Fix it and remove the following.
	 */
	.broken_hierarchy = true,
};

/**
 * __devcgroup_check_permission - checks if an inode operation is permitted
 * @type: device type
 * @major: device major number
 * @minor: device minor number
 * @access: combination of ACC_WRITE, ACC_READ and ACC_MKNOD
 *
 * The check is done against the device cgroup of the current task.
 *
 * returns 0 on success, -EPERM in case the operation is not permitted
 */
static int __devcgroup_check_permission(short type, u32 major, u32 minor,
					short access)
{
	struct dev_cgroup *dev_cgroup;
	struct dev_exception_item ex;
	int rc;

	memset(&ex, 0, sizeof(ex));
	ex.type = type;
	ex.major = major;
	ex.minor = minor;
	ex.access = access;

	rcu_read_lock();
	dev_cgroup = task_devcgroup(current);
	rc = may_access(dev_cgroup, &ex);
	rcu_read_unlock();

	if (!rc)
		return -EPERM;

	return 0;
}

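/*
 * Translate an inode and permission mask into a devcgroup access
 * check: block/char type from i_mode, ACC_READ/ACC_WRITE from the mask.
 */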
int __devcgroup_inode_permission(struct inode *inode, int mask)
{
	short type, access = 0;

	if (S_ISBLK(inode->i_mode))
		type = DEV_BLOCK;
	if (S_ISCHR(inode->i_mode))
		type = DEV_CHAR;
	if (mask & MAY_WRITE)
		access |= ACC_WRITE;
	if (mask & MAY_READ)
		access |= ACC_READ;

	return __devcgroup_check_permission(type, imajor(inode), iminor(inode),
					    access);
}

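/*
 * mknod of a non-device node is always allowed; creating a block or
 * char device node is subject to a devcgroup check for ACC_MKNOD on
 * its type and major:minor.
 */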
int devcgroup_inode_mknod(int mode, dev_t dev)
{
	short type;

	if (!S_ISBLK(mode) && !S_ISCHR(mode))
		return 0;

	if (S_ISBLK(mode))
		type = DEV_BLOCK;
	else
		type = DEV_CHAR;

	return __devcgroup_check_permission(type, MAJOR(dev), MINOR(dev),
					    ACC_MKNOD);
}