/*
 * device_cgroup.c - device cgroup subsystem
 *
 * Copyright 2007 IBM Corp
 */

#include <linux/device_cgroup.h>
#include <linux/cgroup.h>
#include <linux/ctype.h>
#include <linux/list.h>
#include <linux/uaccess.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/rcupdate.h>
#include <linux/mutex.h>

#define ACC_MKNOD 1
#define ACC_READ 2
#define ACC_WRITE 4
#define ACC_MASK (ACC_MKNOD | ACC_READ | ACC_WRITE)

#define DEV_BLOCK 1
#define DEV_CHAR 2
#define DEV_ALL 4  /* this represents all devices */

static DEFINE_MUTEX(devcgroup_mutex);

/*
 * exception list locking rules:
 * hold devcgroup_mutex for update/read.
 * hold rcu_read_lock() for read.
 */

struct dev_exception_item {
	u32 major, minor;
	short type;
	short access;
	struct list_head list;
	struct rcu_head rcu;
};

struct dev_cgroup {
	struct cgroup_subsys_state css;
	struct list_head exceptions;
	enum {
		DEVCG_DEFAULT_ALLOW,
		DEVCG_DEFAULT_DENY,
	} behavior;
};

static inline struct dev_cgroup *css_to_devcgroup(struct cgroup_subsys_state *s)
{
	return container_of(s, struct dev_cgroup, css);
}

static inline struct dev_cgroup *cgroup_to_devcgroup(struct cgroup *cgroup)
{
	return css_to_devcgroup(cgroup_subsys_state(cgroup, devices_subsys_id));
}

static inline struct dev_cgroup *task_devcgroup(struct task_struct *task)
{
	return css_to_devcgroup(task_subsys_state(task, devices_subsys_id));
}

struct cgroup_subsys devices_subsys;

static int devcgroup_can_attach(struct cgroup *new_cgrp,
				struct cgroup_taskset *set)
{
	struct task_struct *task = cgroup_taskset_first(set);

	if (current != task && !capable(CAP_SYS_ADMIN))
		return -EPERM;
	return 0;
}

/*
 * called under devcgroup_mutex
 */
static int dev_exceptions_copy(struct list_head *dest, struct list_head *orig)
{
	struct dev_exception_item *ex, *tmp, *new;

	lockdep_assert_held(&devcgroup_mutex);

	list_for_each_entry(ex, orig, list) {
		new = kmemdup(ex, sizeof(*ex), GFP_KERNEL);
		if (!new)
			goto free_and_exit;
		list_add_tail(&new->list, dest);
	}

	return 0;

free_and_exit:
	list_for_each_entry_safe(ex, tmp, dest, list) {
		list_del(&ex->list);
		kfree(ex);
	}
	return -ENOMEM;
}

/*
 * called under devcgroup_mutex
 */
static int dev_exception_add(struct dev_cgroup *dev_cgroup,
			     struct dev_exception_item *ex)
{
	struct dev_exception_item *excopy, *walk;

	lockdep_assert_held(&devcgroup_mutex);

	excopy = kmemdup(ex, sizeof(*ex), GFP_KERNEL);
	if (!excopy)
		return -ENOMEM;

	list_for_each_entry(walk, &dev_cgroup->exceptions, list) {
		if (walk->type != ex->type)
			continue;
		if (walk->major != ex->major)
			continue;
		if (walk->minor != ex->minor)
			continue;

		walk->access |= ex->access;
		kfree(excopy);
		excopy = NULL;
	}

	if (excopy != NULL)
		list_add_tail_rcu(&excopy->list, &dev_cgroup->exceptions);
	return 0;
}

/*
 * called under devcgroup_mutex
 */
static void dev_exception_rm(struct dev_cgroup *dev_cgroup,
			     struct dev_exception_item *ex)
{
	struct dev_exception_item *walk, *tmp;

	lockdep_assert_held(&devcgroup_mutex);

	list_for_each_entry_safe(walk, tmp, &dev_cgroup->exceptions, list) {
		if (walk->type != ex->type)
			continue;
		if (walk->major != ex->major)
			continue;
		if (walk->minor != ex->minor)
			continue;

		walk->access &= ~ex->access;
		if (!walk->access) {
			list_del_rcu(&walk->list);
			kfree_rcu(walk, rcu);
		}
	}
}

/**
 * dev_exception_clean - frees all entries of the exception list
 * @dev_cgroup: dev_cgroup with the exception list to be cleaned
 *
 * called under devcgroup_mutex
 */
static void dev_exception_clean(struct dev_cgroup *dev_cgroup)
{
	struct dev_exception_item *ex, *tmp;

	lockdep_assert_held(&devcgroup_mutex);

	list_for_each_entry_safe(ex, tmp, &dev_cgroup->exceptions, list) {
		list_del_rcu(&ex->list);
		kfree_rcu(ex, rcu);
	}
}

/*
 * called from kernel/cgroup.c with cgroup_lock() held.
 */
static struct cgroup_subsys_state *devcgroup_css_alloc(struct cgroup *cgroup)
{
	struct dev_cgroup *dev_cgroup, *parent_dev_cgroup;
	struct cgroup *parent_cgroup;
	int ret;

	dev_cgroup = kzalloc(sizeof(*dev_cgroup), GFP_KERNEL);
	if (!dev_cgroup)
		return ERR_PTR(-ENOMEM);
	INIT_LIST_HEAD(&dev_cgroup->exceptions);
	parent_cgroup = cgroup->parent;

	if (parent_cgroup == NULL)
		dev_cgroup->behavior = DEVCG_DEFAULT_ALLOW;
	else {
		parent_dev_cgroup = cgroup_to_devcgroup(parent_cgroup);
		mutex_lock(&devcgroup_mutex);
		ret = dev_exceptions_copy(&dev_cgroup->exceptions,
					  &parent_dev_cgroup->exceptions);
		dev_cgroup->behavior = parent_dev_cgroup->behavior;
		mutex_unlock(&devcgroup_mutex);
		if (ret) {
			kfree(dev_cgroup);
			return ERR_PTR(ret);
		}
	}

	return &dev_cgroup->css;
}

static void devcgroup_css_free(struct cgroup *cgroup)
{
	struct dev_cgroup *dev_cgroup;

	dev_cgroup = cgroup_to_devcgroup(cgroup);
	mutex_lock(&devcgroup_mutex);
	dev_exception_clean(dev_cgroup);
	mutex_unlock(&devcgroup_mutex);
	kfree(dev_cgroup);
}

#define DEVCG_ALLOW 1
#define DEVCG_DENY 2
#define DEVCG_LIST 3

#define MAJMINLEN 13
#define ACCLEN 4

static void set_access(char *acc, short access)
{
	int idx = 0;
	memset(acc, 0, ACCLEN);
	if (access & ACC_READ)
		acc[idx++] = 'r';
	if (access & ACC_WRITE)
		acc[idx++] = 'w';
	if (access & ACC_MKNOD)
		acc[idx++] = 'm';
}

static char type_to_char(short type)
{
	if (type == DEV_ALL)
		return 'a';
	if (type == DEV_CHAR)
		return 'c';
	if (type == DEV_BLOCK)
		return 'b';
	return 'X';
}

static void set_majmin(char *str, unsigned m)
{
	if (m == ~0)
		strcpy(str, "*");
	else
		sprintf(str, "%u", m);
}

static int devcgroup_seq_read(struct cgroup *cgroup, struct cftype *cft,
			      struct seq_file *m)
{
	struct dev_cgroup *devcgroup = cgroup_to_devcgroup(cgroup);
	struct dev_exception_item *ex;
	char maj[MAJMINLEN], min[MAJMINLEN], acc[ACCLEN];

	rcu_read_lock();
	/*
	 * To preserve compatibility:
	 * - Only show the "all devices" entry when the default policy is to allow
	 * - List the exceptions when the default policy is to deny
	 * This way, the file still reads as a "whitelist of devices"
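	 * (each entry is printed as "type major:minor access", e.g. a single
	 *  "a *:* rwm" line in the default-allow case, or lines such as
	 *  "c 1:3 rwm" for the individual exceptions otherwise)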
	 */
	if (devcgroup->behavior == DEVCG_DEFAULT_ALLOW) {
		set_access(acc, ACC_MASK);
		set_majmin(maj, ~0);
		set_majmin(min, ~0);
		seq_printf(m, "%c %s:%s %s\n", type_to_char(DEV_ALL),
			   maj, min, acc);
	} else {
		list_for_each_entry_rcu(ex, &devcgroup->exceptions, list) {
			set_access(acc, ex->access);
			set_majmin(maj, ex->major);
			set_majmin(min, ex->minor);
			seq_printf(m, "%c %s:%s %s\n", type_to_char(ex->type),
				   maj, min, acc);
		}
	}
	rcu_read_unlock();

	return 0;
}

/**
 * may_access - verifies if a new exception is part of what is allowed
 *		by a dev cgroup based on the default policy +
 *		exceptions. This is used to make sure a child cgroup
 *		won't have more privileges than its parent or to
 *		verify if a certain access is allowed.
 * @dev_cgroup: dev cgroup to be tested against
 * @refex: new exception
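 *
 * Example: with behavior == DEVCG_DEFAULT_DENY and an exception covering
 * "b 8:* r", a reference exception "b 8:0 r" matches and is therefore
 * allowed, while "b 8:0 w" does not match and is denied.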
 */
static int may_access(struct dev_cgroup *dev_cgroup,
		      struct dev_exception_item *refex)
{
	struct dev_exception_item *ex;
	bool match = false;

	rcu_lockdep_assert(rcu_read_lock_held() ||
			   lockdep_is_held(&devcgroup_mutex),
			   "device_cgroup::may_access() called without proper synchronization");

	list_for_each_entry_rcu(ex, &dev_cgroup->exceptions, list) {
		if ((refex->type & DEV_BLOCK) && !(ex->type & DEV_BLOCK))
			continue;
		if ((refex->type & DEV_CHAR) && !(ex->type & DEV_CHAR))
			continue;
		if (ex->major != ~0 && ex->major != refex->major)
			continue;
		if (ex->minor != ~0 && ex->minor != refex->minor)
			continue;
		if (refex->access & (~ex->access))
			continue;
		match = true;
		break;
	}

	/*
	 * In two cases we'll consider this new exception valid:
	 * - the dev cgroup has its default policy to allow + exception list:
	 *   the new exception should *not* match any of the exceptions
	 *   (behavior == DEVCG_DEFAULT_ALLOW, !match)
	 * - the dev cgroup has its default policy to deny + exception list:
	 *   the new exception *should* match the exceptions
	 *   (behavior == DEVCG_DEFAULT_DENY, match)
	 */
	if ((dev_cgroup->behavior == DEVCG_DEFAULT_DENY) == match)
		return 1;
	return 0;
}

/*
 * parent_has_perm:
 * when adding a new allow rule to a device exception list, the rule
 * must be allowed in the parent device
 */
static int parent_has_perm(struct dev_cgroup *childcg,
			   struct dev_exception_item *ex)
{
	struct cgroup *pcg = childcg->css.cgroup->parent;
	struct dev_cgroup *parent;

	if (!pcg)
		return 1;
	parent = cgroup_to_devcgroup(pcg);
	return may_access(parent, ex);
}

/**
 * may_allow_all - checks if it's possible to change the behavior to
 *		   allow based on parent's rules.
 * @parent: device cgroup's parent
 * returns: != 0 in case it's allowed, 0 otherwise
 */
static inline int may_allow_all(struct dev_cgroup *parent)
{
	if (!parent)
		return 1;
	return parent->behavior == DEVCG_DEFAULT_ALLOW;
}

/*
 * Modify the exception list using allow/deny rules.
 * CAP_SYS_ADMIN is needed for this.  It's at least separate from CAP_MKNOD
 * so we can give a container CAP_MKNOD to let it create devices but not
 * modify the exception list.
 * It seems likely we'll want to add a CAP_CONTAINER capability to allow
 * us to also grant CAP_SYS_ADMIN to containers without giving away the
 * device exception list controls, but for now we'll stick with CAP_SYS_ADMIN
 *
 * Taking rules away is always allowed (given CAP_SYS_ADMIN).  Granting
 * new access is only allowed if you're in the top-level cgroup, or your
 * parent cgroup has the access you're asking for.
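 *
 * Rules written to devices.allow/devices.deny have the form
 * "<type> <major>:<minor> <access>", e.g. "c 1:3 mr" or "b *:* rwm";
 * a single "a" switches the default behavior of the whole cgroup
 * (see Documentation/cgroups/devices.txt).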
 */
static int devcgroup_update_access(struct dev_cgroup *devcgroup,
				   int filetype, const char *buffer)
{
	const char *b;
	char temp[12];		/* 11 + 1 characters needed for a u32 */
	int count, rc;
	struct dev_exception_item ex;
	struct cgroup *p = devcgroup->css.cgroup;
	struct dev_cgroup *parent = NULL;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (p->parent)
		parent = cgroup_to_devcgroup(p->parent);

	memset(&ex, 0, sizeof(ex));
	b = buffer;

	switch (*b) {
	case 'a':
		switch (filetype) {
		case DEVCG_ALLOW:
			if (!may_allow_all(parent))
				return -EPERM;
			dev_exception_clean(devcgroup);
			devcgroup->behavior = DEVCG_DEFAULT_ALLOW;
			if (!parent)
				break;

			rc = dev_exceptions_copy(&devcgroup->exceptions,
						 &parent->exceptions);
			if (rc)
				return rc;
			break;
		case DEVCG_DENY:
			dev_exception_clean(devcgroup);
			devcgroup->behavior = DEVCG_DEFAULT_DENY;
			break;
		default:
			return -EINVAL;
		}
		return 0;
	case 'b':
		ex.type = DEV_BLOCK;
		break;
	case 'c':
		ex.type = DEV_CHAR;
		break;
	default:
		return -EINVAL;
	}
	b++;
	if (!isspace(*b))
		return -EINVAL;
	b++;
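	/* read major */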
	if (*b == '*') {
		ex.major = ~0;
		b++;
	} else if (isdigit(*b)) {
		memset(temp, 0, sizeof(temp));
		for (count = 0; count < sizeof(temp) - 1; count++) {
			temp[count] = *b;
			b++;
			if (!isdigit(*b))
				break;
		}
		rc = kstrtou32(temp, 10, &ex.major);
		if (rc)
			return -EINVAL;
	} else {
		return -EINVAL;
	}
	if (*b != ':')
		return -EINVAL;
	b++;

	/* read minor */
	if (*b == '*') {
		ex.minor = ~0;
		b++;
	} else if (isdigit(*b)) {
		memset(temp, 0, sizeof(temp));
		for (count = 0; count < sizeof(temp) - 1; count++) {
			temp[count] = *b;
			b++;
			if (!isdigit(*b))
				break;
		}
		rc = kstrtou32(temp, 10, &ex.minor);
		if (rc)
			return -EINVAL;
	} else {
		return -EINVAL;
	}
	if (!isspace(*b))
		return -EINVAL;
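	/* read access: up to three of 'r', 'w' and 'm' */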
	for (b++, count = 0; count < 3; count++, b++) {
		switch (*b) {
		case 'r':
			ex.access |= ACC_READ;
			break;
		case 'w':
			ex.access |= ACC_WRITE;
			break;
		case 'm':
			ex.access |= ACC_MKNOD;
			break;
		case '\n':
		case '\0':
			count = 3;
			break;
		default:
			return -EINVAL;
		}
	}

	switch (filetype) {
	case DEVCG_ALLOW:
		if (!parent_has_perm(devcgroup, &ex))
			return -EPERM;
		/*
		 * If the default policy is to allow by default, try to remove
		 * a matching exception instead. And be silent about it: we
		 * don't want to break compatibility
		 */
		if (devcgroup->behavior == DEVCG_DEFAULT_ALLOW) {
			dev_exception_rm(devcgroup, &ex);
			return 0;
		}
		return dev_exception_add(devcgroup, &ex);
	case DEVCG_DENY:
		/*
		 * If the default policy is to deny by default, try to remove
		 * a matching exception instead. And be silent about it: we
		 * don't want to break compatibility
		 */
		if (devcgroup->behavior == DEVCG_DEFAULT_DENY) {
			dev_exception_rm(devcgroup, &ex);
			return 0;
		}
		return dev_exception_add(devcgroup, &ex);
	default:
		return -EINVAL;
	}
	return 0;
}

static int devcgroup_access_write(struct cgroup *cgrp, struct cftype *cft,
				  const char *buffer)
{
	int retval;

	mutex_lock(&devcgroup_mutex);
	retval = devcgroup_update_access(cgroup_to_devcgroup(cgrp),
					 cft->private, buffer);
	mutex_unlock(&devcgroup_mutex);
	return retval;
}

static struct cftype dev_cgroup_files[] = {
	{
		.name = "allow",
		.write_string = devcgroup_access_write,
		.private = DEVCG_ALLOW,
	},
	{
		.name = "deny",
		.write_string = devcgroup_access_write,
		.private = DEVCG_DENY,
	},
	{
		.name = "list",
		.read_seq_string = devcgroup_seq_read,
		.private = DEVCG_LIST,
	},
	{ }	/* terminate */
};

struct cgroup_subsys devices_subsys = {
	.name = "devices",
	.can_attach = devcgroup_can_attach,
	.css_alloc = devcgroup_css_alloc,
	.css_free = devcgroup_css_free,
	.subsys_id = devices_subsys_id,
	.base_cftypes = dev_cgroup_files,

	/*
	 * While devices cgroup has the rudimentary hierarchy support which
	 * checks the parent's restriction, it doesn't properly propagate
	 * config changes in ancestors to their descendants. A child
	 * should only be allowed to add more restrictions to the parent's
	 * configuration. Fix it and remove the following.
	 */
	.broken_hierarchy = true,
};

/**
 * __devcgroup_check_permission - checks if an inode operation is permitted
 * @type: device type
 * @major: device major number
 * @minor: device minor number
 * @access: combination of ACC_WRITE, ACC_READ and ACC_MKNOD
 *
 * The check is done against the current task's device cgroup.
 *
 * returns 0 on success, -EPERM in case the operation is not permitted
 */
static int __devcgroup_check_permission(short type, u32 major, u32 minor,
					short access)
{
	struct dev_cgroup *dev_cgroup;
	struct dev_exception_item ex;
	int rc;

	memset(&ex, 0, sizeof(ex));
	ex.type = type;
	ex.major = major;
	ex.minor = minor;
	ex.access = access;

	rcu_read_lock();
	dev_cgroup = task_devcgroup(current);
	rc = may_access(dev_cgroup, &ex);
	rcu_read_unlock();

	if (!rc)
		return -EPERM;

	return 0;
}

int __devcgroup_inode_permission(struct inode *inode, int mask)
{
	short type, access = 0;

	if (S_ISBLK(inode->i_mode))
		type = DEV_BLOCK;
	if (S_ISCHR(inode->i_mode))
		type = DEV_CHAR;
	if (mask & MAY_WRITE)
		access |= ACC_WRITE;
	if (mask & MAY_READ)
		access |= ACC_READ;

	return __devcgroup_check_permission(type, imajor(inode), iminor(inode),
					    access);
}

int devcgroup_inode_mknod(int mode, dev_t dev)
{
	short type;

	if (!S_ISBLK(mode) && !S_ISCHR(mode))
		return 0;

	if (S_ISBLK(mode))
		type = DEV_BLOCK;
	else
		type = DEV_CHAR;

	return __devcgroup_check_permission(type, MAJOR(dev), MINOR(dev),
					    ACC_MKNOD);

}