/*
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation, version 2 of the
 * License.
 */

#include <linux/export.h>
#include <linux/nsproxy.h>
#include <linux/slab.h>
#include <linux/user_namespace.h>
#include <linux/proc_ns.h>
#include <linux/highuid.h>
#include <linux/cred.h>
#include <linux/securebits.h>
#include <linux/keyctl.h>
#include <linux/key-type.h>
#include <keys/user-type.h>
#include <linux/seq_file.h>
#include <linux/fs.h>
#include <linux/uaccess.h>
#include <linux/ctype.h>
#include <linux/projid.h>
#include <linux/fs_struct.h>

extern bool setup_userns_sysctls(struct user_namespace *ns);
extern void retire_userns_sysctls(struct user_namespace *ns);

static struct kmem_cache *user_ns_cachep __read_mostly;
static DEFINE_MUTEX(userns_state_mutex);

static bool new_idmap_permitted(const struct file *file,
				struct user_namespace *ns, int cap_setid,
				struct uid_gid_map *map);
static void free_user_ns(struct work_struct *work);

static void set_cred_user_ns(struct cred *cred, struct user_namespace *user_ns)
{
	/* Start with the same capabilities as init but useless for doing
	 * anything as the capabilities are bound to the new user namespace.
	 */
	cred->securebits = SECUREBITS_DEFAULT;
	cred->cap_inheritable = CAP_EMPTY_SET;
	cred->cap_permitted = CAP_FULL_SET;
	cred->cap_effective = CAP_FULL_SET;
	cred->cap_ambient = CAP_EMPTY_SET;
	cred->cap_bset = CAP_FULL_SET;
#ifdef CONFIG_KEYS
	key_put(cred->request_key_auth);
	cred->request_key_auth = NULL;
#endif
	/* tgcred will be cleared in our caller because CLONE_THREAD won't be set */
	cred->user_ns = user_ns;
}

/*
 * Create a new user namespace, deriving the creator from the user in the
 * passed credentials, and replacing that user with the new root user for the
 * new namespace.
 *
 * This is called by copy_creds(), which will finish setting the target task's
 * credentials.
 */
int create_user_ns(struct cred *new)
{
	struct user_namespace *ns, *parent_ns = new->user_ns;
	kuid_t owner = new->euid;
	kgid_t group = new->egid;
	int ret;

	if (parent_ns->level > 32)
		return -EUSERS;

	/*
	 * Verify that we cannot violate the policy of which files
	 * may be accessed, as specified by the root directory, by
	 * verifying that the root directory is at the root of the
	 * mount namespace, which allows all files to be accessed.
	 */
	if (current_chrooted())
		return -EPERM;

	/* The creator needs a mapping in the parent user namespace
	 * or else we won't be able to reasonably tell userspace who
	 * created a user_namespace.
	 */
	if (!kuid_has_mapping(parent_ns, owner) ||
	    !kgid_has_mapping(parent_ns, group))
		return -EPERM;

	ns = kmem_cache_zalloc(user_ns_cachep, GFP_KERNEL);
	if (!ns)
		return -ENOMEM;

	ret = ns_alloc_inum(&ns->ns);
	if (ret) {
		kmem_cache_free(user_ns_cachep, ns);
		return ret;
	}
	ns->ns.ops = &userns_operations;

	atomic_set(&ns->count, 1);
	/* Leave the new->user_ns reference with the new user namespace. */
	ns->parent = parent_ns;
	ns->level = parent_ns->level + 1;
	ns->owner = owner;
	ns->group = group;
	INIT_WORK(&ns->work, free_user_ns);

	/* Inherit USERNS_SETGROUPS_ALLOWED from our parent */
	mutex_lock(&userns_state_mutex);
	ns->flags = parent_ns->flags;
	mutex_unlock(&userns_state_mutex);

#ifdef CONFIG_PERSISTENT_KEYRINGS
	init_rwsem(&ns->persistent_keyring_register_sem);
#endif
	ret = -ENOMEM;
	if (!setup_userns_sysctls(ns))
		goto fail_keyring;

	set_cred_user_ns(new, ns);
	return 0;
fail_keyring:
#ifdef CONFIG_PERSISTENT_KEYRINGS
	key_put(ns->persistent_keyring_register);
#endif
	ns_free_inum(&ns->ns);
	kmem_cache_free(user_ns_cachep, ns);
	return ret;
}

int unshare_userns(unsigned long unshare_flags, struct cred **new_cred)
{
	struct cred *cred;
	int err = -ENOMEM;

	if (!(unshare_flags & CLONE_NEWUSER))
		return 0;

	cred = prepare_creds();
	if (cred) {
		err = create_user_ns(cred);
		if (err)
			put_cred(cred);
		else
			*new_cred = cred;
	}

	return err;
}

static void free_user_ns(struct work_struct *work)
{
	struct user_namespace *parent, *ns =
		container_of(work, struct user_namespace, work);

	do {
		parent = ns->parent;
		retire_userns_sysctls(ns);
#ifdef CONFIG_PERSISTENT_KEYRINGS
		key_put(ns->persistent_keyring_register);
#endif
		ns_free_inum(&ns->ns);
		kmem_cache_free(user_ns_cachep, ns);
		ns = parent;
	} while (atomic_dec_and_test(&parent->count));
}

void __put_user_ns(struct user_namespace *ns)
{
	schedule_work(&ns->work);
}
EXPORT_SYMBOL(__put_user_ns);

static u32 map_id_range_down(struct uid_gid_map *map, u32 id, u32 count)
{
	unsigned idx, extents;
	u32 first, last, id2;

	id2 = id + count - 1;

	/* Find the matching extent */
	extents = map->nr_extents;
	smp_rmb();
	for (idx = 0; idx < extents; idx++) {
		first = map->extent[idx].first;
		last = first + map->extent[idx].count - 1;
		if (id >= first && id <= last &&
		    (id2 >= first && id2 <= last))
			break;
	}
	/* Map the id or note failure */
	if (idx < extents)
		id = (id - first) + map->extent[idx].lower_first;
	else
		id = (u32) -1;

	return id;
}

static u32 map_id_down(struct uid_gid_map *map, u32 id)
{
	unsigned idx, extents;
	u32 first, last;

	/* Find the matching extent */
	extents = map->nr_extents;
	smp_rmb();
	for (idx = 0; idx < extents; idx++) {
		first = map->extent[idx].first;
		last = first + map->extent[idx].count - 1;
		if (id >= first && id <= last)
			break;
	}
	/* Map the id or note failure */
	if (idx < extents)
		id = (id - first) + map->extent[idx].lower_first;
	else
		id = (u32) -1;

	return id;
}

static u32 map_id_up(struct uid_gid_map *map, u32 id)
{
	unsigned idx, extents;
	u32 first, last;

	/* Find the matching extent */
	extents = map->nr_extents;
	smp_rmb();
	for (idx = 0; idx < extents; idx++) {
		first = map->extent[idx].lower_first;
		last = first + map->extent[idx].count - 1;
		if (id >= first && id <= last)
			break;
	}
	/* Map the id or note failure */
	if (idx < extents)
		id = (id - first) + map->extent[idx].first;
	else
		id = (u32) -1;

	return id;
}
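
/*
 * Worked example (editorial illustration, not kernel documentation):
 * given a map with the single extent { .first = 0, .lower_first = 100000,
 * .count = 65536 }, map_id_down(map, 1000) returns 101000 and
 * map_id_up(map, 101000) returns 1000.  An id outside the extent,
 * e.g. map_id_down(map, 70000), returns (u32) -1 to signal "no mapping".
 */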

/**
 * make_kuid - Map a user-namespace uid pair into a kuid.
 * @ns: User namespace that the uid is in
 * @uid: User identifier
 *
 * Maps a user-namespace uid pair into a kernel internal kuid,
 * and returns that kuid.
 *
 * When there is no mapping defined for the user-namespace uid
 * pair INVALID_UID is returned.  Callers are expected to test
 * for and handle INVALID_UID being returned.  INVALID_UID
 * may be tested for using uid_valid().
 */
kuid_t make_kuid(struct user_namespace *ns, uid_t uid)
{
	/* Map the uid to a global kernel uid */
	return KUIDT_INIT(map_id_down(&ns->uid_map, uid));
}
EXPORT_SYMBOL(make_kuid);

/**
 * from_kuid - Create a uid from a kuid user-namespace pair.
 * @targ: The user namespace we want a uid in.
 * @kuid: The kernel internal uid to start with.
 *
 * Map @kuid into the user-namespace specified by @targ and
 * return the resulting uid.
 *
 * There is always a mapping into the initial user_namespace.
 *
 * If @kuid has no mapping in @targ (uid_t)-1 is returned.
 */
uid_t from_kuid(struct user_namespace *targ, kuid_t kuid)
{
	/* Map the uid from a global kernel uid */
	return map_id_up(&targ->uid_map, __kuid_val(kuid));
}
EXPORT_SYMBOL(from_kuid);

/**
 * from_kuid_munged - Create a uid from a kuid user-namespace pair.
 * @targ: The user namespace we want a uid in.
 * @kuid: The kernel internal uid to start with.
 *
 * Map @kuid into the user-namespace specified by @targ and
 * return the resulting uid.
 *
 * There is always a mapping into the initial user_namespace.
 *
 * Unlike from_kuid, from_kuid_munged never fails and always
 * returns a valid uid.  This makes from_kuid_munged appropriate
 * for use in syscalls like stat and getuid where failing the
 * system call and failing to provide a valid uid are not
 * options.
 *
 * If @kuid has no mapping in @targ overflowuid is returned.
 */
uid_t from_kuid_munged(struct user_namespace *targ, kuid_t kuid)
{
	uid_t uid;
	uid = from_kuid(targ, kuid);

	if (uid == (uid_t) -1)
		uid = overflowuid;
	return uid;
}
EXPORT_SYMBOL(from_kuid_munged);
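
/*
 * Usage sketch (illustrative only, not part of this file): a typical
 * consumer converts a uid received from userspace with make_kuid()
 * and rejects it if the caller's namespace has no mapping:
 *
 *	kuid_t kuid = make_kuid(current_user_ns(), uid);
 *	if (!uid_valid(kuid))
 *		return -EINVAL;
 *
 * from_kuid_munged() is used on the way back out when failure is not
 * an option (e.g. stat), silently substituting overflowuid.
 */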

/**
 * make_kgid - Map a user-namespace gid pair into a kgid.
 * @ns: User namespace that the gid is in
 * @gid: Group identifier
 *
 * Maps a user-namespace gid pair into a kernel internal kgid,
 * and returns that kgid.
 *
 * When there is no mapping defined for the user-namespace gid
 * pair INVALID_GID is returned.  Callers are expected to test
 * for and handle INVALID_GID being returned.  INVALID_GID may be
 * tested for using gid_valid().
 */
kgid_t make_kgid(struct user_namespace *ns, gid_t gid)
{
	/* Map the gid to a global kernel gid */
	return KGIDT_INIT(map_id_down(&ns->gid_map, gid));
}
EXPORT_SYMBOL(make_kgid);

/**
 * from_kgid - Create a gid from a kgid user-namespace pair.
 * @targ: The user namespace we want a gid in.
 * @kgid: The kernel internal gid to start with.
 *
 * Map @kgid into the user-namespace specified by @targ and
 * return the resulting gid.
 *
 * There is always a mapping into the initial user_namespace.
 *
 * If @kgid has no mapping in @targ (gid_t)-1 is returned.
 */
gid_t from_kgid(struct user_namespace *targ, kgid_t kgid)
{
	/* Map the gid from a global kernel gid */
	return map_id_up(&targ->gid_map, __kgid_val(kgid));
}
EXPORT_SYMBOL(from_kgid);

/**
 * from_kgid_munged - Create a gid from a kgid user-namespace pair.
 * @targ: The user namespace we want a gid in.
 * @kgid: The kernel internal gid to start with.
 *
 * Map @kgid into the user-namespace specified by @targ and
 * return the resulting gid.
 *
 * There is always a mapping into the initial user_namespace.
 *
 * Unlike from_kgid, from_kgid_munged never fails and always
 * returns a valid gid.  This makes from_kgid_munged appropriate
 * for use in syscalls like stat and getgid where failing the
 * system call and failing to provide a valid gid are not options.
 *
 * If @kgid has no mapping in @targ overflowgid is returned.
 */
gid_t from_kgid_munged(struct user_namespace *targ, kgid_t kgid)
{
	gid_t gid;
	gid = from_kgid(targ, kgid);

	if (gid == (gid_t) -1)
		gid = overflowgid;
	return gid;
}
EXPORT_SYMBOL(from_kgid_munged);
382
Eric W. Biedermanf76d2072012-08-30 01:24:05 -0700383/**
384 * make_kprojid - Map a user-namespace projid pair into a kprojid.
385 * @ns: User namespace that the projid is in
386 * @projid: Project identifier
387 *
388 * Maps a user-namespace uid pair into a kernel internal kuid,
389 * and returns that kuid.
390 *
391 * When there is no mapping defined for the user-namespace projid
392 * pair INVALID_PROJID is returned. Callers are expected to test
393 * for and handle handle INVALID_PROJID being returned. INVALID_PROJID
394 * may be tested for using projid_valid().
395 */
396kprojid_t make_kprojid(struct user_namespace *ns, projid_t projid)
397{
398 /* Map the uid to a global kernel uid */
399 return KPROJIDT_INIT(map_id_down(&ns->projid_map, projid));
400}
401EXPORT_SYMBOL(make_kprojid);
402
403/**
404 * from_kprojid - Create a projid from a kprojid user-namespace pair.
405 * @targ: The user namespace we want a projid in.
406 * @kprojid: The kernel internal project identifier to start with.
407 *
408 * Map @kprojid into the user-namespace specified by @targ and
409 * return the resulting projid.
410 *
411 * There is always a mapping into the initial user_namespace.
412 *
413 * If @kprojid has no mapping in @targ (projid_t)-1 is returned.
414 */
415projid_t from_kprojid(struct user_namespace *targ, kprojid_t kprojid)
416{
417 /* Map the uid from a global kernel uid */
418 return map_id_up(&targ->projid_map, __kprojid_val(kprojid));
419}
420EXPORT_SYMBOL(from_kprojid);
421
422/**
423 * from_kprojid_munged - Create a projiid from a kprojid user-namespace pair.
424 * @targ: The user namespace we want a projid in.
425 * @kprojid: The kernel internal projid to start with.
426 *
427 * Map @kprojid into the user-namespace specified by @targ and
428 * return the resulting projid.
429 *
430 * There is always a mapping into the initial user_namespace.
431 *
432 * Unlike from_kprojid from_kprojid_munged never fails and always
433 * returns a valid projid. This makes from_kprojid_munged
434 * appropriate for use in syscalls like stat and where
435 * failing the system call and failing to provide a valid projid are
436 * not an options.
437 *
438 * If @kprojid has no mapping in @targ OVERFLOW_PROJID is returned.
439 */
440projid_t from_kprojid_munged(struct user_namespace *targ, kprojid_t kprojid)
441{
442 projid_t projid;
443 projid = from_kprojid(targ, kprojid);
444
445 if (projid == (projid_t) -1)
446 projid = OVERFLOW_PROJID;
447 return projid;
448}
449EXPORT_SYMBOL(from_kprojid_munged);


static int uid_m_show(struct seq_file *seq, void *v)
{
	struct user_namespace *ns = seq->private;
	struct uid_gid_extent *extent = v;
	struct user_namespace *lower_ns;
	uid_t lower;

	lower_ns = seq_user_ns(seq);
	if ((lower_ns == ns) && lower_ns->parent)
		lower_ns = lower_ns->parent;

	lower = from_kuid(lower_ns, KUIDT_INIT(extent->lower_first));

	seq_printf(seq, "%10u %10u %10u\n",
		   extent->first,
		   lower,
		   extent->count);

	return 0;
}

static int gid_m_show(struct seq_file *seq, void *v)
{
	struct user_namespace *ns = seq->private;
	struct uid_gid_extent *extent = v;
	struct user_namespace *lower_ns;
	gid_t lower;

	lower_ns = seq_user_ns(seq);
	if ((lower_ns == ns) && lower_ns->parent)
		lower_ns = lower_ns->parent;

	lower = from_kgid(lower_ns, KGIDT_INIT(extent->lower_first));

	seq_printf(seq, "%10u %10u %10u\n",
		   extent->first,
		   lower,
		   extent->count);

	return 0;
}

static int projid_m_show(struct seq_file *seq, void *v)
{
	struct user_namespace *ns = seq->private;
	struct uid_gid_extent *extent = v;
	struct user_namespace *lower_ns;
	projid_t lower;

	lower_ns = seq_user_ns(seq);
	if ((lower_ns == ns) && lower_ns->parent)
		lower_ns = lower_ns->parent;

	lower = from_kprojid(lower_ns, KPROJIDT_INIT(extent->lower_first));

	seq_printf(seq, "%10u %10u %10u\n",
		   extent->first,
		   lower,
		   extent->count);

	return 0;
}

static void *m_start(struct seq_file *seq, loff_t *ppos,
		     struct uid_gid_map *map)
{
	struct uid_gid_extent *extent = NULL;
	loff_t pos = *ppos;

	if (pos < map->nr_extents)
		extent = &map->extent[pos];

	return extent;
}

static void *uid_m_start(struct seq_file *seq, loff_t *ppos)
{
	struct user_namespace *ns = seq->private;

	return m_start(seq, ppos, &ns->uid_map);
}

static void *gid_m_start(struct seq_file *seq, loff_t *ppos)
{
	struct user_namespace *ns = seq->private;

	return m_start(seq, ppos, &ns->gid_map);
}

static void *projid_m_start(struct seq_file *seq, loff_t *ppos)
{
	struct user_namespace *ns = seq->private;

	return m_start(seq, ppos, &ns->projid_map);
}

static void *m_next(struct seq_file *seq, void *v, loff_t *pos)
{
	(*pos)++;
	return seq->op->start(seq, pos);
}

static void m_stop(struct seq_file *seq, void *v)
{
	return;
}

const struct seq_operations proc_uid_seq_operations = {
	.start = uid_m_start,
	.stop = m_stop,
	.next = m_next,
	.show = uid_m_show,
};

const struct seq_operations proc_gid_seq_operations = {
	.start = gid_m_start,
	.stop = m_stop,
	.next = m_next,
	.show = gid_m_show,
};

const struct seq_operations proc_projid_seq_operations = {
	.start = projid_m_start,
	.stop = m_stop,
	.next = m_next,
	.show = projid_m_show,
};

static bool mappings_overlap(struct uid_gid_map *new_map,
			     struct uid_gid_extent *extent)
{
	u32 upper_first, lower_first, upper_last, lower_last;
	unsigned idx;

	upper_first = extent->first;
	lower_first = extent->lower_first;
	upper_last = upper_first + extent->count - 1;
	lower_last = lower_first + extent->count - 1;

	for (idx = 0; idx < new_map->nr_extents; idx++) {
		u32 prev_upper_first, prev_lower_first;
		u32 prev_upper_last, prev_lower_last;
		struct uid_gid_extent *prev;

		prev = &new_map->extent[idx];

		prev_upper_first = prev->first;
		prev_lower_first = prev->lower_first;
		prev_upper_last = prev_upper_first + prev->count - 1;
		prev_lower_last = prev_lower_first + prev->count - 1;

		/* Does the upper range intersect a previous extent? */
		if ((prev_upper_first <= upper_last) &&
		    (prev_upper_last >= upper_first))
			return true;

		/* Does the lower range intersect a previous extent? */
		if ((prev_lower_first <= lower_last) &&
		    (prev_lower_last >= lower_first))
			return true;
	}
	return false;
}

static ssize_t map_write(struct file *file, const char __user *buf,
			 size_t count, loff_t *ppos,
			 int cap_setid,
			 struct uid_gid_map *map,
			 struct uid_gid_map *parent_map)
{
	struct seq_file *seq = file->private_data;
	struct user_namespace *ns = seq->private;
	struct uid_gid_map new_map;
	unsigned idx;
	struct uid_gid_extent *extent = NULL;
	char *kbuf = NULL, *pos, *next_line;
	ssize_t ret = -EINVAL;

	/*
	 * The userns_state_mutex serializes all writes to any given map.
	 *
	 * Any map is only ever written once.
	 *
	 * An id map fits within 1 cache line on most architectures.
	 *
	 * On read nothing needs to be done unless you are on an
	 * architecture with a crazy cache coherency model like alpha.
	 *
	 * There is a one time data dependency between reading the
	 * count of the extents and the values of the extents.  The
	 * desired behavior is to see the values of the extents that
	 * were written before the count of the extents.
	 *
	 * To achieve this smp_wmb() is used to guarantee the write
	 * order and smp_rmb() guarantees that we don't see stale data
	 * on architectures with weaker ordering.
	 */
	mutex_lock(&userns_state_mutex);

	ret = -EPERM;
	/* Only allow one successful write to the map */
	if (map->nr_extents != 0)
		goto out;

	/*
	 * Adjusting namespace settings requires capabilities on the target.
	 */
	if (cap_valid(cap_setid) && !file_ns_capable(file, ns, CAP_SYS_ADMIN))
		goto out;

	/* Only allow < page size writes at the beginning of the file */
	ret = -EINVAL;
	if ((*ppos != 0) || (count >= PAGE_SIZE))
		goto out;

	/* Slurp in the user data */
	kbuf = memdup_user_nul(buf, count);
	if (IS_ERR(kbuf)) {
		ret = PTR_ERR(kbuf);
		kbuf = NULL;
		goto out;
	}

	/* Parse the user data */
	ret = -EINVAL;
	pos = kbuf;
	new_map.nr_extents = 0;
	for (; pos; pos = next_line) {
		extent = &new_map.extent[new_map.nr_extents];

		/* Find the end of line and ensure I don't look past it */
		next_line = strchr(pos, '\n');
		if (next_line) {
			*next_line = '\0';
			next_line++;
			if (*next_line == '\0')
				next_line = NULL;
		}

		pos = skip_spaces(pos);
		extent->first = simple_strtoul(pos, &pos, 10);
		if (!isspace(*pos))
			goto out;

		pos = skip_spaces(pos);
		extent->lower_first = simple_strtoul(pos, &pos, 10);
		if (!isspace(*pos))
			goto out;

		pos = skip_spaces(pos);
		extent->count = simple_strtoul(pos, &pos, 10);
		if (*pos && !isspace(*pos))
			goto out;

		/* Verify there is no trailing junk on the line */
		pos = skip_spaces(pos);
		if (*pos != '\0')
			goto out;

		/* Verify we have been given valid starting values */
		if ((extent->first == (u32) -1) ||
		    (extent->lower_first == (u32) -1))
			goto out;

		/* Verify count is not zero and does not cause the
		 * extent to wrap
		 */
		if ((extent->first + extent->count) <= extent->first)
			goto out;
		if ((extent->lower_first + extent->count) <=
		     extent->lower_first)
			goto out;

		/* Do the ranges in extent overlap any previous extents? */
		if (mappings_overlap(&new_map, extent))
			goto out;

		new_map.nr_extents++;

		/* Fail if the file contains too many extents */
		if ((new_map.nr_extents == UID_GID_MAP_MAX_EXTENTS) &&
		    (next_line != NULL))
			goto out;
	}
	/* Be very certain the new map actually exists */
	if (new_map.nr_extents == 0)
		goto out;

	ret = -EPERM;
	/* Validate that the writer is allowed to use the user ids mapped to. */
	if (!new_idmap_permitted(file, ns, cap_setid, &new_map))
		goto out;

	/* Map the lower ids from the parent user namespace to the
	 * kernel global id space.
	 */
	for (idx = 0; idx < new_map.nr_extents; idx++) {
		u32 lower_first;
		extent = &new_map.extent[idx];

		lower_first = map_id_range_down(parent_map,
						extent->lower_first,
						extent->count);

		/* Fail if we can not map the specified extent to
		 * the kernel global id space.
		 */
		if (lower_first == (u32) -1)
			goto out;

		extent->lower_first = lower_first;
	}

	/* Install the map */
	memcpy(map->extent, new_map.extent,
	       new_map.nr_extents*sizeof(new_map.extent[0]));
	smp_wmb();
	map->nr_extents = new_map.nr_extents;

	*ppos = count;
	ret = count;
out:
	mutex_unlock(&userns_state_mutex);
	kfree(kbuf);
	return ret;
}
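
/*
 * Format example (editorial illustration): map_write() parses one or
 * more lines of "<first> <lower_first> <count>".  Writing
 *
 *	0 100000 65536
 *
 * to /proc/<pid>/uid_map maps uids 0-65535 inside the namespace onto
 * uids 100000-165535 in the parent namespace, via a single extent.
 */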

ssize_t proc_uid_map_write(struct file *file, const char __user *buf,
			   size_t size, loff_t *ppos)
{
	struct seq_file *seq = file->private_data;
	struct user_namespace *ns = seq->private;
	struct user_namespace *seq_ns = seq_user_ns(seq);

	if (!ns->parent)
		return -EPERM;

	if ((seq_ns != ns) && (seq_ns != ns->parent))
		return -EPERM;

	return map_write(file, buf, size, ppos, CAP_SETUID,
			 &ns->uid_map, &ns->parent->uid_map);
}

ssize_t proc_gid_map_write(struct file *file, const char __user *buf,
			   size_t size, loff_t *ppos)
{
	struct seq_file *seq = file->private_data;
	struct user_namespace *ns = seq->private;
	struct user_namespace *seq_ns = seq_user_ns(seq);

	if (!ns->parent)
		return -EPERM;

	if ((seq_ns != ns) && (seq_ns != ns->parent))
		return -EPERM;

	return map_write(file, buf, size, ppos, CAP_SETGID,
			 &ns->gid_map, &ns->parent->gid_map);
}

ssize_t proc_projid_map_write(struct file *file, const char __user *buf,
			      size_t size, loff_t *ppos)
{
	struct seq_file *seq = file->private_data;
	struct user_namespace *ns = seq->private;
	struct user_namespace *seq_ns = seq_user_ns(seq);

	if (!ns->parent)
		return -EPERM;

	if ((seq_ns != ns) && (seq_ns != ns->parent))
		return -EPERM;

	/* Anyone can set any valid project id; no capability is needed */
	return map_write(file, buf, size, ppos, -1,
			 &ns->projid_map, &ns->parent->projid_map);
}

static bool new_idmap_permitted(const struct file *file,
				struct user_namespace *ns, int cap_setid,
				struct uid_gid_map *new_map)
{
	const struct cred *cred = file->f_cred;
	/* Don't allow mappings that would allow anything that wouldn't
	 * be allowed without the establishment of unprivileged mappings.
	 */
	if ((new_map->nr_extents == 1) && (new_map->extent[0].count == 1) &&
	    uid_eq(ns->owner, cred->euid)) {
		u32 id = new_map->extent[0].lower_first;
		if (cap_setid == CAP_SETUID) {
			kuid_t uid = make_kuid(ns->parent, id);
			if (uid_eq(uid, cred->euid))
				return true;
		} else if (cap_setid == CAP_SETGID) {
			kgid_t gid = make_kgid(ns->parent, id);
			if (!(ns->flags & USERNS_SETGROUPS_ALLOWED) &&
			    gid_eq(gid, cred->egid))
				return true;
		}
	}

	/* Allow anyone to set a mapping that doesn't require privilege */
	if (!cap_valid(cap_setid))
		return true;

	/* Allow the specified ids if we have the appropriate capability
	 * (CAP_SETUID or CAP_SETGID) over the parent user namespace.
	 * And the opener of the id file also had the appropriate capability.
	 */
	if (ns_capable(ns->parent, cap_setid) &&
	    file_ns_capable(file, ns->parent, cap_setid))
		return true;

	return false;
}

int proc_setgroups_show(struct seq_file *seq, void *v)
{
	struct user_namespace *ns = seq->private;
	unsigned long userns_flags = ACCESS_ONCE(ns->flags);

	seq_printf(seq, "%s\n",
		   (userns_flags & USERNS_SETGROUPS_ALLOWED) ?
		   "allow" : "deny");
	return 0;
}

ssize_t proc_setgroups_write(struct file *file, const char __user *buf,
			     size_t count, loff_t *ppos)
{
	struct seq_file *seq = file->private_data;
	struct user_namespace *ns = seq->private;
	char kbuf[8], *pos;
	bool setgroups_allowed;
	ssize_t ret;

	/* Only allow a very narrow range of strings to be written */
	ret = -EINVAL;
	if ((*ppos != 0) || (count >= sizeof(kbuf)))
		goto out;

	/* What was written? */
	ret = -EFAULT;
	if (copy_from_user(kbuf, buf, count))
		goto out;
	kbuf[count] = '\0';
	pos = kbuf;

	/* What is being requested? */
	ret = -EINVAL;
	if (strncmp(pos, "allow", 5) == 0) {
		pos += 5;
		setgroups_allowed = true;
	}
	else if (strncmp(pos, "deny", 4) == 0) {
		pos += 4;
		setgroups_allowed = false;
	}
	else
		goto out;

	/* Verify there is no trailing junk on the line */
	pos = skip_spaces(pos);
	if (*pos != '\0')
		goto out;

	ret = -EPERM;
	mutex_lock(&userns_state_mutex);
	if (setgroups_allowed) {
		/* Enabling setgroups after setgroups has been disabled
		 * is not allowed.
		 */
		if (!(ns->flags & USERNS_SETGROUPS_ALLOWED))
			goto out_unlock;
	} else {
		/* Permanently disabling setgroups after setgroups has
		 * been enabled by writing the gid_map is not allowed.
		 */
		if (ns->gid_map.nr_extents != 0)
			goto out_unlock;
		ns->flags &= ~USERNS_SETGROUPS_ALLOWED;
	}
	mutex_unlock(&userns_state_mutex);

	/* Report a successful write */
	*ppos = count;
	ret = count;
out:
	return ret;
out_unlock:
	mutex_unlock(&userns_state_mutex);
	goto out;
}
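
/*
 * Usage sketch (illustrative, values are hypothetical): an unprivileged
 * process that owns a new user namespace must write "deny" here before
 * it can write gid_map, because new_idmap_permitted() only accepts an
 * unprivileged gid mapping once setgroups has been disabled:
 *
 *	echo deny > /proc/$pid/setgroups
 *	echo "0 1000 1" > /proc/$pid/gid_map   # 1000 == writer's egid
 */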

bool userns_may_setgroups(const struct user_namespace *ns)
{
	bool allowed;

	mutex_lock(&userns_state_mutex);
	/* It is not safe to use setgroups until a gid mapping in
	 * the user namespace has been established.
	 */
	allowed = ns->gid_map.nr_extents != 0;
	/* Is setgroups allowed? */
	allowed = allowed && (ns->flags & USERNS_SETGROUPS_ALLOWED);
	mutex_unlock(&userns_state_mutex);

	return allowed;
}

/*
 * Returns true if @ns is the same namespace as or a descendant of
 * @target_ns.
 */
bool current_in_userns(const struct user_namespace *target_ns)
{
	struct user_namespace *ns;
	for (ns = current_user_ns(); ns; ns = ns->parent) {
		if (ns == target_ns)
			return true;
	}
	return false;
}

static inline struct user_namespace *to_user_ns(struct ns_common *ns)
{
	return container_of(ns, struct user_namespace, ns);
}

static struct ns_common *userns_get(struct task_struct *task)
{
	struct user_namespace *user_ns;

	rcu_read_lock();
	user_ns = get_user_ns(__task_cred(task)->user_ns);
	rcu_read_unlock();

	return user_ns ? &user_ns->ns : NULL;
}

static void userns_put(struct ns_common *ns)
{
	put_user_ns(to_user_ns(ns));
}

static int userns_install(struct nsproxy *nsproxy, struct ns_common *ns)
{
	struct user_namespace *user_ns = to_user_ns(ns);
	struct cred *cred;

	/* Don't allow gaining capabilities by reentering
	 * the same user namespace.
	 */
	if (user_ns == current_user_ns())
		return -EINVAL;

	/* Tasks that share a thread group must share a user namespace */
	if (!thread_group_empty(current))
		return -EINVAL;

	if (current->fs->users != 1)
		return -EINVAL;

	if (!ns_capable(user_ns, CAP_SYS_ADMIN))
		return -EPERM;

	cred = prepare_creds();
	if (!cred)
		return -ENOMEM;

	put_user_ns(cred->user_ns);
	set_cred_user_ns(cred, get_user_ns(user_ns));

	return commit_creds(cred);
}
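
/*
 * Illustrative only: userns_install() runs when a task calls setns()
 * on a user-namespace file descriptor, e.g.
 *
 *	int fd = open("/proc/<pid>/ns/user", O_RDONLY);
 *	setns(fd, CLONE_NEWUSER);
 *
 * The caller must be single-threaded, must not share its fs_struct,
 * and needs CAP_SYS_ADMIN in the target namespace.
 */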

const struct proc_ns_operations userns_operations = {
	.name = "user",
	.type = CLONE_NEWUSER,
	.get = userns_get,
	.put = userns_put,
	.install = userns_install,
};

static __init int user_namespaces_init(void)
{
	user_ns_cachep = KMEM_CACHE(user_namespace, SLAB_PANIC);
	return 0;
}
subsys_initcall(user_namespaces_init);