/*
 * The "user cache".
 *
 * (C) Copyright 1991-2000 Linus Torvalds
 *
 * We have a per-user structure to keep track of how many
 * processes, files etc the user has claimed, in order to be
 * able to have per-user limits for system resources.
 */

#include <linux/init.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/bitops.h>
#include <linux/key.h>
#include <linux/interrupt.h>
#include <linux/export.h>
#include <linux/user_namespace.h>

/*
 * userns count is 1 for root user, 1 for init_uts_ns,
 * and 1 for... ?
 */
struct user_namespace init_user_ns = {
	.uid_map = {
		.nr_extents = 1,
		.extent[0] = {
			.first = 0,
			.lower_first = 0,
			.count = 4294967295,
		},
	},
	.gid_map = {
		.nr_extents = 1,
		.extent[0] = {
			.first = 0,
			.lower_first = 0,
			.count = 4294967295,
		},
	},
	.kref = {
		.refcount = ATOMIC_INIT(3),
	},
	.owner = GLOBAL_ROOT_UID,
	.group = GLOBAL_ROOT_GID,
};
EXPORT_SYMBOL_GPL(init_user_ns);
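
/*
 * Editor's note on the extents above: they are the identity mapping,
 * since first == lower_first == 0 and count == 4294967295 ((u32) -1),
 * so the initial namespace maps every valid uid/gid to itself.
 */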

/*
 * UID task count cache, to get fast user lookup in "alloc_uid"
 * when changing user ID's (ie setuid() and friends).
 */

#define UIDHASH_BITS	(CONFIG_BASE_SMALL ? 3 : 7)
#define UIDHASH_SZ	(1 << UIDHASH_BITS)
#define UIDHASH_MASK	(UIDHASH_SZ - 1)
#define __uidhashfn(uid)	(((uid >> UIDHASH_BITS) + uid) & UIDHASH_MASK)
#define uidhashentry(uid)	(uidhash_table + __uidhashfn((__kuid_val(uid))))
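
/*
 * Worked example (with CONFIG_BASE_SMALL=0, UIDHASH_BITS is 7, so 128
 * buckets): uid 1000 hashes to ((1000 >> 7) + 1000) & 127
 * = (7 + 1000) & 127 = 111.  Folding the high bits into the low ones
 * spreads out uids that differ only above the low UIDHASH_BITS bits.
 */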

static struct kmem_cache *uid_cachep;
struct hlist_head uidhash_table[UIDHASH_SZ];

/*
 * The uidhash_lock is mostly taken from process context, but it is
 * occasionally also taken from softirq/tasklet context, when
 * task-structs get RCU-freed. Hence all locking must be softirq-safe.
 * But free_uid() is also called with local interrupts disabled, and running
 * local_bh_enable() with local interrupts disabled is an error - we'll run
 * softirq callbacks, and they can unconditionally enable interrupts, and
 * the caller of free_uid() didn't expect that..
 */
static DEFINE_SPINLOCK(uidhash_lock);

/* root_user.__count is 1, for init task cred */
struct user_struct root_user = {
	.__count	= ATOMIC_INIT(1),
	.processes	= ATOMIC_INIT(1),
	.files		= ATOMIC_INIT(0),
	.sigpending	= ATOMIC_INIT(0),
	.locked_shm	= 0,
	.uid		= GLOBAL_ROOT_UID,
};

/*
 * These routines must be called with the uidhash spinlock held!
 */
static void uid_hash_insert(struct user_struct *up, struct hlist_head *hashent)
{
	hlist_add_head(&up->uidhash_node, hashent);
}

static void uid_hash_remove(struct user_struct *up)
{
	hlist_del_init(&up->uidhash_node);
}

static struct user_struct *uid_hash_find(kuid_t uid, struct hlist_head *hashent)
{
	struct user_struct *user;
	struct hlist_node *h;

	hlist_for_each_entry(user, h, hashent, uidhash_node) {
		if (uid_eq(user->uid, uid)) {
			atomic_inc(&user->__count);
			return user;
		}
	}

	return NULL;
}

/* IRQs are disabled and uidhash_lock is held upon function entry.
 * IRQ state (as stored in flags) is restored and uidhash_lock released
 * upon function exit.
 */
static void free_user(struct user_struct *up, unsigned long flags)
	__releases(&uidhash_lock)
{
	uid_hash_remove(up);
	spin_unlock_irqrestore(&uidhash_lock, flags);
	key_put(up->uid_keyring);
	key_put(up->session_keyring);
	kmem_cache_free(uid_cachep, up);
}

/*
 * Locate the user_struct for the passed UID.  If found, take a ref on it.
 * The caller must undo that ref with free_uid().
 *
 * If the user_struct could not be found, return NULL.
 */
struct user_struct *find_user(kuid_t uid)
{
	struct user_struct *ret;
	unsigned long flags;

	spin_lock_irqsave(&uidhash_lock, flags);
	ret = uid_hash_find(uid, uidhashentry(uid));
	spin_unlock_irqrestore(&uidhash_lock, flags);
	return ret;
}
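
/*
 * Minimal usage sketch (hypothetical caller, not part of this file):
 * the reference taken by find_user() must be dropped with free_uid()
 * once the caller is done with the structure.
 *
 *	struct user_struct *u = find_user(uid);
 *	if (u) {
 *		... use u, e.g. read atomic counters like u->processes ...
 *		free_uid(u);
 *	}
 */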

void free_uid(struct user_struct *up)
{
	unsigned long flags;

	if (!up)
		return;

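	/*
	 * atomic_dec_and_lock() takes uidhash_lock (and returns true) only
	 * when the count drops to zero, so the common path of dropping a
	 * non-final reference never touches the lock.
	 */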
	local_irq_save(flags);
	if (atomic_dec_and_lock(&up->__count, &uidhash_lock))
		free_user(up, flags);
	else
		local_irq_restore(flags);
}

struct user_struct *alloc_uid(kuid_t uid)
{
	struct hlist_head *hashent = uidhashentry(uid);
	struct user_struct *up, *new;

	spin_lock_irq(&uidhash_lock);
	up = uid_hash_find(uid, hashent);
	spin_unlock_irq(&uidhash_lock);

	if (!up) {
		new = kmem_cache_zalloc(uid_cachep, GFP_KERNEL);
		if (!new)
			goto out_unlock;

		new->uid = uid;
		atomic_set(&new->__count, 1);

		/*
		 * Before adding this, check whether we raced
		 * on adding the same user already..
		 */
		spin_lock_irq(&uidhash_lock);
		up = uid_hash_find(uid, hashent);
		if (up) {
			key_put(new->uid_keyring);
			key_put(new->session_keyring);
			kmem_cache_free(uid_cachep, new);
		} else {
			uid_hash_insert(new, hashent);
			up = new;
		}
		spin_unlock_irq(&uidhash_lock);
	}

	return up;

out_unlock:
	return NULL;
}
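
/*
 * Note on the lock/unlock/re-lock pattern in alloc_uid() above: the
 * allocation runs with uidhash_lock dropped (GFP_KERNEL may sleep), so
 * the second uid_hash_find() under the lock is needed to catch a
 * concurrent insertion of the same uid; the loser frees its copy and
 * returns the winner's entry.
 */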
196
Linus Torvalds1da177e2005-04-16 15:20:36 -0700197static int __init uid_cache_init(void)
198{
199 int n;
200
201 uid_cachep = kmem_cache_create("uid_cache", sizeof(struct user_struct),
Paul Mundt20c2df82007-07-20 10:11:58 +0900202 0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700203
204 for(n = 0; n < UIDHASH_SZ; ++n)
Eric W. Biederman7b44ab92011-11-16 23:20:58 -0800205 INIT_HLIST_HEAD(uidhash_table + n);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700206
207 /* Insert the root user immediately (init already runs as root) */
Andrew Morton3fa97c92006-01-31 16:34:26 -0800208 spin_lock_irq(&uidhash_lock);
Eric W. Biederman7b44ab92011-11-16 23:20:58 -0800209 uid_hash_insert(&root_user, uidhashentry(GLOBAL_ROOT_UID));
Andrew Morton3fa97c92006-01-31 16:34:26 -0800210 spin_unlock_irq(&uidhash_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700211
212 return 0;
213}
214
215module_init(uid_cache_init);