/*
 * The "user cache".
 *
 * (C) Copyright 1991-2000 Linus Torvalds
 *
 * We have a per-user structure to keep track of how many
 * processes, files etc the user has claimed, in order to be
 * able to have per-user limits for system resources.
 */

#include <linux/init.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/bitops.h>
#include <linux/key.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/user_namespace.h>

/*
 * UID task count cache, to get fast user lookup in "alloc_uid"
 * when changing user ID's (ie setuid() and friends).
 */

#define UIDHASH_MASK		(UIDHASH_SZ - 1)
#define __uidhashfn(uid)	(((uid >> UIDHASH_BITS) + uid) & UIDHASH_MASK)
#define uidhashentry(ns, uid)	((ns)->uidhash_table + __uidhashfn((uid)))

static struct kmem_cache *uid_cachep;

/*
 * The uidhash_lock is mostly taken from process context, but it is
 * occasionally also taken from softirq/tasklet context, when
 * task-structs get RCU-freed. Hence all locking must be softirq-safe.
 * But free_uid() is also called with local interrupts disabled, and running
 * local_bh_enable() with local interrupts disabled is an error - we'll run
 * softirq callbacks, and they can unconditionally enable interrupts, and
 * the caller of free_uid() didn't expect that..
 */
static DEFINE_SPINLOCK(uidhash_lock);

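/*
 * Statically allocated user_struct for UID 0.  It is hashed into the
 * initial user namespace by uid_cache_init(), so it is usable before
 * the uid slab cache exists.
 */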
struct user_struct root_user = {
	.__count	= ATOMIC_INIT(1),
	.processes	= ATOMIC_INIT(1),
	.files		= ATOMIC_INIT(0),
	.sigpending	= ATOMIC_INIT(0),
	.locked_shm	= 0,
#ifdef CONFIG_KEYS
	.uid_keyring	= &root_user_keyring,
	.session_keyring = &root_session_keyring,
#endif
#ifdef CONFIG_FAIR_USER_SCHED
	.tg		= &init_task_group,
#endif
};

/*
 * These routines must be called with the uidhash spinlock held!
 */
static void uid_hash_insert(struct user_struct *up, struct hlist_head *hashent)
{
	hlist_add_head(&up->uidhash_node, hashent);
}

static void uid_hash_remove(struct user_struct *up)
{
	hlist_del_init(&up->uidhash_node);
}

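/* Look up @uid in the hash chain; on a hit, take a reference and return
 * the user_struct, otherwise return NULL.
 */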
static struct user_struct *uid_hash_find(uid_t uid, struct hlist_head *hashent)
{
	struct user_struct *user;
	struct hlist_node *h;

	hlist_for_each_entry(user, h, hashent, uidhash_node) {
		if (user->uid == uid) {
			atomic_inc(&user->__count);
			return user;
		}
	}

	return NULL;
}

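/*
 * With CONFIG_FAIR_USER_SCHED each user owns its own scheduler task group,
 * so CPU time is divided between users first and between each user's tasks
 * second.  These helpers create, destroy and switch between those groups.
 */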
#ifdef CONFIG_FAIR_USER_SCHED

static void sched_destroy_user(struct user_struct *up)
{
	sched_destroy_group(up->tg);
}

static int sched_create_user(struct user_struct *up)
{
	int rc = 0;

	up->tg = sched_create_group();
	if (IS_ERR(up->tg))
		rc = -ENOMEM;

	return rc;
}

static void sched_switch_user(struct task_struct *p)
{
	sched_move_task(p);
}

#else	/* CONFIG_FAIR_USER_SCHED */

static void sched_destroy_user(struct user_struct *up) { }
static int sched_create_user(struct user_struct *up) { return 0; }
static void sched_switch_user(struct task_struct *p) { }

#endif	/* CONFIG_FAIR_USER_SCHED */

#if defined(CONFIG_FAIR_USER_SCHED) && defined(CONFIG_SYSFS)

static struct kset *uids_kset; /* represents the /sys/kernel/uids/ directory */
static DEFINE_MUTEX(uids_mutex);

static inline void uids_mutex_lock(void)
{
	mutex_lock(&uids_mutex);
}

static inline void uids_mutex_unlock(void)
{
	mutex_unlock(&uids_mutex);
}

/* uid directory attributes */
static ssize_t cpu_shares_show(struct kobject *kobj,
			       struct kobj_attribute *attr,
			       char *buf)
{
	struct user_struct *up = container_of(kobj, struct user_struct, kobj);

	return sprintf(buf, "%lu\n", sched_group_shares(up->tg));
}

static ssize_t cpu_shares_store(struct kobject *kobj,
				struct kobj_attribute *attr,
				const char *buf, size_t size)
{
	struct user_struct *up = container_of(kobj, struct user_struct, kobj);
	unsigned long shares;
	int rc;

	sscanf(buf, "%lu", &shares);

	rc = sched_group_set_shares(up->tg, shares);

	return (rc ? rc : size);
}

static struct kobj_attribute cpu_share_attr =
	__ATTR(cpu_share, 0644, cpu_shares_show, cpu_shares_store);

/* default attributes per uid directory */
static struct attribute *uids_attributes[] = {
	&cpu_share_attr.attr,
	NULL
};

/* the lifetime of user_struct is not managed by the core (now) */
static void uids_release(struct kobject *kobj)
{
	return;
}

static struct kobj_type uids_ktype = {
	.sysfs_ops = &kobj_sysfs_ops,
	.default_attrs = uids_attributes,
	.release = uids_release,
};

/* create /sys/kernel/uids/<uid>/cpu_share file for this user */
static int uids_user_create(struct user_struct *up)
{
	struct kobject *kobj = &up->kobj;
	int error;

	memset(kobj, 0, sizeof(struct kobject));
	kobj->kset = uids_kset;
	error = kobject_init_and_add(kobj, &uids_ktype, NULL, "%d", up->uid);
	if (error) {
		kobject_put(kobj);
		goto done;
	}

	kobject_uevent(kobj, KOBJ_ADD);
done:
	return error;
}

/* create these entries in sysfs:
 *	"/sys/kernel/uids" directory
 *	"/sys/kernel/uids/0" directory (for root user)
 *	"/sys/kernel/uids/0/cpu_share" file (for root user)
 */
int __init uids_sysfs_init(void)
{
	uids_kset = kset_create_and_add("uids", NULL, kernel_kobj);
	if (!uids_kset)
		return -ENOMEM;

	return uids_user_create(&root_user);
}

/* work function to remove sysfs directory for a user and free up
 * corresponding structures.
 */
static void remove_user_sysfs_dir(struct work_struct *w)
{
	struct user_struct *up = container_of(w, struct user_struct, work);
	unsigned long flags;
	int remove_user = 0;

	/* Make uid_hash_remove() + sysfs_remove_file() + kobject_del()
	 * atomic.
	 */
	uids_mutex_lock();

	local_irq_save(flags);

	if (atomic_dec_and_lock(&up->__count, &uidhash_lock)) {
		uid_hash_remove(up);
		remove_user = 1;
		spin_unlock_irqrestore(&uidhash_lock, flags);
	} else {
		local_irq_restore(flags);
	}

	if (!remove_user)
		goto done;

	kobject_uevent(&up->kobj, KOBJ_REMOVE);
	kobject_del(&up->kobj);
	kobject_put(&up->kobj);

	sched_destroy_user(up);
	key_put(up->uid_keyring);
	key_put(up->session_keyring);
	kmem_cache_free(uid_cachep, up);

done:
	uids_mutex_unlock();
}

/* IRQs are disabled and uidhash_lock is held upon function entry.
 * IRQ state (as stored in flags) is restored and uidhash_lock released
 * upon function exit.
 */
static inline void free_user(struct user_struct *up, unsigned long flags)
{
	/* restore back the count */
	atomic_inc(&up->__count);
	spin_unlock_irqrestore(&uidhash_lock, flags);

	INIT_WORK(&up->work, remove_user_sysfs_dir);
	schedule_work(&up->work);
}

#else	/* CONFIG_FAIR_USER_SCHED && CONFIG_SYSFS */

int uids_sysfs_init(void) { return 0; }
static inline int uids_user_create(struct user_struct *up) { return 0; }
static inline void uids_mutex_lock(void) { }
static inline void uids_mutex_unlock(void) { }

/* IRQs are disabled and uidhash_lock is held upon function entry.
 * IRQ state (as stored in flags) is restored and uidhash_lock released
 * upon function exit.
 */
static inline void free_user(struct user_struct *up, unsigned long flags)
{
	uid_hash_remove(up);
	spin_unlock_irqrestore(&uidhash_lock, flags);
	sched_destroy_user(up);
	key_put(up->uid_keyring);
	key_put(up->session_keyring);
	kmem_cache_free(uid_cachep, up);
}

#endif

/*
 * Locate the user_struct for the passed UID. If found, take a ref on it. The
 * caller must undo that ref with free_uid().
 *
 * If the user_struct could not be found, return NULL.
 */
struct user_struct *find_user(uid_t uid)
{
	struct user_struct *ret;
	unsigned long flags;
	struct user_namespace *ns = current->nsproxy->user_ns;

	spin_lock_irqsave(&uidhash_lock, flags);
	ret = uid_hash_find(uid, uidhashentry(ns, uid));
	spin_unlock_irqrestore(&uidhash_lock, flags);
	return ret;
}

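/*
 * Drop a reference on @up.  atomic_dec_and_lock() only takes uidhash_lock
 * when this was the last reference, in which case free_user() unhashes and
 * frees the structure (or defers that to a workqueue when a sysfs directory
 * also has to be removed).
 */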
void free_uid(struct user_struct *up)
{
	unsigned long flags;

	if (!up)
		return;

	local_irq_save(flags);
	if (atomic_dec_and_lock(&up->__count, &uidhash_lock))
		free_user(up, flags);
	else
		local_irq_restore(flags);
}

struct user_struct *alloc_uid(struct user_namespace *ns, uid_t uid)
{
	struct hlist_head *hashent = uidhashentry(ns, uid);
	struct user_struct *up, *new;

	/* Make uid_hash_find() + uids_user_create() + uid_hash_insert()
	 * atomic.
	 */
	uids_mutex_lock();

	spin_lock_irq(&uidhash_lock);
	up = uid_hash_find(uid, hashent);
	spin_unlock_irq(&uidhash_lock);

	if (!up) {
		new = kmem_cache_alloc(uid_cachep, GFP_KERNEL);
		if (!new)
			goto out_unlock;

		new->uid = uid;
		atomic_set(&new->__count, 1);
		atomic_set(&new->processes, 0);
		atomic_set(&new->files, 0);
		atomic_set(&new->sigpending, 0);
#ifdef CONFIG_INOTIFY_USER
		atomic_set(&new->inotify_watches, 0);
		atomic_set(&new->inotify_devs, 0);
#endif
#ifdef CONFIG_POSIX_MQUEUE
		new->mq_bytes = 0;
#endif
		new->locked_shm = 0;

		if (alloc_uid_keyring(new, current) < 0)
			goto out_free_user;

		if (sched_create_user(new) < 0)
			goto out_put_keys;

		if (uids_user_create(new))
			goto out_destroy_sched;

		/*
		 * Before adding this, check whether we raced
		 * on adding the same user already..
		 */
		spin_lock_irq(&uidhash_lock);
		up = uid_hash_find(uid, hashent);
		if (up) {
			/* This case is not possible when CONFIG_FAIR_USER_SCHED
			 * is defined, since we serialize alloc_uid() using
			 * uids_mutex. Hence no need to call
			 * sched_destroy_user() or remove_user_sysfs_dir().
			 */
			key_put(new->uid_keyring);
			key_put(new->session_keyring);
			kmem_cache_free(uid_cachep, new);
		} else {
			uid_hash_insert(new, hashent);
			up = new;
		}
		spin_unlock_irq(&uidhash_lock);

	}

	uids_mutex_unlock();

	return up;

out_destroy_sched:
	sched_destroy_user(new);
out_put_keys:
	key_put(new->uid_keyring);
	key_put(new->session_keyring);
out_free_user:
	kmem_cache_free(uid_cachep, new);
out_unlock:
	uids_mutex_unlock();
	return NULL;
}

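/*
 * Switch "current" over to @new_user: move the per-user process count,
 * install the new user's keyrings, move the task into the new user's
 * scheduler group, and drop the reference on the old user_struct once any
 * concurrent __sigqueue_alloc() has left its critical region.
 */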
void switch_uid(struct user_struct *new_user)
{
	struct user_struct *old_user;

	/* What if a process setreuid()'s and this brings the
	 * new uid over his NPROC rlimit? We can check this now
	 * cheaply with the new uid cache, so if it matters
	 * we should be checking for it. -DaveM
	 */
	old_user = current->user;
	atomic_inc(&new_user->processes);
	atomic_dec(&old_user->processes);
	switch_uid_keyring(new_user);
	current->user = new_user;
	sched_switch_user(current);

	/*
	 * We need to synchronize with __sigqueue_alloc()
	 * doing a get_uid(p->user).. If that saw the old
	 * user value, we need to wait until it has exited
	 * its critical region before we can free the old
	 * structure.
	 */
	smp_mb();
	spin_unlock_wait(&current->sighand->siglock);

	free_uid(old_user);
	suid_keys(current);
}

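/*
 * Unhash every user_struct in @ns so that the outstanding references can be
 * dropped by free_uid(), then release the namespace's reference on its root
 * user.
 */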
void release_uids(struct user_namespace *ns)
{
	int i;
	unsigned long flags;
	struct hlist_head *head;
	struct hlist_node *nd;

	spin_lock_irqsave(&uidhash_lock, flags);
	/*
	 * collapse the chains so that the user_structs stay alive, but are
	 * no longer hashed. subsequent free_uid() calls will free them.
	 */
	for (i = 0; i < UIDHASH_SZ; i++) {
		head = ns->uidhash_table + i;
		while (!hlist_empty(head)) {
			nd = head->first;
			hlist_del_init(nd);
		}
	}
	spin_unlock_irqrestore(&uidhash_lock, flags);

	free_uid(ns->root_user);
}

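/*
 * Boot-time setup: create the user_struct slab cache, initialize the
 * initial namespace's hash table and pre-insert the statically allocated
 * root user.
 */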
static int __init uid_cache_init(void)
{
	int n;

	uid_cachep = kmem_cache_create("uid_cache", sizeof(struct user_struct),
			0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);

	for (n = 0; n < UIDHASH_SZ; ++n)
		INIT_HLIST_HEAD(init_user_ns.uidhash_table + n);

	/* Insert the root user immediately (init already runs as root) */
	spin_lock_irq(&uidhash_lock);
	uid_hash_insert(&root_user, uidhashentry(&init_user_ns, 0));
	spin_unlock_irq(&uidhash_lock);

	return 0;
}

module_init(uid_cache_init);