/*
 * linux/ipc/namespace.c
 * Copyright (C) 2006 Pavel Emelyanov <xemul@openvz.org> OpenVZ, SWsoft Inc.
 */

#include <linux/ipc.h>
#include <linux/msg.h>
#include <linux/ipc_namespace.h>
#include <linux/rcupdate.h>
#include <linux/nsproxy.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/mount.h>
#include <linux/user_namespace.h>

#include "util.h"

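/*
 * create_ipc_ns - allocate and set up a fresh ipc namespace
 * @tsk: the task creating the namespace
 * @old_ns: the ipc namespace being copied
 *
 * Sets up the message queue, semaphore, message and shared memory state,
 * announces the new namespace on the ipcns notifier chain, registers the
 * namespace on that chain, and takes a reference on the creating task's
 * user namespace.  Returns the new namespace or an ERR_PTR() on failure.
 */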
static struct ipc_namespace *create_ipc_ns(struct task_struct *tsk,
                                           struct ipc_namespace *old_ns)
{
        struct ipc_namespace *ns;
        int err;

        ns = kmalloc(sizeof(struct ipc_namespace), GFP_KERNEL);
        if (ns == NULL)
                return ERR_PTR(-ENOMEM);

        atomic_set(&ns->count, 1);
        err = mq_init_ns(ns);
        if (err) {
                kfree(ns);
                return ERR_PTR(err);
        }
        atomic_inc(&nr_ipc_ns);

        sem_init_ns(ns);
        msg_init_ns(ns);
        shm_init_ns(ns);

        /*
         * msgmni has already been computed for the new ipc ns.
         * Thus, do the ipcns creation notification before registering that
         * new ipcns in the chain.
         */
        ipcns_notify(IPCNS_CREATED);
        register_ipcns_notifier(ns);

        ns->user_ns = get_user_ns(task_cred_xxx(tsk, user)->user_ns);

        return ns;
}

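/*
 * copy_ipcs - get the ipc namespace for a clone/unshare
 * @flags: the clone flags
 * @tsk: the task whose ipc namespace is being copied
 *
 * Without CLONE_NEWIPC the existing namespace is reused and its reference
 * count is bumped; with CLONE_NEWIPC a fresh namespace is created.
 */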
struct ipc_namespace *copy_ipcs(unsigned long flags,
                                struct task_struct *tsk)
{
        struct ipc_namespace *ns = tsk->nsproxy->ipc_ns;

        if (!(flags & CLONE_NEWIPC))
                return get_ipc_ns(ns);
        return create_ipc_ns(tsk, ns);
}

/*
 * free_ipcs - free all ipcs of one type
 * @ns: the namespace to remove the ipcs from
 * @ids: the table of ipcs to free
 * @free: the function called to free each individual ipc
 *
 * Called for each kind of ipc when an ipc_namespace exits.
 */
void free_ipcs(struct ipc_namespace *ns, struct ipc_ids *ids,
               void (*free)(struct ipc_namespace *, struct kern_ipc_perm *))
{
        struct kern_ipc_perm *perm;
        int next_id;
        int total, in_use;

        down_write(&ids->rw_mutex);

        in_use = ids->in_use;

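        /*
         * The idr can be sparse, so walk the ids from 0 upwards and stop
         * once all in-use entries have been visited.  Each object is
         * locked before being handed to the type-specific free routine.
         */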
        for (total = 0, next_id = 0; total < in_use; next_id++) {
                perm = idr_find(&ids->ipcs_idr, next_id);
                if (perm == NULL)
                        continue;
                ipc_lock_by_ptr(perm);
                free(ns, perm);
                total++;
        }
        up_write(&ids->rw_mutex);
}

static void free_ipc_ns(struct ipc_namespace *ns)
{
        /*
         * Unregistering the hotplug notifier at the beginning guarantees
         * that the ipc namespace won't be freed while we are inside the
         * callback routine.  Since the blocking_notifier_chain_XXX routines
         * hold a rw lock on the notifier list, unregister_ipcns_notifier()
         * won't take the rw lock before blocking_notifier_call_chain() has
         * released the rd lock.
         */
        unregister_ipcns_notifier(ns);
        sem_exit_ns(ns);
        msg_exit_ns(ns);
        shm_exit_ns(ns);

        /*
         * Drop the user namespace reference before freeing ns itself:
         * ns->user_ns must not be read after kfree(ns).
         */
        put_user_ns(ns->user_ns);
        kfree(ns);
        atomic_dec(&nr_ipc_ns);

        /*
         * Do the ipcns removal notification after decrementing nr_ipc_ns in
         * order to have a correct value when recomputing msgmni.
         */
        ipcns_notify(IPCNS_REMOVED);
}

/*
 * put_ipc_ns - drop a reference to an ipc namespace.
 * @ns: the namespace to put
 *
 * If this is the last task in the namespace exiting, and
 * it is dropping the refcount to 0, then it can race with
 * a task in another ipc namespace but in a mount namespace
 * which has this ipcns's mqueuefs mounted, doing some operation
 * on one of the mqueuefs files.  That can raise the refcount.
 * So dropping the refcount, and raising the refcount when
 * accessing it through the VFS, are both protected by mq_lock.
 *
 * (Clearly, a task raising the refcount on its own ipc_ns
 * needn't take mq_lock since it can't race with the last task
 * in the ipcns exiting.)
 */
void put_ipc_ns(struct ipc_namespace *ns)
{
        if (atomic_dec_and_lock(&ns->count, &mq_lock)) {
                mq_clear_sbinfo(ns);
                spin_unlock(&mq_lock);
                mq_put_mnt(ns);
                free_ipc_ns(ns);
        }
}