/*
 * Generic pidhash and scalable, time-bounded PID allocator
 *
 * (C) 2002-2003 Nadia Yvette Chambers, IBM
 * (C) 2004 Nadia Yvette Chambers, Oracle
 * (C) 2002-2004 Ingo Molnar, Red Hat
 *
 * pid-structures are backing objects for tasks sharing a given ID to chain
 * against. There is very little to them aside from hashing them and
 * parking tasks using given IDs on a list.
 *
 * The hash is always changed with the tasklist_lock write-acquired,
 * and the hash is only accessed with the tasklist_lock at least
 * read-acquired, so there's no additional SMP locking needed here.
 *
 * We have a list of bitmap pages, which bitmaps represent the PID space.
 * Allocating and freeing PIDs is completely lockless. The worst-case
 * allocation scenario, when all but one out of 1 million possible PIDs
 * are already allocated, requires scanning 32 list entries and at most
 * PAGE_SIZE bytes. The typical fastpath is a single successful set_bit.
 * Freeing is O(1).
 *
 * Pid namespaces:
 *    (C) 2007 Pavel Emelyanov <xemul@openvz.org>, OpenVZ, SWsoft Inc.
 *    (C) 2007 Sukadev Bhattiprolu <sukadev@us.ibm.com>, IBM
 *    Many thanks to Oleg Nesterov for comments and help
 *
 */

#include <linux/mm.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/rculist.h>
#include <linux/bootmem.h>
#include <linux/hash.h>
#include <linux/pid_namespace.h>
#include <linux/init_task.h>
#include <linux/syscalls.h>
#include <linux/proc_ns.h>
#include <linux/proc_fs.h>
#include <linux/anon_inodes.h>

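/*
 * The pid hash mixes the pid number with the namespace pointer, so the
 * same numeric pid in two different namespaces (usually) lands in
 * different buckets; find_pid_ns() below compares both fields while
 * walking the chain. A sketch of the bucket choice:
 *
 *	struct hlist_head *bucket = &pid_hash[pid_hashfn(nr, ns)];
 */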
#define pid_hashfn(nr, ns)	\
	hash_long((unsigned long)nr + (unsigned long)ns, pidhash_shift)
static struct hlist_head *pid_hash;
static unsigned int pidhash_shift = 4;
struct pid init_struct_pid = INIT_STRUCT_PID;

int pid_max = PID_MAX_DEFAULT;

#define RESERVED_PIDS		300

int pid_max_min = RESERVED_PIDS + 1;
int pid_max_max = PID_MAX_LIMIT;

static inline int mk_pid(struct pid_namespace *pid_ns,
		struct pidmap *map, int off)
{
	return (map - pid_ns->pidmap)*BITS_PER_PAGE + off;
}
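/*
 * Example, assuming 4 KiB pages (BITS_PER_PAGE == 32768): an offset of
 * 5 into the second bitmap page (map == &pid_ns->pidmap[1]) yields
 * pid 32768 + 5 = 32773. free_pidmap() below inverts this mapping.
 */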

#define find_next_offset(map, off)					\
		find_next_zero_bit((map)->page, BITS_PER_PAGE, off)

/*
 * PID-map pages start out as NULL, they get allocated upon
 * first use and are never deallocated. This way a low pid_max
 * value does not cause lots of bitmaps to be allocated, but
 * the scheme scales up to 4 million PIDs at runtime.
 */
struct pid_namespace init_pid_ns = {
	.kref = {
		.refcount       = ATOMIC_INIT(2),
	},
	.pidmap = {
		[ 0 ... PIDMAP_ENTRIES-1] = { ATOMIC_INIT(BITS_PER_PAGE), NULL }
	},
	.last_pid = 0,
	.nr_hashed = PIDNS_HASH_ADDING,
	.level = 0,
	.child_reaper = &init_task,
	.user_ns = &init_user_ns,
	.ns.inum = PROC_PID_INIT_INO,
#ifdef CONFIG_PID_NS
	.ns.ops = &pidns_operations,
#endif
};
EXPORT_SYMBOL_GPL(init_pid_ns);

/*
 * Note: disable interrupts while the pidmap_lock is held as an
 * interrupt might come in and do read_lock(&tasklist_lock).
 *
 * If we don't disable interrupts there is a nasty deadlock between
 * detach_pid()->free_pid() and another cpu that does
 * spin_lock(&pidmap_lock) followed by an interrupt routine that does
 * read_lock(&tasklist_lock);
 *
 * After we clean up the tasklist_lock and know there are no
 * irq handlers that take it we can leave the interrupts enabled.
 * For now it is easier to be safe than to prove it can't happen.
 */

static __cacheline_aligned_in_smp DEFINE_SPINLOCK(pidmap_lock);

static void free_pidmap(struct upid *upid)
{
	int nr = upid->nr;
	struct pidmap *map = upid->ns->pidmap + nr / BITS_PER_PAGE;
	int offset = nr & BITS_PER_PAGE_MASK;

	clear_bit(offset, map->page);
	atomic_inc(&map->nr_free);
}
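/*
 * Note that this only clears the bit and bumps the free count; the
 * bitmap page itself is never freed here (see the comment above
 * init_pid_ns), so a once-used page stays allocated for the lifetime
 * of the namespace.
 */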

/*
 * If we started walking pids at 'base', is 'a' seen before 'b'?
 */
static int pid_before(int base, int a, int b)
{
	/*
	 * This is the same as saying
	 *
	 * (a - base + MAXUINT) % MAXUINT < (b - base + MAXUINT) % MAXUINT
	 * and that mapping orders 'a' and 'b' with respect to 'base'.
	 */
	return (unsigned)(a - base) < (unsigned)(b - base);
}
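/*
 * Worked example with 32-bit unsigned arithmetic: with pid_max == 32768,
 * suppose we started at base == 32000; one walker found pid 32500 and a
 * second walker wrapped around and found pid 400. Then:
 *
 *	pid_before(32000, 32500, 400)
 *		== ((unsigned)(32500 - 32000) < (unsigned)(400 - 32000))
 *		== (500 < 4294935696) == 1
 *
 * so 32500 is seen "before" the wrapped-around 400, and set_last_pid()
 * below lets the later value (400) win.
 */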

/*
 * We might be racing with someone else trying to set pid_ns->last_pid
 * at the pid allocation time (there's also a sysctl for this, but racing
 * with this one is OK, see comment in kernel/pid_namespace.c about it).
 * We want the winner to have the "later" value, because if the
 * "earlier" value prevails, then a pid may get reused immediately.
 *
 * Since pids rollover, it is not sufficient to just pick the bigger
 * value. We have to consider where we started counting from.
 *
 * 'base' is the value of pid_ns->last_pid that we observed when
 * we started looking for a pid.
 *
 * 'pid' is the pid that we eventually found.
 */
static void set_last_pid(struct pid_namespace *pid_ns, int base, int pid)
{
	int prev;
	int last_write = base;
	do {
		prev = last_write;
		last_write = cmpxchg(&pid_ns->last_pid, prev, pid);
	} while ((prev != last_write) && (pid_before(base, last_write, pid)));
}
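/*
 * The cmpxchg() loop above keeps retrying only while (a) another CPU won
 * the race (prev != last_write) and (b) the value it wrote is still
 * "earlier" than ours relative to base; once a later value is in place
 * we stop, so the winner is always the furthest-along pid.
 */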

static int alloc_pidmap(struct pid_namespace *pid_ns)
{
	int i, offset, max_scan, pid, last = pid_ns->last_pid;
	struct pidmap *map;

	pid = last + 1;
	if (pid >= pid_max)
		pid = RESERVED_PIDS;
	offset = pid & BITS_PER_PAGE_MASK;
	map = &pid_ns->pidmap[pid/BITS_PER_PAGE];
	/*
	 * If last_pid points into the middle of the map->page we
	 * want to scan this bitmap block twice, the second time
	 * we start with offset == 0 (or RESERVED_PIDS).
	 */
	max_scan = DIV_ROUND_UP(pid_max, BITS_PER_PAGE) - !offset;
	for (i = 0; i <= max_scan; ++i) {
		if (unlikely(!map->page)) {
			void *page = kzalloc(PAGE_SIZE, GFP_KERNEL);
			/*
			 * Free the page if someone raced with us
			 * installing it:
			 */
			spin_lock_irq(&pidmap_lock);
			if (!map->page) {
				map->page = page;
				page = NULL;
			}
			spin_unlock_irq(&pidmap_lock);
			kfree(page);
			if (unlikely(!map->page))
				return -ENOMEM;
		}
		if (likely(atomic_read(&map->nr_free))) {
			for ( ; ; ) {
				if (!test_and_set_bit(offset, map->page)) {
					atomic_dec(&map->nr_free);
					set_last_pid(pid_ns, last, pid);
					return pid;
				}
				offset = find_next_offset(map, offset);
				if (offset >= BITS_PER_PAGE)
					break;
				pid = mk_pid(pid_ns, map, offset);
				if (pid >= pid_max)
					break;
			}
		}
		if (map < &pid_ns->pidmap[(pid_max-1)/BITS_PER_PAGE]) {
			++map;
			offset = 0;
		} else {
			map = &pid_ns->pidmap[0];
			offset = RESERVED_PIDS;
			if (unlikely(last == offset))
				break;
		}
		pid = mk_pid(pid_ns, map, offset);
	}
	return -EAGAIN;
}

int next_pidmap(struct pid_namespace *pid_ns, unsigned int last)
{
	int offset;
	struct pidmap *map, *end;

	if (last >= PID_MAX_LIMIT)
		return -1;

	offset = (last + 1) & BITS_PER_PAGE_MASK;
	map = &pid_ns->pidmap[(last + 1)/BITS_PER_PAGE];
	end = &pid_ns->pidmap[PIDMAP_ENTRIES];
	for (; map < end; map++, offset = 0) {
		if (unlikely(!map->page))
			continue;
		offset = find_next_bit((map)->page, BITS_PER_PAGE, offset);
		if (offset < BITS_PER_PAGE)
			return mk_pid(pid_ns, map, offset);
	}
	return -1;
}
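/*
 * For example, next_pidmap(ns, 0) returns the smallest allocated pid
 * greater than zero in the namespace, or -1 if there is none.
 * find_ge_pid() below uses this to skip over runs of unallocated pids.
 */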

void put_pid(struct pid *pid)
{
	struct pid_namespace *ns;

	if (!pid)
		return;

	ns = pid->numbers[pid->level].ns;
	if ((atomic_read(&pid->count) == 1) ||
	     atomic_dec_and_test(&pid->count)) {
		kmem_cache_free(ns->pid_cachep, pid);
		put_pid_ns(ns);
	}
}
EXPORT_SYMBOL_GPL(put_pid);
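/*
 * The atomic_read(&pid->count) == 1 check above is a fast path: if the
 * caller holds the last reference, the decrement cannot race with
 * another put, so the atomic RMW can be skipped and the pid freed
 * right away.
 */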

static void delayed_put_pid(struct rcu_head *rhp)
{
	struct pid *pid = container_of(rhp, struct pid, rcu);
	put_pid(pid);
}

void free_pid(struct pid *pid)
{
	/* We can be called with write_lock_irq(&tasklist_lock) held */
	int i;
	unsigned long flags;

	spin_lock_irqsave(&pidmap_lock, flags);
	for (i = 0; i <= pid->level; i++) {
		struct upid *upid = pid->numbers + i;
		struct pid_namespace *ns = upid->ns;
		hlist_del_rcu(&upid->pid_chain);
		switch (--ns->nr_hashed) {
		case 2:
		case 1:
			/* When all that is left in the pid namespace
			 * is the reaper, wake up the reaper. The reaper
			 * may be sleeping in zap_pid_ns_processes().
			 */
			wake_up_process(ns->child_reaper);
			break;
		case PIDNS_HASH_ADDING:
			/* Handle a fork failure of the first process */
			WARN_ON(ns->child_reaper);
			ns->nr_hashed = 0;
			/* fall through */
		case 0:
			schedule_work(&ns->proc_work);
			break;
		}
	}
	spin_unlock_irqrestore(&pidmap_lock, flags);

	for (i = 0; i <= pid->level; i++)
		free_pidmap(pid->numbers + i);

	call_rcu(&pid->rcu, delayed_put_pid);
}


struct pid *alloc_pid(struct pid_namespace *ns)
{
	struct pid *pid;
	enum pid_type type;
	int i, nr;
	struct pid_namespace *tmp;
	struct upid *upid;
	int retval = -ENOMEM;

	pid = kmem_cache_alloc(ns->pid_cachep, GFP_KERNEL);
	if (!pid)
		return ERR_PTR(retval);

	tmp = ns;
	pid->level = ns->level;
	for (i = ns->level; i >= 0; i--) {
		nr = alloc_pidmap(tmp);
		if (nr < 0) {
			retval = nr;
			goto out_free;
		}

		pid->numbers[i].nr = nr;
		pid->numbers[i].ns = tmp;
		tmp = tmp->parent;
	}

	if (unlikely(is_child_reaper(pid))) {
		if (pid_ns_prepare_proc(ns)) {
			disable_pid_allocation(ns);
			goto out_free;
		}
	}

	get_pid_ns(ns);
	atomic_set(&pid->count, 1);
	for (type = 0; type < PIDTYPE_MAX; ++type)
		INIT_HLIST_HEAD(&pid->tasks[type]);

	init_waitqueue_head(&pid->wait_pidfd);

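	/*
	 * Make the new pid visible: while holding pidmap_lock, check
	 * that the namespace still accepts new pids (PIDNS_HASH_ADDING)
	 * and then hash one upid per level, starting at the most nested
	 * namespace and walking out to the root.
	 */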
	upid = pid->numbers + ns->level;
	spin_lock_irq(&pidmap_lock);
	if (!(ns->nr_hashed & PIDNS_HASH_ADDING))
		goto out_unlock;
	for ( ; upid >= pid->numbers; --upid) {
		hlist_add_head_rcu(&upid->pid_chain,
				&pid_hash[pid_hashfn(upid->nr, upid->ns)]);
		upid->ns->nr_hashed++;
	}
	spin_unlock_irq(&pidmap_lock);

	return pid;

out_unlock:
	spin_unlock_irq(&pidmap_lock);
	put_pid_ns(ns);

out_free:
	while (++i <= ns->level)
		free_pidmap(pid->numbers + i);

	kmem_cache_free(ns->pid_cachep, pid);
	return ERR_PTR(retval);
}

void disable_pid_allocation(struct pid_namespace *ns)
{
	spin_lock_irq(&pidmap_lock);
	ns->nr_hashed &= ~PIDNS_HASH_ADDING;
	spin_unlock_irq(&pidmap_lock);
}

struct pid *find_pid_ns(int nr, struct pid_namespace *ns)
{
	struct upid *pnr;

	hlist_for_each_entry_rcu(pnr,
			&pid_hash[pid_hashfn(nr, ns)], pid_chain)
		if (pnr->nr == nr && pnr->ns == ns)
			return container_of(pnr, struct pid,
					numbers[ns->level]);

	return NULL;
}
EXPORT_SYMBOL_GPL(find_pid_ns);

struct pid *find_vpid(int nr)
{
	return find_pid_ns(nr, task_active_pid_ns(current));
}
EXPORT_SYMBOL_GPL(find_vpid);

/*
 * attach_pid() must be called with the tasklist_lock write-held.
 */
void attach_pid(struct task_struct *task, enum pid_type type)
{
	struct pid_link *link = &task->pids[type];
	hlist_add_head_rcu(&link->node, &link->pid->tasks[type]);
}

static void __change_pid(struct task_struct *task, enum pid_type type,
			struct pid *new)
{
	struct pid_link *link;
	struct pid *pid;
	int tmp;

	link = &task->pids[type];
	pid = link->pid;

	hlist_del_rcu(&link->node);
	link->pid = new;

	for (tmp = PIDTYPE_MAX; --tmp >= 0; )
		if (!hlist_empty(&pid->tasks[tmp]))
			return;

	free_pid(pid);
}

void detach_pid(struct task_struct *task, enum pid_type type)
{
	__change_pid(task, type, NULL);
}

void change_pid(struct task_struct *task, enum pid_type type,
		struct pid *pid)
{
	__change_pid(task, type, pid);
	attach_pid(task, type);
}

/* transfer_pid is an optimization of attach_pid(new), detach_pid(old) */
void transfer_pid(struct task_struct *old, struct task_struct *new,
			   enum pid_type type)
{
	new->pids[type].pid = old->pids[type].pid;
	hlist_replace_rcu(&old->pids[type].node, &new->pids[type].node);
}

struct task_struct *pid_task(struct pid *pid, enum pid_type type)
{
	struct task_struct *result = NULL;
	if (pid) {
		struct hlist_node *first;
		first = rcu_dereference_check(hlist_first_rcu(&pid->tasks[type]),
					      lockdep_tasklist_lock_is_held());
		if (first)
			result = hlist_entry(first, struct task_struct, pids[(type)].node);
	}
	return result;
}
EXPORT_SYMBOL(pid_task);

/*
 * Must be called under rcu_read_lock().
 */
struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns)
{
	RCU_LOCKDEP_WARN(!rcu_read_lock_held(),
			 "find_task_by_pid_ns() needs rcu_read_lock() protection");
	return pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
}

struct task_struct *find_task_by_vpid(pid_t vnr)
{
	return find_task_by_pid_ns(vnr, task_active_pid_ns(current));
}

struct pid *get_task_pid(struct task_struct *task, enum pid_type type)
{
	struct pid *pid;
	rcu_read_lock();
	if (type != PIDTYPE_PID)
		task = task->group_leader;
	pid = get_pid(rcu_dereference(task->pids[type].pid));
	rcu_read_unlock();
	return pid;
}
EXPORT_SYMBOL_GPL(get_task_pid);

struct task_struct *get_pid_task(struct pid *pid, enum pid_type type)
{
	struct task_struct *result;
	rcu_read_lock();
	result = pid_task(pid, type);
	if (result)
		get_task_struct(result);
	rcu_read_unlock();
	return result;
}
EXPORT_SYMBOL_GPL(get_pid_task);

struct pid *find_get_pid(pid_t nr)
{
	struct pid *pid;

	rcu_read_lock();
	pid = get_pid(find_vpid(nr));
	rcu_read_unlock();

	return pid;
}
EXPORT_SYMBOL_GPL(find_get_pid);

pid_t pid_nr_ns(struct pid *pid, struct pid_namespace *ns)
{
	struct upid *upid;
	pid_t nr = 0;

	if (pid && ns->level <= pid->level) {
		upid = &pid->numbers[ns->level];
		if (upid->ns == ns)
			nr = upid->nr;
	}
	return nr;
}
EXPORT_SYMBOL_GPL(pid_nr_ns);
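/*
 * Example: for a task one namespace level below the init namespace,
 * pid->level == 1, so pid_nr_ns(pid, &init_pid_ns) yields the global
 * pid number, pid_nr_ns(pid, child_ns) yields the in-namespace number,
 * and any deeper or unrelated namespace yields 0.
 */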

pid_t pid_vnr(struct pid *pid)
{
	return pid_nr_ns(pid, task_active_pid_ns(current));
}
EXPORT_SYMBOL_GPL(pid_vnr);

pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type,
			struct pid_namespace *ns)
{
	pid_t nr = 0;

	rcu_read_lock();
	if (!ns)
		ns = task_active_pid_ns(current);
	if (likely(pid_alive(task))) {
		if (type != PIDTYPE_PID) {
			if (type == __PIDTYPE_TGID)
				type = PIDTYPE_PID;
			task = task->group_leader;
		}
		nr = pid_nr_ns(rcu_dereference(task->pids[type].pid), ns);
	}
	rcu_read_unlock();

	return nr;
}
EXPORT_SYMBOL(__task_pid_nr_ns);

struct pid_namespace *task_active_pid_ns(struct task_struct *tsk)
{
	return ns_of_pid(task_pid(tsk));
}
EXPORT_SYMBOL_GPL(task_active_pid_ns);

/*
 * Used by proc to find the first pid that is greater than or equal to nr.
 *
 * If there is a pid at nr this function is exactly the same as find_pid_ns.
 */
struct pid *find_ge_pid(int nr, struct pid_namespace *ns)
{
	struct pid *pid;

	do {
		pid = find_pid_ns(nr, ns);
		if (pid)
			break;
		nr = next_pidmap(ns, nr);
	} while (nr > 0);

	return pid;
}
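/*
 * Sketch of a readdir-style walk over every pid in a namespace
 * (hypothetical caller; roughly how proc iterates). It must run under
 * rcu_read_lock(), since find_pid_ns() is used underneath:
 *
 *	int nr = 0;
 *	struct pid *pid;
 *
 *	while ((pid = find_ge_pid(nr, ns)) != NULL) {
 *		do_something(pid);
 *		nr = pid_nr_ns(pid, ns) + 1;
 *	}
 */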

/**
 * pidfd_create() - Create a new pid file descriptor.
 *
 * @pid:  struct pid that the pidfd will reference
 *
 * This creates a new pid file descriptor with the O_CLOEXEC flag set.
 *
 * Note that this function can only be called after the fd table has
 * been unshared to avoid leaking the pidfd to the new process.
 *
 * Return: On success, a cloexec pidfd is returned.
 *         On error, a negative errno number will be returned.
 */
static int pidfd_create(struct pid *pid)
{
	int fd;

	fd = anon_inode_getfd("[pidfd]", &pidfd_fops, get_pid(pid),
			      O_RDWR | O_CLOEXEC);
	if (fd < 0)
		put_pid(pid);

	return fd;
}

/**
 * pidfd_open() - Open new pid file descriptor.
 *
 * @pid:   pid for which to retrieve a pidfd
 * @flags: flags to pass
 *
 * This creates a new pid file descriptor with the O_CLOEXEC flag set for
 * the process identified by @pid. Currently, the process identified by
 * @pid must be a thread-group leader. This restriction currently exists
 * for all aspects of pidfds including pidfd creation (CLONE_PIDFD cannot
 * be used with CLONE_THREAD) and pidfd polling (only supports thread group
 * leaders).
 *
 * Return: On success, a cloexec pidfd is returned.
 *         On error, a negative errno number will be returned.
 */
SYSCALL_DEFINE2(pidfd_open, pid_t, pid, unsigned int, flags)
{
	int fd, ret;
	struct pid *p;
	struct task_struct *tsk;

	if (flags)
		return -EINVAL;

	if (pid <= 0)
		return -EINVAL;

	p = find_get_pid(pid);
	if (!p)
		return -ESRCH;

	ret = 0;
	rcu_read_lock();
	tsk = pid_task(p, PIDTYPE_PID);
	/* Check that pid belongs to a group leader task */
	if (!tsk || !thread_group_leader(tsk))
		ret = -EINVAL;
	rcu_read_unlock();

	fd = ret ?: pidfd_create(p);
	put_pid(p);
	return fd;
}
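/*
 * Illustrative userspace call (hypothetical snippet, assuming no libc
 * wrapper is available):
 *
 *	int pidfd = syscall(__NR_pidfd_open, (pid_t)pid, 0);
 *
 * The returned descriptor has O_CLOEXEC set and can be handed to
 * pidfd_send_signal() or polled to detect process exit, without the
 * usual pid-reuse races.
 */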

/*
 * The pid hash table is scaled according to the amount of memory in the
 * machine, from a minimum of 16 slots up to 4096 slots at one gigabyte
 * or more.
 */
void __init pidhash_init(void)
{
	unsigned int i, pidhash_size;

	pid_hash = alloc_large_system_hash("PID", sizeof(*pid_hash), 0, 18,
					   HASH_EARLY | HASH_SMALL,
					   &pidhash_shift, NULL,
					   0, 4096);
	pidhash_size = 1U << pidhash_shift;

	for (i = 0; i < pidhash_size; i++)
		INIT_HLIST_HEAD(&pid_hash[i]);
}

void __init pidmap_init(void)
{
	/* Verify no one has done anything silly: */
	BUILD_BUG_ON(PID_MAX_LIMIT >= PIDNS_HASH_ADDING);

	/* bump default and minimum pid_max based on number of cpus */
	pid_max = min(pid_max_max, max_t(int, pid_max,
				PIDS_PER_CPU_DEFAULT * num_possible_cpus()));
	pid_max_min = max_t(int, pid_max_min,
				PIDS_PER_CPU_MIN * num_possible_cpus());
	pr_info("pid_max: default: %u minimum: %u\n", pid_max, pid_max_min);

	init_pid_ns.pidmap[0].page = kzalloc(PAGE_SIZE, GFP_KERNEL);
	/* Reserve PID 0. We never call free_pidmap(0) */
	set_bit(0, init_pid_ns.pidmap[0].page);
	atomic_dec(&init_pid_ns.pidmap[0].nr_free);

	init_pid_ns.pid_cachep = KMEM_CACHE(pid,
			SLAB_HWCACHE_ALIGN | SLAB_PANIC | SLAB_ACCOUNT);
}