/*
 * linux/ipc/shm.c
 * Copyright (C) 1992, 1993 Krishna Balasubramanian
 *	 Many improvements/fixes by Bruno Haible.
 *	 Replaced `struct shm_desc' by `struct vm_area_struct', July 1994.
 *	 Fixed the shm swap deallocation (shm_unuse()), August 1998 Andrea Arcangeli.
 *
 * /proc/sysvipc/shm support (c) 1999 Dragos Acostachioaie <dragos@iname.com>
 * BIGMEM support, Andrea Arcangeli <andrea@suse.de>
 * SMP thread shm, Jean-Luc Boyard <jean-luc.boyard@siemens.fr>
 * HIGHMEM support, Ingo Molnar <mingo@redhat.com>
 * Make shmmax, shmall, shmmni sysctl'able, Christoph Rohland <cr@sap.com>
 * Shared /dev/zero support, Kanoj Sarcar <kanoj@sgi.com>
 * Move the mm functionality over to mm/shmem.c, Christoph Rohland <cr@sap.com>
 *
 */

#include <linux/config.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/shm.h>
#include <linux/init.h>
#include <linux/file.h>
#include <linux/mman.h>
#include <linux/shmem_fs.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/audit.h>
#include <linux/capability.h>
#include <linux/ptrace.h>
#include <linux/seq_file.h>
#include <linux/mutex.h>

#include <asm/uaccess.h>

#include "util.h"

static struct file_operations shm_file_operations;
static struct vm_operations_struct shm_vm_ops;

static struct ipc_ids shm_ids;

#define shm_lock(id)	((struct shmid_kernel*)ipc_lock(&shm_ids,id))
#define shm_unlock(shp)	ipc_unlock(&(shp)->shm_perm)
#define shm_get(id)	((struct shmid_kernel*)ipc_get(&shm_ids,id))
#define shm_buildid(id, seq) \
	ipc_buildid(&shm_ids, id, seq)
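
/*
 * Sketch of the id scheme (summary, not from this file): ipc_buildid()
 * in ipc/util.h packs a slot index and a reuse sequence counter into the
 * user-visible id, roughly
 *
 *	id = seq * SEQ_MULTIPLIER + slot	(SEQ_MULTIPLIER == IPCMNI)
 *
 * which is what lets shm_checkid() below reject a stale id whose slot
 * has been recycled for a newer segment.
 */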

static int newseg (key_t key, int shmflg, size_t size);
static void shm_open (struct vm_area_struct *shmd);
static void shm_close (struct vm_area_struct *shmd);
#ifdef CONFIG_PROC_FS
static int sysvipc_shm_proc_show(struct seq_file *s, void *it);
#endif

size_t	shm_ctlmax = SHMMAX;
size_t	shm_ctlall = SHMALL;
int	shm_ctlmni = SHMMNI;

static int shm_tot; /* total number of shared memory pages */

void __init shm_init (void)
{
	ipc_init_ids(&shm_ids, 1);
	ipc_init_proc_interface("sysvipc/shm",
				"       key      shmid perms                  size  cpid  lpid nattch   uid   gid  cuid  cgid      atime      dtime      ctime\n",
				&shm_ids,
				sysvipc_shm_proc_show);
}

static inline int shm_checkid(struct shmid_kernel *s, int id)
{
	if (ipc_checkid(&shm_ids,&s->shm_perm,id))
		return -EIDRM;
	return 0;
}

static inline struct shmid_kernel *shm_rmid(int id)
{
	return (struct shmid_kernel *)ipc_rmid(&shm_ids,id);
}

static inline int shm_addid(struct shmid_kernel *shp)
{
	return ipc_addid(&shm_ids, &shp->shm_perm, shm_ctlmni);
}



static inline void shm_inc (int id) {
	struct shmid_kernel *shp;

	if(!(shp = shm_lock(id)))
		BUG();
	shp->shm_atim = get_seconds();
	shp->shm_lprid = current->tgid;
	shp->shm_nattch++;
	shm_unlock(shp);
}

/* This is called by fork, once for every shm attach. */
static void shm_open (struct vm_area_struct *shmd)
{
	shm_inc (shmd->vm_file->f_dentry->d_inode->i_ino);
}

/*
 * shm_destroy - free the struct shmid_kernel
 *
 * @shp: struct to free
 *
 * It has to be called with shp and shm_ids.mutex locked,
 * but returns with shp unlocked and freed.
 */
static void shm_destroy (struct shmid_kernel *shp)
{
	shm_tot -= (shp->shm_segsz + PAGE_SIZE - 1) >> PAGE_SHIFT;
	shm_rmid (shp->id);
	shm_unlock(shp);
	if (!is_file_hugepages(shp->shm_file))
		shmem_lock(shp->shm_file, 0, shp->mlock_user);
	else
		user_shm_unlock(shp->shm_file->f_dentry->d_inode->i_size,
				shp->mlock_user);
	fput (shp->shm_file);
	security_shm_free(shp);
	ipc_rcu_putref(shp);
}

/*
 * remove the attach descriptor shmd.
 * free memory for segment if it is marked destroyed.
 * The descriptor has already been removed from the current->mm->mmap list
 * and will later be kfree()d.
 */
static void shm_close (struct vm_area_struct *shmd)
{
	struct file * file = shmd->vm_file;
	int id = file->f_dentry->d_inode->i_ino;
	struct shmid_kernel *shp;

	mutex_lock(&shm_ids.mutex);
	/* remove from the list of attaches of the shm segment */
	if(!(shp = shm_lock(id)))
		BUG();
	shp->shm_lprid = current->tgid;
	shp->shm_dtim = get_seconds();
	shp->shm_nattch--;
	if(shp->shm_nattch == 0 &&
	   shp->shm_perm.mode & SHM_DEST)
		shm_destroy (shp);
	else
		shm_unlock(shp);
	mutex_unlock(&shm_ids.mutex);
}

static int shm_mmap(struct file * file, struct vm_area_struct * vma)
{
	int ret;

	ret = shmem_mmap(file, vma);
	if (ret == 0) {
		vma->vm_ops = &shm_vm_ops;
		shm_inc(file->f_dentry->d_inode->i_ino);
	}

	return ret;
}

static struct file_operations shm_file_operations = {
	.mmap	= shm_mmap,
#ifndef CONFIG_MMU
	.get_unmapped_area = shmem_get_unmapped_area,
#endif
};

static struct vm_operations_struct shm_vm_ops = {
	.open	= shm_open,	/* callback for a new vm-area open */
	.close	= shm_close,	/* callback for when the vm-area is released */
	.nopage	= shmem_nopage,
#if defined(CONFIG_NUMA) && defined(CONFIG_SHMEM)
	.set_policy = shmem_set_policy,
	.get_policy = shmem_get_policy,
#endif
};
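
/*
 * Summary (not in the original file): shm_nattch is the attach count.
 * shm_inc() bumps it from shm_mmap() on a fresh attach and from
 * shm_open() when fork() duplicates a mapping; shm_close() drops it
 * and calls shm_destroy() once the last attach of a segment marked
 * SHM_DEST by IPC_RMID goes away.
 */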

static int newseg (key_t key, int shmflg, size_t size)
{
	int error;
	struct shmid_kernel *shp;
	int numpages = (size + PAGE_SIZE -1) >> PAGE_SHIFT;
	struct file * file;
	char name[13];
	int id;

	if (size < SHMMIN || size > shm_ctlmax)
		return -EINVAL;

	if (shm_tot + numpages >= shm_ctlall)
		return -ENOSPC;

	shp = ipc_rcu_alloc(sizeof(*shp));
	if (!shp)
		return -ENOMEM;

	shp->shm_perm.key = key;
	shp->shm_perm.mode = (shmflg & S_IRWXUGO);
	shp->mlock_user = NULL;

	shp->shm_perm.security = NULL;
	error = security_shm_alloc(shp);
	if (error) {
		ipc_rcu_putref(shp);
		return error;
	}

	if (shmflg & SHM_HUGETLB) {
		/* hugetlb_zero_setup takes care of mlock user accounting */
		file = hugetlb_zero_setup(size);
		shp->mlock_user = current->user;
	} else {
		int acctflag = VM_ACCOUNT;
		/*
		 * Do not allow no accounting for OVERCOMMIT_NEVER, even
		 * if it's asked for.
		 */
		if ((shmflg & SHM_NORESERVE) &&
		    sysctl_overcommit_memory != OVERCOMMIT_NEVER)
			acctflag = 0;
		sprintf (name, "SYSV%08x", key);
		file = shmem_file_setup(name, size, acctflag);
	}
	error = PTR_ERR(file);
	if (IS_ERR(file))
		goto no_file;

	error = -ENOSPC;
	id = shm_addid(shp);
	if(id == -1)
		goto no_id;

	shp->shm_cprid = current->tgid;
	shp->shm_lprid = 0;
	shp->shm_atim = shp->shm_dtim = 0;
	shp->shm_ctim = get_seconds();
	shp->shm_segsz = size;
	shp->shm_nattch = 0;
	shp->id = shm_buildid(id,shp->shm_perm.seq);
	shp->shm_file = file;
	file->f_dentry->d_inode->i_ino = shp->id;

	/* Hugetlb ops would have already been assigned. */
	if (!(shmflg & SHM_HUGETLB))
		file->f_op = &shm_file_operations;

	shm_tot += numpages;
	shm_unlock(shp);
	return shp->id;

no_id:
	fput(file);
no_file:
	security_shm_free(shp);
	ipc_rcu_putref(shp);
	return error;
}

asmlinkage long sys_shmget (key_t key, size_t size, int shmflg)
{
	struct shmid_kernel *shp;
	int err, id = 0;

	mutex_lock(&shm_ids.mutex);
	if (key == IPC_PRIVATE) {
		err = newseg(key, shmflg, size);
	} else if ((id = ipc_findkey(&shm_ids, key)) == -1) {
		if (!(shmflg & IPC_CREAT))
			err = -ENOENT;
		else
			err = newseg(key, shmflg, size);
	} else if ((shmflg & IPC_CREAT) && (shmflg & IPC_EXCL)) {
		err = -EEXIST;
	} else {
		shp = shm_lock(id);
		if(shp==NULL)
			BUG();
		if (shp->shm_segsz < size)
			err = -EINVAL;
		else if (ipcperms(&shp->shm_perm, shmflg))
			err = -EACCES;
		else {
			int shmid = shm_buildid(id, shp->shm_perm.seq);
			err = security_shm_associate(shp, shmflg);
			if (!err)
				err = shmid;
		}
		shm_unlock(shp);
	}
	mutex_unlock(&shm_ids.mutex);

	return err;
}
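
/*
 * Illustrative userspace usage of the call above (not part of this
 * file):
 *
 *	int id = shmget(IPC_PRIVATE, 4096, IPC_CREAT | 0600);
 *	void *p = shmat(id, NULL, 0);		- see do_shmat() below
 *	...
 *	shmdt(p);				- see sys_shmdt() below
 *	shmctl(id, IPC_RMID, NULL);
 */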

static inline unsigned long copy_shmid_to_user(void __user *buf, struct shmid64_ds *in, int version)
{
	switch(version) {
	case IPC_64:
		return copy_to_user(buf, in, sizeof(*in));
	case IPC_OLD:
	    {
		struct shmid_ds out;

		ipc64_perm_to_ipc_perm(&in->shm_perm, &out.shm_perm);
		out.shm_segsz	= in->shm_segsz;
		out.shm_atime	= in->shm_atime;
		out.shm_dtime	= in->shm_dtime;
		out.shm_ctime	= in->shm_ctime;
		out.shm_cpid	= in->shm_cpid;
		out.shm_lpid	= in->shm_lpid;
		out.shm_nattch	= in->shm_nattch;

		return copy_to_user(buf, &out, sizeof(out));
	    }
	default:
		return -EINVAL;
	}
}

struct shm_setbuf {
	uid_t	uid;
	gid_t	gid;
	mode_t	mode;
};

static inline unsigned long copy_shmid_from_user(struct shm_setbuf *out, void __user *buf, int version)
{
	switch(version) {
	case IPC_64:
	    {
		struct shmid64_ds tbuf;

		if (copy_from_user(&tbuf, buf, sizeof(tbuf)))
			return -EFAULT;

		out->uid	= tbuf.shm_perm.uid;
		out->gid	= tbuf.shm_perm.gid;
		out->mode	= tbuf.shm_perm.mode;

		return 0;
	    }
	case IPC_OLD:
	    {
		struct shmid_ds tbuf_old;

		if (copy_from_user(&tbuf_old, buf, sizeof(tbuf_old)))
			return -EFAULT;

		out->uid	= tbuf_old.shm_perm.uid;
		out->gid	= tbuf_old.shm_perm.gid;
		out->mode	= tbuf_old.shm_perm.mode;

		return 0;
	    }
	default:
		return -EINVAL;
	}
}

static inline unsigned long copy_shminfo_to_user(void __user *buf, struct shminfo64 *in, int version)
{
	switch(version) {
	case IPC_64:
		return copy_to_user(buf, in, sizeof(*in));
	case IPC_OLD:
	    {
		struct shminfo out;

		if(in->shmmax > INT_MAX)
			out.shmmax = INT_MAX;
		else
			out.shmmax = (int)in->shmmax;

		out.shmmin	= in->shmmin;
		out.shmmni	= in->shmmni;
		out.shmseg	= in->shmseg;
		out.shmall	= in->shmall;

		return copy_to_user(buf, &out, sizeof(out));
	    }
	default:
		return -EINVAL;
	}
}

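/*
 * Summary (not in the original): called only from the SHM_INFO path in
 * sys_shmctl() with shm_ids.mutex held, so the id table cannot change
 * while it is walked.
 */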
static void shm_get_stat(unsigned long *rss, unsigned long *swp)
{
	int i;

	*rss = 0;
	*swp = 0;

	for (i = 0; i <= shm_ids.max_id; i++) {
		struct shmid_kernel *shp;
		struct inode *inode;

		shp = shm_get(i);
		if(!shp)
			continue;

		inode = shp->shm_file->f_dentry->d_inode;

		if (is_file_hugepages(shp->shm_file)) {
			struct address_space *mapping = inode->i_mapping;
			*rss += (HPAGE_SIZE/PAGE_SIZE)*mapping->nrpages;
		} else {
			struct shmem_inode_info *info = SHMEM_I(inode);
			spin_lock(&info->lock);
			*rss += inode->i_mapping->nrpages;
			*swp += info->swapped;
			spin_unlock(&info->lock);
		}
	}
}

asmlinkage long sys_shmctl (int shmid, int cmd, struct shmid_ds __user *buf)
{
	struct shm_setbuf setbuf;
	struct shmid_kernel *shp;
	int err, version;

	if (cmd < 0 || shmid < 0) {
		err = -EINVAL;
		goto out;
	}

	version = ipc_parse_version(&cmd);

	switch (cmd) { /* replace with proc interface ? */
	case IPC_INFO:
	{
		struct shminfo64 shminfo;

		err = security_shm_shmctl(NULL, cmd);
		if (err)
			return err;

		memset(&shminfo,0,sizeof(shminfo));
		shminfo.shmmni = shminfo.shmseg = shm_ctlmni;
		shminfo.shmmax = shm_ctlmax;
		shminfo.shmall = shm_ctlall;

		shminfo.shmmin = SHMMIN;
		if(copy_shminfo_to_user (buf, &shminfo, version))
			return -EFAULT;
		/* reading an integer is always atomic */
		err = shm_ids.max_id;
		if(err<0)
			err = 0;
		goto out;
	}
	case SHM_INFO:
	{
		struct shm_info shm_info;

		err = security_shm_shmctl(NULL, cmd);
		if (err)
			return err;

		memset(&shm_info,0,sizeof(shm_info));
		mutex_lock(&shm_ids.mutex);
		shm_info.used_ids = shm_ids.in_use;
		shm_get_stat (&shm_info.shm_rss, &shm_info.shm_swp);
		shm_info.shm_tot = shm_tot;
		shm_info.swap_attempts = 0;
		shm_info.swap_successes = 0;
		err = shm_ids.max_id;
		mutex_unlock(&shm_ids.mutex);
		if(copy_to_user (buf, &shm_info, sizeof(shm_info))) {
			err = -EFAULT;
			goto out;
		}

		err = err < 0 ? 0 : err;
		goto out;
	}
	case SHM_STAT:
	case IPC_STAT:
	{
		struct shmid64_ds tbuf;
		int result;
		memset(&tbuf, 0, sizeof(tbuf));
		shp = shm_lock(shmid);
		if(shp==NULL) {
			err = -EINVAL;
			goto out;
		} else if(cmd==SHM_STAT) {
			err = -EINVAL;
			if (shmid > shm_ids.max_id)
				goto out_unlock;
			result = shm_buildid(shmid, shp->shm_perm.seq);
		} else {
			err = shm_checkid(shp,shmid);
			if(err)
				goto out_unlock;
			result = 0;
		}
		err = -EACCES;
		if (ipcperms (&shp->shm_perm, S_IRUGO))
			goto out_unlock;
		err = security_shm_shmctl(shp, cmd);
		if (err)
			goto out_unlock;
		kernel_to_ipc64_perm(&shp->shm_perm, &tbuf.shm_perm);
		tbuf.shm_segsz	= shp->shm_segsz;
		tbuf.shm_atime	= shp->shm_atim;
		tbuf.shm_dtime	= shp->shm_dtim;
		tbuf.shm_ctime	= shp->shm_ctim;
		tbuf.shm_cpid	= shp->shm_cprid;
		tbuf.shm_lpid	= shp->shm_lprid;
		if (!is_file_hugepages(shp->shm_file))
			tbuf.shm_nattch = shp->shm_nattch;
		else
			tbuf.shm_nattch = file_count(shp->shm_file) - 1;
		shm_unlock(shp);
		if(copy_shmid_to_user (buf, &tbuf, version))
			err = -EFAULT;
		else
			err = result;
		goto out;
	}
	case SHM_LOCK:
	case SHM_UNLOCK:
	{
		shp = shm_lock(shmid);
		if(shp==NULL) {
			err = -EINVAL;
			goto out;
		}
		err = shm_checkid(shp,shmid);
		if(err)
			goto out_unlock;

		if (!capable(CAP_IPC_LOCK)) {
			err = -EPERM;
			if (current->euid != shp->shm_perm.uid &&
			    current->euid != shp->shm_perm.cuid)
				goto out_unlock;
			if (cmd == SHM_LOCK &&
			    !current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur)
				goto out_unlock;
		}

		err = security_shm_shmctl(shp, cmd);
		if (err)
			goto out_unlock;

		if(cmd==SHM_LOCK) {
			struct user_struct * user = current->user;
			if (!is_file_hugepages(shp->shm_file)) {
				err = shmem_lock(shp->shm_file, 1, user);
				if (!err) {
					shp->shm_perm.mode |= SHM_LOCKED;
					shp->mlock_user = user;
				}
			}
		} else if (!is_file_hugepages(shp->shm_file)) {
			shmem_lock(shp->shm_file, 0, shp->mlock_user);
			shp->shm_perm.mode &= ~SHM_LOCKED;
			shp->mlock_user = NULL;
		}
		shm_unlock(shp);
		goto out;
	}
	case IPC_RMID:
	{
		/*
		 * We cannot simply remove the file. The SVID states
		 * that the block remains until the last person
		 * detaches from it, then is deleted. A shmat() on
		 * an RMID segment is legal in older Linux and if
		 * we change it apps break...
		 *
		 * Instead we set a destroyed flag, and then blow
		 * the name away when the usage hits zero.
		 */
		mutex_lock(&shm_ids.mutex);
		shp = shm_lock(shmid);
		err = -EINVAL;
		if (shp == NULL)
			goto out_up;
		err = shm_checkid(shp, shmid);
		if(err)
			goto out_unlock_up;

		if (current->euid != shp->shm_perm.uid &&
		    current->euid != shp->shm_perm.cuid &&
		    !capable(CAP_SYS_ADMIN)) {
			err = -EPERM;
			goto out_unlock_up;
		}

		err = security_shm_shmctl(shp, cmd);
		if (err)
			goto out_unlock_up;

		if (shp->shm_nattch){
			shp->shm_perm.mode |= SHM_DEST;
			/* Do not find it any more */
			shp->shm_perm.key = IPC_PRIVATE;
			shm_unlock(shp);
		} else
			shm_destroy (shp);
		mutex_unlock(&shm_ids.mutex);
		goto out;
	}

	case IPC_SET:
	{
		if (copy_shmid_from_user (&setbuf, buf, version)) {
			err = -EFAULT;
			goto out;
		}
		mutex_lock(&shm_ids.mutex);
		shp = shm_lock(shmid);
		err = -EINVAL;
		if(shp==NULL)
			goto out_up;
		if ((err = audit_ipc_perms(0, setbuf.uid, setbuf.gid,
					   setbuf.mode, &(shp->shm_perm))))
			goto out_unlock_up;
		err = shm_checkid(shp,shmid);
		if(err)
			goto out_unlock_up;
		err = -EPERM;
		if (current->euid != shp->shm_perm.uid &&
		    current->euid != shp->shm_perm.cuid &&
		    !capable(CAP_SYS_ADMIN)) {
			goto out_unlock_up;
		}

		err = security_shm_shmctl(shp, cmd);
		if (err)
			goto out_unlock_up;

		shp->shm_perm.uid = setbuf.uid;
		shp->shm_perm.gid = setbuf.gid;
		shp->shm_perm.mode = (shp->shm_perm.mode & ~S_IRWXUGO)
			| (setbuf.mode & S_IRWXUGO);
		shp->shm_ctim = get_seconds();
		break;
	}

	default:
		err = -EINVAL;
		goto out;
	}

	err = 0;
out_unlock_up:
	shm_unlock(shp);
out_up:
	mutex_unlock(&shm_ids.mutex);
	goto out;
out_unlock:
	shm_unlock(shp);
out:
	return err;
}
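
/*
 * Illustrative only: SHM_LOCK/SHM_UNLOCK as seen from userspace.
 * Without CAP_IPC_LOCK the caller must match the owner/creator uid and,
 * for SHM_LOCK, have a nonzero RLIMIT_MEMLOCK (checked above):
 *
 *	shmctl(id, SHM_LOCK, NULL);	- pin the segment's pages
 *	shmctl(id, SHM_UNLOCK, NULL);	- allow them to be swapped again
 */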

/*
 * Fix shmaddr, allocate descriptor, map shm, add attach descriptor to lists.
 *
 * NOTE! Despite the name, this is NOT a direct system call entrypoint. The
 * "raddr" thing points to kernel space, and there has to be a wrapper around
 * this.
 */
long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr)
{
	struct shmid_kernel *shp;
	unsigned long addr;
	unsigned long size;
	struct file * file;
	int err;
	unsigned long flags;
	unsigned long prot;
	unsigned long o_flags;
	int acc_mode;
	void *user_addr;

	if (shmid < 0) {
		err = -EINVAL;
		goto out;
	} else if ((addr = (ulong)shmaddr)) {
		if (addr & (SHMLBA-1)) {
			if (shmflg & SHM_RND)
				addr &= ~(SHMLBA-1);	   /* round down */
			else
#ifndef __ARCH_FORCE_SHMLBA
				if (addr & ~PAGE_MASK)
#endif
					return -EINVAL;
		}
		flags = MAP_SHARED | MAP_FIXED;
	} else {
		if ((shmflg & SHM_REMAP))
			return -EINVAL;

		flags = MAP_SHARED;
	}
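
	/*
	 * Worked example for the SHM_RND path above (illustrative values,
	 * assuming SHMLBA == 4096 on this architecture): shmaddr
	 * 0x40001234 rounds down to 0x40001000; without SHM_RND the same
	 * misaligned address fails with -EINVAL.
	 */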

	if (shmflg & SHM_RDONLY) {
		prot = PROT_READ;
		o_flags = O_RDONLY;
		acc_mode = S_IRUGO;
	} else {
		prot = PROT_READ | PROT_WRITE;
		o_flags = O_RDWR;
		acc_mode = S_IRUGO | S_IWUGO;
	}
	if (shmflg & SHM_EXEC) {
		prot |= PROT_EXEC;
		acc_mode |= S_IXUGO;
	}

	/*
	 * We cannot rely on the fs check since SYSV IPC does have an
	 * additional creator id...
	 */
	shp = shm_lock(shmid);
	if(shp == NULL) {
		err = -EINVAL;
		goto out;
	}
	err = shm_checkid(shp,shmid);
	if (err) {
		shm_unlock(shp);
		goto out;
	}
	if (ipcperms(&shp->shm_perm, acc_mode)) {
		shm_unlock(shp);
		err = -EACCES;
		goto out;
	}

	err = security_shm_shmat(shp, shmaddr, shmflg);
	if (err) {
		shm_unlock(shp);
		return err;
	}

	file = shp->shm_file;
	size = i_size_read(file->f_dentry->d_inode);
	shp->shm_nattch++;
	shm_unlock(shp);

	down_write(&current->mm->mmap_sem);
	if (addr && !(shmflg & SHM_REMAP)) {
		user_addr = ERR_PTR(-EINVAL);
		if (find_vma_intersection(current->mm, addr, addr + size))
			goto invalid;
		/*
		 * If shm segment goes below stack, make sure there is some
		 * space left for the stack to grow (at least 4 pages).
		 */
		if (addr < current->mm->start_stack &&
		    addr > current->mm->start_stack - size - PAGE_SIZE * 5)
			goto invalid;
	}

	user_addr = (void*) do_mmap (file, addr, size, prot, flags, 0);

invalid:
	up_write(&current->mm->mmap_sem);

	mutex_lock(&shm_ids.mutex);
	if(!(shp = shm_lock(shmid)))
		BUG();
	shp->shm_nattch--;
	if(shp->shm_nattch == 0 &&
	   shp->shm_perm.mode & SHM_DEST)
		shm_destroy (shp);
	else
		shm_unlock(shp);
	mutex_unlock(&shm_ids.mutex);

	*raddr = (unsigned long) user_addr;
	err = 0;
	if (IS_ERR(user_addr))
		err = PTR_ERR(user_addr);
out:
	return err;
}

asmlinkage long sys_shmat(int shmid, char __user *shmaddr, int shmflg)
{
	unsigned long ret;
	long err;

	err = do_shmat(shmid, shmaddr, shmflg, &ret);
	if (err)
		return err;
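	/*
	 * Note (not in the original): a valid attach address can look like
	 * a negative errno to the generic syscall-return path on some
	 * architectures, so mark this return value as known-good.
	 */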
	force_successful_syscall_return();
	return (long)ret;
}

/*
 * detach and kill segment if marked destroyed.
 * The work is done in shm_close.
 */
asmlinkage long sys_shmdt(char __user *shmaddr)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma, *next;
	unsigned long addr = (unsigned long)shmaddr;
	loff_t size = 0;
	int retval = -EINVAL;

	if (addr & ~PAGE_MASK)
		return retval;

	down_write(&mm->mmap_sem);

	/*
	 * This function tries to be smart and unmap shm segments that
	 * were modified by partial mlock or munmap calls:
	 * - It first determines the size of the shm segment that should be
	 *   unmapped: It searches for a vma that is backed by shm and that
	 *   started at address shmaddr. It records its size and then unmaps
	 *   it.
	 * - Then it unmaps all shm vmas that started at shmaddr and that
	 *   are within the initially determined size.
	 * Errors from do_munmap are ignored: the function only fails if
	 * it's called with invalid parameters or if it's called to unmap
	 * a part of a vma. Both calls in this function are for full vmas,
	 * the parameters are directly copied from the vma itself and always
	 * valid - therefore do_munmap cannot fail. (famous last words?)
	 */
	/*
	 * If it had been mremap()'d, the starting address would not
	 * match the usual checks anyway. So assume all vma's are
	 * above the starting address given.
	 */
	vma = find_vma(mm, addr);

	while (vma) {
		next = vma->vm_next;

		/*
		 * Check if the starting address would match, i.e. it's
		 * a fragment created by mprotect() and/or munmap(), or
		 * otherwise it starts at this address with no hassles.
		 */
		if ((vma->vm_ops == &shm_vm_ops || is_vm_hugetlb_page(vma)) &&
			(vma->vm_start - addr)/PAGE_SIZE == vma->vm_pgoff) {


			size = vma->vm_file->f_dentry->d_inode->i_size;
			do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start);
			/*
			 * We discovered the size of the shm segment, so
			 * break out of here and fall through to the next
			 * loop that uses the size information to stop
			 * searching for matching vma's.
			 */
			retval = 0;
			vma = next;
			break;
		}
		vma = next;
	}

	/*
	 * We need look no further than the maximum address a fragment
	 * could possibly have landed at. Also cast things to loff_t to
	 * prevent overflows and make comparisons vs. equal-width types.
	 */
	size = PAGE_ALIGN(size);
	while (vma && (loff_t)(vma->vm_end - addr) <= size) {
		next = vma->vm_next;

		/* finding a matching vma now does not alter retval */
		if ((vma->vm_ops == &shm_vm_ops || is_vm_hugetlb_page(vma)) &&
			(vma->vm_start - addr)/PAGE_SIZE == vma->vm_pgoff)

			do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start);
		vma = next;
	}

	up_write(&mm->mmap_sem);
	return retval;
}
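
/*
 * Illustrative only (not in the original): why sys_shmdt() needs two
 * passes.  A single attach may have been split into several vmas by a
 * later mprotect() or partial munmap():
 *
 *	char *p = shmat(id, NULL, 0);
 *	mprotect(p + 4096, 4096, PROT_READ);	- splits the mapping
 *	shmdt(p);				- must unmap every piece
 */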

#ifdef CONFIG_PROC_FS
static int sysvipc_shm_proc_show(struct seq_file *s, void *it)
{
	struct shmid_kernel *shp = it;
	char *format;

#define SMALL_STRING "%10d %10d  %4o %10u %5u %5u  %5d %5u %5u %5u %5u %10lu %10lu %10lu\n"
#define BIG_STRING   "%10d %10d  %4o %21u %5u %5u  %5d %5u %5u %5u %5u %10lu %10lu %10lu\n"

	if (sizeof(size_t) <= sizeof(int))
		format = SMALL_STRING;
	else
		format = BIG_STRING;
	return seq_printf(s, format,
			  shp->shm_perm.key,
			  shp->id,
			  shp->shm_perm.mode,
			  shp->shm_segsz,
			  shp->shm_cprid,
			  shp->shm_lprid,
			  is_file_hugepages(shp->shm_file) ? (file_count(shp->shm_file) - 1) : shp->shm_nattch,
			  shp->shm_perm.uid,
			  shp->shm_perm.gid,
			  shp->shm_perm.cuid,
			  shp->shm_perm.cgid,
			  shp->shm_atim,
			  shp->shm_dtim,
			  shp->shm_ctim);
}
#endif