/*
 * linux/ipc/shm.c
 * Copyright (C) 1992, 1993 Krishna Balasubramanian
 *	 Many improvements/fixes by Bruno Haible.
 * Replaced `struct shm_desc' by `struct vm_area_struct', July 1994.
 * Fixed the shm swap deallocation (shm_unuse()), August 1998 Andrea Arcangeli.
 *
 * /proc/sysvipc/shm support (c) 1999 Dragos Acostachioaie <dragos@iname.com>
 * BIGMEM support, Andrea Arcangeli <andrea@suse.de>
 * SMP thread shm, Jean-Luc Boyard <jean-luc.boyard@siemens.fr>
 * HIGHMEM support, Ingo Molnar <mingo@redhat.com>
 * Make shmmax, shmall, shmmni sysctl'able, Christoph Rohland <cr@sap.com>
 * Shared /dev/zero support, Kanoj Sarcar <kanoj@sgi.com>
 * Move the mm functionality over to mm/shmem.c, Christoph Rohland <cr@sap.com>
 *
 */

#include <linux/config.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/shm.h>
#include <linux/init.h>
#include <linux/file.h>
#include <linux/mman.h>
#include <linux/shmem_fs.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/audit.h>
#include <linux/ptrace.h>
#include <linux/seq_file.h>

#include <asm/uaccess.h>

#include "util.h"

#define shm_flags	shm_perm.mode

static struct file_operations shm_file_operations;
static struct vm_operations_struct shm_vm_ops;

static struct ipc_ids shm_ids;

#define shm_lock(id)	((struct shmid_kernel *)ipc_lock(&shm_ids, id))
#define shm_unlock(shp)	ipc_unlock(&(shp)->shm_perm)
#define shm_get(id)	((struct shmid_kernel *)ipc_get(&shm_ids, id))
#define shm_buildid(id, seq) \
	ipc_buildid(&shm_ids, id, seq)

static int newseg(key_t key, int shmflg, size_t size);
static void shm_open(struct vm_area_struct *shmd);
static void shm_close(struct vm_area_struct *shmd);
#ifdef CONFIG_PROC_FS
static int sysvipc_shm_proc_show(struct seq_file *s, void *it);
#endif

size_t	shm_ctlmax = SHMMAX;
size_t	shm_ctlall = SHMALL;
int	shm_ctlmni = SHMMNI;

static int shm_tot; /* total number of shared memory pages */

void __init shm_init(void)
{
	ipc_init_ids(&shm_ids, 1);
	ipc_init_proc_interface("sysvipc/shm",
				"       key      shmid perms       size  cpid  lpid nattch   uid   gid  cuid  cgid      atime      dtime      ctime\n",
				&shm_ids,
				sysvipc_shm_proc_show);
}

static inline int shm_checkid(struct shmid_kernel *s, int id)
{
	if (ipc_checkid(&shm_ids, &s->shm_perm, id))
		return -EIDRM;
	return 0;
}

static inline struct shmid_kernel *shm_rmid(int id)
{
	return (struct shmid_kernel *)ipc_rmid(&shm_ids, id);
}

static inline int shm_addid(struct shmid_kernel *shp)
{
	return ipc_addid(&shm_ids, &shp->shm_perm, shm_ctlmni);
}



static inline void shm_inc(int id)
{
	struct shmid_kernel *shp;

	if (!(shp = shm_lock(id)))
		BUG();
	shp->shm_atim = get_seconds();
	shp->shm_lprid = current->tgid;
	shp->shm_nattch++;
	shm_unlock(shp);
}

/* This is called by fork, once for every shm attach. */
static void shm_open(struct vm_area_struct *shmd)
{
	shm_inc(shmd->vm_file->f_dentry->d_inode->i_ino);
}

/*
 * shm_destroy - free the struct shmid_kernel
 *
 * @shp: struct to free
 *
 * It has to be called with shp and shm_ids.sem locked,
 * but returns with shp unlocked and freed.
 */
static void shm_destroy(struct shmid_kernel *shp)
{
	shm_tot -= (shp->shm_segsz + PAGE_SIZE - 1) >> PAGE_SHIFT;
	shm_rmid(shp->id);
	shm_unlock(shp);
	if (!is_file_hugepages(shp->shm_file))
		shmem_lock(shp->shm_file, 0, shp->mlock_user);
	else
		user_shm_unlock(shp->shm_file->f_dentry->d_inode->i_size,
				shp->mlock_user);
	fput(shp->shm_file);
	security_shm_free(shp);
	ipc_rcu_putref(shp);
}

/*
 * remove the attach descriptor shmd.
 * free memory for segment if it is marked destroyed.
 * The descriptor has already been removed from the current->mm->mmap list
 * and will later be kfree()d.
 */
static void shm_close(struct vm_area_struct *shmd)
{
	struct file *file = shmd->vm_file;
	int id = file->f_dentry->d_inode->i_ino;
	struct shmid_kernel *shp;

	down(&shm_ids.sem);
	/* remove from the list of attaches of the shm segment */
	if (!(shp = shm_lock(id)))
		BUG();
	shp->shm_lprid = current->tgid;
	shp->shm_dtim = get_seconds();
	shp->shm_nattch--;
	if (shp->shm_nattch == 0 &&
	    shp->shm_flags & SHM_DEST)
		shm_destroy(shp);
	else
		shm_unlock(shp);
	up(&shm_ids.sem);
}

static int shm_mmap(struct file *file, struct vm_area_struct *vma)
{
	file_accessed(file);
	vma->vm_ops = &shm_vm_ops;
	shm_inc(file->f_dentry->d_inode->i_ino);
	return 0;
}

static struct file_operations shm_file_operations = {
	.mmap	= shm_mmap
};

static struct vm_operations_struct shm_vm_ops = {
	.open	= shm_open,	/* callback for a new vm-area open */
	.close	= shm_close,	/* callback for when the vm-area is released */
	.nopage	= shmem_nopage,
#if defined(CONFIG_NUMA) && defined(CONFIG_SHMEM)
	.set_policy = shmem_set_policy,
	.get_policy = shmem_get_policy,
#endif
};

static int newseg(key_t key, int shmflg, size_t size)
{
	int error;
	struct shmid_kernel *shp;
	int numpages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	struct file *file;
	char name[13];
	int id;

	if (size < SHMMIN || size > shm_ctlmax)
		return -EINVAL;

	if (shm_tot + numpages >= shm_ctlall)
		return -ENOSPC;

	shp = ipc_rcu_alloc(sizeof(*shp));
	if (!shp)
		return -ENOMEM;

	shp->shm_perm.key = key;
	shp->shm_flags = (shmflg & S_IRWXUGO);
	shp->mlock_user = NULL;

	shp->shm_perm.security = NULL;
	error = security_shm_alloc(shp);
	if (error) {
		ipc_rcu_putref(shp);
		return error;
	}

	if (shmflg & SHM_HUGETLB) {
		/* hugetlb_zero_setup takes care of mlock user accounting */
		file = hugetlb_zero_setup(size);
		shp->mlock_user = current->user;
	} else {
		sprintf(name, "SYSV%08x", key);
		file = shmem_file_setup(name, size, VM_ACCOUNT);
	}
	error = PTR_ERR(file);
	if (IS_ERR(file))
		goto no_file;

	error = -ENOSPC;
	id = shm_addid(shp);
	if (id == -1)
		goto no_id;

	shp->shm_cprid = current->tgid;
	shp->shm_lprid = 0;
	shp->shm_atim = shp->shm_dtim = 0;
	shp->shm_ctim = get_seconds();
	shp->shm_segsz = size;
	shp->shm_nattch = 0;
	shp->id = shm_buildid(id, shp->shm_perm.seq);
	shp->shm_file = file;
	file->f_dentry->d_inode->i_ino = shp->id;
	if (shmflg & SHM_HUGETLB)
		set_file_hugepages(file);
	else
		file->f_op = &shm_file_operations;
	shm_tot += numpages;
	shm_unlock(shp);
	return shp->id;

no_id:
	fput(file);
no_file:
	security_shm_free(shp);
	ipc_rcu_putref(shp);
	return error;
}
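
/*
 * Worked example of the size accounting above, assuming PAGE_SIZE is
 * 4096: a request for size = 4097 bytes gives numpages = 2, so the
 * segment charges two pages against shm_ctlall while shm_segsz keeps
 * the caller's exact 4097.
 */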

asmlinkage long sys_shmget(key_t key, size_t size, int shmflg)
{
	struct shmid_kernel *shp;
	int err, id = 0;

	down(&shm_ids.sem);
	if (key == IPC_PRIVATE) {
		err = newseg(key, shmflg, size);
	} else if ((id = ipc_findkey(&shm_ids, key)) == -1) {
		if (!(shmflg & IPC_CREAT))
			err = -ENOENT;
		else
			err = newseg(key, shmflg, size);
	} else if ((shmflg & IPC_CREAT) && (shmflg & IPC_EXCL)) {
		err = -EEXIST;
	} else {
		shp = shm_lock(id);
		if (shp == NULL)
			BUG();
		if (shp->shm_segsz < size)
			err = -EINVAL;
		else if (ipcperms(&shp->shm_perm, shmflg))
			err = -EACCES;
		else {
			int shmid = shm_buildid(id, shp->shm_perm.seq);
			err = security_shm_associate(shp, shmflg);
			if (!err)
				err = shmid;
		}
		shm_unlock(shp);
	}
	up(&shm_ids.sem);

	return err;
}
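
/*
 * Minimal userspace sketch of the paths through sys_shmget() above; the
 * key 0x1234 and mode 0600 are made-up values for illustration:
 *
 *	#include <sys/ipc.h>
 *	#include <sys/shm.h>
 *
 *	int id = shmget(0x1234, 65536, IPC_CREAT | IPC_EXCL | 0600);
 *	if (id < 0)
 *		perror("shmget");	// EEXIST, ENOENT, EINVAL, ENOSPC...
 */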

static inline unsigned long copy_shmid_to_user(void __user *buf, struct shmid64_ds *in, int version)
{
	switch (version) {
	case IPC_64:
		return copy_to_user(buf, in, sizeof(*in));
	case IPC_OLD:
	    {
		struct shmid_ds out;

		ipc64_perm_to_ipc_perm(&in->shm_perm, &out.shm_perm);
		out.shm_segsz	= in->shm_segsz;
		out.shm_atime	= in->shm_atime;
		out.shm_dtime	= in->shm_dtime;
		out.shm_ctime	= in->shm_ctime;
		out.shm_cpid	= in->shm_cpid;
		out.shm_lpid	= in->shm_lpid;
		out.shm_nattch	= in->shm_nattch;

		return copy_to_user(buf, &out, sizeof(out));
	    }
	default:
		return -EINVAL;
	}
}

struct shm_setbuf {
	uid_t	uid;
	gid_t	gid;
	mode_t	mode;
};

static inline unsigned long copy_shmid_from_user(struct shm_setbuf *out, void __user *buf, int version)
{
	switch (version) {
	case IPC_64:
	    {
		struct shmid64_ds tbuf;

		if (copy_from_user(&tbuf, buf, sizeof(tbuf)))
			return -EFAULT;

		out->uid	= tbuf.shm_perm.uid;
		out->gid	= tbuf.shm_perm.gid;
		out->mode	= tbuf.shm_flags;

		return 0;
	    }
	case IPC_OLD:
	    {
		struct shmid_ds tbuf_old;

		if (copy_from_user(&tbuf_old, buf, sizeof(tbuf_old)))
			return -EFAULT;

		out->uid	= tbuf_old.shm_perm.uid;
		out->gid	= tbuf_old.shm_perm.gid;
		out->mode	= tbuf_old.shm_flags;

		return 0;
	    }
	default:
		return -EINVAL;
	}
}

static inline unsigned long copy_shminfo_to_user(void __user *buf, struct shminfo64 *in, int version)
{
	switch (version) {
	case IPC_64:
		return copy_to_user(buf, in, sizeof(*in));
	case IPC_OLD:
	    {
		struct shminfo out;

		if (in->shmmax > INT_MAX)
			out.shmmax = INT_MAX;
		else
			out.shmmax = (int)in->shmmax;

		out.shmmin	= in->shmmin;
		out.shmmni	= in->shmmni;
		out.shmseg	= in->shmseg;
		out.shmall	= in->shmall;

		return copy_to_user(buf, &out, sizeof(out));
	    }
	default:
		return -EINVAL;
	}
}

static void shm_get_stat(unsigned long *rss, unsigned long *swp)
{
	int i;

	*rss = 0;
	*swp = 0;

	for (i = 0; i <= shm_ids.max_id; i++) {
		struct shmid_kernel *shp;
		struct inode *inode;

		shp = shm_get(i);
		if (!shp)
			continue;

		inode = shp->shm_file->f_dentry->d_inode;

		if (is_file_hugepages(shp->shm_file)) {
			struct address_space *mapping = inode->i_mapping;
			*rss += (HPAGE_SIZE/PAGE_SIZE)*mapping->nrpages;
		} else {
			struct shmem_inode_info *info = SHMEM_I(inode);
			spin_lock(&info->lock);
			*rss += inode->i_mapping->nrpages;
			*swp += info->swapped;
			spin_unlock(&info->lock);
		}
	}
}
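
/*
 * Example of the hugetlb arithmetic in shm_get_stat() above: with 2 MB
 * huge pages over a 4 KB PAGE_SIZE, HPAGE_SIZE/PAGE_SIZE is 512, so a
 * hugetlb segment whose mapping holds 3 pages adds 1536 to *rss.
 */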

asmlinkage long sys_shmctl(int shmid, int cmd, struct shmid_ds __user *buf)
{
	struct shm_setbuf setbuf;
	struct shmid_kernel *shp;
	int err, version;

	if (cmd < 0 || shmid < 0) {
		err = -EINVAL;
		goto out;
	}

	version = ipc_parse_version(&cmd);

	switch (cmd) { /* replace with proc interface ? */
	case IPC_INFO:
	{
		struct shminfo64 shminfo;

		err = security_shm_shmctl(NULL, cmd);
		if (err)
			return err;

		memset(&shminfo, 0, sizeof(shminfo));
		shminfo.shmmni = shminfo.shmseg = shm_ctlmni;
		shminfo.shmmax = shm_ctlmax;
		shminfo.shmall = shm_ctlall;

		shminfo.shmmin = SHMMIN;
		if (copy_shminfo_to_user(buf, &shminfo, version))
			return -EFAULT;
		/* reading an integer is always atomic */
		err = shm_ids.max_id;
		if (err < 0)
			err = 0;
		goto out;
	}
	case SHM_INFO:
	{
		struct shm_info shm_info;

		err = security_shm_shmctl(NULL, cmd);
		if (err)
			return err;

		memset(&shm_info, 0, sizeof(shm_info));
		down(&shm_ids.sem);
		shm_info.used_ids = shm_ids.in_use;
		shm_get_stat(&shm_info.shm_rss, &shm_info.shm_swp);
		shm_info.shm_tot = shm_tot;
		shm_info.swap_attempts = 0;
		shm_info.swap_successes = 0;
		err = shm_ids.max_id;
		up(&shm_ids.sem);
		if (copy_to_user(buf, &shm_info, sizeof(shm_info))) {
			err = -EFAULT;
			goto out;
		}

		err = err < 0 ? 0 : err;
		goto out;
	}
	case SHM_STAT:
	case IPC_STAT:
	{
		struct shmid64_ds tbuf;
		int result;
		memset(&tbuf, 0, sizeof(tbuf));
		shp = shm_lock(shmid);
		if (shp == NULL) {
			err = -EINVAL;
			goto out;
		} else if (cmd == SHM_STAT) {
			err = -EINVAL;
			if (shmid > shm_ids.max_id)
				goto out_unlock;
			result = shm_buildid(shmid, shp->shm_perm.seq);
		} else {
			err = shm_checkid(shp, shmid);
			if (err)
				goto out_unlock;
			result = 0;
		}
		err = -EACCES;
		if (ipcperms(&shp->shm_perm, S_IRUGO))
			goto out_unlock;
		err = security_shm_shmctl(shp, cmd);
		if (err)
			goto out_unlock;
		kernel_to_ipc64_perm(&shp->shm_perm, &tbuf.shm_perm);
		tbuf.shm_segsz	= shp->shm_segsz;
		tbuf.shm_atime	= shp->shm_atim;
		tbuf.shm_dtime	= shp->shm_dtim;
		tbuf.shm_ctime	= shp->shm_ctim;
		tbuf.shm_cpid	= shp->shm_cprid;
		tbuf.shm_lpid	= shp->shm_lprid;
		if (!is_file_hugepages(shp->shm_file))
			tbuf.shm_nattch = shp->shm_nattch;
		else
			tbuf.shm_nattch = file_count(shp->shm_file) - 1;
		shm_unlock(shp);
		if (copy_shmid_to_user(buf, &tbuf, version))
			err = -EFAULT;
		else
			err = result;
		goto out;
	}
	case SHM_LOCK:
	case SHM_UNLOCK:
	{
		shp = shm_lock(shmid);
		if (shp == NULL) {
			err = -EINVAL;
			goto out;
		}
		err = shm_checkid(shp, shmid);
		if (err)
			goto out_unlock;

		if (!capable(CAP_IPC_LOCK)) {
			err = -EPERM;
			if (current->euid != shp->shm_perm.uid &&
			    current->euid != shp->shm_perm.cuid)
				goto out_unlock;
			if (cmd == SHM_LOCK &&
			    !current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur)
				goto out_unlock;
		}

		err = security_shm_shmctl(shp, cmd);
		if (err)
			goto out_unlock;

		if (cmd == SHM_LOCK) {
			struct user_struct *user = current->user;
			if (!is_file_hugepages(shp->shm_file)) {
				err = shmem_lock(shp->shm_file, 1, user);
				if (!err) {
					shp->shm_flags |= SHM_LOCKED;
					shp->mlock_user = user;
				}
			}
		} else if (!is_file_hugepages(shp->shm_file)) {
			shmem_lock(shp->shm_file, 0, shp->mlock_user);
			shp->shm_flags &= ~SHM_LOCKED;
			shp->mlock_user = NULL;
		}
		shm_unlock(shp);
		goto out;
	}
	case IPC_RMID:
	{
		/*
		 * We cannot simply remove the file. The SVID states
		 * that the block remains until the last person
		 * detaches from it, then is deleted. A shmat() on
		 * an RMID segment is legal in older Linux and if
		 * we change it apps break...
		 *
		 * Instead we set a destroyed flag, and then blow
		 * the name away when the usage hits zero.
		 */
		down(&shm_ids.sem);
		shp = shm_lock(shmid);
		err = -EINVAL;
		if (shp == NULL)
			goto out_up;
		err = shm_checkid(shp, shmid);
		if (err)
			goto out_unlock_up;

		if (current->euid != shp->shm_perm.uid &&
		    current->euid != shp->shm_perm.cuid &&
		    !capable(CAP_SYS_ADMIN)) {
			err = -EPERM;
			goto out_unlock_up;
		}

		err = security_shm_shmctl(shp, cmd);
		if (err)
			goto out_unlock_up;

		if (shp->shm_nattch) {
			shp->shm_flags |= SHM_DEST;
			/* Do not find it any more */
			shp->shm_perm.key = IPC_PRIVATE;
			shm_unlock(shp);
		} else
			shm_destroy(shp);
		up(&shm_ids.sem);
		goto out;
	}

	case IPC_SET:
	{
		if (copy_shmid_from_user(&setbuf, buf, version)) {
			err = -EFAULT;
			goto out;
		}
		if ((err = audit_ipc_perms(0, setbuf.uid, setbuf.gid, setbuf.mode)))
			return err;
		down(&shm_ids.sem);
		shp = shm_lock(shmid);
		err = -EINVAL;
		if (shp == NULL)
			goto out_up;
		err = shm_checkid(shp, shmid);
		if (err)
			goto out_unlock_up;
		err = -EPERM;
		if (current->euid != shp->shm_perm.uid &&
		    current->euid != shp->shm_perm.cuid &&
		    !capable(CAP_SYS_ADMIN)) {
			goto out_unlock_up;
		}

		err = security_shm_shmctl(shp, cmd);
		if (err)
			goto out_unlock_up;

		shp->shm_perm.uid = setbuf.uid;
		shp->shm_perm.gid = setbuf.gid;
		shp->shm_flags = (shp->shm_flags & ~S_IRWXUGO)
			| (setbuf.mode & S_IRWXUGO);
		shp->shm_ctim = get_seconds();
		break;
	}

	default:
		err = -EINVAL;
		goto out;
	}

	err = 0;
out_unlock_up:
	shm_unlock(shp);
out_up:
	up(&shm_ids.sem);
	goto out;
out_unlock:
	shm_unlock(shp);
out:
	return err;
}
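
/*
 * Minimal userspace sketch of the IPC_RMID semantics implemented above
 * (same headers as the shmget sketch, plus <stdio.h>): the segment is
 * only marked SHM_DEST and its key hidden, so existing attaches keep
 * working until the last one goes away. "id" is assumed to come from a
 * successful shmget():
 *
 *	struct shmid_ds ds;
 *	if (shmctl(id, IPC_STAT, &ds) == 0)
 *		printf("nattch=%lu\n", (unsigned long)ds.shm_nattch);
 *	shmctl(id, IPC_RMID, NULL);	// destroyed once nattch drops to 0
 */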

/*
 * Fix shmaddr, allocate descriptor, map shm, add attach descriptor to lists.
 *
 * NOTE! Despite the name, this is NOT a direct system call entrypoint. The
 * "raddr" thing points to kernel space, and there has to be a wrapper around
 * this.
 */
long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr)
{
	struct shmid_kernel *shp;
	unsigned long addr;
	unsigned long size;
	struct file *file;
	int err;
	unsigned long flags;
	unsigned long prot;
	unsigned long o_flags;
	int acc_mode;
	void *user_addr;

	if (shmid < 0) {
		err = -EINVAL;
		goto out;
	} else if ((addr = (ulong)shmaddr)) {
		if (addr & (SHMLBA-1)) {
			if (shmflg & SHM_RND)
				addr &= ~(SHMLBA-1);	   /* round down */
			else
#ifndef __ARCH_FORCE_SHMLBA
				if (addr & ~PAGE_MASK)
#endif
					return -EINVAL;
		}
		flags = MAP_SHARED | MAP_FIXED;
	} else {
		if ((shmflg & SHM_REMAP))
			return -EINVAL;

		flags = MAP_SHARED;
	}

	if (shmflg & SHM_RDONLY) {
		prot = PROT_READ;
		o_flags = O_RDONLY;
		acc_mode = S_IRUGO;
	} else {
		prot = PROT_READ | PROT_WRITE;
		o_flags = O_RDWR;
		acc_mode = S_IRUGO | S_IWUGO;
	}
	if (shmflg & SHM_EXEC) {
		prot |= PROT_EXEC;
		acc_mode |= S_IXUGO;
	}

	/*
	 * We cannot rely on the fs check since SYSV IPC does have an
	 * additional creator id...
	 */
	shp = shm_lock(shmid);
	if (shp == NULL) {
		err = -EINVAL;
		goto out;
	}
	err = shm_checkid(shp, shmid);
	if (err) {
		shm_unlock(shp);
		goto out;
	}
	if (ipcperms(&shp->shm_perm, acc_mode)) {
		shm_unlock(shp);
		err = -EACCES;
		goto out;
	}

	err = security_shm_shmat(shp, shmaddr, shmflg);
	if (err) {
		shm_unlock(shp);
		return err;
	}

	file = shp->shm_file;
	size = i_size_read(file->f_dentry->d_inode);
	shp->shm_nattch++;
	shm_unlock(shp);

	down_write(&current->mm->mmap_sem);
	if (addr && !(shmflg & SHM_REMAP)) {
		user_addr = ERR_PTR(-EINVAL);
		if (find_vma_intersection(current->mm, addr, addr + size))
			goto invalid;
		/*
		 * If shm segment goes below stack, make sure there is some
		 * space left for the stack to grow (at least 4 pages).
		 */
		if (addr < current->mm->start_stack &&
		    addr > current->mm->start_stack - size - PAGE_SIZE * 5)
			goto invalid;
	}

	user_addr = (void *) do_mmap(file, addr, size, prot, flags, 0);

invalid:
	up_write(&current->mm->mmap_sem);

	down(&shm_ids.sem);
	if (!(shp = shm_lock(shmid)))
		BUG();
	shp->shm_nattch--;
	if (shp->shm_nattch == 0 &&
	    shp->shm_flags & SHM_DEST)
		shm_destroy(shp);
	else
		shm_unlock(shp);
	up(&shm_ids.sem);

	*raddr = (unsigned long) user_addr;
	err = 0;
	if (IS_ERR(user_addr))
		err = PTR_ERR(user_addr);
out:
	return err;
}

asmlinkage long sys_shmat(int shmid, char __user *shmaddr, int shmflg)
{
	unsigned long ret;
	long err;

	err = do_shmat(shmid, shmaddr, shmflg, &ret);
	if (err)
		return err;
	force_successful_syscall_return();
	return (long)ret;
}
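
/*
 * Minimal userspace sketch matching the SHM_RND handling in do_shmat()
 * above: an address that is not SHMLBA-aligned is rounded down only
 * when SHM_RND is passed, otherwise shmat() fails with EINVAL. "id" is
 * assumed to come from a successful shmget(); 0x40001234 is a made-up
 * unaligned address:
 *
 *	void *p = shmat(id, NULL, 0);		// kernel chooses the address
 *	void *q = shmat(id, (void *)0x40001234, SHM_RND);
 *	if (p == (void *)-1 || q == (void *)-1)
 *		perror("shmat");
 */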

/*
 * detach and kill segment if marked destroyed.
 * The work is done in shm_close.
 */
asmlinkage long sys_shmdt(char __user *shmaddr)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma, *next;
	unsigned long addr = (unsigned long)shmaddr;
	loff_t size = 0;
	int retval = -EINVAL;

	down_write(&mm->mmap_sem);

	/*
	 * This function tries to be smart and unmap shm segments that
	 * were modified by partial mlock or munmap calls:
	 * - It first determines the size of the shm segment that should be
	 *   unmapped: It searches for a vma that is backed by shm and that
	 *   started at address shmaddr. It records its size and then unmaps
	 *   it.
	 * - Then it unmaps all shm vmas that started at shmaddr and that
	 *   are within the initially determined size.
	 * Errors from do_munmap are ignored: the function only fails if
	 * it's called with invalid parameters or if it's called to unmap
	 * a part of a vma. Both calls in this function are for full vmas,
	 * the parameters are directly copied from the vma itself and always
	 * valid - therefore do_munmap cannot fail. (famous last words?)
	 */
	/*
	 * If it had been mremap()'d, the starting address would not
	 * match the usual checks anyway. So assume all vma's are
	 * above the starting address given.
	 */
	vma = find_vma(mm, addr);

	while (vma) {
		next = vma->vm_next;

		/*
		 * Check if the starting address would match, i.e. it's
		 * a fragment created by mprotect() and/or munmap(), or
		 * otherwise it starts at this address with no hassles.
		 */
		if ((vma->vm_ops == &shm_vm_ops || is_vm_hugetlb_page(vma)) &&
			(vma->vm_start - addr)/PAGE_SIZE == vma->vm_pgoff) {


			size = vma->vm_file->f_dentry->d_inode->i_size;
			do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start);
			/*
			 * We discovered the size of the shm segment, so
			 * break out of here and fall through to the next
			 * loop that uses the size information to stop
			 * searching for matching vma's.
			 */
			retval = 0;
			vma = next;
			break;
		}
		vma = next;
	}

	/*
	 * We need look no further than the maximum address a fragment
	 * could possibly have landed at. Also cast things to loff_t to
	 * prevent overflows and make comparisons vs. equal-width types.
	 */
	while (vma && (loff_t)(vma->vm_end - addr) <= size) {
		next = vma->vm_next;

		/* finding a matching vma now does not alter retval */
		if ((vma->vm_ops == &shm_vm_ops || is_vm_hugetlb_page(vma)) &&
			(vma->vm_start - addr)/PAGE_SIZE == vma->vm_pgoff)

			do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start);
		vma = next;
	}

	up_write(&mm->mmap_sem);
	return retval;
}
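
/*
 * Companion sketch to the shmat() example above: shmdt() enters
 * sys_shmdt(), which unmaps every vma belonging to the attach, including
 * fragments left behind by a partial munmap():
 *
 *	if (shmdt(p) < 0)
 *		perror("shmdt");	// EINVAL if p is not an attach address
 */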

#ifdef CONFIG_PROC_FS
static int sysvipc_shm_proc_show(struct seq_file *s, void *it)
{
	struct shmid_kernel *shp = it;
	char *format;

#define SMALL_STRING "%10d %10d  %4o %10u %5u %5u  %5d %5u %5u %5u %5u %10lu %10lu %10lu\n"
#define BIG_STRING   "%10d %10d  %4o %21u %5u %5u  %5d %5u %5u %5u %5u %10lu %10lu %10lu\n"

	if (sizeof(size_t) <= sizeof(int))
		format = SMALL_STRING;
	else
		format = BIG_STRING;
	return seq_printf(s, format,
			  shp->shm_perm.key,
			  shp->id,
			  shp->shm_flags,
			  shp->shm_segsz,
			  shp->shm_cprid,
			  shp->shm_lprid,
			  is_file_hugepages(shp->shm_file) ? (file_count(shp->shm_file) - 1) : shp->shm_nattch,
			  shp->shm_perm.uid,
			  shp->shm_perm.gid,
			  shp->shm_perm.cuid,
			  shp->shm_perm.cgid,
			  shp->shm_atim,
			  shp->shm_dtim,
			  shp->shm_ctim);
}
#endif