/*
 * linux/ipc/shm.c
 * Copyright (C) 1992, 1993 Krishna Balasubramanian
 *	 Many improvements/fixes by Bruno Haible.
 * Replaced `struct shm_desc' by `struct vm_area_struct', July 1994.
 * Fixed the shm swap deallocation (shm_unuse()), August 1998 Andrea Arcangeli.
 *
 * /proc/sysvipc/shm support (c) 1999 Dragos Acostachioaie <dragos@iname.com>
 * BIGMEM support, Andrea Arcangeli <andrea@suse.de>
 * SMP thread shm, Jean-Luc Boyard <jean-luc.boyard@siemens.fr>
 * HIGHMEM support, Ingo Molnar <mingo@redhat.com>
 * Make shmmax, shmall, shmmni sysctl'able, Christoph Rohland <cr@sap.com>
 * Shared /dev/zero support, Kanoj Sarcar <kanoj@sgi.com>
 * Move the mm functionality over to mm/shmem.c, Christoph Rohland <cr@sap.com>
 *
 */

#include <linux/config.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/shm.h>
#include <linux/init.h>
#include <linux/file.h>
#include <linux/mman.h>
#include <linux/shmem_fs.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/audit.h>
#include <linux/capability.h>
#include <linux/ptrace.h>
#include <linux/seq_file.h>

#include <asm/uaccess.h>

#include "util.h"

static struct file_operations shm_file_operations;
static struct vm_operations_struct shm_vm_ops;

static struct ipc_ids shm_ids;

#define shm_lock(id)	((struct shmid_kernel *)ipc_lock(&shm_ids, id))
#define shm_unlock(shp)	ipc_unlock(&(shp)->shm_perm)
#define shm_get(id)	((struct shmid_kernel *)ipc_get(&shm_ids, id))
#define shm_buildid(id, seq) \
	ipc_buildid(&shm_ids, id, seq)
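
/*
 * Aside (a sketch of behaviour defined in ipc/util.h, not in this file):
 * the id handed back to userspace is not the bare slot index.  ipc_buildid()
 * folds a per-slot sequence counter into the id, so a stale id naming a
 * deleted-and-recycled slot fails shm_checkid() with -EIDRM instead of
 * silently matching the new segment occupying that slot.
 */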

static int newseg (key_t key, int shmflg, size_t size);
static void shm_open (struct vm_area_struct *shmd);
static void shm_close (struct vm_area_struct *shmd);
#ifdef CONFIG_PROC_FS
static int sysvipc_shm_proc_show(struct seq_file *s, void *it);
#endif

size_t	shm_ctlmax = SHMMAX;
size_t	shm_ctlall = SHMALL;
int	shm_ctlmni = SHMMNI;

static int shm_tot; /* total number of shared memory pages */

void __init shm_init (void)
{
	ipc_init_ids(&shm_ids, 1);
	ipc_init_proc_interface("sysvipc/shm",
				"       key      shmid perms       size  cpid  lpid nattch   uid   gid  cuid  cgid      atime      dtime      ctime\n",
				&shm_ids,
				sysvipc_shm_proc_show);
}

static inline int shm_checkid(struct shmid_kernel *s, int id)
{
	if (ipc_checkid(&shm_ids, &s->shm_perm, id))
		return -EIDRM;
	return 0;
}

static inline struct shmid_kernel *shm_rmid(int id)
{
	return (struct shmid_kernel *)ipc_rmid(&shm_ids, id);
}

static inline int shm_addid(struct shmid_kernel *shp)
{
	return ipc_addid(&shm_ids, &shp->shm_perm, shm_ctlmni);
}

static inline void shm_inc (int id) {
	struct shmid_kernel *shp;

	if (!(shp = shm_lock(id)))
		BUG();
	shp->shm_atim = get_seconds();
	shp->shm_lprid = current->tgid;
	shp->shm_nattch++;
	shm_unlock(shp);
}

/* This is called by fork, once for every shm attach. */
static void shm_open (struct vm_area_struct *shmd)
{
	shm_inc (shmd->vm_file->f_dentry->d_inode->i_ino);
}

/*
 * shm_destroy - free the struct shmid_kernel
 *
 * @shp: struct to free
 *
 * It has to be called with shp and shm_ids.sem locked,
 * but returns with shp unlocked and freed.
 */
static void shm_destroy (struct shmid_kernel *shp)
{
	shm_tot -= (shp->shm_segsz + PAGE_SIZE - 1) >> PAGE_SHIFT;
	shm_rmid (shp->id);
	shm_unlock(shp);
	if (!is_file_hugepages(shp->shm_file))
		shmem_lock(shp->shm_file, 0, shp->mlock_user);
	else
		user_shm_unlock(shp->shm_file->f_dentry->d_inode->i_size,
				shp->mlock_user);
	fput (shp->shm_file);
	security_shm_free(shp);
	ipc_rcu_putref(shp);
}
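
/*
 * Caller-side sketch of the locking contract documented above; both call
 * sites in this file (shm_close() and the IPC_RMID path of sys_shmctl())
 * follow this shape:
 *
 *	down(&shm_ids.sem);
 *	shp = shm_lock(id);
 *	...
 *	shm_destroy(shp);	(unlocks and frees shp)
 *	up(&shm_ids.sem);
 */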

/*
 * remove the attach descriptor shmd.
 * free memory for segment if it is marked destroyed.
 * The descriptor has already been removed from the current->mm->mmap list
 * and will later be kfree()d.
 */
static void shm_close (struct vm_area_struct *shmd)
{
	struct file * file = shmd->vm_file;
	int id = file->f_dentry->d_inode->i_ino;
	struct shmid_kernel *shp;

	down (&shm_ids.sem);
	/* remove from the list of attaches of the shm segment */
	if (!(shp = shm_lock(id)))
		BUG();
	shp->shm_lprid = current->tgid;
	shp->shm_dtim = get_seconds();
	shp->shm_nattch--;
	if (shp->shm_nattch == 0 &&
	    shp->shm_perm.mode & SHM_DEST)
		shm_destroy (shp);
	else
		shm_unlock(shp);
	up (&shm_ids.sem);
}

static int shm_mmap(struct file * file, struct vm_area_struct * vma)
{
	int ret;

	ret = shmem_mmap(file, vma);
	if (ret == 0) {
		vma->vm_ops = &shm_vm_ops;
		shm_inc(file->f_dentry->d_inode->i_ino);
	}

	return ret;
}

static struct file_operations shm_file_operations = {
	.mmap	= shm_mmap,
#ifndef CONFIG_MMU
	.get_unmapped_area = shmem_get_unmapped_area,
#endif
};

static struct vm_operations_struct shm_vm_ops = {
	.open	= shm_open,	/* callback for a new vm-area open */
	.close	= shm_close,	/* callback for when the vm-area is released */
	.nopage	= shmem_nopage,
#if defined(CONFIG_NUMA) && defined(CONFIG_SHMEM)
	.set_policy = shmem_set_policy,
	.get_policy = shmem_get_policy,
#endif
};

static int newseg (key_t key, int shmflg, size_t size)
{
	int error;
	struct shmid_kernel *shp;
	int numpages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	struct file * file;
	char name[13];
	int id;

	if (size < SHMMIN || size > shm_ctlmax)
		return -EINVAL;

	if (shm_tot + numpages >= shm_ctlall)
		return -ENOSPC;

	shp = ipc_rcu_alloc(sizeof(*shp));
	if (!shp)
		return -ENOMEM;

	shp->shm_perm.key = key;
	shp->shm_perm.mode = (shmflg & S_IRWXUGO);
	shp->mlock_user = NULL;

	shp->shm_perm.security = NULL;
	error = security_shm_alloc(shp);
	if (error) {
		ipc_rcu_putref(shp);
		return error;
	}

	if (shmflg & SHM_HUGETLB) {
		/* hugetlb_zero_setup takes care of mlock user accounting */
		file = hugetlb_zero_setup(size);
		shp->mlock_user = current->user;
	} else {
		int acctflag = VM_ACCOUNT;
		/*
		 * Do not allow disabling accounting under OVERCOMMIT_NEVER,
		 * even if SHM_NORESERVE asks for it.
		 */
		if ((shmflg & SHM_NORESERVE) &&
				sysctl_overcommit_memory != OVERCOMMIT_NEVER)
			acctflag = 0;
		sprintf (name, "SYSV%08x", key);
		file = shmem_file_setup(name, size, acctflag);
	}
	error = PTR_ERR(file);
	if (IS_ERR(file))
		goto no_file;

	error = -ENOSPC;
	id = shm_addid(shp);
	if (id == -1)
		goto no_id;

	shp->shm_cprid = current->tgid;
	shp->shm_lprid = 0;
	shp->shm_atim = shp->shm_dtim = 0;
	shp->shm_ctim = get_seconds();
	shp->shm_segsz = size;
	shp->shm_nattch = 0;
	shp->id = shm_buildid(id, shp->shm_perm.seq);
	shp->shm_file = file;
	file->f_dentry->d_inode->i_ino = shp->id;

	/* Hugetlb ops would have already been assigned. */
	if (!(shmflg & SHM_HUGETLB))
		file->f_op = &shm_file_operations;

	shm_tot += numpages;
	shm_unlock(shp);
	return shp->id;

no_id:
	fput(file);
no_file:
	security_shm_free(shp);
	ipc_rcu_putref(shp);
	return error;
}

asmlinkage long sys_shmget (key_t key, size_t size, int shmflg)
{
	struct shmid_kernel *shp;
	int err, id = 0;

	down(&shm_ids.sem);
	if (key == IPC_PRIVATE) {
		err = newseg(key, shmflg, size);
	} else if ((id = ipc_findkey(&shm_ids, key)) == -1) {
		if (!(shmflg & IPC_CREAT))
			err = -ENOENT;
		else
			err = newseg(key, shmflg, size);
	} else if ((shmflg & IPC_CREAT) && (shmflg & IPC_EXCL)) {
		err = -EEXIST;
	} else {
		shp = shm_lock(id);
		if (shp == NULL)
			BUG();
		if (shp->shm_segsz < size)
			err = -EINVAL;
		else if (ipcperms(&shp->shm_perm, shmflg))
			err = -EACCES;
		else {
			int shmid = shm_buildid(id, shp->shm_perm.seq);
			err = security_shm_associate(shp, shmflg);
			if (!err)
				err = shmid;
		}
		shm_unlock(shp);
	}
	up(&shm_ids.sem);

	return err;
}
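
/*
 * For orientation, the userspace view of the paths above, as a minimal
 * illustrative sketch (standard SysV shm calls; the key 0x1234 is a
 * made-up example value):
 *
 *	id = shmget(0x1234, 4096, IPC_CREAT | IPC_EXCL | 0600);
 *		creates a fresh segment via newseg(), or fails with
 *		-EEXIST because the key is already in use;
 *	id = shmget(0x1234, 4096, 0600);
 *		finds the key and returns the existing id, provided the
 *		requested size fits and ipcperms() agrees.
 */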

static inline unsigned long copy_shmid_to_user(void __user *buf, struct shmid64_ds *in, int version)
{
	switch (version) {
	case IPC_64:
		return copy_to_user(buf, in, sizeof(*in));
	case IPC_OLD:
	    {
		struct shmid_ds out;

		ipc64_perm_to_ipc_perm(&in->shm_perm, &out.shm_perm);
		out.shm_segsz	= in->shm_segsz;
		out.shm_atime	= in->shm_atime;
		out.shm_dtime	= in->shm_dtime;
		out.shm_ctime	= in->shm_ctime;
		out.shm_cpid	= in->shm_cpid;
		out.shm_lpid	= in->shm_lpid;
		out.shm_nattch	= in->shm_nattch;

		return copy_to_user(buf, &out, sizeof(out));
	    }
	default:
		return -EINVAL;
	}
}

struct shm_setbuf {
	uid_t	uid;
	gid_t	gid;
	mode_t	mode;
};

static inline unsigned long copy_shmid_from_user(struct shm_setbuf *out, void __user *buf, int version)
{
	switch (version) {
	case IPC_64:
	    {
		struct shmid64_ds tbuf;

		if (copy_from_user(&tbuf, buf, sizeof(tbuf)))
			return -EFAULT;

		out->uid	= tbuf.shm_perm.uid;
		out->gid	= tbuf.shm_perm.gid;
		out->mode	= tbuf.shm_perm.mode;

		return 0;
	    }
	case IPC_OLD:
	    {
		struct shmid_ds tbuf_old;

		if (copy_from_user(&tbuf_old, buf, sizeof(tbuf_old)))
			return -EFAULT;

		out->uid	= tbuf_old.shm_perm.uid;
		out->gid	= tbuf_old.shm_perm.gid;
		out->mode	= tbuf_old.shm_perm.mode;

		return 0;
	    }
	default:
		return -EINVAL;
	}
}

static inline unsigned long copy_shminfo_to_user(void __user *buf, struct shminfo64 *in, int version)
{
	switch (version) {
	case IPC_64:
		return copy_to_user(buf, in, sizeof(*in));
	case IPC_OLD:
	    {
		struct shminfo out;

		if (in->shmmax > INT_MAX)
			out.shmmax = INT_MAX;
		else
			out.shmmax = (int)in->shmmax;

		out.shmmin	= in->shmmin;
		out.shmmni	= in->shmmni;
		out.shmseg	= in->shmseg;
		out.shmall	= in->shmall;

		return copy_to_user(buf, &out, sizeof(out));
	    }
	default:
		return -EINVAL;
	}
}

static void shm_get_stat(unsigned long *rss, unsigned long *swp)
{
	int i;

	*rss = 0;
	*swp = 0;

	for (i = 0; i <= shm_ids.max_id; i++) {
		struct shmid_kernel *shp;
		struct inode *inode;

		shp = shm_get(i);
		if (!shp)
			continue;

		inode = shp->shm_file->f_dentry->d_inode;

		if (is_file_hugepages(shp->shm_file)) {
			struct address_space *mapping = inode->i_mapping;
			*rss += (HPAGE_SIZE/PAGE_SIZE)*mapping->nrpages;
		} else {
			struct shmem_inode_info *info = SHMEM_I(inode);
			spin_lock(&info->lock);
			*rss += inode->i_mapping->nrpages;
			*swp += info->swapped;
			spin_unlock(&info->lock);
		}
	}
}

asmlinkage long sys_shmctl (int shmid, int cmd, struct shmid_ds __user *buf)
{
	struct shm_setbuf setbuf;
	struct shmid_kernel *shp;
	int err, version;

	if (cmd < 0 || shmid < 0) {
		err = -EINVAL;
		goto out;
	}

	version = ipc_parse_version(&cmd);

	switch (cmd) { /* replace with proc interface ? */
	case IPC_INFO:
	{
		struct shminfo64 shminfo;

		err = security_shm_shmctl(NULL, cmd);
		if (err)
			return err;

		memset(&shminfo, 0, sizeof(shminfo));
		shminfo.shmmni = shminfo.shmseg = shm_ctlmni;
		shminfo.shmmax = shm_ctlmax;
		shminfo.shmall = shm_ctlall;

		shminfo.shmmin = SHMMIN;
		if (copy_shminfo_to_user (buf, &shminfo, version))
			return -EFAULT;
		/* reading an integer is always atomic */
		err = shm_ids.max_id;
		if (err < 0)
			err = 0;
		goto out;
	}
	case SHM_INFO:
	{
		struct shm_info shm_info;

		err = security_shm_shmctl(NULL, cmd);
		if (err)
			return err;

		memset(&shm_info, 0, sizeof(shm_info));
		down(&shm_ids.sem);
		shm_info.used_ids = shm_ids.in_use;
		shm_get_stat (&shm_info.shm_rss, &shm_info.shm_swp);
		shm_info.shm_tot = shm_tot;
		shm_info.swap_attempts = 0;
		shm_info.swap_successes = 0;
		err = shm_ids.max_id;
		up(&shm_ids.sem);
		if (copy_to_user (buf, &shm_info, sizeof(shm_info))) {
			err = -EFAULT;
			goto out;
		}

		err = err < 0 ? 0 : err;
		goto out;
	}
	case SHM_STAT:
	case IPC_STAT:
	{
		struct shmid64_ds tbuf;
		int result;
		memset(&tbuf, 0, sizeof(tbuf));
		shp = shm_lock(shmid);
		if (shp == NULL) {
			err = -EINVAL;
			goto out;
		} else if (cmd == SHM_STAT) {
			err = -EINVAL;
			if (shmid > shm_ids.max_id)
				goto out_unlock;
			result = shm_buildid(shmid, shp->shm_perm.seq);
		} else {
			err = shm_checkid(shp, shmid);
			if (err)
				goto out_unlock;
			result = 0;
		}
		err = -EACCES;
		if (ipcperms (&shp->shm_perm, S_IRUGO))
			goto out_unlock;
		err = security_shm_shmctl(shp, cmd);
		if (err)
			goto out_unlock;
		kernel_to_ipc64_perm(&shp->shm_perm, &tbuf.shm_perm);
		tbuf.shm_segsz	= shp->shm_segsz;
		tbuf.shm_atime	= shp->shm_atim;
		tbuf.shm_dtime	= shp->shm_dtim;
		tbuf.shm_ctime	= shp->shm_ctim;
		tbuf.shm_cpid	= shp->shm_cprid;
		tbuf.shm_lpid	= shp->shm_lprid;
		if (!is_file_hugepages(shp->shm_file))
			tbuf.shm_nattch = shp->shm_nattch;
		else
			tbuf.shm_nattch = file_count(shp->shm_file) - 1;
		shm_unlock(shp);
		if (copy_shmid_to_user (buf, &tbuf, version))
			err = -EFAULT;
		else
			err = result;
		goto out;
	}
	case SHM_LOCK:
	case SHM_UNLOCK:
	{
		shp = shm_lock(shmid);
		if (shp == NULL) {
			err = -EINVAL;
			goto out;
		}
		err = shm_checkid(shp, shmid);
		if (err)
			goto out_unlock;

		if (!capable(CAP_IPC_LOCK)) {
			err = -EPERM;
			if (current->euid != shp->shm_perm.uid &&
			    current->euid != shp->shm_perm.cuid)
				goto out_unlock;
			if (cmd == SHM_LOCK &&
			    !current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur)
				goto out_unlock;
		}

		err = security_shm_shmctl(shp, cmd);
		if (err)
			goto out_unlock;

		if (cmd == SHM_LOCK) {
			struct user_struct * user = current->user;
			if (!is_file_hugepages(shp->shm_file)) {
				err = shmem_lock(shp->shm_file, 1, user);
				if (!err) {
					shp->shm_perm.mode |= SHM_LOCKED;
					shp->mlock_user = user;
				}
			}
		} else if (!is_file_hugepages(shp->shm_file)) {
			shmem_lock(shp->shm_file, 0, shp->mlock_user);
			shp->shm_perm.mode &= ~SHM_LOCKED;
			shp->mlock_user = NULL;
		}
		shm_unlock(shp);
		goto out;
	}
	case IPC_RMID:
	{
		/*
		 * We cannot simply remove the file. The SVID states
		 * that the block remains until the last person
		 * detaches from it, then is deleted. A shmat() on
		 * an RMID segment is legal in older Linux and if
		 * we change it apps break...
		 *
		 * Instead we set a destroyed flag, and then blow
		 * the name away when the usage hits zero.
		 */
		down(&shm_ids.sem);
		shp = shm_lock(shmid);
		err = -EINVAL;
		if (shp == NULL)
			goto out_up;
		err = shm_checkid(shp, shmid);
		if (err)
			goto out_unlock_up;

		if (current->euid != shp->shm_perm.uid &&
		    current->euid != shp->shm_perm.cuid &&
		    !capable(CAP_SYS_ADMIN)) {
			err = -EPERM;
			goto out_unlock_up;
		}

		err = security_shm_shmctl(shp, cmd);
		if (err)
			goto out_unlock_up;

		if (shp->shm_nattch) {
			shp->shm_perm.mode |= SHM_DEST;
			/* Do not find it any more */
			shp->shm_perm.key = IPC_PRIVATE;
			shm_unlock(shp);
		} else
			shm_destroy (shp);
		up(&shm_ids.sem);
		goto out;
	}

	case IPC_SET:
	{
		if (copy_shmid_from_user (&setbuf, buf, version)) {
			err = -EFAULT;
			goto out;
		}
		if ((err = audit_ipc_perms(0, setbuf.uid, setbuf.gid, setbuf.mode)))
			return err;
		down(&shm_ids.sem);
		shp = shm_lock(shmid);
		err = -EINVAL;
		if (shp == NULL)
			goto out_up;
		err = shm_checkid(shp, shmid);
		if (err)
			goto out_unlock_up;
		err = -EPERM;
		if (current->euid != shp->shm_perm.uid &&
		    current->euid != shp->shm_perm.cuid &&
		    !capable(CAP_SYS_ADMIN)) {
			goto out_unlock_up;
		}

		err = security_shm_shmctl(shp, cmd);
		if (err)
			goto out_unlock_up;

		shp->shm_perm.uid = setbuf.uid;
		shp->shm_perm.gid = setbuf.gid;
		shp->shm_perm.mode = (shp->shm_perm.mode & ~S_IRWXUGO)
			| (setbuf.mode & S_IRWXUGO);
		shp->shm_ctim = get_seconds();
		break;
	}

	default:
		err = -EINVAL;
		goto out;
	}

	err = 0;
out_unlock_up:
	shm_unlock(shp);
out_up:
	up(&shm_ids.sem);
	goto out;
out_unlock:
	shm_unlock(shp);
out:
	return err;
}
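
/*
 * Userspace sketch of the IPC_STAT path above (illustrative only; the
 * values come straight from the shmid_kernel fields copied out in the
 * SHM_STAT/IPC_STAT case):
 *
 *	struct shmid_ds ds;
 *	if (shmctl(id, IPC_STAT, &ds) == 0)
 *		printf("size=%lu nattch=%lu\n",
 *		       (unsigned long)ds.shm_segsz,
 *		       (unsigned long)ds.shm_nattch);
 */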

/*
 * Fix shmaddr, allocate descriptor, map shm, add attach descriptor to lists.
 *
 * NOTE! Despite the name, this is NOT a direct system call entrypoint. The
 * "raddr" thing points to kernel space, and there has to be a wrapper around
 * this.
 */
long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr)
{
	struct shmid_kernel *shp;
	unsigned long addr;
	unsigned long size;
	struct file * file;
	int err;
	unsigned long flags;
	unsigned long prot;
	unsigned long o_flags;
	int acc_mode;
	void *user_addr;

	if (shmid < 0) {
		err = -EINVAL;
		goto out;
	} else if ((addr = (ulong)shmaddr)) {
		if (addr & (SHMLBA-1)) {
			if (shmflg & SHM_RND)
				addr &= ~(SHMLBA-1);	/* round down */
			else
#ifndef __ARCH_FORCE_SHMLBA
				if (addr & ~PAGE_MASK)
#endif
					return -EINVAL;
		}
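		/*
		 * Worked example of the rounding above (numbers purely
		 * illustrative): if SHMLBA were 0x4000, an shmaddr of
		 * 0x12345 with SHM_RND set becomes
		 * 0x12345 & ~0x3fff == 0x10000; without SHM_RND a
		 * SHMLBA-misaligned address is rejected with -EINVAL.
		 */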
		flags = MAP_SHARED | MAP_FIXED;
	} else {
		if ((shmflg & SHM_REMAP))
			return -EINVAL;

		flags = MAP_SHARED;
	}

	if (shmflg & SHM_RDONLY) {
		prot = PROT_READ;
		o_flags = O_RDONLY;
		acc_mode = S_IRUGO;
	} else {
		prot = PROT_READ | PROT_WRITE;
		o_flags = O_RDWR;
		acc_mode = S_IRUGO | S_IWUGO;
	}
	if (shmflg & SHM_EXEC) {
		prot |= PROT_EXEC;
		acc_mode |= S_IXUGO;
	}

	/*
	 * We cannot rely on the fs check since SYSV IPC does have an
	 * additional creator id...
	 */
	shp = shm_lock(shmid);
	if (shp == NULL) {
		err = -EINVAL;
		goto out;
	}
	err = shm_checkid(shp, shmid);
	if (err) {
		shm_unlock(shp);
		goto out;
	}
	if (ipcperms(&shp->shm_perm, acc_mode)) {
		shm_unlock(shp);
		err = -EACCES;
		goto out;
	}

	err = security_shm_shmat(shp, shmaddr, shmflg);
	if (err) {
		shm_unlock(shp);
		return err;
	}

	file = shp->shm_file;
	size = i_size_read(file->f_dentry->d_inode);
	shp->shm_nattch++;
	shm_unlock(shp);

	down_write(&current->mm->mmap_sem);
	if (addr && !(shmflg & SHM_REMAP)) {
		user_addr = ERR_PTR(-EINVAL);
		if (find_vma_intersection(current->mm, addr, addr + size))
			goto invalid;
		/*
		 * If shm segment goes below stack, make sure there is some
		 * space left for the stack to grow (at least 4 pages).
		 */
		if (addr < current->mm->start_stack &&
		    addr > current->mm->start_stack - size - PAGE_SIZE * 5)
			goto invalid;
	}

	user_addr = (void *) do_mmap (file, addr, size, prot, flags, 0);

invalid:
	up_write(&current->mm->mmap_sem);

	down (&shm_ids.sem);
	if (!(shp = shm_lock(shmid)))
		BUG();
	shp->shm_nattch--;
	if (shp->shm_nattch == 0 &&
	    shp->shm_perm.mode & SHM_DEST)
		shm_destroy (shp);
	else
		shm_unlock(shp);
	up (&shm_ids.sem);

	*raddr = (unsigned long) user_addr;
	err = 0;
	if (IS_ERR(user_addr))
		err = PTR_ERR(user_addr);
out:
	return err;
}

asmlinkage long sys_shmat(int shmid, char __user *shmaddr, int shmflg)
{
	unsigned long ret;
	long err;

	err = do_shmat(shmid, shmaddr, shmflg, &ret);
	if (err)
		return err;
	force_successful_syscall_return();
	return (long)ret;
}

/*
 * detach and kill segment if marked destroyed.
 * The work is done in shm_close.
 */
asmlinkage long sys_shmdt(char __user *shmaddr)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma, *next;
	unsigned long addr = (unsigned long)shmaddr;
	loff_t size = 0;
	int retval = -EINVAL;

	down_write(&mm->mmap_sem);

	/*
	 * This function tries to be smart and unmap shm segments that
	 * were modified by partial mlock or munmap calls:
	 * - It first determines the size of the shm segment that should be
	 *   unmapped: It searches for a vma that is backed by shm and that
	 *   started at address shmaddr. It records its size and then unmaps
	 *   it.
	 * - Then it unmaps all shm vmas that started at shmaddr and that
	 *   are within the initially determined size.
	 * Errors from do_munmap are ignored: the function only fails if
	 * it's called with invalid parameters or if it's called to unmap
	 * a part of a vma. Both calls in this function are for full vmas,
	 * the parameters are directly copied from the vma itself and always
	 * valid - therefore do_munmap cannot fail. (famous last words?)
	 */
	/*
	 * If it had been mremap()'d, the starting address would not
	 * match the usual checks anyway. So assume all vma's are
	 * above the starting address given.
	 */
	vma = find_vma(mm, addr);

	while (vma) {
		next = vma->vm_next;

		/*
		 * Check if the starting address would match, i.e. it's
		 * a fragment created by mprotect() and/or munmap(), or
		 * otherwise it starts at this address with no hassles.
		 */
		if ((vma->vm_ops == &shm_vm_ops || is_vm_hugetlb_page(vma)) &&
			(vma->vm_start - addr)/PAGE_SIZE == vma->vm_pgoff) {

			size = vma->vm_file->f_dentry->d_inode->i_size;
			do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start);
			/*
			 * We discovered the size of the shm segment, so
			 * break out of here and fall through to the next
			 * loop that uses the size information to stop
			 * searching for matching vma's.
			 */
			retval = 0;
			vma = next;
			break;
		}
		vma = next;
	}

	/*
	 * We need look no further than the maximum address a fragment
	 * could possibly have landed at. Also cast things to loff_t to
	 * prevent overflows and make comparisons vs. equal-width types.
	 */
	size = PAGE_ALIGN(size);
	while (vma && (loff_t)(vma->vm_end - addr) <= size) {
		next = vma->vm_next;

		/* finding a matching vma now does not alter retval */
		if ((vma->vm_ops == &shm_vm_ops || is_vm_hugetlb_page(vma)) &&
			(vma->vm_start - addr)/PAGE_SIZE == vma->vm_pgoff)

			do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start);
		vma = next;
	}

	up_write(&mm->mmap_sem);
	return retval;
}
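
/*
 * End-to-end userspace sketch tying sys_shmat()/sys_shmdt() to the
 * deferred-destroy behaviour of IPC_RMID described in sys_shmctl()
 * (purely illustrative):
 *
 *	int id = shmget(IPC_PRIVATE, 4096, IPC_CREAT | 0600);
 *	char *p = shmat(id, NULL, 0);
 *	shmctl(id, IPC_RMID, NULL);	marks SHM_DEST, memory stays
 *	p[0] = 1;			segment still usable while attached
 *	shmdt(p);			last detach: shm_close() -> shm_destroy()
 */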

#ifdef CONFIG_PROC_FS
static int sysvipc_shm_proc_show(struct seq_file *s, void *it)
{
	struct shmid_kernel *shp = it;
	char *format;

#define SMALL_STRING "%10d %10d  %4o %10u %5u %5u  %5d %5u %5u %5u %5u %10lu %10lu %10lu\n"
#define BIG_STRING "%10d %10d  %4o %21u %5u %5u  %5d %5u %5u %5u %5u %10lu %10lu %10lu\n"

	if (sizeof(size_t) <= sizeof(int))
		format = SMALL_STRING;
	else
		format = BIG_STRING;
	return seq_printf(s, format,
			  shp->shm_perm.key,
			  shp->id,
			  shp->shm_perm.mode,
			  shp->shm_segsz,
			  shp->shm_cprid,
			  shp->shm_lprid,
			  is_file_hugepages(shp->shm_file) ? (file_count(shp->shm_file) - 1) : shp->shm_nattch,
			  shp->shm_perm.uid,
			  shp->shm_perm.gid,
			  shp->shm_perm.cuid,
			  shp->shm_perm.cgid,
			  shp->shm_atim,
			  shp->shm_dtim,
			  shp->shm_ctim);
}
#endif