/*
 * linux/ipc/shm.c
 * Copyright (C) 1992, 1993 Krishna Balasubramanian
 *	 Many improvements/fixes by Bruno Haible.
 * Replaced `struct shm_desc' by `struct vm_area_struct', July 1994.
 * Fixed the shm swap deallocation (shm_unuse()), August 1998 Andrea Arcangeli.
 *
 * /proc/sysvipc/shm support (c) 1999 Dragos Acostachioaie <dragos@iname.com>
 * BIGMEM support, Andrea Arcangeli <andrea@suse.de>
 * SMP thread shm, Jean-Luc Boyard <jean-luc.boyard@siemens.fr>
 * HIGHMEM support, Ingo Molnar <mingo@redhat.com>
 * Make shmmax, shmall, shmmni sysctl'able, Christoph Rohland <cr@sap.com>
 * Shared /dev/zero support, Kanoj Sarcar <kanoj@sgi.com>
 * Move the mm functionality over to mm/shmem.c, Christoph Rohland <cr@sap.com>
 *
 */

#include <linux/config.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/shm.h>
#include <linux/init.h>
#include <linux/file.h>
#include <linux/mman.h>
#include <linux/shmem_fs.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/audit.h>
#include <linux/ptrace.h>
#include <linux/seq_file.h>

#include <asm/uaccess.h>

#include "util.h"

static struct file_operations shm_file_operations;
static struct vm_operations_struct shm_vm_ops;

static struct ipc_ids shm_ids;

#define shm_lock(id)	((struct shmid_kernel *)ipc_lock(&shm_ids, id))
#define shm_unlock(shp)	ipc_unlock(&(shp)->shm_perm)
#define shm_get(id)	((struct shmid_kernel *)ipc_get(&shm_ids, id))
#define shm_buildid(id, seq) \
        ipc_buildid(&shm_ids, id, seq)

static int newseg (key_t key, int shmflg, size_t size);
static void shm_open (struct vm_area_struct *shmd);
static void shm_close (struct vm_area_struct *shmd);
#ifdef CONFIG_PROC_FS
static int sysvipc_shm_proc_show(struct seq_file *s, void *it);
#endif

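/*
 * Runtime limits: shm_ctlmax bounds the size of a single segment,
 * shm_ctlall the total number of shm pages, shm_ctlmni the number of
 * segments.  They are adjustable via the kernel.shmmax/shmall/shmmni
 * sysctls.
 */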
size_t shm_ctlmax = SHMMAX;
size_t shm_ctlall = SHMALL;
int shm_ctlmni = SHMMNI;

static int shm_tot; /* total number of shared memory pages */

void __init shm_init (void)
{
        ipc_init_ids(&shm_ids, 1);
        ipc_init_proc_interface("sysvipc/shm",
                        " key shmid perms size cpid lpid nattch uid gid cuid cgid atime dtime ctime\n",
                        &shm_ids,
                        sysvipc_shm_proc_show);
}

static inline int shm_checkid(struct shmid_kernel *s, int id)
{
        if (ipc_checkid(&shm_ids, &s->shm_perm, id))
                return -EIDRM;
        return 0;
}

static inline struct shmid_kernel *shm_rmid(int id)
{
        return (struct shmid_kernel *)ipc_rmid(&shm_ids, id);
}

static inline int shm_addid(struct shmid_kernel *shp)
{
        return ipc_addid(&shm_ids, &shp->shm_perm, shm_ctlmni);
}


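/*
 * shm_inc - mark the segment as freshly attached
 *
 * Bumps the attach count and records the attaching task and attach time.
 * Called from shm_mmap() and, via shm_open(), whenever a mapping of the
 * segment is duplicated (e.g. across fork()).
 */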
static inline void shm_inc (int id) {
        struct shmid_kernel *shp;

        if(!(shp = shm_lock(id)))
                BUG();
        shp->shm_atim = get_seconds();
        shp->shm_lprid = current->tgid;
        shp->shm_nattch++;
        shm_unlock(shp);
}

/* This is called by fork, once for every shm attach. */
static void shm_open (struct vm_area_struct *shmd)
{
        shm_inc (shmd->vm_file->f_dentry->d_inode->i_ino);
}

/*
 * shm_destroy - free the struct shmid_kernel
 *
 * @shp: struct to free
 *
 * It has to be called with shp and shm_ids.sem locked,
 * but returns with shp unlocked and freed.
 */
static void shm_destroy (struct shmid_kernel *shp)
{
        shm_tot -= (shp->shm_segsz + PAGE_SIZE - 1) >> PAGE_SHIFT;
        shm_rmid (shp->id);
        shm_unlock(shp);
        if (!is_file_hugepages(shp->shm_file))
                shmem_lock(shp->shm_file, 0, shp->mlock_user);
        else
                user_shm_unlock(shp->shm_file->f_dentry->d_inode->i_size,
                                shp->mlock_user);
        fput (shp->shm_file);
        security_shm_free(shp);
        ipc_rcu_putref(shp);
}

/*
 * remove the attach descriptor shmd.
 * free memory for segment if it is marked destroyed.
 * The descriptor has already been removed from the current->mm->mmap list
 * and will later be kfree()d.
 */
static void shm_close (struct vm_area_struct *shmd)
{
        struct file * file = shmd->vm_file;
        int id = file->f_dentry->d_inode->i_ino;
        struct shmid_kernel *shp;

        down (&shm_ids.sem);
        /* remove from the list of attaches of the shm segment */
        if(!(shp = shm_lock(id)))
                BUG();
        shp->shm_lprid = current->tgid;
        shp->shm_dtim = get_seconds();
        shp->shm_nattch--;
        if(shp->shm_nattch == 0 &&
           shp->shm_perm.mode & SHM_DEST)
                shm_destroy (shp);
        else
                shm_unlock(shp);
        up (&shm_ids.sem);
}

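/*
 * mmap handler for the shm-backed file: let shmem do the real work, then
 * install shm_vm_ops so open/close keep the attach accounting up to date.
 */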
static int shm_mmap(struct file * file, struct vm_area_struct * vma)
{
        int ret;

        ret = shmem_mmap(file, vma);
        if (ret == 0) {
                vma->vm_ops = &shm_vm_ops;
                shm_inc(file->f_dentry->d_inode->i_ino);
        }

        return ret;
}

static struct file_operations shm_file_operations = {
        .mmap	= shm_mmap,
#ifndef CONFIG_MMU
        .get_unmapped_area = shmem_get_unmapped_area,
#endif
};

static struct vm_operations_struct shm_vm_ops = {
        .open	= shm_open,	/* callback for a new vm-area open */
        .close	= shm_close,	/* callback for when the vm-area is released */
        .nopage	= shmem_nopage,
#if defined(CONFIG_NUMA) && defined(CONFIG_SHMEM)
        .set_policy = shmem_set_policy,
        .get_policy = shmem_get_policy,
#endif
};

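/*
 * newseg - create a new shared memory segment
 *
 * Allocates the shmid_kernel, charges the pages against shm_ctlall, and
 * backs the segment with either a shmem file or a hugetlbfs file
 * (SHM_HUGETLB).  Returns the new shm id on success or a negative errno.
 * Caller must hold shm_ids.sem.
 */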
static int newseg (key_t key, int shmflg, size_t size)
{
        int error;
        struct shmid_kernel *shp;
        int numpages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
        struct file * file;
        char name[13];
        int id;

        if (size < SHMMIN || size > shm_ctlmax)
                return -EINVAL;

        if (shm_tot + numpages >= shm_ctlall)
                return -ENOSPC;

        shp = ipc_rcu_alloc(sizeof(*shp));
        if (!shp)
                return -ENOMEM;

        shp->shm_perm.key = key;
        shp->shm_perm.mode = (shmflg & S_IRWXUGO);
        shp->mlock_user = NULL;

        shp->shm_perm.security = NULL;
        error = security_shm_alloc(shp);
        if (error) {
                ipc_rcu_putref(shp);
                return error;
        }

        if (shmflg & SHM_HUGETLB) {
                /* hugetlb_zero_setup takes care of mlock user accounting */
                file = hugetlb_zero_setup(size);
                shp->mlock_user = current->user;
        } else {
                int acctflag = VM_ACCOUNT;
                /*
                 * Do not allow unaccounted (SHM_NORESERVE) allocations
                 * when the overcommit policy is OVERCOMMIT_NEVER, even
                 * if the caller asks for them.
                 */
                if ((shmflg & SHM_NORESERVE) &&
                                sysctl_overcommit_memory != OVERCOMMIT_NEVER)
                        acctflag = 0;
                sprintf (name, "SYSV%08x", key);
                file = shmem_file_setup(name, size, acctflag);
        }
        error = PTR_ERR(file);
        if (IS_ERR(file))
                goto no_file;

        error = -ENOSPC;
        id = shm_addid(shp);
        if(id == -1)
                goto no_id;

        shp->shm_cprid = current->tgid;
        shp->shm_lprid = 0;
        shp->shm_atim = shp->shm_dtim = 0;
        shp->shm_ctim = get_seconds();
        shp->shm_segsz = size;
        shp->shm_nattch = 0;
        shp->id = shm_buildid(id, shp->shm_perm.seq);
        shp->shm_file = file;
        file->f_dentry->d_inode->i_ino = shp->id;

        /* Hugetlb ops would have already been assigned. */
        if (!(shmflg & SHM_HUGETLB))
                file->f_op = &shm_file_operations;

        shm_tot += numpages;
        shm_unlock(shp);
        return shp->id;

no_id:
        fput(file);
no_file:
        security_shm_free(shp);
        ipc_rcu_putref(shp);
        return error;
}

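/*
 * sys_shmget - find or create a segment for `key'
 *
 * IPC_PRIVATE always creates a fresh segment; otherwise the key is looked
 * up and either reused (after size, permission and LSM checks) or created
 * when IPC_CREAT is set.  IPC_CREAT|IPC_EXCL on an existing key fails with
 * -EEXIST.  Illustrative userspace sequence (not part of this file):
 *
 *	int id = shmget(IPC_PRIVATE, 4096, IPC_CREAT | 0600);
 *	char *p = shmat(id, NULL, 0);
 *	...
 *	shmdt(p);
 *	shmctl(id, IPC_RMID, NULL);
 */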
asmlinkage long sys_shmget (key_t key, size_t size, int shmflg)
{
        struct shmid_kernel *shp;
        int err, id = 0;

        down(&shm_ids.sem);
        if (key == IPC_PRIVATE) {
                err = newseg(key, shmflg, size);
        } else if ((id = ipc_findkey(&shm_ids, key)) == -1) {
                if (!(shmflg & IPC_CREAT))
                        err = -ENOENT;
                else
                        err = newseg(key, shmflg, size);
        } else if ((shmflg & IPC_CREAT) && (shmflg & IPC_EXCL)) {
                err = -EEXIST;
        } else {
                shp = shm_lock(id);
                if(shp==NULL)
                        BUG();
                if (shp->shm_segsz < size)
                        err = -EINVAL;
                else if (ipcperms(&shp->shm_perm, shmflg))
                        err = -EACCES;
                else {
                        int shmid = shm_buildid(id, shp->shm_perm.seq);
                        err = security_shm_associate(shp, shmflg);
                        if (!err)
                                err = shmid;
                }
                shm_unlock(shp);
        }
        up(&shm_ids.sem);

        return err;
}

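/*
 * The copy_*_to/from_user helpers below translate between the in-kernel
 * shmid64_ds layout and whichever ABI the caller selected: IPC_64 is the
 * native layout, IPC_OLD the legacy shmid_ds/shminfo layout kept for old
 * binaries.
 */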
static inline unsigned long copy_shmid_to_user(void __user *buf, struct shmid64_ds *in, int version)
{
        switch(version) {
        case IPC_64:
                return copy_to_user(buf, in, sizeof(*in));
        case IPC_OLD:
            {
                struct shmid_ds out;

                ipc64_perm_to_ipc_perm(&in->shm_perm, &out.shm_perm);
                out.shm_segsz	= in->shm_segsz;
                out.shm_atime	= in->shm_atime;
                out.shm_dtime	= in->shm_dtime;
                out.shm_ctime	= in->shm_ctime;
                out.shm_cpid	= in->shm_cpid;
                out.shm_lpid	= in->shm_lpid;
                out.shm_nattch	= in->shm_nattch;

                return copy_to_user(buf, &out, sizeof(out));
            }
        default:
                return -EINVAL;
        }
}

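/* The subset of fields that IPC_SET is allowed to change. */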
struct shm_setbuf {
        uid_t	uid;
        gid_t	gid;
        mode_t	mode;
};

static inline unsigned long copy_shmid_from_user(struct shm_setbuf *out, void __user *buf, int version)
{
        switch(version) {
        case IPC_64:
            {
                struct shmid64_ds tbuf;

                if (copy_from_user(&tbuf, buf, sizeof(tbuf)))
                        return -EFAULT;

                out->uid	= tbuf.shm_perm.uid;
                out->gid	= tbuf.shm_perm.gid;
                out->mode	= tbuf.shm_perm.mode;

                return 0;
            }
        case IPC_OLD:
            {
                struct shmid_ds tbuf_old;

                if (copy_from_user(&tbuf_old, buf, sizeof(tbuf_old)))
                        return -EFAULT;

                out->uid	= tbuf_old.shm_perm.uid;
                out->gid	= tbuf_old.shm_perm.gid;
                out->mode	= tbuf_old.shm_perm.mode;

                return 0;
            }
        default:
                return -EINVAL;
        }
}

static inline unsigned long copy_shminfo_to_user(void __user *buf, struct shminfo64 *in, int version)
{
        switch(version) {
        case IPC_64:
                return copy_to_user(buf, in, sizeof(*in));
        case IPC_OLD:
            {
                struct shminfo out;

                if(in->shmmax > INT_MAX)
                        out.shmmax = INT_MAX;
                else
                        out.shmmax = (int)in->shmmax;

                out.shmmin	= in->shmmin;
                out.shmmni	= in->shmmni;
                out.shmseg	= in->shmseg;
                out.shmall	= in->shmall;

                return copy_to_user(buf, &out, sizeof(out));
            }
        default:
                return -EINVAL;
        }
}

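/*
 * Sum resident and swapped-out pages over every live segment for SHM_INFO.
 * Hugetlb segments contribute their mapped pages scaled to small-page
 * units and are never swapped.  Called with shm_ids.sem held.
 */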
static void shm_get_stat(unsigned long *rss, unsigned long *swp)
{
        int i;

        *rss = 0;
        *swp = 0;

        for (i = 0; i <= shm_ids.max_id; i++) {
                struct shmid_kernel *shp;
                struct inode *inode;

                shp = shm_get(i);
                if(!shp)
                        continue;

                inode = shp->shm_file->f_dentry->d_inode;

                if (is_file_hugepages(shp->shm_file)) {
                        struct address_space *mapping = inode->i_mapping;
                        *rss += (HPAGE_SIZE/PAGE_SIZE)*mapping->nrpages;
                } else {
                        struct shmem_inode_info *info = SHMEM_I(inode);
                        spin_lock(&info->lock);
                        *rss += inode->i_mapping->nrpages;
                        *swp += info->swapped;
                        spin_unlock(&info->lock);
                }
        }
}

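/*
 * sys_shmctl - segment control operations
 *
 * Handles the read-only queries (IPC_INFO, SHM_INFO, IPC_STAT, SHM_STAT),
 * locking (SHM_LOCK/SHM_UNLOCK), deferred removal (IPC_RMID) and owner/mode
 * updates (IPC_SET), each gated by the relevant permission and LSM checks.
 */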
asmlinkage long sys_shmctl (int shmid, int cmd, struct shmid_ds __user *buf)
{
        struct shm_setbuf setbuf;
        struct shmid_kernel *shp;
        int err, version;

        if (cmd < 0 || shmid < 0) {
                err = -EINVAL;
                goto out;
        }

        version = ipc_parse_version(&cmd);

        switch (cmd) { /* replace with proc interface ? */
        case IPC_INFO:
        {
                struct shminfo64 shminfo;

                err = security_shm_shmctl(NULL, cmd);
                if (err)
                        return err;

                memset(&shminfo, 0, sizeof(shminfo));
                shminfo.shmmni = shminfo.shmseg = shm_ctlmni;
                shminfo.shmmax = shm_ctlmax;
                shminfo.shmall = shm_ctlall;

                shminfo.shmmin = SHMMIN;
                if(copy_shminfo_to_user (buf, &shminfo, version))
                        return -EFAULT;
                /* reading an integer is always atomic */
                err = shm_ids.max_id;
                if(err < 0)
                        err = 0;
                goto out;
        }
        case SHM_INFO:
        {
                struct shm_info shm_info;

                err = security_shm_shmctl(NULL, cmd);
                if (err)
                        return err;

                memset(&shm_info, 0, sizeof(shm_info));
                down(&shm_ids.sem);
                shm_info.used_ids = shm_ids.in_use;
                shm_get_stat (&shm_info.shm_rss, &shm_info.shm_swp);
                shm_info.shm_tot = shm_tot;
                shm_info.swap_attempts = 0;
                shm_info.swap_successes = 0;
                err = shm_ids.max_id;
                up(&shm_ids.sem);
                if(copy_to_user (buf, &shm_info, sizeof(shm_info))) {
                        err = -EFAULT;
                        goto out;
                }

                err = err < 0 ? 0 : err;
                goto out;
        }
        case SHM_STAT:
        case IPC_STAT:
        {
                struct shmid64_ds tbuf;
                int result;
                memset(&tbuf, 0, sizeof(tbuf));
                shp = shm_lock(shmid);
                if(shp==NULL) {
                        err = -EINVAL;
                        goto out;
                } else if(cmd==SHM_STAT) {
                        err = -EINVAL;
                        if (shmid > shm_ids.max_id)
                                goto out_unlock;
                        result = shm_buildid(shmid, shp->shm_perm.seq);
                } else {
                        err = shm_checkid(shp, shmid);
                        if(err)
                                goto out_unlock;
                        result = 0;
                }
                err = -EACCES;
                if (ipcperms (&shp->shm_perm, S_IRUGO))
                        goto out_unlock;
                err = security_shm_shmctl(shp, cmd);
                if (err)
                        goto out_unlock;
                kernel_to_ipc64_perm(&shp->shm_perm, &tbuf.shm_perm);
                tbuf.shm_segsz	= shp->shm_segsz;
                tbuf.shm_atime	= shp->shm_atim;
                tbuf.shm_dtime	= shp->shm_dtim;
                tbuf.shm_ctime	= shp->shm_ctim;
                tbuf.shm_cpid	= shp->shm_cprid;
                tbuf.shm_lpid	= shp->shm_lprid;
                if (!is_file_hugepages(shp->shm_file))
                        tbuf.shm_nattch	= shp->shm_nattch;
                else
                        tbuf.shm_nattch = file_count(shp->shm_file) - 1;
                shm_unlock(shp);
                if(copy_shmid_to_user (buf, &tbuf, version))
                        err = -EFAULT;
                else
                        err = result;
                goto out;
        }
        case SHM_LOCK:
        case SHM_UNLOCK:
        {
                shp = shm_lock(shmid);
                if(shp==NULL) {
                        err = -EINVAL;
                        goto out;
                }
                err = shm_checkid(shp, shmid);
                if(err)
                        goto out_unlock;

                if (!capable(CAP_IPC_LOCK)) {
                        err = -EPERM;
                        if (current->euid != shp->shm_perm.uid &&
                            current->euid != shp->shm_perm.cuid)
                                goto out_unlock;
                        if (cmd == SHM_LOCK &&
                            !current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur)
                                goto out_unlock;
                }

                err = security_shm_shmctl(shp, cmd);
                if (err)
                        goto out_unlock;

                if(cmd==SHM_LOCK) {
                        struct user_struct * user = current->user;
                        if (!is_file_hugepages(shp->shm_file)) {
                                err = shmem_lock(shp->shm_file, 1, user);
                                if (!err) {
                                        shp->shm_perm.mode |= SHM_LOCKED;
                                        shp->mlock_user = user;
                                }
                        }
                } else if (!is_file_hugepages(shp->shm_file)) {
                        shmem_lock(shp->shm_file, 0, shp->mlock_user);
                        shp->shm_perm.mode &= ~SHM_LOCKED;
                        shp->mlock_user = NULL;
                }
                shm_unlock(shp);
                goto out;
        }
        case IPC_RMID:
        {
                /*
                 * We cannot simply remove the file. The SVID states
                 * that the block remains until the last person
                 * detaches from it, then is deleted. A shmat() on
                 * an RMID segment is legal in older Linux and if
                 * we change it apps break...
                 *
                 * Instead we set a destroyed flag, and then blow
                 * the name away when the usage hits zero.
                 */
                down(&shm_ids.sem);
                shp = shm_lock(shmid);
                err = -EINVAL;
                if (shp == NULL)
                        goto out_up;
                err = shm_checkid(shp, shmid);
                if(err)
                        goto out_unlock_up;

                if (current->euid != shp->shm_perm.uid &&
                    current->euid != shp->shm_perm.cuid &&
                    !capable(CAP_SYS_ADMIN)) {
                        err = -EPERM;
                        goto out_unlock_up;
                }

                err = security_shm_shmctl(shp, cmd);
                if (err)
                        goto out_unlock_up;

                if (shp->shm_nattch){
                        shp->shm_perm.mode |= SHM_DEST;
                        /* Do not find it any more */
                        shp->shm_perm.key = IPC_PRIVATE;
                        shm_unlock(shp);
                } else
                        shm_destroy (shp);
                up(&shm_ids.sem);
                goto out;
        }

        case IPC_SET:
        {
                if (copy_shmid_from_user (&setbuf, buf, version)) {
                        err = -EFAULT;
                        goto out;
                }
                if ((err = audit_ipc_perms(0, setbuf.uid, setbuf.gid, setbuf.mode)))
                        return err;
                down(&shm_ids.sem);
                shp = shm_lock(shmid);
                err = -EINVAL;
                if(shp==NULL)
                        goto out_up;
                err = shm_checkid(shp, shmid);
                if(err)
                        goto out_unlock_up;
                err = -EPERM;
                if (current->euid != shp->shm_perm.uid &&
                    current->euid != shp->shm_perm.cuid &&
                    !capable(CAP_SYS_ADMIN)) {
                        goto out_unlock_up;
                }

                err = security_shm_shmctl(shp, cmd);
                if (err)
                        goto out_unlock_up;

                shp->shm_perm.uid = setbuf.uid;
                shp->shm_perm.gid = setbuf.gid;
                shp->shm_perm.mode = (shp->shm_perm.mode & ~S_IRWXUGO)
                        | (setbuf.mode & S_IRWXUGO);
                shp->shm_ctim = get_seconds();
                break;
        }

        default:
                err = -EINVAL;
                goto out;
        }

        err = 0;
out_unlock_up:
        shm_unlock(shp);
out_up:
        up(&shm_ids.sem);
        goto out;
out_unlock:
        shm_unlock(shp);
out:
        return err;
}

/*
 * Fix shmaddr, allocate descriptor, map shm, add attach descriptor to lists.
 *
 * NOTE! Despite the name, this is NOT a direct system call entrypoint. The
 * "raddr" thing points to kernel space, and there has to be a wrapper around
 * this.
 */
long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr)
{
        struct shmid_kernel *shp;
        unsigned long addr;
        unsigned long size;
        struct file * file;
        int err;
        unsigned long flags;
        unsigned long prot;
        unsigned long o_flags;
        int acc_mode;
        void *user_addr;

        if (shmid < 0) {
                err = -EINVAL;
                goto out;
        } else if ((addr = (ulong)shmaddr)) {
                if (addr & (SHMLBA-1)) {
                        if (shmflg & SHM_RND)
                                addr &= ~(SHMLBA-1);	/* round down */
                        else
#ifndef __ARCH_FORCE_SHMLBA
                                if (addr & ~PAGE_MASK)
#endif
                                        return -EINVAL;
                }
                flags = MAP_SHARED | MAP_FIXED;
        } else {
                if ((shmflg & SHM_REMAP))
                        return -EINVAL;

                flags = MAP_SHARED;
        }

        if (shmflg & SHM_RDONLY) {
                prot = PROT_READ;
                o_flags = O_RDONLY;
                acc_mode = S_IRUGO;
        } else {
                prot = PROT_READ | PROT_WRITE;
                o_flags = O_RDWR;
                acc_mode = S_IRUGO | S_IWUGO;
        }
        if (shmflg & SHM_EXEC) {
                prot |= PROT_EXEC;
                acc_mode |= S_IXUGO;
        }

        /*
         * We cannot rely on the fs check since SYSV IPC does have an
         * additional creator id...
         */
        shp = shm_lock(shmid);
        if(shp == NULL) {
                err = -EINVAL;
                goto out;
        }
        err = shm_checkid(shp, shmid);
        if (err) {
                shm_unlock(shp);
                goto out;
        }
        if (ipcperms(&shp->shm_perm, acc_mode)) {
                shm_unlock(shp);
                err = -EACCES;
                goto out;
        }

        err = security_shm_shmat(shp, shmaddr, shmflg);
        if (err) {
                shm_unlock(shp);
                return err;
        }

        file = shp->shm_file;
        size = i_size_read(file->f_dentry->d_inode);
        shp->shm_nattch++;
        shm_unlock(shp);

        down_write(&current->mm->mmap_sem);
        if (addr && !(shmflg & SHM_REMAP)) {
                user_addr = ERR_PTR(-EINVAL);
                if (find_vma_intersection(current->mm, addr, addr + size))
                        goto invalid;
                /*
                 * If shm segment goes below stack, make sure there is some
                 * space left for the stack to grow (at least 4 pages).
                 */
                if (addr < current->mm->start_stack &&
                    addr > current->mm->start_stack - size - PAGE_SIZE * 5)
                        goto invalid;
        }

        user_addr = (void*) do_mmap (file, addr, size, prot, flags, 0);

invalid:
        up_write(&current->mm->mmap_sem);

        down (&shm_ids.sem);
        if(!(shp = shm_lock(shmid)))
                BUG();
        shp->shm_nattch--;
        if(shp->shm_nattch == 0 &&
           shp->shm_perm.mode & SHM_DEST)
                shm_destroy (shp);
        else
                shm_unlock(shp);
        up (&shm_ids.sem);

        *raddr = (unsigned long) user_addr;
        err = 0;
        if (IS_ERR(user_addr))
                err = PTR_ERR(user_addr);
out:
        return err;
}

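/*
 * Syscall wrapper: do_shmat() reports the attach address through a kernel
 * pointer, so it cannot be the entry point itself.  The mapped address is
 * returned as the syscall result; force_successful_syscall_return() stops
 * architectures that flag errors out of band from treating a large address
 * as a negative errno.
 */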
asmlinkage long sys_shmat(int shmid, char __user *shmaddr, int shmflg)
{
        unsigned long ret;
        long err;

        err = do_shmat(shmid, shmaddr, shmflg, &ret);
        if (err)
                return err;
        force_successful_syscall_return();
        return (long)ret;
}

/*
 * detach and kill segment if marked destroyed.
 * The work is done in shm_close.
 */
asmlinkage long sys_shmdt(char __user *shmaddr)
{
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma, *next;
        unsigned long addr = (unsigned long)shmaddr;
        loff_t size = 0;
        int retval = -EINVAL;

        down_write(&mm->mmap_sem);

        /*
         * This function tries to be smart and unmap shm segments that
         * were modified by partial mlock or munmap calls:
         * - It first determines the size of the shm segment that should be
         *   unmapped: It searches for a vma that is backed by shm and that
         *   started at address shmaddr. It records its size and then unmaps
         *   it.
         * - Then it unmaps all shm vmas that started at shmaddr and that
         *   are within the initially determined size.
         * Errors from do_munmap are ignored: the function only fails if
         * it's called with invalid parameters or if it's called to unmap
         * a part of a vma. Both calls in this function are for full vmas,
         * the parameters are directly copied from the vma itself and always
         * valid - therefore do_munmap cannot fail. (famous last words?)
         */
        /*
         * If it had been mremap()'d, the starting address would not
         * match the usual checks anyway. So assume all vma's are
         * above the starting address given.
         */
        vma = find_vma(mm, addr);

        while (vma) {
                next = vma->vm_next;

                /*
                 * Check if the starting address would match, i.e. it's
                 * a fragment created by mprotect() and/or munmap(), or it
                 * otherwise starts at this address with no hassles.
                 */
                if ((vma->vm_ops == &shm_vm_ops || is_vm_hugetlb_page(vma)) &&
                        (vma->vm_start - addr)/PAGE_SIZE == vma->vm_pgoff) {


                        size = vma->vm_file->f_dentry->d_inode->i_size;
                        do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start);
                        /*
                         * We discovered the size of the shm segment, so
                         * break out of here and fall through to the next
                         * loop that uses the size information to stop
                         * searching for matching vma's.
                         */
                        retval = 0;
                        vma = next;
                        break;
                }
                vma = next;
        }

        /*
         * We need look no further than the maximum address a fragment
         * could possibly have landed at. Also cast things to loff_t to
         * prevent overflows and make comparisons vs. equal-width types.
         */
        while (vma && (loff_t)(vma->vm_end - addr) <= size) {
                next = vma->vm_next;

                /* finding a matching vma now does not alter retval */
                if ((vma->vm_ops == &shm_vm_ops || is_vm_hugetlb_page(vma)) &&
                        (vma->vm_start - addr)/PAGE_SIZE == vma->vm_pgoff)

                        do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start);
                vma = next;
        }

        up_write(&mm->mmap_sem);
        return retval;
}

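/*
 * Emit one row per segment for /proc/sysvipc/shm; the column order matches
 * the header string registered in shm_init().  BIG_STRING widens the size
 * column when size_t is wider than int.
 */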
#ifdef CONFIG_PROC_FS
static int sysvipc_shm_proc_show(struct seq_file *s, void *it)
{
        struct shmid_kernel *shp = it;
        char *format;

#define SMALL_STRING "%10d %10d %4o %10u %5u %5u %5d %5u %5u %5u %5u %10lu %10lu %10lu\n"
#define BIG_STRING "%10d %10d %4o %21u %5u %5u %5d %5u %5u %5u %5u %10lu %10lu %10lu\n"

        if (sizeof(size_t) <= sizeof(int))
                format = SMALL_STRING;
        else
                format = BIG_STRING;
        return seq_printf(s, format,
                          shp->shm_perm.key,
                          shp->id,
                          shp->shm_perm.mode,
                          shp->shm_segsz,
                          shp->shm_cprid,
                          shp->shm_lprid,
                          is_file_hugepages(shp->shm_file) ? (file_count(shp->shm_file) - 1) : shp->shm_nattch,
                          shp->shm_perm.uid,
                          shp->shm_perm.gid,
                          shp->shm_perm.cuid,
                          shp->shm_perm.cgid,
                          shp->shm_atim,
                          shp->shm_dtim,
                          shp->shm_ctim);
}
#endif