/* SPDX-License-Identifier: GPL-2.0 */
/*
 * linux/ipc/util.h
 * Copyright (C) 1999 Christoph Rohland
 *
 * ipc helper functions (c) 1999 Manfred Spraul <manfred@colorfullife.com>
 * namespaces support.      2006 OpenVZ, SWsoft Inc.
 *                               Pavel Emelianov <xemul@openvz.org>
 */
| 10 | |
| 11 | #ifndef _IPC_UTIL_H |
| 12 | #define _IPC_UTIL_H |
| 13 | |
Johannes Weiner | 232086b | 2009-06-20 02:23:29 +0200 | [diff] [blame] | 14 | #include <linux/unistd.h> |
Nadia Derbey | 023a535 | 2007-10-18 23:40:51 -0700 | [diff] [blame] | 15 | #include <linux/err.h> |
Davidlohr Bueso | 15df03c8 | 2017-11-17 15:31:18 -0800 | [diff] [blame] | 16 | #include <linux/ipc_namespace.h> |
Nadia Derbey | 7ca7e56 | 2007-10-18 23:40:48 -0700 | [diff] [blame] | 17 | |
Eric W. Biederman | f83a396 | 2018-03-22 21:45:50 -0500 | [diff] [blame] | 18 | #define IPCMNI 32768 /* <= MAX_INT limit for ipc arrays (including sysctl changes) */ |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 19 | #define SEQ_MULTIPLIER (IPCMNI) |
| 20 | |
Davidlohr Bueso | eae04d2 | 2018-08-21 22:01:56 -0700 | [diff] [blame] | 21 | void sem_init(void); |
| 22 | void msg_init(void); |
Manfred Spraul | 239521f | 2014-01-27 17:07:04 -0800 | [diff] [blame] | 23 | void shm_init(void); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 24 | |
Pavel Emelyanov | ae5e1b2 | 2008-02-08 04:18:22 -0800 | [diff] [blame] | 25 | struct ipc_namespace; |
Eric W. Biederman | 03f1fc0 | 2018-03-23 00:22:05 -0500 | [diff] [blame] | 26 | struct pid_namespace; |
Pavel Emelyanov | ae5e1b2 | 2008-02-08 04:18:22 -0800 | [diff] [blame] | 27 | |
Serge E. Hallyn | 614b84c | 2009-04-06 19:01:08 -0700 | [diff] [blame] | 28 | #ifdef CONFIG_POSIX_MQUEUE |
Serge E. Hallyn | 7eafd7c | 2009-04-06 19:01:10 -0700 | [diff] [blame] | 29 | extern void mq_clear_sbinfo(struct ipc_namespace *ns); |
| 30 | extern void mq_put_mnt(struct ipc_namespace *ns); |
Serge E. Hallyn | 614b84c | 2009-04-06 19:01:08 -0700 | [diff] [blame] | 31 | #else |
Serge E. Hallyn | 7eafd7c | 2009-04-06 19:01:10 -0700 | [diff] [blame] | 32 | static inline void mq_clear_sbinfo(struct ipc_namespace *ns) { } |
| 33 | static inline void mq_put_mnt(struct ipc_namespace *ns) { } |
Serge E. Hallyn | 614b84c | 2009-04-06 19:01:08 -0700 | [diff] [blame] | 34 | #endif |
| 35 | |
| 36 | #ifdef CONFIG_SYSVIPC |
Davidlohr Bueso | eae04d2 | 2018-08-21 22:01:56 -0700 | [diff] [blame] | 37 | void sem_init_ns(struct ipc_namespace *ns); |
| 38 | void msg_init_ns(struct ipc_namespace *ns); |
| 39 | void shm_init_ns(struct ipc_namespace *ns); |
Kirill Korotaev | 73ea413 | 2006-10-02 02:18:20 -0700 | [diff] [blame] | 40 | |
| 41 | void sem_exit_ns(struct ipc_namespace *ns); |
| 42 | void msg_exit_ns(struct ipc_namespace *ns); |
| 43 | void shm_exit_ns(struct ipc_namespace *ns); |
Serge E. Hallyn | 614b84c | 2009-04-06 19:01:08 -0700 | [diff] [blame] | 44 | #else |
Davidlohr Bueso | eae04d2 | 2018-08-21 22:01:56 -0700 | [diff] [blame] | 45 | static inline void sem_init_ns(struct ipc_namespace *ns) { } |
| 46 | static inline void msg_init_ns(struct ipc_namespace *ns) { } |
| 47 | static inline void shm_init_ns(struct ipc_namespace *ns) { } |
Serge E. Hallyn | 614b84c | 2009-04-06 19:01:08 -0700 | [diff] [blame] | 48 | |
| 49 | static inline void sem_exit_ns(struct ipc_namespace *ns) { } |
| 50 | static inline void msg_exit_ns(struct ipc_namespace *ns) { } |
| 51 | static inline void shm_exit_ns(struct ipc_namespace *ns) { } |
| 52 | #endif |
Kirill Korotaev | 73ea413 | 2006-10-02 02:18:20 -0700 | [diff] [blame] | 53 | |
/*
 * Structure that holds the parameters needed by the ipc operations
 * (see after)
 */
struct ipc_params {
	key_t key;		/* user-supplied key from msgget()/semget()/shmget() */
	int flg;		/* ipc flags (IPC_CREAT, IPC_EXCL, permission mode) */
	union {
		size_t size;	/* for shared memories */
		int nsems;	/* for semaphores */
	} u;			/* holds the getnew() specific param */
};
| 66 | |
| 67 | /* |
| 68 | * Structure that holds some ipc operations. This structure is used to unify |
| 69 | * the calls to sys_msgget(), sys_semget(), sys_shmget() |
| 70 | * . routine to call to create a new ipc object. Can be one of newque, |
| 71 | * newary, newseg |
Nadia Derbey | f4566f0 | 2007-10-18 23:40:53 -0700 | [diff] [blame] | 72 | * . routine to call to check permissions for a new ipc object. |
Nadia Derbey | 7748dbf | 2007-10-18 23:40:49 -0700 | [diff] [blame] | 73 | * Can be one of security_msg_associate, security_sem_associate, |
| 74 | * security_shm_associate |
| 75 | * . routine to call for an extra check if needed |
| 76 | */ |
| 77 | struct ipc_ops { |
Paul McQuade | 46c0a8c | 2014-06-06 14:37:37 -0700 | [diff] [blame] | 78 | int (*getnew)(struct ipc_namespace *, struct ipc_params *); |
| 79 | int (*associate)(struct kern_ipc_perm *, int); |
| 80 | int (*more_checks)(struct kern_ipc_perm *, struct ipc_params *); |
Nadia Derbey | 7748dbf | 2007-10-18 23:40:49 -0700 | [diff] [blame] | 81 | }; |
| 82 | |
Mike Waychison | ae78177 | 2005-09-06 15:17:09 -0700 | [diff] [blame] | 83 | struct seq_file; |
Pierre Peiffer | ed2ddbf | 2008-02-08 04:18:57 -0800 | [diff] [blame] | 84 | struct ipc_ids; |
Cedric Le Goater | 7d69a1f | 2007-07-15 23:40:58 -0700 | [diff] [blame] | 85 | |
Davidlohr Bueso | eae04d2 | 2018-08-21 22:01:56 -0700 | [diff] [blame] | 86 | void ipc_init_ids(struct ipc_ids *ids); |
Mike Waychison | ae78177 | 2005-09-06 15:17:09 -0700 | [diff] [blame] | 87 | #ifdef CONFIG_PROC_FS |
| 88 | void __init ipc_init_proc_interface(const char *path, const char *header, |
Kirill Korotaev | 73ea413 | 2006-10-02 02:18:20 -0700 | [diff] [blame] | 89 | int ids, int (*show)(struct seq_file *, void *)); |
Eric W. Biederman | 03f1fc0 | 2018-03-23 00:22:05 -0500 | [diff] [blame] | 90 | struct pid_namespace *ipc_seq_pid_ns(struct seq_file *); |
Mike Waychison | ae78177 | 2005-09-06 15:17:09 -0700 | [diff] [blame] | 91 | #else |
| 92 | #define ipc_init_proc_interface(path, header, ids, show) do {} while (0) |
| 93 | #endif |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 94 | |
Kirill Korotaev | 73ea413 | 2006-10-02 02:18:20 -0700 | [diff] [blame] | 95 | #define IPC_SEM_IDS 0 |
| 96 | #define IPC_MSG_IDS 1 |
| 97 | #define IPC_SHM_IDS 2 |
| 98 | |
Nadia Derbey | ce621f5 | 2007-10-18 23:40:52 -0700 | [diff] [blame] | 99 | #define ipcid_to_idx(id) ((id) % SEQ_MULTIPLIER) |
Stanislav Kinsbursky | 03f5956 | 2013-01-04 15:34:50 -0800 | [diff] [blame] | 100 | #define ipcid_to_seqx(id) ((id) / SEQ_MULTIPLIER) |
Davidlohr Bueso | daf948c | 2014-01-27 17:07:09 -0800 | [diff] [blame] | 101 | #define IPCID_SEQ_MAX min_t(int, INT_MAX/SEQ_MULTIPLIER, USHRT_MAX) |
Nadia Derbey | ce621f5 | 2007-10-18 23:40:52 -0700 | [diff] [blame] | 102 | |
Davidlohr Bueso | d9a605e | 2013-09-11 14:26:24 -0700 | [diff] [blame] | 103 | /* must be called with ids->rwsem acquired for writing */ |
Nadia Derbey | 7ca7e56 | 2007-10-18 23:40:48 -0700 | [diff] [blame] | 104 | int ipc_addid(struct ipc_ids *, struct kern_ipc_perm *, int); |
Nadia Derbey | 3e148c7 | 2007-10-18 23:40:54 -0700 | [diff] [blame] | 105 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 106 | /* must be called with both locks acquired. */ |
Nadia Derbey | 7ca7e56 | 2007-10-18 23:40:48 -0700 | [diff] [blame] | 107 | void ipc_rmid(struct ipc_ids *, struct kern_ipc_perm *); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 108 | |
Guillaume Knispel | 0cfb6ae | 2017-09-08 16:17:55 -0700 | [diff] [blame] | 109 | /* must be called with both locks acquired. */ |
| 110 | void ipc_set_key_private(struct ipc_ids *, struct kern_ipc_perm *); |
| 111 | |
Nadia Derbey | f4566f0 | 2007-10-18 23:40:53 -0700 | [diff] [blame] | 112 | /* must be called with ipcp locked */ |
Serge E. Hallyn | b0e7759 | 2011-03-23 16:43:24 -0700 | [diff] [blame] | 113 | int ipcperms(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp, short flg); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 114 | |
Davidlohr Bueso | 15df03c8 | 2017-11-17 15:31:18 -0800 | [diff] [blame] | 115 | /** |
Manfred Spraul | 27c331a | 2018-08-21 22:02:00 -0700 | [diff] [blame] | 116 | * ipc_get_maxidx - get the highest assigned index |
Davidlohr Bueso | 15df03c8 | 2017-11-17 15:31:18 -0800 | [diff] [blame] | 117 | * @ids: ipc identifier set |
| 118 | * |
| 119 | * Called with ipc_ids.rwsem held for reading. |
| 120 | */ |
Manfred Spraul | 27c331a | 2018-08-21 22:02:00 -0700 | [diff] [blame] | 121 | static inline int ipc_get_maxidx(struct ipc_ids *ids) |
Davidlohr Bueso | 15df03c8 | 2017-11-17 15:31:18 -0800 | [diff] [blame] | 122 | { |
| 123 | if (ids->in_use == 0) |
| 124 | return -1; |
| 125 | |
| 126 | if (ids->in_use == IPCMNI) |
| 127 | return IPCMNI - 1; |
| 128 | |
Manfred Spraul | 27c331a | 2018-08-21 22:02:00 -0700 | [diff] [blame] | 129 | return ids->max_idx; |
Davidlohr Bueso | 15df03c8 | 2017-11-17 15:31:18 -0800 | [diff] [blame] | 130 | } |
| 131 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 132 | /* |
| 133 | * For allocation that need to be freed by RCU. |
| 134 | * Objects are reference counted, they start with reference count 1. |
| 135 | * getref increases the refcount, the putref call that reduces the recount |
| 136 | * to 0 schedules the rcu destruction. Caller must guarantee locking. |
Manfred Spraul | 62b49c9 | 2017-07-12 14:35:34 -0700 | [diff] [blame] | 137 | * |
| 138 | * refcount is initialized by ipc_addid(), before that point call_rcu() |
| 139 | * must be used. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 140 | */ |
Manfred Spraul | 2a9d648 | 2018-08-21 22:02:04 -0700 | [diff] [blame] | 141 | bool ipc_rcu_getref(struct kern_ipc_perm *ptr); |
Manfred Spraul | dba4cdd | 2017-07-12 14:34:41 -0700 | [diff] [blame] | 142 | void ipc_rcu_putref(struct kern_ipc_perm *ptr, |
| 143 | void (*func)(struct rcu_head *head)); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 144 | |
Davidlohr Bueso | 55b7ae5 | 2015-06-30 14:58:42 -0700 | [diff] [blame] | 145 | struct kern_ipc_perm *ipc_obtain_object_idr(struct ipc_ids *ids, int id); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 146 | |
| 147 | void kernel_to_ipc64_perm(struct kern_ipc_perm *in, struct ipc64_perm *out); |
| 148 | void ipc64_perm_to_ipc_perm(struct ipc64_perm *in, struct ipc_perm *out); |
Eric W. Biederman | 1efdb69 | 2012-02-07 16:54:11 -0800 | [diff] [blame] | 149 | int ipc_update_perm(struct ipc64_perm *in, struct kern_ipc_perm *out); |
Manfred Spraul | 4241c1a | 2018-08-21 22:01:34 -0700 | [diff] [blame] | 150 | struct kern_ipc_perm *ipcctl_obtain_check(struct ipc_namespace *ns, |
Davidlohr Bueso | 444d0f6 | 2013-04-30 19:15:24 -0700 | [diff] [blame] | 151 | struct ipc_ids *ids, int id, int cmd, |
| 152 | struct ipc64_perm *perm, int extra_perm); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 153 | |
/*
 * Replace the struct pid reference stored at *pos with @pid, taking a
 * reference on the new pid and dropping the reference on the old one.
 * No-op when *pos already holds @pid, avoiding a useless get/put pair.
 */
static inline void ipc_update_pid(struct pid **pos, struct pid *pid)
{
	struct pid *cur = *pos;

	if (cur == pid)
		return;

	*pos = get_pid(pid);
	put_pid(cur);
}
| 162 | |
Will Deacon | c1d7e01 | 2012-07-30 14:42:46 -0700 | [diff] [blame] | 163 | #ifndef CONFIG_ARCH_WANT_IPC_PARSE_VERSION |
Paul McQuade | 46c0a8c | 2014-06-06 14:37:37 -0700 | [diff] [blame] | 164 | /* On IA-64, we always use the "64-bit version" of the IPC structures. */ |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 165 | # define ipc_parse_version(cmd) IPC_64 |
| 166 | #else |
Manfred Spraul | 239521f | 2014-01-27 17:07:04 -0800 | [diff] [blame] | 167 | int ipc_parse_version(int *cmd); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 168 | #endif |
| 169 | |
| 170 | extern void free_msg(struct msg_msg *msg); |
Mathias Krause | 4e9b45a | 2013-11-12 15:11:47 -0800 | [diff] [blame] | 171 | extern struct msg_msg *load_msg(const void __user *src, size_t len); |
Stanislav Kinsbursky | 4a674f3 | 2013-01-04 15:34:55 -0800 | [diff] [blame] | 172 | extern struct msg_msg *copy_msg(struct msg_msg *src, struct msg_msg *dst); |
Mathias Krause | 4e9b45a | 2013-11-12 15:11:47 -0800 | [diff] [blame] | 173 | extern int store_msg(void __user *dest, struct msg_msg *msg, size_t len); |
Nadia Derbey | 7748dbf | 2007-10-18 23:40:49 -0700 | [diff] [blame] | 174 | |
/*
 * Check that the sequence number embedded in ipc id @id matches the
 * object's current sequence counter. Returns nonzero when the id is
 * stale (object slot was recycled), 0 when it is still valid.
 */
static inline int ipc_checkid(struct kern_ipc_perm *ipcp, int id)
{
	return ipcid_to_seqx(id) != ipcp->seq;
}
| 179 | |
/* Acquire the per-object spinlock (kern_ipc_perm.lock). */
static inline void ipc_lock_object(struct kern_ipc_perm *perm)
{
	spin_lock(&perm->lock);
}
| 184 | |
/* Release the per-object spinlock (kern_ipc_perm.lock). */
static inline void ipc_unlock_object(struct kern_ipc_perm *perm)
{
	spin_unlock(&perm->lock);
}
| 189 | |
/* Debug assertion: the caller must already hold the per-object lock. */
static inline void ipc_assert_locked_object(struct kern_ipc_perm *perm)
{
	assert_spin_locked(&perm->lock);
}
| 194 | |
/*
 * Drop the object lock and leave the RCU read-side critical section
 * entered when the object was looked up. Order matters: the lock is
 * released before rcu_read_unlock().
 */
static inline void ipc_unlock(struct kern_ipc_perm *perm)
{
	ipc_unlock_object(perm);
	rcu_read_unlock();
}
| 200 | |
/*
 * ipc_valid_object() - helper to sort out IPC_RMID races for codepaths
 * where the respective ipc_ids.rwsem is not being held down.
 * Checks whether the ipc object is still around or if it's gone already, as
 * ipc_rmid() may have already freed the ID while the ipc lock was spinning.
 * Needs to be called with kern_ipc_perm.lock held -- exception made for one
 * checkpoint case at sys_semtimedop() as noted in code commentary.
 */
static inline bool ipc_valid_object(struct kern_ipc_perm *perm)
{
	/* ipc_rmid() sets perm->deleted under the object lock */
	return !perm->deleted;
}
| 213 | |
Davidlohr Bueso | 4d2bff5 | 2013-04-30 19:15:19 -0700 | [diff] [blame] | 214 | struct kern_ipc_perm *ipc_obtain_object_check(struct ipc_ids *ids, int id); |
Pavel Emelyanov | b2d75cd | 2008-02-08 04:18:54 -0800 | [diff] [blame] | 215 | int ipcget(struct ipc_namespace *ns, struct ipc_ids *ids, |
Mathias Krause | eb66ec4 | 2014-06-06 14:37:36 -0700 | [diff] [blame] | 216 | const struct ipc_ops *ops, struct ipc_params *params); |
Alexey Dobriyan | 665c774 | 2009-06-17 16:27:57 -0700 | [diff] [blame] | 217 | void free_ipcs(struct ipc_namespace *ns, struct ipc_ids *ids, |
| 218 | void (*free)(struct ipc_namespace *, struct kern_ipc_perm *)); |
Al Viro | 553f770 | 2017-07-08 22:52:47 -0400 | [diff] [blame] | 219 | |
| 220 | #ifdef CONFIG_COMPAT |
| 221 | #include <linux/compat.h> |
/* 32-bit (compat) layout of struct ipc_perm for compat syscalls. */
struct compat_ipc_perm {
	key_t key;
	__compat_uid_t uid;
	__compat_gid_t gid;
	__compat_uid_t cuid;	/* creator uid */
	__compat_gid_t cgid;	/* creator gid */
	compat_mode_t mode;
	unsigned short seq;
};
| 231 | |
Al Viro | c0ebccb | 2017-07-09 10:03:23 -0400 | [diff] [blame] | 232 | void to_compat_ipc_perm(struct compat_ipc_perm *, struct ipc64_perm *); |
| 233 | void to_compat_ipc64_perm(struct compat_ipc64_perm *, struct ipc64_perm *); |
| 234 | int get_compat_ipc_perm(struct ipc64_perm *, struct compat_ipc_perm __user *); |
| 235 | int get_compat_ipc64_perm(struct ipc64_perm *, |
| 236 | struct compat_ipc64_perm __user *); |
| 237 | |
/*
 * Extract the IPC_64 version flag from a compat ipc syscall command.
 * On architectures that encode the version in the command word, the
 * flag is stripped from *cmd and returned; otherwise the new (64-bit)
 * structure layout is always assumed.
 */
static inline int compat_ipc_parse_version(int *cmd)
{
#ifdef CONFIG_ARCH_WANT_COMPAT_IPC_PARSE_VERSION
	int version = *cmd & IPC_64;
	*cmd &= ~IPC_64;
	return version;
#else
	return IPC_64;
#endif
}
| 248 | #endif |
Dominik Brodowski | 41f4f0e | 2018-03-20 19:48:14 +0100 | [diff] [blame] | 249 | |
| 250 | /* for __ARCH_WANT_SYS_IPC */ |
| 251 | long ksys_semtimedop(int semid, struct sembuf __user *tsops, |
| 252 | unsigned int nsops, |
Arnd Bergmann | 21fc538 | 2018-04-13 13:58:00 +0200 | [diff] [blame] | 253 | const struct __kernel_timespec __user *timeout); |
Dominik Brodowski | 6989471 | 2018-03-20 19:53:58 +0100 | [diff] [blame] | 254 | long ksys_semget(key_t key, int nsems, int semflg); |
Dominik Brodowski | d969c6f | 2018-03-20 20:00:39 +0100 | [diff] [blame] | 255 | long ksys_semctl(int semid, int semnum, int cmd, unsigned long arg); |
Dominik Brodowski | 3d65661 | 2018-03-20 20:06:04 +0100 | [diff] [blame] | 256 | long ksys_msgget(key_t key, int msgflg); |
Dominik Brodowski | e340db5 | 2018-03-20 20:15:28 +0100 | [diff] [blame] | 257 | long ksys_msgctl(int msqid, int cmd, struct msqid_ds __user *buf); |
Dominik Brodowski | 078faac | 2018-03-20 21:25:57 +0100 | [diff] [blame] | 258 | long ksys_msgrcv(int msqid, struct msgbuf __user *msgp, size_t msgsz, |
| 259 | long msgtyp, int msgflg); |
Dominik Brodowski | 31c213f | 2018-03-20 21:29:00 +0100 | [diff] [blame] | 260 | long ksys_msgsnd(int msqid, struct msgbuf __user *msgp, size_t msgsz, |
| 261 | int msgflg); |
Dominik Brodowski | 65749e0 | 2018-03-20 20:07:53 +0100 | [diff] [blame] | 262 | long ksys_shmget(key_t key, size_t size, int shmflg); |
Dominik Brodowski | da1e2744 | 2018-03-20 20:09:48 +0100 | [diff] [blame] | 263 | long ksys_shmdt(char __user *shmaddr); |
Dominik Brodowski | c84d079 | 2018-03-20 20:12:33 +0100 | [diff] [blame] | 264 | long ksys_shmctl(int shmid, int cmd, struct shmid_ds __user *buf); |
Dominik Brodowski | 41f4f0e | 2018-03-20 19:48:14 +0100 | [diff] [blame] | 265 | |
| 266 | /* for CONFIG_ARCH_WANT_OLD_COMPAT_IPC */ |
Dominik Brodowski | 41f4f0e | 2018-03-20 19:48:14 +0100 | [diff] [blame] | 267 | long compat_ksys_semtimedop(int semid, struct sembuf __user *tsems, |
| 268 | unsigned int nsops, |
| 269 | const struct compat_timespec __user *timeout); |
Arnd Bergmann | b0d1757 | 2018-04-13 13:58:23 +0200 | [diff] [blame] | 270 | #ifdef CONFIG_COMPAT |
Dominik Brodowski | d969c6f | 2018-03-20 20:00:39 +0100 | [diff] [blame] | 271 | long compat_ksys_semctl(int semid, int semnum, int cmd, int arg); |
Dominik Brodowski | e340db5 | 2018-03-20 20:15:28 +0100 | [diff] [blame] | 272 | long compat_ksys_msgctl(int msqid, int cmd, void __user *uptr); |
Dominik Brodowski | 078faac | 2018-03-20 21:25:57 +0100 | [diff] [blame] | 273 | long compat_ksys_msgrcv(int msqid, compat_uptr_t msgp, compat_ssize_t msgsz, |
| 274 | compat_long_t msgtyp, int msgflg); |
Dominik Brodowski | 31c213f | 2018-03-20 21:29:00 +0100 | [diff] [blame] | 275 | long compat_ksys_msgsnd(int msqid, compat_uptr_t msgp, |
| 276 | compat_ssize_t msgsz, int msgflg); |
Dominik Brodowski | c84d079 | 2018-03-20 20:12:33 +0100 | [diff] [blame] | 277 | long compat_ksys_shmctl(int shmid, int cmd, void __user *uptr); |
Dominik Brodowski | 41f4f0e | 2018-03-20 19:48:14 +0100 | [diff] [blame] | 278 | #endif /* CONFIG_COMPAT */ |
| 279 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 280 | #endif |