/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SCHED_MM_H
#define _LINUX_SCHED_MM_H

#include <linux/kernel.h>
#include <linux/atomic.h>
#include <linux/sched.h>
#include <linux/mm_types.h>
#include <linux/gfp.h>
#include <linux/sync_core.h>

/*
 * Routines for handling mm_structs
 */
extern struct mm_struct *mm_alloc(void);

/**
 * mmgrab() - Pin a &struct mm_struct.
 * @mm: The &struct mm_struct to pin.
 *
 * Make sure that @mm will not get freed even after the owning task
 * exits. This doesn't guarantee that the associated address space
 * will still exist later on and mmget_not_zero() has to be used before
 * accessing it.
 *
 * This is the preferred way to pin @mm for a longer/unbounded amount
 * of time.
 *
 * Use mmdrop() to release the reference acquired by mmgrab().
 *
 * See also <Documentation/vm/active_mm.txt> for an in-depth explanation
 * of &mm_struct.mm_count vs &mm_struct.mm_users.
 */
static inline void mmgrab(struct mm_struct *mm)
{
	atomic_inc(&mm->mm_count);
}

extern void __mmdrop(struct mm_struct *mm);

static inline void mmdrop(struct mm_struct *mm)
{
	/*
	 * The implicit full barrier implied by atomic_dec_and_test() is
	 * required by the membarrier system call before returning to
	 * user-space, after storing to rq->curr.
	 */
	if (unlikely(atomic_dec_and_test(&mm->mm_count)))
		__mmdrop(mm);
}
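
/*
 * Usage sketch (illustrative only; my_ctx and its helpers are
 * hypothetical, not part of this header): code that stashes an mm
 * pointer beyond the owning task's lifetime pins it with mmgrab()
 * and pairs that with mmdrop():
 *
 *	struct my_ctx {
 *		struct mm_struct *mm;
 *	};
 *
 *	static void my_ctx_init(struct my_ctx *ctx, struct mm_struct *mm)
 *	{
 *		mmgrab(mm);
 *		ctx->mm = mm;
 *	}
 *
 *	static void my_ctx_destroy(struct my_ctx *ctx)
 *	{
 *		mmdrop(ctx->mm);
 *	}
 *
 * The mm_count reference keeps only the mm_struct itself alive; the
 * address space must still be pinned with mmget_not_zero() before use.
 */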

/**
 * mmget() - Pin the address space associated with a &struct mm_struct.
 * @mm: The address space to pin.
 *
 * Make sure that the address space of the given &struct mm_struct doesn't
 * go away. This does not protect against parts of the address space being
 * modified or freed, however.
 *
 * Never use this function to pin this address space for an
 * unbounded/indefinite amount of time.
 *
 * Use mmput() to release the reference acquired by mmget().
 *
 * See also <Documentation/vm/active_mm.txt> for an in-depth explanation
 * of &mm_struct.mm_count vs &mm_struct.mm_users.
 */
static inline void mmget(struct mm_struct *mm)
{
	atomic_inc(&mm->mm_users);
}

static inline bool mmget_not_zero(struct mm_struct *mm)
{
	return atomic_inc_not_zero(&mm->mm_users);
}
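
/*
 * Usage sketch (illustrative only): the classic pattern for code that
 * holds a bare mm_count reference (see mmgrab() above) and now wants to
 * operate on the address space is to try to elevate mm_users first,
 * because the address space may already have been torn down:
 *
 *	if (mmget_not_zero(mm)) {
 *		... operate on the address space ...
 *		mmput(mm);
 *	}
 *
 * mmput() is declared below.
 */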

/* mmput gets rid of the mappings and all user-space */
extern void mmput(struct mm_struct *);
#ifdef CONFIG_MMU
/*
 * Same as mmput(), but performs the slow path asynchronously. Can be
 * called from atomic context as well.
 */
void mmput_async(struct mm_struct *);
#endif

/* Grab a reference to a task's mm, if it is not already going away */
extern struct mm_struct *get_task_mm(struct task_struct *task);
/*
 * Grab a reference to a task's mm, if it is not already going away
 * and ptrace_may_access() with the given mode succeeds.
 */
extern struct mm_struct *mm_access(struct task_struct *task, unsigned int mode);
/* Remove the current task's stale references to the old mm_struct */
extern void mm_release(struct task_struct *, struct mm_struct *);
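
/*
 * Usage sketch (illustrative only): get_task_mm() returns the task's mm
 * with an elevated mm_users count, or NULL if the mm is already being
 * torn down, so the result must be NULL-checked and released with
 * mmput():
 *
 *	struct mm_struct *mm = get_task_mm(task);
 *
 *	if (mm) {
 *		... inspect or walk the address space ...
 *		mmput(mm);
 *	}
 */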

#ifdef CONFIG_MEMCG
extern void mm_update_next_owner(struct mm_struct *mm);
#else
static inline void mm_update_next_owner(struct mm_struct *mm)
{
}
#endif /* CONFIG_MEMCG */

#ifdef CONFIG_MMU
extern void arch_pick_mmap_layout(struct mm_struct *mm);
extern unsigned long
arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
		       unsigned long, unsigned long);
extern unsigned long
arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
			       unsigned long len, unsigned long pgoff,
			       unsigned long flags);
#else
static inline void arch_pick_mmap_layout(struct mm_struct *mm) {}
#endif

static inline bool in_vfork(struct task_struct *tsk)
{
	bool ret;

	/*
	 * We need RCU to access ->real_parent if CLONE_VM was used along
	 * with CLONE_PARENT.
	 *
	 * We check real_parent->mm == tsk->mm because CLONE_VFORK does not
	 * imply CLONE_VM.
	 *
	 * CLONE_VFORK can be used with CLONE_PARENT/CLONE_THREAD and thus
	 * ->real_parent is not necessarily the task doing vfork(), so in
	 * theory we can't rely on task_lock() if we want to dereference it.
	 *
	 * And in this case we can't trust the real_parent->mm == tsk->mm
	 * check, it can be a false negative. But we do not care, if init or
	 * another oom-unkillable task does this it should blame itself.
	 */
	rcu_read_lock();
	ret = tsk->vfork_done && tsk->real_parent->mm == tsk->mm;
	rcu_read_unlock();

	return ret;
}

/*
 * Applies per-task gfp context to the given allocation flags.
 * PF_MEMALLOC_NOIO implies GFP_NOIO
 * PF_MEMALLOC_NOFS implies GFP_NOFS
 */
static inline gfp_t current_gfp_context(gfp_t flags)
{
	/*
	 * NOIO implies both NOIO and NOFS and it is the more restrictive
	 * context, so always make sure it takes precedence.
	 */
	if (unlikely(current->flags & PF_MEMALLOC_NOIO))
		flags &= ~(__GFP_IO | __GFP_FS);
	else if (unlikely(current->flags & PF_MEMALLOC_NOFS))
		flags &= ~__GFP_FS;
	return flags;
}
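
/*
 * Illustrative sketch (my_alloc is hypothetical, not part of this
 * header): an allocation path that wants to honor the per-task context
 * filters its gfp mask first:
 *
 *	static struct page *my_alloc(gfp_t gfp)
 *	{
 *		gfp = current_gfp_context(gfp);
 *		return alloc_pages(gfp, 0);
 *	}
 *
 * With PF_MEMALLOC_NOIO set, a GFP_KERNEL caller transparently gets
 * GFP_NOIO behaviour (__GFP_IO and __GFP_FS cleared); with
 * PF_MEMALLOC_NOFS, only __GFP_FS is cleared.
 */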

#ifdef CONFIG_LOCKDEP
extern void fs_reclaim_acquire(gfp_t gfp_mask);
extern void fs_reclaim_release(gfp_t gfp_mask);
#else
static inline void fs_reclaim_acquire(gfp_t gfp_mask) { }
static inline void fs_reclaim_release(gfp_t gfp_mask) { }
#endif
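
/*
 * Illustrative sketch (an assumption about typical use, not spelled out
 * in this header): these lockdep annotations bracket a section that may
 * enter filesystem reclaim, so lockdep can flag locks taken both inside
 * and around reclaim:
 *
 *	fs_reclaim_acquire(gfp_mask);
 *	... allocation work that may recurse into fs reclaim ...
 *	fs_reclaim_release(gfp_mask);
 *
 * Both calls become empty inlines when CONFIG_LOCKDEP is disabled.
 */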

static inline unsigned int memalloc_noio_save(void)
{
	unsigned int flags = current->flags & PF_MEMALLOC_NOIO;
	current->flags |= PF_MEMALLOC_NOIO;
	return flags;
}

static inline void memalloc_noio_restore(unsigned int flags)
{
	current->flags = (current->flags & ~PF_MEMALLOC_NOIO) | flags;
}
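
/*
 * Usage sketch (illustrative only): the save/restore pair nests safely
 * because save() returns the previous state of the bit and restore()
 * puts exactly that state back:
 *
 *	unsigned int noio_flags = memalloc_noio_save();
 *
 *	... allocations in this scope behave as GFP_NOIO, provided they
 *	    are filtered through current_gfp_context() ...
 *
 *	memalloc_noio_restore(noio_flags);
 *
 * Typical callers are block or device drivers that must not recurse
 * into I/O while allocating, e.g. on a resume or reclaim path.
 */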

static inline unsigned int memalloc_nofs_save(void)
{
	unsigned int flags = current->flags & PF_MEMALLOC_NOFS;
	current->flags |= PF_MEMALLOC_NOFS;
	return flags;
}

static inline void memalloc_nofs_restore(unsigned int flags)
{
	current->flags = (current->flags & ~PF_MEMALLOC_NOFS) | flags;
}
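
/*
 * Usage sketch (illustrative only, hypothetical locking context): a
 * filesystem holding a lock that its own reclaim path might also need
 * can mark the whole critical section NOFS instead of passing GFP_NOFS
 * at every allocation site:
 *
 *	unsigned int nofs_flags = memalloc_nofs_save();
 *
 *	... allocations here implicitly drop __GFP_FS ...
 *
 *	memalloc_nofs_restore(nofs_flags);
 */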

static inline unsigned int memalloc_noreclaim_save(void)
{
	unsigned int flags = current->flags & PF_MEMALLOC;
	current->flags |= PF_MEMALLOC;
	return flags;
}

static inline void memalloc_noreclaim_restore(unsigned int flags)
{
	current->flags = (current->flags & ~PF_MEMALLOC) | flags;
}
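
/*
 * Usage sketch (illustrative only): same save/restore discipline as the
 * NOIO/NOFS pairs above, but PF_MEMALLOC disables direct reclaim
 * entirely and allows dipping into memory reserves, so it is meant for
 * code that is itself making progress on reclaim:
 *
 *	unsigned int noreclaim_flags = memalloc_noreclaim_save();
 *
 *	... allocate without recursing into reclaim ...
 *
 *	memalloc_noreclaim_restore(noreclaim_flags);
 */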

#ifdef CONFIG_MEMBARRIER
enum {
	MEMBARRIER_STATE_PRIVATE_EXPEDITED_READY		= (1U << 0),
	MEMBARRIER_STATE_PRIVATE_EXPEDITED			= (1U << 1),
	MEMBARRIER_STATE_GLOBAL_EXPEDITED_READY			= (1U << 2),
	MEMBARRIER_STATE_GLOBAL_EXPEDITED			= (1U << 3),
	MEMBARRIER_STATE_PRIVATE_EXPEDITED_SYNC_CORE_READY	= (1U << 4),
	MEMBARRIER_STATE_PRIVATE_EXPEDITED_SYNC_CORE		= (1U << 5),
};

enum {
	MEMBARRIER_FLAG_SYNC_CORE	= (1U << 0),
};

#ifdef CONFIG_ARCH_HAS_MEMBARRIER_CALLBACKS
#include <asm/membarrier.h>
#endif

static inline void membarrier_mm_sync_core_before_usermode(struct mm_struct *mm)
{
	if (likely(!(atomic_read(&mm->membarrier_state) &
		     MEMBARRIER_STATE_PRIVATE_EXPEDITED_SYNC_CORE)))
		return;
	sync_core_before_usermode();
}

static inline void membarrier_execve(struct task_struct *t)
{
	atomic_set(&t->mm->membarrier_state, 0);
}
#else
#ifdef CONFIG_ARCH_HAS_MEMBARRIER_CALLBACKS
static inline void membarrier_arch_switch_mm(struct mm_struct *prev,
					     struct mm_struct *next,
					     struct task_struct *tsk)
{
}
#endif
static inline void membarrier_execve(struct task_struct *t)
{
}
static inline void membarrier_mm_sync_core_before_usermode(struct mm_struct *mm)
{
}
#endif

#endif /* _LINUX_SCHED_MM_H */