/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SCHED_MM_H
#define _LINUX_SCHED_MM_H

#include <linux/kernel.h>
#include <linux/atomic.h>
#include <linux/sched.h>
#include <linux/mm_types.h>
#include <linux/gfp.h>
#include <linux/sync_core.h>

/*
 * Routines for handling mm_structs
 */
extern struct mm_struct *mm_alloc(void);

/**
 * mmgrab() - Pin a &struct mm_struct.
 * @mm: The &struct mm_struct to pin.
 *
 * Make sure that @mm will not get freed even after the owning task
 * exits. This doesn't guarantee that the associated address space
 * will still exist later on and mmget_not_zero() has to be used before
 * accessing it.
 *
 * This is the preferred way to pin @mm for a longer/unbounded amount
 * of time.
 *
 * Use mmdrop() to release the reference acquired by mmgrab().
 *
 * See also <Documentation/vm/active_mm.rst> for an in-depth explanation
 * of &mm_struct.mm_count vs &mm_struct.mm_users.
 */
static inline void mmgrab(struct mm_struct *mm)
{
	atomic_inc(&mm->mm_count);
}

extern void __mmdrop(struct mm_struct *mm);

static inline void mmdrop(struct mm_struct *mm)
{
	/*
	 * The implicit full barrier implied by atomic_dec_and_test() is
	 * required by the membarrier system call before returning to
	 * user-space, after storing to rq->curr.
	 */
	if (unlikely(atomic_dec_and_test(&mm->mm_count)))
		__mmdrop(mm);
}

/**
 * mmget() - Pin the address space associated with a &struct mm_struct.
 * @mm: The address space to pin.
 *
 * Make sure that the address space of the given &struct mm_struct doesn't
 * go away. This does not protect against parts of the address space being
 * modified or freed, however.
 *
 * Never use this function to pin this address space for an
 * unbounded/indefinite amount of time.
 *
 * Use mmput() to release the reference acquired by mmget().
 *
 * See also <Documentation/vm/active_mm.rst> for an in-depth explanation
 * of &mm_struct.mm_count vs &mm_struct.mm_users.
 */
static inline void mmget(struct mm_struct *mm)
{
	atomic_inc(&mm->mm_users);
}

static inline bool mmget_not_zero(struct mm_struct *mm)
{
	return atomic_inc_not_zero(&mm->mm_users);
}

/* mmput gets rid of the mappings and all user-space */
extern void mmput(struct mm_struct *);
#ifdef CONFIG_MMU
/*
 * Same as mmput(), but performs the slow path from an async context.
 * Can also be called from atomic context.
 */
void mmput_async(struct mm_struct *);
#endif
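
/*
 * Illustrative sketch (the function below is hypothetical and not part of
 * this header): a typical pairing of mmgrab()/mmdrop() with
 * mmget_not_zero()/mmput(). mmgrab() only keeps the mm_struct itself
 * allocated; the address space must additionally be pinned with
 * mmget_not_zero() before it is touched.
 */
static inline void example_use_mm(struct mm_struct *mm)
{
	mmgrab(mm);			/* pin the mm_struct (mm_count) */

	if (mmget_not_zero(mm)) {	/* pin the address space, if still live */
		/* ... access the address space here ... */
		mmput(mm);		/* drop the mm_users reference */
	}

	mmdrop(mm);			/* drop the mm_count reference */
}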

/* Grab a reference to a task's mm, if it is not already going away */
extern struct mm_struct *get_task_mm(struct task_struct *task);
/*
 * Grab a reference to a task's mm, if it is not already going away
 * and ptrace_may_access() with the given mode succeeds.
 */
extern struct mm_struct *mm_access(struct task_struct *task, unsigned int mode);
/* Remove the current task's stale references to the old mm_struct */
extern void mm_release(struct task_struct *, struct mm_struct *);
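
/*
 * Illustrative sketch (hypothetical helper, not part of this header):
 * get_task_mm() takes an mm_users reference on another task's mm, which
 * must be released with mmput() once the caller is done with the
 * address space.
 */
static inline void example_with_task_mm(struct task_struct *task)
{
	struct mm_struct *mm = get_task_mm(task);

	if (!mm)
		return;		/* no mm, or the task is already exiting */

	/* ... inspect or operate on the task's address space ... */

	mmput(mm);
}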

#ifdef CONFIG_MEMCG
extern void mm_update_next_owner(struct mm_struct *mm);
#else
static inline void mm_update_next_owner(struct mm_struct *mm)
{
}
#endif /* CONFIG_MEMCG */

#ifdef CONFIG_MMU
extern void arch_pick_mmap_layout(struct mm_struct *mm,
				  struct rlimit *rlim_stack);
extern unsigned long
arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
		       unsigned long, unsigned long);
extern unsigned long
arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
			       unsigned long len, unsigned long pgoff,
			       unsigned long flags);
#else
static inline void arch_pick_mmap_layout(struct mm_struct *mm,
					 struct rlimit *rlim_stack) {}
#endif

static inline bool in_vfork(struct task_struct *tsk)
{
	bool ret;

	/*
	 * need RCU to access ->real_parent if CLONE_VM was used along with
	 * CLONE_PARENT.
	 *
	 * We check real_parent->mm == tsk->mm because CLONE_VFORK does not
	 * imply CLONE_VM.
	 *
	 * CLONE_VFORK can be used with CLONE_PARENT/CLONE_THREAD and thus
	 * ->real_parent is not necessarily the task doing vfork(), so in
	 * theory we can't rely on task_lock() if we want to dereference it.
	 *
	 * And in this case we can't trust the real_parent->mm == tsk->mm
	 * check, it can be a false negative. But we do not care; if init or
	 * another oom-unkillable task does this, it should blame itself.
	 */
	rcu_read_lock();
	ret = tsk->vfork_done && tsk->real_parent->mm == tsk->mm;
	rcu_read_unlock();

	return ret;
}

/*
 * Applies per-task gfp context to the given allocation flags.
 * PF_MEMALLOC_NOIO implies GFP_NOIO
 * PF_MEMALLOC_NOFS implies GFP_NOFS
 */
static inline gfp_t current_gfp_context(gfp_t flags)
{
	/*
	 * NOIO implies both NOIO and NOFS and it is a weaker context
	 * so always make sure it takes precedence.
	 */
	if (unlikely(current->flags & PF_MEMALLOC_NOIO))
		flags &= ~(__GFP_IO | __GFP_FS);
	else if (unlikely(current->flags & PF_MEMALLOC_NOFS))
		flags &= ~__GFP_FS;
	return flags;
}
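
/*
 * Illustrative sketch (hypothetical helper, not part of this header):
 * an allocation path that honours the caller's scope flags by filtering
 * its gfp mask through current_gfp_context() before allocating.
 */
static inline struct page *example_scoped_alloc_page(gfp_t gfp_mask)
{
	return alloc_pages(current_gfp_context(gfp_mask), 0);
}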

#ifdef CONFIG_LOCKDEP
extern void __fs_reclaim_acquire(void);
extern void __fs_reclaim_release(void);
extern void fs_reclaim_acquire(gfp_t gfp_mask);
extern void fs_reclaim_release(gfp_t gfp_mask);
#else
static inline void __fs_reclaim_acquire(void) { }
static inline void __fs_reclaim_release(void) { }
static inline void fs_reclaim_acquire(gfp_t gfp_mask) { }
static inline void fs_reclaim_release(gfp_t gfp_mask) { }
#endif

/**
 * memalloc_noio_save - Marks implicit GFP_NOIO allocation scope.
 *
 * This function marks the beginning of the GFP_NOIO allocation scope.
 * All further allocations will implicitly drop the __GFP_IO flag and so
 * they are safe for the IO critical section from the allocation recursion
 * point of view. Use memalloc_noio_restore to end the scope with the flags
 * returned by this function.
 *
 * This function is safe to use from any context.
 */
static inline unsigned int memalloc_noio_save(void)
{
	unsigned int flags = current->flags & PF_MEMALLOC_NOIO;
	current->flags |= PF_MEMALLOC_NOIO;
	return flags;
}

/**
 * memalloc_noio_restore - Ends the implicit GFP_NOIO scope.
 * @flags: Flags to restore.
 *
 * Ends the implicit GFP_NOIO scope started by memalloc_noio_save function.
 * Always make sure that the given flags are the return value from the
 * pairing memalloc_noio_save call.
 */
static inline void memalloc_noio_restore(unsigned int flags)
{
	current->flags = (current->flags & ~PF_MEMALLOC_NOIO) | flags;
}
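
/*
 * Illustrative sketch (hypothetical function, not part of this header):
 * the save/restore pattern for a GFP_NOIO scope. Every allocation made
 * between the two calls implicitly loses __GFP_IO, so it cannot recurse
 * back into the IO path.
 */
static inline void example_noio_section(void)
{
	unsigned int noio_flags = memalloc_noio_save();

	/* ... allocations here are implicitly GFP_NOIO ... */

	memalloc_noio_restore(noio_flags);
}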

/**
 * memalloc_nofs_save - Marks implicit GFP_NOFS allocation scope.
 *
 * This function marks the beginning of the GFP_NOFS allocation scope.
 * All further allocations will implicitly drop the __GFP_FS flag and so
 * they are safe for the FS critical section from the allocation recursion
 * point of view. Use memalloc_nofs_restore to end the scope with the flags
 * returned by this function.
 *
 * This function is safe to use from any context.
 */
static inline unsigned int memalloc_nofs_save(void)
{
	unsigned int flags = current->flags & PF_MEMALLOC_NOFS;
	current->flags |= PF_MEMALLOC_NOFS;
	return flags;
}

/**
 * memalloc_nofs_restore - Ends the implicit GFP_NOFS scope.
 * @flags: Flags to restore.
 *
 * Ends the implicit GFP_NOFS scope started by memalloc_nofs_save function.
 * Always make sure that the given flags are the return value from the
 * pairing memalloc_nofs_save call.
 */
static inline void memalloc_nofs_restore(unsigned int flags)
{
	current->flags = (current->flags & ~PF_MEMALLOC_NOFS) | flags;
}
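
/*
 * Illustrative sketch (hypothetical function, not part of this header):
 * the same scoped pattern for GFP_NOFS. The PF_MEMALLOC helpers
 * memalloc_noreclaim_save()/memalloc_noreclaim_restore() below follow
 * the identical save/restore discipline.
 */
static inline void example_nofs_section(void)
{
	unsigned int nofs_flags = memalloc_nofs_save();

	/* ... allocations here are implicitly GFP_NOFS ... */

	memalloc_nofs_restore(nofs_flags);
}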

static inline unsigned int memalloc_noreclaim_save(void)
{
	unsigned int flags = current->flags & PF_MEMALLOC;
	current->flags |= PF_MEMALLOC;
	return flags;
}

static inline void memalloc_noreclaim_restore(unsigned int flags)
{
	current->flags = (current->flags & ~PF_MEMALLOC) | flags;
}

#ifdef CONFIG_MEMBARRIER
enum {
	MEMBARRIER_STATE_PRIVATE_EXPEDITED_READY		= (1U << 0),
	MEMBARRIER_STATE_PRIVATE_EXPEDITED			= (1U << 1),
	MEMBARRIER_STATE_GLOBAL_EXPEDITED_READY			= (1U << 2),
	MEMBARRIER_STATE_GLOBAL_EXPEDITED			= (1U << 3),
	MEMBARRIER_STATE_PRIVATE_EXPEDITED_SYNC_CORE_READY	= (1U << 4),
	MEMBARRIER_STATE_PRIVATE_EXPEDITED_SYNC_CORE		= (1U << 5),
};

enum {
	MEMBARRIER_FLAG_SYNC_CORE	= (1U << 0),
};

#ifdef CONFIG_ARCH_HAS_MEMBARRIER_CALLBACKS
#include <asm/membarrier.h>
#endif

static inline void membarrier_mm_sync_core_before_usermode(struct mm_struct *mm)
{
	if (likely(!(atomic_read(&mm->membarrier_state) &
		     MEMBARRIER_STATE_PRIVATE_EXPEDITED_SYNC_CORE)))
		return;
	sync_core_before_usermode();
}

static inline void membarrier_execve(struct task_struct *t)
{
	atomic_set(&t->mm->membarrier_state, 0);
}
#else
#ifdef CONFIG_ARCH_HAS_MEMBARRIER_CALLBACKS
static inline void membarrier_arch_switch_mm(struct mm_struct *prev,
					     struct mm_struct *next,
					     struct task_struct *tsk)
{
}
#endif
static inline void membarrier_execve(struct task_struct *t)
{
}
static inline void membarrier_mm_sync_core_before_usermode(struct mm_struct *mm)
{
}
#endif

#endif /* _LINUX_SCHED_MM_H */