/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SCHED_MM_H
#define _LINUX_SCHED_MM_H

#include <linux/kernel.h>
#include <linux/atomic.h>
#include <linux/sched.h>
#include <linux/mm_types.h>
#include <linux/gfp.h>

/*
 * Routines for handling mm_structs
 */
extern struct mm_struct *mm_alloc(void);

/**
 * mmgrab() - Pin a &struct mm_struct.
 * @mm: The &struct mm_struct to pin.
 *
 * Make sure that @mm will not get freed even after the owning task
 * exits. This doesn't guarantee that the associated address space
 * will still exist later on and mmget_not_zero() has to be used before
 * accessing it.
 *
 * This is the preferred way to pin @mm for a longer/unbounded amount
 * of time.
27 *
28 * Use mmdrop() to release the reference acquired by mmgrab().
29 *
30 * See also <Documentation/vm/active_mm.txt> for an in-depth explanation
31 * of &mm_struct.mm_count vs &mm_struct.mm_users.
32 */
33static inline void mmgrab(struct mm_struct *mm)
34{
35 atomic_inc(&mm->mm_count);
36}
37
Andrew Mortond70f2a12018-01-31 16:15:51 -080038extern void mmdrop(struct mm_struct *mm);
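
/*
 * Illustrative sketch, not part of the original header: the usual
 * mmgrab()/mmdrop() pairing for keeping the mm_struct itself alive.
 * The helper name example_pin_mm_struct() is hypothetical.
 */
static inline void example_pin_mm_struct(void)
{
	struct mm_struct *mm = current->mm;

	if (!mm)			/* kernel threads have no mm */
		return;

	mmgrab(mm);			/* take an mm_count reference */
	/*
	 * @mm is guaranteed not to be freed here, but its address space
	 * may already be torn down; use mmget_not_zero() before touching
	 * user memory.
	 */
	mmdrop(mm);			/* release the mm_count reference */
}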

/**
 * mmget() - Pin the address space associated with a &struct mm_struct.
 * @mm: The address space to pin.
 *
 * Make sure that the address space of the given &struct mm_struct doesn't
 * go away. This does not protect against parts of the address space being
 * modified or freed, however.
 *
 * Never use this function to pin this address space for an
 * unbounded/indefinite amount of time.
 *
 * Use mmput() to release the reference acquired by mmget().
 *
 * See also <Documentation/vm/active_mm.txt> for an in-depth explanation
 * of &mm_struct.mm_count vs &mm_struct.mm_users.
 */
static inline void mmget(struct mm_struct *mm)
{
	atomic_inc(&mm->mm_users);
}

static inline bool mmget_not_zero(struct mm_struct *mm)
{
	return atomic_inc_not_zero(&mm->mm_users);
}

/* mmput gets rid of the mappings and all user-space */
extern void mmput(struct mm_struct *);
#ifdef CONFIG_MMU
/*
 * Same as mmput() but performs the slow path from async context.
 * Can also be called from atomic context.
 */
void mmput_async(struct mm_struct *);
#endif
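
/*
 * Illustrative sketch, not part of the original header: taking an
 * mm_users reference with mmget_not_zero() before operating on an
 * address space that might already be exiting, paired with mmput().
 * The helper name example_with_mm_users() is hypothetical.
 */
static inline bool example_with_mm_users(struct mm_struct *mm)
{
	if (!mmget_not_zero(mm))
		return false;		/* address space is already gone */

	/* ... the address space is safe to use at this point ... */

	mmput(mm);			/* drop the mm_users reference */
	return true;
}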

/* Grab a reference to a task's mm, if it is not already going away */
extern struct mm_struct *get_task_mm(struct task_struct *task);
/*
 * Grab a reference to a task's mm, if it is not already going away
 * and ptrace_may_access() with the given mode succeeds.
 */
extern struct mm_struct *mm_access(struct task_struct *task, unsigned int mode);
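
/*
 * Illustrative sketch, not part of the original header: the common
 * get_task_mm()/mmput() pattern for looking at another task's address
 * space. The helper name example_task_has_mm() is hypothetical.
 */
static inline bool example_task_has_mm(struct task_struct *task)
{
	struct mm_struct *mm = get_task_mm(task);

	if (!mm)
		return false;	/* kernel thread, or the task is exiting */

	/* ... inspect the address space under the mm_users reference ... */

	mmput(mm);
	return true;
}
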
/* Remove the current task's stale references to the old mm_struct */
extern void mm_release(struct task_struct *, struct mm_struct *);

#ifdef CONFIG_MEMCG
extern void mm_update_next_owner(struct mm_struct *mm);
#else
static inline void mm_update_next_owner(struct mm_struct *mm)
{
}
#endif /* CONFIG_MEMCG */

#ifdef CONFIG_MMU
extern void arch_pick_mmap_layout(struct mm_struct *mm);
extern unsigned long
arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
		       unsigned long, unsigned long);
extern unsigned long
arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
			       unsigned long len, unsigned long pgoff,
			       unsigned long flags);
#else
static inline void arch_pick_mmap_layout(struct mm_struct *mm) {}
#endif

static inline bool in_vfork(struct task_struct *tsk)
{
	bool ret;

	/*
	 * We need RCU to access ->real_parent if CLONE_VM was used along
	 * with CLONE_PARENT.
	 *
	 * We check real_parent->mm == tsk->mm because CLONE_VFORK does not
	 * imply CLONE_VM.
	 *
	 * CLONE_VFORK can be used with CLONE_PARENT/CLONE_THREAD and thus
	 * ->real_parent is not necessarily the task doing vfork(), so in
	 * theory we can't rely on task_lock() if we want to dereference it.
	 *
	 * And in this case we can't trust the real_parent->mm == tsk->mm
	 * check; it can be a false negative. But we do not care, if init or
	 * another oom-unkillable task does this it should blame itself.
	 */
	rcu_read_lock();
	ret = tsk->vfork_done && tsk->real_parent->mm == tsk->mm;
	rcu_read_unlock();

	return ret;
}

/*
 * Applies per-task gfp context to the given allocation flags.
 * PF_MEMALLOC_NOIO implies GFP_NOIO
 * PF_MEMALLOC_NOFS implies GFP_NOFS
 */
static inline gfp_t current_gfp_context(gfp_t flags)
{
	/*
	 * NOIO implies both NOIO and NOFS and it is a weaker context
	 * so always make sure it takes precedence.
	 */
	if (unlikely(current->flags & PF_MEMALLOC_NOIO))
		flags &= ~(__GFP_IO | __GFP_FS);
	else if (unlikely(current->flags & PF_MEMALLOC_NOFS))
		flags &= ~__GFP_FS;
	return flags;
}
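
/*
 * Illustrative sketch, not part of the original header: how
 * current_gfp_context() narrows a GFP_KERNEL request. Under
 * PF_MEMALLOC_NOIO both __GFP_IO and __GFP_FS are cleared (a
 * GFP_NOIO-style allocation), under PF_MEMALLOC_NOFS only __GFP_FS is
 * cleared (GFP_NOFS-style); otherwise the flags pass through unchanged.
 * The helper name example_effective_gfp() is hypothetical.
 */
static inline gfp_t example_effective_gfp(void)
{
	return current_gfp_context(GFP_KERNEL);
}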

#ifdef CONFIG_LOCKDEP
extern void fs_reclaim_acquire(gfp_t gfp_mask);
extern void fs_reclaim_release(gfp_t gfp_mask);
#else
static inline void fs_reclaim_acquire(gfp_t gfp_mask) { }
static inline void fs_reclaim_release(gfp_t gfp_mask) { }
#endif

static inline unsigned int memalloc_noio_save(void)
{
	unsigned int flags = current->flags & PF_MEMALLOC_NOIO;
	current->flags |= PF_MEMALLOC_NOIO;
	return flags;
}

static inline void memalloc_noio_restore(unsigned int flags)
{
	current->flags = (current->flags & ~PF_MEMALLOC_NOIO) | flags;
}
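
/*
 * Illustrative sketch, not part of the original header: wrapping an
 * allocation in a PF_MEMALLOC_NOIO scope (e.g. in a block device
 * resume path) so the page allocator cannot recurse into I/O. The
 * helper name example_alloc_page_noio() is hypothetical.
 */
static inline struct page *example_alloc_page_noio(void)
{
	unsigned int noio_flags;
	struct page *page;

	noio_flags = memalloc_noio_save();
	/* treated as a GFP_NOIO allocation while the scope is active */
	page = alloc_page(GFP_KERNEL);
	memalloc_noio_restore(noio_flags);

	return page;		/* caller frees with __free_page() */
}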

static inline unsigned int memalloc_nofs_save(void)
{
	unsigned int flags = current->flags & PF_MEMALLOC_NOFS;
	current->flags |= PF_MEMALLOC_NOFS;
	return flags;
}

static inline void memalloc_nofs_restore(unsigned int flags)
{
	current->flags = (current->flags & ~PF_MEMALLOC_NOFS) | flags;
}
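
/*
 * Illustrative sketch, not part of the original header: a filesystem
 * that holds locks (for instance with a transaction open) marks the
 * region with memalloc_nofs_save()/restore() so allocations inside it
 * are treated as GFP_NOFS and cannot recurse into filesystem reclaim.
 * The helper name example_fs_scoped_alloc() is hypothetical.
 */
static inline unsigned long example_fs_scoped_alloc(void)
{
	unsigned int nofs_flags;
	unsigned long addr;

	nofs_flags = memalloc_nofs_save();
	addr = __get_free_page(GFP_KERNEL);	/* effectively GFP_NOFS here */
	memalloc_nofs_restore(nofs_flags);

	return addr;		/* caller frees with free_page() */
}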

static inline unsigned int memalloc_noreclaim_save(void)
{
	unsigned int flags = current->flags & PF_MEMALLOC;
	current->flags |= PF_MEMALLOC;
	return flags;
}

static inline void memalloc_noreclaim_restore(unsigned int flags)
{
	current->flags = (current->flags & ~PF_MEMALLOC) | flags;
}
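
/*
 * Illustrative sketch, not part of the original header: code running in
 * a reclaim-like context sets PF_MEMALLOC via memalloc_noreclaim_save()
 * so its own allocations do not recurse into direct reclaim. Use with
 * care: PF_MEMALLOC also grants access to memory reserves. The helper
 * name example_reclaim_context_alloc() is hypothetical.
 */
static inline struct page *example_reclaim_context_alloc(void)
{
	unsigned int noreclaim_flags;
	struct page *page;

	noreclaim_flags = memalloc_noreclaim_save();
	page = alloc_page(GFP_KERNEL);
	memalloc_noreclaim_restore(noreclaim_flags);

	return page;
}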

#ifdef CONFIG_MEMBARRIER
enum {
	MEMBARRIER_STATE_PRIVATE_EXPEDITED_READY = (1U << 0),
	MEMBARRIER_STATE_SWITCH_MM = (1U << 1),
};

static inline void membarrier_execve(struct task_struct *t)
{
	atomic_set(&t->mm->membarrier_state, 0);
}
#else
static inline void membarrier_execve(struct task_struct *t)
{
}
#endif

#endif /* _LINUX_SCHED_MM_H */