/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_MMU_CONTEXT_H
#define _ASM_X86_MMU_CONTEXT_H

#include <asm/desc.h>
#include <linux/atomic.h>
#include <linux/mm_types.h>
#include <linux/pkeys.h>

#include <trace/events/tlb.h>

#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/paravirt.h>
#include <asm/mpx.h>

extern atomic64_t last_mm_ctx_id;

#ifndef CONFIG_PARAVIRT
static inline void paravirt_activate_mm(struct mm_struct *prev,
                                        struct mm_struct *next)
{
}
#endif /* !CONFIG_PARAVIRT */

#ifdef CONFIG_PERF_EVENTS
extern struct static_key rdpmc_always_available;

static inline void load_mm_cr4(struct mm_struct *mm)
{
        if (static_key_false(&rdpmc_always_available) ||
            atomic_read(&mm->context.perf_rdpmc_allowed))
                cr4_set_bits(X86_CR4_PCE);
        else
                cr4_clear_bits(X86_CR4_PCE);
}
#else
static inline void load_mm_cr4(struct mm_struct *mm) {}
#endif
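
/*
 * Illustrative sketch (not part of this header's API): CR4.PCE is the bit
 * that lets user space execute RDPMC directly, which is why perf toggles
 * it per-mm above.  A self-monitoring task could then read a counter
 * without a syscall, roughly like this (helper name is hypothetical):
 *
 *	static inline u64 example_rdpmc(u32 counter)
 *	{
 *		u32 lo, hi;
 *
 *		// RDPMC takes the counter index in ECX and returns the
 *		// value in EDX:EAX; it #GPs at CPL 3 if CR4.PCE is clear.
 *		asm volatile("rdpmc" : "=a" (lo), "=d" (hi) : "c" (counter));
 *		return ((u64)hi << 32) | lo;
 *	}
 */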

#ifdef CONFIG_MODIFY_LDT_SYSCALL
/*
 * ldt_structs can be allocated, used, and freed, but they are never
 * modified while live.
 */
struct ldt_struct {
        /*
         * Xen requires page-aligned LDTs with special permissions.  This is
         * needed to prevent us from installing evil descriptors such as
         * call gates.  On native, we could merge the ldt_struct and LDT
         * allocations, but it's not worth trying to optimize.
         */
        struct desc_struct *entries;
        unsigned int nr_entries;
};
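
/*
 * A minimal allocation sketch under the Xen constraint documented above
 * (assumed shape only; the real alloc_ldt_struct() lives in
 * arch/x86/kernel/ldt.c): 'entries' must come from page-granular memory
 * so the hypervisor can write-protect it.
 *
 *	struct ldt_struct *ldt = kmalloc(sizeof(*ldt), GFP_KERNEL);
 *	unsigned int size = num_entries * LDT_ENTRY_SIZE;
 *
 *	if (ldt) {
 *		ldt->entries = size <= PAGE_SIZE ?
 *			(void *)__get_free_page(GFP_KERNEL | __GFP_ZERO) :
 *			vzalloc(size);
 *		ldt->nr_entries = num_entries;
 *	}
 */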

/*
 * Used for LDT copy/destruction.
 */
static inline void init_new_context_ldt(struct mm_struct *mm)
{
        mm->context.ldt = NULL;
        init_rwsem(&mm->context.ldt_usr_sem);
}
int ldt_dup_context(struct mm_struct *oldmm, struct mm_struct *mm);
void destroy_context_ldt(struct mm_struct *mm);
#else /* CONFIG_MODIFY_LDT_SYSCALL */
static inline void init_new_context_ldt(struct mm_struct *mm) { }
static inline int ldt_dup_context(struct mm_struct *oldmm,
                                  struct mm_struct *mm)
{
        return 0;
}
static inline void destroy_context_ldt(struct mm_struct *mm) {}
#endif

static inline void load_mm_ldt(struct mm_struct *mm)
{
#ifdef CONFIG_MODIFY_LDT_SYSCALL
        struct ldt_struct *ldt;

        /* READ_ONCE synchronizes with smp_store_release */
        ldt = READ_ONCE(mm->context.ldt);

        /*
         * Any change to mm->context.ldt is followed by an IPI to all
         * CPUs with the mm active.  The LDT will not be freed until
         * after the IPI is handled by all such CPUs.  This means that,
         * if the ldt_struct changes before we return, the values we see
         * will be safe, and the new values will be loaded before we run
         * any user code.
         *
         * NB: don't try to convert this to use RCU without extreme care.
         * We would still need IRQs off, because we don't want to change
         * the local LDT after an IPI loaded a newer value than the one
         * that we can see.
         */

        if (unlikely(ldt))
                set_ldt(ldt->entries, ldt->nr_entries);
        else
                clear_LDT();
#else
        clear_LDT();
#endif
}
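
/*
 * For context, a sketch of the update side that the READ_ONCE above pairs
 * with (assumed shape; the real install_ldt() is in arch/x86/kernel/ldt.c):
 * the writer publishes the new table with a release store, then IPIs every
 * CPU running this mm so each one reruns load_mm_ldt():
 *
 *	smp_store_release(&mm->context.ldt, new_ldt);
 *	on_each_cpu_mask(mm_cpumask(mm), flush_ldt, mm, true);
 *
 * where flush_ldt stands in for a callback that calls load_mm_ldt(mm)
 * with interrupts disabled on each target CPU.
 */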

static inline void switch_ldt(struct mm_struct *prev, struct mm_struct *next)
{
#ifdef CONFIG_MODIFY_LDT_SYSCALL
        /*
         * Load the LDT if either the old or new mm had an LDT.
         *
         * An mm will never go from having an LDT to not having an LDT.  Two
         * mms never share an LDT, so we don't gain anything by checking to
         * see whether the LDT changed.  There's also no guarantee that
         * prev->context.ldt actually matches LDTR, but, if LDTR is non-NULL,
         * then prev->context.ldt will also be non-NULL.
         *
         * If we really cared, we could optimize the case where prev == next
         * and we're exiting lazy mode.  Most of the time, if this happens,
         * we don't actually need to reload LDTR, but modify_ldt() is mostly
         * used by legacy code and emulators where we don't need this level
         * of performance.
         *
         * This uses | instead of || because it generates better code.
         */
        if (unlikely((unsigned long)prev->context.ldt |
                     (unsigned long)next->context.ldt))
                load_mm_ldt(next);
#endif

        DEBUG_LOCKS_WARN_ON(preemptible());
}
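
/*
 * On the '|' vs '||' note above, a compiler-level sketch of why it helps
 * (assumed codegen; exact output varies by compiler and flags): '||' must
 * preserve short-circuit evaluation and tends to emit two test-and-branch
 * pairs, while '|' folds both pointers into a single test:
 *
 *	mov	prev_ldt, %rax
 *	or	next_ldt, %rax		// combine both NULL checks
 *	jne	.Lreload_ldt		// single conditional branch
 *
 * This is only safe because neither operand has side effects.
 */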

void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk);

static inline int init_new_context(struct task_struct *tsk,
                                   struct mm_struct *mm)
{
        mutex_init(&mm->context.lock);

        mm->context.ctx_id = atomic64_inc_return(&last_mm_ctx_id);
        atomic64_set(&mm->context.tlb_gen, 0);

#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
        if (cpu_feature_enabled(X86_FEATURE_OSPKE)) {
                /* pkey 0 is the default and always allocated */
                mm->context.pkey_allocation_map = 0x1;
                /* -1 means unallocated or invalid */
                mm->context.execute_only_pkey = -1;
        }
#endif
        init_new_context_ldt(mm);
        return 0;
}
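
/*
 * A sketch of how the pkey_allocation_map seeded above is consumed
 * (assumed shape; the real helpers are in asm/pkeys.h): the 16 hardware
 * protection keys map to one bit each, so allocating a key is a
 * find-first-zero over the map:
 *
 *	static inline int example_alloc_pkey(struct mm_struct *mm)
 *	{
 *		u16 map = mm->context.pkey_allocation_map;
 *		int pkey = ffz(map);	// lowest clear bit == lowest free key
 *
 *		if (pkey >= 16)
 *			return -ENOSPC;	// all 16 keys in use
 *		mm->context.pkey_allocation_map = map | (1U << pkey);
 *		return pkey;
 *	}
 *
 * Seeding the map with 0x1 is what permanently reserves pkey 0 as the
 * default key for ordinary mappings.
 */
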
static inline void destroy_context(struct mm_struct *mm)
{
        destroy_context_ldt(mm);
}

extern void switch_mm(struct mm_struct *prev, struct mm_struct *next,
                      struct task_struct *tsk);

extern void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
                               struct task_struct *tsk);
#define switch_mm_irqs_off switch_mm_irqs_off

#define activate_mm(prev, next)                 \
do {                                            \
        paravirt_activate_mm((prev), (next));   \
        switch_mm((prev), (next), NULL);        \
} while (0)

#ifdef CONFIG_X86_32
#define deactivate_mm(tsk, mm)                  \
do {                                            \
        lazy_load_gs(0);                        \
} while (0)
#else
#define deactivate_mm(tsk, mm)                  \
do {                                            \
        load_gs_index(0);                       \
        loadsegment(fs, 0);                     \
} while (0)
#endif

static inline int arch_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm)
{
        paravirt_arch_dup_mmap(oldmm, mm);
        return ldt_dup_context(oldmm, mm);
}

static inline void arch_exit_mmap(struct mm_struct *mm)
{
        paravirt_arch_exit_mmap(mm);
}

#ifdef CONFIG_X86_64
static inline bool is_64bit_mm(struct mm_struct *mm)
{
        return !IS_ENABLED(CONFIG_IA32_EMULATION) ||
               !(mm->context.ia32_compat == TIF_IA32);
}
#else
static inline bool is_64bit_mm(struct mm_struct *mm)
{
        return false;
}
#endif
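
/*
 * Reading the 64-bit check above as a table (illustrative only):
 *
 *	CONFIG_IA32_EMULATION	context.ia32_compat	is_64bit_mm()
 *	n			(any)			true
 *	y			!= TIF_IA32		true
 *	y			== TIF_IA32		false
 *
 * i.e. with compat support compiled out every mm is 64-bit; otherwise an
 * mm is 32-bit only if it was flagged TIF_IA32 at exec time.
 */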

static inline void arch_bprm_mm_init(struct mm_struct *mm,
                struct vm_area_struct *vma)
{
        mpx_mm_init(mm);
}

static inline void arch_unmap(struct mm_struct *mm, struct vm_area_struct *vma,
                              unsigned long start, unsigned long end)
{
        /*
         * mpx_notify_unmap() goes and reads a rarely-hot
         * cacheline in the mm_struct.  That can be expensive
         * enough to be seen in profiles.
         *
         * The mpx_notify_unmap() call and its contents have been
         * observed to affect munmap() performance on hardware
         * where MPX is not present.
         *
         * The unlikely() optimizes for the fast case: no MPX
         * in the CPU, or no MPX use in the process.  Even if
         * we get this wrong (in the unlikely event that MPX
         * is widely enabled on some system) the overhead of
         * MPX itself (reading bounds tables) is expected to
         * overwhelm the overhead of getting this unlikely()
         * consistently wrong.
         */
        if (unlikely(cpu_feature_enabled(X86_FEATURE_MPX)))
                mpx_notify_unmap(mm, vma, start, end);
}

#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
static inline int vma_pkey(struct vm_area_struct *vma)
{
        unsigned long vma_pkey_mask = VM_PKEY_BIT0 | VM_PKEY_BIT1 |
                                      VM_PKEY_BIT2 | VM_PKEY_BIT3;

        return (vma->vm_flags & vma_pkey_mask) >> VM_PKEY_SHIFT;
}
#else
static inline int vma_pkey(struct vm_area_struct *vma)
{
        return 0;
}
#endif
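
/*
 * A worked example of the extraction above (pkey value chosen for
 * illustration): the four VM_PKEY_BIT* flags sit at VM_PKEY_SHIFT inside
 * vm_flags, so for a VMA mapped with pkey 5 (binary 0101):
 *
 *	vm_flags = ... | VM_PKEY_BIT0 | VM_PKEY_BIT2 | ...
 *	masked   = vm_flags & vma_pkey_mask	// isolates the 4 pkey bits
 *	pkey     = masked >> VM_PKEY_SHIFT	// == 5
 */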

/*
 * We only want to enforce protection keys on the current process
 * because we effectively have no access to PKRU for other
 * processes or any way to tell *which* PKRU in a threaded
 * process we could use.
 *
 * So do not enforce things if the VMA is not from the current
 * mm, or if we are in a kernel thread.
 */
static inline bool vma_is_foreign(struct vm_area_struct *vma)
{
        if (!current->mm)
                return true;
        /*
         * Should PKRU be enforced on the access to this VMA?  If
         * the VMA is from another process, then PKRU has no
         * relevance and should not be enforced.
         */
        if (current->mm != vma->vm_mm)
                return true;

        return false;
}

static inline bool arch_vma_access_permitted(struct vm_area_struct *vma,
                bool write, bool execute, bool foreign)
{
        /* pkeys never affect instruction fetches */
        if (execute)
                return true;
        /* allow access if the VMA is not one from this process */
        if (foreign || vma_is_foreign(vma))
                return true;
        return __pkru_allows_pkey(vma_pkey(vma), write);
}
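
/*
 * A sketch of the PKRU check this ends in (assumed shape; the real
 * __pkru_allows_pkey() is in asm/pgtable.h): PKRU holds two bits per key,
 * an access-disable bit and a write-disable bit:
 *
 *	static inline bool example_pkru_allows(int pkey, bool write)
 *	{
 *		u32 pkru = read_pkru();
 *		u32 ad = pkru & (1U << (pkey * 2));	 // access-disable
 *		u32 wd = pkru & (1U << (pkey * 2 + 1));	 // write-disable
 *
 *		if (ad)
 *			return false;
 *		return write ? !wd : true;
 *	}
 *
 * read_pkru() is real; the helper name and the walk-through are for
 * illustration only.
 */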

/*
 * This can be used from process context to figure out what the value of
 * CR3 is without needing to do a (slow) __read_cr3().
 *
 * It's intended to be used for code like KVM that sneakily changes CR3
 * and needs to restore it.  It needs to be used very carefully.
 */
static inline unsigned long __get_current_cr3_fast(void)
{
        unsigned long cr3 = build_cr3(this_cpu_read(cpu_tlbstate.loaded_mm)->pgd,
                this_cpu_read(cpu_tlbstate.loaded_mm_asid));

        /* For now, be very restrictive about when this can be called. */
        VM_WARN_ON(in_nmi() || preemptible());

        VM_BUG_ON(cr3 != __read_cr3());
        return cr3;
}
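
/*
 * A usage sketch for the KVM-style case described above (hypothetical
 * code, not a real call site): save the fast CR3, let other code clobber
 * the register, then restore it:
 *
 *	unsigned long saved_cr3 = __get_current_cr3_fast();
 *
 *	// ... run code that temporarily switches CR3 ...
 *
 *	write_cr3(saved_cr3);
 *
 * This only works because the caller runs with preemption disabled (see
 * the VM_WARN_ON above), so loaded_mm/loaded_mm_asid cannot change
 * underneath it.
 */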

#endif /* _ASM_X86_MMU_CONTEXT_H */