#ifndef _ASM_X86_MMU_CONTEXT_H
#define _ASM_X86_MMU_CONTEXT_H

#include <asm/desc.h>
#include <linux/atomic.h>
#include <linux/mm_types.h>
#include <linux/pkeys.h>

#include <trace/events/tlb.h>

#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/paravirt.h>
#include <asm/mpx.h>

extern atomic64_t last_mm_ctx_id;

#ifndef CONFIG_PARAVIRT
static inline void paravirt_activate_mm(struct mm_struct *prev,
                                        struct mm_struct *next)
{
}
#endif /* !CONFIG_PARAVIRT */

#ifdef CONFIG_PERF_EVENTS
extern struct static_key rdpmc_always_available;

static inline void load_mm_cr4(struct mm_struct *mm)
{
        if (static_key_false(&rdpmc_always_available) ||
            atomic_read(&mm->context.perf_rdpmc_allowed))
                cr4_set_bits(X86_CR4_PCE);
        else
                cr4_clear_bits(X86_CR4_PCE);
}
#else
static inline void load_mm_cr4(struct mm_struct *mm) {}
#endif

#ifdef CONFIG_MODIFY_LDT_SYSCALL
/*
 * ldt_structs can be allocated, used, and freed, but they are never
 * modified while live.
 */
struct ldt_struct {
        /*
         * Xen requires page-aligned LDTs with special permissions.  This is
         * needed to prevent us from installing evil descriptors such as
         * call gates.  On native, we could merge the ldt_struct and LDT
         * allocations, but it's not worth trying to optimize.
         */
        struct desc_struct *entries;
        unsigned int nr_entries;
};

/*
 * Used for LDT copy/destruction.
 */
int init_new_context_ldt(struct task_struct *tsk, struct mm_struct *mm);
void destroy_context_ldt(struct mm_struct *mm);
#else /* CONFIG_MODIFY_LDT_SYSCALL */
static inline int init_new_context_ldt(struct task_struct *tsk,
                                       struct mm_struct *mm)
{
        return 0;
}
static inline void destroy_context_ldt(struct mm_struct *mm) {}
#endif

static inline void load_mm_ldt(struct mm_struct *mm)
{
#ifdef CONFIG_MODIFY_LDT_SYSCALL
        struct ldt_struct *ldt;

        /* lockless_dereference synchronizes with smp_store_release */
        ldt = lockless_dereference(mm->context.ldt);

        /*
         * Any change to mm->context.ldt is followed by an IPI to all
         * CPUs with the mm active.  The LDT will not be freed until
         * after the IPI is handled by all such CPUs.  This means that,
         * if the ldt_struct changes before we return, the values we see
         * will be safe, and the new values will be loaded before we run
         * any user code.
         *
         * NB: don't try to convert this to use RCU without extreme care.
         * We would still need IRQs off, because we don't want to change
         * the local LDT after an IPI loaded a newer value than the one
         * that we can see.
         */

        if (unlikely(ldt))
                set_ldt(ldt->entries, ldt->nr_entries);
        else
                clear_LDT();
#else
        clear_LDT();
#endif
}

static inline void switch_ldt(struct mm_struct *prev, struct mm_struct *next)
{
#ifdef CONFIG_MODIFY_LDT_SYSCALL
        /*
         * Load the LDT if either the old or new mm had an LDT.
         *
         * An mm will never go from having an LDT to not having an LDT.  Two
         * mms never share an LDT, so we don't gain anything by checking to
         * see whether the LDT changed.  There's also no guarantee that
         * prev->context.ldt actually matches LDTR, but, if LDTR is non-NULL,
         * then prev->context.ldt will also be non-NULL.
         *
         * If we really cared, we could optimize the case where prev == next
         * and we're exiting lazy mode.  Most of the time, if this happens,
         * we don't actually need to reload LDTR, but modify_ldt() is mostly
         * used by legacy code and emulators where we don't need this level of
         * performance.
         *
         * This uses | instead of || because it generates better code.
         */
        if (unlikely((unsigned long)prev->context.ldt |
                     (unsigned long)next->context.ldt))
                load_mm_ldt(next);
#endif

        DEBUG_LOCKS_WARN_ON(preemptible());
}

static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
        int cpu = smp_processor_id();

        if (cpumask_test_cpu(cpu, mm_cpumask(mm)))
                cpumask_clear_cpu(cpu, mm_cpumask(mm));
}

static inline int init_new_context(struct task_struct *tsk,
                                   struct mm_struct *mm)
{
        mm->context.ctx_id = atomic64_inc_return(&last_mm_ctx_id);
        atomic64_set(&mm->context.tlb_gen, 0);

#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
        if (cpu_feature_enabled(X86_FEATURE_OSPKE)) {
                /* pkey 0 is the default and always allocated */
                mm->context.pkey_allocation_map = 0x1;
                /* -1 means unallocated or invalid */
                mm->context.execute_only_pkey = -1;
        }
#endif
        init_new_context_ldt(tsk, mm);

        return 0;
}

static inline void destroy_context(struct mm_struct *mm)
{
        destroy_context_ldt(mm);
}

extern void switch_mm(struct mm_struct *prev, struct mm_struct *next,
                      struct task_struct *tsk);

extern void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
                               struct task_struct *tsk);
#define switch_mm_irqs_off switch_mm_irqs_off

#define activate_mm(prev, next)                 \
do {                                            \
        paravirt_activate_mm((prev), (next));   \
        switch_mm((prev), (next), NULL);        \
} while (0);

#ifdef CONFIG_X86_32
#define deactivate_mm(tsk, mm)                  \
do {                                            \
        lazy_load_gs(0);                        \
} while (0)
#else
#define deactivate_mm(tsk, mm)                  \
do {                                            \
        load_gs_index(0);                       \
        loadsegment(fs, 0);                     \
} while (0)
#endif

static inline void arch_dup_mmap(struct mm_struct *oldmm,
                                 struct mm_struct *mm)
{
        paravirt_arch_dup_mmap(oldmm, mm);
}

static inline void arch_exit_mmap(struct mm_struct *mm)
{
        paravirt_arch_exit_mmap(mm);
}

#ifdef CONFIG_X86_64
static inline bool is_64bit_mm(struct mm_struct *mm)
{
        return !IS_ENABLED(CONFIG_IA32_EMULATION) ||
               !(mm->context.ia32_compat == TIF_IA32);
}
#else
static inline bool is_64bit_mm(struct mm_struct *mm)
{
        return false;
}
#endif

static inline void arch_bprm_mm_init(struct mm_struct *mm,
                                     struct vm_area_struct *vma)
{
        mpx_mm_init(mm);
}

static inline void arch_unmap(struct mm_struct *mm, struct vm_area_struct *vma,
                              unsigned long start, unsigned long end)
{
        /*
         * mpx_notify_unmap() goes and reads a rarely-hot
         * cacheline in the mm_struct.  That can be expensive
         * enough to be seen in profiles.
         *
         * The mpx_notify_unmap() call and its contents have been
         * observed to affect munmap() performance on hardware
         * where MPX is not present.
         *
         * The unlikely() optimizes for the fast case: no MPX
         * in the CPU, or no MPX use in the process.  Even if
         * we get this wrong (in the unlikely event that MPX
         * is widely enabled on some system) the overhead of
         * MPX itself (reading bounds tables) is expected to
         * overwhelm the overhead of getting this unlikely()
         * consistently wrong.
         */
        if (unlikely(cpu_feature_enabled(X86_FEATURE_MPX)))
                mpx_notify_unmap(mm, vma, start, end);
}

#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
static inline int vma_pkey(struct vm_area_struct *vma)
{
        unsigned long vma_pkey_mask = VM_PKEY_BIT0 | VM_PKEY_BIT1 |
                                      VM_PKEY_BIT2 | VM_PKEY_BIT3;

        return (vma->vm_flags & vma_pkey_mask) >> VM_PKEY_SHIFT;
}
#else
static inline int vma_pkey(struct vm_area_struct *vma)
{
        return 0;
}
#endif
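
/*
 * Worked example (editor's illustration, not from the original header):
 * the protection key is encoded in the VM_PKEY_BIT0..VM_PKEY_BIT3 bits of
 * vma->vm_flags, so a VMA whose flags have VM_PKEY_BIT0 and VM_PKEY_BIT2
 * set (binary 0101) carries pkey 5, and vma_pkey() above returns 5 for it.
 */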

/*
 * We only want to enforce protection keys on the current process
 * because we effectively have no access to PKRU for other
 * processes or any way to tell *which* PKRU in a threaded
 * process we could use.
 *
 * So do not enforce things if the VMA is not from the current
 * mm, or if we are in a kernel thread.
 */
static inline bool vma_is_foreign(struct vm_area_struct *vma)
{
        if (!current->mm)
                return true;
        /*
         * Should PKRU be enforced on the access to this VMA?  If
         * the VMA is from another process, then PKRU has no
         * relevance and should not be enforced.
         */
        if (current->mm != vma->vm_mm)
                return true;

        return false;
}

static inline bool arch_vma_access_permitted(struct vm_area_struct *vma,
                bool write, bool execute, bool foreign)
{
        /* pkeys never affect instruction fetches */
        if (execute)
                return true;
        /* allow access if the VMA is not one from this process */
        if (foreign || vma_is_foreign(vma))
                return true;
        return __pkru_allows_pkey(vma_pkey(vma), write);
}

/*
 * This can be used from process context to figure out what the value of
 * CR3 is without needing to do a (slow) __read_cr3().
 *
 * It's intended to be used for code like KVM that sneakily changes CR3
 * and needs to restore it.  It needs to be used very carefully.
 */
static inline unsigned long __get_current_cr3_fast(void)
{
        unsigned long cr3 = __pa(this_cpu_read(cpu_tlbstate.loaded_mm)->pgd);

        if (static_cpu_has(X86_FEATURE_PCID))
                cr3 |= this_cpu_read(cpu_tlbstate.loaded_mm_asid);

        /* For now, be very restrictive about when this can be called. */
        VM_WARN_ON(in_nmi() || !in_atomic());

        VM_BUG_ON(cr3 != __read_cr3());
        return cr3;
}
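
/*
 * Illustrative usage sketch (editor's addition, not part of the original
 * header): a caller that temporarily switches CR3 from a preemption-
 * disabled region could pair this helper with write_cr3(), roughly:
 *
 *      unsigned long saved_cr3 = __get_current_cr3_fast();
 *
 *      write_cr3(temp_cr3);            (run on the temporary page tables)
 *      ...
 *      write_cr3(saved_cr3);           (restore without a slow __read_cr3())
 *
 * Here "temp_cr3" is a hypothetical value supplied by the caller.
 */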

#endif /* _ASM_X86_MMU_CONTEXT_H */