/*
 * Copyright (C) 1995 Linus Torvalds
 *
 * Pentium III FXSR, SSE support
 *	Gareth Hughes <gareth@valinux.com>, May 2000
 *
 * X86-64 port
 *	Andi Kleen.
 *
 *	CPU hotplug support - ashok.raj@intel.com
 */

/*
 * This file handles the architecture-dependent parts of process handling.
 */

#include <linux/cpu.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/elfcore.h>
#include <linux/smp.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/ptrace.h>
#include <linux/notifier.h>
#include <linux/kprobes.h>
#include <linux/kdebug.h>
#include <linux/prctl.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/ftrace.h>

#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/fpu/internal.h>
#include <asm/mmu_context.h>
#include <asm/prctl.h>
#include <asm/desc.h>
#include <asm/proto.h>
#include <asm/ia32.h>
#include <asm/idle.h>
#include <asm/syscalls.h>
#include <asm/debugreg.h>
#include <asm/switch_to.h>
#include <asm/xen/hypervisor.h>
#include <asm/vdso.h>

#include "process.h"

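/*
 * Per-CPU scratch slot: the 64-bit SYSCALL entry code stashes the user
 * RSP here while it switches to the kernel stack (see entry_64.S).
 */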
__visible DEFINE_PER_CPU(unsigned long, rsp_scratch);

/* Also prints some state that isn't saved in pt_regs. */
void __show_regs(struct pt_regs *regs, int all)
{
	unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L, fs, gs, shadowgs;
	unsigned long d0, d1, d2, d3, d6, d7;
	unsigned int fsindex, gsindex;
	unsigned int ds, cs, es;

	printk(KERN_DEFAULT "RIP: %04lx:[<%016lx>] ", regs->cs & 0xffff, regs->ip);
	printk_address(regs->ip);
	printk(KERN_DEFAULT "RSP: %04lx:%016lx EFLAGS: %08lx\n", regs->ss,
			regs->sp, regs->flags);
	printk(KERN_DEFAULT "RAX: %016lx RBX: %016lx RCX: %016lx\n",
	       regs->ax, regs->bx, regs->cx);
	printk(KERN_DEFAULT "RDX: %016lx RSI: %016lx RDI: %016lx\n",
	       regs->dx, regs->si, regs->di);
	printk(KERN_DEFAULT "RBP: %016lx R08: %016lx R09: %016lx\n",
	       regs->bp, regs->r8, regs->r9);
	printk(KERN_DEFAULT "R10: %016lx R11: %016lx R12: %016lx\n",
	       regs->r10, regs->r11, regs->r12);
	printk(KERN_DEFAULT "R13: %016lx R14: %016lx R15: %016lx\n",
	       regs->r13, regs->r14, regs->r15);

	asm("movl %%ds,%0" : "=r" (ds));
	asm("movl %%cs,%0" : "=r" (cs));
	asm("movl %%es,%0" : "=r" (es));
	asm("movl %%fs,%0" : "=r" (fsindex));
	asm("movl %%gs,%0" : "=r" (gsindex));

	rdmsrl(MSR_FS_BASE, fs);
	rdmsrl(MSR_GS_BASE, gs);
	rdmsrl(MSR_KERNEL_GS_BASE, shadowgs);

	if (!all)
		return;

	cr0 = read_cr0();
	cr2 = read_cr2();
	cr3 = read_cr3();
	cr4 = __read_cr4();

	printk(KERN_DEFAULT "FS:  %016lx(%04x) GS:%016lx(%04x) knlGS:%016lx\n",
	       fs, fsindex, gs, gsindex, shadowgs);
	printk(KERN_DEFAULT "CS:  %04x DS: %04x ES: %04x CR0: %016lx\n", cs, ds,
			es, cr0);
	printk(KERN_DEFAULT "CR2: %016lx CR3: %016lx CR4: %016lx\n", cr2, cr3,
			cr4);

	get_debugreg(d0, 0);
	get_debugreg(d1, 1);
	get_debugreg(d2, 2);
	get_debugreg(d3, 3);
	get_debugreg(d6, 6);
	get_debugreg(d7, 7);

	/* Only print out debug registers if they are in their non-default state. */
	if (!((d0 == 0) && (d1 == 0) && (d2 == 0) && (d3 == 0) &&
	    (d6 == DR6_RESERVED) && (d7 == 0x400))) {
		printk(KERN_DEFAULT "DR0: %016lx DR1: %016lx DR2: %016lx\n",
		       d0, d1, d2);
		printk(KERN_DEFAULT "DR3: %016lx DR6: %016lx DR7: %016lx\n",
		       d3, d6, d7);
	}

	if (boot_cpu_has(X86_FEATURE_OSPKE))
		printk(KERN_DEFAULT "PKRU: %08x\n", read_pkru());
}
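
/*
 * Sanity check when a task's thread resources are released: a dead task
 * must not leave an LDT behind in its mm.
 */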
void release_thread(struct task_struct *dead_task)
{
	if (dead_task->mm) {
#ifdef CONFIG_MODIFY_LDT_SYSCALL
		if (dead_task->mm->context.ldt) {
			pr_warn("WARNING: dead process %s still has LDT? <%p/%d>\n",
				dead_task->comm,
				dead_task->mm->context.ldt->entries,
				dead_task->mm->context.ldt->size);
			BUG();
		}
#endif
	}
}
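
/* Which of the two MSR-backed segment registers a helper operates on. */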
enum which_selector {
	FS,
	GS
};

/*
 * Saves the FS or GS base for an outgoing thread if FSGSBASE extensions are
 * not available. The goal is to be reasonably fast on non-FSGSBASE systems.
 * It's forcibly inlined because it'll generate better code and this function
 * is hot.
 */
static __always_inline void save_base_legacy(struct task_struct *prev_p,
					     unsigned short selector,
					     enum which_selector which)
{
	if (likely(selector == 0)) {
		/*
		 * On Intel (without X86_BUG_NULL_SEG), the segment base could
		 * be the pre-existing saved base or it could be zero. On AMD
		 * (with X86_BUG_NULL_SEG), the segment base could be almost
		 * anything.
		 *
		 * This branch is very hot (it's hit twice on almost every
		 * context switch between 64-bit programs), and avoiding
		 * the RDMSR helps a lot, so we just assume that whatever
		 * value is already saved is correct. This matches historical
		 * Linux behavior, so it won't break existing applications.
		 *
		 * To avoid leaking state, on non-X86_BUG_NULL_SEG CPUs, if we
		 * report that the base is zero, it needs to actually be zero:
		 * see the corresponding logic in load_seg_legacy.
		 */
	} else {
		/*
		 * If the selector is 1, 2, or 3, then the base is zero on
		 * !X86_BUG_NULL_SEG CPUs and could be anything on
		 * X86_BUG_NULL_SEG CPUs. In the latter case, Linux
		 * has never attempted to preserve the base across context
		 * switches.
		 *
		 * If selector > 3, then it refers to a real segment, and
		 * saving the base isn't necessary.
		 */
		if (which == FS)
			prev_p->thread.fsbase = 0;
		else
			prev_p->thread.gsbase = 0;
	}
}
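
/*
 * Save the outgoing task's FS and GS selectors, and the bases where they
 * can't be recovered from the selectors later (see save_base_legacy()).
 */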
static __always_inline void save_fsgs(struct task_struct *task)
{
	savesegment(fs, task->thread.fsindex);
	savesegment(gs, task->thread.gsindex);
	save_base_legacy(task, task->thread.fsindex, FS);
	save_base_legacy(task, task->thread.gsindex, GS);
}
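
/*
 * Load a selector into FS or GS. GS goes through load_gs_index() so that
 * the write hits the user GS base rather than the kernel's per-CPU base,
 * which is live while we're in the kernel.
 */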
static __always_inline void loadseg(enum which_selector which,
				    unsigned short sel)
{
	if (which == FS)
		loadsegment(fs, sel);
	else
		load_gs_index(sel);
}
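
/*
 * Restore the incoming task's FS or GS selector and base on CPUs without
 * FSGSBASE, doing as few selector loads and MSR writes as correctness
 * allows (see the case analysis in the comments below).
 */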
static __always_inline void load_seg_legacy(unsigned short prev_index,
					    unsigned long prev_base,
					    unsigned short next_index,
					    unsigned long next_base,
					    enum which_selector which)
{
	if (likely(next_index <= 3)) {
		/*
		 * The next task is using 64-bit TLS, is not using this
		 * segment at all, or is having fun with arcane CPU features.
		 */
		if (next_base == 0) {
			/*
			 * Nasty case: on AMD CPUs, we need to forcibly zero
			 * the base.
			 */
			if (static_cpu_has_bug(X86_BUG_NULL_SEG)) {
				loadseg(which, __USER_DS);
				loadseg(which, next_index);
			} else {
				/*
				 * We could try to exhaustively detect cases
				 * under which we can skip the segment load,
				 * but there's really only one case that matters
				 * for performance: if both the previous and
				 * next states are fully zeroed, we can skip
				 * the load.
				 *
				 * (This assumes that prev_base == 0 has no
				 * false positives. This is the case on
				 * Intel-style CPUs.)
				 */
				if (likely(prev_index | next_index | prev_base))
					loadseg(which, next_index);
			}
		} else {
			if (prev_index != next_index)
				loadseg(which, next_index);
			wrmsrl(which == FS ? MSR_FS_BASE : MSR_KERNEL_GS_BASE,
			       next_base);
		}
	} else {
		/*
		 * The next task is using a real segment. Loading the selector
		 * is sufficient.
		 */
		loadseg(which, next_index);
	}
}
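
/*
 * Set up thread state for a newly forked task: build its kernel fork frame,
 * copy segment and FPU state, duplicate the I/O bitmap if the parent has
 * one, and install a new TLS if CLONE_SETTLS was requested.
 */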
int copy_thread_tls(unsigned long clone_flags, unsigned long sp,
		unsigned long arg, struct task_struct *p, unsigned long tls)
{
	int err;
	struct pt_regs *childregs;
	struct fork_frame *fork_frame;
	struct inactive_task_frame *frame;
	struct task_struct *me = current;

	p->thread.sp0 = (unsigned long)task_stack_page(p) + THREAD_SIZE;
	childregs = task_pt_regs(p);
	fork_frame = container_of(childregs, struct fork_frame, regs);
	frame = &fork_frame->frame;

	/*
	 * For a new task use the RESET flags value since there is no before.
	 * All the status flags are zero; DF and all the system flags must also
	 * be 0, specifically IF must be 0 because we context switch to the new
	 * task with interrupts disabled.
	 */
	frame->flags = X86_EFLAGS_FIXED;
	frame->bp = 0;
	frame->ret_addr = (unsigned long) ret_from_fork;
	p->thread.sp = (unsigned long) fork_frame;
	p->thread.io_bitmap_ptr = NULL;

	savesegment(gs, p->thread.gsindex);
	p->thread.gsbase = p->thread.gsindex ? 0 : me->thread.gsbase;
	savesegment(fs, p->thread.fsindex);
	p->thread.fsbase = p->thread.fsindex ? 0 : me->thread.fsbase;
	savesegment(es, p->thread.es);
	savesegment(ds, p->thread.ds);
	memset(p->thread.ptrace_bps, 0, sizeof(p->thread.ptrace_bps));

	if (unlikely(p->flags & PF_KTHREAD)) {
		/* kernel thread */
		memset(childregs, 0, sizeof(struct pt_regs));
		frame->bx = sp;		/* function */
		frame->r12 = arg;
		return 0;
	}
	frame->bx = 0;
	*childregs = *current_pt_regs();

	childregs->ax = 0;
	if (sp)
		childregs->sp = sp;

	err = -ENOMEM;
	if (unlikely(test_tsk_thread_flag(me, TIF_IO_BITMAP))) {
		p->thread.io_bitmap_ptr = kmemdup(me->thread.io_bitmap_ptr,
						  IO_BITMAP_BYTES, GFP_KERNEL);
		if (!p->thread.io_bitmap_ptr) {
			p->thread.io_bitmap_max = 0;
			return -ENOMEM;
		}
		set_tsk_thread_flag(p, TIF_IO_BITMAP);
	}

	/*
	 * Set a new TLS for the child thread?
	 */
	if (clone_flags & CLONE_SETTLS) {
#ifdef CONFIG_IA32_EMULATION
		if (in_ia32_syscall())
			err = do_set_thread_area(p, -1,
				(struct user_desc __user *)tls, 0);
		else
#endif
			err = do_arch_prctl(p, ARCH_SET_FS, tls);
		if (err)
			goto out;
	}
	err = 0;
out:
	if (err && p->thread.io_bitmap_ptr) {
		kfree(p->thread.io_bitmap_ptr);
		p->thread.io_bitmap_max = 0;
	}

	return err;
}
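
/*
 * Reset register and segment state for exec(): start the new program at
 * new_ip/new_sp with clean user segments and flags.
 */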
static void
start_thread_common(struct pt_regs *regs, unsigned long new_ip,
		    unsigned long new_sp,
		    unsigned int _cs, unsigned int _ss, unsigned int _ds)
{
	WARN_ON_ONCE(regs != current_pt_regs());

	if (static_cpu_has(X86_BUG_NULL_SEG)) {
		/* Loading zero below won't clear the base. */
		loadsegment(fs, __USER_DS);
		load_gs_index(__USER_DS);
	}

	loadsegment(fs, 0);
	loadsegment(es, _ds);
	loadsegment(ds, _ds);
	load_gs_index(0);

	regs->ip = new_ip;
	regs->sp = new_sp;
	regs->cs = _cs;
	regs->ss = _ss;
	regs->flags = X86_EFLAGS_IF;
	force_iret();
}

void
start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp)
{
	start_thread_common(regs, new_ip, new_sp,
			    __USER_CS, __USER_DS, 0);
}
EXPORT_SYMBOL_GPL(start_thread);

#ifdef CONFIG_COMPAT
void compat_start_thread(struct pt_regs *regs, u32 new_ip, u32 new_sp)
{
	start_thread_common(regs, new_ip, new_sp,
			    test_thread_flag(TIF_X32)
			    ? __USER_CS : __USER32_CS,
			    __USER_DS, __USER_DS);
}
#endif

/*
 * switch_to(x,y) should switch tasks from x to y.
 *
 * This could still be optimized:
 * - fold all the options into a flag word and test it with a single test.
 * - could test fs/gs bitsliced
 *
 * Kprobes not supported here. Set the probe on schedule instead.
 * Function graph tracer not supported either.
 */
__visible __notrace_funcgraph struct task_struct *
__switch_to(struct task_struct *prev_p, struct task_struct *next_p)
{
	struct thread_struct *prev = &prev_p->thread;
	struct thread_struct *next = &next_p->thread;
	struct fpu *prev_fpu = &prev->fpu;
	struct fpu *next_fpu = &next->fpu;
	int cpu = smp_processor_id();
	struct tss_struct *tss = &per_cpu(cpu_tss, cpu);
	fpu_switch_t fpu_switch;

	fpu_switch = switch_fpu_prepare(prev_fpu, next_fpu, cpu);

	/* We must save %fs and %gs before load_TLS() because
	 * %fs and %gs may be cleared by load_TLS().
	 *
	 * (e.g. xen_load_tls())
	 */
	save_fsgs(prev_p);

	/*
	 * Load TLS before restoring any segments so that segment loads
	 * reference the correct GDT entries.
	 */
	load_TLS(next, cpu);

	/*
	 * Leave lazy mode, flushing any hypercalls made here. This
	 * must be done after loading TLS entries in the GDT but before
	 * loading segments that might reference them, and it must
	 * be done before fpu__restore(), so the TS bit is up to
	 * date.
	 */
	arch_end_context_switch(next_p);

	/* Switch DS and ES.
	 *
	 * Reading them only returns the selectors, but writing them (if
	 * nonzero) loads the full descriptor from the GDT or LDT. The
	 * LDT for next is loaded in switch_mm, and the GDT is loaded
	 * above.
	 *
	 * We therefore need to write new values to the segment
	 * registers on every context switch unless both the new and old
	 * values are zero.
	 *
	 * Note that we don't need to do anything for CS and SS, as
	 * those are saved and restored as part of pt_regs.
	 */
	savesegment(es, prev->es);
	if (unlikely(next->es | prev->es))
		loadsegment(es, next->es);

	savesegment(ds, prev->ds);
	if (unlikely(next->ds | prev->ds))
		loadsegment(ds, next->ds);

	load_seg_legacy(prev->fsindex, prev->fsbase,
			next->fsindex, next->fsbase, FS);
	load_seg_legacy(prev->gsindex, prev->gsbase,
			next->gsindex, next->gsbase, GS);

	switch_fpu_finish(next_fpu, fpu_switch);

	/*
	 * Switch the PDA and FPU contexts.
	 */
	this_cpu_write(current_task, next_p);

	/* Reload esp0 and ss1. This changes current_thread_info(). */
	load_sp0(tss, next);

	switch_to_extra(prev_p, next_p);

#ifdef CONFIG_XEN
	/*
	 * On Xen PV, IOPL bits in pt_regs->flags have no effect, and
	 * current_pt_regs()->flags may not match the current task's
	 * intended IOPL. We need to switch it manually.
	 */
	if (unlikely(static_cpu_has(X86_FEATURE_XENPV) &&
		     prev->iopl != next->iopl))
		xen_set_iopl_mask(next->iopl);
#endif

	if (static_cpu_has_bug(X86_BUG_SYSRET_SS_ATTRS)) {
		/*
		 * AMD CPUs have a misfeature: SYSRET sets the SS selector but
		 * does not update the cached descriptor. As a result, if we
		 * do SYSRET while SS is NULL, we'll end up in user mode with
		 * SS apparently equal to __USER_DS but actually unusable.
		 *
		 * The straightforward workaround would be to fix it up just
		 * before SYSRET, but that would slow down the system call
		 * fast paths. Instead, we ensure that SS is never NULL in
		 * system call context. We do this by replacing NULL SS
		 * selectors at every context switch. SYSCALL sets up a valid
		 * SS, so the only way to get NULL is to re-enter the kernel
		 * from CPL 3 through an interrupt. Since that can't happen
		 * in the same task as a running syscall, we are guaranteed to
		 * context switch between every interrupt vector entry and a
		 * subsequent SYSRET.
		 *
		 * We read SS first because SS reads are much faster than
		 * writes. Out of caution, we force SS to __KERNEL_DS even if
		 * it previously had a different non-NULL value.
		 */
		unsigned short ss_sel;
		savesegment(ss, ss_sel);
		if (ss_sel != __KERNEL_DS)
			loadsegment(ss, __KERNEL_DS);
	}

	return prev_p;
}

void set_personality_64bit(void)
{
	/* inherit personality from parent */

	/* Make sure to be in 64bit mode */
	clear_thread_flag(TIF_IA32);
	clear_thread_flag(TIF_ADDR32);
	clear_thread_flag(TIF_X32);

	/* Ensure the corresponding mm is not marked. */
	if (current->mm)
		current->mm->context.ia32_compat = 0;

	/* TBD: overwrites user setup. Should have two bits.
	   But 64bit processes have always behaved this way,
	   so it's not too bad. The main problem is just that
	   32bit children are affected again. */
	current->personality &= ~READ_IMPLIES_EXEC;
}
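
/*
 * Mark the current task as running a 32-bit (ia32) or x32 binary: set the
 * TIF and TS_COMPAT state that the compat syscall and signal paths key off.
 */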
void set_personality_ia32(bool x32)
{
	/* inherit personality from parent */

	/* Make sure to be in 32bit mode */
	set_thread_flag(TIF_ADDR32);

	/* Mark the associated mm as containing 32-bit tasks. */
	if (x32) {
		clear_thread_flag(TIF_IA32);
		set_thread_flag(TIF_X32);
		if (current->mm)
			current->mm->context.ia32_compat = TIF_X32;
		current->personality &= ~READ_IMPLIES_EXEC;
		/* in_compat_syscall() uses the presence of the x32
		   syscall bit flag to determine compat status */
		current_thread_info()->status &= ~TS_COMPAT;
	} else {
		set_thread_flag(TIF_IA32);
		clear_thread_flag(TIF_X32);
		if (current->mm)
			current->mm->context.ia32_compat = TIF_IA32;
		current->personality |= force_personality32;
		/* Prepare the first "return" to user space */
		current_thread_info()->status |= TS_COMPAT;
	}
}
EXPORT_SYMBOL_GPL(set_personality_ia32);

#ifdef CONFIG_CHECKPOINT_RESTORE
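/*
 * Map the given vDSO image at a fixed address for checkpoint/restore;
 * returns the image size on success.
 */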
static long prctl_map_vdso(const struct vdso_image *image, unsigned long addr)
{
	int ret;

	ret = map_vdso_once(image, addr);
	if (ret)
		return ret;

	return (long)image->size;
}
#endif
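
/*
 * Implement the arch_prctl() operations: get/set the FS and GS bases for
 * @task (which may be current, or a ptraced child via PTRACE_ARCH_PRCTL),
 * and map vDSO images for checkpoint/restore.
 */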
long do_arch_prctl(struct task_struct *task, int code, unsigned long addr)
{
	int ret = 0;
	int doit = task == current;
	int cpu;

	switch (code) {
	case ARCH_SET_GS:
		if (addr >= TASK_SIZE_MAX)
			return -EPERM;
		cpu = get_cpu();
		task->thread.gsindex = 0;
		task->thread.gsbase = addr;
		if (doit) {
			load_gs_index(0);
			ret = wrmsrl_safe(MSR_KERNEL_GS_BASE, addr);
		}
		put_cpu();
		break;
	case ARCH_SET_FS:
		/* Not strictly needed for fs, but do it for symmetry
		   with gs */
		if (addr >= TASK_SIZE_MAX)
			return -EPERM;
		cpu = get_cpu();
		task->thread.fsindex = 0;
		task->thread.fsbase = addr;
		if (doit) {
			/* set the selector to 0 to not confuse __switch_to */
			loadsegment(fs, 0);
			ret = wrmsrl_safe(MSR_FS_BASE, addr);
		}
		put_cpu();
		break;
	case ARCH_GET_FS: {
		unsigned long base;
		if (doit)
			rdmsrl(MSR_FS_BASE, base);
		else
			base = task->thread.fsbase;
		ret = put_user(base, (unsigned long __user *)addr);
		break;
	}
	case ARCH_GET_GS: {
		unsigned long base;
		if (doit)
			rdmsrl(MSR_KERNEL_GS_BASE, base);
		else
			base = task->thread.gsbase;
		ret = put_user(base, (unsigned long __user *)addr);
		break;
	}

#ifdef CONFIG_CHECKPOINT_RESTORE
# ifdef CONFIG_X86_X32_ABI
	case ARCH_MAP_VDSO_X32:
		return prctl_map_vdso(&vdso_image_x32, addr);
# endif
# if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
	case ARCH_MAP_VDSO_32:
		return prctl_map_vdso(&vdso_image_32, addr);
# endif
	case ARCH_MAP_VDSO_64:
		return prctl_map_vdso(&vdso_image_64, addr);
#endif

	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

long sys_arch_prctl(int code, unsigned long addr)
{
	return do_arch_prctl(current, code, addr);
}
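
/*
 * Usage sketch (userspace, not part of this file's build; tls_block is a
 * caller-allocated buffer, shown for illustration only):
 *
 *	#include <asm/prctl.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	unsigned long base;
 *	syscall(SYS_arch_prctl, ARCH_SET_FS, (unsigned long)tls_block);
 *	syscall(SYS_arch_prctl, ARCH_GET_FS, (unsigned long)&base);
 *
 * This is how 64-bit threading runtimes point FS at a thread's TLS block
 * without going through the GDT or LDT.
 */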

unsigned long KSTK_ESP(struct task_struct *task)
{
	return task_pt_regs(task)->sp;
}