/*
 * Copyright (C) 1995  Linus Torvalds
 *
 * Pentium III FXSR, SSE support
 *	Gareth Hughes <gareth@valinux.com>, May 2000
 *
 * X86-64 port
 *	Andi Kleen.
 *
 *	CPU hotplug support - ashok.raj@intel.com
 */

/*
 * This file handles the architecture-dependent parts of process handling.
 */

#include <linux/cpu.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/elfcore.h>
#include <linux/smp.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/ptrace.h>
#include <linux/notifier.h>
#include <linux/kprobes.h>
#include <linux/kdebug.h>
#include <linux/prctl.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/ftrace.h>

#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/fpu/internal.h>
#include <asm/mmu_context.h>
#include <asm/prctl.h>
#include <asm/desc.h>
#include <asm/proto.h>
#include <asm/ia32.h>
#include <asm/idle.h>
#include <asm/syscalls.h>
#include <asm/debugreg.h>
#include <asm/switch_to.h>
#include <asm/xen/hypervisor.h>
#include <asm/vdso.h>

#include "process.h"

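/*
 * Scratch slot referenced from the assembly entry code (hence __visible);
 * the 64-bit SYSCALL path parks the user RSP here while it switches to
 * the kernel stack.
 */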
__visible DEFINE_PER_CPU(unsigned long, rsp_scratch);

/* Also prints some state that isn't saved in pt_regs. */
void __show_regs(struct pt_regs *regs, int all)
{
	unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L, fs, gs, shadowgs;
	unsigned long d0, d1, d2, d3, d6, d7;
	unsigned int fsindex, gsindex;
	unsigned int ds, cs, es;

	printk(KERN_DEFAULT "RIP: %04lx:[<%016lx>] ", regs->cs & 0xffff, regs->ip);
	printk_address(regs->ip);
	printk(KERN_DEFAULT "RSP: %04lx:%016lx EFLAGS: %08lx\n", regs->ss,
	       regs->sp, regs->flags);
	printk(KERN_DEFAULT "RAX: %016lx RBX: %016lx RCX: %016lx\n",
	       regs->ax, regs->bx, regs->cx);
	printk(KERN_DEFAULT "RDX: %016lx RSI: %016lx RDI: %016lx\n",
	       regs->dx, regs->si, regs->di);
	printk(KERN_DEFAULT "RBP: %016lx R08: %016lx R09: %016lx\n",
	       regs->bp, regs->r8, regs->r9);
	printk(KERN_DEFAULT "R10: %016lx R11: %016lx R12: %016lx\n",
	       regs->r10, regs->r11, regs->r12);
	printk(KERN_DEFAULT "R13: %016lx R14: %016lx R15: %016lx\n",
	       regs->r13, regs->r14, regs->r15);

	asm("movl %%ds,%0" : "=r" (ds));
	asm("movl %%cs,%0" : "=r" (cs));
	asm("movl %%es,%0" : "=r" (es));
	asm("movl %%fs,%0" : "=r" (fsindex));
	asm("movl %%gs,%0" : "=r" (gsindex));

	rdmsrl(MSR_FS_BASE, fs);
	rdmsrl(MSR_GS_BASE, gs);
	rdmsrl(MSR_KERNEL_GS_BASE, shadowgs);

	if (!all)
		return;

	cr0 = read_cr0();
	cr2 = read_cr2();
	cr3 = read_cr3();
	cr4 = __read_cr4();

	printk(KERN_DEFAULT "FS: %016lx(%04x) GS:%016lx(%04x) knlGS:%016lx\n",
	       fs, fsindex, gs, gsindex, shadowgs);
	printk(KERN_DEFAULT "CS: %04x DS: %04x ES: %04x CR0: %016lx\n", cs, ds,
	       es, cr0);
	printk(KERN_DEFAULT "CR2: %016lx CR3: %016lx CR4: %016lx\n", cr2, cr3,
	       cr4);

	get_debugreg(d0, 0);
	get_debugreg(d1, 1);
	get_debugreg(d2, 2);
	get_debugreg(d3, 3);
	get_debugreg(d6, 6);
	get_debugreg(d7, 7);

	/* Only print out debug registers if they are in their non-default state. */
	if (!((d0 == 0) && (d1 == 0) && (d2 == 0) && (d3 == 0) &&
	    (d6 == DR6_RESERVED) && (d7 == 0x400))) {
		printk(KERN_DEFAULT "DR0: %016lx DR1: %016lx DR2: %016lx\n",
		       d0, d1, d2);
		printk(KERN_DEFAULT "DR3: %016lx DR6: %016lx DR7: %016lx\n",
		       d3, d6, d7);
	}

	if (boot_cpu_has(X86_FEATURE_OSPKE))
		printk(KERN_DEFAULT "PKRU: %08x\n", read_pkru());
}

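/*
 * Called when the last reference to a dead task is dropped.  The only
 * architecture-specific check on x86-64 is that the task did not leak
 * an LDT; if it did, that's a kernel bug.
 */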
void release_thread(struct task_struct *dead_task)
{
	if (dead_task->mm) {
#ifdef CONFIG_MODIFY_LDT_SYSCALL
		if (dead_task->mm->context.ldt) {
			pr_warn("WARNING: dead process %s still has LDT? <%p/%d>\n",
				dead_task->comm,
				dead_task->mm->context.ldt->entries,
				dead_task->mm->context.ldt->size);
			BUG();
		}
#endif
	}
}

enum which_selector {
	FS,
	GS
};

/*
 * Saves the FS or GS base for an outgoing thread if FSGSBASE extensions are
 * not available.  The goal is to be reasonably fast on non-FSGSBASE systems.
 * It's forcibly inlined because it'll generate better code and this function
 * is hot.
 */
static __always_inline void save_base_legacy(struct task_struct *prev_p,
					     unsigned short selector,
					     enum which_selector which)
{
	if (likely(selector == 0)) {
		/*
		 * On Intel (without X86_BUG_NULL_SEG), the segment base could
		 * be the pre-existing saved base or it could be zero.  On AMD
		 * (with X86_BUG_NULL_SEG), the segment base could be almost
		 * anything.
		 *
		 * This branch is very hot (it's hit twice on almost every
		 * context switch between 64-bit programs), and avoiding
		 * the RDMSR helps a lot, so we just assume that whatever
		 * value is already saved is correct.  This matches historical
		 * Linux behavior, so it won't break existing applications.
		 *
		 * To avoid leaking state, on non-X86_BUG_NULL_SEG CPUs, if we
		 * report that the base is zero, it needs to actually be zero:
		 * see the corresponding logic in load_seg_legacy.
		 */
	} else {
		/*
		 * If the selector is 1, 2, or 3, then the base is zero on
		 * !X86_BUG_NULL_SEG CPUs and could be anything on
		 * X86_BUG_NULL_SEG CPUs.  In the latter case, Linux
		 * has never attempted to preserve the base across context
		 * switches.
		 *
		 * If selector > 3, then it refers to a real segment, and
		 * saving the base isn't necessary.
		 */
		if (which == FS)
			prev_p->thread.fsbase = 0;
		else
			prev_p->thread.gsbase = 0;
	}
}

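/*
 * Save the outgoing task's FS/GS selectors and let save_base_legacy()
 * decide what to record as the corresponding saved bases.
 */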
static __always_inline void save_fsgs(struct task_struct *task)
{
	savesegment(fs, task->thread.fsindex);
	savesegment(gs, task->thread.gsindex);
	save_base_legacy(task, task->thread.fsindex, FS);
	save_base_legacy(task, task->thread.gsindex, GS);
}

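/*
 * Load a user selector into FS or GS.  GS goes through load_gs_index()
 * so that the selector write happens with the kernel's GS base swapped
 * safely out of the way.
 */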
static __always_inline void loadseg(enum which_selector which,
				    unsigned short sel)
{
	if (which == FS)
		loadsegment(fs, sel);
	else
		load_gs_index(sel);
}

static __always_inline void load_seg_legacy(unsigned short prev_index,
					    unsigned long prev_base,
					    unsigned short next_index,
					    unsigned long next_base,
					    enum which_selector which)
{
	if (likely(next_index <= 3)) {
		/*
		 * The next task is using 64-bit TLS, is not using this
		 * segment at all, or is having fun with arcane CPU features.
		 */
		if (next_base == 0) {
			/*
			 * Nasty case: on AMD CPUs, we need to forcibly zero
			 * the base.
			 */
			if (static_cpu_has_bug(X86_BUG_NULL_SEG)) {
				loadseg(which, __USER_DS);
				loadseg(which, next_index);
			} else {
				/*
				 * We could try to exhaustively detect cases
				 * under which we can skip the segment load,
				 * but there's really only one case that matters
				 * for performance: if both the previous and
				 * next states are fully zeroed, we can skip
				 * the load.
				 *
				 * (This assumes that prev_base == 0 has no
				 * false positives.  This is the case on
				 * Intel-style CPUs.)
				 */
				if (likely(prev_index | next_index | prev_base))
					loadseg(which, next_index);
			}
		} else {
			if (prev_index != next_index)
				loadseg(which, next_index);
			wrmsrl(which == FS ? MSR_FS_BASE : MSR_KERNEL_GS_BASE,
			       next_base);
		}
	} else {
		/*
		 * The next task is using a real segment.  Loading the selector
		 * is sufficient.
		 */
		loadseg(which, next_index);
	}
}

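/*
 * Set up a newly forked task: build the inactive_task_frame that the
 * context-switch code will unwind into ret_from_fork, initialize the
 * child's segment state, duplicate the parent's I/O bitmap if present,
 * and honor CLONE_SETTLS.
 */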
int copy_thread_tls(unsigned long clone_flags, unsigned long sp,
		unsigned long arg, struct task_struct *p, unsigned long tls)
{
	int err;
	struct pt_regs *childregs;
	struct fork_frame *fork_frame;
	struct inactive_task_frame *frame;
	struct task_struct *me = current;

	p->thread.sp0 = (unsigned long)task_stack_page(p) + THREAD_SIZE;
	childregs = task_pt_regs(p);
	fork_frame = container_of(childregs, struct fork_frame, regs);
	frame = &fork_frame->frame;

	/*
	 * For a new task use the RESET flags value since there is no prior
	 * state.  All the status flags are zero; DF and all the system
	 * flags must also be 0, specifically IF must be 0 because we
	 * context switch to the new task with interrupts disabled.
	 */
	frame->flags = X86_EFLAGS_FIXED;
	frame->bp = 0;
	frame->ret_addr = (unsigned long) ret_from_fork;
	p->thread.sp = (unsigned long) fork_frame;
	p->thread.io_bitmap_ptr = NULL;

	savesegment(gs, p->thread.gsindex);
	p->thread.gsbase = p->thread.gsindex ? 0 : me->thread.gsbase;
	savesegment(fs, p->thread.fsindex);
	p->thread.fsbase = p->thread.fsindex ? 0 : me->thread.fsbase;
	savesegment(es, p->thread.es);
	savesegment(ds, p->thread.ds);
	memset(p->thread.ptrace_bps, 0, sizeof(p->thread.ptrace_bps));

	if (unlikely(p->flags & PF_KTHREAD)) {
		/* kernel thread */
		memset(childregs, 0, sizeof(struct pt_regs));
		frame->bx = sp;		/* function */
		frame->r12 = arg;
		return 0;
	}
	frame->bx = 0;
	*childregs = *current_pt_regs();

	childregs->ax = 0;
	if (sp)
		childregs->sp = sp;

	err = -ENOMEM;
	if (unlikely(test_tsk_thread_flag(me, TIF_IO_BITMAP))) {
		p->thread.io_bitmap_ptr = kmemdup(me->thread.io_bitmap_ptr,
						  IO_BITMAP_BYTES, GFP_KERNEL);
		if (!p->thread.io_bitmap_ptr) {
			p->thread.io_bitmap_max = 0;
			return -ENOMEM;
		}
		set_tsk_thread_flag(p, TIF_IO_BITMAP);
	}

	/*
	 * Set a new TLS for the child thread?
	 */
	if (clone_flags & CLONE_SETTLS) {
#ifdef CONFIG_IA32_EMULATION
		if (in_ia32_syscall())
			err = do_set_thread_area(p, -1,
				(struct user_desc __user *)tls, 0);
		else
#endif
			err = do_arch_prctl(p, ARCH_SET_FS, tls);
		if (err)
			goto out;
	}
	err = 0;
out:
	if (err && p->thread.io_bitmap_ptr) {
		kfree(p->thread.io_bitmap_ptr);
		p->thread.io_bitmap_max = 0;
	}

	return err;
}

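/*
 * Common exec-time setup: reset the user segment registers (working
 * around X86_BUG_NULL_SEG, where loading a zero selector doesn't clear
 * the base) and point pt_regs at the new entry point and stack.
 */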
static void
start_thread_common(struct pt_regs *regs, unsigned long new_ip,
		    unsigned long new_sp,
		    unsigned int _cs, unsigned int _ss, unsigned int _ds)
{
	WARN_ON_ONCE(regs != current_pt_regs());

	if (static_cpu_has(X86_BUG_NULL_SEG)) {
		/* Loading zero below won't clear the base. */
		loadsegment(fs, __USER_DS);
		load_gs_index(__USER_DS);
	}

	loadsegment(fs, 0);
	loadsegment(es, _ds);
	loadsegment(ds, _ds);
	load_gs_index(0);

	regs->ip = new_ip;
	regs->sp = new_sp;
	regs->cs = _cs;
	regs->ss = _ss;
	regs->flags = X86_EFLAGS_IF;
	force_iret();
}

void
start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp)
{
	start_thread_common(regs, new_ip, new_sp,
			    __USER_CS, __USER_DS, 0);
}
EXPORT_SYMBOL_GPL(start_thread);

#ifdef CONFIG_COMPAT
void compat_start_thread(struct pt_regs *regs, u32 new_ip, u32 new_sp)
{
	start_thread_common(regs, new_ip, new_sp,
			    test_thread_flag(TIF_X32)
			    ? __USER_CS : __USER32_CS,
			    __USER_DS, __USER_DS);
}
#endif

/*
 * switch_to(x,y) should switch tasks from x to y.
 *
 * This could still be optimized:
 * - fold all the options into a flag word and test it with a single test.
 * - could test fs/gs bitsliced
 *
 * Kprobes are not supported here; set the probe on schedule() instead.
 * The function graph tracer is not supported either.
 */
__visible __notrace_funcgraph struct task_struct *
__switch_to(struct task_struct *prev_p, struct task_struct *next_p)
{
	struct thread_struct *prev = &prev_p->thread;
	struct thread_struct *next = &next_p->thread;
	struct fpu *prev_fpu = &prev->fpu;
	struct fpu *next_fpu = &next->fpu;
	int cpu = smp_processor_id();
	struct tss_struct *tss = &per_cpu(cpu_tss, cpu);
	fpu_switch_t fpu_switch;

	fpu_switch = switch_fpu_prepare(prev_fpu, next_fpu, cpu);

	/*
	 * We must save %fs and %gs before load_TLS() because
	 * %fs and %gs may be cleared by load_TLS().
	 *
	 * (e.g. xen_load_tls())
	 */
	save_fsgs(prev_p);

	/*
	 * Load TLS before restoring any segments so that segment loads
	 * reference the correct GDT entries.
	 */
	load_TLS(next, cpu);

	/*
	 * Leave lazy mode, flushing any hypercalls made here.  This
	 * must be done after loading TLS entries in the GDT but before
	 * loading segments that might reference them, and it must
	 * be done before fpu__restore(), so the TS bit is up to
	 * date.
	 */
	arch_end_context_switch(next_p);

	/*
	 * Switch DS and ES.
	 *
	 * Reading them only returns the selectors, but writing them (if
	 * nonzero) loads the full descriptor from the GDT or LDT.  The
	 * LDT for next is loaded in switch_mm, and the GDT is loaded
	 * above.
	 *
	 * We therefore need to write new values to the segment
	 * registers on every context switch unless both the new and old
	 * values are zero.
	 *
	 * Note that we don't need to do anything for CS and SS, as
	 * those are saved and restored as part of pt_regs.
	 */
	savesegment(es, prev->es);
	if (unlikely(next->es | prev->es))
		loadsegment(es, next->es);

	savesegment(ds, prev->ds);
	if (unlikely(next->ds | prev->ds))
		loadsegment(ds, next->ds);

	load_seg_legacy(prev->fsindex, prev->fsbase,
			next->fsindex, next->fsbase, FS);
	load_seg_legacy(prev->gsindex, prev->gsbase,
			next->gsindex, next->gsbase, GS);

	switch_fpu_finish(next_fpu, fpu_switch);

	/*
	 * Switch the per-CPU current task pointer and the FPU context.
	 */
	this_cpu_write(current_task, next_p);

	/* Reload esp0 and ss1.  This changes current_thread_info(). */
	load_sp0(tss, next);

	switch_to_extra(prev_p, next_p);

#ifdef CONFIG_XEN
	/*
	 * On Xen PV, IOPL bits in pt_regs->flags have no effect, and
	 * current_pt_regs()->flags may not match the current task's
	 * intended IOPL.  We need to switch it manually.
	 */
	if (unlikely(static_cpu_has(X86_FEATURE_XENPV) &&
		     prev->iopl != next->iopl))
		xen_set_iopl_mask(next->iopl);
#endif

	if (static_cpu_has_bug(X86_BUG_SYSRET_SS_ATTRS)) {
		/*
		 * AMD CPUs have a misfeature: SYSRET sets the SS selector but
		 * does not update the cached descriptor.  As a result, if we
		 * do SYSRET while SS is NULL, we'll end up in user mode with
		 * SS apparently equal to __USER_DS but actually unusable.
		 *
		 * The straightforward workaround would be to fix it up just
		 * before SYSRET, but that would slow down the system call
		 * fast paths.  Instead, we ensure that SS is never NULL in
		 * system call context.  We do this by replacing NULL SS
		 * selectors at every context switch.  SYSCALL sets up a valid
		 * SS, so the only way to get NULL is to re-enter the kernel
		 * from CPL 3 through an interrupt.  Since that can't happen
		 * in the same task as a running syscall, we are guaranteed to
		 * context switch between every interrupt vector entry and a
		 * subsequent SYSRET.
		 *
		 * We read SS first because SS reads are much faster than
		 * writes.  Out of caution, we force SS to __KERNEL_DS even if
		 * it previously had a different non-NULL value.
		 */
		unsigned short ss_sel;
		savesegment(ss, ss_sel);
		if (ss_sel != __KERNEL_DS)
			loadsegment(ss, __KERNEL_DS);
	}

	return prev_p;
}

void set_personality_64bit(void)
{
	/* inherit personality from parent */

	/* Make sure to be in 64bit mode */
	clear_thread_flag(TIF_IA32);
	clear_thread_flag(TIF_ADDR32);
	clear_thread_flag(TIF_X32);

	/* Ensure the corresponding mm is not marked. */
	if (current->mm)
		current->mm->context.ia32_compat = 0;

	/*
	 * TBD: this overwrites the user's setup and should really use
	 * two bits, but 64-bit processes have always behaved this way,
	 * so it's not too bad.  The main problem is just that 32-bit
	 * children are affected again.
	 */
	current->personality &= ~READ_IMPLIES_EXEC;
}

void set_personality_ia32(bool x32)
{
	/* inherit personality from parent */

	/* Make sure to be in 32bit mode */
	set_thread_flag(TIF_ADDR32);

	/* Mark the associated mm as containing 32-bit tasks. */
	if (x32) {
		clear_thread_flag(TIF_IA32);
		set_thread_flag(TIF_X32);
		if (current->mm)
			current->mm->context.ia32_compat = TIF_X32;
		current->personality &= ~READ_IMPLIES_EXEC;
		/*
		 * in_compat_syscall() uses the presence of the x32 syscall
		 * bit flag to determine compat status.
		 */
		current_thread_info()->status &= ~TS_COMPAT;
	} else {
		set_thread_flag(TIF_IA32);
		clear_thread_flag(TIF_X32);
		if (current->mm)
			current->mm->context.ia32_compat = TIF_IA32;
		current->personality |= force_personality32;
		/* Prepare the first "return" to user space */
		current_thread_info()->status |= TS_COMPAT;
	}
}
EXPORT_SYMBOL_GPL(set_personality_ia32);

#ifdef CONFIG_CHECKPOINT_RESTORE
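/*
 * Map the given vDSO image at a caller-supplied address; used by
 * checkpoint/restore to re-create a vDSO mapping.  Returns the image
 * size on success so the caller knows the mapped extent.
 */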
static long prctl_map_vdso(const struct vdso_image *image, unsigned long addr)
{
	int ret;

	ret = map_vdso_once(image, addr);
	if (ret)
		return ret;

	return (long)image->size;
}
#endif

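/*
 * Implement arch_prctl(2): get or set the FS/GS base for @task (which
 * may be a ptrace target rather than current), or map a vDSO image when
 * CONFIG_CHECKPOINT_RESTORE is enabled.
 */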
long do_arch_prctl(struct task_struct *task, int code, unsigned long addr)
{
	int ret = 0;
	int doit = task == current;
	int cpu;

	switch (code) {
	case ARCH_SET_GS:
		if (addr >= TASK_SIZE_MAX)
			return -EPERM;
		cpu = get_cpu();
		task->thread.gsindex = 0;
		task->thread.gsbase = addr;
		if (doit) {
			load_gs_index(0);
			ret = wrmsrl_safe(MSR_KERNEL_GS_BASE, addr);
		}
		put_cpu();
		break;
	case ARCH_SET_FS:
		/*
		 * Not strictly needed for %fs, but do it for symmetry
		 * with %gs.
		 */
		if (addr >= TASK_SIZE_MAX)
			return -EPERM;
		cpu = get_cpu();
		task->thread.fsindex = 0;
		task->thread.fsbase = addr;
		if (doit) {
			/* set the selector to 0 to not confuse __switch_to */
			loadsegment(fs, 0);
			ret = wrmsrl_safe(MSR_FS_BASE, addr);
		}
		put_cpu();
		break;
	case ARCH_GET_FS: {
		unsigned long base;
		if (doit)
			rdmsrl(MSR_FS_BASE, base);
		else
			base = task->thread.fsbase;
		ret = put_user(base, (unsigned long __user *)addr);
		break;
	}
	case ARCH_GET_GS: {
		unsigned long base;
		if (doit)
			rdmsrl(MSR_KERNEL_GS_BASE, base);
		else
			base = task->thread.gsbase;
		ret = put_user(base, (unsigned long __user *)addr);
		break;
	}

#ifdef CONFIG_CHECKPOINT_RESTORE
# ifdef CONFIG_X86_X32_ABI
	case ARCH_MAP_VDSO_X32:
		return prctl_map_vdso(&vdso_image_x32, addr);
# endif
# if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
	case ARCH_MAP_VDSO_32:
		return prctl_map_vdso(&vdso_image_32, addr);
# endif
	case ARCH_MAP_VDSO_64:
		return prctl_map_vdso(&vdso_image_64, addr);
#endif

	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

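/* The arch_prctl(2) syscall entry point: always operates on the caller. */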
long sys_arch_prctl(int code, unsigned long addr)
{
	return do_arch_prctl(current, code, addr);
}

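/* Report the task's user stack pointer from its saved pt_regs. */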
unsigned long KSTK_ESP(struct task_struct *task)
{
	return task_pt_regs(task)->sp;
}