#define pr_fmt(fmt) "SMP alternatives: " fmt

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/stringify.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/memory.h>
#include <linux/stop_machine.h>
#include <linux/slab.h>
#include <linux/kdebug.h>
#include <asm/text-patching.h>
#include <asm/alternative.h>
#include <asm/sections.h>
#include <asm/pgtable.h>
#include <asm/mce.h>
#include <asm/nmi.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/io.h>
#include <asm/fixmap.h>

int __read_mostly alternatives_patched;

EXPORT_SYMBOL_GPL(alternatives_patched);

#define MAX_PATCH_LEN (255-1)

static int __initdata_or_module debug_alternative;

static int __init debug_alt(char *str)
{
	debug_alternative = 1;
	return 1;
}
__setup("debug-alternative", debug_alt);

static int noreplace_smp;

static int __init setup_noreplace_smp(char *str)
{
	noreplace_smp = 1;
	return 1;
}
__setup("noreplace-smp", setup_noreplace_smp);

#define DPRINTK(fmt, args...)						\
do {									\
	if (debug_alternative)						\
		printk(KERN_DEBUG "%s: " fmt "\n", __func__, ##args);	\
} while (0)

#define DUMP_BYTES(buf, len, fmt, args...)				\
do {									\
	if (unlikely(debug_alternative)) {				\
		int j;							\
									\
		if (!(len))						\
			break;						\
									\
		printk(KERN_DEBUG fmt, ##args);				\
		for (j = 0; j < (len) - 1; j++)				\
			printk(KERN_CONT "%02hhx ", buf[j]);		\
		printk(KERN_CONT "%02hhx\n", buf[j]);			\
	}								\
} while (0)

/*
 * Each GENERIC_NOPX is of X bytes, and defined as an array of bytes
 * that correspond to that nop. Getting from one nop to the next, we
 * add to the array the offset that is equal to the sum of all sizes of
 * nops preceding the one we are after.
 *
 * Note: The GENERIC_NOP5_ATOMIC is at the end, as it breaks the
 * nice symmetry of sizes of the previous nops.
 */
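/*
 * Indexing sketch, for illustration only: with the layout above,
 * intel_nops[5] below points at the first byte of GENERIC_NOP5, i.e.
 * at offset 1 + 2 + 3 + 4 into the flat byte array, so callers can
 * index these tables directly with the desired nop length.
 */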
#if defined(GENERIC_NOP1) && !defined(CONFIG_X86_64)
static const unsigned char intelnops[] =
{
	GENERIC_NOP1,
	GENERIC_NOP2,
	GENERIC_NOP3,
	GENERIC_NOP4,
	GENERIC_NOP5,
	GENERIC_NOP6,
	GENERIC_NOP7,
	GENERIC_NOP8,
	GENERIC_NOP5_ATOMIC
};
static const unsigned char * const intel_nops[ASM_NOP_MAX+2] =
{
	NULL,
	intelnops,
	intelnops + 1,
	intelnops + 1 + 2,
	intelnops + 1 + 2 + 3,
	intelnops + 1 + 2 + 3 + 4,
	intelnops + 1 + 2 + 3 + 4 + 5,
	intelnops + 1 + 2 + 3 + 4 + 5 + 6,
	intelnops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
	intelnops + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8,
};
#endif

#ifdef K8_NOP1
static const unsigned char k8nops[] =
{
	K8_NOP1,
	K8_NOP2,
	K8_NOP3,
	K8_NOP4,
	K8_NOP5,
	K8_NOP6,
	K8_NOP7,
	K8_NOP8,
	K8_NOP5_ATOMIC
};
static const unsigned char * const k8_nops[ASM_NOP_MAX+2] =
{
	NULL,
	k8nops,
	k8nops + 1,
	k8nops + 1 + 2,
	k8nops + 1 + 2 + 3,
	k8nops + 1 + 2 + 3 + 4,
	k8nops + 1 + 2 + 3 + 4 + 5,
	k8nops + 1 + 2 + 3 + 4 + 5 + 6,
	k8nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
	k8nops + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8,
};
#endif

#if defined(K7_NOP1) && !defined(CONFIG_X86_64)
static const unsigned char k7nops[] =
{
	K7_NOP1,
	K7_NOP2,
	K7_NOP3,
	K7_NOP4,
	K7_NOP5,
	K7_NOP6,
	K7_NOP7,
	K7_NOP8,
	K7_NOP5_ATOMIC
};
static const unsigned char * const k7_nops[ASM_NOP_MAX+2] =
{
	NULL,
	k7nops,
	k7nops + 1,
	k7nops + 1 + 2,
	k7nops + 1 + 2 + 3,
	k7nops + 1 + 2 + 3 + 4,
	k7nops + 1 + 2 + 3 + 4 + 5,
	k7nops + 1 + 2 + 3 + 4 + 5 + 6,
	k7nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
	k7nops + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8,
};
#endif

#ifdef P6_NOP1
static const unsigned char p6nops[] =
{
	P6_NOP1,
	P6_NOP2,
	P6_NOP3,
	P6_NOP4,
	P6_NOP5,
	P6_NOP6,
	P6_NOP7,
	P6_NOP8,
	P6_NOP5_ATOMIC
};
static const unsigned char * const p6_nops[ASM_NOP_MAX+2] =
{
	NULL,
	p6nops,
	p6nops + 1,
	p6nops + 1 + 2,
	p6nops + 1 + 2 + 3,
	p6nops + 1 + 2 + 3 + 4,
	p6nops + 1 + 2 + 3 + 4 + 5,
	p6nops + 1 + 2 + 3 + 4 + 5 + 6,
	p6nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
	p6nops + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8,
};
#endif

/* Initialize these to a safe default */
#ifdef CONFIG_X86_64
const unsigned char * const *ideal_nops = p6_nops;
#else
const unsigned char * const *ideal_nops = intel_nops;
#endif

void __init arch_init_ideal_nops(void)
{
	switch (boot_cpu_data.x86_vendor) {
	case X86_VENDOR_INTEL:
		/*
		 * Due to a decoder implementation quirk, some
		 * specific Intel CPUs actually perform better with
		 * the "k8_nops" than with the SDM-recommended NOPs.
		 */
		if (boot_cpu_data.x86 == 6 &&
		    boot_cpu_data.x86_model >= 0x0f &&
		    boot_cpu_data.x86_model != 0x1c &&
		    boot_cpu_data.x86_model != 0x26 &&
		    boot_cpu_data.x86_model != 0x27 &&
		    boot_cpu_data.x86_model < 0x30) {
			ideal_nops = k8_nops;
		} else if (boot_cpu_has(X86_FEATURE_NOPL)) {
			ideal_nops = p6_nops;
		} else {
#ifdef CONFIG_X86_64
			ideal_nops = k8_nops;
#else
			ideal_nops = intel_nops;
#endif
		}
		break;

	case X86_VENDOR_AMD:
		if (boot_cpu_data.x86 > 0xf) {
			ideal_nops = p6_nops;
			return;
		}

		/* fall through */

	default:
#ifdef CONFIG_X86_64
		ideal_nops = k8_nops;
#else
		if (boot_cpu_has(X86_FEATURE_K8))
			ideal_nops = k8_nops;
		else if (boot_cpu_has(X86_FEATURE_K7))
			ideal_nops = k7_nops;
		else
			ideal_nops = intel_nops;
#endif
	}
}

/* Use this to add nops to a buffer, then text_poke the whole buffer. */
static void __init_or_module add_nops(void *insns, unsigned int len)
{
	while (len > 0) {
		unsigned int noplen = len;
		if (noplen > ASM_NOP_MAX)
			noplen = ASM_NOP_MAX;
		memcpy(insns, ideal_nops[noplen], noplen);
		insns += noplen;
		len -= noplen;
	}
}
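/*
 * A minimal usage sketch (buffer and target names are hypothetical):
 * to fill a 7-byte patch area, a caller would do
 *
 *	u8 buf[7];
 *
 *	add_nops(buf, sizeof(buf));
 *	text_poke_early(site, buf, sizeof(buf));
 *
 * which emits a single 7-byte ideal nop; areas longer than ASM_NOP_MAX
 * are filled with maximum-size nops followed by one shorter nop.
 */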

extern struct alt_instr __alt_instructions[], __alt_instructions_end[];
extern s32 __smp_locks[], __smp_locks_end[];
void *text_poke_early(void *addr, const void *opcode, size_t len);

/*
 * Are we looking at a near JMP with a 1 or 4-byte displacement?
 */
static inline bool is_jmp(const u8 opcode)
{
	return opcode == 0xeb || opcode == 0xe9;
}

static void __init_or_module
recompute_jump(struct alt_instr *a, u8 *orig_insn, u8 *repl_insn, u8 *insnbuf)
{
	u8 *next_rip, *tgt_rip;
	s32 n_dspl, o_dspl;
	int repl_len;

	if (a->replacementlen != 5)
		return;

	o_dspl = *(s32 *)(insnbuf + 1);

	/* next_rip of the replacement JMP */
	next_rip = repl_insn + a->replacementlen;
	/* target rip of the replacement JMP */
	tgt_rip = next_rip + o_dspl;
	n_dspl = tgt_rip - orig_insn;

	DPRINTK("target RIP: %p, new_displ: 0x%x", tgt_rip, n_dspl);

	if (tgt_rip - orig_insn >= 0) {
		if (n_dspl - 2 <= 127)
			goto two_byte_jmp;
		else
			goto five_byte_jmp;
	/* negative offset */
	} else {
		if (((n_dspl - 2) & 0xff) == (n_dspl - 2))
			goto two_byte_jmp;
		else
			goto five_byte_jmp;
	}

two_byte_jmp:
	n_dspl -= 2;

	insnbuf[0] = 0xeb;
	insnbuf[1] = (s8)n_dspl;
	add_nops(insnbuf + 2, 3);

	repl_len = 2;
	goto done;

five_byte_jmp:
	n_dspl -= 5;

	insnbuf[0] = 0xe9;
	*(s32 *)&insnbuf[1] = n_dspl;

	repl_len = 5;

done:

	DPRINTK("final displ: 0x%08x, JMP 0x%lx",
		n_dspl, (unsigned long)orig_insn + n_dspl + repl_len);
}
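/*
 * A sketch of the displacement math above: o_dspl in the replacement
 * JMP is relative to the replacement's own next_rip, so
 * n_dspl = (next_rip + o_dspl) - orig_insn re-bases the target onto
 * the patch site.  If n_dspl - 2 fits in a signed byte, the JMP is
 * shrunk to the two-byte 0xeb form and the remaining three bytes are
 * padded with nops; otherwise the five-byte 0xe9 form keeps n_dspl - 5.
 */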

static void __init_or_module optimize_nops(struct alt_instr *a, u8 *instr)
{
	unsigned long flags;
	int i;

	for (i = 0; i < a->padlen; i++) {
		if (instr[i] != 0x90)
			return;
	}

	local_irq_save(flags);
	add_nops(instr + (a->instrlen - a->padlen), a->padlen);
	sync_core();
	local_irq_restore(flags);

	DUMP_BYTES(instr, a->instrlen, "%p: [%d:%d) optimized NOPs: ",
		   instr, a->instrlen - a->padlen, a->padlen);
}

/*
 * Replace instructions with better alternatives for this CPU type. This runs
 * before SMP is initialized to avoid SMP problems with self-modifying code.
 * This implies that asymmetric systems where APs have fewer capabilities than
 * the boot processor are not handled. Tough. Make sure you disable such
 * features by hand.
 */
void __init_or_module apply_alternatives(struct alt_instr *start,
					 struct alt_instr *end)
{
	struct alt_instr *a;
	u8 *instr, *replacement;
	u8 insnbuf[MAX_PATCH_LEN];

	DPRINTK("alt table %p -> %p", start, end);
	/*
	 * The scan order should be from start to end. A later scanned
	 * alternative code can overwrite previously scanned alternative code.
	 * Some kernel functions (e.g. memcpy, memset, etc) use this order to
	 * patch code.
	 *
	 * So be careful if you want to change the scan order to any other
	 * order.
	 */
	for (a = start; a < end; a++) {
		int insnbuf_sz = 0;

		instr = (u8 *)&a->instr_offset + a->instr_offset;
		replacement = (u8 *)&a->repl_offset + a->repl_offset;
		BUG_ON(a->instrlen > sizeof(insnbuf));
		BUG_ON(a->cpuid >= (NCAPINTS + NBUGINTS) * 32);
		if (!boot_cpu_has(a->cpuid)) {
			if (a->padlen > 1)
				optimize_nops(a, instr);

			continue;
		}

		DPRINTK("feat: %d*32+%d, old: (%p, len: %d), repl: (%p, len: %d), pad: %d",
			a->cpuid >> 5,
			a->cpuid & 0x1f,
			instr, a->instrlen,
			replacement, a->replacementlen, a->padlen);

		DUMP_BYTES(instr, a->instrlen, "%p: old_insn: ", instr);
		DUMP_BYTES(replacement, a->replacementlen, "%p: rpl_insn: ", replacement);

		memcpy(insnbuf, replacement, a->replacementlen);
		insnbuf_sz = a->replacementlen;

		/* 0xe8 is a relative CALL; fix the offset. */
		if (*insnbuf == 0xe8 && a->replacementlen == 5) {
			*(s32 *)(insnbuf + 1) += replacement - instr;
			DPRINTK("Fix CALL offset: 0x%x, CALL 0x%lx",
				*(s32 *)(insnbuf + 1),
				(unsigned long)instr + *(s32 *)(insnbuf + 1) + 5);
		}

		if (a->replacementlen && is_jmp(replacement[0]))
			recompute_jump(a, instr, replacement, insnbuf);

		if (a->instrlen > a->replacementlen) {
			add_nops(insnbuf + a->replacementlen,
				 a->instrlen - a->replacementlen);
			insnbuf_sz += a->instrlen - a->replacementlen;
		}
		DUMP_BYTES(insnbuf, insnbuf_sz, "%p: final_insn: ", instr);

		text_poke_early(instr, insnbuf, insnbuf_sz);
	}
}
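/*
 * A sketch of how a patch site reaches the loop above, assuming the
 * alternative() macro from <asm/alternative.h> (the feature and
 * instruction choice here are purely illustrative):
 *
 *	alternative("lock; addl $0,0(%%esp)", "mfence", X86_FEATURE_XMM2);
 *
 * The first operand is emitted into .text and a struct alt_instr entry
 * referencing both sequences is recorded; if the boot CPU has the
 * feature, the loop above copies the replacement over the original and
 * pads any leftover bytes with nops.
 */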

#ifdef CONFIG_SMP
static void alternatives_smp_lock(const s32 *start, const s32 *end,
				  u8 *text, u8 *text_end)
{
	const s32 *poff;

	mutex_lock(&text_mutex);
	for (poff = start; poff < end; poff++) {
		u8 *ptr = (u8 *)poff + *poff;

		if (!*poff || ptr < text || ptr >= text_end)
			continue;
		/* turn DS segment override prefix into lock prefix */
		if (*ptr == 0x3e)
			text_poke(ptr, ((unsigned char []){0xf0}), 1);
	}
	mutex_unlock(&text_mutex);
}

static void alternatives_smp_unlock(const s32 *start, const s32 *end,
				    u8 *text, u8 *text_end)
{
	const s32 *poff;

	mutex_lock(&text_mutex);
	for (poff = start; poff < end; poff++) {
		u8 *ptr = (u8 *)poff + *poff;

		if (!*poff || ptr < text || ptr >= text_end)
			continue;
		/* turn lock prefix into DS segment override prefix */
		if (*ptr == 0xf0)
			text_poke(ptr, ((unsigned char []){0x3E}), 1);
	}
	mutex_unlock(&text_mutex);
}
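/*
 * Byte-level illustration of the rewrite above: a locked RMW such as
 * "lock; incl (%eax)" is encoded f0 ff 00.  Replacing the first byte
 * with the DS segment override 3e gives 3e ff 00, the same increment
 * without the bus lock that is unneeded on UP; alternatives_smp_lock()
 * performs the inverse rewrite when the system goes SMP.
 */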

struct smp_alt_module {
	/* what is this ??? */
	struct module *mod;
	char *name;

	/* ptrs to lock prefixes */
	const s32 *locks;
	const s32 *locks_end;

	/* .text segment, needed to avoid patching init code ;) */
	u8 *text;
	u8 *text_end;

	struct list_head next;
};
static LIST_HEAD(smp_alt_modules);
static DEFINE_MUTEX(smp_alt);
static bool uniproc_patched = false;	/* protected by smp_alt */

void __init_or_module alternatives_smp_module_add(struct module *mod,
						  char *name,
						  void *locks, void *locks_end,
						  void *text, void *text_end)
{
	struct smp_alt_module *smp;

	mutex_lock(&smp_alt);
	if (!uniproc_patched)
		goto unlock;

	if (num_possible_cpus() == 1)
		/* Don't bother remembering, we'll never have to undo it. */
		goto smp_unlock;

	smp = kzalloc(sizeof(*smp), GFP_KERNEL);
	if (NULL == smp)
		/* we'll run the (safe but slow) SMP code then ... */
		goto unlock;

	smp->mod = mod;
	smp->name = name;
	smp->locks = locks;
	smp->locks_end = locks_end;
	smp->text = text;
	smp->text_end = text_end;
	DPRINTK("locks %p -> %p, text %p -> %p, name %s",
		smp->locks, smp->locks_end,
		smp->text, smp->text_end, smp->name);

	list_add_tail(&smp->next, &smp_alt_modules);
smp_unlock:
	alternatives_smp_unlock(locks, locks_end, text, text_end);
unlock:
	mutex_unlock(&smp_alt);
}

void __init_or_module alternatives_smp_module_del(struct module *mod)
{
	struct smp_alt_module *item;

	mutex_lock(&smp_alt);
	list_for_each_entry(item, &smp_alt_modules, next) {
		if (mod != item->mod)
			continue;
		list_del(&item->next);
		kfree(item);
		break;
	}
	mutex_unlock(&smp_alt);
}

void alternatives_enable_smp(void)
{
	struct smp_alt_module *mod;

	/* Why bother if there are no other CPUs? */
	BUG_ON(num_possible_cpus() == 1);

	mutex_lock(&smp_alt);

	if (uniproc_patched) {
		pr_info("switching to SMP code\n");
		BUG_ON(num_online_cpus() != 1);
		clear_cpu_cap(&boot_cpu_data, X86_FEATURE_UP);
		clear_cpu_cap(&cpu_data(0), X86_FEATURE_UP);
		list_for_each_entry(mod, &smp_alt_modules, next)
			alternatives_smp_lock(mod->locks, mod->locks_end,
					      mod->text, mod->text_end);
		uniproc_patched = false;
	}
	mutex_unlock(&smp_alt);
}

/* Return 1 if the address range is reserved for smp-alternatives */
int alternatives_text_reserved(void *start, void *end)
{
	struct smp_alt_module *mod;
	const s32 *poff;
	u8 *text_start = start;
	u8 *text_end = end;

	list_for_each_entry(mod, &smp_alt_modules, next) {
		if (mod->text > text_end || mod->text_end < text_start)
			continue;
		for (poff = mod->locks; poff < mod->locks_end; poff++) {
			const u8 *ptr = (const u8 *)poff + *poff;

			if (text_start <= ptr && text_end > ptr)
				return 1;
		}
	}

	return 0;
}
#endif /* CONFIG_SMP */

#ifdef CONFIG_PARAVIRT
void __init_or_module apply_paravirt(struct paravirt_patch_site *start,
				     struct paravirt_patch_site *end)
{
	struct paravirt_patch_site *p;
	char insnbuf[MAX_PATCH_LEN];

	for (p = start; p < end; p++) {
		unsigned int used;

		BUG_ON(p->len > MAX_PATCH_LEN);
		/* prep the buffer with the original instructions */
		memcpy(insnbuf, p->instr, p->len);
		used = pv_init_ops.patch(p->instrtype, p->clobbers, insnbuf,
					 (unsigned long)p->instr, p->len);

		BUG_ON(used > p->len);

		/* Pad the rest with nops */
		add_nops(insnbuf + used, p->len - used);
		text_poke_early(p->instr, insnbuf, p->len);
	}
}
extern struct paravirt_patch_site __start_parainstructions[],
	__stop_parainstructions[];
#endif	/* CONFIG_PARAVIRT */

void __init alternative_instructions(void)
{
	/* The patching is not fully atomic, so try to avoid local
	   interruptions that might execute the code being patched.
	   Other CPUs are not running. */
	stop_nmi();

	/*
	 * Don't stop machine check exceptions while patching.
	 * MCEs only happen when something got corrupted and in this
	 * case we must do something about the corruption.
	 * Ignoring it is worse than an unlikely patching race.
	 * Also machine checks tend to be broadcast and if one CPU
	 * goes into machine check the others follow quickly, so we don't
	 * expect a machine check to cause undue problems during code
	 * patching.
	 */

	apply_alternatives(__alt_instructions, __alt_instructions_end);

#ifdef CONFIG_SMP
	/* Patch to UP if other cpus not imminent. */
	if (!noreplace_smp && (num_present_cpus() == 1 || setup_max_cpus <= 1)) {
		uniproc_patched = true;
		alternatives_smp_module_add(NULL, "core kernel",
					    __smp_locks, __smp_locks_end,
					    _text, _etext);
	}

	if (!uniproc_patched || num_possible_cpus() == 1)
		free_init_pages("SMP alternatives",
				(unsigned long)__smp_locks,
				(unsigned long)__smp_locks_end);
#endif

	apply_paravirt(__parainstructions, __parainstructions_end);

	restart_nmi();
	alternatives_patched = 1;
}

/**
 * text_poke_early - Update instructions on a live kernel at boot time
 * @addr: address to modify
 * @opcode: source of the copy
 * @len: length to copy
 *
 * When you use this code to patch more than one byte of an instruction
 * you need to make sure that other CPUs cannot execute this code in parallel.
 * Also no thread must be currently preempted in the middle of these
 * instructions. And on the local CPU you need to be protected against NMI or MCE
 * handlers seeing an inconsistent instruction while you patch.
 */
void *__init_or_module text_poke_early(void *addr, const void *opcode,
				       size_t len)
{
	unsigned long flags;
	local_irq_save(flags);
	memcpy(addr, opcode, len);
	sync_core();
	local_irq_restore(flags);
	/* Could also do a CLFLUSH here to speed up CPU recovery; but
	   that causes hangs on some VIA CPUs. */
	return addr;
}
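/*
 * A minimal usage sketch (names are hypothetical): during early boot,
 * with no other CPUs running, a 5-byte site can be rewritten directly:
 *
 *	u8 buf[5];
 *
 *	memcpy(buf, replacement, sizeof(buf));
 *	text_poke_early(site, buf, sizeof(buf));
 */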

/**
 * text_poke - Update instructions on a live kernel
 * @addr: address to modify
 * @opcode: source of the copy
 * @len: length to copy
 *
 * Only atomic text poke/set should be allowed when not doing early patching.
 * It means the size must be writable atomically and the address must be aligned
 * in a way that permits an atomic write. It also makes sure we fit on a single
 * page.
 *
 * Note: Must be called under text_mutex.
 */
void *text_poke(void *addr, const void *opcode, size_t len)
{
	unsigned long flags;
	char *vaddr;
	struct page *pages[2];
	int i;

	if (!core_kernel_text((unsigned long)addr)) {
		pages[0] = vmalloc_to_page(addr);
		pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
	} else {
		pages[0] = virt_to_page(addr);
		WARN_ON(!PageReserved(pages[0]));
		pages[1] = virt_to_page(addr + PAGE_SIZE);
	}
	BUG_ON(!pages[0]);
	local_irq_save(flags);
	set_fixmap(FIX_TEXT_POKE0, page_to_phys(pages[0]));
	if (pages[1])
		set_fixmap(FIX_TEXT_POKE1, page_to_phys(pages[1]));
	vaddr = (char *)fix_to_virt(FIX_TEXT_POKE0);
	memcpy(&vaddr[(unsigned long)addr & ~PAGE_MASK], opcode, len);
	clear_fixmap(FIX_TEXT_POKE0);
	if (pages[1])
		clear_fixmap(FIX_TEXT_POKE1);
	local_flush_tlb();
	sync_core();
	/* Could also do a CLFLUSH here to speed up CPU recovery; but
	   that causes hangs on some VIA CPUs. */
	for (i = 0; i < len; i++)
		BUG_ON(((char *)addr)[i] != ((char *)opcode)[i]);
	local_irq_restore(flags);
	return addr;
}
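/*
 * A minimal usage sketch, assuming the caller holds text_mutex and the
 * write is a single byte (so it is atomic by construction); "site" is
 * a hypothetical kernel text address:
 *
 *	unsigned char int3 = 0xcc;
 *
 *	mutex_lock(&text_mutex);
 *	text_poke(site, &int3, 1);
 *	mutex_unlock(&text_mutex);
 *
 * Larger cross-modifying patches should go through text_poke_bp().
 */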

static void do_sync_core(void *info)
{
	sync_core();
}

static bool bp_patching_in_progress;
static void *bp_int3_handler, *bp_int3_addr;

int poke_int3_handler(struct pt_regs *regs)
{
	/* bp_patching_in_progress */
	smp_rmb();

	if (likely(!bp_patching_in_progress))
		return 0;

	if (user_mode(regs) || regs->ip != (unsigned long)bp_int3_addr)
		return 0;

	/* set up the specified breakpoint handler */
	regs->ip = (unsigned long) bp_int3_handler;

	return 1;

}

/**
 * text_poke_bp() -- update instructions on live kernel on SMP
 * @addr:	address to patch
 * @opcode:	opcode of new instruction
 * @len:	length to copy
 * @handler:	address to jump to when the temporary breakpoint is hit
 *
 * Modify a multi-byte instruction by using an int3 breakpoint on SMP.
 * We completely avoid stop_machine() here, and achieve the
 * synchronization using int3 breakpoint.
 *
 * The way it is done:
 *	- add an int3 trap to the address that will be patched
 *	- sync cores
 *	- update all but the first byte of the patched range
 *	- sync cores
 *	- replace the first byte (int3) by the first byte of
 *	  replacing opcode
 *	- sync cores
 *
 * Note: must be called under text_mutex.
 */
void *text_poke_bp(void *addr, const void *opcode, size_t len, void *handler)
{
	unsigned char int3 = 0xcc;

	bp_int3_handler = handler;
	bp_int3_addr = (u8 *)addr + sizeof(int3);
	bp_patching_in_progress = true;
	/*
	 * Corresponding read barrier in int3 notifier for
	 * making sure the in_progress flag is correctly ordered wrt.
	 * patching
	 */
	smp_wmb();

	text_poke(addr, &int3, sizeof(int3));

	on_each_cpu(do_sync_core, NULL, 1);

	if (len - sizeof(int3) > 0) {
		/* patch all but the first byte */
		text_poke((char *)addr + sizeof(int3),
			  (const char *) opcode + sizeof(int3),
			  len - sizeof(int3));
		/*
		 * According to Intel, this core syncing is very likely
		 * not necessary and we'd be safe even without it. But
		 * better safe than sorry (plus there's not only Intel).
		 */
		on_each_cpu(do_sync_core, NULL, 1);
	}

	/* patch the first byte */
	text_poke(addr, opcode, sizeof(int3));

	on_each_cpu(do_sync_core, NULL, 1);

	bp_patching_in_progress = false;
	smp_wmb();

	return addr;
}
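/*
 * A minimal usage sketch, assuming a 5-byte CALL at "site" is being
 * redirected while other CPUs may execute it (all names hypothetical):
 *
 *	unsigned char insn[5];
 *
 *	insn[0] = 0xe8;					(CALL rel32 opcode)
 *	*(s32 *)&insn[1] = new_target - (site + 5);	(relative displacement)
 *
 *	mutex_lock(&text_mutex);
 *	text_poke_bp(site, insn, sizeof(insn), resume_handler);
 *	mutex_unlock(&text_mutex);
 *
 * A CPU that hits the temporary int3 in the meantime is diverted to
 * resume_handler by poke_int3_handler() instead of executing a
 * half-written instruction.
 */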