blob: 50eb0e03777e4b1a7f0fe564f507b6ccfe358f18 [file] [log] [blame]
Gerd Hoffmann9a0b5812006-03-23 02:59:32 -08001#include <linux/module.h>
2#include <linux/spinlock.h>
3#include <linux/list.h>
4#include <asm/alternative.h>
5#include <asm/sections.h>
6
/* Runtime knobs, set from the kernel command line via the __setup()
 * handlers below.  Statics live in .bss and are already zero: kernel
 * coding style says do not initialise them to 0 explicitly. */
static int no_replacement;	/* "noreplacement": disable all patching */
static int smp_alt_once;	/* "smp-alt-boot": patch SMP alternatives once at boot only */
static int debug_alternative;	/* "debug-alternative": enable DPRINTK output */
Gerd Hoffmann9a0b5812006-03-23 02:59:32 -080010
Gerd Hoffmannd167a512006-06-26 13:56:16 +020011static int __init noreplacement_setup(char *s)
12{
13 no_replacement = 1;
14 return 1;
15}
16static int __init bootonly(char *str)
17{
18 smp_alt_once = 1;
19 return 1;
20}
21static int __init debug_alt(char *str)
22{
23 debug_alternative = 1;
24 return 1;
25}
26
/* Register the command-line option handlers defined above. */
__setup("noreplacement", noreplacement_setup);
__setup("smp-alt-boot", bootonly);
__setup("debug-alternative", debug_alt);
30
/*
 * Debug printk gated on the "debug-alternative" boot option.
 * Wrapped in do { } while (0) so the macro expands to a single
 * statement: the original bare "if" silently captured a following
 * "else" and broke un-braced if/else call sites.
 */
#define DPRINTK(fmt, args...)				\
	do {						\
		if (debug_alternative)			\
			printk(KERN_DEBUG fmt, args);	\
	} while (0)
33
#ifdef GENERIC_NOP1
/* Use inline assembly to define this because the nops are defined
   as inline assembly strings in the include files and we cannot
   get them easily into strings. */
/* NOTE(review): the asm switches to .data and never switches back;
   presumably safe only because nothing code-generating follows in
   this translation unit at that point -- confirm. */
asm("\t.data\nintelnops: "
	GENERIC_NOP1 GENERIC_NOP2 GENERIC_NOP3 GENERIC_NOP4 GENERIC_NOP5 GENERIC_NOP6
	GENERIC_NOP7 GENERIC_NOP8);
/* Label emitted by the asm above: the 1..8 byte NOP sequences laid
   out back to back. */
extern unsigned char intelnops[];
/* intel_nops[n] points at an n-byte NOP sequence (index 0 unused);
   entry n starts after the 1+2+...+(n-1) bytes of the shorter NOPs. */
static unsigned char *intel_nops[ASM_NOP_MAX+1] = {
	NULL,
	intelnops,
	intelnops + 1,
	intelnops + 1 + 2,
	intelnops + 1 + 2 + 3,
	intelnops + 1 + 2 + 3 + 4,
	intelnops + 1 + 2 + 3 + 4 + 5,
	intelnops + 1 + 2 + 3 + 4 + 5 + 6,
	intelnops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
};
#endif
54
#ifdef K8_NOP1
/* K8-optimized NOPs, emitted back to back like the Intel table. */
asm("\t.data\nk8nops: "
	K8_NOP1 K8_NOP2 K8_NOP3 K8_NOP4 K8_NOP5 K8_NOP6
	K8_NOP7 K8_NOP8);
extern unsigned char k8nops[];
/* k8_nops[n] points at an n-byte NOP sequence (index 0 unused). */
static unsigned char *k8_nops[ASM_NOP_MAX+1] = {
	NULL,
	k8nops,
	k8nops + 1,
	k8nops + 1 + 2,
	k8nops + 1 + 2 + 3,
	k8nops + 1 + 2 + 3 + 4,
	k8nops + 1 + 2 + 3 + 4 + 5,
	k8nops + 1 + 2 + 3 + 4 + 5 + 6,
	k8nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
};
#endif
72
#ifdef K7_NOP1
/* K7 (Athlon)-optimized NOPs, emitted back to back. */
asm("\t.data\nk7nops: "
	K7_NOP1 K7_NOP2 K7_NOP3 K7_NOP4 K7_NOP5 K7_NOP6
	K7_NOP7 K7_NOP8);
extern unsigned char k7nops[];
/* k7_nops[n] points at an n-byte NOP sequence (index 0 unused). */
static unsigned char *k7_nops[ASM_NOP_MAX+1] = {
	NULL,
	k7nops,
	k7nops + 1,
	k7nops + 1 + 2,
	k7nops + 1 + 2 + 3,
	k7nops + 1 + 2 + 3 + 4,
	k7nops + 1 + 2 + 3 + 4 + 5,
	k7nops + 1 + 2 + 3 + 4 + 5 + 6,
	k7nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
};
#endif
90
91#ifdef CONFIG_X86_64
92
/* Start of the vsyscall page; apply_alternatives() uses it to resolve
   vsyscall-range addresses before that page is mapped. */
extern char __vsyscall_0;
/* On x86-64 the K8 NOP table is used unconditionally. */
static inline unsigned char** find_nop_table(void)
{
	return k8_nops;
}
98
99#else /* CONFIG_X86_64 */
100
/* CPU-feature -> NOP-table mapping, checked in order; terminated by
   a negative cpuid sentinel entry. */
static struct nop {
	int cpuid;
	unsigned char **noptable;
} noptypes[] = {
	{ X86_FEATURE_K8, k8_nops },
	{ X86_FEATURE_K7, k7_nops },
	{ -1, NULL }
};
109
Gerd Hoffmann9a0b5812006-03-23 02:59:32 -0800110static unsigned char** find_nop_table(void)
111{
112 unsigned char **noptable = intel_nops;
113 int i;
114
115 for (i = 0; noptypes[i].cpuid >= 0; i++) {
116 if (boot_cpu_has(noptypes[i].cpuid)) {
117 noptable = noptypes[i].noptable;
118 break;
119 }
120 }
121 return noptable;
122}
123
Gerd Hoffmannd167a512006-06-26 13:56:16 +0200124#endif /* CONFIG_X86_64 */
125
/* Symbols provided by the linker script: the alternative-instruction
   tables, the array of pointers to SMP lock-prefix bytes, and the
   init section holding the saved SMP variants. */
extern struct alt_instr __alt_instructions[], __alt_instructions_end[];
extern struct alt_instr __smp_alt_instructions[], __smp_alt_instructions_end[];
extern u8 *__smp_locks[], *__smp_locks_end[];

extern u8 __smp_alt_begin[], __smp_alt_end[];
131
Gerd Hoffmann9a0b5812006-03-23 02:59:32 -0800132/* Replace instructions with better alternatives for this CPU type.
133 This runs before SMP is initialized to avoid SMP problems with
134 self modifying code. This implies that assymetric systems where
135 APs have less capabilities than the boot processor are not handled.
136 Tough. Make sure you disable such features by hand. */
137
138void apply_alternatives(struct alt_instr *start, struct alt_instr *end)
139{
140 unsigned char **noptable = find_nop_table();
141 struct alt_instr *a;
Gerd Hoffmannd167a512006-06-26 13:56:16 +0200142 u8 *instr;
Gerd Hoffmann9a0b5812006-03-23 02:59:32 -0800143 int diff, i, k;
144
145 DPRINTK("%s: alt table %p -> %p\n", __FUNCTION__, start, end);
146 for (a = start; a < end; a++) {
147 BUG_ON(a->replacementlen > a->instrlen);
148 if (!boot_cpu_has(a->cpuid))
149 continue;
Gerd Hoffmannd167a512006-06-26 13:56:16 +0200150 instr = a->instr;
151#ifdef CONFIG_X86_64
152 /* vsyscall code is not mapped yet. resolve it manually. */
153 if (instr >= (u8 *)VSYSCALL_START && instr < (u8*)VSYSCALL_END) {
154 instr = __va(instr - (u8*)VSYSCALL_START + (u8*)__pa_symbol(&__vsyscall_0));
155 DPRINTK("%s: vsyscall fixup: %p => %p\n",
156 __FUNCTION__, a->instr, instr);
157 }
158#endif
159 memcpy(instr, a->replacement, a->replacementlen);
Gerd Hoffmann9a0b5812006-03-23 02:59:32 -0800160 diff = a->instrlen - a->replacementlen;
161 /* Pad the rest with nops */
162 for (i = a->replacementlen; diff > 0; diff -= k, i += k) {
163 k = diff;
164 if (k > ASM_NOP_MAX)
165 k = ASM_NOP_MAX;
166 memcpy(a->instr + i, noptable[k], k);
167 }
168 }
169}
170
171static void alternatives_smp_save(struct alt_instr *start, struct alt_instr *end)
172{
173 struct alt_instr *a;
174
175 DPRINTK("%s: alt table %p-%p\n", __FUNCTION__, start, end);
176 for (a = start; a < end; a++) {
177 memcpy(a->replacement + a->replacementlen,
178 a->instr,
179 a->instrlen);
180 }
181}
182
183static void alternatives_smp_apply(struct alt_instr *start, struct alt_instr *end)
184{
185 struct alt_instr *a;
186
187 for (a = start; a < end; a++) {
188 memcpy(a->instr,
189 a->replacement + a->replacementlen,
190 a->instrlen);
191 }
192}
193
194static void alternatives_smp_lock(u8 **start, u8 **end, u8 *text, u8 *text_end)
195{
196 u8 **ptr;
197
198 for (ptr = start; ptr < end; ptr++) {
199 if (*ptr < text)
200 continue;
201 if (*ptr > text_end)
202 continue;
203 **ptr = 0xf0; /* lock prefix */
204 };
205}
206
207static void alternatives_smp_unlock(u8 **start, u8 **end, u8 *text, u8 *text_end)
208{
209 unsigned char **noptable = find_nop_table();
210 u8 **ptr;
211
212 for (ptr = start; ptr < end; ptr++) {
213 if (*ptr < text)
214 continue;
215 if (*ptr > text_end)
216 continue;
217 **ptr = noptable[1][0];
218 };
219}
220
/*
 * Bookkeeping for one registered code range whose lock prefixes can
 * be toggled between SMP and UP form at CPU hotplug time.
 */
struct smp_alt_module {
	/* module that owns this range; NULL for the core kernel */
	struct module *mod;
	char *name;

	/* ptrs to lock prefixes */
	u8 **locks;
	u8 **locks_end;

	/* .text segment, needed to avoid patching init code ;) */
	u8 *text;
	u8 *text_end;

	struct list_head next;
};
/* All registered ranges; guarded by the smp_alt spinlock. */
static LIST_HEAD(smp_alt_modules);
static DEFINE_SPINLOCK(smp_alt);
238
/*
 * Register a code range (core kernel or module) for SMP lock-prefix
 * toggling.
 *
 * mod:               owning module, NULL for the core kernel
 * name:              human-readable name, used in debug output
 * locks, locks_end:  array of pointers to lock-prefix bytes
 * text, text_end:    code range the lock pointers must fall into
 *                    (keeps init code from being patched)
 */
void alternatives_smp_module_add(struct module *mod, char *name,
				 void *locks, void *locks_end,
				 void *text, void *text_end)
{
	struct smp_alt_module *smp;
	unsigned long flags;

	/* All patching disabled on the command line. */
	if (no_replacement)
		return;

	if (smp_alt_once) {
		/* Patch-once mode: no bookkeeping kept; just strip the
		   lock prefixes now if we know we stay uniprocessor. */
		if (boot_cpu_has(X86_FEATURE_UP))
			alternatives_smp_unlock(locks, locks_end,
						text, text_end);
		return;
	}

	smp = kzalloc(sizeof(*smp), GFP_KERNEL);
	if (NULL == smp)
		return; /* we'll run the (safe but slow) SMP code then ... */

	smp->mod = mod;
	smp->name = name;
	smp->locks = locks;
	smp->locks_end = locks_end;
	smp->text = text;
	smp->text_end = text_end;
	DPRINTK("%s: locks %p -> %p, text %p -> %p, name %s\n",
		__FUNCTION__, smp->locks, smp->locks_end,
		smp->text, smp->text_end, smp->name);

	spin_lock_irqsave(&smp_alt, flags);
	list_add_tail(&smp->next, &smp_alt_modules);
	/* Currently running UP: convert this range right away. */
	if (boot_cpu_has(X86_FEATURE_UP))
		alternatives_smp_unlock(smp->locks, smp->locks_end,
					smp->text, smp->text_end);
	spin_unlock_irqrestore(&smp_alt, flags);
}
277
/*
 * Drop a module's lock-prefix bookkeeping at module unload.  No-op
 * when patching is disabled or in patch-once mode, since nothing was
 * recorded by alternatives_smp_module_add() in those cases.
 */
void alternatives_smp_module_del(struct module *mod)
{
	struct smp_alt_module *item;
	unsigned long flags;

	if (no_replacement || smp_alt_once)
		return;

	spin_lock_irqsave(&smp_alt, flags);
	list_for_each_entry(item, &smp_alt_modules, next) {
		if (mod != item->mod)
			continue;
		/* Unlink first, then drop the lock before kfree;
		   the list walk terminates here so this is safe. */
		list_del(&item->next);
		spin_unlock_irqrestore(&smp_alt, flags);
		DPRINTK("%s: %s\n", __FUNCTION__, item->name);
		kfree(item);
		return;
	}
	spin_unlock_irqrestore(&smp_alt, flags);
}
298
/*
 * Flip every registered code range between SMP and UP form at CPU
 * hotplug time.  smp != 0: restore the saved SMP alternatives and
 * re-insert lock prefixes; smp == 0: apply UP alternatives and
 * NOP out the lock prefixes.  This is self-modifying code, hence
 * the BUG_ON: switching to UP form with other CPUs online is fatal.
 */
void alternatives_smp_switch(int smp)
{
	struct smp_alt_module *mod;
	unsigned long flags;

	if (no_replacement || smp_alt_once)
		return;
	BUG_ON(!smp && (num_online_cpus() > 1));

	spin_lock_irqsave(&smp_alt, flags);
	if (smp) {
		printk(KERN_INFO "SMP alternatives: switching to SMP code\n");
		/* Clear the synthetic UP feature bit so UP-only
		   alternatives are no longer selected. */
		clear_bit(X86_FEATURE_UP, boot_cpu_data.x86_capability);
		clear_bit(X86_FEATURE_UP, cpu_data[0].x86_capability);
		alternatives_smp_apply(__smp_alt_instructions,
				       __smp_alt_instructions_end);
		list_for_each_entry(mod, &smp_alt_modules, next)
			alternatives_smp_lock(mod->locks, mod->locks_end,
					      mod->text, mod->text_end);
	} else {
		printk(KERN_INFO "SMP alternatives: switching to UP code\n");
		set_bit(X86_FEATURE_UP, boot_cpu_data.x86_capability);
		set_bit(X86_FEATURE_UP, cpu_data[0].x86_capability);
		apply_alternatives(__smp_alt_instructions,
				   __smp_alt_instructions_end);
		list_for_each_entry(mod, &smp_alt_modules, next)
			alternatives_smp_unlock(mod->locks, mod->locks_end,
						mod->text, mod->text_end);
	}
	spin_unlock_irqrestore(&smp_alt, flags);
}
330
/*
 * Boot-time entry point: apply all alternative instructions, then
 * either patch SMP/UP form once and free the saved tables, or save
 * the SMP originals so alternatives_smp_switch() can toggle them
 * later at CPU hotplug time.
 */
void __init alternative_instructions(void)
{
	if (no_replacement) {
		printk(KERN_INFO "(SMP-)alternatives turned off\n");
		/* The saved-SMP-code section will never be used. */
		free_init_pages("SMP alternatives",
				(unsigned long)__smp_alt_begin,
				(unsigned long)__smp_alt_end);
		return;
	}
	apply_alternatives(__alt_instructions, __alt_instructions_end);

	/* switch to patch-once-at-boottime-only mode and free the
	 * tables in case we know the number of CPUs will never ever
	 * change */
#ifdef CONFIG_HOTPLUG_CPU
	if (num_possible_cpus() < 2)
		smp_alt_once = 1;
#else
	smp_alt_once = 1;
#endif

	if (smp_alt_once) {
		/* Only convert to UP form if no second CPU can ever
		   appear; otherwise leave the (safe) SMP form in. */
		if (1 == num_possible_cpus()) {
			printk(KERN_INFO "SMP alternatives: switching to UP code\n");
			set_bit(X86_FEATURE_UP, boot_cpu_data.x86_capability);
			set_bit(X86_FEATURE_UP, cpu_data[0].x86_capability);
			apply_alternatives(__smp_alt_instructions,
					   __smp_alt_instructions_end);
			alternatives_smp_unlock(__smp_locks, __smp_locks_end,
						_text, _etext);
		}
		free_init_pages("SMP alternatives",
				(unsigned long)__smp_alt_begin,
				(unsigned long)__smp_alt_end);
	} else {
		/* Hotplug possible: keep the SMP originals around and
		   register the core kernel range, then start in UP form. */
		alternatives_smp_save(__smp_alt_instructions,
				      __smp_alt_instructions_end);
		alternatives_smp_module_add(NULL, "core kernel",
					    __smp_locks, __smp_locks_end,
					    _text, _etext);
		alternatives_smp_switch(0);
	}
}
373}