/*
 * Copyright (C) 2001 Ben. Herrenschmidt (benh@kernel.crashing.org)
 *
 * Modifications for ppc64:
 *     Copyright (C) 2003 Dave Engebretsen <engebret@us.ibm.com>
 *
 * Copyright 2008 Michael Ellerman, IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/types.h>
#include <linux/jump_label.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/init.h>
#include <asm/cputable.h>
#include <asm/code-patching.h>
#include <asm/page.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/firmware.h>

struct fixup_entry {
	unsigned long	mask;
	unsigned long	value;
	long		start_off;
	long		end_off;
	long		alt_start_off;
	long		alt_end_off;
};

static unsigned int *calc_addr(struct fixup_entry *fcur, long offset)
{
	/*
	 * We store the offset to the code as a negative offset from
	 * the start of the alt_entry, to support the VDSO. This
	 * routine converts that back into an actual address.
	 */
	return (unsigned int *)((unsigned long)fcur + offset);
}

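/*
 * Copy one instruction from an alternative section into the code being
 * patched. A relative branch whose target lies outside the alternative
 * section must be re-encoded for its new location; if that translation
 * fails, return non-zero so the caller can abandon the patch.
 */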
static int patch_alt_instruction(unsigned int *src, unsigned int *dest,
				 unsigned int *alt_start, unsigned int *alt_end)
{
	unsigned int instr;

	instr = *src;

	if (instr_is_relative_branch(*src)) {
		unsigned int *target = (unsigned int *)branch_target(src);

		/* Branch within the section doesn't need translating */
		if (target < alt_start || target >= alt_end) {
			instr = translate_branch(dest, src);
			if (!instr)
				return 1;
		}
	}

	patch_instruction(dest, instr);

	return 0;
}

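/*
 * Patch a single feature section. If the feature bits in 'value' match the
 * entry's mask/value pair the section is left alone. Otherwise the
 * alternative code (which may be empty) is copied over the start of the
 * section and the remainder is filled with nops. Returns non-zero if the
 * alternative is larger than the section it would replace.
 */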
static int patch_feature_section(unsigned long value, struct fixup_entry *fcur)
{
	unsigned int *start, *end, *alt_start, *alt_end, *src, *dest;

	start = calc_addr(fcur, fcur->start_off);
	end = calc_addr(fcur, fcur->end_off);
	alt_start = calc_addr(fcur, fcur->alt_start_off);
	alt_end = calc_addr(fcur, fcur->alt_end_off);

	if ((alt_end - alt_start) > (end - start))
		return 1;

	if ((value & fcur->mask) == fcur->value)
		return 0;

	src = alt_start;
	dest = start;

	for (; src < alt_end; src++, dest++) {
		if (patch_alt_instruction(src, dest, alt_start, alt_end))
			return 1;
	}

	for (; dest < end; dest++)
		patch_instruction(dest, PPC_INST_NOP);

	return 0;
}

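/*
 * Walk the fixup entries between fixup_start and fixup_end (collected into
 * their own section by the feature-section macros at build time) and patch
 * every section that does not apply to the feature bits in 'value'.
 */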
void do_feature_fixups(unsigned long value, void *fixup_start, void *fixup_end)
{
	struct fixup_entry *fcur, *fend;

	fcur = fixup_start;
	fend = fixup_end;

	for (; fcur < fend; fcur++) {
		if (patch_feature_section(value, fcur)) {
			WARN_ON(1);
			printk("Unable to patch feature section at %p - %p" \
				" with %p - %p\n",
				calc_addr(fcur, fcur->start_off),
				calc_addr(fcur, fcur->end_off),
				calc_addr(fcur, fcur->alt_start_off),
				calc_addr(fcur, fcur->alt_end_off));
		}
	}
}

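/*
 * The lwsync fixup locations are assembled as a full sync by default. If
 * this CPU has CPU_FTR_LWSYNC, patch each recorded location with the
 * lighter-weight lwsync; otherwise leave the sync instructions in place.
 */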
void do_lwsync_fixups(unsigned long value, void *fixup_start, void *fixup_end)
{
	long *start, *end;
	unsigned int *dest;

	if (!(value & CPU_FTR_LWSYNC))
		return;

	start = fixup_start;
	end = fixup_end;

	for (; start < end; start++) {
		dest = (void *)start + *start;
		patch_instruction(dest, PPC_INST_LWSYNC);
	}
}

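/*
 * On a relocatable 64-bit kernel running above physical address zero, copy
 * the exception vectors (everything from _stext to __end_interrupts) from
 * the running image down to the fixed low-memory location at KERNELBASE,
 * where the hardware expects to find them.
 */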
static void do_final_fixups(void)
{
#if defined(CONFIG_PPC64) && defined(CONFIG_RELOCATABLE)
	int *src, *dest;
	unsigned long length;

	if (PHYSICAL_START == 0)
		return;

	src = (int *)(KERNELBASE + PHYSICAL_START);
	dest = (int *)KERNELBASE;
	length = (__end_interrupts - _stext) / sizeof(int);

	while (length--) {
		patch_instruction(dest, *src);
		src++;
		dest++;
	}
#endif
}

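/*
 * Snapshot of the feature bits taken when the kernel text was patched,
 * used by check_features() below to warn if they change later.
 */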
static unsigned long __initdata saved_cpu_features;
static unsigned int __initdata saved_mmu_features;
#ifdef CONFIG_PPC64
static unsigned long __initdata saved_firmware_features;
#endif

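/*
 * Apply all boot-time code patching: CPU, MMU and firmware feature
 * sections, lwsync fixups and the final relocation fixups. This may run
 * while the kernel is not yet at its linked address, hence the PTRRELOC()
 * wrappers around the section symbols and saved feature variables.
 */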
void __init apply_feature_fixups(void)
{
	struct cpu_spec *spec = *PTRRELOC(&cur_cpu_spec);

	*PTRRELOC(&saved_cpu_features) = spec->cpu_features;
	*PTRRELOC(&saved_mmu_features) = spec->mmu_features;

	/*
	 * Apply the CPU-specific and firmware-specific fixups to kernel text
	 * (nop out sections not relevant to this CPU or this firmware).
	 */
	do_feature_fixups(spec->cpu_features,
			  PTRRELOC(&__start___ftr_fixup),
			  PTRRELOC(&__stop___ftr_fixup));

	do_feature_fixups(spec->mmu_features,
			  PTRRELOC(&__start___mmu_ftr_fixup),
			  PTRRELOC(&__stop___mmu_ftr_fixup));

	do_lwsync_fixups(spec->cpu_features,
			 PTRRELOC(&__start___lwsync_fixup),
			 PTRRELOC(&__stop___lwsync_fixup));

#ifdef CONFIG_PPC64
	saved_firmware_features = powerpc_firmware_features;
	do_feature_fixups(powerpc_firmware_features,
			  &__start___fw_ftr_fixup, &__stop___fw_ftr_fixup);
#endif
	do_final_fixups();

	/*
	 * Initialise jump labels. This causes all the cpu/mmu_has_feature()
	 * checks to take on their correct polarity based on the current set of
	 * CPU/MMU features.
	 */
	jump_label_init();
	cpu_feature_keys_init();
}

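/*
 * Sanity check, run late in boot: the feature bits must not have changed
 * since apply_feature_fixups() ran, because the text patching it performed
 * is not redone and would no longer match the features.
 */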
static int __init check_features(void)
{
	WARN(saved_cpu_features != cur_cpu_spec->cpu_features,
	     "CPU features changed after feature patching!\n");
	WARN(saved_mmu_features != cur_cpu_spec->mmu_features,
	     "MMU features changed after feature patching!\n");
#ifdef CONFIG_PPC64
	WARN(saved_firmware_features != powerpc_firmware_features,
	     "Firmware features changed after feature patching!\n");
#endif

	return 0;
}
late_initcall(check_features);

#ifdef CONFIG_FTR_FIXUP_SELFTEST

#define check(x)	\
	if (!(x)) printk("feature-fixups: test failed at line %d\n", __LINE__);

/* This must be after the text it fixes up, vmlinux.lds.S enforces that atm */
static struct fixup_entry fixup;

static long calc_offset(struct fixup_entry *entry, unsigned int *p)
{
	return (unsigned long)p - (unsigned long)entry;
}

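/*
 * The ftr_fixup_test* and lwsync_fixup_test* symbols used below are small
 * blocks of code, plus their expected post-patching forms, assembled
 * separately for these self-tests (see feature-fixups-test.S).
 */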
static void test_basic_patching(void)
{
	extern unsigned int ftr_fixup_test1;
	extern unsigned int end_ftr_fixup_test1;
	extern unsigned int ftr_fixup_test1_orig;
	extern unsigned int ftr_fixup_test1_expected;
	int size = &end_ftr_fixup_test1 - &ftr_fixup_test1;

	fixup.value = fixup.mask = 8;
	fixup.start_off = calc_offset(&fixup, &ftr_fixup_test1 + 1);
	fixup.end_off = calc_offset(&fixup, &ftr_fixup_test1 + 2);
	fixup.alt_start_off = fixup.alt_end_off = 0;

	/* Sanity check */
	check(memcmp(&ftr_fixup_test1, &ftr_fixup_test1_orig, size) == 0);

	/* Check we don't patch if the value matches */
	patch_feature_section(8, &fixup);
	check(memcmp(&ftr_fixup_test1, &ftr_fixup_test1_orig, size) == 0);

	/* Check we do patch if the value doesn't match */
	patch_feature_section(0, &fixup);
	check(memcmp(&ftr_fixup_test1, &ftr_fixup_test1_expected, size) == 0);

	/* Check we do patch if the mask doesn't match */
	memcpy(&ftr_fixup_test1, &ftr_fixup_test1_orig, size);
	check(memcmp(&ftr_fixup_test1, &ftr_fixup_test1_orig, size) == 0);
	patch_feature_section(~8, &fixup);
	check(memcmp(&ftr_fixup_test1, &ftr_fixup_test1_expected, size) == 0);
}

static void test_alternative_patching(void)
{
	extern unsigned int ftr_fixup_test2;
	extern unsigned int end_ftr_fixup_test2;
	extern unsigned int ftr_fixup_test2_orig;
	extern unsigned int ftr_fixup_test2_alt;
	extern unsigned int ftr_fixup_test2_expected;
	int size = &end_ftr_fixup_test2 - &ftr_fixup_test2;

	fixup.value = fixup.mask = 0xF;
	fixup.start_off = calc_offset(&fixup, &ftr_fixup_test2 + 1);
	fixup.end_off = calc_offset(&fixup, &ftr_fixup_test2 + 2);
	fixup.alt_start_off = calc_offset(&fixup, &ftr_fixup_test2_alt);
	fixup.alt_end_off = calc_offset(&fixup, &ftr_fixup_test2_alt + 1);

	/* Sanity check */
	check(memcmp(&ftr_fixup_test2, &ftr_fixup_test2_orig, size) == 0);

	/* Check we don't patch if the value matches */
	patch_feature_section(0xF, &fixup);
	check(memcmp(&ftr_fixup_test2, &ftr_fixup_test2_orig, size) == 0);

	/* Check we do patch if the value doesn't match */
	patch_feature_section(0, &fixup);
	check(memcmp(&ftr_fixup_test2, &ftr_fixup_test2_expected, size) == 0);

	/* Check we do patch if the mask doesn't match */
	memcpy(&ftr_fixup_test2, &ftr_fixup_test2_orig, size);
	check(memcmp(&ftr_fixup_test2, &ftr_fixup_test2_orig, size) == 0);
	patch_feature_section(~0xF, &fixup);
	check(memcmp(&ftr_fixup_test2, &ftr_fixup_test2_expected, size) == 0);
}

static void test_alternative_case_too_big(void)
{
	extern unsigned int ftr_fixup_test3;
	extern unsigned int end_ftr_fixup_test3;
	extern unsigned int ftr_fixup_test3_orig;
	extern unsigned int ftr_fixup_test3_alt;
	int size = &end_ftr_fixup_test3 - &ftr_fixup_test3;

	fixup.value = fixup.mask = 0xC;
	fixup.start_off = calc_offset(&fixup, &ftr_fixup_test3 + 1);
	fixup.end_off = calc_offset(&fixup, &ftr_fixup_test3 + 2);
	fixup.alt_start_off = calc_offset(&fixup, &ftr_fixup_test3_alt);
	fixup.alt_end_off = calc_offset(&fixup, &ftr_fixup_test3_alt + 2);

	/* Sanity check */
	check(memcmp(&ftr_fixup_test3, &ftr_fixup_test3_orig, size) == 0);

	/* Expect nothing to be patched, and the error returned to us */
	check(patch_feature_section(0xF, &fixup) == 1);
	check(memcmp(&ftr_fixup_test3, &ftr_fixup_test3_orig, size) == 0);
	check(patch_feature_section(0, &fixup) == 1);
	check(memcmp(&ftr_fixup_test3, &ftr_fixup_test3_orig, size) == 0);
	check(patch_feature_section(~0xF, &fixup) == 1);
	check(memcmp(&ftr_fixup_test3, &ftr_fixup_test3_orig, size) == 0);
}

static void test_alternative_case_too_small(void)
{
	extern unsigned int ftr_fixup_test4;
	extern unsigned int end_ftr_fixup_test4;
	extern unsigned int ftr_fixup_test4_orig;
	extern unsigned int ftr_fixup_test4_alt;
	extern unsigned int ftr_fixup_test4_expected;
	int size = &end_ftr_fixup_test4 - &ftr_fixup_test4;
	unsigned long flag;

	/* Check a high-bit flag */
	flag = 1UL << ((sizeof(unsigned long) - 1) * 8);
	fixup.value = fixup.mask = flag;
	fixup.start_off = calc_offset(&fixup, &ftr_fixup_test4 + 1);
	fixup.end_off = calc_offset(&fixup, &ftr_fixup_test4 + 5);
	fixup.alt_start_off = calc_offset(&fixup, &ftr_fixup_test4_alt);
	fixup.alt_end_off = calc_offset(&fixup, &ftr_fixup_test4_alt + 2);

	/* Sanity check */
	check(memcmp(&ftr_fixup_test4, &ftr_fixup_test4_orig, size) == 0);

	/* Check we don't patch if the value matches */
	patch_feature_section(flag, &fixup);
	check(memcmp(&ftr_fixup_test4, &ftr_fixup_test4_orig, size) == 0);

	/* Check we do patch if the value doesn't match */
	patch_feature_section(0, &fixup);
	check(memcmp(&ftr_fixup_test4, &ftr_fixup_test4_expected, size) == 0);

	/* Check we do patch if the mask doesn't match */
	memcpy(&ftr_fixup_test4, &ftr_fixup_test4_orig, size);
	check(memcmp(&ftr_fixup_test4, &ftr_fixup_test4_orig, size) == 0);
	patch_feature_section(~flag, &fixup);
	check(memcmp(&ftr_fixup_test4, &ftr_fixup_test4_expected, size) == 0);
}

static void test_alternative_case_with_branch(void)
{
	extern unsigned int ftr_fixup_test5;
	extern unsigned int end_ftr_fixup_test5;
	extern unsigned int ftr_fixup_test5_expected;
	int size = &end_ftr_fixup_test5 - &ftr_fixup_test5;

	check(memcmp(&ftr_fixup_test5, &ftr_fixup_test5_expected, size) == 0);
}

static void test_alternative_case_with_external_branch(void)
{
	extern unsigned int ftr_fixup_test6;
	extern unsigned int end_ftr_fixup_test6;
	extern unsigned int ftr_fixup_test6_expected;
	int size = &end_ftr_fixup_test6 - &ftr_fixup_test6;

	check(memcmp(&ftr_fixup_test6, &ftr_fixup_test6_expected, size) == 0);
}

static void test_cpu_macros(void)
{
	extern u8 ftr_fixup_test_FTR_macros;
	extern u8 ftr_fixup_test_FTR_macros_expected;
	unsigned long size = &ftr_fixup_test_FTR_macros_expected -
			     &ftr_fixup_test_FTR_macros;

	/* The fixups have already been done for us during boot */
	check(memcmp(&ftr_fixup_test_FTR_macros,
		     &ftr_fixup_test_FTR_macros_expected, size) == 0);
}

static void test_fw_macros(void)
{
#ifdef CONFIG_PPC64
	extern u8 ftr_fixup_test_FW_FTR_macros;
	extern u8 ftr_fixup_test_FW_FTR_macros_expected;
	unsigned long size = &ftr_fixup_test_FW_FTR_macros_expected -
			     &ftr_fixup_test_FW_FTR_macros;

	/* The fixups have already been done for us during boot */
	check(memcmp(&ftr_fixup_test_FW_FTR_macros,
		     &ftr_fixup_test_FW_FTR_macros_expected, size) == 0);
#endif
}

static void test_lwsync_macros(void)
{
	extern u8 lwsync_fixup_test;
	extern u8 end_lwsync_fixup_test;
	extern u8 lwsync_fixup_test_expected_LWSYNC;
	extern u8 lwsync_fixup_test_expected_SYNC;
	unsigned long size = &end_lwsync_fixup_test -
			     &lwsync_fixup_test;

	/* The fixups have already been done for us during boot */
	if (cur_cpu_spec->cpu_features & CPU_FTR_LWSYNC) {
		check(memcmp(&lwsync_fixup_test,
			     &lwsync_fixup_test_expected_LWSYNC, size) == 0);
	} else {
		check(memcmp(&lwsync_fixup_test,
			     &lwsync_fixup_test_expected_SYNC, size) == 0);
	}
}

static int __init test_feature_fixups(void)
{
	printk(KERN_DEBUG "Running feature fixup self-tests ...\n");

	test_basic_patching();
	test_alternative_patching();
	test_alternative_case_too_big();
	test_alternative_case_too_small();
	test_alternative_case_with_branch();
	test_alternative_case_with_external_branch();
	test_cpu_macros();
	test_fw_macros();
	test_lwsync_macros();

	return 0;
}
late_initcall(test_feature_fixups);

#endif /* CONFIG_FTR_FIXUP_SELFTEST */