/*
 * Copyright (C) 2001 Ben. Herrenschmidt (benh@kernel.crashing.org)
 *
 * Modifications for ppc64:
 *      Copyright (C) 2003 Dave Engebretsen <engebret@us.ibm.com>
 *
 * Copyright 2008 Michael Ellerman, IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/init.h>
#include <asm/cputable.h>
#include <asm/code-patching.h>
#include <asm/page.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/firmware.h>

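/*
 * One entry in a feature fixup table. All offsets are stored relative to
 * the fixup_entry itself (see calc_addr()): start/end delimit the code to
 * patch, alt_start/alt_end delimit the alternative instructions that are
 * patched in when the masked feature value does not match this entry.
 */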
struct fixup_entry {
	unsigned long	mask;
	unsigned long	value;
	long		start_off;
	long		end_off;
	long		alt_start_off;
	long		alt_end_off;
};

static unsigned int *calc_addr(struct fixup_entry *fcur, long offset)
{
	/*
	 * We store the offset to the code as a negative offset from
	 * the start of the alt_entry, to support the VDSO. This
	 * routine converts that back into an actual address.
	 */
	return (unsigned int *)((unsigned long)fcur + offset);
}

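/*
 * Copy one instruction from the alternative section to its final home,
 * rewriting relative branches that target code outside the alternative
 * block so they still reach the original target. Returns non-zero if a
 * branch cannot be translated (e.g. the target is out of range).
 */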
static int patch_alt_instruction(unsigned int *src, unsigned int *dest,
				 unsigned int *alt_start, unsigned int *alt_end)
{
	unsigned int instr;

	instr = *src;

	if (instr_is_relative_branch(*src)) {
		unsigned int *target = (unsigned int *)branch_target(src);

		/* Branch within the section doesn't need translating */
		if (target < alt_start || target >= alt_end) {
			instr = translate_branch(dest, src);
			if (!instr)
				return 1;
		}
	}

	patch_instruction(dest, instr);

	return 0;
}

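/*
 * Patch a single feature section. If the masked feature value matches this
 * entry, the code laid down at build time is already correct and is left
 * alone. Otherwise the alternative instructions are copied over the start
 * of the section and any remaining instructions are nopped out. Returns
 * non-zero if the alternative is larger than the section it replaces or if
 * a branch in it cannot be relocated.
 */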
static int patch_feature_section(unsigned long value, struct fixup_entry *fcur)
{
	unsigned int *start, *end, *alt_start, *alt_end, *src, *dest;

	start = calc_addr(fcur, fcur->start_off);
	end = calc_addr(fcur, fcur->end_off);
	alt_start = calc_addr(fcur, fcur->alt_start_off);
	alt_end = calc_addr(fcur, fcur->alt_end_off);

	if ((alt_end - alt_start) > (end - start))
		return 1;

	if ((value & fcur->mask) == fcur->value)
		return 0;

	src = alt_start;
	dest = start;

	for (; src < alt_end; src++, dest++) {
		if (patch_alt_instruction(src, dest, alt_start, alt_end))
			return 1;
	}

	for (; dest < end; dest++)
		patch_instruction(dest, PPC_INST_NOP);

	return 0;
}

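/*
 * Walk a table of fixup_entry records (fixup_start..fixup_end) and patch
 * each feature section against the given feature value, warning about any
 * section that cannot be patched.
 */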
void do_feature_fixups(unsigned long value, void *fixup_start, void *fixup_end)
{
	struct fixup_entry *fcur, *fend;

	fcur = fixup_start;
	fend = fixup_end;

	for (; fcur < fend; fcur++) {
		if (patch_feature_section(value, fcur)) {
			WARN_ON(1);
			printk("Unable to patch feature section at %p - %p" \
				" with %p - %p\n",
				calc_addr(fcur, fcur->start_off),
				calc_addr(fcur, fcur->end_off),
				calc_addr(fcur, fcur->alt_start_off),
				calc_addr(fcur, fcur->alt_end_off));
		}
	}
}

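/*
 * If this CPU supports lwsync, replace the full sync instructions recorded
 * in the lwsync fixup table with the lighter-weight lwsync. Each table
 * entry is a self-relative offset to the instruction to patch.
 */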
void do_lwsync_fixups(unsigned long value, void *fixup_start, void *fixup_end)
{
	long *start, *end;
	unsigned int *dest;

	if (!(value & CPU_FTR_LWSYNC))
		return;

	start = fixup_start;
	end = fixup_end;

	for (; start < end; start++) {
		dest = (void *)start + *start;
		patch_instruction(dest, PPC_INST_LWSYNC);
	}
}

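/*
 * On a relocatable 64-bit kernel running somewhere other than physical
 * address zero, copy the low-level exception vectors (_stext up to
 * __end_interrupts) from the running copy of the kernel down to the linked
 * address, since exceptions are always taken at the fixed low real
 * addresses.
 */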
static void do_final_fixups(void)
{
#if defined(CONFIG_PPC64) && defined(CONFIG_RELOCATABLE)
	int *src, *dest;
	unsigned long length;

	if (PHYSICAL_START == 0)
		return;

	src = (int *)(KERNELBASE + PHYSICAL_START);
	dest = (int *)KERNELBASE;
	length = (__end_interrupts - _stext) / sizeof(int);

	while (length--) {
		patch_instruction(dest, *src);
		src++;
		dest++;
	}
#endif
}

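/*
 * Apply all of the CPU, MMU, firmware and lwsync fixups to the kernel
 * text. PTRRELOC is needed because this may run before the kernel has been
 * relocated to its linked address.
 */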
void apply_feature_fixups(void)
{
	struct cpu_spec *spec = *PTRRELOC(&cur_cpu_spec);

	/*
	 * Apply the CPU-specific and firmware-specific fixups to kernel text
	 * (nop out sections not relevant to this CPU or this firmware).
	 */
	do_feature_fixups(spec->cpu_features,
			  PTRRELOC(&__start___ftr_fixup),
			  PTRRELOC(&__stop___ftr_fixup));

	do_feature_fixups(spec->mmu_features,
			  PTRRELOC(&__start___mmu_ftr_fixup),
			  PTRRELOC(&__stop___mmu_ftr_fixup));

	do_lwsync_fixups(spec->cpu_features,
			 PTRRELOC(&__start___lwsync_fixup),
			 PTRRELOC(&__stop___lwsync_fixup));

#ifdef CONFIG_PPC64
	do_feature_fixups(powerpc_firmware_features,
			  &__start___fw_ftr_fixup, &__stop___fw_ftr_fixup);
#endif
	do_final_fixups();
}

#ifdef CONFIG_FTR_FIXUP_SELFTEST

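/*
 * Self-tests for the patching code above. The ftr_fixup_test* and
 * lwsync_fixup_test* blocks referenced below are small instruction
 * sequences defined in assembly elsewhere in the kernel; each test either
 * builds a fixup_entry by hand and patches it, or checks the result of the
 * fixups already applied at boot.
 */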
#define check(x)	\
	if (!(x)) printk("feature-fixups: test failed at line %d\n", __LINE__);

/* This must be after the text it fixes up, vmlinux.lds.S enforces that atm */
static struct fixup_entry fixup;

static long calc_offset(struct fixup_entry *entry, unsigned int *p)
{
	return (unsigned long)p - (unsigned long)entry;
}

static void test_basic_patching(void)
{
	extern unsigned int ftr_fixup_test1;
	extern unsigned int end_ftr_fixup_test1;
	extern unsigned int ftr_fixup_test1_orig;
	extern unsigned int ftr_fixup_test1_expected;
	int size = &end_ftr_fixup_test1 - &ftr_fixup_test1;

	fixup.value = fixup.mask = 8;
	fixup.start_off = calc_offset(&fixup, &ftr_fixup_test1 + 1);
	fixup.end_off = calc_offset(&fixup, &ftr_fixup_test1 + 2);
	fixup.alt_start_off = fixup.alt_end_off = 0;

	/* Sanity check */
	check(memcmp(&ftr_fixup_test1, &ftr_fixup_test1_orig, size) == 0);

	/* Check we don't patch if the value matches */
	patch_feature_section(8, &fixup);
	check(memcmp(&ftr_fixup_test1, &ftr_fixup_test1_orig, size) == 0);

	/* Check we do patch if the value doesn't match */
	patch_feature_section(0, &fixup);
	check(memcmp(&ftr_fixup_test1, &ftr_fixup_test1_expected, size) == 0);

	/* Check we do patch if the mask doesn't match */
	memcpy(&ftr_fixup_test1, &ftr_fixup_test1_orig, size);
	check(memcmp(&ftr_fixup_test1, &ftr_fixup_test1_orig, size) == 0);
	patch_feature_section(~8, &fixup);
	check(memcmp(&ftr_fixup_test1, &ftr_fixup_test1_expected, size) == 0);
}

static void test_alternative_patching(void)
{
	extern unsigned int ftr_fixup_test2;
	extern unsigned int end_ftr_fixup_test2;
	extern unsigned int ftr_fixup_test2_orig;
	extern unsigned int ftr_fixup_test2_alt;
	extern unsigned int ftr_fixup_test2_expected;
	int size = &end_ftr_fixup_test2 - &ftr_fixup_test2;

	fixup.value = fixup.mask = 0xF;
	fixup.start_off = calc_offset(&fixup, &ftr_fixup_test2 + 1);
	fixup.end_off = calc_offset(&fixup, &ftr_fixup_test2 + 2);
	fixup.alt_start_off = calc_offset(&fixup, &ftr_fixup_test2_alt);
	fixup.alt_end_off = calc_offset(&fixup, &ftr_fixup_test2_alt + 1);

	/* Sanity check */
	check(memcmp(&ftr_fixup_test2, &ftr_fixup_test2_orig, size) == 0);

	/* Check we don't patch if the value matches */
	patch_feature_section(0xF, &fixup);
	check(memcmp(&ftr_fixup_test2, &ftr_fixup_test2_orig, size) == 0);

	/* Check we do patch if the value doesn't match */
	patch_feature_section(0, &fixup);
	check(memcmp(&ftr_fixup_test2, &ftr_fixup_test2_expected, size) == 0);

	/* Check we do patch if the mask doesn't match */
	memcpy(&ftr_fixup_test2, &ftr_fixup_test2_orig, size);
	check(memcmp(&ftr_fixup_test2, &ftr_fixup_test2_orig, size) == 0);
	patch_feature_section(~0xF, &fixup);
	check(memcmp(&ftr_fixup_test2, &ftr_fixup_test2_expected, size) == 0);
}

static void test_alternative_case_too_big(void)
{
	extern unsigned int ftr_fixup_test3;
	extern unsigned int end_ftr_fixup_test3;
	extern unsigned int ftr_fixup_test3_orig;
	extern unsigned int ftr_fixup_test3_alt;
	int size = &end_ftr_fixup_test3 - &ftr_fixup_test3;

	fixup.value = fixup.mask = 0xC;
	fixup.start_off = calc_offset(&fixup, &ftr_fixup_test3 + 1);
	fixup.end_off = calc_offset(&fixup, &ftr_fixup_test3 + 2);
	fixup.alt_start_off = calc_offset(&fixup, &ftr_fixup_test3_alt);
	fixup.alt_end_off = calc_offset(&fixup, &ftr_fixup_test3_alt + 2);

	/* Sanity check */
	check(memcmp(&ftr_fixup_test3, &ftr_fixup_test3_orig, size) == 0);

	/* Expect nothing to be patched, and the error returned to us */
	check(patch_feature_section(0xF, &fixup) == 1);
	check(memcmp(&ftr_fixup_test3, &ftr_fixup_test3_orig, size) == 0);
	check(patch_feature_section(0, &fixup) == 1);
	check(memcmp(&ftr_fixup_test3, &ftr_fixup_test3_orig, size) == 0);
	check(patch_feature_section(~0xF, &fixup) == 1);
	check(memcmp(&ftr_fixup_test3, &ftr_fixup_test3_orig, size) == 0);
}

static void test_alternative_case_too_small(void)
{
	extern unsigned int ftr_fixup_test4;
	extern unsigned int end_ftr_fixup_test4;
	extern unsigned int ftr_fixup_test4_orig;
	extern unsigned int ftr_fixup_test4_alt;
	extern unsigned int ftr_fixup_test4_expected;
	int size = &end_ftr_fixup_test4 - &ftr_fixup_test4;
	unsigned long flag;

	/* Check a high-bit flag */
	flag = 1UL << ((sizeof(unsigned long) - 1) * 8);
	fixup.value = fixup.mask = flag;
	fixup.start_off = calc_offset(&fixup, &ftr_fixup_test4 + 1);
	fixup.end_off = calc_offset(&fixup, &ftr_fixup_test4 + 5);
	fixup.alt_start_off = calc_offset(&fixup, &ftr_fixup_test4_alt);
	fixup.alt_end_off = calc_offset(&fixup, &ftr_fixup_test4_alt + 2);

	/* Sanity check */
	check(memcmp(&ftr_fixup_test4, &ftr_fixup_test4_orig, size) == 0);

	/* Check we don't patch if the value matches */
	patch_feature_section(flag, &fixup);
	check(memcmp(&ftr_fixup_test4, &ftr_fixup_test4_orig, size) == 0);

	/* Check we do patch if the value doesn't match */
	patch_feature_section(0, &fixup);
	check(memcmp(&ftr_fixup_test4, &ftr_fixup_test4_expected, size) == 0);

	/* Check we do patch if the mask doesn't match */
	memcpy(&ftr_fixup_test4, &ftr_fixup_test4_orig, size);
	check(memcmp(&ftr_fixup_test4, &ftr_fixup_test4_orig, size) == 0);
	patch_feature_section(~flag, &fixup);
	check(memcmp(&ftr_fixup_test4, &ftr_fixup_test4_expected, size) == 0);
}

static void test_alternative_case_with_branch(void)
{
	extern unsigned int ftr_fixup_test5;
	extern unsigned int end_ftr_fixup_test5;
	extern unsigned int ftr_fixup_test5_expected;
	int size = &end_ftr_fixup_test5 - &ftr_fixup_test5;

	check(memcmp(&ftr_fixup_test5, &ftr_fixup_test5_expected, size) == 0);
}

static void test_alternative_case_with_external_branch(void)
{
	extern unsigned int ftr_fixup_test6;
	extern unsigned int end_ftr_fixup_test6;
	extern unsigned int ftr_fixup_test6_expected;
	int size = &end_ftr_fixup_test6 - &ftr_fixup_test6;

	check(memcmp(&ftr_fixup_test6, &ftr_fixup_test6_expected, size) == 0);
}

static void test_cpu_macros(void)
{
	extern u8 ftr_fixup_test_FTR_macros;
	extern u8 ftr_fixup_test_FTR_macros_expected;
	unsigned long size = &ftr_fixup_test_FTR_macros_expected -
			     &ftr_fixup_test_FTR_macros;

	/* The fixups have already been done for us during boot */
	check(memcmp(&ftr_fixup_test_FTR_macros,
		     &ftr_fixup_test_FTR_macros_expected, size) == 0);
}

static void test_fw_macros(void)
{
#ifdef CONFIG_PPC64
	extern u8 ftr_fixup_test_FW_FTR_macros;
	extern u8 ftr_fixup_test_FW_FTR_macros_expected;
	unsigned long size = &ftr_fixup_test_FW_FTR_macros_expected -
			     &ftr_fixup_test_FW_FTR_macros;

	/* The fixups have already been done for us during boot */
	check(memcmp(&ftr_fixup_test_FW_FTR_macros,
		     &ftr_fixup_test_FW_FTR_macros_expected, size) == 0);
#endif
}

static void test_lwsync_macros(void)
{
	extern u8 lwsync_fixup_test;
	extern u8 end_lwsync_fixup_test;
	extern u8 lwsync_fixup_test_expected_LWSYNC;
	extern u8 lwsync_fixup_test_expected_SYNC;
	unsigned long size = &end_lwsync_fixup_test -
			     &lwsync_fixup_test;

	/* The fixups have already been done for us during boot */
	if (cur_cpu_spec->cpu_features & CPU_FTR_LWSYNC) {
		check(memcmp(&lwsync_fixup_test,
			     &lwsync_fixup_test_expected_LWSYNC, size) == 0);
	} else {
		check(memcmp(&lwsync_fixup_test,
			     &lwsync_fixup_test_expected_SYNC, size) == 0);
	}
}

static int __init test_feature_fixups(void)
{
	printk(KERN_DEBUG "Running feature fixup self-tests ...\n");

	test_basic_patching();
	test_alternative_patching();
	test_alternative_case_too_big();
	test_alternative_case_too_small();
	test_alternative_case_with_branch();
	test_alternative_case_with_external_branch();
	test_cpu_macros();
	test_fw_macros();
	test_lwsync_macros();

	return 0;
}
late_initcall(test_feature_fixups);

#endif /* CONFIG_FTR_FIXUP_SELFTEST */