/*
 * Copyright (C) 2001 Ben. Herrenschmidt (benh@kernel.crashing.org)
 *
 * Modifications for ppc64:
 * Copyright (C) 2003 Dave Engebretsen <engebret@us.ibm.com>
 *
 * Copyright 2008 Michael Ellerman, IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
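
/*
 * Run-time patching of feature-dependent code sections: sections whose
 * required CPU features are not present are overwritten with nops.
 */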

#include <linux/kernel.h>
#include <asm/cputable.h>
#include <asm/code-patching.h>

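/*
 * One entry per feature-dependent code section.  The entries are typically
 * generated at build time and collected into their own linker section.
 * mask/value are checked against the CPU feature word passed to
 * do_feature_fixups(); start_off/end_off are byte offsets, relative to the
 * entry itself, of the instructions to patch.
 */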
struct fixup_entry {
	unsigned long	mask;
	unsigned long	value;
	long		start_off;
	long		end_off;
};

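/*
 * If the CPU's feature bits match this entry's mask/value, the code in the
 * section is wanted and left untouched.  Otherwise every instruction in the
 * range is replaced with a nop, and the caches are flushed so that the new
 * instructions become visible to instruction fetch.
 */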
static void patch_feature_section(unsigned long value, struct fixup_entry *fcur)
{
	unsigned int *pstart, *pend, *p;

	/* Feature is present, keep this section's code as-is */
	if ((value & fcur->mask) == fcur->value)
		return;

	pstart = ((unsigned int *)fcur) + (fcur->start_off / 4);
	pend = ((unsigned int *)fcur) + (fcur->end_off / 4);

	/* Nop out the section, flushing each modified word to memory */
	for (p = pstart; p < pend; p++) {
		*p = PPC_NOP_INSTR;
		asm volatile ("dcbst 0, %0" : : "r" (p));
	}
	asm volatile ("sync" : : : "memory");

	/* Invalidate the i-cache so the nops are refetched from memory */
	for (p = pstart; p < pend; p++)
		asm volatile ("icbi 0,%0" : : "r" (p));
	asm volatile ("sync; isync" : : : "memory");
}
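/*
 * Walk the array of fixup entries between fixup_start and fixup_end and
 * patch each section that the given feature value does not select.
 */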
void do_feature_fixups(unsigned long value, void *fixup_start, void *fixup_end)
{
	struct fixup_entry *fcur, *fend;

	fcur = fixup_start;
	fend = fixup_end;

	for (; fcur < fend; fcur++)
		patch_feature_section(value, fcur);
}
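
/*
 * Sketch of a typical call site, assuming the usual __start___ftr_fixup /
 * __stop___ftr_fixup bounds provided by the linker script:
 *
 *	do_feature_fixups(cur_cpu_spec->cpu_features,
 *			  &__start___ftr_fixup, &__stop___ftr_fixup);
 */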