/*
 * PowerPC64 SLB support.
 *
 * Copyright (C) 2004 David Gibson <dwg@au.ibm.com>, IBM
 * Based on earlier code written by:
 * Dave Engebretsen and Mike Corrigan {engebret|mikejc}@us.ibm.com
 * Copyright (c) 2001 Dave Engebretsen
 * Copyright (C) 2002 Anton Blanchard <anton@au.ibm.com>, IBM
 *
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#undef DEBUG

#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>
#include <asm/paca.h>
#include <asm/cputable.h>
#include <asm/cacheflush.h>
#include <asm/smp.h>
#include <asm/firmware.h>
#include <linux/compiler.h>

#ifdef DEBUG
#define DBG(fmt...) udbg_printf(fmt)
#else
#define DBG(fmt...)
#endif

extern void slb_allocate_realmode(unsigned long ea);
extern void slb_allocate_user(unsigned long ea);

static void slb_allocate(unsigned long ea)
{
	/* Currently, we do real mode for all SLBs including user, but
	 * that will change if we bring back dynamic VSIDs
	 */
	slb_allocate_realmode(ea);
}

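/*
 * An SLB entry is written with the "slbmte" instruction, which takes
 * two register operands: RB holds the ESID, the valid bit and the
 * entry index, while RS holds the VSID and the protection/page-size
 * flags.  The helpers below build those two words from an effective
 * address.
 */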
static inline unsigned long mk_esid_data(unsigned long ea, unsigned long slot)
{
	return (ea & ESID_MASK) | SLB_ESID_V | slot;
}

static inline unsigned long mk_vsid_data(unsigned long ea, unsigned long flags)
{
	return (get_kernel_vsid(ea) << SLB_VSID_SHIFT) | flags;
}

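/*
 * The SLB shadow buffer mirrors the bolted SLB entries in memory.  On
 * pSeries it is visible to the hypervisor (PHYP, see the comment in
 * create_shadowed_slbe() below), which uses it to restore the bolted
 * entries when it has had to wipe the SLB behind our back.
 */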
static inline void slb_shadow_update(unsigned long ea,
				     unsigned long flags,
				     unsigned long entry)
{
	/*
	 * Clear the ESID first so the entry is not valid while we are
	 * updating it.  No write barriers are needed here, provided
	 * we only update the current CPU's SLB shadow buffer.
	 */
	get_slb_shadow()->save_area[entry].esid = 0;
	get_slb_shadow()->save_area[entry].vsid = mk_vsid_data(ea, flags);
	get_slb_shadow()->save_area[entry].esid = mk_esid_data(ea, entry);
}

static inline void slb_shadow_clear(unsigned long entry)
{
	get_slb_shadow()->save_area[entry].esid = 0;
}

static inline void create_shadowed_slbe(unsigned long ea, unsigned long flags,
					unsigned long entry)
{
	/*
	 * Updating the shadow buffer before writing the SLB ensures
	 * we don't get a stale entry here if we get preempted by PHYP
	 * between these two statements.
	 */
	slb_shadow_update(ea, flags, entry);

	asm volatile("slbmte %0,%1" :
		     : "r" (mk_vsid_data(ea, flags)),
		       "r" (mk_esid_data(ea, entry))
		     : "memory" );
}

void slb_flush_and_rebolt(void)
{
	/* If you change this make sure you change SLB_NUM_BOLTED
	 * appropriately too. */
	unsigned long linear_llp, vmalloc_llp, lflags, vflags;
	unsigned long ksp_esid_data;

	WARN_ON(!irqs_disabled());

	linear_llp = mmu_psize_defs[mmu_linear_psize].sllp;
	vmalloc_llp = mmu_psize_defs[mmu_vmalloc_psize].sllp;
	lflags = SLB_VSID_KERNEL | linear_llp;
	vflags = SLB_VSID_KERNEL | vmalloc_llp;

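	/*
	 * The kernel stack normally lives in the first linear mapping
	 * segment, which is already bolted in slot 0.  In that case
	 * leave slot 2 invalid rather than bolting a second entry for
	 * the same segment: two valid SLB entries translating one EA
	 * would be architecturally illegal.
	 */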
	ksp_esid_data = mk_esid_data(get_paca()->kstack, 2);
	if ((ksp_esid_data & ESID_MASK) == PAGE_OFFSET) {
		ksp_esid_data &= ~SLB_ESID_V;
		slb_shadow_clear(2);
	} else {
		/* Update stack entry; others don't change */
		slb_shadow_update(get_paca()->kstack, lflags, 2);
	}

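	/*
	 * slbia leaves SLB entry 0 alone (hence the explicit slbmte of
	 * entry 0 in slb_initialize()), so the linear mapping bolted
	 * in slot 0 survives and only slots 1 and 2 need rewriting.
	 */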
	/* We need to do this all in asm, so we're sure we don't touch
	 * the stack between the slbia and rebolting it. */
	asm volatile("isync\n"
		     "slbia\n"
		     /* Slot 1 - first VMALLOC segment */
		     "slbmte %0,%1\n"
		     /* Slot 2 - kernel stack */
		     "slbmte %2,%3\n"
		     "isync"
		     :: "r"(mk_vsid_data(VMALLOC_START, vflags)),
			"r"(mk_esid_data(VMALLOC_START, 1)),
			"r"(mk_vsid_data(ksp_esid_data, lflags)),
			"r"(ksp_esid_data)
		     : "memory");
}

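/*
 * Called after the page-size encoding for the vmalloc region
 * (mmu_vmalloc_psize) has changed: rewrite the shadow entry for
 * slot 1, then flush and re-bolt the SLB so the new flags take
 * effect in the hardware entry too.
 */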
void slb_vmalloc_update(void)
{
	unsigned long vflags;

	vflags = SLB_VSID_KERNEL | mmu_psize_defs[mmu_vmalloc_psize].sllp;
	slb_shadow_update(VMALLOC_START, vflags, 1);
	slb_flush_and_rebolt();
}

/* Flush all user entries from the segment table of the current processor. */
void switch_slb(struct task_struct *tsk, struct mm_struct *mm)
{
	unsigned long offset = get_paca()->slb_cache_ptr;
	unsigned long esid_data = 0;
	unsigned long pc = KSTK_EIP(tsk);
	unsigned long stack = KSTK_ESP(tsk);
	unsigned long unmapped_base;

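	/*
	 * The slb_cache in the PACA records the ESIDs of user entries
	 * inserted since the last switch.  If it hasn't overflowed,
	 * invalidate just those entries with slbie; otherwise fall
	 * back to a full flush and re-bolt.
	 */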
	if (offset <= SLB_CACHE_ENTRIES) {
		int i;
		asm volatile("isync" : : : "memory");
		for (i = 0; i < offset; i++) {
			esid_data = ((unsigned long)get_paca()->slb_cache[i]
				<< SID_SHIFT) | SLBIE_C;
			asm volatile("slbie %0" : : "r" (esid_data));
		}
		asm volatile("isync" : : : "memory");
	} else {
		slb_flush_and_rebolt();
	}

	/* Workaround POWER5 < DD2.1 issue */
	if (offset == 1 || offset > SLB_CACHE_ENTRIES)
		asm volatile("slbie %0" : : "r" (esid_data));

	get_paca()->slb_cache_ptr = 0;
	get_paca()->context = mm->context;

	/*
	 * Preload some userspace segments into the SLB: the segments
	 * containing the PC, the stack and the unmapped-area base, so
	 * the task doesn't take SLB misses on them immediately on
	 * return to userspace.
	 */
	if (test_tsk_thread_flag(tsk, TIF_32BIT))
		unmapped_base = TASK_UNMAPPED_BASE_USER32;
	else
		unmapped_base = TASK_UNMAPPED_BASE_USER64;

	if (is_kernel_addr(pc))
		return;
	slb_allocate(pc);

	if (GET_ESID(pc) == GET_ESID(stack))
		return;

	if (is_kernel_addr(stack))
		return;
	slb_allocate(stack);

	if ((GET_ESID(pc) == GET_ESID(unmapped_base))
	    || (GET_ESID(stack) == GET_ESID(unmapped_base)))
		return;

	if (is_kernel_addr(unmapped_base))
		return;
	slb_allocate(unmapped_base);
}

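/*
 * The assembly SLB miss handlers contain load-immediate instructions
 * for the VSID flags whose correct values depend on the page sizes
 * detected at boot.  Patch the immediate field of such an instruction
 * in place, then flush the icache so the updated instruction is
 * visible to instruction fetch.
 */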
static inline void patch_slb_encoding(unsigned int *insn_addr,
				      unsigned int immed)
{
	/* Assume the instruction had a "0" immediate value, just
	 * "or" in the new value
	 */
	*insn_addr |= immed;
	flush_icache_range((unsigned long)insn_addr,
			   (unsigned long)insn_addr + 4);
}

void slb_initialize(void)
{
	unsigned long linear_llp, vmalloc_llp, io_llp;
	unsigned long lflags, vflags;
	static int slb_encoding_inited;
	extern unsigned int *slb_miss_kernel_load_linear;
	extern unsigned int *slb_miss_kernel_load_io;

	/* Prepare our SLB miss handler based on our page size */
	linear_llp = mmu_psize_defs[mmu_linear_psize].sllp;
	io_llp = mmu_psize_defs[mmu_io_psize].sllp;
	vmalloc_llp = mmu_psize_defs[mmu_vmalloc_psize].sllp;
	get_paca()->vmalloc_sllp = SLB_VSID_KERNEL | vmalloc_llp;

	if (!slb_encoding_inited) {
		slb_encoding_inited = 1;
		patch_slb_encoding(slb_miss_kernel_load_linear,
				   SLB_VSID_KERNEL | linear_llp);
		patch_slb_encoding(slb_miss_kernel_load_io,
				   SLB_VSID_KERNEL | io_llp);

		DBG("SLB: linear LLP = %04lx\n", linear_llp);
		DBG("SLB: io     LLP = %04lx\n", io_llp);
	}

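	/*
	 * Start the round-robin replacement pointer past the bolted
	 * entries, so the SLB miss handler never casts one of them
	 * out.
	 */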
	get_paca()->stab_rr = SLB_NUM_BOLTED;

	/* On iSeries the bolted entries have already been set up by
	 * the hypervisor from the lparMap data in head.S */
	if (firmware_has_feature(FW_FEATURE_ISERIES))
		return;

	lflags = SLB_VSID_KERNEL | linear_llp;
	vflags = SLB_VSID_KERNEL | vmalloc_llp;

	/* Invalidate the entire SLB (even slot 0) & all the ERATS */
	asm volatile("isync":::"memory");
	asm volatile("slbmte %0,%0"::"r" (0) : "memory");
	asm volatile("isync; slbia; isync":::"memory");
	create_shadowed_slbe(PAGE_OFFSET, lflags, 0);

	create_shadowed_slbe(VMALLOC_START, vflags, 1);

	/* We don't bolt the stack for the time being - we're in boot,
	 * so the stack is in the bolted segment.  By the time it goes
	 * elsewhere, we'll call _switch() which will bolt in the new
	 * one. */
	asm volatile("isync":::"memory");
}