/*
 * PowerPC64 Segment Translation Support.
 *
 * Dave Engebretsen and Mike Corrigan {engebret|mikejc}@us.ibm.com
 * Copyright (c) 2001 Dave Engebretsen
 *
 * Copyright (C) 2002 Anton Blanchard <anton@au.ibm.com>, IBM
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>
#include <asm/paca.h>
#include <asm/cputable.h>
#include <asm/lmb.h>
#include <asm/abs_addr.h>
#include <asm/firmware.h>

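/*
 * A hardware segment table entry: one doubleword of ESID plus the
 * valid and key protection bits, and one doubleword of VSID.
 */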
struct stab_entry {
	unsigned long esid_data;
	unsigned long vsid_data;
};

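/*
 * Per-CPU cache of the user segment table entries made since the last
 * context switch.  While it has not overflowed (the ptr stays at or
 * below NR_STAB_CACHE_ENTRIES), switch_stab() can invalidate just the
 * cached entries instead of sweeping the entire table.
 */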
#define NR_STAB_CACHE_ENTRIES 8
DEFINE_PER_CPU(long, stab_cache_ptr);
DEFINE_PER_CPU(long, stab_cache[NR_STAB_CACHE_ENTRIES]);

/*
 * Create a segment table entry for the given esid/vsid pair.
 */
static int make_ste(unsigned long stab, unsigned long esid, unsigned long vsid)
{
	unsigned long esid_data, vsid_data;
	unsigned long entry, group, old_esid, castout_entry, i;
	unsigned int global_entry;
	struct stab_entry *ste, *castout_ste;
	unsigned long kernel_segment = (esid << SID_SHIFT) >= PAGE_OFFSET;

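	/*
	 * Build the two STE doublewords: Kp is always set, Ks is added
	 * for user segments, and the valid bit marks the entry live.
	 */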
	vsid_data = vsid << STE_VSID_SHIFT;
	esid_data = esid << SID_SHIFT | STE_ESID_KP | STE_ESID_V;
	if (!kernel_segment)
		esid_data |= STE_ESID_KS;

	/* Search the primary group first. */
	global_entry = (esid & 0x1f) << 3;
	ste = (struct stab_entry *)(stab | ((esid & 0x1f) << 7));

	/* Find an empty entry, if one exists. */
	for (group = 0; group < 2; group++) {
		for (entry = 0; entry < 8; entry++, ste++) {
			if (!(ste->esid_data & STE_ESID_V)) {
				ste->vsid_data = vsid_data;
				asm volatile("eieio":::"memory");
				ste->esid_data = esid_data;
				return (global_entry | entry);
			}
		}
		/* Now search the secondary group. */
		global_entry = ((~esid) & 0x1f) << 3;
		ste = (struct stab_entry *)(stab | (((~esid) & 0x1f) << 7));
	}

	/*
	 * Could not find an empty entry; pick a victim by round-robin
	 * selection, searching all entries in the two groups.
	 */
	castout_entry = get_paca()->stab_rr;
	for (i = 0; i < 16; i++) {
		if (castout_entry < 8) {
			global_entry = (esid & 0x1f) << 3;
			ste = (struct stab_entry *)(stab | ((esid & 0x1f) << 7));
			castout_ste = ste + castout_entry;
		} else {
			global_entry = ((~esid) & 0x1f) << 3;
			ste = (struct stab_entry *)(stab | (((~esid) & 0x1f) << 7));
			castout_ste = ste + (castout_entry - 8);
		}

		/* Don't cast out the first kernel segment */
		if ((castout_ste->esid_data & ESID_MASK) != PAGE_OFFSET)
			break;

		castout_entry = (castout_entry + 1) & 0xf;
	}

	get_paca()->stab_rr = (castout_entry + 1) & 0xf;

	/* Modify the old entry to the new value. */

	/* Force previous translations to complete. DRENG */
	asm volatile("isync" : : : "memory");

	old_esid = castout_ste->esid_data >> SID_SHIFT;
	castout_ste->esid_data = 0;		/* Invalidate old entry */

	asm volatile("sync" : : : "memory");	/* Order update */

	castout_ste->vsid_data = vsid_data;
	asm volatile("eieio" : : : "memory");	/* Order update */
	castout_ste->esid_data = esid_data;

	asm volatile("slbie %0" : : "r" (old_esid << SID_SHIFT));
	/* Ensure completion of slbie */
	asm volatile("sync" : : : "memory");

	return (global_entry | (castout_entry & 0x7));
}

/*
 * Allocate a segment table entry for the given ea and mm.  Returns
 * 0 on success, or 1 if the address is an out-of-range user address
 * or no mm is supplied.
 */
static int __ste_allocate(unsigned long ea, struct mm_struct *mm)
{
	unsigned long vsid;
	unsigned char stab_entry;
	unsigned long offset;

	/* Kernel or user address? */
	if (is_kernel_addr(ea)) {
		vsid = get_kernel_vsid(ea);
	} else {
		if ((ea >= TASK_SIZE_USER64) || (!mm))
			return 1;

		vsid = get_vsid(mm->context.id, ea);
	}

	stab_entry = make_ste(get_paca()->stab_addr, GET_ESID(ea), vsid);

	if (!is_kernel_addr(ea)) {
		offset = __get_cpu_var(stab_cache_ptr);
		if (offset < NR_STAB_CACHE_ENTRIES)
			__get_cpu_var(stab_cache[offset++]) = stab_entry;
		else
			offset = NR_STAB_CACHE_ENTRIES+1;
		__get_cpu_var(stab_cache_ptr) = offset;

		/* Order update */
		asm volatile("sync":::"memory");
	}

	return 0;
}

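/*
 * Allocate a segment table entry for a faulting effective address,
 * using the current task's mm.
 */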
int ste_allocate(unsigned long ea)
{
	return __ste_allocate(ea, current->mm);
}

/*
 * Do the segment table work for a context switch: flush all user
 * entries from the table, then preload some probably useful entries
 * for the new task.
 */
void switch_stab(struct task_struct *tsk, struct mm_struct *mm)
{
	struct stab_entry *stab = (struct stab_entry *) get_paca()->stab_addr;
	struct stab_entry *ste;
	unsigned long offset = __get_cpu_var(stab_cache_ptr);
	unsigned long pc = KSTK_EIP(tsk);
	unsigned long stack = KSTK_ESP(tsk);
	unsigned long unmapped_base;

	/* Force previous translations to complete. DRENG */
	asm volatile("isync" : : : "memory");

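	/*
	 * If the STE cache did not overflow, only the user entries
	 * recorded there need invalidating; otherwise sweep the whole
	 * table for user segments.
	 */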
	if (offset <= NR_STAB_CACHE_ENTRIES) {
		int i;

		for (i = 0; i < offset; i++) {
			ste = stab + __get_cpu_var(stab_cache[i]);
			ste->esid_data = 0; /* invalidate entry */
		}
	} else {
		unsigned long entry;

		/* Invalidate all entries. */
		ste = stab;

		/* Never flush the first entry. */
		ste += 1;
		for (entry = 1;
		     entry < (HW_PAGE_SIZE / sizeof(struct stab_entry));
		     entry++, ste++) {
			unsigned long ea;
			ea = ste->esid_data & ESID_MASK;
			if (!is_kernel_addr(ea)) {
				ste->esid_data = 0;
			}
		}
	}

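	/* Flush stale segment translations out of the lookaside buffer. */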
	asm volatile("sync; slbia; sync":::"memory");

	__get_cpu_var(stab_cache_ptr) = 0;

	/* Now preload some entries for the new task */
	if (test_tsk_thread_flag(tsk, TIF_32BIT))
		unmapped_base = TASK_UNMAPPED_BASE_USER32;
	else
		unmapped_base = TASK_UNMAPPED_BASE_USER64;

	__ste_allocate(pc, mm);

	if (GET_ESID(pc) == GET_ESID(stack))
		return;

	__ste_allocate(stack, mm);

	if ((GET_ESID(pc) == GET_ESID(unmapped_base))
	    || (GET_ESID(stack) == GET_ESID(unmapped_base)))
		return;

	__ste_allocate(unmapped_base, mm);

	/* Order update */
	asm volatile("sync" : : : "memory");
}

/*
 * Allocate segment tables for secondary CPUs.  These must all go in
 * the first (bolted) segment, so that do_stab_bolted won't get a
 * recursive segment miss on the segment table itself.
 */
void stabs_alloc(void)
{
	int cpu;

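	/* CPUs with an SLB do not use a segment table. */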
	if (cpu_has_feature(CPU_FTR_SLB))
		return;

	for_each_possible_cpu(cpu) {
		unsigned long newstab;

		if (cpu == 0)
			continue; /* stab for CPU 0 is statically allocated */

		newstab = lmb_alloc_base(HW_PAGE_SIZE, HW_PAGE_SIZE,
					 1<<SID_SHIFT);
		newstab = (unsigned long)__va(newstab);

		memset((void *)newstab, 0, HW_PAGE_SIZE);

		paca[cpu].stab_addr = newstab;
		paca[cpu].stab_real = virt_to_abs(newstab);
		printk(KERN_INFO "Segment table for CPU %d at 0x%lx "
		       "virtual, 0x%lx absolute\n",
		       cpu, paca[cpu].stab_addr, paca[cpu].stab_real);
	}
}

/*
 * Build an entry for the base kernel segment and put it into
 * the segment table or SLB.  All other segment table or SLB
 * entries are faulted in.
 */
void stab_initialize(unsigned long stab)
{
	unsigned long vsid = get_kernel_vsid(PAGE_OFFSET);
	unsigned long stabreal;

	asm volatile("isync; slbia; isync":::"memory");
	make_ste(stab, GET_ESID(PAGE_OFFSET), vsid);

	/* Order update */
	asm volatile("sync":::"memory");

	/* Set the ASR: real address of the segment table, low bit = valid */
	stabreal = get_paca()->stab_real | 0x1ul;

#ifdef CONFIG_PPC_ISERIES
	if (firmware_has_feature(FW_FEATURE_ISERIES)) {
		HvCall1(HvCallBaseSetASR, stabreal);
		return;
	}
#endif /* CONFIG_PPC_ISERIES */

	mtspr(SPRN_ASR, stabreal);
}