/*
 * PowerPC memory management structures
 *
 * Dave Engebretsen & Mike Corrigan <{engebret|mikejc}@us.ibm.com>
 *   PPC64 rework.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#ifndef _PPC64_MMU_H_
#define _PPC64_MMU_H_

#include <linux/config.h>
#include <asm/ppc_asm.h>	/* for ASM_CONST */
#include <asm/page.h>

/*
 * Segment table
 */

#define STE_ESID_V	0x80
#define STE_ESID_KS	0x20
#define STE_ESID_KP	0x10
#define STE_ESID_N	0x08

#define STE_VSID_SHIFT	12

/* Location of cpu0's segment table */
#define STAB0_PAGE	0x6
#define STAB0_PHYS_ADDR	(STAB0_PAGE<<12)

#ifndef __ASSEMBLY__
extern char initial_stab[];
#endif /* ! __ASSEMBLY__ */

/*
 * SLB
 */

#define SLB_NUM_BOLTED		3
#define SLB_CACHE_ENTRIES	8

/* Bits in the SLB ESID word */
#define SLB_ESID_V		ASM_CONST(0x0000000008000000) /* valid */

/* Bits in the SLB VSID word */
#define SLB_VSID_SHIFT		12
#define SLB_VSID_KS		ASM_CONST(0x0000000000000800)
#define SLB_VSID_KP		ASM_CONST(0x0000000000000400)
#define SLB_VSID_N		ASM_CONST(0x0000000000000200) /* no-execute */
#define SLB_VSID_L		ASM_CONST(0x0000000000000100) /* largepage */
#define SLB_VSID_C		ASM_CONST(0x0000000000000080) /* class */
#define SLB_VSID_LS		ASM_CONST(0x0000000000000070) /* size of largepage */

#define SLB_VSID_KERNEL		(SLB_VSID_KP)
#define SLB_VSID_USER		(SLB_VSID_KP|SLB_VSID_KS|SLB_VSID_C)

#define SLBIE_C			(0x08000000)

/*
 * Hash table
 */

#define HPTES_PER_GROUP	8

#define HPTE_V_AVPN_SHIFT	7
#define HPTE_V_AVPN		ASM_CONST(0xffffffffffffff80)
#define HPTE_V_AVPN_VAL(x)	(((x) & HPTE_V_AVPN) >> HPTE_V_AVPN_SHIFT)
#define HPTE_V_BOLTED		ASM_CONST(0x0000000000000010)
#define HPTE_V_LOCK		ASM_CONST(0x0000000000000008)
#define HPTE_V_LARGE		ASM_CONST(0x0000000000000004)
#define HPTE_V_SECONDARY	ASM_CONST(0x0000000000000002)
#define HPTE_V_VALID		ASM_CONST(0x0000000000000001)

#define HPTE_R_PP0		ASM_CONST(0x8000000000000000)
#define HPTE_R_TS		ASM_CONST(0x4000000000000000)
#define HPTE_R_RPN_SHIFT	12
#define HPTE_R_RPN		ASM_CONST(0x3ffffffffffff000)
#define HPTE_R_FLAGS		ASM_CONST(0x00000000000003ff)
#define HPTE_R_PP		ASM_CONST(0x0000000000000003)

/* Values for PP (assumes Ks=0, Kp=1) */
/* pp0 will always be 0 for Linux */
#define PP_RWXX	0	/* Supervisor read/write, User none */
#define PP_RWRX	1	/* Supervisor read/write, User read */
#define PP_RWRW	2	/* Supervisor read/write, User read/write */
#define PP_RXRX	3	/* Supervisor read,       User read */

#ifndef __ASSEMBLY__

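/*
 * One hash PTE: the two architected 64-bit doublewords, whose bits
 * are the HPTE_V_* and HPTE_R_* values defined above.
 */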
typedef struct {
	unsigned long v;
	unsigned long r;
} hpte_t;

extern hpte_t *htab_address;
extern unsigned long htab_hash_mask;

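/*
 * Primary hash for a virtual page number.  A 256MB segment contains
 * 2^16 4K pages (or 2^4 16MB large pages), so the low 16 (or 4) bits
 * of the VPN index the page within the segment and the remaining
 * bits belong to the VSID.
 */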
static inline unsigned long hpt_hash(unsigned long vpn, int large)
{
	unsigned long vsid;
	unsigned long page;

	if (large) {
		vsid = vpn >> 4;
		page = vpn & 0xf;
	} else {
		vsid = vpn >> 16;
		page = vpn & 0xffff;
	}

	return (vsid & 0x7fffffffffUL) ^ page;
}

static inline void __tlbie(unsigned long va, int large)
{
	/* clear top 16 bits, non-SLS segment */
	va &= ~(0xffffULL << 48);

	if (large) {
		va &= HPAGE_MASK;
		asm volatile("tlbie %0,1" : : "r"(va) : "memory");
	} else {
		va &= PAGE_MASK;
		asm volatile("tlbie %0,0" : : "r"(va) : "memory");
	}
}

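/*
 * tlbie broadcasts the invalidation to every processor in the
 * coherence domain; the ptesync before it orders prior PTE updates,
 * and the eieio/tlbsync/ptesync sequence afterwards waits for the
 * invalidation to complete everywhere.
 */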
static inline void tlbie(unsigned long va, int large)
{
	asm volatile("ptesync": : :"memory");
	__tlbie(va, large);
	asm volatile("eieio; tlbsync; ptesync": : :"memory");
}

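/*
 * tlbiel is the local form: it invalidates the entry only in the
 * executing CPU's TLB, so no broadcast or tlbsync is required.
 */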
static inline void __tlbiel(unsigned long va)
{
	/* clear top 16 bits, non-SLS segment */
	va &= ~(0xffffULL << 48);
	va &= PAGE_MASK;

	/*
	 * Thanks to Alan Modra we are now able to use machine-specific
	 * assembly instructions (like tlbiel) by using the gas -many flag.
	 * However we have to support older toolchains so for the moment
	 * we hardwire it.
	 */
#if 0
	asm volatile("tlbiel %0" : : "r"(va) : "memory");
#else
	asm volatile(".long 0x7c000224 | (%0 << 11)" : : "r"(va) : "memory");
#endif
}

static inline void tlbiel(unsigned long va)
{
	asm volatile("ptesync": : :"memory");
	__tlbiel(va);
	asm volatile("ptesync": : :"memory");
}

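/*
 * Recover the virtual address an HPTE maps from its V doubleword and
 * its slot number, inverting the hash to find the page index bits
 * in the non-large case.
 */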
static inline unsigned long slot2va(unsigned long hpte_v, unsigned long slot)
{
	unsigned long avpn = HPTE_V_AVPN_VAL(hpte_v);
	unsigned long va;

	va = avpn << 23;

	if (! (hpte_v & HPTE_V_LARGE)) {
		unsigned long vpi, pteg;

		pteg = slot / HPTES_PER_GROUP;
		if (hpte_v & HPTE_V_SECONDARY)
			pteg = ~pteg;

		vpi = ((va >> 28) ^ pteg) & htab_hash_mask;

		va |= vpi << PAGE_SHIFT;
	}

	return va;
}

/*
 * Handle a fault by adding an HPTE.  If the address can't be
 * determined to be valid via the Linux page tables, return 1.
 * If handled, return 0.
 */
extern int __hash_page(unsigned long ea, unsigned long access,
		       unsigned long vsid, pte_t *ptep, unsigned long trap,
		       int local);

extern void htab_finish_init(void);

extern void hpte_init_native(void);
extern void hpte_init_lpar(void);
extern void hpte_init_iSeries(void);

extern long pSeries_lpar_hpte_insert(unsigned long hpte_group,
				     unsigned long va, unsigned long prpn,
				     unsigned long vflags,
				     unsigned long rflags);
extern long native_hpte_insert(unsigned long hpte_group, unsigned long va,
			       unsigned long prpn,
			       unsigned long vflags, unsigned long rflags);

extern void stabs_alloc(void);

#endif /* __ASSEMBLY__ */

/*
 * VSID allocation
 *
 * We first generate a 36-bit "proto-VSID".  For kernel addresses this
 * is equal to the ESID; for user addresses it is:
 *	(context << 16) | (esid & 0xffff)
 *
 * The two forms are distinguishable because the top bit is 0 for user
 * addresses, whereas the top two bits are 1 for kernel addresses.
 * Proto-VSIDs with the top two bits equal to 0b10 are reserved for
 * now.
 *
 * The proto-VSIDs are then scrambled into real VSIDs with the
 * multiplicative hash:
 *
 *	VSID = (proto-VSID * VSID_MULTIPLIER) % VSID_MODULUS
 *	where	VSID_MULTIPLIER = 200730139, a 28-bit prime
 *		VSID_MODULUS	= 2^36-1 = 0xFFFFFFFFF
 *
 * This scramble is only well defined for proto-VSIDs below
 * 0xFFFFFFFFF, so both proto-VSID and actual VSID 0xFFFFFFFFF are
 * reserved.  VSID_MULTIPLIER is prime, so in particular it is
 * co-prime to VSID_MODULUS, making this a 1:1 scrambling function.
 * Because the modulus is 2^n-1 we can compute it efficiently without
 * a divide or extra multiply (see below).
 *
 * This scheme has several advantages over older methods:
 *
 *	- We have VSIDs allocated for every kernel address
 * (i.e. everything above 0xC000000000000000), except the very top
 * segment, which simplifies several things.
 *
 *	- We allow for 16 significant bits of ESID and 19 bits of
 * context for user addresses.  i.e. 16T (44 bits) of address space
 * for up to half a million contexts (although the page table
 * structure and context allocation will need changes to take
 * advantage of this).
 *
 *	- The scramble function gives robust scattering in the hash
 * table (at least based on some initial results).  The previous
 * method was more susceptible to pathological cases giving excessive
 * hash collisions.
 */
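/*
 * Worked example of the mod 2^n-1 trick (illustrative only, with
 * n = 4 so the modulus is 15): for x = 55 = 0x37,
 * (x >> 4) + (x & 0xf) = 3 + 7 = 10 = 55 % 15.  ASM_VSID_SCRAMBLE
 * and vsid_scramble() below do the same folding with n = 36.
 */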
/*
 * WARNING - If you change these you must make sure the asm
 * implementations in slb_allocate (slb_low.S), do_stab_bolted
 * (head.S) and ASM_VSID_SCRAMBLE (below) are changed accordingly.
 *
 * You'll also need to change the precomputed VSID values in head.S
 * which are used by the iSeries firmware.
 */

#define VSID_MULTIPLIER	ASM_CONST(200730139)	/* 28-bit prime */
#define VSID_BITS	36
#define VSID_MODULUS	((1UL<<VSID_BITS)-1)

#define CONTEXT_BITS	19
#define USER_ESID_BITS	16

#define USER_VSID_RANGE	(1UL << (USER_ESID_BITS + SID_SHIFT))

/*
 * This macro generates asm code to compute the VSID scramble
 * function.  Used in slb_allocate() and do_stab_bolted.  The function
 * computed is: (protovsid*VSID_MULTIPLIER) % VSID_MODULUS
 *
 *	rt = register containing the proto-VSID and into which the
 *		VSID will be stored
 *	rx = scratch register (clobbered)
 *
 *	- rt and rx must be different registers
 *	- The answer will end up in the low 36 bits of rt.  The higher
 *	  bits may contain other garbage, so you may need to mask the
 *	  result.
 */
#define ASM_VSID_SCRAMBLE(rt, rx)	\
	lis	rx,VSID_MULTIPLIER@h;					\
	ori	rx,rx,VSID_MULTIPLIER@l;				\
	mulld	rt,rt,rx;		/* rt = rt * MULTIPLIER */	\
									\
	srdi	rx,rt,VSID_BITS;					\
	clrldi	rt,rt,(64-VSID_BITS);					\
	add	rt,rt,rx;		/* add high and low bits */	\
	/* Now, rt == VSID (mod 2^36-1), and lies between 0 and		\
	 * 2^36-1+2^28-1.  That in particular means that if rt >=	\
	 * 2^36-1, then rt+1 has the 2^36 bit set.  So, if rt+1 has	\
	 * the bit clear, rt already has the answer we want, if it	\
	 * doesn't, the answer is the low 36 bits of rt+1.  So in all	\
	 * cases the answer is the low 36 bits of (rt + ((rt+1) >> 36))*/\
	addi	rx,rt,1;						\
	srdi	rx,rx,VSID_BITS;	/* extract 2^36 bit */		\
	add	rt,rt,rx

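/*
 * Illustrative use (assuming the proto-VSID has been loaded into r3
 * and r10 is free as a scratch register):
 *
 *	ASM_VSID_SCRAMBLE(r3,r10)
 *
 * after which the low 36 bits of r3 hold the VSID.
 */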

#ifndef __ASSEMBLY__

typedef unsigned long mm_context_id_t;

typedef struct {
	mm_context_id_t id;
#ifdef CONFIG_HUGETLB_PAGE
	u16 low_htlb_areas, high_htlb_areas;
#endif
} mm_context_t;


static inline unsigned long vsid_scramble(unsigned long protovsid)
{
#if 0
	/* The code below is equivalent to this function for arguments
	 * < 2^VSID_BITS, which is all this should ever be called
	 * with.  However gcc is not clever enough to compute the
	 * modulus (2^n-1) without a second multiply. */
	return ((protovsid * VSID_MULTIPLIER) % VSID_MODULUS);
#else /* 1 */
	unsigned long x;

	x = protovsid * VSID_MULTIPLIER;
	x = (x >> VSID_BITS) + (x & VSID_MODULUS);
	return (x + ((x+1) >> VSID_BITS)) & VSID_MODULUS;
#endif /* 1 */
}

/* This is only valid for addresses >= KERNELBASE */
static inline unsigned long get_kernel_vsid(unsigned long ea)
{
	return vsid_scramble(ea >> SID_SHIFT);
}

/* This is only valid for user addresses (which are below
 * 2^(USER_ESID_BITS + SID_SHIFT), i.e. 2^44) */
static inline unsigned long get_vsid(unsigned long context, unsigned long ea)
{
	return vsid_scramble((context << USER_ESID_BITS)
			     | (ea >> SID_SHIFT));
}

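/* Constant-expression form of the same scramble (using a true modulo),
 * usable where a compile-time constant is required. */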
#define VSID_SCRAMBLE(pvsid)	(((pvsid) * VSID_MULTIPLIER) % VSID_MODULUS)
#define KERNEL_VSID(ea)		VSID_SCRAMBLE(GET_ESID(ea))

#endif /* __ASSEMBLY__ */

#endif /* _PPC64_MMU_H_ */