/*
 * PowerPC memory management structures
 *
 * Dave Engebretsen & Mike Corrigan <{engebret|mikejc}@us.ibm.com>
 *   PPC64 rework.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#ifndef _PPC64_MMU_H_
#define _PPC64_MMU_H_

#include <linux/config.h>
#include <asm/page.h>

/*
 * Segment table
 */

#define STE_ESID_V		0x80
#define STE_ESID_KS		0x20
#define STE_ESID_KP		0x10
#define STE_ESID_N		0x08

#define STE_VSID_SHIFT		12

/* Location of cpu0's segment table */
#define STAB0_PAGE		0x9
#define STAB0_PHYS_ADDR		(STAB0_PAGE<<PAGE_SHIFT)
#define STAB0_VIRT_ADDR		(KERNELBASE+STAB0_PHYS_ADDR)

/*
 * SLB
 */

#define SLB_NUM_BOLTED		3
#define SLB_CACHE_ENTRIES	8

/* Bits in the SLB ESID word */
#define SLB_ESID_V		ASM_CONST(0x0000000008000000)	/* valid */

/* Bits in the SLB VSID word */
#define SLB_VSID_SHIFT		12
#define SLB_VSID_KS		ASM_CONST(0x0000000000000800)
#define SLB_VSID_KP		ASM_CONST(0x0000000000000400)
#define SLB_VSID_N		ASM_CONST(0x0000000000000200)	/* no-execute */
#define SLB_VSID_L		ASM_CONST(0x0000000000000100)	/* largepage */
#define SLB_VSID_C		ASM_CONST(0x0000000000000080)	/* class */
#define SLB_VSID_LS		ASM_CONST(0x0000000000000070)	/* size of largepage */

#define SLB_VSID_KERNEL		(SLB_VSID_KP|SLB_VSID_C)
#define SLB_VSID_USER		(SLB_VSID_KP|SLB_VSID_KS)

/*
 * Hash table
 */

#define HPTES_PER_GROUP	8

#define HPTE_V_AVPN_SHIFT	7
#define HPTE_V_AVPN		ASM_CONST(0xffffffffffffff80)
#define HPTE_V_AVPN_VAL(x)	(((x) & HPTE_V_AVPN) >> HPTE_V_AVPN_SHIFT)
#define HPTE_V_BOLTED		ASM_CONST(0x0000000000000010)
#define HPTE_V_LOCK		ASM_CONST(0x0000000000000008)
#define HPTE_V_LARGE		ASM_CONST(0x0000000000000004)
#define HPTE_V_SECONDARY	ASM_CONST(0x0000000000000002)
#define HPTE_V_VALID		ASM_CONST(0x0000000000000001)

#define HPTE_R_PP0		ASM_CONST(0x8000000000000000)
#define HPTE_R_TS		ASM_CONST(0x4000000000000000)
#define HPTE_R_RPN_SHIFT	12
#define HPTE_R_RPN		ASM_CONST(0x3ffffffffffff000)
#define HPTE_R_FLAGS		ASM_CONST(0x00000000000003ff)
#define HPTE_R_PP		ASM_CONST(0x0000000000000003)

/* Values for PP (assumes Ks=0, Kp=1) */
/* pp0 will always be 0 for linux */
#define PP_RWXX	0	/* Supervisor read/write, User none */
#define PP_RWRX	1	/* Supervisor read/write, User read */
#define PP_RWRW	2	/* Supervisor read/write, User read/write */
#define PP_RXRX	3	/* Supervisor read, User read */

#ifndef __ASSEMBLY__

typedef struct {
	unsigned long v;
	unsigned long r;
} hpte_t;

extern hpte_t *htab_address;
extern unsigned long htab_hash_mask;

static inline unsigned long hpt_hash(unsigned long vpn, int large)
{
	unsigned long vsid;
	unsigned long page;

	if (large) {
		vsid = vpn >> 4;
		page = vpn & 0xf;
	} else {
		vsid = vpn >> 16;
		page = vpn & 0xffff;
	}

	return (vsid & 0x7fffffffffUL) ^ page;
}

static inline void __tlbie(unsigned long va, int large)
{
	/* clear top 16 bits, non SLS segment */
	va &= ~(0xffffULL << 48);

	if (large) {
		va &= HPAGE_MASK;
		asm volatile("tlbie %0,1" : : "r"(va) : "memory");
	} else {
		va &= PAGE_MASK;
		asm volatile("tlbie %0,0" : : "r"(va) : "memory");
	}
}

static inline void tlbie(unsigned long va, int large)
{
	asm volatile("ptesync": : :"memory");
	__tlbie(va, large);
	asm volatile("eieio; tlbsync; ptesync": : :"memory");
}

static inline void __tlbiel(unsigned long va)
{
	/* clear top 16 bits, non SLS segment */
	va &= ~(0xffffULL << 48);
	va &= PAGE_MASK;

	/*
	 * Thanks to Alan Modra we are now able to use machine specific
	 * assembly instructions (like tlbiel) by using the gas -many flag.
	 * However we have to support older toolchains so for the moment
	 * we hardwire it.
	 */
#if 0
	asm volatile("tlbiel %0" : : "r"(va) : "memory");
#else
	asm volatile(".long 0x7c000224 | (%0 << 11)" : : "r"(va) : "memory");
#endif
}

static inline void tlbiel(unsigned long va)
{
	asm volatile("ptesync": : :"memory");
	__tlbiel(va);
	asm volatile("ptesync": : :"memory");
}

static inline unsigned long slot2va(unsigned long hpte_v, unsigned long slot)
{
	unsigned long avpn = HPTE_V_AVPN_VAL(hpte_v);
	unsigned long va;

	va = avpn << 23;

	if (! (hpte_v & HPTE_V_LARGE)) {
		unsigned long vpi, pteg;

		pteg = slot / HPTES_PER_GROUP;
		if (hpte_v & HPTE_V_SECONDARY)
			pteg = ~pteg;

		vpi = ((va >> 28) ^ pteg) & htab_hash_mask;

		va |= vpi << PAGE_SHIFT;
	}

	return va;
}

/*
 * Handle a fault by adding an HPTE. If the address can't be determined
 * to be valid via the Linux page tables, return 1. If handled, return 0.
 */
extern int __hash_page(unsigned long ea, unsigned long access,
		       unsigned long vsid, pte_t *ptep, unsigned long trap,
		       int local);

extern void htab_finish_init(void);

extern void hpte_init_native(void);
extern void hpte_init_lpar(void);
extern void hpte_init_iSeries(void);

extern long pSeries_lpar_hpte_insert(unsigned long hpte_group,
				     unsigned long va, unsigned long prpn,
				     unsigned long vflags,
				     unsigned long rflags);
extern long native_hpte_insert(unsigned long hpte_group, unsigned long va,
			       unsigned long prpn,
			       unsigned long vflags, unsigned long rflags);

#endif /* __ASSEMBLY__ */

/*
 * VSID allocation
 *
 * We first generate a 36-bit "proto-VSID".  For kernel addresses this
 * is equal to the ESID, for user addresses it is:
 *	(context << 15) | (esid & 0x7fff)
 *
 * The two forms are distinguishable because the top bit is 0 for user
 * addresses, whereas the top two bits are 1 for kernel addresses.
 * Proto-VSIDs with the top two bits equal to 0b10 are reserved for
 * now.
 *
 * The proto-VSIDs are then scrambled into real VSIDs with the
 * multiplicative hash:
 *
 *	VSID = (proto-VSID * VSID_MULTIPLIER) % VSID_MODULUS
 *	where	VSID_MULTIPLIER = 200730139 = 0xBF6E61B
 *		VSID_MODULUS = 2^36-1 = 0xFFFFFFFFF
 *
 * This scramble is only well defined for proto-VSIDs below
 * 0xFFFFFFFFF, so both proto-VSID and actual VSID 0xFFFFFFFFF are
 * reserved.  VSID_MULTIPLIER is prime, so in particular it is
 * co-prime to VSID_MODULUS, making this a 1:1 scrambling function.
 * Because the modulus is 2^n-1 we can compute it efficiently without
 * a divide or extra multiply (see below).
 *
 * This scheme has several advantages over older methods:
 *
 *	- We have VSIDs allocated for every kernel address
 * (i.e. everything above 0xC000000000000000), except the very top
 * segment, which simplifies several things.
 *
 *	- We allow for 15 significant bits of ESID and 20 bits of
 * context for user addresses, i.e. 8T (43 bits) of address space for
 * up to 1M contexts (although the page table structure and context
 * allocation will need changes to take advantage of this).
 *
 *	- The scramble function gives robust scattering in the hash
 * table (at least based on some initial results).  The previous
 * method was more susceptible to pathological cases giving excessive
 * hash collisions.
 */
/*
 * WARNING - If you change these you must make sure the asm
 * implementations in slb_allocate (slb_low.S), do_stab_bolted
 * (head.S) and ASM_VSID_SCRAMBLE (below) are changed accordingly.
 *
 * You'll also need to change the precomputed VSID values in head.S
 * which are used by the iSeries firmware.
 */

#define VSID_MULTIPLIER	ASM_CONST(200730139)	/* 28-bit prime */
#define VSID_BITS	36
#define VSID_MODULUS	((1UL<<VSID_BITS)-1)

#define CONTEXT_BITS	20
#define USER_ESID_BITS	15

/*
 * This macro generates asm code to compute the VSID scramble
 * function.  Used in slb_allocate() and do_stab_bolted.  The function
 * computed is: (protovsid*VSID_MULTIPLIER) % VSID_MODULUS
 *
 *	rt = register containing the proto-VSID and into which the
 *		VSID will be stored
 *	rx = scratch register (clobbered)
 *
 *	- rt and rx must be different registers
 *	- The answer will end up in the low 36 bits of rt.  The higher
 *	  bits may contain other garbage, so you may need to mask the
 *	  result.
 */
#define ASM_VSID_SCRAMBLE(rt, rx)	\
	lis	rx,VSID_MULTIPLIER@h;					\
	ori	rx,rx,VSID_MULTIPLIER@l;				\
	mulld	rt,rt,rx;		/* rt = rt * MULTIPLIER */	\
									\
	srdi	rx,rt,VSID_BITS;					\
	clrldi	rt,rt,(64-VSID_BITS);					\
	add	rt,rt,rx;		/* add high and low bits */	\
	/* Now, rt == VSID (mod 2^36-1), and lies between 0 and	\
	 * 2^36-1+2^28-1.  That in particular means that if rt >=	\
	 * 2^36-1, then rt+1 has the 2^36 bit set.  So, if rt+1 has	\
	 * the bit clear, rt already has the answer we want, if it	\
	 * doesn't, the answer is the low 36 bits of rt+1.  So in all	\
	 * cases the answer is the low 36 bits of (rt + ((rt+1) >> 36))*/\
	addi	rx,rt,1;						\
	srdi	rx,rx,VSID_BITS;	/* extract 2^36 bit */		\
	add	rt,rt,rx

#ifndef __ASSEMBLY__

typedef unsigned long mm_context_id_t;

typedef struct {
	mm_context_id_t id;
#ifdef CONFIG_HUGETLB_PAGE
	pgd_t *huge_pgdir;
	u16 htlb_segs; /* bitmask */
#endif
} mm_context_t;

static inline unsigned long vsid_scramble(unsigned long protovsid)
{
#if 0
	/* The code below is equivalent to this function for arguments
	 * < 2^VSID_BITS, which is all this should ever be called
	 * with.  However gcc is not clever enough to compute the
	 * modulus (2^n-1) without a second multiply. */
	return ((protovsid * VSID_MULTIPLIER) % VSID_MODULUS);
#else /* 1 */
	unsigned long x;

	x = protovsid * VSID_MULTIPLIER;
	x = (x >> VSID_BITS) + (x & VSID_MODULUS);
	return (x + ((x+1) >> VSID_BITS)) & VSID_MODULUS;
#endif /* 1 */
}

/* This is only valid for addresses >= KERNELBASE */
static inline unsigned long get_kernel_vsid(unsigned long ea)
{
	return vsid_scramble(ea >> SID_SHIFT);
}

/* This is only valid for user addresses (which are below 2^41) */
static inline unsigned long get_vsid(unsigned long context, unsigned long ea)
{
	return vsid_scramble((context << USER_ESID_BITS)
			     | (ea >> SID_SHIFT));
}
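
/*
 * Illustrative sketch, kept under #if 0 so it is never compiled: how the
 * helpers above derive VSIDs for a kernel and a user effective address.
 * The function name, the context value and the example addresses are
 * hypothetical; KERNELBASE comes from asm/page.h, included above.
 */
#if 0
static inline void vsid_example(void)
{
	/* Kernel EA: the proto-VSID is simply the ESID (ea >> SID_SHIFT). */
	unsigned long kvsid = get_kernel_vsid(KERNELBASE + 0x1000);

	/* User EA in context 1: the proto-VSID is
	 * (context << 15) | (esid & 0x7fff), as described above. */
	unsigned long uvsid = get_vsid(1, 0x10000000UL);
}
#endif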

#endif /* __ASSEMBLY__ */

#endif /* _PPC64_MMU_H_ */