/*
 * Low-level SLB routines
 *
 * Copyright (C) 2004 David Gibson <dwg@au.ibm.com>, IBM
 *
 * Based on earlier C version:
 * Dave Engebretsen and Mike Corrigan {engebret|mikejc}@us.ibm.com
 * Copyright (c) 2001 Dave Engebretsen
 * Copyright (C) 2002 Anton Blanchard <anton@au.ibm.com>, IBM
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <asm/processor.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/cputable.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/pgtable.h>
#include <asm/firmware.h>

/* void slb_allocate_realmode(unsigned long ea);
 *
 * Create an SLB entry for the given EA (user or kernel).
 * r3 = faulting address, r13 = PACA
 * r9, r10, r11 are clobbered by this function
 * No other registers are examined or changed.
 */
_GLOBAL(slb_allocate_realmode)
	/*
	 * check for bad kernel/user address
	 * (ea & ~REGION_MASK) >= PGTABLE_RANGE
	 */
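	/*
	 * One rotate-and-mask implements that check: rotating the EA
	 * left by 4 brings bits 4-17 (everything between the 4-bit
	 * region id and the 46-bit page table range) under the mask,
	 * so a non-zero result means a bogus address.
	 */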
	rldicr.	r9,r3,4,(63 - 46 - 4)
	bne-	8f

	srdi	r9,r3,60		/* get region */
	srdi	r10,r3,SID_SHIFT	/* get esid */
	cmpldi	cr7,r9,0xc		/* cmp PAGE_OFFSET for later use */

	/* r3 = address, r10 = esid, cr7 = <> PAGE_OFFSET */
	blt	cr7,0f			/* user or kernel? */

	/* kernel address: proto-VSID = ESID */
	/* WARNING - MAGIC: we don't use the VSID 0xfffffffff, but
	 * this code will generate the protoVSID 0xfffffffff for the
	 * top segment.  That's ok, the scramble below will translate
	 * it to VSID 0, which is reserved as a bad VSID - one which
	 * will never have any pages in it. */

	/* Check if hitting the linear mapping or some other kernel space
	*/
	bne	cr7,1f

	/* Linear mapping encoding bits, the "li" instruction below will
	 * be patched by the kernel at boot
	 */
_GLOBAL(slb_miss_kernel_load_linear)
	li	r11,0
	/*
	 * context = (MAX_USER_CONTEXT) + ((ea >> 60) - 0xc) + 1
	 * r9 = region id.
	 */
	addis	r9,r9,(MAX_USER_CONTEXT - 0xc + 1)@ha
	addi	r9,r9,(MAX_USER_CONTEXT - 0xc + 1)@l
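	/*
	 * Worked example: for the linear mapping r9 was 0xc, so the
	 * add above leaves r9 = MAX_USER_CONTEXT + 1; each following
	 * kernel region gets the next context id, up to
	 * MAX_USER_CONTEXT + 4 for region 0xf.
	 */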

BEGIN_FTR_SECTION
	b	slb_finish_load
END_MMU_FTR_SECTION_IFCLR(MMU_FTR_1T_SEGMENT)
	b	slb_finish_load_1T

1:
#ifdef CONFIG_SPARSEMEM_VMEMMAP
	/* Check virtual memmap region. To be patched at kernel boot */
	cmpldi	cr0,r9,0xf
	bne	1f
_GLOBAL(slb_miss_kernel_load_vmemmap)
	li	r11,0
	b	6f
1:
#endif /* CONFIG_SPARSEMEM_VMEMMAP */

	/* vmalloc mapping gets the encoding from the PACA as the mapping
	 * can be demoted from 64K -> 4K dynamically on some machines
	 */
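	/*
	 * The low 16 bits of the ESID index 256MB segments within the
	 * kernel region: those below VMALLOC_SIZE get the vmalloc
	 * encoding cached in the PACA, anything above is treated as
	 * the IO mapping.
	 */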
	clrldi	r11,r10,48
	cmpldi	r11,(VMALLOC_SIZE >> 28) - 1
	bgt	5f
	lhz	r11,PACAVMALLOCSLLP(r13)
	b	6f
5:
	/* IO mapping */
	_GLOBAL(slb_miss_kernel_load_io)
	li	r11,0
6:
	/*
	 * context = (MAX_USER_CONTEXT) + ((ea >> 60) - 0xc) + 1
	 * r9 = region id.
	 */
	addis	r9,r9,(MAX_USER_CONTEXT - 0xc + 1)@ha
	addi	r9,r9,(MAX_USER_CONTEXT - 0xc + 1)@l

BEGIN_FTR_SECTION
	b	slb_finish_load
END_MMU_FTR_SECTION_IFCLR(MMU_FTR_1T_SEGMENT)
	b	slb_finish_load_1T

0:
	/* when using slices, we extract the psize off the slice bitmaps
	 * and then we need to get the sllp encoding off the mmu_psize_defs
	 * array.
	 *
	 * XXX This is a bit inefficient especially for the normal case,
	 * so we should try to implement a fast path for the standard page
	 * size using the old sllp value so we avoid the array. We cannot
	 * really do dynamic patching unfortunately as processes might flip
	 * between 4k and 64k standard page size
	 */
#ifdef CONFIG_PPC_MM_SLICES
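	/*
	 * Slice layout, as this code decodes it: 16 low slices of
	 * 256MB cover the first 4GB, one 4-bit psize each, packed into
	 * the 64-bit low_slices_psize; above that, 1TB high slices
	 * keep their psizes in the hpsizes[] byte array, two 4-bit
	 * entries per byte.
	 */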
	/* r10 has the esid */
	cmpldi	r10,16
	/* below SLICE_LOW_TOP */
	blt	5f
	/*
	 * Handle hpsizes,
	 * r9 is get_paca()->context.high_slices_psize[index], r11 is mask_index
	 */
	srdi	r11,r10,(SLICE_HIGH_SHIFT - SLICE_LOW_SHIFT + 1) /* index */
	addi	r9,r11,PACAHIGHSLICEPSIZE
	lbzx	r9,r13,r9		/* r9 is hpsizes[r11] */
	/* r11 = (r10 >> (SLICE_HIGH_SHIFT - SLICE_LOW_SHIFT)) & 0x1 */
	rldicl	r11,r10,(64 - (SLICE_HIGH_SHIFT - SLICE_LOW_SHIFT)),63
	b	6f

5:
	/*
	 * Handle lpsizes
	 * r9 is get_paca()->context.low_slices_psize, r11 is index
	 */
	ld	r9,PACALOWSLICESPSIZE(r13)
	mr	r11,r10
6:
	sldi	r11,r11,2		/* index * 4 */
	/* Extract the psize and multiply to get an array offset */
	srd	r9,r9,r11
	andi.	r9,r9,0xf
	mulli	r9,r9,MMUPSIZEDEFSIZE

	/* Now get to the array and obtain the sllp
	 */
	ld	r11,PACATOC(r13)
	ld	r11,mmu_psize_defs@got(r11)
	add	r11,r11,r9
	ld	r11,MMUPSIZESLLP(r11)
	ori	r11,r11,SLB_VSID_USER
#else
	/* paca context sllp already contains the SLB_VSID_USER bits */
	lhz	r11,PACACONTEXTSLLP(r13)
#endif /* CONFIG_PPC_MM_SLICES */

	ld	r9,PACACONTEXTID(r13)
BEGIN_FTR_SECTION
	cmpldi	r10,0x1000
	bge	slb_finish_load_1T
END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)
	b	slb_finish_load

8:	/* invalid EA */
	li	r10,0			/* BAD_VSID */
	li	r9,0			/* BAD_VSID */
	li	r11,SLB_VSID_USER	/* flags don't much matter */
	b	slb_finish_load

#ifdef __DISABLED__

/* void slb_allocate_user(unsigned long ea);
 *
 * Create an SLB entry for the given EA (user or kernel).
 * r3 = faulting address, r13 = PACA
 * r9, r10, r11 are clobbered by this function
 * No other registers are examined or changed.
 *
 * It is called with translation enabled in order to be able to walk the
 * page tables. This is not currently used.
 */
_GLOBAL(slb_allocate_user)
	/* r3 = faulting address */
	srdi	r10,r3,28		/* get esid */

	crset	4*cr7+lt		/* set "user" flag for later */

	/* check if we fit in the range covered by the pagetables */
	srdi.	r9,r3,PGTABLE_EADDR_SIZE
	crnot	4*cr0+eq,4*cr0+eq
	beqlr

	/* now we need to get to the page tables in order to get the page
	 * size encoding from the PMD. In the future, we'll be able to deal
	 * with 1T segments too by getting the encoding from the PGD instead
	 */
	ld	r9,PACAPGDIR(r13)
	cmpldi	cr0,r9,0
	beqlr
	rlwinm	r11,r10,8,25,28
	ldx	r9,r9,r11		/* get pgd_t */
	cmpldi	cr0,r9,0
	beqlr
	rlwinm	r11,r10,3,17,28
	ldx	r9,r9,r11		/* get pmd_t */
	cmpldi	cr0,r9,0
	beqlr

	/* build vsid flags */
	andi.	r11,r9,SLB_VSID_LLP
	ori	r11,r11,SLB_VSID_USER

	/* get context to calculate proto-VSID */
	ld	r9,PACACONTEXTID(r13)
	/* fall through to slb_finish_load */

#endif /* __DISABLED__ */


/*
 * Finish loading of an SLB entry and return
 *
 * r3 = EA, r9 = context, r10 = ESID, r11 = flags, clobbers r9, cr7 = <> PAGE_OFFSET
 */
slb_finish_load:
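	/*
	 * The rldimi below builds proto-VSID = (context << ESID_BITS) | ESID.
	 * ASM_VSID_SCRAMBLE then turns that into the VSID, roughly
	 * VSID = (proto-VSID * VSID_MULTIPLIER) % VSID_MODULUS with
	 * modulus 2^VSID_BITS_256M - 1 (see the macro for the exact
	 * folding).
	 */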
	rldimi	r10,r9,ESID_BITS,0
	ASM_VSID_SCRAMBLE(r10,r9,256M)
	/*
	 * bits above VSID_BITS_256M need to be ignored from r10
	 * also combine VSID and flags
	 */
	rldimi	r11,r10,SLB_VSID_SHIFT,(64 - (SLB_VSID_SHIFT + VSID_BITS_256M))

	/* r3 = EA, r11 = VSID data */
	/*
	 * Find a slot, round robin. Previously we tried to find a
	 * free slot first but that took too long. Unfortunately we
	 * don't have any LRU information to help us choose a slot.
	 */

7:	ld	r10,PACASTABRR(r13)
	addi	r10,r10,1
	/* This gets soft patched on boot. */
_GLOBAL(slb_compare_rr_to_size)
	cmpldi	r10,0

	blt+	4f
	li	r10,SLB_NUM_BOLTED

4:
	std	r10,PACASTABRR(r13)
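	/*
	 * The patched immediate above is the number of SLB entries on
	 * this CPU, so r10 cycles through [SLB_NUM_BOLTED, slb size):
	 * wrapping restarts at SLB_NUM_BOLTED, keeping the bolted
	 * entries safe from round-robin eviction.
	 */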

3:
	rldimi	r3,r10,0,36		/* r3= EA[0:35] | entry */
	oris	r10,r3,SLB_ESID_V@h	/* r3 |= SLB_ESID_V */

	/* r3 = ESID data, r11 = VSID data */

	/*
	 * No need for an isync before or after this slbmte. The exception
	 * we enter with and the rfid we exit with are context synchronizing.
	 */
	slbmte	r11,r10

	/* we're done for kernel addresses */
	crclr	4*cr0+eq		/* set result to "success" */
	bgelr	cr7

	/* Update the slb cache */
	lhz	r3,PACASLBCACHEPTR(r13)	/* offset = paca->slb_cache_ptr */
	cmpldi	r3,SLB_CACHE_ENTRIES
	bge	1f

	/* still room in the slb cache */
	sldi	r11,r3,2		/* r11 = offset * sizeof(u32) */
	srdi	r10,r10,28		/* get the 36 bits of the ESID */
	add	r11,r11,r13		/* r11 = (u32 *)paca + offset */
	stw	r10,PACASLBCACHE(r11)	/* paca->slb_cache[offset] = esid */
	addi	r3,r3,1			/* offset++ */
	b	2f
1:					/* offset >= SLB_CACHE_ENTRIES */
	li	r3,SLB_CACHE_ENTRIES+1
2:
	sth	r3,PACASLBCACHEPTR(r13)	/* paca->slb_cache_ptr = offset */
	crclr	4*cr0+eq		/* set result to "success" */
	blr

/*
 * Finish loading of a 1T SLB entry (for the kernel linear mapping) and return.
 *
 * r3 = EA, r9 = context, r10 = ESID(256MB), r11 = flags, clobbers r9
 */
slb_finish_load_1T:
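	/*
	 * Same scheme as above, scaled up: a 1T ESID is the 256MB ESID
	 * shifted right by SID_SHIFT_1T - SID_SHIFT = 12, and the
	 * scramble works modulo 2^VSID_BITS_1T - 1.
	 */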
	srdi	r10,r10,(SID_SHIFT_1T - SID_SHIFT)	/* get 1T ESID */
	rldimi	r10,r9,ESID_BITS_1T,0
	ASM_VSID_SCRAMBLE(r10,r9,1T)
	/*
	 * bits above VSID_BITS_1T need to be ignored from r10
	 * also combine VSID and flags
	 */
	rldimi	r11,r10,SLB_VSID_SHIFT_1T,(64 - (SLB_VSID_SHIFT_1T + VSID_BITS_1T))
	li	r10,MMU_SEGSIZE_1T
	rldimi	r11,r10,SLB_VSID_SSIZE_SHIFT,0	/* insert segment size */

	/* r3 = EA, r11 = VSID data */
	clrrdi	r3,r3,SID_SHIFT_1T	/* clear out non-ESID bits */
	b	7b