/*
 * PowerPC64 port by Mike Corrigan and Dave Engebretsen
 *   {mikejc|engebret}@us.ibm.com
 *
 *    Copyright (c) 2000 Mike Corrigan <mikejc@us.ibm.com>
 *
 * SMP scalability work:
 *    Copyright (C) 2001 Anton Blanchard <anton@au.ibm.com>, IBM
 *
 *    Module name: htab.c
 *
 *    Description:
 *      PowerPC Hashed Page Table functions
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#undef DEBUG
#undef DEBUG_LOW

#include <linux/spinlock.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/proc_fs.h>
#include <linux/stat.h>
#include <linux/sysctl.h>
#include <linux/ctype.h>
#include <linux/cache.h>
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/lmb.h>

#include <asm/processor.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>
#include <asm/page.h>
#include <asm/types.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/machdep.h>
#include <asm/prom.h>
#include <asm/abs_addr.h>
#include <asm/tlbflush.h>
#include <asm/io.h>
#include <asm/eeh.h>
#include <asm/tlb.h>
#include <asm/cacheflush.h>
#include <asm/cputable.h>
#include <asm/sections.h>
#include <asm/spu.h>
#include <asm/udbg.h>

#ifdef DEBUG
#define DBG(fmt...) udbg_printf(fmt)
#else
#define DBG(fmt...)
#endif

#ifdef DEBUG_LOW
#define DBG_LOW(fmt...) udbg_printf(fmt)
#else
#define DBG_LOW(fmt...)
#endif

#define KB (1024)
#define MB (1024*KB)
#define GB (1024L*MB)

/*
 * Note:  pte   --> Linux PTE
 *        HPTE  --> PowerPC Hashed Page Table Entry
 *
 * Execution context:
 *   htab_initialize is called with the MMU off (of course), but
 *   the kernel has been copied down to zero so it can directly
 *   reference global data.  At this point it is very difficult
 *   to print debug info.
 *
 */

#ifdef CONFIG_U3_DART
extern unsigned long dart_tablebase;
#endif /* CONFIG_U3_DART */

static unsigned long _SDR1;
struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT];

struct hash_pte *htab_address;
unsigned long htab_size_bytes;
unsigned long htab_hash_mask;
int mmu_linear_psize = MMU_PAGE_4K;
int mmu_virtual_psize = MMU_PAGE_4K;
int mmu_vmalloc_psize = MMU_PAGE_4K;
#ifdef CONFIG_SPARSEMEM_VMEMMAP
int mmu_vmemmap_psize = MMU_PAGE_4K;
#endif
int mmu_io_psize = MMU_PAGE_4K;
int mmu_kernel_ssize = MMU_SEGSIZE_256M;
int mmu_highuser_ssize = MMU_SEGSIZE_256M;
u16 mmu_slb_size = 64;
#ifdef CONFIG_HUGETLB_PAGE
unsigned int HPAGE_SHIFT;
#endif
#ifdef CONFIG_PPC_64K_PAGES
int mmu_ci_restrictions;
#endif
#ifdef CONFIG_DEBUG_PAGEALLOC
static u8 *linear_map_hash_slots;
static unsigned long linear_map_hash_count;
static DEFINE_SPINLOCK(linear_map_hash_lock);
#endif /* CONFIG_DEBUG_PAGEALLOC */
/* These are the default page-size arrays, used when the firmware
 * does not provide page-size information.
 */

/* Pre-POWER4 CPUs (4k pages only)
 */
static struct mmu_psize_def mmu_psize_defaults_old[] = {
	[MMU_PAGE_4K] = {
		.shift	= 12,
		.sllp	= 0,
		.penc	= 0,
		.avpnm	= 0,
		.tlbiel = 0,
	},
};

/* POWER4, GPUL, POWER5
 *
 * Support for 16Mb large pages
 */
static struct mmu_psize_def mmu_psize_defaults_gp[] = {
	[MMU_PAGE_4K] = {
		.shift	= 12,
		.sllp	= 0,
		.penc	= 0,
		.avpnm	= 0,
		.tlbiel = 1,
	},
	[MMU_PAGE_16M] = {
		.shift	= 24,
		.sllp	= SLB_VSID_L,
		.penc	= 0,
		.avpnm	= 0x1UL,
		.tlbiel = 0,
	},
};

static unsigned long htab_convert_pte_flags(unsigned long pteflags)
{
	unsigned long rflags = pteflags & 0x1fa;

	/* _PAGE_EXEC -> NOEXEC */
	if ((pteflags & _PAGE_EXEC) == 0)
		rflags |= HPTE_R_N;

	/* PP bits. PAGE_USER is already PP bit 0x2, so we only
	 * need to add in 0x1 if it's a read-only user page
	 */
	if ((pteflags & _PAGE_USER) && !((pteflags & _PAGE_RW) &&
					 (pteflags & _PAGE_DIRTY)))
		rflags |= 1;

	/* Always add C */
	return rflags | HPTE_R_C;
}

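/* Create bolted (HPTE_V_BOLTED) hash entries mapping the virtual range
 * vstart..vend onto the physical range starting at pstart, one HPTE per
 * page of the given page size and segment size.  Returns 0 on success,
 * or the first negative error from ppc_md.hpte_insert.
 */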
int htab_bolt_mapping(unsigned long vstart, unsigned long vend,
		      unsigned long pstart, unsigned long prot,
		      int psize, int ssize)
{
	unsigned long vaddr, paddr;
	unsigned int step, shift;
	int ret = 0;

	shift = mmu_psize_defs[psize].shift;
	step = 1 << shift;

	prot = htab_convert_pte_flags(prot);

	DBG("htab_bolt_mapping(%lx..%lx -> %lx (%lx,%d,%d)\n",
	    vstart, vend, pstart, prot, psize, ssize);

	for (vaddr = vstart, paddr = pstart; vaddr < vend;
	     vaddr += step, paddr += step) {
		unsigned long hash, hpteg;
		unsigned long vsid = get_kernel_vsid(vaddr, ssize);
		unsigned long va = hpt_va(vaddr, vsid, ssize);

		hash = hpt_hash(va, shift, ssize);
		hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP);

		BUG_ON(!ppc_md.hpte_insert);
		ret = ppc_md.hpte_insert(hpteg, va, paddr, prot,
					 HPTE_V_BOLTED, psize, ssize);

		if (ret < 0)
			break;
#ifdef CONFIG_DEBUG_PAGEALLOC
		if ((paddr >> PAGE_SHIFT) < linear_map_hash_count)
			linear_map_hash_slots[paddr >> PAGE_SHIFT] = ret | 0x80;
#endif /* CONFIG_DEBUG_PAGEALLOC */
	}
	return ret < 0 ? ret : 0;
}

#ifdef CONFIG_MEMORY_HOTPLUG
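/* Tear down a bolted mapping previously created by htab_bolt_mapping(),
 * one page at a time.  Only possible on platforms that implement
 * ppc_md.hpte_removebolted.
 */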
static int htab_remove_mapping(unsigned long vstart, unsigned long vend,
		      int psize, int ssize)
{
	unsigned long vaddr;
	unsigned int step, shift;

	shift = mmu_psize_defs[psize].shift;
	step = 1 << shift;

	if (!ppc_md.hpte_removebolted) {
		printk(KERN_WARNING "Platform doesn't implement "
				"hpte_removebolted\n");
		return -EINVAL;
	}

	for (vaddr = vstart; vaddr < vend; vaddr += step)
		ppc_md.hpte_removebolted(vaddr, psize, ssize);

	return 0;
}
#endif /* CONFIG_MEMORY_HOTPLUG */

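/* Flat device-tree callback: scan the "ibm,processor-segment-sizes"
 * property of the "cpu" nodes and set CPU_FTR_1T_SEGMENT if a 1T (2^40)
 * segment size is advertised; clear CPU_FTR_NO_SLBIE_B when it is not.
 */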
static int __init htab_dt_scan_seg_sizes(unsigned long node,
					 const char *uname, int depth,
					 void *data)
{
	char *type = of_get_flat_dt_prop(node, "device_type", NULL);
	u32 *prop;
	unsigned long size = 0;

	/* We are scanning "cpu" nodes only */
	if (type == NULL || strcmp(type, "cpu") != 0)
		return 0;

	prop = (u32 *)of_get_flat_dt_prop(node, "ibm,processor-segment-sizes",
					  &size);
	if (prop == NULL)
		return 0;
	for (; size >= 4; size -= 4, ++prop) {
		if (prop[0] == 40) {
			DBG("1T segment support detected\n");
			cur_cpu_spec->cpu_features |= CPU_FTR_1T_SEGMENT;
			return 1;
		}
	}
	cur_cpu_spec->cpu_features &= ~CPU_FTR_NO_SLBIE_B;
	return 0;
}

static void __init htab_init_seg_sizes(void)
{
	of_scan_flat_dt(htab_dt_scan_seg_sizes, NULL);
}

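/* Flat device-tree callback: parse the "ibm,segment-page-sizes"
 * property of the "cpu" nodes and fill mmu_psize_defs with the page
 * sizes (and their SLB/HPTE encodings) that the processor supports.
 */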
static int __init htab_dt_scan_page_sizes(unsigned long node,
					  const char *uname, int depth,
					  void *data)
{
	char *type = of_get_flat_dt_prop(node, "device_type", NULL);
	u32 *prop;
	unsigned long size = 0;

	/* We are scanning "cpu" nodes only */
	if (type == NULL || strcmp(type, "cpu") != 0)
		return 0;

	prop = (u32 *)of_get_flat_dt_prop(node,
					  "ibm,segment-page-sizes", &size);
	if (prop != NULL) {
		DBG("Page sizes from device-tree:\n");
		size /= 4;
		cur_cpu_spec->cpu_features &= ~(CPU_FTR_16M_PAGE);
		while(size > 0) {
			unsigned int shift = prop[0];
			unsigned int slbenc = prop[1];
			unsigned int lpnum = prop[2];
			unsigned int lpenc = 0;
			struct mmu_psize_def *def;
			int idx = -1;

			size -= 3; prop += 3;
			while(size > 0 && lpnum) {
				if (prop[0] == shift)
					lpenc = prop[1];
				prop += 2; size -= 2;
				lpnum--;
			}
			switch(shift) {
			case 0xc:
				idx = MMU_PAGE_4K;
				break;
			case 0x10:
				idx = MMU_PAGE_64K;
				break;
			case 0x14:
				idx = MMU_PAGE_1M;
				break;
			case 0x18:
				idx = MMU_PAGE_16M;
				cur_cpu_spec->cpu_features |= CPU_FTR_16M_PAGE;
				break;
			case 0x22:
				idx = MMU_PAGE_16G;
				break;
			}
			if (idx < 0)
				continue;
			def = &mmu_psize_defs[idx];
			def->shift = shift;
			if (shift <= 23)
				def->avpnm = 0;
			else
				def->avpnm = (1 << (shift - 23)) - 1;
			def->sllp = slbenc;
			def->penc = lpenc;
			/* We don't know for sure what's up with tlbiel, so
			 * for now we only set it for 4K and 64K pages
			 */
			if (idx == MMU_PAGE_4K || idx == MMU_PAGE_64K)
				def->tlbiel = 1;
			else
				def->tlbiel = 0;

			DBG(" %d: shift=%02x, sllp=%04x, avpnm=%08x, "
			    "tlbiel=%d, penc=%d\n",
			    idx, shift, def->sllp, def->avpnm, def->tlbiel,
			    def->penc);
		}
		return 1;
	}
	return 0;
}

/* Scan for 16G memory blocks that have been set aside for huge pages
 * and reserve those blocks for 16G huge pages.
 */
static int __init htab_dt_scan_hugepage_blocks(unsigned long node,
					const char *uname, int depth,
					void *data) {
	char *type = of_get_flat_dt_prop(node, "device_type", NULL);
	unsigned long *addr_prop;
	u32 *page_count_prop;
	unsigned int expected_pages;
	long unsigned int phys_addr;
	long unsigned int block_size;

	/* We are scanning "memory" nodes only */
	if (type == NULL || strcmp(type, "memory") != 0)
		return 0;

	/* This property is the log base 2 of the number of virtual pages that
	 * will represent this memory block. */
	page_count_prop = of_get_flat_dt_prop(node, "ibm,expected#pages", NULL);
	if (page_count_prop == NULL)
		return 0;
	expected_pages = (1 << page_count_prop[0]);
	addr_prop = of_get_flat_dt_prop(node, "reg", NULL);
	if (addr_prop == NULL)
		return 0;
	phys_addr = addr_prop[0];
	block_size = addr_prop[1];
	if (block_size != (16 * GB))
		return 0;
	printk(KERN_INFO "Huge page(16GB) memory: "
			"addr = 0x%lX size = 0x%lX pages = %d\n",
			phys_addr, block_size, expected_pages);
	lmb_reserve(phys_addr, block_size * expected_pages);
	add_gpage(phys_addr, block_size, expected_pages);
	return 0;
}

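/* Choose the page sizes to use: start from the 4K-only defaults,
 * override them from the device-tree when possible, then pick the
 * linear-mapping, ordinary, io and vmemmap page sizes accordingly.
 */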
static void __init htab_init_page_sizes(void)
{
	int rc;

	/* Default to 4K pages only */
	memcpy(mmu_psize_defs, mmu_psize_defaults_old,
	       sizeof(mmu_psize_defaults_old));

	/*
	 * Try to find the available page sizes in the device-tree
	 */
	rc = of_scan_flat_dt(htab_dt_scan_page_sizes, NULL);
	if (rc != 0)  /* Found */
		goto found;

	/*
	 * Not in the device-tree; fall back on the known size list
	 * for 16M-capable GP & GR
	 */
	if (cpu_has_feature(CPU_FTR_16M_PAGE))
		memcpy(mmu_psize_defs, mmu_psize_defaults_gp,
		       sizeof(mmu_psize_defaults_gp));
 found:
#ifndef CONFIG_DEBUG_PAGEALLOC
	/*
	 * Pick a size for the linear mapping. Currently, we only support
	 * 16M, 1M and 4K which is the default
	 */
	if (mmu_psize_defs[MMU_PAGE_16M].shift)
		mmu_linear_psize = MMU_PAGE_16M;
	else if (mmu_psize_defs[MMU_PAGE_1M].shift)
		mmu_linear_psize = MMU_PAGE_1M;
#endif /* CONFIG_DEBUG_PAGEALLOC */

#ifdef CONFIG_PPC_64K_PAGES
	/*
	 * Pick a size for the ordinary pages. Default is 4K, we support
	 * 64K for user mappings and vmalloc if supported by the processor.
	 * We only use 64k for ioremap if the processor
	 * (and firmware) support cache-inhibited large pages.
	 * If not, we use 4k and set mmu_ci_restrictions so that
	 * hash_page knows to switch processes that use cache-inhibited
	 * mappings to 4k pages.
	 */
	if (mmu_psize_defs[MMU_PAGE_64K].shift) {
		mmu_virtual_psize = MMU_PAGE_64K;
		mmu_vmalloc_psize = MMU_PAGE_64K;
		if (mmu_linear_psize == MMU_PAGE_4K)
			mmu_linear_psize = MMU_PAGE_64K;
		if (cpu_has_feature(CPU_FTR_CI_LARGE_PAGE)) {
			/*
			 * Don't use 64k pages for ioremap on pSeries, since
			 * that would stop us accessing the HEA ethernet.
			 */
			if (!machine_is(pseries))
				mmu_io_psize = MMU_PAGE_64K;
		} else
			mmu_ci_restrictions = 1;
	}
#endif /* CONFIG_PPC_64K_PAGES */

#ifdef CONFIG_SPARSEMEM_VMEMMAP
	/* We try to use 16M pages for vmemmap if that is supported
	 * and we have at least 1G of RAM at boot
	 */
	if (mmu_psize_defs[MMU_PAGE_16M].shift &&
	    lmb_phys_mem_size() >= 0x40000000)
		mmu_vmemmap_psize = MMU_PAGE_16M;
	else if (mmu_psize_defs[MMU_PAGE_64K].shift)
		mmu_vmemmap_psize = MMU_PAGE_64K;
	else
		mmu_vmemmap_psize = MMU_PAGE_4K;
#endif /* CONFIG_SPARSEMEM_VMEMMAP */

	printk(KERN_DEBUG "Page orders: linear mapping = %d, "
	       "virtual = %d, io = %d"
#ifdef CONFIG_SPARSEMEM_VMEMMAP
	       ", vmemmap = %d"
#endif
	       "\n",
	       mmu_psize_defs[mmu_linear_psize].shift,
	       mmu_psize_defs[mmu_virtual_psize].shift,
	       mmu_psize_defs[mmu_io_psize].shift
#ifdef CONFIG_SPARSEMEM_VMEMMAP
	       ,mmu_psize_defs[mmu_vmemmap_psize].shift
#endif
	       );

#ifdef CONFIG_HUGETLB_PAGE
	/* Reserve 16G huge page memory sections for huge pages */
	of_scan_flat_dt(htab_dt_scan_hugepage_blocks, NULL);

	/* Set default large page size. Currently, we pick 16M or 1M depending
	 * on what is available
	 */
	if (mmu_psize_defs[MMU_PAGE_16M].shift)
		HPAGE_SHIFT = mmu_psize_defs[MMU_PAGE_16M].shift;
	/* With 4k/4level pagetables, we can't (for now) cope with a
	 * huge page size < PMD_SIZE */
	else if (mmu_psize_defs[MMU_PAGE_1M].shift)
		HPAGE_SHIFT = mmu_psize_defs[MMU_PAGE_1M].shift;
#endif /* CONFIG_HUGETLB_PAGE */
}

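/* Flat device-tree callback: retrieve the hash table size from the
 * "ibm,pft-size" property of the "cpu" nodes.
 */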
static int __init htab_dt_scan_pftsize(unsigned long node,
				       const char *uname, int depth,
				       void *data)
{
	char *type = of_get_flat_dt_prop(node, "device_type", NULL);
	u32 *prop;

	/* We are scanning "cpu" nodes only */
	if (type == NULL || strcmp(type, "cpu") != 0)
		return 0;

	prop = (u32 *)of_get_flat_dt_prop(node, "ibm,pft-size", NULL);
	if (prop != NULL) {
		/* pft_size[0] is the NUMA CEC cookie */
		ppc64_pft_size = prop[1];
		return 1;
	}
	return 0;
}

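/* Return the hash table size to allocate: whatever the platform or
 * device-tree provides, otherwise enough for one 128-byte PTEG per two
 * 4K pages of RAM (with a floor of 2^11 PTEGs).
 */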
static unsigned long __init htab_get_table_size(void)
{
	unsigned long mem_size, rnd_mem_size, pteg_count;

	/* If the hash size isn't already provided by the platform, we try to
	 * retrieve it from the device-tree. If it's not there either, we
	 * calculate it now based on the total RAM size
	 */
	if (ppc64_pft_size == 0)
		of_scan_flat_dt(htab_dt_scan_pftsize, NULL);
	if (ppc64_pft_size)
		return 1UL << ppc64_pft_size;

	/* round mem_size up to next power of 2 */
	mem_size = lmb_phys_mem_size();
	rnd_mem_size = 1UL << __ilog2(mem_size);
	if (rnd_mem_size < mem_size)
		rnd_mem_size <<= 1;

	/* # pages / 2 */
	pteg_count = max(rnd_mem_size >> (12 + 1), 1UL << 11);

	return pteg_count << 7;
}

#ifdef CONFIG_MEMORY_HOTPLUG
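/* Bolt a linear mapping into the hash table for a newly added memory
 * section, and tear one down again on removal.
 */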
void create_section_mapping(unsigned long start, unsigned long end)
{
	BUG_ON(htab_bolt_mapping(start, end, __pa(start),
				 PAGE_KERNEL, mmu_linear_psize,
				 mmu_kernel_ssize));
}

int remove_section_mapping(unsigned long start, unsigned long end)
{
	return htab_remove_mapping(start, end, mmu_linear_psize,
				   mmu_kernel_ssize);
}
#endif /* CONFIG_MEMORY_HOTPLUG */

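/* Patch the instruction at insn_addr into a relative "bl" (branch and
 * link) to func and flush the icache for it.  func is a ppc64 function
 * descriptor, so its first doubleword is the actual entry point.
 */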
static inline void make_bl(unsigned int *insn_addr, void *func)
{
	unsigned long funcp = *((unsigned long *)func);
	int offset = funcp - (unsigned long)insn_addr;

	*insn_addr = (unsigned int)(0x48000001 | (offset & 0x03fffffc));
	flush_icache_range((unsigned long)insn_addr, 4+
			   (unsigned long)insn_addr);
}

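/* Now that ppc_md is populated, patch the call sites in the low-level
 * assembly hash path so they branch directly to the platform's HPTE
 * insert/remove/updatepp routines.
 */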
static void __init htab_finish_init(void)
{
	extern unsigned int *htab_call_hpte_insert1;
	extern unsigned int *htab_call_hpte_insert2;
	extern unsigned int *htab_call_hpte_remove;
	extern unsigned int *htab_call_hpte_updatepp;

#ifdef CONFIG_PPC_HAS_HASH_64K
	extern unsigned int *ht64_call_hpte_insert1;
	extern unsigned int *ht64_call_hpte_insert2;
	extern unsigned int *ht64_call_hpte_remove;
	extern unsigned int *ht64_call_hpte_updatepp;

	make_bl(ht64_call_hpte_insert1, ppc_md.hpte_insert);
	make_bl(ht64_call_hpte_insert2, ppc_md.hpte_insert);
	make_bl(ht64_call_hpte_remove, ppc_md.hpte_remove);
	make_bl(ht64_call_hpte_updatepp, ppc_md.hpte_updatepp);
#endif /* CONFIG_PPC_HAS_HASH_64K */

	make_bl(htab_call_hpte_insert1, ppc_md.hpte_insert);
	make_bl(htab_call_hpte_insert2, ppc_md.hpte_insert);
	make_bl(htab_call_hpte_remove, ppc_md.hpte_remove);
	make_bl(htab_call_hpte_updatepp, ppc_md.hpte_updatepp);
}

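/* Boot-time hash MMU setup: choose segment and page sizes, size and
 * allocate the hash table (unless a hypervisor owns it), and bolt the
 * linear mapping of all RAM, plus any TCE area, into it.
 */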
void __init htab_initialize(void)
{
	unsigned long table;
	unsigned long pteg_count;
	unsigned long prot, tprot;
	unsigned long base = 0, size = 0, limit;
	int i;

	DBG(" -> htab_initialize()\n");

	/* Initialize segment sizes */
	htab_init_seg_sizes();

	/* Initialize page sizes */
	htab_init_page_sizes();

	if (cpu_has_feature(CPU_FTR_1T_SEGMENT)) {
		mmu_kernel_ssize = MMU_SEGSIZE_1T;
		mmu_highuser_ssize = MMU_SEGSIZE_1T;
		printk(KERN_INFO "Using 1TB segments\n");
	}

	/*
	 * Calculate the required size of the htab.  We want the number of
	 * PTEGs to equal one half the number of real pages.
	 */
	htab_size_bytes = htab_get_table_size();
	pteg_count = htab_size_bytes >> 7;

	htab_hash_mask = pteg_count - 1;

	if (firmware_has_feature(FW_FEATURE_LPAR)) {
		/* Using a hypervisor which owns the htab */
		htab_address = NULL;
		_SDR1 = 0;
	} else {
		/* Find storage for the HPT.  Must be contiguous in
		 * the absolute address space. On cell we want it to be
		 * in the first 2 Gig so we can use it for IOMMU hacks.
		 */
		if (machine_is(cell))
			limit = 0x80000000;
		else
			limit = 0;

		table = lmb_alloc_base(htab_size_bytes, htab_size_bytes, limit);

		DBG("Hash table allocated at %lx, size: %lx\n", table,
		    htab_size_bytes);

		htab_address = abs_to_virt(table);

		/* htab absolute addr + encoded htabsize */
		_SDR1 = table + __ilog2(pteg_count) - 11;

		/* Initialize the HPT with no entries */
		memset((void *)table, 0, htab_size_bytes);

		/* Set SDR1 */
		mtspr(SPRN_SDR1, _SDR1);
	}

	prot = PAGE_KERNEL;

#ifdef CONFIG_DEBUG_PAGEALLOC
	linear_map_hash_count = lmb_end_of_DRAM() >> PAGE_SHIFT;
	linear_map_hash_slots = __va(lmb_alloc_base(linear_map_hash_count,
						    1, lmb.rmo_size));
	memset(linear_map_hash_slots, 0, linear_map_hash_count);
#endif /* CONFIG_DEBUG_PAGEALLOC */

	/* On U3 based machines, we need to reserve the DART area and
	 * _NOT_ map it to avoid cache paradoxes as it's remapped non
	 * cacheable later on
	 */

	/* create the bolted linear mapping in the hash table */
	for (i=0; i < lmb.memory.cnt; i++) {
		base = (unsigned long)__va(lmb.memory.region[i].base);
		size = lmb.memory.region[i].size;
		tprot = prot | (in_kernel_text(base) ? _PAGE_EXEC : 0);

		DBG("creating mapping for region: %lx..%lx (prot: %x)\n",
		    base, size, tprot);

#ifdef CONFIG_U3_DART
		/* Do not map the DART space. Fortunately, it will be aligned
		 * in such a way that it will not cross two lmb regions and
		 * will fit within a single 16Mb page.
		 * The DART space is assumed to be a full 16Mb region even if
		 * we only use 2Mb of that space. We will use more of it later
		 * for AGP GART. We have to use a full 16Mb large page.
		 */
		DBG("DART base: %lx\n", dart_tablebase);

		if (dart_tablebase != 0 && dart_tablebase >= base
		    && dart_tablebase < (base + size)) {
			unsigned long dart_table_end = dart_tablebase + 16 * MB;
			if (base != dart_tablebase)
				BUG_ON(htab_bolt_mapping(base, dart_tablebase,
							__pa(base), tprot,
							mmu_linear_psize,
							mmu_kernel_ssize));
			if ((base + size) > dart_table_end)
				BUG_ON(htab_bolt_mapping(dart_tablebase+16*MB,
							base + size,
							__pa(dart_table_end),
							tprot,
							mmu_linear_psize,
							mmu_kernel_ssize));
			continue;
		}
#endif /* CONFIG_U3_DART */
		BUG_ON(htab_bolt_mapping(base, base + size, __pa(base),
				tprot, mmu_linear_psize, mmu_kernel_ssize));
	}

	/*
	 * If we have a memory_limit and we've allocated TCEs then we need to
	 * explicitly map the TCE area at the top of RAM. We also cope with the
	 * case that the TCEs start below memory_limit.
	 * tce_alloc_start/end are 16MB aligned so the mapping should work
	 * for either 4K or 16MB pages.
	 */
	if (tce_alloc_start) {
		tce_alloc_start = (unsigned long)__va(tce_alloc_start);
		tce_alloc_end = (unsigned long)__va(tce_alloc_end);

		if (base + size >= tce_alloc_start)
			tce_alloc_start = base + size + 1;

		BUG_ON(htab_bolt_mapping(tce_alloc_start, tce_alloc_end,
					 __pa(tce_alloc_start), prot,
					 mmu_linear_psize, mmu_kernel_ssize));
	}

	htab_finish_init();

	DBG(" <- htab_initialize()\n");
}
#undef KB
#undef MB

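/* Point secondary CPUs at the hash table; under an LPAR hypervisor
 * SDR1 is not ours to set, so there is nothing to do.
 */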
void htab_initialize_secondary(void)
{
	if (!firmware_has_feature(FW_FEATURE_LPAR))
		mtspr(SPRN_SDR1, _SDR1);
}

/*
 * Called by asm hashtable.S for doing lazy icache flush
 */
unsigned int hash_page_do_lazy_icache(unsigned int pp, pte_t pte, int trap)
{
	struct page *page;

	if (!pfn_valid(pte_pfn(pte)))
		return pp;

	page = pte_page(pte);

	/* page is dirty */
	if (!test_bit(PG_arch_1, &page->flags) && !PageReserved(page)) {
		if (trap == 0x400) {
			__flush_dcache_icache(page_address(page));
			set_bit(PG_arch_1, &page->flags);
		} else
			pp |= HPTE_R_N;
	}
	return pp;
}

#ifdef CONFIG_PPC_MM_SLICES
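/* Look up the page size for an address from the slice masks cached in
 * the PACA: 4 bits per slice, with low slices below SLICE_LOW_TOP and
 * high slices above it.
 */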
unsigned int get_paca_psize(unsigned long addr)
{
	unsigned long index, slices;

	if (addr < SLICE_LOW_TOP) {
		slices = get_paca()->context.low_slices_psize;
		index = GET_LOW_SLICE_INDEX(addr);
	} else {
		slices = get_paca()->context.high_slices_psize;
		index = GET_HIGH_SLICE_INDEX(addr);
	}
	return (slices >> (index * 4)) & 0xF;
}

#else
unsigned int get_paca_psize(unsigned long addr)
{
	return get_paca()->context.user_psize;
}
#endif

/*
 * Demote a segment to using 4k pages.
 * For now this makes the whole process use 4k pages.
 */
#ifdef CONFIG_PPC_64K_PAGES
void demote_segment_4k(struct mm_struct *mm, unsigned long addr)
{
	if (get_slice_psize(mm, addr) == MMU_PAGE_4K)
		return;
	slice_set_range_psize(mm, addr, 1, MMU_PAGE_4K);
#ifdef CONFIG_SPU_BASE
	spu_flush_all_slbs(mm);
#endif
	if (get_paca_psize(addr) != MMU_PAGE_4K) {
		get_paca()->context = mm->context;
		slb_flush_and_rebolt();
	}
}
#endif /* CONFIG_PPC_64K_PAGES */

#ifdef CONFIG_PPC_SUBPAGE_PROT
/*
 * This looks up a 2-bit protection code for a 4k subpage of a 64k page.
 * Userspace sets the subpage permissions using the subpage_prot system call.
 *
 * Result is 0: full permissions, _PAGE_RW: read-only,
 * _PAGE_USER or _PAGE_USER|_PAGE_RW: no access.
 */
static int subpage_protection(pgd_t *pgdir, unsigned long ea)
{
	struct subpage_prot_table *spt = pgd_subpage_prot(pgdir);
	u32 spp = 0;
	u32 **sbpm, *sbpp;

	if (ea >= spt->maxaddr)
		return 0;
	if (ea < 0x100000000) {
		/* addresses below 4GB use spt->low_prot */
		sbpm = spt->low_prot;
	} else {
		sbpm = spt->protptrs[ea >> SBP_L3_SHIFT];
		if (!sbpm)
			return 0;
	}
	sbpp = sbpm[(ea >> SBP_L2_SHIFT) & (SBP_L2_COUNT - 1)];
	if (!sbpp)
		return 0;
	spp = sbpp[(ea >> PAGE_SHIFT) & (SBP_L1_COUNT - 1)];

	/* extract 2-bit bitfield for this 4k subpage */
	spp >>= 30 - 2 * ((ea >> 12) & 0xf);

	/* turn 0,1,2,3 into combination of _PAGE_USER and _PAGE_RW */
	spp = ((spp & 2) ? _PAGE_USER : 0) | ((spp & 1) ? _PAGE_RW : 0);
	return spp;
}

#else /* CONFIG_PPC_SUBPAGE_PROT */
static inline int subpage_protection(pgd_t *pgdir, unsigned long ea)
{
	return 0;
}
#endif

/* Result code is:
 *  0 - handled
 *  1 - normal page fault
 * -1 - critical hash insertion error
 * -2 - access not permitted by subpage protection mechanism
 */
int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
{
	void *pgdir;
	unsigned long vsid;
	struct mm_struct *mm;
	pte_t *ptep;
	cpumask_t tmp;
	int rc, user_region = 0, local = 0;
	int psize, ssize;

	DBG_LOW("hash_page(ea=%016lx, access=%lx, trap=%lx\n",
		ea, access, trap);

	if ((ea & ~REGION_MASK) >= PGTABLE_RANGE) {
		DBG_LOW(" out of pgtable range !\n");
		return 1;
	}

	/* Get region & vsid */
	switch (REGION_ID(ea)) {
	case USER_REGION_ID:
		user_region = 1;
		mm = current->mm;
		if (! mm) {
			DBG_LOW(" user region with no mm !\n");
			return 1;
		}
		psize = get_slice_psize(mm, ea);
		ssize = user_segment_size(ea);
		vsid = get_vsid(mm->context.id, ea, ssize);
		break;
	case VMALLOC_REGION_ID:
		mm = &init_mm;
		vsid = get_kernel_vsid(ea, mmu_kernel_ssize);
		if (ea < VMALLOC_END)
			psize = mmu_vmalloc_psize;
		else
			psize = mmu_io_psize;
		ssize = mmu_kernel_ssize;
		break;
	default:
		/* Not a valid range
		 * Send the problem up to do_page_fault
		 */
		return 1;
	}
	DBG_LOW(" mm=%p, mm->pgdir=%p, vsid=%016lx\n", mm, mm->pgd, vsid);

	/* Get pgdir */
	pgdir = mm->pgd;
	if (pgdir == NULL)
		return 1;

	/* Check CPU locality */
	tmp = cpumask_of_cpu(smp_processor_id());
	if (user_region && cpus_equal(mm->cpu_vm_mask, tmp))
		local = 1;

#ifdef CONFIG_HUGETLB_PAGE
	/* Handle hugepage regions */
	if (HPAGE_SHIFT && mmu_huge_psizes[psize]) {
		DBG_LOW(" -> huge page !\n");
		return hash_huge_page(mm, access, ea, vsid, local, trap);
	}
#endif /* CONFIG_HUGETLB_PAGE */

#ifndef CONFIG_PPC_64K_PAGES
	/* If we use 4K pages and our psize is not 4K, then we are hitting
	 * a special driver mapping, we need to align the address before
	 * we fetch the PTE
	 */
	if (psize != MMU_PAGE_4K)
		ea &= ~((1ul << mmu_psize_defs[psize].shift) - 1);
#endif /* CONFIG_PPC_64K_PAGES */

	/* Get PTE and page size from page tables */
	ptep = find_linux_pte(pgdir, ea);
	if (ptep == NULL || !pte_present(*ptep)) {
		DBG_LOW(" no PTE !\n");
		return 1;
	}

#ifndef CONFIG_PPC_64K_PAGES
	DBG_LOW(" i-pte: %016lx\n", pte_val(*ptep));
#else
	DBG_LOW(" i-pte: %016lx %016lx\n", pte_val(*ptep),
		pte_val(*(ptep + PTRS_PER_PTE)));
#endif
	/* Pre-check access permissions (will be re-checked atomically
	 * in __hash_page_XX but this pre-check is a fast path
	 */
	if (access & ~pte_val(*ptep)) {
		DBG_LOW(" no access !\n");
		return 1;
	}

	/* Do actual hashing */
#ifdef CONFIG_PPC_64K_PAGES
	/* If _PAGE_4K_PFN is set, make sure this is a 4k segment */
	if ((pte_val(*ptep) & _PAGE_4K_PFN) && psize == MMU_PAGE_64K) {
		demote_segment_4k(mm, ea);
		psize = MMU_PAGE_4K;
	}

	/* If this PTE is non-cacheable and we have restrictions on
	 * using non cacheable large pages, then we switch to 4k
	 */
	if (mmu_ci_restrictions && psize == MMU_PAGE_64K &&
	    (pte_val(*ptep) & _PAGE_NO_CACHE)) {
		if (user_region) {
			demote_segment_4k(mm, ea);
			psize = MMU_PAGE_4K;
		} else if (ea < VMALLOC_END) {
			/*
			 * some driver did a non-cacheable mapping
			 * in vmalloc space, so switch vmalloc
			 * to 4k pages
			 */
			printk(KERN_ALERT "Reducing vmalloc segment "
			       "to 4kB pages because of "
			       "non-cacheable mapping\n");
			psize = mmu_vmalloc_psize = MMU_PAGE_4K;
#ifdef CONFIG_SPU_BASE
			spu_flush_all_slbs(mm);
#endif
		}
	}
	if (user_region) {
		if (psize != get_paca_psize(ea)) {
			get_paca()->context = mm->context;
			slb_flush_and_rebolt();
		}
	} else if (get_paca()->vmalloc_sllp !=
		   mmu_psize_defs[mmu_vmalloc_psize].sllp) {
		get_paca()->vmalloc_sllp =
			mmu_psize_defs[mmu_vmalloc_psize].sllp;
		slb_vmalloc_update();
	}
#endif /* CONFIG_PPC_64K_PAGES */

#ifdef CONFIG_PPC_HAS_HASH_64K
	if (psize == MMU_PAGE_64K)
		rc = __hash_page_64K(ea, access, vsid, ptep, trap, local, ssize);
	else
#endif /* CONFIG_PPC_HAS_HASH_64K */
	{
		int spp = subpage_protection(pgdir, ea);
		if (access & spp)
			rc = -2;
		else
			rc = __hash_page_4K(ea, access, vsid, ptep, trap,
					    local, ssize, spp);
	}

#ifndef CONFIG_PPC_64K_PAGES
	DBG_LOW(" o-pte: %016lx\n", pte_val(*ptep));
#else
	DBG_LOW(" o-pte: %016lx %016lx\n", pte_val(*ptep),
		pte_val(*(ptep + PTRS_PER_PTE)));
#endif
	DBG_LOW(" -> rc=%d\n", rc);
	return rc;
}
EXPORT_SYMBOL_GPL(hash_page);

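/* Pre-fault a user address into the hash table so the next access does
 * not take a hash fault.  Best-effort: any case that would need the
 * segment demotion logic is simply left for hash_page() to handle.
 */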
void hash_preload(struct mm_struct *mm, unsigned long ea,
		  unsigned long access, unsigned long trap)
{
	unsigned long vsid;
	void *pgdir;
	pte_t *ptep;
	cpumask_t mask;
	unsigned long flags;
	int local = 0;
	int ssize;

	BUG_ON(REGION_ID(ea) != USER_REGION_ID);

#ifdef CONFIG_PPC_MM_SLICES
	/* We only prefault standard pages for now */
	if (unlikely(get_slice_psize(mm, ea) != mm->context.user_psize))
		return;
#endif

	DBG_LOW("hash_preload(mm=%p, mm->pgdir=%p, ea=%016lx, access=%lx,"
		" trap=%lx\n", mm, mm->pgd, ea, access, trap);

	/* Get Linux PTE if available */
	pgdir = mm->pgd;
	if (pgdir == NULL)
		return;
	ptep = find_linux_pte(pgdir, ea);
	if (!ptep)
		return;

#ifdef CONFIG_PPC_64K_PAGES
	/* If either _PAGE_4K_PFN or _PAGE_NO_CACHE is set (and we are on
	 * a 64K kernel), then we don't preload, hash_page() will take
	 * care of it once we actually try to access the page.
	 * That way we don't have to duplicate all of the logic for segment
	 * page size demotion here
	 */
	if (pte_val(*ptep) & (_PAGE_4K_PFN | _PAGE_NO_CACHE))
		return;
#endif /* CONFIG_PPC_64K_PAGES */

	/* Get VSID */
	ssize = user_segment_size(ea);
	vsid = get_vsid(mm->context.id, ea, ssize);

	/* Hash doesn't like irqs */
	local_irq_save(flags);

	/* Is that local to this CPU ? */
	mask = cpumask_of_cpu(smp_processor_id());
	if (cpus_equal(mm->cpu_vm_mask, mask))
		local = 1;

	/* Hash it in */
#ifdef CONFIG_PPC_HAS_HASH_64K
	if (mm->context.user_psize == MMU_PAGE_64K)
		__hash_page_64K(ea, access, vsid, ptep, trap, local, ssize);
	else
#endif /* CONFIG_PPC_HAS_HASH_64K */
		__hash_page_4K(ea, access, vsid, ptep, trap, local, ssize,
			       subpage_protection(pgdir, ea));

	local_irq_restore(flags);
}

/* WARNING: This is called from hash_low_64.S, if you change this prototype,
 * do not forget to update the assembly call site !
 */
void flush_hash_page(unsigned long va, real_pte_t pte, int psize, int ssize,
		     int local)
{
	unsigned long hash, index, shift, hidx, slot;

	DBG_LOW("flush_hash_page(va=%016x)\n", va);
	pte_iterate_hashed_subpages(pte, psize, va, index, shift) {
		hash = hpt_hash(va, shift, ssize);
		hidx = __rpte_to_hidx(pte, index);
		if (hidx & _PTEIDX_SECONDARY)
			hash = ~hash;
		slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
		slot += hidx & _PTEIDX_GROUP_IX;
		DBG_LOW(" sub %d: hash=%x, hidx=%x\n", index, slot, hidx);
		ppc_md.hpte_invalidate(slot, va, psize, ssize, local);
	} pte_iterate_hashed_end();
}

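/* Flush a batch of hash entries: use the platform's optimized
 * flush_hash_range when available, otherwise invalidate this CPU's TLB
 * batch one page at a time.
 */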
void flush_hash_range(unsigned long number, int local)
{
	if (ppc_md.flush_hash_range)
		ppc_md.flush_hash_range(number, local);
	else {
		int i;
		struct ppc64_tlb_batch *batch =
			&__get_cpu_var(ppc64_tlb_batch);

		for (i = 0; i < number; i++)
			flush_hash_page(batch->vaddr[i], batch->pte[i],
					batch->psize, batch->ssize, local);
	}
}

/*
 * low_hash_fault is called when the low-level hash code failed
 * to insert a PTE due to a hypervisor error
 */
void low_hash_fault(struct pt_regs *regs, unsigned long address, int rc)
{
	if (user_mode(regs)) {
#ifdef CONFIG_PPC_SUBPAGE_PROT
		if (rc == -2)
			_exception(SIGSEGV, regs, SEGV_ACCERR, address);
		else
#endif
			_exception(SIGBUS, regs, BUS_ADRERR, address);
	} else
		bad_page_fault(regs, address, SIGBUS);
}

#ifdef CONFIG_DEBUG_PAGEALLOC
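/* Re-insert the bolted HPTE for one page of the linear mapping and
 * remember which hash slot it went into (0x80 marks the slot valid).
 */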
static void kernel_map_linear_page(unsigned long vaddr, unsigned long lmi)
{
	unsigned long hash, hpteg;
	unsigned long vsid = get_kernel_vsid(vaddr, mmu_kernel_ssize);
	unsigned long va = hpt_va(vaddr, vsid, mmu_kernel_ssize);
	unsigned long mode = htab_convert_pte_flags(PAGE_KERNEL);
	int ret;

	hash = hpt_hash(va, PAGE_SHIFT, mmu_kernel_ssize);
	hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP);

	ret = ppc_md.hpte_insert(hpteg, va, __pa(vaddr),
				 mode, HPTE_V_BOLTED,
				 mmu_linear_psize, mmu_kernel_ssize);
	BUG_ON (ret < 0);
	spin_lock(&linear_map_hash_lock);
	BUG_ON(linear_map_hash_slots[lmi] & 0x80);
	linear_map_hash_slots[lmi] = ret | 0x80;
	spin_unlock(&linear_map_hash_lock);
}

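/* Invalidate the HPTE for one page of the linear mapping, using the
 * slot number remembered by kernel_map_linear_page().
 */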
static void kernel_unmap_linear_page(unsigned long vaddr, unsigned long lmi)
{
	unsigned long hash, hidx, slot;
	unsigned long vsid = get_kernel_vsid(vaddr, mmu_kernel_ssize);
	unsigned long va = hpt_va(vaddr, vsid, mmu_kernel_ssize);

	hash = hpt_hash(va, PAGE_SHIFT, mmu_kernel_ssize);
	spin_lock(&linear_map_hash_lock);
	BUG_ON(!(linear_map_hash_slots[lmi] & 0x80));
	hidx = linear_map_hash_slots[lmi] & 0x7f;
	linear_map_hash_slots[lmi] = 0;
	spin_unlock(&linear_map_hash_lock);
	if (hidx & _PTEIDX_SECONDARY)
		hash = ~hash;
	slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
	slot += hidx & _PTEIDX_GROUP_IX;
	ppc_md.hpte_invalidate(slot, va, mmu_linear_psize, mmu_kernel_ssize, 0);
}

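/* DEBUG_PAGEALLOC hook: map or unmap pages in the bolted linear mapping
 * so that accesses to freed pages fault immediately.
 */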
void kernel_map_pages(struct page *page, int numpages, int enable)
{
	unsigned long flags, vaddr, lmi;
	int i;

	local_irq_save(flags);
	for (i = 0; i < numpages; i++, page++) {
		vaddr = (unsigned long)page_address(page);
		lmi = __pa(vaddr) >> PAGE_SHIFT;
		if (lmi >= linear_map_hash_count)
			continue;
		if (enable)
			kernel_map_linear_page(vaddr, lmi);
		else
			kernel_unmap_linear_page(vaddr, lmi);
	}
	local_irq_restore(flags);
}
#endif /* CONFIG_DEBUG_PAGEALLOC */