/*
 * PowerPC64 port by Mike Corrigan and Dave Engebretsen
 *   {mikejc|engebret}@us.ibm.com
 *
 *    Copyright (c) 2000 Mike Corrigan <mikejc@us.ibm.com>
 *
 * SMP scalability work:
 *    Copyright (C) 2001 Anton Blanchard <anton@au.ibm.com>, IBM
 *
 *    Module name: htab.c
 *
 *    Description:
 *      PowerPC Hashed Page Table functions
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#undef DEBUG
#undef DEBUG_LOW

#include <linux/spinlock.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/proc_fs.h>
#include <linux/stat.h>
#include <linux/sysctl.h>
#include <linux/ctype.h>
#include <linux/cache.h>
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/lmb.h>

#include <asm/processor.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>
#include <asm/page.h>
#include <asm/types.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/machdep.h>
#include <asm/prom.h>
#include <asm/abs_addr.h>
#include <asm/tlbflush.h>
#include <asm/io.h>
#include <asm/eeh.h>
#include <asm/tlb.h>
#include <asm/cacheflush.h>
#include <asm/cputable.h>
#include <asm/sections.h>
#include <asm/spu.h>
#include <asm/udbg.h>

#ifdef DEBUG
#define DBG(fmt...) udbg_printf(fmt)
#else
#define DBG(fmt...)
#endif

#ifdef DEBUG_LOW
#define DBG_LOW(fmt...) udbg_printf(fmt)
#else
#define DBG_LOW(fmt...)
#endif

#define KB (1024)
#define MB (1024*KB)
#define GB (1024L*MB)

/*
 * Note:  pte   --> Linux PTE
 *        HPTE  --> PowerPC Hashed Page Table Entry
 *
 * Execution context:
 *   htab_initialize is called with the MMU off (of course), but
 *   the kernel has been copied down to zero so it can directly
 *   reference global data.  At this point it is very difficult
 *   to print debug info.
 *
 */

#ifdef CONFIG_U3_DART
extern unsigned long dart_tablebase;
#endif /* CONFIG_U3_DART */

static unsigned long _SDR1;
struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT];

struct hash_pte *htab_address;
unsigned long htab_size_bytes;
unsigned long htab_hash_mask;
int mmu_linear_psize = MMU_PAGE_4K;
int mmu_virtual_psize = MMU_PAGE_4K;
int mmu_vmalloc_psize = MMU_PAGE_4K;
#ifdef CONFIG_SPARSEMEM_VMEMMAP
int mmu_vmemmap_psize = MMU_PAGE_4K;
#endif
int mmu_io_psize = MMU_PAGE_4K;
int mmu_kernel_ssize = MMU_SEGSIZE_256M;
int mmu_highuser_ssize = MMU_SEGSIZE_256M;
u16 mmu_slb_size = 64;
#ifdef CONFIG_HUGETLB_PAGE
unsigned int HPAGE_SHIFT;
#endif
#ifdef CONFIG_PPC_64K_PAGES
int mmu_ci_restrictions;
#endif
#ifdef CONFIG_DEBUG_PAGEALLOC
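/* One byte per page of the linear mapping: bit 0x80 marks a valid bolted
 * HPTE for that page, the low bits record the slot value returned by
 * hpte_insert (group index plus the secondary-hash flag) so the entry can
 * be found again and invalidated by kernel_map_pages().
 */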
static u8 *linear_map_hash_slots;
static unsigned long linear_map_hash_count;
static DEFINE_SPINLOCK(linear_map_hash_lock);
#endif /* CONFIG_DEBUG_PAGEALLOC */

/* These are the page size array definitions to be used when none
 * are provided by the firmware.
 */

/* Pre-POWER4 CPUs (4k pages only)
 */
static struct mmu_psize_def mmu_psize_defaults_old[] = {
	[MMU_PAGE_4K] = {
		.shift	= 12,
		.sllp	= 0,
		.penc	= 0,
		.avpnm	= 0,
		.tlbiel = 0,
	},
};

/* POWER4, GPUL, POWER5
 *
 * Support for 16Mb large pages
 */
static struct mmu_psize_def mmu_psize_defaults_gp[] = {
	[MMU_PAGE_4K] = {
		.shift	= 12,
		.sllp	= 0,
		.penc	= 0,
		.avpnm	= 0,
		.tlbiel = 1,
	},
	[MMU_PAGE_16M] = {
		.shift	= 24,
		.sllp	= SLB_VSID_L,
		.penc	= 0,
		.avpnm	= 0x1UL,
		.tlbiel = 0,
	},
};

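/*
 * Convert Linux PTE protection bits into the flags for the second
 * doubleword of an HPTE: clear execute permission (HPTE_R_N) when
 * _PAGE_EXEC is not set, add PP bit 0x1 for read-only user pages,
 * and always set the C (changed) bit.
 */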
static unsigned long htab_convert_pte_flags(unsigned long pteflags)
{
	unsigned long rflags = pteflags & 0x1fa;

	/* _PAGE_EXEC -> NOEXEC */
	if ((pteflags & _PAGE_EXEC) == 0)
		rflags |= HPTE_R_N;

	/* PP bits. PAGE_USER is already PP bit 0x2, so we only
	 * need to add in 0x1 if it's a read-only user page
	 */
	if ((pteflags & _PAGE_USER) && !((pteflags & _PAGE_RW) &&
					 (pteflags & _PAGE_DIRTY)))
		rflags |= 1;

	/* Always add C */
	return rflags | HPTE_R_C;
}

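/*
 * Insert bolted (non-evictable) hash PTEs mapping the virtual range
 * vstart..vend to physical addresses starting at pstart, using the given
 * page size and segment size.  Used for the kernel linear mapping and
 * other permanent mappings.
 */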
int htab_bolt_mapping(unsigned long vstart, unsigned long vend,
		      unsigned long pstart, unsigned long prot,
		      int psize, int ssize)
{
	unsigned long vaddr, paddr;
	unsigned int step, shift;
	int ret = 0;

	shift = mmu_psize_defs[psize].shift;
	step = 1 << shift;

	prot = htab_convert_pte_flags(prot);

	DBG("htab_bolt_mapping(%lx..%lx -> %lx (%lx,%d,%d)\n",
	    vstart, vend, pstart, prot, psize, ssize);

	for (vaddr = vstart, paddr = pstart; vaddr < vend;
	     vaddr += step, paddr += step) {
		unsigned long hash, hpteg;
		unsigned long vsid = get_kernel_vsid(vaddr, ssize);
		unsigned long va = hpt_va(vaddr, vsid, ssize);
		unsigned long tprot = prot;

		/* Make kernel text executable */
		if (overlaps_kernel_text(vaddr, vaddr + step))
			tprot &= ~HPTE_R_N;

		hash = hpt_hash(va, shift, ssize);
		hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP);

		BUG_ON(!ppc_md.hpte_insert);
		ret = ppc_md.hpte_insert(hpteg, va, paddr, tprot,
					 HPTE_V_BOLTED, psize, ssize);

		if (ret < 0)
			break;
#ifdef CONFIG_DEBUG_PAGEALLOC
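		/* Remember which slot holds this bolted entry so that
		 * kernel_map_pages() can invalidate and re-insert it. */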
		if ((paddr >> PAGE_SHIFT) < linear_map_hash_count)
			linear_map_hash_slots[paddr >> PAGE_SHIFT] = ret | 0x80;
#endif /* CONFIG_DEBUG_PAGEALLOC */
	}
	return ret < 0 ? ret : 0;
}

#ifdef CONFIG_MEMORY_HOTPLUG
static int htab_remove_mapping(unsigned long vstart, unsigned long vend,
		      int psize, int ssize)
{
	unsigned long vaddr;
	unsigned int step, shift;

	shift = mmu_psize_defs[psize].shift;
	step = 1 << shift;

	if (!ppc_md.hpte_removebolted) {
		printk(KERN_WARNING "Platform doesn't implement "
				"hpte_removebolted\n");
		return -EINVAL;
	}

	for (vaddr = vstart; vaddr < vend; vaddr += step)
		ppc_md.hpte_removebolted(vaddr, psize, ssize);

	return 0;
}
#endif /* CONFIG_MEMORY_HOTPLUG */

static int __init htab_dt_scan_seg_sizes(unsigned long node,
					 const char *uname, int depth,
					 void *data)
{
	char *type = of_get_flat_dt_prop(node, "device_type", NULL);
	u32 *prop;
	unsigned long size = 0;

	/* We are scanning "cpu" nodes only */
	if (type == NULL || strcmp(type, "cpu") != 0)
		return 0;

	prop = (u32 *)of_get_flat_dt_prop(node, "ibm,processor-segment-sizes",
					  &size);
	if (prop == NULL)
		return 0;
	for (; size >= 4; size -= 4, ++prop) {
		if (prop[0] == 40) {
			DBG("1T segment support detected\n");
			cur_cpu_spec->cpu_features |= CPU_FTR_1T_SEGMENT;
			return 1;
		}
	}
	cur_cpu_spec->cpu_features &= ~CPU_FTR_NO_SLBIE_B;
	return 0;
}

static void __init htab_init_seg_sizes(void)
{
	of_scan_flat_dt(htab_dt_scan_seg_sizes, NULL);
}

static int __init htab_dt_scan_page_sizes(unsigned long node,
					  const char *uname, int depth,
					  void *data)
{
	char *type = of_get_flat_dt_prop(node, "device_type", NULL);
	u32 *prop;
	unsigned long size = 0;

	/* We are scanning "cpu" nodes only */
	if (type == NULL || strcmp(type, "cpu") != 0)
		return 0;

	prop = (u32 *)of_get_flat_dt_prop(node,
					  "ibm,segment-page-sizes", &size);
	if (prop != NULL) {
		DBG("Page sizes from device-tree:\n");
		size /= 4;
		cur_cpu_spec->cpu_features &= ~(CPU_FTR_16M_PAGE);
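		/* Each entry is a (base page shift, SLB encoding, number of
		 * HPTE encodings) triple followed by that many (page shift,
		 * HPTE encoding) pairs; that is the layout parsed below. */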
		while(size > 0) {
			unsigned int shift = prop[0];
			unsigned int slbenc = prop[1];
			unsigned int lpnum = prop[2];
			unsigned int lpenc = 0;
			struct mmu_psize_def *def;
			int idx = -1;

			size -= 3; prop += 3;
			while(size > 0 && lpnum) {
				if (prop[0] == shift)
					lpenc = prop[1];
				prop += 2; size -= 2;
				lpnum--;
			}
			switch(shift) {
			case 0xc:
				idx = MMU_PAGE_4K;
				break;
			case 0x10:
				idx = MMU_PAGE_64K;
				break;
			case 0x14:
				idx = MMU_PAGE_1M;
				break;
			case 0x18:
				idx = MMU_PAGE_16M;
				cur_cpu_spec->cpu_features |= CPU_FTR_16M_PAGE;
				break;
			case 0x22:
				idx = MMU_PAGE_16G;
				break;
			}
			if (idx < 0)
				continue;
			def = &mmu_psize_defs[idx];
			def->shift = shift;
			if (shift <= 23)
				def->avpnm = 0;
			else
				def->avpnm = (1 << (shift - 23)) - 1;
			def->sllp = slbenc;
			def->penc = lpenc;
			/* We don't know for sure what's up with tlbiel, so
			 * for now we only set it for 4K and 64K pages
			 */
			if (idx == MMU_PAGE_4K || idx == MMU_PAGE_64K)
				def->tlbiel = 1;
			else
				def->tlbiel = 0;

			DBG(" %d: shift=%02x, sllp=%04x, avpnm=%08x, "
			    "tlbiel=%d, penc=%d\n",
			    idx, shift, def->sllp, def->avpnm, def->tlbiel,
			    def->penc);
		}
		return 1;
	}
	return 0;
}

#ifdef CONFIG_HUGETLB_PAGE
/* Scan for 16G memory blocks that have been set aside for huge pages
 * and reserve those blocks for 16G huge pages.
 */
static int __init htab_dt_scan_hugepage_blocks(unsigned long node,
					const char *uname, int depth,
					void *data) {
	char *type = of_get_flat_dt_prop(node, "device_type", NULL);
	unsigned long *addr_prop;
	u32 *page_count_prop;
	unsigned int expected_pages;
	long unsigned int phys_addr;
	long unsigned int block_size;

	/* We are scanning "memory" nodes only */
	if (type == NULL || strcmp(type, "memory") != 0)
		return 0;

	/* This property is the log base 2 of the number of virtual pages that
	 * will represent this memory block. */
	page_count_prop = of_get_flat_dt_prop(node, "ibm,expected#pages", NULL);
	if (page_count_prop == NULL)
		return 0;
	expected_pages = (1 << page_count_prop[0]);
	addr_prop = of_get_flat_dt_prop(node, "reg", NULL);
	if (addr_prop == NULL)
		return 0;
	phys_addr = addr_prop[0];
	block_size = addr_prop[1];
	if (block_size != (16 * GB))
		return 0;
	printk(KERN_INFO "Huge page(16GB) memory: "
			"addr = 0x%lX size = 0x%lX pages = %d\n",
			phys_addr, block_size, expected_pages);
	if (phys_addr + (16 * GB) <= lmb_end_of_DRAM()) {
		lmb_reserve(phys_addr, block_size * expected_pages);
		add_gpage(phys_addr, block_size, expected_pages);
	}
	return 0;
}
#endif /* CONFIG_HUGETLB_PAGE */

static void __init htab_init_page_sizes(void)
{
	int rc;

	/* Default to 4K pages only */
	memcpy(mmu_psize_defs, mmu_psize_defaults_old,
	       sizeof(mmu_psize_defaults_old));

	/*
	 * Try to find the available page sizes in the device-tree
	 */
	rc = of_scan_flat_dt(htab_dt_scan_page_sizes, NULL);
	if (rc != 0)  /* Found */
		goto found;

	/*
	 * Not in the device-tree, let's fallback on known size
	 * list for 16M capable GP & GR
	 */
	if (cpu_has_feature(CPU_FTR_16M_PAGE))
		memcpy(mmu_psize_defs, mmu_psize_defaults_gp,
		       sizeof(mmu_psize_defaults_gp));
 found:
#ifndef CONFIG_DEBUG_PAGEALLOC
	/*
	 * Pick a size for the linear mapping. Currently, we only support
	 * 16M, 1M and 4K which is the default
	 */
	if (mmu_psize_defs[MMU_PAGE_16M].shift)
		mmu_linear_psize = MMU_PAGE_16M;
	else if (mmu_psize_defs[MMU_PAGE_1M].shift)
		mmu_linear_psize = MMU_PAGE_1M;
#endif /* CONFIG_DEBUG_PAGEALLOC */

#ifdef CONFIG_PPC_64K_PAGES
	/*
	 * Pick a size for the ordinary pages. Default is 4K, we support
	 * 64K for user mappings and vmalloc if supported by the processor.
	 * We only use 64k for ioremap if the processor
	 * (and firmware) support cache-inhibited large pages.
	 * If not, we use 4k and set mmu_ci_restrictions so that
	 * hash_page knows to switch processes that use cache-inhibited
	 * mappings to 4k pages.
	 */
	if (mmu_psize_defs[MMU_PAGE_64K].shift) {
		mmu_virtual_psize = MMU_PAGE_64K;
		mmu_vmalloc_psize = MMU_PAGE_64K;
		if (mmu_linear_psize == MMU_PAGE_4K)
			mmu_linear_psize = MMU_PAGE_64K;
		if (cpu_has_feature(CPU_FTR_CI_LARGE_PAGE)) {
			/*
			 * Don't use 64k pages for ioremap on pSeries, since
			 * that would stop us accessing the HEA ethernet.
			 */
			if (!machine_is(pseries))
				mmu_io_psize = MMU_PAGE_64K;
		} else
			mmu_ci_restrictions = 1;
	}
#endif /* CONFIG_PPC_64K_PAGES */

#ifdef CONFIG_SPARSEMEM_VMEMMAP
	/* We try to use 16M pages for vmemmap if that is supported
	 * and we have at least 1G of RAM at boot
	 */
	if (mmu_psize_defs[MMU_PAGE_16M].shift &&
	    lmb_phys_mem_size() >= 0x40000000)
		mmu_vmemmap_psize = MMU_PAGE_16M;
	else if (mmu_psize_defs[MMU_PAGE_64K].shift)
		mmu_vmemmap_psize = MMU_PAGE_64K;
	else
		mmu_vmemmap_psize = MMU_PAGE_4K;
#endif /* CONFIG_SPARSEMEM_VMEMMAP */

	printk(KERN_DEBUG "Page orders: linear mapping = %d, "
	       "virtual = %d, io = %d"
#ifdef CONFIG_SPARSEMEM_VMEMMAP
	       ", vmemmap = %d"
#endif
	       "\n",
	       mmu_psize_defs[mmu_linear_psize].shift,
	       mmu_psize_defs[mmu_virtual_psize].shift,
	       mmu_psize_defs[mmu_io_psize].shift
#ifdef CONFIG_SPARSEMEM_VMEMMAP
	       ,mmu_psize_defs[mmu_vmemmap_psize].shift
#endif
	       );

#ifdef CONFIG_HUGETLB_PAGE
	/* Reserve 16G huge page memory sections for huge pages */
	of_scan_flat_dt(htab_dt_scan_hugepage_blocks, NULL);

	/* Set default large page size. Currently, we pick 16M or 1M
	 * depending on what is available
	 */
	if (mmu_psize_defs[MMU_PAGE_16M].shift)
		HPAGE_SHIFT = mmu_psize_defs[MMU_PAGE_16M].shift;
	/* With 4k/4level pagetables, we can't (for now) cope with a
	 * huge page size < PMD_SIZE */
	else if (mmu_psize_defs[MMU_PAGE_1M].shift)
		HPAGE_SHIFT = mmu_psize_defs[MMU_PAGE_1M].shift;
#endif /* CONFIG_HUGETLB_PAGE */
}

static int __init htab_dt_scan_pftsize(unsigned long node,
				       const char *uname, int depth,
				       void *data)
{
	char *type = of_get_flat_dt_prop(node, "device_type", NULL);
	u32 *prop;

	/* We are scanning "cpu" nodes only */
	if (type == NULL || strcmp(type, "cpu") != 0)
		return 0;

	prop = (u32 *)of_get_flat_dt_prop(node, "ibm,pft-size", NULL);
	if (prop != NULL) {
		/* pft_size[0] is the NUMA CEC cookie */
		ppc64_pft_size = prop[1];
		return 1;
	}
	return 0;
}

static unsigned long __init htab_get_table_size(void)
{
	unsigned long mem_size, rnd_mem_size, pteg_count, psize;

	/* If the hash size isn't already provided by the platform, we try to
	 * retrieve it from the device-tree. If it's not there either, we
	 * calculate it now based on the total RAM size
	 */
	if (ppc64_pft_size == 0)
		of_scan_flat_dt(htab_dt_scan_pftsize, NULL);
	if (ppc64_pft_size)
		return 1UL << ppc64_pft_size;

	/* round mem_size up to next power of 2 */
	mem_size = lmb_phys_mem_size();
	rnd_mem_size = 1UL << __ilog2(mem_size);
	if (rnd_mem_size < mem_size)
		rnd_mem_size <<= 1;

	/* # pages / 2 */
	psize = mmu_psize_defs[mmu_virtual_psize].shift;
	pteg_count = max(rnd_mem_size >> (psize + 1), 1UL << 11);

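	/* Each PTEG is 128 bytes (8 HPTEs of 16 bytes each); 1UL << 11
	 * PTEGs corresponds to the architected minimum table size of 256kB */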
	return pteg_count << 7;
}

#ifdef CONFIG_MEMORY_HOTPLUG
void create_section_mapping(unsigned long start, unsigned long end)
{
	BUG_ON(htab_bolt_mapping(start, end, __pa(start),
				 pgprot_val(PAGE_KERNEL), mmu_linear_psize,
				 mmu_kernel_ssize));
}

int remove_section_mapping(unsigned long start, unsigned long end)
{
	return htab_remove_mapping(start, end, mmu_linear_psize,
				   mmu_kernel_ssize);
}
#endif /* CONFIG_MEMORY_HOTPLUG */

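/*
 * Patch a "bl" (branch and link) instruction at *insn_addr so that it
 * calls func.  0x48000001 is the I-form branch opcode with the LK bit
 * set; the 26-bit signed displacement occupies the bits selected by
 * 0x03fffffc.  func is a ppc64 function descriptor, so the real entry
 * point is its first doubleword.
 */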
static inline void make_bl(unsigned int *insn_addr, void *func)
{
	unsigned long funcp = *((unsigned long *)func);
	int offset = funcp - (unsigned long)insn_addr;

	*insn_addr = (unsigned int)(0x48000001 | (offset & 0x03fffffc));
	flush_icache_range((unsigned long)insn_addr, 4+
			   (unsigned long)insn_addr);
}

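/*
 * The assembly hash code (hash_low_64.S) calls the platform hpte routines
 * through direct branches for speed; patch those call sites to branch to
 * the ppc_md methods selected for this platform.
 */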
static void __init htab_finish_init(void)
{
	extern unsigned int *htab_call_hpte_insert1;
	extern unsigned int *htab_call_hpte_insert2;
	extern unsigned int *htab_call_hpte_remove;
	extern unsigned int *htab_call_hpte_updatepp;

#ifdef CONFIG_PPC_HAS_HASH_64K
	extern unsigned int *ht64_call_hpte_insert1;
	extern unsigned int *ht64_call_hpte_insert2;
	extern unsigned int *ht64_call_hpte_remove;
	extern unsigned int *ht64_call_hpte_updatepp;

	make_bl(ht64_call_hpte_insert1, ppc_md.hpte_insert);
	make_bl(ht64_call_hpte_insert2, ppc_md.hpte_insert);
	make_bl(ht64_call_hpte_remove, ppc_md.hpte_remove);
	make_bl(ht64_call_hpte_updatepp, ppc_md.hpte_updatepp);
#endif /* CONFIG_PPC_HAS_HASH_64K */

	make_bl(htab_call_hpte_insert1, ppc_md.hpte_insert);
	make_bl(htab_call_hpte_insert2, ppc_md.hpte_insert);
	make_bl(htab_call_hpte_remove, ppc_md.hpte_remove);
	make_bl(htab_call_hpte_updatepp, ppc_md.hpte_updatepp);
}

void __init htab_initialize(void)
{
	unsigned long table;
	unsigned long pteg_count;
	unsigned long prot;
	unsigned long base = 0, size = 0, limit;
	int i;

	DBG(" -> htab_initialize()\n");

	/* Initialize segment sizes */
	htab_init_seg_sizes();

	/* Initialize page sizes */
	htab_init_page_sizes();

	if (cpu_has_feature(CPU_FTR_1T_SEGMENT)) {
		mmu_kernel_ssize = MMU_SEGSIZE_1T;
		mmu_highuser_ssize = MMU_SEGSIZE_1T;
		printk(KERN_INFO "Using 1TB segments\n");
	}

	/*
	 * Calculate the required size of the htab.  We want the number of
	 * PTEGs to equal one half the number of real pages.
	 */
	htab_size_bytes = htab_get_table_size();
	pteg_count = htab_size_bytes >> 7;

	htab_hash_mask = pteg_count - 1;

	if (firmware_has_feature(FW_FEATURE_LPAR)) {
		/* Using a hypervisor which owns the htab */
		htab_address = NULL;
		_SDR1 = 0;
	} else {
		/* Find storage for the HPT.  Must be contiguous in
		 * the absolute address space. On cell we want it to be
		 * in the first 2 Gig so we can use it for IOMMU hacks.
		 */
		if (machine_is(cell))
			limit = 0x80000000;
		else
			limit = 0;

		table = lmb_alloc_base(htab_size_bytes, htab_size_bytes, limit);

		DBG("Hash table allocated at %lx, size: %lx\n", table,
		    htab_size_bytes);

		htab_address = abs_to_virt(table);

		/* htab absolute addr + encoded htabsize */
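		/* The HTABSIZE field of SDR1 is log2(number of PTEGs) - 11,
		 * so the architected minimum of 2^11 PTEGs encodes as 0. */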
		_SDR1 = table + __ilog2(pteg_count) - 11;

		/* Initialize the HPT with no entries */
		memset((void *)table, 0, htab_size_bytes);

		/* Set SDR1 */
		mtspr(SPRN_SDR1, _SDR1);
	}

	prot = pgprot_val(PAGE_KERNEL);

#ifdef CONFIG_DEBUG_PAGEALLOC
	linear_map_hash_count = lmb_end_of_DRAM() >> PAGE_SHIFT;
	linear_map_hash_slots = __va(lmb_alloc_base(linear_map_hash_count,
						    1, lmb.rmo_size));
	memset(linear_map_hash_slots, 0, linear_map_hash_count);
#endif /* CONFIG_DEBUG_PAGEALLOC */

	/* On U3 based machines, we need to reserve the DART area and
	 * _NOT_ map it to avoid cache paradoxes as it's remapped non
	 * cacheable later on
	 */

	/* create the bolted linear mapping in the hash table */
	for (i=0; i < lmb.memory.cnt; i++) {
		base = (unsigned long)__va(lmb.memory.region[i].base);
		size = lmb.memory.region[i].size;

		DBG("creating mapping for region: %lx..%lx (prot: %x)\n",
		    base, size, prot);

#ifdef CONFIG_U3_DART
		/* Do not map the DART space. Fortunately, it will be aligned
		 * in such a way that it will not cross two lmb regions and
		 * will fit within a single 16Mb page.
		 * The DART space is assumed to be a full 16Mb region even if
		 * we only use 2Mb of that space. We will use more of it later
		 * for AGP GART. We have to use a full 16Mb large page.
		 */
		DBG("DART base: %lx\n", dart_tablebase);

		if (dart_tablebase != 0 && dart_tablebase >= base
				&& dart_tablebase < (base + size)) {
			unsigned long dart_table_end = dart_tablebase + 16 * MB;
			if (base != dart_tablebase)
				BUG_ON(htab_bolt_mapping(base, dart_tablebase,
							__pa(base), prot,
							mmu_linear_psize,
							mmu_kernel_ssize));
			if ((base + size) > dart_table_end)
				BUG_ON(htab_bolt_mapping(dart_tablebase+16*MB,
							base + size,
							__pa(dart_table_end),
							prot,
							mmu_linear_psize,
							mmu_kernel_ssize));
			continue;
		}
#endif /* CONFIG_U3_DART */
		BUG_ON(htab_bolt_mapping(base, base + size, __pa(base),
				prot, mmu_linear_psize, mmu_kernel_ssize));
	}

	/*
	 * If we have a memory_limit and we've allocated TCEs then we need to
	 * explicitly map the TCE area at the top of RAM. We also cope with the
	 * case that the TCEs start below memory_limit.
	 * tce_alloc_start/end are 16MB aligned so the mapping should work
	 * for either 4K or 16MB pages.
	 */
	if (tce_alloc_start) {
		tce_alloc_start = (unsigned long)__va(tce_alloc_start);
		tce_alloc_end = (unsigned long)__va(tce_alloc_end);

		if (base + size >= tce_alloc_start)
			tce_alloc_start = base + size + 1;

		BUG_ON(htab_bolt_mapping(tce_alloc_start, tce_alloc_end,
					 __pa(tce_alloc_start), prot,
					 mmu_linear_psize, mmu_kernel_ssize));
	}

	htab_finish_init();

	DBG(" <- htab_initialize()\n");
}
#undef KB
#undef MB

void htab_initialize_secondary(void)
{
	if (!firmware_has_feature(FW_FEATURE_LPAR))
		mtspr(SPRN_SDR1, _SDR1);
}

/*
 * Called by asm hashtable.S for doing lazy icache flush
 */
unsigned int hash_page_do_lazy_icache(unsigned int pp, pte_t pte, int trap)
{
	struct page *page;

	if (!pfn_valid(pte_pfn(pte)))
		return pp;

	page = pte_page(pte);

	/* page is dirty */
	if (!test_bit(PG_arch_1, &page->flags) && !PageReserved(page)) {
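		/* Trap 0x400 is an instruction storage interrupt, i.e. an
		 * execute access: flush the page out of the icache now.
		 * For data accesses just map the page no-execute and defer
		 * the flush until the page is actually executed from. */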
		if (trap == 0x400) {
			__flush_dcache_icache(page_address(page));
			set_bit(PG_arch_1, &page->flags);
		} else
			pp |= HPTE_R_N;
	}
	return pp;
}

#ifdef CONFIG_PPC_MM_SLICES
unsigned int get_paca_psize(unsigned long addr)
{
	unsigned long index, slices;

	if (addr < SLICE_LOW_TOP) {
		slices = get_paca()->context.low_slices_psize;
		index = GET_LOW_SLICE_INDEX(addr);
	} else {
		slices = get_paca()->context.high_slices_psize;
		index = GET_HIGH_SLICE_INDEX(addr);
	}
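	/* each slice's page size is stored as a 4-bit nibble */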
	return (slices >> (index * 4)) & 0xF;
}

#else
unsigned int get_paca_psize(unsigned long addr)
{
	return get_paca()->context.user_psize;
}
#endif

/*
 * Demote a segment to using 4k pages.
 * For now this makes the whole process use 4k pages.
 */
#ifdef CONFIG_PPC_64K_PAGES
void demote_segment_4k(struct mm_struct *mm, unsigned long addr)
{
	if (get_slice_psize(mm, addr) == MMU_PAGE_4K)
		return;
	slice_set_range_psize(mm, addr, 1, MMU_PAGE_4K);
#ifdef CONFIG_SPU_BASE
	spu_flush_all_slbs(mm);
#endif
	if (get_paca_psize(addr) != MMU_PAGE_4K) {
		get_paca()->context = mm->context;
		slb_flush_and_rebolt();
	}
}
#endif /* CONFIG_PPC_64K_PAGES */

#ifdef CONFIG_PPC_SUBPAGE_PROT
/*
 * This looks up a 2-bit protection code for a 4k subpage of a 64k page.
 * Userspace sets the subpage permissions using the subpage_prot system call.
 *
 * Result is 0: full permissions, _PAGE_RW: read-only,
 * _PAGE_USER or _PAGE_USER|_PAGE_RW: no access.
 */
static int subpage_protection(pgd_t *pgdir, unsigned long ea)
{
	struct subpage_prot_table *spt = pgd_subpage_prot(pgdir);
	u32 spp = 0;
	u32 **sbpm, *sbpp;

	if (ea >= spt->maxaddr)
		return 0;
	if (ea < 0x100000000) {
		/* addresses below 4GB use spt->low_prot */
		sbpm = spt->low_prot;
	} else {
		sbpm = spt->protptrs[ea >> SBP_L3_SHIFT];
		if (!sbpm)
			return 0;
	}
	sbpp = sbpm[(ea >> SBP_L2_SHIFT) & (SBP_L2_COUNT - 1)];
	if (!sbpp)
		return 0;
	spp = sbpp[(ea >> PAGE_SHIFT) & (SBP_L1_COUNT - 1)];

	/* extract 2-bit bitfield for this 4k subpage */
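	/* subpage 0 occupies the two most-significant bits of the word,
	 * subpage 15 the two least-significant bits */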
	spp >>= 30 - 2 * ((ea >> 12) & 0xf);

	/* turn 0,1,2,3 into combination of _PAGE_USER and _PAGE_RW */
	spp = ((spp & 2) ? _PAGE_USER : 0) | ((spp & 1) ? _PAGE_RW : 0);
	return spp;
}

#else /* CONFIG_PPC_SUBPAGE_PROT */
static inline int subpage_protection(pgd_t *pgdir, unsigned long ea)
{
	return 0;
}
#endif

/* Result code is:
 *  0 - handled
 *  1 - normal page fault
 * -1 - critical hash insertion error
 * -2 - access not permitted by subpage protection mechanism
 */
int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
{
	void *pgdir;
	unsigned long vsid;
	struct mm_struct *mm;
	pte_t *ptep;
	cpumask_t tmp;
	int rc, user_region = 0, local = 0;
	int psize, ssize;

	DBG_LOW("hash_page(ea=%016lx, access=%lx, trap=%lx\n",
		ea, access, trap);

	if ((ea & ~REGION_MASK) >= PGTABLE_RANGE) {
		DBG_LOW(" out of pgtable range !\n");
		return 1;
	}

	/* Get region & vsid */
	switch (REGION_ID(ea)) {
	case USER_REGION_ID:
		user_region = 1;
		mm = current->mm;
		if (! mm) {
			DBG_LOW(" user region with no mm !\n");
			return 1;
		}
		psize = get_slice_psize(mm, ea);
		ssize = user_segment_size(ea);
		vsid = get_vsid(mm->context.id, ea, ssize);
		break;
	case VMALLOC_REGION_ID:
		mm = &init_mm;
		vsid = get_kernel_vsid(ea, mmu_kernel_ssize);
		if (ea < VMALLOC_END)
			psize = mmu_vmalloc_psize;
		else
			psize = mmu_io_psize;
		ssize = mmu_kernel_ssize;
		break;
	default:
		/* Not a valid range
		 * Send the problem up to do_page_fault
		 */
		return 1;
	}
	DBG_LOW(" mm=%p, mm->pgdir=%p, vsid=%016lx\n", mm, mm->pgd, vsid);

	/* Get pgdir */
	pgdir = mm->pgd;
	if (pgdir == NULL)
		return 1;

	/* Check CPU locality */
	tmp = cpumask_of_cpu(smp_processor_id());
	if (user_region && cpus_equal(mm->cpu_vm_mask, tmp))
		local = 1;

#ifdef CONFIG_HUGETLB_PAGE
	/* Handle hugepage regions */
	if (HPAGE_SHIFT && mmu_huge_psizes[psize]) {
		DBG_LOW(" -> huge page !\n");
		return hash_huge_page(mm, access, ea, vsid, local, trap);
	}
#endif /* CONFIG_HUGETLB_PAGE */

#ifndef CONFIG_PPC_64K_PAGES
	/* If we use 4K pages and our psize is not 4K, then we are hitting
	 * a special driver mapping, we need to align the address before
	 * we fetch the PTE
	 */
	if (psize != MMU_PAGE_4K)
		ea &= ~((1ul << mmu_psize_defs[psize].shift) - 1);
#endif /* CONFIG_PPC_64K_PAGES */

	/* Get PTE and page size from page tables */
	ptep = find_linux_pte(pgdir, ea);
	if (ptep == NULL || !pte_present(*ptep)) {
		DBG_LOW(" no PTE !\n");
		return 1;
	}

#ifndef CONFIG_PPC_64K_PAGES
	DBG_LOW(" i-pte: %016lx\n", pte_val(*ptep));
#else
	DBG_LOW(" i-pte: %016lx %016lx\n", pte_val(*ptep),
		pte_val(*(ptep + PTRS_PER_PTE)));
#endif
	/* Pre-check access permissions (will be re-checked atomically
	 * in __hash_page_XX but this pre-check is a fast path
	 */
	if (access & ~pte_val(*ptep)) {
		DBG_LOW(" no access !\n");
		return 1;
	}

	/* Do actual hashing */
#ifdef CONFIG_PPC_64K_PAGES
	/* If _PAGE_4K_PFN is set, make sure this is a 4k segment */
	if ((pte_val(*ptep) & _PAGE_4K_PFN) && psize == MMU_PAGE_64K) {
		demote_segment_4k(mm, ea);
		psize = MMU_PAGE_4K;
	}

	/* If this PTE is non-cacheable and we have restrictions on
	 * using non cacheable large pages, then we switch to 4k
	 */
	if (mmu_ci_restrictions && psize == MMU_PAGE_64K &&
	    (pte_val(*ptep) & _PAGE_NO_CACHE)) {
		if (user_region) {
			demote_segment_4k(mm, ea);
			psize = MMU_PAGE_4K;
		} else if (ea < VMALLOC_END) {
			/*
			 * some driver did a non-cacheable mapping
			 * in vmalloc space, so switch vmalloc
			 * to 4k pages
			 */
			printk(KERN_ALERT "Reducing vmalloc segment "
			       "to 4kB pages because of "
			       "non-cacheable mapping\n");
			psize = mmu_vmalloc_psize = MMU_PAGE_4K;
#ifdef CONFIG_SPU_BASE
			spu_flush_all_slbs(mm);
#endif
		}
	}
	if (user_region) {
		if (psize != get_paca_psize(ea)) {
			get_paca()->context = mm->context;
			slb_flush_and_rebolt();
		}
	} else if (get_paca()->vmalloc_sllp !=
		   mmu_psize_defs[mmu_vmalloc_psize].sllp) {
		get_paca()->vmalloc_sllp =
			mmu_psize_defs[mmu_vmalloc_psize].sllp;
		slb_vmalloc_update();
	}
#endif /* CONFIG_PPC_64K_PAGES */

#ifdef CONFIG_PPC_HAS_HASH_64K
	if (psize == MMU_PAGE_64K)
		rc = __hash_page_64K(ea, access, vsid, ptep, trap, local, ssize);
	else
#endif /* CONFIG_PPC_HAS_HASH_64K */
	{
		int spp = subpage_protection(pgdir, ea);
		if (access & spp)
			rc = -2;
		else
			rc = __hash_page_4K(ea, access, vsid, ptep, trap,
					    local, ssize, spp);
	}

#ifndef CONFIG_PPC_64K_PAGES
	DBG_LOW(" o-pte: %016lx\n", pte_val(*ptep));
#else
	DBG_LOW(" o-pte: %016lx %016lx\n", pte_val(*ptep),
		pte_val(*(ptep + PTRS_PER_PTE)));
#endif
	DBG_LOW(" -> rc=%d\n", rc);
	return rc;
}
EXPORT_SYMBOL_GPL(hash_page);

void hash_preload(struct mm_struct *mm, unsigned long ea,
		  unsigned long access, unsigned long trap)
{
	unsigned long vsid;
	void *pgdir;
	pte_t *ptep;
	cpumask_t mask;
	unsigned long flags;
	int local = 0;
	int ssize;

	BUG_ON(REGION_ID(ea) != USER_REGION_ID);

#ifdef CONFIG_PPC_MM_SLICES
	/* We only prefault standard pages for now */
	if (unlikely(get_slice_psize(mm, ea) != mm->context.user_psize))
		return;
#endif

	DBG_LOW("hash_preload(mm=%p, mm->pgdir=%p, ea=%016lx, access=%lx,"
		" trap=%lx\n", mm, mm->pgd, ea, access, trap);

	/* Get Linux PTE if available */
	pgdir = mm->pgd;
	if (pgdir == NULL)
		return;
	ptep = find_linux_pte(pgdir, ea);
	if (!ptep)
		return;

#ifdef CONFIG_PPC_64K_PAGES
	/* If either _PAGE_4K_PFN or _PAGE_NO_CACHE is set (and we are on
	 * a 64K kernel), then we don't preload, hash_page() will take
	 * care of it once we actually try to access the page.
	 * That way we don't have to duplicate all of the logic for segment
	 * page size demotion here
	 */
	if (pte_val(*ptep) & (_PAGE_4K_PFN | _PAGE_NO_CACHE))
		return;
#endif /* CONFIG_PPC_64K_PAGES */

	/* Get VSID */
	ssize = user_segment_size(ea);
	vsid = get_vsid(mm->context.id, ea, ssize);

	/* Hash doesn't like irqs */
	local_irq_save(flags);

	/* Is that local to this CPU ? */
	mask = cpumask_of_cpu(smp_processor_id());
	if (cpus_equal(mm->cpu_vm_mask, mask))
		local = 1;

	/* Hash it in */
#ifdef CONFIG_PPC_HAS_HASH_64K
	if (mm->context.user_psize == MMU_PAGE_64K)
		__hash_page_64K(ea, access, vsid, ptep, trap, local, ssize);
	else
#endif /* CONFIG_PPC_HAS_HASH_64K */
		__hash_page_4K(ea, access, vsid, ptep, trap, local, ssize,
			       subpage_protection(pgdir, ea));

	local_irq_restore(flags);
}

/* WARNING: This is called from hash_low_64.S, if you change this prototype,
 * do not forget to update the assembly call site !
 */
void flush_hash_page(unsigned long va, real_pte_t pte, int psize, int ssize,
		     int local)
{
	unsigned long hash, index, shift, hidx, slot;

	DBG_LOW("flush_hash_page(va=%016x)\n", va);
	pte_iterate_hashed_subpages(pte, psize, va, index, shift) {
		hash = hpt_hash(va, shift, ssize);
		hidx = __rpte_to_hidx(pte, index);
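		/* an entry in the secondary bucket uses the ones-complement
		 * of the primary hash */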
		if (hidx & _PTEIDX_SECONDARY)
			hash = ~hash;
		slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
		slot += hidx & _PTEIDX_GROUP_IX;
		DBG_LOW(" sub %d: hash=%x, hidx=%x\n", index, slot, hidx);
		ppc_md.hpte_invalidate(slot, va, psize, ssize, local);
	} pte_iterate_hashed_end();
}

void flush_hash_range(unsigned long number, int local)
{
	if (ppc_md.flush_hash_range)
		ppc_md.flush_hash_range(number, local);
	else {
		int i;
		struct ppc64_tlb_batch *batch =
			&__get_cpu_var(ppc64_tlb_batch);

		for (i = 0; i < number; i++)
			flush_hash_page(batch->vaddr[i], batch->pte[i],
					batch->psize, batch->ssize, local);
	}
}

/*
 * low_hash_fault is called when the low level hash code failed
 * to insert a PTE due to a hypervisor error
 */
void low_hash_fault(struct pt_regs *regs, unsigned long address, int rc)
{
	if (user_mode(regs)) {
#ifdef CONFIG_PPC_SUBPAGE_PROT
		if (rc == -2)
			_exception(SIGSEGV, regs, SEGV_ACCERR, address);
		else
#endif
			_exception(SIGBUS, regs, BUS_ADRERR, address);
	} else
		bad_page_fault(regs, address, SIGBUS);
}

#ifdef CONFIG_DEBUG_PAGEALLOC
static void kernel_map_linear_page(unsigned long vaddr, unsigned long lmi)
{
	unsigned long hash, hpteg;
	unsigned long vsid = get_kernel_vsid(vaddr, mmu_kernel_ssize);
	unsigned long va = hpt_va(vaddr, vsid, mmu_kernel_ssize);
	unsigned long mode = htab_convert_pte_flags(PAGE_KERNEL);
	int ret;

	hash = hpt_hash(va, PAGE_SHIFT, mmu_kernel_ssize);
	hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP);

	ret = ppc_md.hpte_insert(hpteg, va, __pa(vaddr),
				 mode, HPTE_V_BOLTED,
				 mmu_linear_psize, mmu_kernel_ssize);
	BUG_ON (ret < 0);
	spin_lock(&linear_map_hash_lock);
	BUG_ON(linear_map_hash_slots[lmi] & 0x80);
	linear_map_hash_slots[lmi] = ret | 0x80;
	spin_unlock(&linear_map_hash_lock);
}

static void kernel_unmap_linear_page(unsigned long vaddr, unsigned long lmi)
{
	unsigned long hash, hidx, slot;
	unsigned long vsid = get_kernel_vsid(vaddr, mmu_kernel_ssize);
	unsigned long va = hpt_va(vaddr, vsid, mmu_kernel_ssize);

	hash = hpt_hash(va, PAGE_SHIFT, mmu_kernel_ssize);
	spin_lock(&linear_map_hash_lock);
	BUG_ON(!(linear_map_hash_slots[lmi] & 0x80));
	hidx = linear_map_hash_slots[lmi] & 0x7f;
	linear_map_hash_slots[lmi] = 0;
	spin_unlock(&linear_map_hash_lock);
	if (hidx & _PTEIDX_SECONDARY)
		hash = ~hash;
	slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
	slot += hidx & _PTEIDX_GROUP_IX;
	ppc_md.hpte_invalidate(slot, va, mmu_linear_psize, mmu_kernel_ssize, 0);
}

void kernel_map_pages(struct page *page, int numpages, int enable)
{
	unsigned long flags, vaddr, lmi;
	int i;

	local_irq_save(flags);
	for (i = 0; i < numpages; i++, page++) {
		vaddr = (unsigned long)page_address(page);
		lmi = __pa(vaddr) >> PAGE_SHIFT;
		if (lmi >= linear_map_hash_count)
			continue;
		if (enable)
			kernel_map_linear_page(vaddr, lmi);
		else
			kernel_unmap_linear_page(vaddr, lmi);
	}
	local_irq_restore(flags);
}
#endif /* CONFIG_DEBUG_PAGEALLOC */