/*
 * PowerPC64 port by Mike Corrigan and Dave Engebretsen
 *   {mikejc|engebret}@us.ibm.com
 *
 *    Copyright (c) 2000 Mike Corrigan <mikejc@us.ibm.com>
 *
 * SMP scalability work:
 *    Copyright (C) 2001 Anton Blanchard <anton@au.ibm.com>, IBM
 *
 *    Module name: htab.c
 *
 *    Description:
 *      PowerPC Hashed Page Table functions
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#undef DEBUG
#undef DEBUG_LOW

#include <linux/spinlock.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/proc_fs.h>
#include <linux/stat.h>
#include <linux/sysctl.h>
#include <linux/ctype.h>
#include <linux/cache.h>
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/memblock.h>

#include <asm/processor.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>
#include <asm/page.h>
#include <asm/types.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/machdep.h>
#include <asm/prom.h>
#include <asm/abs_addr.h>
#include <asm/tlbflush.h>
#include <asm/io.h>
#include <asm/eeh.h>
#include <asm/tlb.h>
#include <asm/cacheflush.h>
#include <asm/cputable.h>
#include <asm/sections.h>
#include <asm/spu.h>
#include <asm/udbg.h>
#include <asm/code-patching.h>

#ifdef DEBUG
#define DBG(fmt...) udbg_printf(fmt)
#else
#define DBG(fmt...)
#endif

#ifdef DEBUG_LOW
#define DBG_LOW(fmt...) udbg_printf(fmt)
#else
#define DBG_LOW(fmt...)
#endif

#define KB (1024)
#define MB (1024*KB)
#define GB (1024L*MB)

/*
 * Note:  pte   --> Linux PTE
 *        HPTE  --> PowerPC Hashed Page Table Entry
 *
 * Execution context:
 *   htab_initialize is called with the MMU off (of course), but
 *   the kernel has been copied down to zero so it can directly
 *   reference global data.  At this point it is very difficult
 *   to print debug info.
 *
 */

#ifdef CONFIG_U3_DART
extern unsigned long dart_tablebase;
#endif /* CONFIG_U3_DART */

static unsigned long _SDR1;
struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT];

struct hash_pte *htab_address;
unsigned long htab_size_bytes;
unsigned long htab_hash_mask;
EXPORT_SYMBOL_GPL(htab_hash_mask);
int mmu_linear_psize = MMU_PAGE_4K;
int mmu_virtual_psize = MMU_PAGE_4K;
int mmu_vmalloc_psize = MMU_PAGE_4K;
#ifdef CONFIG_SPARSEMEM_VMEMMAP
int mmu_vmemmap_psize = MMU_PAGE_4K;
#endif
int mmu_io_psize = MMU_PAGE_4K;
int mmu_kernel_ssize = MMU_SEGSIZE_256M;
int mmu_highuser_ssize = MMU_SEGSIZE_256M;
u16 mmu_slb_size = 64;
EXPORT_SYMBOL_GPL(mmu_slb_size);
#ifdef CONFIG_HUGETLB_PAGE
unsigned int HPAGE_SHIFT;
#endif
#ifdef CONFIG_PPC_64K_PAGES
int mmu_ci_restrictions;
#endif
#ifdef CONFIG_DEBUG_PAGEALLOC
static u8 *linear_map_hash_slots;
static unsigned long linear_map_hash_count;
static DEFINE_SPINLOCK(linear_map_hash_lock);
#endif /* CONFIG_DEBUG_PAGEALLOC */

/* Definitions of page-size arrays to be used when the sizes are not
 * provided by the firmware.
 */

/* Pre-POWER4 CPUs (4k pages only)
 */
static struct mmu_psize_def mmu_psize_defaults_old[] = {
        [MMU_PAGE_4K] = {
                .shift  = 12,
                .sllp   = 0,
                .penc   = 0,
                .avpnm  = 0,
                .tlbiel = 0,
        },
};

/* POWER4, GPUL, POWER5
 *
 * Support for 16Mb large pages
 */
static struct mmu_psize_def mmu_psize_defaults_gp[] = {
        [MMU_PAGE_4K] = {
                .shift  = 12,
                .sllp   = 0,
                .penc   = 0,
                .avpnm  = 0,
                .tlbiel = 1,
        },
        [MMU_PAGE_16M] = {
                .shift  = 24,
                .sllp   = SLB_VSID_L,
                .penc   = 0,
                .avpnm  = 0x1UL,
                .tlbiel = 0,
        },
};

static unsigned long htab_convert_pte_flags(unsigned long pteflags)
{
        unsigned long rflags = pteflags & 0x1fa;

        /* _PAGE_EXEC -> NOEXEC */
        if ((pteflags & _PAGE_EXEC) == 0)
                rflags |= HPTE_R_N;

        /* PP bits. PAGE_USER is already PP bit 0x2, so we only
         * need to add in 0x1 if it's a read-only user page
         */
        if ((pteflags & _PAGE_USER) && !((pteflags & _PAGE_RW) &&
                                         (pteflags & _PAGE_DIRTY)))
                rflags |= 1;

        /* Always add C */
        return rflags | HPTE_R_C;
}
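
/*
 * Illustrative example (per the PP encoding used above): a read-only user
 * PTE has _PAGE_USER (0x2) set but not both _PAGE_RW and _PAGE_DIRTY, so
 * 0x1 is ORed in, giving PP = 0x3 (user read-only); a kernel read/write
 * page keeps PP = 0x0 (kernel read/write, no user access).
 */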

int htab_bolt_mapping(unsigned long vstart, unsigned long vend,
                      unsigned long pstart, unsigned long prot,
                      int psize, int ssize)
{
        unsigned long vaddr, paddr;
        unsigned int step, shift;
        int ret = 0;

        shift = mmu_psize_defs[psize].shift;
        step = 1 << shift;

        prot = htab_convert_pte_flags(prot);

        DBG("htab_bolt_mapping(%lx..%lx -> %lx (%lx,%d,%d)\n",
            vstart, vend, pstart, prot, psize, ssize);

        for (vaddr = vstart, paddr = pstart; vaddr < vend;
             vaddr += step, paddr += step) {
                unsigned long hash, hpteg;
                unsigned long vsid = get_kernel_vsid(vaddr, ssize);
                unsigned long va = hpt_va(vaddr, vsid, ssize);
                unsigned long tprot = prot;

                /* Make kernel text executable */
                if (overlaps_kernel_text(vaddr, vaddr + step))
                        tprot &= ~HPTE_R_N;

                hash = hpt_hash(va, shift, ssize);
                hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP);

                BUG_ON(!ppc_md.hpte_insert);
                ret = ppc_md.hpte_insert(hpteg, va, paddr, tprot,
                                         HPTE_V_BOLTED, psize, ssize);

                if (ret < 0)
                        break;
#ifdef CONFIG_DEBUG_PAGEALLOC
                if ((paddr >> PAGE_SHIFT) < linear_map_hash_count)
                        linear_map_hash_slots[paddr >> PAGE_SHIFT] = ret | 0x80;
#endif /* CONFIG_DEBUG_PAGEALLOC */
        }
        return ret < 0 ? ret : 0;
}

#ifdef CONFIG_MEMORY_HOTPLUG
static int htab_remove_mapping(unsigned long vstart, unsigned long vend,
                      int psize, int ssize)
{
        unsigned long vaddr;
        unsigned int step, shift;

        shift = mmu_psize_defs[psize].shift;
        step = 1 << shift;

        if (!ppc_md.hpte_removebolted) {
                printk(KERN_WARNING "Platform doesn't implement "
                                "hpte_removebolted\n");
                return -EINVAL;
        }

        for (vaddr = vstart; vaddr < vend; vaddr += step)
                ppc_md.hpte_removebolted(vaddr, psize, ssize);

        return 0;
}
#endif /* CONFIG_MEMORY_HOTPLUG */

static int __init htab_dt_scan_seg_sizes(unsigned long node,
                                         const char *uname, int depth,
                                         void *data)
{
        char *type = of_get_flat_dt_prop(node, "device_type", NULL);
        u32 *prop;
        unsigned long size = 0;

        /* We are scanning "cpu" nodes only */
        if (type == NULL || strcmp(type, "cpu") != 0)
                return 0;

        prop = (u32 *)of_get_flat_dt_prop(node, "ibm,processor-segment-sizes",
                                          &size);
        if (prop == NULL)
                return 0;
        for (; size >= 4; size -= 4, ++prop) {
                if (prop[0] == 40) {
                        DBG("1T segment support detected\n");
                        cur_cpu_spec->cpu_features |= CPU_FTR_1T_SEGMENT;
                        return 1;
                }
        }
        cur_cpu_spec->cpu_features &= ~CPU_FTR_NO_SLBIE_B;
        return 0;
}

static void __init htab_init_seg_sizes(void)
{
        of_scan_flat_dt(htab_dt_scan_seg_sizes, NULL);
}

static int __init htab_dt_scan_page_sizes(unsigned long node,
                                          const char *uname, int depth,
                                          void *data)
{
        char *type = of_get_flat_dt_prop(node, "device_type", NULL);
        u32 *prop;
        unsigned long size = 0;

        /* We are scanning "cpu" nodes only */
        if (type == NULL || strcmp(type, "cpu") != 0)
                return 0;

        prop = (u32 *)of_get_flat_dt_prop(node,
                                          "ibm,segment-page-sizes", &size);
        if (prop != NULL) {
                DBG("Page sizes from device-tree:\n");
                size /= 4;
                cur_cpu_spec->cpu_features &= ~(CPU_FTR_16M_PAGE);
                while(size > 0) {
                        unsigned int shift = prop[0];
                        unsigned int slbenc = prop[1];
                        unsigned int lpnum = prop[2];
                        unsigned int lpenc = 0;
                        struct mmu_psize_def *def;
                        int idx = -1;

                        size -= 3; prop += 3;
                        while(size > 0 && lpnum) {
                                if (prop[0] == shift)
                                        lpenc = prop[1];
                                prop += 2; size -= 2;
                                lpnum--;
                        }
                        switch(shift) {
                        case 0xc:
                                idx = MMU_PAGE_4K;
                                break;
                        case 0x10:
                                idx = MMU_PAGE_64K;
                                break;
                        case 0x14:
                                idx = MMU_PAGE_1M;
                                break;
                        case 0x18:
                                idx = MMU_PAGE_16M;
                                cur_cpu_spec->cpu_features |= CPU_FTR_16M_PAGE;
                                break;
                        case 0x22:
                                idx = MMU_PAGE_16G;
                                break;
                        }
                        if (idx < 0)
                                continue;
                        def = &mmu_psize_defs[idx];
                        def->shift = shift;
                        if (shift <= 23)
                                def->avpnm = 0;
                        else
                                def->avpnm = (1 << (shift - 23)) - 1;
                        def->sllp = slbenc;
                        def->penc = lpenc;
                        /* We don't know for sure what's up with tlbiel, so
                         * for now we only set it for 4K and 64K pages
                         */
                        if (idx == MMU_PAGE_4K || idx == MMU_PAGE_64K)
                                def->tlbiel = 1;
                        else
                                def->tlbiel = 0;

                        DBG(" %d: shift=%02x, sllp=%04lx, avpnm=%08lx, "
                            "tlbiel=%d, penc=%d\n",
                            idx, shift, def->sllp, def->avpnm, def->tlbiel,
                            def->penc);
                }
                return 1;
        }
        return 0;
}
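
/*
 * Illustrative "ibm,segment-page-sizes" layout as parsed above (made-up
 * values, not from any particular machine): the property is a cell list
 * of <shift slbenc lpnum { pshift penc } ...>, so a stream such as
 * <0x0c 0x0 0x1  0x0c 0x0> describes a 4K base page size (shift 12, SLB
 * encoding 0) with one supported HPTE page size, also 4K, HPTE encoding 0.
 */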

#ifdef CONFIG_HUGETLB_PAGE
/* Scan for 16G memory blocks that have been set aside for huge pages
 * and reserve those blocks for 16G huge pages.
 */
static int __init htab_dt_scan_hugepage_blocks(unsigned long node,
                                        const char *uname, int depth,
                                        void *data) {
        char *type = of_get_flat_dt_prop(node, "device_type", NULL);
        unsigned long *addr_prop;
        u32 *page_count_prop;
        unsigned int expected_pages;
        long unsigned int phys_addr;
        long unsigned int block_size;

        /* We are scanning "memory" nodes only */
        if (type == NULL || strcmp(type, "memory") != 0)
                return 0;

        /* This property is the log base 2 of the number of virtual pages that
         * will represent this memory block. */
        page_count_prop = of_get_flat_dt_prop(node, "ibm,expected#pages", NULL);
        if (page_count_prop == NULL)
                return 0;
        expected_pages = (1 << page_count_prop[0]);
        addr_prop = of_get_flat_dt_prop(node, "reg", NULL);
        if (addr_prop == NULL)
                return 0;
        phys_addr = addr_prop[0];
        block_size = addr_prop[1];
        if (block_size != (16 * GB))
                return 0;
        printk(KERN_INFO "Huge page(16GB) memory: "
                        "addr = 0x%lX size = 0x%lX pages = %d\n",
                        phys_addr, block_size, expected_pages);
        if (phys_addr + (16 * GB) <= memblock_end_of_DRAM()) {
                memblock_reserve(phys_addr, block_size * expected_pages);
                add_gpage(phys_addr, block_size, expected_pages);
        }
        return 0;
}
#endif /* CONFIG_HUGETLB_PAGE */

static void __init htab_init_page_sizes(void)
{
        int rc;

        /* Default to 4K pages only */
        memcpy(mmu_psize_defs, mmu_psize_defaults_old,
               sizeof(mmu_psize_defaults_old));

        /*
         * Try to find the available page sizes in the device-tree
         */
        rc = of_scan_flat_dt(htab_dt_scan_page_sizes, NULL);
        if (rc != 0)  /* Found */
                goto found;

        /*
         * Not in the device-tree, let's fallback on known size
         * list for 16M capable GP & GR
         */
        if (cpu_has_feature(CPU_FTR_16M_PAGE))
                memcpy(mmu_psize_defs, mmu_psize_defaults_gp,
                       sizeof(mmu_psize_defaults_gp));
 found:
#ifndef CONFIG_DEBUG_PAGEALLOC
        /*
         * Pick a size for the linear mapping. Currently, we only support
         * 16M, 1M and 4K which is the default
         */
        if (mmu_psize_defs[MMU_PAGE_16M].shift)
                mmu_linear_psize = MMU_PAGE_16M;
        else if (mmu_psize_defs[MMU_PAGE_1M].shift)
                mmu_linear_psize = MMU_PAGE_1M;
#endif /* CONFIG_DEBUG_PAGEALLOC */

#ifdef CONFIG_PPC_64K_PAGES
        /*
         * Pick a size for the ordinary pages. Default is 4K, we support
         * 64K for user mappings and vmalloc if supported by the processor.
         * We only use 64k for ioremap if the processor
         * (and firmware) support cache-inhibited large pages.
         * If not, we use 4k and set mmu_ci_restrictions so that
         * hash_page knows to switch processes that use cache-inhibited
         * mappings to 4k pages.
         */
        if (mmu_psize_defs[MMU_PAGE_64K].shift) {
                mmu_virtual_psize = MMU_PAGE_64K;
                mmu_vmalloc_psize = MMU_PAGE_64K;
                if (mmu_linear_psize == MMU_PAGE_4K)
                        mmu_linear_psize = MMU_PAGE_64K;
                if (cpu_has_feature(CPU_FTR_CI_LARGE_PAGE)) {
                        /*
                         * Don't use 64k pages for ioremap on pSeries, since
                         * that would stop us accessing the HEA ethernet.
                         */
                        if (!machine_is(pseries))
                                mmu_io_psize = MMU_PAGE_64K;
                } else
                        mmu_ci_restrictions = 1;
        }
#endif /* CONFIG_PPC_64K_PAGES */

#ifdef CONFIG_SPARSEMEM_VMEMMAP
        /* We try to use 16M pages for vmemmap if that is supported
         * and we have at least 1G of RAM at boot
         */
        if (mmu_psize_defs[MMU_PAGE_16M].shift &&
            memblock_phys_mem_size() >= 0x40000000)
                mmu_vmemmap_psize = MMU_PAGE_16M;
        else if (mmu_psize_defs[MMU_PAGE_64K].shift)
                mmu_vmemmap_psize = MMU_PAGE_64K;
        else
                mmu_vmemmap_psize = MMU_PAGE_4K;
#endif /* CONFIG_SPARSEMEM_VMEMMAP */

        printk(KERN_DEBUG "Page orders: linear mapping = %d, "
               "virtual = %d, io = %d"
#ifdef CONFIG_SPARSEMEM_VMEMMAP
               ", vmemmap = %d"
#endif
               "\n",
               mmu_psize_defs[mmu_linear_psize].shift,
               mmu_psize_defs[mmu_virtual_psize].shift,
               mmu_psize_defs[mmu_io_psize].shift
#ifdef CONFIG_SPARSEMEM_VMEMMAP
               ,mmu_psize_defs[mmu_vmemmap_psize].shift
#endif
               );

#ifdef CONFIG_HUGETLB_PAGE
        /* Reserve 16G huge page memory sections for huge pages */
        of_scan_flat_dt(htab_dt_scan_hugepage_blocks, NULL);
#endif /* CONFIG_HUGETLB_PAGE */
}

static int __init htab_dt_scan_pftsize(unsigned long node,
                                       const char *uname, int depth,
                                       void *data)
{
        char *type = of_get_flat_dt_prop(node, "device_type", NULL);
        u32 *prop;

        /* We are scanning "cpu" nodes only */
        if (type == NULL || strcmp(type, "cpu") != 0)
                return 0;

        prop = (u32 *)of_get_flat_dt_prop(node, "ibm,pft-size", NULL);
        if (prop != NULL) {
                /* pft_size[0] is the NUMA CEC cookie */
                ppc64_pft_size = prop[1];
                return 1;
        }
        return 0;
}

static unsigned long __init htab_get_table_size(void)
{
        unsigned long mem_size, rnd_mem_size, pteg_count, psize;

        /* If hash size isn't already provided by the platform, we try to
         * retrieve it from the device-tree. If it's not there either, we
         * calculate it now based on the total RAM size.
         */
        if (ppc64_pft_size == 0)
                of_scan_flat_dt(htab_dt_scan_pftsize, NULL);
        if (ppc64_pft_size)
                return 1UL << ppc64_pft_size;

        /* round mem_size up to next power of 2 */
        mem_size = memblock_phys_mem_size();
        rnd_mem_size = 1UL << __ilog2(mem_size);
        if (rnd_mem_size < mem_size)
                rnd_mem_size <<= 1;

        /* # pages / 2 */
        psize = mmu_psize_defs[mmu_virtual_psize].shift;
        pteg_count = max(rnd_mem_size >> (psize + 1), 1UL << 11);

        return pteg_count << 7;
}
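
/*
 * Worked example (assuming a 4K base page size, i.e. psize = 12): with
 * 1GB of RAM, rnd_mem_size = 2^30, so pteg_count = max(2^30 >> 13, 2^11)
 * = 2^17. Each PTEG is 128 bytes (8 HPTEs of 16 bytes each, hence the
 * << 7), giving a 16MB hash table: one PTEG per two real pages.
 */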

#ifdef CONFIG_MEMORY_HOTPLUG
void create_section_mapping(unsigned long start, unsigned long end)
{
        BUG_ON(htab_bolt_mapping(start, end, __pa(start),
                                 pgprot_val(PAGE_KERNEL), mmu_linear_psize,
                                 mmu_kernel_ssize));
}

int remove_section_mapping(unsigned long start, unsigned long end)
{
        return htab_remove_mapping(start, end, mmu_linear_psize,
                                   mmu_kernel_ssize);
}
#endif /* CONFIG_MEMORY_HOTPLUG */

#define FUNCTION_TEXT(A)        ((*(unsigned long *)(A)))

static void __init htab_finish_init(void)
{
        extern unsigned int *htab_call_hpte_insert1;
        extern unsigned int *htab_call_hpte_insert2;
        extern unsigned int *htab_call_hpte_remove;
        extern unsigned int *htab_call_hpte_updatepp;

#ifdef CONFIG_PPC_HAS_HASH_64K
        extern unsigned int *ht64_call_hpte_insert1;
        extern unsigned int *ht64_call_hpte_insert2;
        extern unsigned int *ht64_call_hpte_remove;
        extern unsigned int *ht64_call_hpte_updatepp;

        patch_branch(ht64_call_hpte_insert1,
                FUNCTION_TEXT(ppc_md.hpte_insert),
                BRANCH_SET_LINK);
        patch_branch(ht64_call_hpte_insert2,
                FUNCTION_TEXT(ppc_md.hpte_insert),
                BRANCH_SET_LINK);
        patch_branch(ht64_call_hpte_remove,
                FUNCTION_TEXT(ppc_md.hpte_remove),
                BRANCH_SET_LINK);
        patch_branch(ht64_call_hpte_updatepp,
                FUNCTION_TEXT(ppc_md.hpte_updatepp),
                BRANCH_SET_LINK);

#endif /* CONFIG_PPC_HAS_HASH_64K */

        patch_branch(htab_call_hpte_insert1,
                FUNCTION_TEXT(ppc_md.hpte_insert),
                BRANCH_SET_LINK);
        patch_branch(htab_call_hpte_insert2,
                FUNCTION_TEXT(ppc_md.hpte_insert),
                BRANCH_SET_LINK);
        patch_branch(htab_call_hpte_remove,
                FUNCTION_TEXT(ppc_md.hpte_remove),
                BRANCH_SET_LINK);
        patch_branch(htab_call_hpte_updatepp,
                FUNCTION_TEXT(ppc_md.hpte_updatepp),
                BRANCH_SET_LINK);
}

static void __init htab_initialize(void)
{
        unsigned long table;
        unsigned long pteg_count;
        unsigned long prot;
        unsigned long base = 0, size = 0, limit;
        struct memblock_region *reg;

        DBG(" -> htab_initialize()\n");

        /* Initialize segment sizes */
        htab_init_seg_sizes();

        /* Initialize page sizes */
        htab_init_page_sizes();

        if (cpu_has_feature(CPU_FTR_1T_SEGMENT)) {
                mmu_kernel_ssize = MMU_SEGSIZE_1T;
                mmu_highuser_ssize = MMU_SEGSIZE_1T;
                printk(KERN_INFO "Using 1TB segments\n");
        }

        /*
         * Calculate the required size of the htab.  We want the number of
         * PTEGs to equal one half the number of real pages.
         */
        htab_size_bytes = htab_get_table_size();
        pteg_count = htab_size_bytes >> 7;

        htab_hash_mask = pteg_count - 1;

        if (firmware_has_feature(FW_FEATURE_LPAR)) {
                /* Using a hypervisor which owns the htab */
                htab_address = NULL;
                _SDR1 = 0;
        } else {
                /* Find storage for the HPT.  Must be contiguous in
                 * the absolute address space. On cell we want it to be
                 * in the first 2 Gig so we can use it for IOMMU hacks.
                 */
                if (machine_is(cell))
                        limit = 0x80000000;
                else
                        limit = MEMBLOCK_ALLOC_ANYWHERE;

                table = memblock_alloc_base(htab_size_bytes, htab_size_bytes, limit);

                DBG("Hash table allocated at %lx, size: %lx\n", table,
                    htab_size_bytes);

                htab_address = abs_to_virt(table);

                /* htab absolute addr + encoded htabsize */
                _SDR1 = table + __ilog2(pteg_count) - 11;
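                /*
                 * Illustrative encoding: the table is aligned to its size,
                 * so its low bits are zero and the HTABSIZE field can just
                 * be added in. For a 16MB table (2^17 PTEGs) it is
                 * __ilog2(2^17) - 11 = 6, the number of hash bits used
                 * beyond the architected minimum of 2^11 PTEGs.
                 */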

                /* Initialize the HPT with no entries */
                memset((void *)table, 0, htab_size_bytes);

                /* Set SDR1 */
                mtspr(SPRN_SDR1, _SDR1);
        }

        prot = pgprot_val(PAGE_KERNEL);

#ifdef CONFIG_DEBUG_PAGEALLOC
        linear_map_hash_count = memblock_end_of_DRAM() >> PAGE_SHIFT;
        linear_map_hash_slots = __va(memblock_alloc_base(linear_map_hash_count,
                                                    1, ppc64_rma_size));
        memset(linear_map_hash_slots, 0, linear_map_hash_count);
#endif /* CONFIG_DEBUG_PAGEALLOC */

        /* On U3 based machines, we need to reserve the DART area and
         * _NOT_ map it to avoid cache paradoxes as it's remapped non
         * cacheable later on
         */

        /* create the bolted linear mapping in the hash table */
        for_each_memblock(memory, reg) {
                base = (unsigned long)__va(reg->base);
                size = reg->size;

                DBG("creating mapping for region: %lx..%lx (prot: %lx)\n",
                    base, size, prot);

#ifdef CONFIG_U3_DART
                /* Do not map the DART space. Fortunately, it will be aligned
                 * in such a way that it will not cross two memblock regions and
                 * will fit within a single 16Mb page.
                 * The DART space is assumed to be a full 16Mb region even if
                 * we only use 2Mb of that space. We will use more of it later
                 * for AGP GART. We have to use a full 16Mb large page.
                 */
                DBG("DART base: %lx\n", dart_tablebase);

                if (dart_tablebase != 0 && dart_tablebase >= base
                    && dart_tablebase < (base + size)) {
                        unsigned long dart_table_end = dart_tablebase + 16 * MB;
                        if (base != dart_tablebase)
                                BUG_ON(htab_bolt_mapping(base, dart_tablebase,
                                                        __pa(base), prot,
                                                        mmu_linear_psize,
                                                        mmu_kernel_ssize));
                        if ((base + size) > dart_table_end)
                                BUG_ON(htab_bolt_mapping(dart_tablebase+16*MB,
                                                        base + size,
                                                        __pa(dart_table_end),
                                                        prot,
                                                        mmu_linear_psize,
                                                        mmu_kernel_ssize));
                        continue;
                }
#endif /* CONFIG_U3_DART */
                BUG_ON(htab_bolt_mapping(base, base + size, __pa(base),
                                prot, mmu_linear_psize, mmu_kernel_ssize));
        }
        memblock_set_current_limit(MEMBLOCK_ALLOC_ANYWHERE);

        /*
         * If we have a memory_limit and we've allocated TCEs then we need to
         * explicitly map the TCE area at the top of RAM. We also cope with the
         * case that the TCEs start below memory_limit.
         * tce_alloc_start/end are 16MB aligned so the mapping should work
         * for either 4K or 16MB pages.
         */
        if (tce_alloc_start) {
                tce_alloc_start = (unsigned long)__va(tce_alloc_start);
                tce_alloc_end = (unsigned long)__va(tce_alloc_end);

                if (base + size >= tce_alloc_start)
                        tce_alloc_start = base + size + 1;

                BUG_ON(htab_bolt_mapping(tce_alloc_start, tce_alloc_end,
                                         __pa(tce_alloc_start), prot,
                                         mmu_linear_psize, mmu_kernel_ssize));
        }

        htab_finish_init();

        DBG(" <- htab_initialize()\n");
}
#undef KB
#undef MB

void __init early_init_mmu(void)
{
        /* Setup initial STAB address in the PACA */
        get_paca()->stab_real = __pa((u64)&initial_stab);
        get_paca()->stab_addr = (u64)&initial_stab;

        /* Initialize the MMU Hash table and create the linear mapping
         * of memory. Has to be done before stab/slb initialization as
         * this is currently where the page size encoding is obtained
         */
        htab_initialize();

        /* Initialize stab / SLB management except on iSeries
         */
        if (cpu_has_feature(CPU_FTR_SLB))
                slb_initialize();
        else if (!firmware_has_feature(FW_FEATURE_ISERIES))
                stab_initialize(get_paca()->stab_real);
}

#ifdef CONFIG_SMP
void __cpuinit early_init_mmu_secondary(void)
{
        /* Initialize hash table for that CPU */
        if (!firmware_has_feature(FW_FEATURE_LPAR))
                mtspr(SPRN_SDR1, _SDR1);

        /* Initialize STAB/SLB. We use a virtual address as it works
         * in real mode on pSeries and we want a virtual address on
         * iSeries anyway
         */
        if (cpu_has_feature(CPU_FTR_SLB))
                slb_initialize();
        else
                stab_initialize(get_paca()->stab_addr);
}
#endif /* CONFIG_SMP */

/*
 * Called by asm hashtable.S for doing lazy icache flush
 */
unsigned int hash_page_do_lazy_icache(unsigned int pp, pte_t pte, int trap)
{
        struct page *page;

        if (!pfn_valid(pte_pfn(pte)))
                return pp;

        page = pte_page(pte);

        /* page is dirty */
        if (!test_bit(PG_arch_1, &page->flags) && !PageReserved(page)) {
                if (trap == 0x400) {
                        flush_dcache_icache_page(page);
                        set_bit(PG_arch_1, &page->flags);
                } else
                        pp |= HPTE_R_N;
        }
        return pp;
}
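
/*
 * Note: trap 0x400 is the instruction storage interrupt vector, so the
 * flush above only happens on an execute fault; a data fault on a page
 * whose icache is not known to be clean instead leaves the HPTE
 * no-execute, deferring the flush until the page is actually executed.
 */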

#ifdef CONFIG_PPC_MM_SLICES
unsigned int get_paca_psize(unsigned long addr)
{
        unsigned long index, slices;

        if (addr < SLICE_LOW_TOP) {
                slices = get_paca()->context.low_slices_psize;
                index = GET_LOW_SLICE_INDEX(addr);
        } else {
                slices = get_paca()->context.high_slices_psize;
                index = GET_HIGH_SLICE_INDEX(addr);
        }
        return (slices >> (index * 4)) & 0xF;
}
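
/*
 * Each slice's page size occupies 4 bits of the psize word, e.g. slice
 * index 3 lives in bits 12..15, so shifting right by (index * 4) and
 * masking with 0xF recovers the MMU_PAGE_* value for that slice.
 */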

#else
unsigned int get_paca_psize(unsigned long addr)
{
        return get_paca()->context.user_psize;
}
#endif

/*
 * Demote a segment to using 4k pages.
 * For now this makes the whole process use 4k pages.
 */
#ifdef CONFIG_PPC_64K_PAGES
void demote_segment_4k(struct mm_struct *mm, unsigned long addr)
{
        if (get_slice_psize(mm, addr) == MMU_PAGE_4K)
                return;
        slice_set_range_psize(mm, addr, 1, MMU_PAGE_4K);
#ifdef CONFIG_SPU_BASE
        spu_flush_all_slbs(mm);
#endif
        if (get_paca_psize(addr) != MMU_PAGE_4K) {
                get_paca()->context = mm->context;
                slb_flush_and_rebolt();
        }
}
#endif /* CONFIG_PPC_64K_PAGES */

#ifdef CONFIG_PPC_SUBPAGE_PROT
/*
 * This looks up a 2-bit protection code for a 4k subpage of a 64k page.
 * Userspace sets the subpage permissions using the subpage_prot system call.
 *
 * Result is 0: full permissions, _PAGE_RW: read-only,
 * _PAGE_USER or _PAGE_USER|_PAGE_RW: no access.
 */
static int subpage_protection(struct mm_struct *mm, unsigned long ea)
{
        struct subpage_prot_table *spt = &mm->context.spt;
        u32 spp = 0;
        u32 **sbpm, *sbpp;

        if (ea >= spt->maxaddr)
                return 0;
        if (ea < 0x100000000) {
                /* addresses below 4GB use spt->low_prot */
                sbpm = spt->low_prot;
        } else {
                sbpm = spt->protptrs[ea >> SBP_L3_SHIFT];
                if (!sbpm)
                        return 0;
        }
        sbpp = sbpm[(ea >> SBP_L2_SHIFT) & (SBP_L2_COUNT - 1)];
        if (!sbpp)
                return 0;
        spp = sbpp[(ea >> PAGE_SHIFT) & (SBP_L1_COUNT - 1)];

        /* extract 2-bit bitfield for this 4k subpage */
        spp >>= 30 - 2 * ((ea >> 12) & 0xf);

        /* turn 0,1,2,3 into combination of _PAGE_USER and _PAGE_RW */
        spp = ((spp & 2) ? _PAGE_USER : 0) | ((spp & 1) ? _PAGE_RW : 0);
        return spp;
}
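
/*
 * Example: subpage 0 of a 64K page ((ea >> 12) & 0xf == 0) keeps its
 * 2-bit code in bits 31:30 of the word, subpage 15 in bits 1:0. Code 1
 * maps to _PAGE_RW (writes refused, i.e. read-only); codes 2 and 3
 * include _PAGE_USER and refuse any user access.
 */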

#else /* CONFIG_PPC_SUBPAGE_PROT */
static inline int subpage_protection(struct mm_struct *mm, unsigned long ea)
{
        return 0;
}
#endif

void hash_failure_debug(unsigned long ea, unsigned long access,
                        unsigned long vsid, unsigned long trap,
                        int ssize, int psize, unsigned long pte)
{
        if (!printk_ratelimit())
                return;
        pr_info("mm: Hashing failure ! EA=0x%lx access=0x%lx current=%s\n",
                ea, access, current->comm);
        pr_info("    trap=0x%lx vsid=0x%lx ssize=%d psize=%d pte=0x%lx\n",
                trap, vsid, ssize, psize, pte);
}

/* Result code is:
 *  0 - handled
 *  1 - normal page fault
 * -1 - critical hash insertion error
 * -2 - access not permitted by subpage protection mechanism
 */
int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
{
        pgd_t *pgdir;
        unsigned long vsid;
        struct mm_struct *mm;
        pte_t *ptep;
        unsigned hugeshift;
        const struct cpumask *tmp;
        int rc, user_region = 0, local = 0;
        int psize, ssize;

        DBG_LOW("hash_page(ea=%016lx, access=%lx, trap=%lx\n",
                ea, access, trap);

        if ((ea & ~REGION_MASK) >= PGTABLE_RANGE) {
                DBG_LOW(" out of pgtable range !\n");
                return 1;
        }

        /* Get region & vsid */
        switch (REGION_ID(ea)) {
        case USER_REGION_ID:
                user_region = 1;
                mm = current->mm;
                if (! mm) {
                        DBG_LOW(" user region with no mm !\n");
                        return 1;
                }
                psize = get_slice_psize(mm, ea);
                ssize = user_segment_size(ea);
                vsid = get_vsid(mm->context.id, ea, ssize);
                break;
        case VMALLOC_REGION_ID:
                mm = &init_mm;
                vsid = get_kernel_vsid(ea, mmu_kernel_ssize);
                if (ea < VMALLOC_END)
                        psize = mmu_vmalloc_psize;
                else
                        psize = mmu_io_psize;
                ssize = mmu_kernel_ssize;
                break;
        default:
                /* Not a valid range
                 * Send the problem up to do_page_fault
                 */
                return 1;
        }
        DBG_LOW(" mm=%p, mm->pgdir=%p, vsid=%016lx\n", mm, mm->pgd, vsid);

        /* Get pgdir */
        pgdir = mm->pgd;
        if (pgdir == NULL)
                return 1;

        /* Check CPU locality */
        tmp = cpumask_of(smp_processor_id());
        if (user_region && cpumask_equal(mm_cpumask(mm), tmp))
                local = 1;

#ifndef CONFIG_PPC_64K_PAGES
        /* If we use 4K pages and our psize is not 4K, then we might
         * be hitting a special driver mapping, and need to align the
         * address before we fetch the PTE.
         *
         * It could also be a hugepage mapping, in which case this is
         * not necessary, but it's not harmful, either.
         */
        if (psize != MMU_PAGE_4K)
                ea &= ~((1ul << mmu_psize_defs[psize].shift) - 1);
#endif /* CONFIG_PPC_64K_PAGES */

        /* Get PTE and page size from page tables */
        ptep = find_linux_pte_or_hugepte(pgdir, ea, &hugeshift);
        if (ptep == NULL || !pte_present(*ptep)) {
                DBG_LOW(" no PTE !\n");
                return 1;
        }

        /* Add _PAGE_PRESENT to the required access perm */
        access |= _PAGE_PRESENT;

        /* Pre-check access permissions (will be re-checked atomically
         * in __hash_page_XX but this pre-check is a fast path
         */
        if (access & ~pte_val(*ptep)) {
                DBG_LOW(" no access !\n");
                return 1;
        }

#ifdef CONFIG_HUGETLB_PAGE
        if (hugeshift)
                return __hash_page_huge(ea, access, vsid, ptep, trap, local,
                                        ssize, hugeshift, psize);
#endif /* CONFIG_HUGETLB_PAGE */

#ifndef CONFIG_PPC_64K_PAGES
        DBG_LOW(" i-pte: %016lx\n", pte_val(*ptep));
#else
        DBG_LOW(" i-pte: %016lx %016lx\n", pte_val(*ptep),
                pte_val(*(ptep + PTRS_PER_PTE)));
#endif
        /* Do actual hashing */
#ifdef CONFIG_PPC_64K_PAGES
        /* If _PAGE_4K_PFN is set, make sure this is a 4k segment */
        if ((pte_val(*ptep) & _PAGE_4K_PFN) && psize == MMU_PAGE_64K) {
                demote_segment_4k(mm, ea);
                psize = MMU_PAGE_4K;
        }

        /* If this PTE is non-cacheable and we have restrictions on
         * using non cacheable large pages, then we switch to 4k
         */
        if (mmu_ci_restrictions && psize == MMU_PAGE_64K &&
            (pte_val(*ptep) & _PAGE_NO_CACHE)) {
                if (user_region) {
                        demote_segment_4k(mm, ea);
                        psize = MMU_PAGE_4K;
                } else if (ea < VMALLOC_END) {
                        /*
                         * some driver did a non-cacheable mapping
                         * in vmalloc space, so switch vmalloc
                         * to 4k pages
                         */
                        printk(KERN_ALERT "Reducing vmalloc segment "
                               "to 4kB pages because of "
                               "non-cacheable mapping\n");
                        psize = mmu_vmalloc_psize = MMU_PAGE_4K;
#ifdef CONFIG_SPU_BASE
                        spu_flush_all_slbs(mm);
#endif
                }
        }
        if (user_region) {
                if (psize != get_paca_psize(ea)) {
                        get_paca()->context = mm->context;
                        slb_flush_and_rebolt();
                }
        } else if (get_paca()->vmalloc_sllp !=
                   mmu_psize_defs[mmu_vmalloc_psize].sllp) {
                get_paca()->vmalloc_sllp =
                        mmu_psize_defs[mmu_vmalloc_psize].sllp;
                slb_vmalloc_update();
        }
#endif /* CONFIG_PPC_64K_PAGES */

#ifdef CONFIG_PPC_HAS_HASH_64K
        if (psize == MMU_PAGE_64K)
                rc = __hash_page_64K(ea, access, vsid, ptep, trap, local, ssize);
        else
#endif /* CONFIG_PPC_HAS_HASH_64K */
        {
                int spp = subpage_protection(mm, ea);
                if (access & spp)
                        rc = -2;
                else
                        rc = __hash_page_4K(ea, access, vsid, ptep, trap,
                                            local, ssize, spp);
        }

        /* Dump some info in case of hash insertion failure, they should
         * never happen so it is really useful to know if/when they do
         */
        if (rc == -1)
                hash_failure_debug(ea, access, vsid, trap, ssize, psize,
                                   pte_val(*ptep));
#ifndef CONFIG_PPC_64K_PAGES
        DBG_LOW(" o-pte: %016lx\n", pte_val(*ptep));
#else
        DBG_LOW(" o-pte: %016lx %016lx\n", pte_val(*ptep),
                pte_val(*(ptep + PTRS_PER_PTE)));
#endif
        DBG_LOW(" -> rc=%d\n", rc);
        return rc;
}
EXPORT_SYMBOL_GPL(hash_page);

void hash_preload(struct mm_struct *mm, unsigned long ea,
                  unsigned long access, unsigned long trap)
{
        unsigned long vsid;
        pgd_t *pgdir;
        pte_t *ptep;
        unsigned long flags;
        int rc, ssize, local = 0;

        BUG_ON(REGION_ID(ea) != USER_REGION_ID);

#ifdef CONFIG_PPC_MM_SLICES
        /* We only prefault standard pages for now */
        if (unlikely(get_slice_psize(mm, ea) != mm->context.user_psize))
                return;
#endif

        DBG_LOW("hash_preload(mm=%p, mm->pgdir=%p, ea=%016lx, access=%lx,"
                " trap=%lx\n", mm, mm->pgd, ea, access, trap);

        /* Get Linux PTE if available */
        pgdir = mm->pgd;
        if (pgdir == NULL)
                return;
        ptep = find_linux_pte(pgdir, ea);
        if (!ptep)
                return;

#ifdef CONFIG_PPC_64K_PAGES
        /* If either _PAGE_4K_PFN or _PAGE_NO_CACHE is set (and we are on
         * a 64K kernel), then we don't preload, hash_page() will take
         * care of it once we actually try to access the page.
         * That way we don't have to duplicate all of the logic for segment
         * page size demotion here
         */
        if (pte_val(*ptep) & (_PAGE_4K_PFN | _PAGE_NO_CACHE))
                return;
#endif /* CONFIG_PPC_64K_PAGES */

        /* Get VSID */
        ssize = user_segment_size(ea);
        vsid = get_vsid(mm->context.id, ea, ssize);

        /* Hash doesn't like irqs */
        local_irq_save(flags);

        /* Is that local to this CPU ? */
        if (cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id())))
                local = 1;

        /* Hash it in */
#ifdef CONFIG_PPC_HAS_HASH_64K
        if (mm->context.user_psize == MMU_PAGE_64K)
                rc = __hash_page_64K(ea, access, vsid, ptep, trap, local, ssize);
        else
#endif /* CONFIG_PPC_HAS_HASH_64K */
                rc = __hash_page_4K(ea, access, vsid, ptep, trap, local, ssize,
                                    subpage_protection(mm, ea));

        /* Dump some info in case of hash insertion failure, they should
         * never happen so it is really useful to know if/when they do
         */
        if (rc == -1)
                hash_failure_debug(ea, access, vsid, trap, ssize,
                                   mm->context.user_psize, pte_val(*ptep));

        local_irq_restore(flags);
}

/* WARNING: This is called from hash_low_64.S, if you change this prototype,
 *          do not forget to update the assembly call site !
 */
void flush_hash_page(unsigned long va, real_pte_t pte, int psize, int ssize,
                     int local)
{
        unsigned long hash, index, shift, hidx, slot;

        DBG_LOW("flush_hash_page(va=%016lx)\n", va);
        pte_iterate_hashed_subpages(pte, psize, va, index, shift) {
                hash = hpt_hash(va, shift, ssize);
                hidx = __rpte_to_hidx(pte, index);
                if (hidx & _PTEIDX_SECONDARY)
                        hash = ~hash;
                slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
                slot += hidx & _PTEIDX_GROUP_IX;
                DBG_LOW(" sub %ld: hash=%lx, hidx=%lx\n", index, slot, hidx);
                ppc_md.hpte_invalidate(slot, va, psize, ssize, local);
        } pte_iterate_hashed_end();
}
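
/*
 * The hidx recorded in the Linux PTE encodes both which hash bucket
 * holds the HPTE (_PTEIDX_SECONDARY selects the secondary hash, the
 * complement of the primary) and the entry's 3-bit index within its
 * 8-slot PTEG (_PTEIDX_GROUP_IX), so the entry can be invalidated
 * without searching the group.
 */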

void flush_hash_range(unsigned long number, int local)
{
        if (ppc_md.flush_hash_range)
                ppc_md.flush_hash_range(number, local);
        else {
                int i;
                struct ppc64_tlb_batch *batch =
                        &__get_cpu_var(ppc64_tlb_batch);

                for (i = 0; i < number; i++)
                        flush_hash_page(batch->vaddr[i], batch->pte[i],
                                        batch->psize, batch->ssize, local);
        }
}

/*
 * low_hash_fault is called when the low level hash code failed
 * to insert a PTE due to a hypervisor error
 */
void low_hash_fault(struct pt_regs *regs, unsigned long address, int rc)
{
        if (user_mode(regs)) {
#ifdef CONFIG_PPC_SUBPAGE_PROT
                if (rc == -2)
                        _exception(SIGSEGV, regs, SEGV_ACCERR, address);
                else
#endif
                        _exception(SIGBUS, regs, BUS_ADRERR, address);
        } else
                bad_page_fault(regs, address, SIGBUS);
}

#ifdef CONFIG_DEBUG_PAGEALLOC
static void kernel_map_linear_page(unsigned long vaddr, unsigned long lmi)
{
        unsigned long hash, hpteg;
        unsigned long vsid = get_kernel_vsid(vaddr, mmu_kernel_ssize);
        unsigned long va = hpt_va(vaddr, vsid, mmu_kernel_ssize);
        unsigned long mode = htab_convert_pte_flags(PAGE_KERNEL);
        int ret;

        hash = hpt_hash(va, PAGE_SHIFT, mmu_kernel_ssize);
        hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP);

        ret = ppc_md.hpte_insert(hpteg, va, __pa(vaddr),
                                 mode, HPTE_V_BOLTED,
                                 mmu_linear_psize, mmu_kernel_ssize);
        BUG_ON (ret < 0);
        spin_lock(&linear_map_hash_lock);
        BUG_ON(linear_map_hash_slots[lmi] & 0x80);
        linear_map_hash_slots[lmi] = ret | 0x80;
        spin_unlock(&linear_map_hash_lock);
}

static void kernel_unmap_linear_page(unsigned long vaddr, unsigned long lmi)
{
        unsigned long hash, hidx, slot;
        unsigned long vsid = get_kernel_vsid(vaddr, mmu_kernel_ssize);
        unsigned long va = hpt_va(vaddr, vsid, mmu_kernel_ssize);

        hash = hpt_hash(va, PAGE_SHIFT, mmu_kernel_ssize);
        spin_lock(&linear_map_hash_lock);
        BUG_ON(!(linear_map_hash_slots[lmi] & 0x80));
        hidx = linear_map_hash_slots[lmi] & 0x7f;
        linear_map_hash_slots[lmi] = 0;
        spin_unlock(&linear_map_hash_lock);
        if (hidx & _PTEIDX_SECONDARY)
                hash = ~hash;
        slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
        slot += hidx & _PTEIDX_GROUP_IX;
        ppc_md.hpte_invalidate(slot, va, mmu_linear_psize, mmu_kernel_ssize, 0);
}

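/*
 * With CONFIG_DEBUG_PAGEALLOC the large linear-mapping page sizes are
 * not used (see htab_init_page_sizes), so the linear map is hashed one
 * base page at a time and pages can be unmapped individually here:
 * enable == 0 pulls a page's HPTE out of the hash table, turning
 * use-after-free style accesses into visible faults.
 */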
void kernel_map_pages(struct page *page, int numpages, int enable)
{
        unsigned long flags, vaddr, lmi;
        int i;

        local_irq_save(flags);
        for (i = 0; i < numpages; i++, page++) {
                vaddr = (unsigned long)page_address(page);
                lmi = __pa(vaddr) >> PAGE_SHIFT;
                if (lmi >= linear_map_hash_count)
                        continue;
                if (enable)
                        kernel_map_linear_page(vaddr, lmi);
                else
                        kernel_unmap_linear_page(vaddr, lmi);
        }
        local_irq_restore(flags);
}
#endif /* CONFIG_DEBUG_PAGEALLOC */

void setup_initial_memory_limit(phys_addr_t first_memblock_base,
                                phys_addr_t first_memblock_size)
{
        /* We don't currently support the first MEMBLOCK not mapping 0
         * physical on those processors
         */
        BUG_ON(first_memblock_base != 0);

        /* On LPAR systems, the first entry is our RMA region,
         * non-LPAR 64-bit hash MMU systems don't have a limitation
         * on real mode access, but using the first entry works well
         * enough. We also clamp it to 1G to avoid some funky things
         * such as RTAS bugs etc...
         */
        ppc64_rma_size = min_t(u64, first_memblock_size, 0x40000000);

        /* Finally limit subsequent allocations */
        memblock_set_current_limit(ppc64_rma_size);
}