/*
 * PowerPC version
 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 * Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 * and Cort Dougan (PReP) (cort@cs.nmt.edu)
 * Copyright (C) 1996 Paul Mackerras
 * Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk).
 * PPC44x/36-bit changes by Matt Porter (mporter@mvista.com)
 *
 * Derived from "arch/i386/mm/init.c"
 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 */

#include <linux/config.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/stddef.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/highmem.h>
#include <linux/initrd.h>
#include <linux/pagemap.h>

#include <asm/pgalloc.h>
#include <asm/prom.h>
#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/smp.h>
#include <asm/machdep.h>
#include <asm/btext.h>
#include <asm/tlb.h>
#include <asm/lmb.h>
#include <asm/sections.h>
#include <asm/vdso.h>

#include "mmu_decl.h"

#ifndef CPU_FTR_COHERENT_ICACHE
#define CPU_FTR_COHERENT_ICACHE	0	/* XXX for now */
#define CPU_FTR_NOEXECUTE	0
#endif

int init_bootmem_done;
int mem_init_done;
unsigned long memory_limit;

extern void hash_preload(struct mm_struct *mm, unsigned long ea,
			 unsigned long access, unsigned long trap);

/*
 * This is called by /dev/mem to know if a given address has to
 * be mapped non-cacheable or not
 */
int page_is_ram(unsigned long pfn)
{
	unsigned long paddr = (pfn << PAGE_SHIFT);

#ifndef CONFIG_PPC64	/* XXX for now */
	return paddr < __pa(high_memory);
#else
	int i;

	for (i = 0; i < lmb.memory.cnt; i++) {
		unsigned long base;

		base = lmb.memory.region[i].base;

		if ((paddr >= base) &&
		    (paddr < (base + lmb.memory.region[i].size))) {
			return 1;
		}
	}

	return 0;
#endif
}
EXPORT_SYMBOL(page_is_ram);

pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
			      unsigned long size, pgprot_t vma_prot)
{
	if (ppc_md.phys_mem_access_prot)
		return ppc_md.phys_mem_access_prot(file, pfn, size, vma_prot);

	if (!page_is_ram(pfn))
		vma_prot = __pgprot(pgprot_val(vma_prot)
				    | _PAGE_GUARDED | _PAGE_NO_CACHE);
	return vma_prot;
}
EXPORT_SYMBOL(phys_mem_access_prot);
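
/*
 * Illustrative sketch only (not part of this file): how a driver's
 * mmap method might apply phys_mem_access_prot() before remapping.
 * The function name "example_mmap" is hypothetical; the block is kept
 * under "#if 0", like the disabled block further down, so it is never
 * built.
 */
#if 0
static int example_mmap(struct file *file, struct vm_area_struct *vma)
{
	unsigned long pfn = vma->vm_pgoff;
	unsigned long size = vma->vm_end - vma->vm_start;

	/* Non-RAM pfns (e.g. MMIO) come back guarded and non-cacheable. */
	vma->vm_page_prot = phys_mem_access_prot(file, pfn, size,
						 vma->vm_page_prot);

	return remap_pfn_range(vma, vma->vm_start, pfn, size,
			       vma->vm_page_prot);
}
#endif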

#ifdef CONFIG_MEMORY_HOTPLUG

void online_page(struct page *page)
{
	ClearPageReserved(page);
	init_page_count(page);
	__free_page(page);
	totalram_pages++;
	num_physpages++;
}

#ifdef CONFIG_NUMA
int memory_add_physaddr_to_nid(u64 start)
{
	return hot_add_scn_to_nid(start);
}
#endif

int __devinit arch_add_memory(int nid, u64 start, u64 size)
{
	struct pglist_data *pgdata;
	struct zone *zone;
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;

	pgdata = NODE_DATA(nid);

	start = (unsigned long)__va(start);
	create_section_mapping(start, start + size);

	/* this should work for most non-highmem platforms */
	zone = pgdata->node_zones;

	return __add_pages(zone, start_pfn, nr_pages);
}

/*
 * First pass at this code will check to determine if the remove
 * request is within the RMO (the region addressable in real mode).
 * Do not allow removal within the RMO.
 */
int __devinit remove_memory(u64 start, u64 size)
{
	struct zone *zone;
	unsigned long start_pfn, end_pfn, nr_pages;

	start_pfn = start >> PAGE_SHIFT;
	nr_pages = size >> PAGE_SHIFT;
	end_pfn = start_pfn + nr_pages;

	printk("%s(): Attempting to remove memory in range "
		"%lx to %lx\n", __func__, start, start + size);
	/*
	 * check for range within RMO
	 */
	zone = page_zone(pfn_to_page(start_pfn));

	printk("%s(): memory will be removed from "
		"the %s zone\n", __func__, zone->name);

	/*
	 * not handling removing memory ranges that
	 * overlap multiple zones yet
	 */
	if (end_pfn > (zone->zone_start_pfn + zone->spanned_pages))
		goto overlap;

	/* make sure it is NOT in RMO */
	if ((start < lmb.rmo_size) || ((start + size) < lmb.rmo_size)) {
		printk("%s(): range to be removed must NOT be in RMO!\n",
			__func__);
		goto in_rmo;
	}

	return __remove_pages(zone, start_pfn, nr_pages);

overlap:
	printk("%s(): memory range to be removed overlaps "
		"multiple zones!\n", __func__);
in_rmo:
	return -1;
}
#endif /* CONFIG_MEMORY_HOTPLUG */

void show_mem(void)
{
	unsigned long total = 0, reserved = 0;
	unsigned long shared = 0, cached = 0;
	unsigned long highmem = 0;
	struct page *page;
	pg_data_t *pgdat;
	unsigned long i;

	printk("Mem-info:\n");
	show_free_areas();
	printk("Free swap: %6ldkB\n", nr_swap_pages << (PAGE_SHIFT - 10));
	for_each_online_pgdat(pgdat) {
		unsigned long flags;

		pgdat_resize_lock(pgdat, &flags);
		for (i = 0; i < pgdat->node_spanned_pages; i++) {
			if (!pfn_valid(pgdat->node_start_pfn + i))
				continue;
			page = pgdat_page_nr(pgdat, i);
			total++;
			if (PageHighMem(page))
				highmem++;
			if (PageReserved(page))
				reserved++;
			else if (PageSwapCache(page))
				cached++;
			else if (page_count(page))
				shared += page_count(page) - 1;
		}
		pgdat_resize_unlock(pgdat, &flags);
	}
	printk("%ld pages of RAM\n", total);
#ifdef CONFIG_HIGHMEM
	printk("%ld pages of HIGHMEM\n", highmem);
#endif
	printk("%ld reserved pages\n", reserved);
	printk("%ld pages shared\n", shared);
	printk("%ld pages swap cached\n", cached);
}

/*
 * Initialize the bootmem system and give it all the memory we
 * have available.  If we are using highmem, we only put the
 * lowmem into the bootmem system.
 */
#ifndef CONFIG_NEED_MULTIPLE_NODES
void __init do_init_bootmem(void)
{
	unsigned long i;
	unsigned long start, bootmap_pages;
	unsigned long total_pages;
	int boot_mapsize;

	max_pfn = total_pages = lmb_end_of_DRAM() >> PAGE_SHIFT;
#ifdef CONFIG_HIGHMEM
	total_pages = total_lowmem >> PAGE_SHIFT;
#endif

	/*
	 * Find an area to use for the bootmem bitmap.  Calculate the size of
	 * bitmap required as (Total Memory) / PAGE_SIZE / BITS_PER_BYTE.
	 * Add 1 additional page in case the address isn't page-aligned.
	 */
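	/*
	 * Worked example (assuming 4K pages): 512MB of memory is 131072
	 * pages, so the bitmap needs 131072 bits = 16KB = 4 pages, plus
	 * the one extra page to cover misalignment.
	 */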
	bootmap_pages = bootmem_bootmap_pages(total_pages);

	start = lmb_alloc(bootmap_pages << PAGE_SHIFT, PAGE_SIZE);

	boot_mapsize = init_bootmem(start >> PAGE_SHIFT, total_pages);

	/* Add all physical memory to the bootmem map, mark each area
	 * present.
	 */
	for (i = 0; i < lmb.memory.cnt; i++) {
		unsigned long base = lmb.memory.region[i].base;
		unsigned long size = lmb_size_bytes(&lmb.memory, i);
#ifdef CONFIG_HIGHMEM
		if (base >= total_lowmem)
			continue;
		if (base + size > total_lowmem)
			size = total_lowmem - base;
#endif
		free_bootmem(base, size);
	}

	/* reserve the sections we're already using */
	for (i = 0; i < lmb.reserved.cnt; i++)
		reserve_bootmem(lmb.reserved.region[i].base,
				lmb_size_bytes(&lmb.reserved, i));

	/* XXX need to clip this if using highmem? */
	for (i = 0; i < lmb.memory.cnt; i++)
		memory_present(0, lmb_start_pfn(&lmb.memory, i),
			       lmb_end_pfn(&lmb.memory, i));
	init_bootmem_done = 1;
}

/*
 * paging_init() sets up the page tables - in fact we've already done this.
 */
void __init paging_init(void)
{
	unsigned long zones_size[MAX_NR_ZONES];
	unsigned long zholes_size[MAX_NR_ZONES];
	unsigned long total_ram = lmb_phys_mem_size();
	unsigned long top_of_ram = lmb_end_of_DRAM();

#ifdef CONFIG_HIGHMEM
	map_page(PKMAP_BASE, 0, 0);	/* XXX gross */
	pkmap_page_table = pte_offset_kernel(
		pmd_offset(pgd_offset_k(PKMAP_BASE), PKMAP_BASE), PKMAP_BASE);
	map_page(KMAP_FIX_BEGIN, 0, 0);	/* XXX gross */
	kmap_pte = pte_offset_kernel(
		pmd_offset(pgd_offset_k(KMAP_FIX_BEGIN), KMAP_FIX_BEGIN),
		KMAP_FIX_BEGIN);
	kmap_prot = PAGE_KERNEL;
#endif /* CONFIG_HIGHMEM */

	printk(KERN_DEBUG "Top of RAM: 0x%lx, Total RAM: 0x%lx\n",
	       top_of_ram, total_ram);
	printk(KERN_DEBUG "Memory hole size: %ldMB\n",
	       (top_of_ram - total_ram) >> 20);
	/*
	 * All pages are DMA-able so we put them all in the DMA zone.
	 */
	memset(zones_size, 0, sizeof(zones_size));
	memset(zholes_size, 0, sizeof(zholes_size));

	zones_size[ZONE_DMA] = top_of_ram >> PAGE_SHIFT;
	zholes_size[ZONE_DMA] = (top_of_ram - total_ram) >> PAGE_SHIFT;
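	/*
	 * For example (illustrative numbers): with 1GB of RAM whose
	 * last byte ends at the 1.25GB mark there is a 256MB hole, so
	 * zones_size[ZONE_DMA] spans up to 1.25GB while
	 * zholes_size[ZONE_DMA] accounts for the missing 256MB.
	 */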

#ifdef CONFIG_HIGHMEM
	zones_size[ZONE_DMA] = total_lowmem >> PAGE_SHIFT;
	zones_size[ZONE_HIGHMEM] = (total_memory - total_lowmem) >> PAGE_SHIFT;
	zholes_size[ZONE_HIGHMEM] = (top_of_ram - total_ram) >> PAGE_SHIFT;
#endif /* CONFIG_HIGHMEM */

	free_area_init_node(0, NODE_DATA(0), zones_size,
			    __pa(PAGE_OFFSET) >> PAGE_SHIFT, zholes_size);
}
#endif /* ! CONFIG_NEED_MULTIPLE_NODES */

void __init mem_init(void)
{
#ifdef CONFIG_NEED_MULTIPLE_NODES
	int nid;
#endif
	pg_data_t *pgdat;
	unsigned long i;
	struct page *page;
	unsigned long reservedpages = 0, codesize, initsize, datasize, bsssize;

	num_physpages = lmb.memory.size >> PAGE_SHIFT;
	high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);

#ifdef CONFIG_NEED_MULTIPLE_NODES
	for_each_online_node(nid) {
		if (NODE_DATA(nid)->node_spanned_pages != 0) {
			printk("freeing bootmem node %d\n", nid);
			totalram_pages +=
				free_all_bootmem_node(NODE_DATA(nid));
		}
	}
#else
	max_mapnr = max_pfn;
	totalram_pages += free_all_bootmem();
#endif
	for_each_online_pgdat(pgdat) {
		for (i = 0; i < pgdat->node_spanned_pages; i++) {
			if (!pfn_valid(pgdat->node_start_pfn + i))
				continue;
			page = pgdat_page_nr(pgdat, i);
			if (PageReserved(page))
				reservedpages++;
		}
	}

	codesize = (unsigned long)&_sdata - (unsigned long)&_stext;
	datasize = (unsigned long)&_edata - (unsigned long)&_sdata;
	initsize = (unsigned long)&__init_end - (unsigned long)&__init_begin;
	bsssize = (unsigned long)&__bss_stop - (unsigned long)&__bss_start;

#ifdef CONFIG_HIGHMEM
	{
		unsigned long pfn, highmem_mapnr;

		highmem_mapnr = total_lowmem >> PAGE_SHIFT;
		for (pfn = highmem_mapnr; pfn < max_mapnr; ++pfn) {
			struct page *page = pfn_to_page(pfn);

			ClearPageReserved(page);
			init_page_count(page);
			__free_page(page);
			totalhigh_pages++;
		}
		totalram_pages += totalhigh_pages;
		printk(KERN_DEBUG "High memory: %luk\n",
		       totalhigh_pages << (PAGE_SHIFT-10));
	}
#endif /* CONFIG_HIGHMEM */

	printk(KERN_INFO "Memory: %luk/%luk available (%luk kernel code, "
	       "%luk reserved, %luk data, %luk bss, %luk init)\n",
	       (unsigned long)nr_free_pages() << (PAGE_SHIFT-10),
	       num_physpages << (PAGE_SHIFT-10),
	       codesize >> 10,
	       reservedpages << (PAGE_SHIFT-10),
	       datasize >> 10,
	       bsssize >> 10,
	       initsize >> 10);

	mem_init_done = 1;

	/* Initialize the vDSO */
	vdso_init();
}

/*
 * This is called when a page has been modified by the kernel.
 * It just marks the page as not i-cache clean.  We do the i-cache
 * flush later when the page is given to a user process, if necessary.
 */
void flush_dcache_page(struct page *page)
{
	if (cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
		return;
	/* avoid an atomic op if possible */
	if (test_bit(PG_arch_1, &page->flags))
		clear_bit(PG_arch_1, &page->flags);
}
EXPORT_SYMBOL(flush_dcache_page);

void flush_dcache_icache_page(struct page *page)
{
#ifdef CONFIG_BOOKE
	void *start = kmap_atomic(page, KM_PPC_SYNC_ICACHE);
	__flush_dcache_icache(start);
	kunmap_atomic(start, KM_PPC_SYNC_ICACHE);
#elif defined(CONFIG_8xx) || defined(CONFIG_PPC64)
	/* On 8xx and 64-bit there is no need to kmap since highmem is
	 * not supported */
	__flush_dcache_icache(page_address(page));
#else
	__flush_dcache_icache_phys(page_to_pfn(page) << PAGE_SHIFT);
#endif
}
void clear_user_page(void *page, unsigned long vaddr, struct page *pg)
{
	clear_page(page);

	/*
	 * We shouldn't have to do this, but some versions of glibc
	 * require it (ld.so assumes zero filled pages are icache clean)
	 * - Anton
	 */
	flush_dcache_page(pg);
}
EXPORT_SYMBOL(clear_user_page);

void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
		    struct page *pg)
{
	copy_page(vto, vfrom);

	/*
	 * We should be able to use the following optimisation, however
	 * there are two problems.
	 * Firstly a bug in some versions of binutils meant PLT sections
	 * were not marked executable.
	 * Secondly the first word in the GOT section is blrl, used
	 * to establish the GOT address.  Until recently the GOT was
	 * not marked executable.
	 * - Anton
	 */
#if 0
	if (!vma->vm_file && ((vma->vm_flags & VM_EXEC) == 0))
		return;
#endif

	flush_dcache_page(pg);
}

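/*
 * Used, for example, by ptrace via copy_to_user_page() after the
 * kernel has written instructions into another process's page, so
 * that the icache sees the new contents.
 */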
void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
			     unsigned long addr, int len)
{
	unsigned long maddr;

	maddr = (unsigned long) kmap(page) + (addr & ~PAGE_MASK);
	flush_icache_range(maddr, maddr + len);
	kunmap(page);
}
EXPORT_SYMBOL(flush_icache_user_range);

/*
 * This is called at the end of handling a user page fault, when the
 * fault has been handled by updating a PTE in the linux page tables.
 * We use it to preload an HPTE into the hash table corresponding to
 * the updated linux PTE.
 *
 * This must always be called with the pte lock held.
 */
void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
		      pte_t pte)
{
#ifdef CONFIG_PPC_STD_MMU
	unsigned long access = 0, trap;
#endif
	unsigned long pfn = pte_pfn(pte);

	/* handle i-cache coherency */
	if (!cpu_has_feature(CPU_FTR_COHERENT_ICACHE) &&
	    !cpu_has_feature(CPU_FTR_NOEXECUTE) &&
	    pfn_valid(pfn)) {
		struct page *page = pfn_to_page(pfn);

		if (!PageReserved(page)
		    && !test_bit(PG_arch_1, &page->flags)) {
			if (vma->vm_mm == current->active_mm) {
#ifdef CONFIG_8xx
				/* On 8xx, cache control instructions (particularly
				 * "dcbst" from flush_dcache_icache) fault as write
				 * operation if there is an unpopulated TLB entry
				 * for the address in question.  To work around that,
				 * we invalidate the TLB here, thus avoiding dcbst
				 * misbehaviour.
				 */
				_tlbie(address);
#endif
				__flush_dcache_icache((void *) address);
			} else
				flush_dcache_icache_page(page);
			set_bit(PG_arch_1, &page->flags);
		}
	}

#ifdef CONFIG_PPC_STD_MMU
	/* We only want HPTEs for linux PTEs that have _PAGE_ACCESSED set */
	if (!pte_young(pte) || address >= TASK_SIZE)
		return;

	/* We try to figure out if we are coming from an instruction
	 * access fault and pass that down to __hash_page so we avoid
	 * double-faulting on execution of fresh text.  We have to test
	 * for regs NULL since init will get here first thing at boot.
	 *
	 * We also avoid filling the hash if not coming from a fault.
	 */
	if (current->thread.regs == NULL)
		return;
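
	/* Trap vector 0x400 is an instruction storage interrupt
	 * (execute fault); 0x300 is a data storage interrupt
	 * (load/store fault).  Anything else is not a fault we
	 * should preload for.
	 */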
	trap = TRAP(current->thread.regs);
	if (trap == 0x400)
		access |= _PAGE_EXEC;
	else if (trap != 0x300)
		return;
	hash_preload(vma->vm_mm, address, access, trap);
#endif /* CONFIG_PPC_STD_MMU */
}